1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <libkern/OSAtomic.h>
30 #include <stdbool.h>
31 #include <sys/systm.h>
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/dirent.h>
35 #include <sys/stat.h>
36 #include <sys/buf.h>
37 #include <sys/mount.h>
38 #include <sys/vnode_if.h>
39 #include <sys/malloc.h>
40 #include <sys/ubc.h>
41 #include <sys/paths.h>
42 #include <sys/quota.h>
43 #include <sys/time.h>
44 #include <sys/disk.h>
45 #include <sys/kauth.h>
46 #include <sys/fsctl.h>
47 #include <sys/xattr.h>
48 #include <sys/decmpfs.h>
49 #include <sys/mman.h>
50 #include <sys/doc_tombstone.h>
51 #include <sys/namei.h>
52 #include <string.h>
53 #include <sys/fsevents.h>
54
55 #include <miscfs/specfs/specdev.h>
56 #include <miscfs/fifofs/fifo.h>
57 #include <vfs/vfs_support.h>
58 #include <machine/spl.h>
59
60 #include <sys/kdebug.h>
61 #include <sys/sysctl.h>
62 #include <stdbool.h>
63
64 #include "hfs.h"
65 #include "hfs_catalog.h"
66 #include "hfs_cnode.h"
67 #include "hfs_dbg.h"
68 #include "hfs_mount.h"
69 #include "hfs_quota.h"
70 #include "hfs_endian.h"
71 #include "hfs_kdebug.h"
72 #include "hfs_cprotect.h"
73
74 #if HFS_CONFIG_KEY_ROLL
75 #include "hfs_key_roll.h"
76 #endif
77
78 #include "BTreesInternal.h"
79 #include "FileMgrInternal.h"
80
81 /* Global vfs data structures for hfs */
82
83 /*
84 * Always F_FULLFSYNC? 1=yes,0=no (default due to "various" reasons is
85 * 'no'). At some point this might need to move into VFS and we might
86 * need to provide an API to get at it, but for now, this is only used
87 * by HFS+.
88 */
89 int always_do_fullfsync = 0;
90 SYSCTL_DECL(_vfs_generic);
91 HFS_SYSCTL(INT, _vfs_generic, OID_AUTO, always_do_fullfsync, CTLFLAG_RW | CTLFLAG_LOCKED, &always_do_fullfsync, 0, "always F_FULLFSYNC when fsync is called")
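/*
 * Usage sketch (assuming the node registers under vfs.generic, which the
 * SYSCTL_DECL above suggests but does not guarantee): an administrator can
 * make every fsync(2) behave like fcntl(F_FULLFSYNC) with, for example,
 *
 *     sysctl -w vfs.generic.always_do_fullfsync=1
 */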
92
93 int hfs_makenode(struct vnode *dvp, struct vnode **vpp,
94 struct componentname *cnp, struct vnode_attr *vap,
95 vfs_context_t ctx);
96 int hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p);
97 int hfs_metasync_all(struct hfsmount *hfsmp);
98
99 int hfs_removedir(struct vnode *, struct vnode *, struct componentname *,
100 int, int);
101 int hfs_removefile(struct vnode *, struct vnode *, struct componentname *,
102 int, int, int, struct vnode *, int);
103
104 /* Used here and in cnode teardown -- for symlinks */
105 int hfs_removefile_callback(struct buf *bp, void *hfsmp);
106
107 enum {
108 HFS_MOVE_DATA_INCLUDE_RSRC = 1,
109 };
110 typedef uint32_t hfs_move_data_options_t;
111
112 static int hfs_move_data(cnode_t *from_cp, cnode_t *to_cp,
113 hfs_move_data_options_t options);
114 static int hfs_move_fork(filefork_t *srcfork, cnode_t *src,
115 filefork_t *dstfork, cnode_t *dst);
116
117 #if HFS_COMPRESSION
118 static int hfs_move_compressed(cnode_t *from_vp, cnode_t *to_vp);
119 #endif
120
121 decmpfs_cnode* hfs_lazy_init_decmpfs_cnode (struct cnode *cp);
122
123 #if FIFO
124 static int hfsfifo_read(struct vnop_read_args *);
125 static int hfsfifo_write(struct vnop_write_args *);
126 static int hfsfifo_close(struct vnop_close_args *);
127
128 extern int (**fifo_vnodeop_p)(void *);
129 #endif /* FIFO */
130
131 int hfs_vnop_close(struct vnop_close_args*);
132 int hfs_vnop_exchange(struct vnop_exchange_args*);
133 int hfs_vnop_fsync(struct vnop_fsync_args*);
134 int hfs_vnop_mkdir(struct vnop_mkdir_args*);
135 int hfs_vnop_mknod(struct vnop_mknod_args*);
136 int hfs_vnop_getattr(struct vnop_getattr_args*);
137 int hfs_vnop_open(struct vnop_open_args*);
138 int hfs_vnop_readdir(struct vnop_readdir_args*);
139 int hfs_vnop_rename(struct vnop_rename_args*);
140 int hfs_vnop_renamex(struct vnop_renamex_args*);
141 int hfs_vnop_rmdir(struct vnop_rmdir_args*);
142 int hfs_vnop_symlink(struct vnop_symlink_args*);
143 int hfs_vnop_setattr(struct vnop_setattr_args*);
144 int hfs_vnop_readlink(struct vnop_readlink_args *);
145 int hfs_vnop_pathconf(struct vnop_pathconf_args *);
146 int hfs_vnop_mmap(struct vnop_mmap_args *ap);
147 int hfsspec_read(struct vnop_read_args *);
148 int hfsspec_write(struct vnop_write_args *);
149 int hfsspec_close(struct vnop_close_args *);
150
151 /* Options for hfs_removedir and hfs_removefile */
152 #define HFSRM_SKIP_RESERVE 0x01
153
154
155
156 /*****************************************************************************
157 *
158 * Common Operations on vnodes
159 *
160 *****************************************************************************/
161
162 /*
163 * Is the given cnode either the .journal or .journal_info_block file on
164 * a volume with an active journal? Many VNOPs use this to deny access
165 * to those files.
166 *
167 * Note: the .journal file on a volume with an external journal still
168 * returns true here, even though it does not actually hold the contents
169 * of the volume's journal.
170 */
171 static _Bool
172 hfs_is_journal_file(struct hfsmount *hfsmp, struct cnode *cp)
173 {
174 if (hfsmp->jnl != NULL &&
175 (cp->c_fileid == hfsmp->hfs_jnlinfoblkid ||
176 cp->c_fileid == hfsmp->hfs_jnlfileid)) {
177 return true;
178 } else {
179 return false;
180 }
181 }
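/*
 * Usage sketch: callers in this file use the check to refuse the operation
 * outright, e.g. in hfs_vnop_open() and hfs_vnop_setattr() below:
 *
 *     if (hfs_is_journal_file(hfsmp, cp))
 *             return (EPERM);
 */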
182
183 /*
184 * Create a regular file.
185 */
186 int
187 hfs_vnop_create(struct vnop_create_args *ap)
188 {
189 /*
190 * We leave handling of certain race conditions here to the caller
191 * which will have a better understanding of the semantics it
192 * requires. For example, if it turns out that the file exists,
193 * it would be wrong of us to return a reference to the existing
194 * file because the caller might not want that and it would be
195 * misleading to suggest the file had been created when it hadn't
196 * been. Note that our NFS server code does not set the
197 * VA_EXCLUSIVE flag so you cannot assume that callers don't want
198 * EEXIST errors if it's not set. The common case, where users
199 * are calling open with the O_CREAT mode, is handled in VFS; when
200 * we return EEXIST, it will loop and do the look-up again.
201 */
202 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
203 }
204
205 /*
206 * Make device special file.
207 */
208 int
209 hfs_vnop_mknod(struct vnop_mknod_args *ap)
210 {
211 struct vnode_attr *vap = ap->a_vap;
212 struct vnode *dvp = ap->a_dvp;
213 struct vnode **vpp = ap->a_vpp;
214 struct cnode *cp;
215 int error;
216
217 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord) {
218 return (ENOTSUP);
219 }
220
221 /* Create the vnode */
222 error = hfs_makenode(dvp, vpp, ap->a_cnp, vap, ap->a_context);
223 if (error)
224 return (error);
225
226 cp = VTOC(*vpp);
227 cp->c_touch_acctime = TRUE;
228 cp->c_touch_chgtime = TRUE;
229 cp->c_touch_modtime = TRUE;
230
231 if ((vap->va_rdev != VNOVAL) &&
232 (vap->va_type == VBLK || vap->va_type == VCHR))
233 cp->c_rdev = vap->va_rdev;
234
235 return (0);
236 }
237
238 #if HFS_COMPRESSION
239 /*
240 * hfs_ref_data_vp(): returns the data fork vnode for a given cnode.
241 * In the (hopefully rare) case where the data fork vnode is not
242 * present, it will use hfs_vget() to create a new vnode for the
243 * data fork.
244 *
245 * NOTE: If successful and a vnode is returned, the caller is responsible
246 * for releasing the returned vnode with vnode_rele().
247 */
248 static int
249 hfs_ref_data_vp(struct cnode *cp, struct vnode **data_vp, int skiplock)
250 {
251 int vref = 0;
252
253 if (!data_vp || !cp) /* sanity check incoming parameters */
254 return EINVAL;
255
256 /* maybe we should take the hfs cnode lock here, and if so, use the skiplock parameter to tell us not to */
257
258 if (!skiplock) hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
259 struct vnode *c_vp = cp->c_vp;
260 if (c_vp) {
261 /* we already have a data vnode */
262 *data_vp = c_vp;
263 vref = vnode_ref(*data_vp);
264 if (!skiplock) hfs_unlock(cp);
265 if (vref == 0) {
266 return 0;
267 }
268 return EINVAL;
269 }
270 /* no data fork vnode in the cnode, so ask hfs for one. */
271
272 if (!cp->c_rsrc_vp) {
273 /* if we don't have either a c_vp or c_rsrc_vp, we can't really do anything useful */
274 *data_vp = NULL;
275 if (!skiplock) hfs_unlock(cp);
276 return EINVAL;
277 }
278
279 if (0 == hfs_vget(VTOHFS(cp->c_rsrc_vp), cp->c_cnid, data_vp, 1, 0) &&
280 0 != data_vp) {
281 vref = vnode_ref(*data_vp);
282 vnode_put(*data_vp);
283 if (!skiplock) hfs_unlock(cp);
284 if (vref == 0) {
285 return 0;
286 }
287 return EINVAL;
288 }
289 /* there was an error getting the vnode */
290 *data_vp = NULL;
291 if (!skiplock) hfs_unlock(cp);
292 return EINVAL;
293 }
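/*
 * Usage sketch, mirroring hfs_file_is_compressed() and hfs_vnop_open()
 * below: the returned vnode carries a reference from vnode_ref(), so the
 * caller must drop it with vnode_rele() when finished.
 *
 *     struct vnode *data_vp = NULL;
 *     if (hfs_ref_data_vp(cp, &data_vp, 1) == 0 && data_vp) {
 *             // ... operate on the data fork ...
 *             vnode_rele(data_vp);
 *     }
 */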
294
295 /*
296 * hfs_lazy_init_decmpfs_cnode(): returns the decmpfs_cnode for a cnode,
297 * allocating it if necessary; returns NULL if there was an allocation error.
298 * function is non-static so that it can be used from the FCNTL handler.
299 */
300 decmpfs_cnode *
301 hfs_lazy_init_decmpfs_cnode(struct cnode *cp)
302 {
303 if (!cp->c_decmp) {
304 decmpfs_cnode *dp = decmpfs_cnode_alloc();
305 decmpfs_cnode_init(dp);
306 if (!OSCompareAndSwapPtr(NULL, dp, (void * volatile *)&cp->c_decmp)) {
307 /* another thread got here first, so free the decmpfs_cnode we allocated */
308 decmpfs_cnode_destroy(dp);
309 decmpfs_cnode_free(dp);
310 }
311 }
312
313 return cp->c_decmp;
314 }
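/*
 * Note on the pattern above: two racing callers may both allocate a
 * decmpfs_cnode, but OSCompareAndSwapPtr() installs only one of them in
 * cp->c_decmp and the loser destroys and frees its copy, so callers can
 * use the returned pointer without holding the cnode lock.
 */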
315
316 /*
317 * hfs_file_is_compressed(): returns 1 if the file is compressed, and 0 (zero) if not.
318 * if the file's compressed flag is set, makes sure that the decmpfs_cnode field
319 * is allocated by calling hfs_lazy_init_decmpfs_cnode(), then makes sure it is populated,
320 * or else fills it in via the decmpfs_file_is_compressed() function.
321 */
322 int
323 hfs_file_is_compressed(struct cnode *cp, int skiplock)
324 {
325 int ret = 0;
326
327 /* fast check to see if file is compressed. If flag is clear, just answer no */
328 if (!(cp->c_bsdflags & UF_COMPRESSED)) {
329 return 0;
330 }
331
332 decmpfs_cnode *dp = hfs_lazy_init_decmpfs_cnode(cp);
333 if (!dp) {
334 /* error allocating a decmpfs cnode, treat the file as uncompressed */
335 return 0;
336 }
337
338 /* flag was set, see if the decmpfs_cnode state is valid (zero == invalid) */
339 uint32_t decmpfs_state = decmpfs_cnode_get_vnode_state(dp);
340 switch(decmpfs_state) {
341 case FILE_IS_COMPRESSED:
342 case FILE_IS_CONVERTING: /* treat decompressing files as if they are compressed */
343 return 1;
344 case FILE_IS_NOT_COMPRESSED:
345 return 0;
346 /* otherwise the state is not cached yet */
347 }
348
349 /* decmpfs hasn't seen this file yet, so call decmpfs_file_is_compressed() to init the decmpfs_cnode struct */
350 struct vnode *data_vp = NULL;
351 if (0 == hfs_ref_data_vp(cp, &data_vp, skiplock)) {
352 if (data_vp) {
353 ret = decmpfs_file_is_compressed(data_vp, VTOCMP(data_vp)); // fill in decmpfs_cnode
354 vnode_rele(data_vp);
355 }
356 }
357 return ret;
358 }
359
360 /* hfs_uncompressed_size_of_compressed_file() - get the uncompressed size of the file.
361 * if the caller has passed a valid vnode (has a ref count > 0), then hfsmp and fid are not required.
362 * if the caller doesn't have a vnode, pass NULL in vp, and pass valid hfsmp and fid.
 363  * the file's size is returned in *size (required)
364 * if the indicated file is a directory (or something that doesn't have a data fork), then this call
365 * will return an error and the caller should fall back to treating the item as an uncompressed file
366 */
367 int
368 hfs_uncompressed_size_of_compressed_file(struct hfsmount *hfsmp, struct vnode *vp, cnid_t fid, off_t *size, int skiplock)
369 {
370 int ret = 0;
371 int putaway = 0; /* flag to remember if we used hfs_vget() */
372
373 if (!size) {
374 return EINVAL; /* no place to put the file size */
375 }
376
377 if (NULL == vp) {
378 if (!hfsmp || !fid) { /* make sure we have the required parameters */
379 return EINVAL;
380 }
381 if (0 != hfs_vget(hfsmp, fid, &vp, skiplock, 0)) { /* vnode is null, use hfs_vget() to get it */
382 vp = NULL;
383 } else {
 384 			putaway = 1; /* note that hfs_vget() was used to acquire the vnode */
385 }
386 }
387 /* this double check for compression (hfs_file_is_compressed)
388 * ensures the cached size is present in case decmpfs hasn't
389 * encountered this node yet.
390 */
391 if (vp) {
392 if (hfs_file_is_compressed(VTOC(vp), skiplock) ) {
393 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
394 } else if (VTOCMP(vp)) {
395 uint32_t cmp_type = decmpfs_cnode_cmp_type(VTOCMP(vp));
396
397 if (cmp_type == DATALESS_CMPFS_TYPE) {
398 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
399 ret = 0;
400 } else if (cmp_type >= CMP_MAX && VTOC(vp)->c_datafork) {
401 // if we don't recognize this type, just use the real data fork size
402 *size = VTOC(vp)->c_datafork->ff_size;
403 ret = 0;
404 } else
405 ret = EINVAL;
406 } else
407 ret = EINVAL;
408 }
409
410 if (putaway) { /* did we use hfs_vget() to get this vnode? */
411 vnode_put(vp); /* if so, release it and set it to null */
412 vp = NULL;
413 }
414 return ret;
415 }
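/*
 * Usage sketch for the two calling modes described above (the second form
 * assumes the caller has a valid hfsmp and file ID but no vnode):
 *
 *     off_t size;
 *     // with a vnode that already has an iocount:
 *     error = hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &size, 0);
 *     // without a vnode; hfs_vget()/vnode_put() are handled internally:
 *     error = hfs_uncompressed_size_of_compressed_file(hfsmp, NULL, fid, &size, 0);
 */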
416
417 int
418 hfs_hides_rsrc(vfs_context_t ctx, struct cnode *cp, int skiplock)
419 {
420 if (ctx == decmpfs_ctx)
421 return 0;
422 if (!hfs_file_is_compressed(cp, skiplock))
423 return 0;
424 return decmpfs_hides_rsrc(ctx, cp->c_decmp);
425 }
426
427 int
428 hfs_hides_xattr(vfs_context_t ctx, struct cnode *cp, const char *name, int skiplock)
429 {
430 if (ctx == decmpfs_ctx)
431 return 0;
432 if (!hfs_file_is_compressed(cp, skiplock))
433 return 0;
434 return decmpfs_hides_xattr(ctx, cp->c_decmp, name);
435 }
436 #endif /* HFS_COMPRESSION */
437
438 /*
439 * Open a file/directory.
440 */
441 int
442 hfs_vnop_open(struct vnop_open_args *ap)
443 {
444 struct vnode *vp = ap->a_vp;
445 struct filefork *fp;
446 struct timeval tv;
447 int error;
448 static int past_bootup = 0;
449 struct cnode *cp = VTOC(vp);
450 struct hfsmount *hfsmp = VTOHFS(vp);
451
452 #if CONFIG_PROTECT
453 error = cp_handle_open(vp, ap->a_mode);
454 if (error)
455 return error;
456 #endif
457
458 #if HFS_COMPRESSION
459 if (ap->a_mode & FWRITE) {
460 /* open for write */
461 if ( hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
462 /* opening a compressed file for write, so convert it to decompressed */
463 struct vnode *data_vp = NULL;
464 error = hfs_ref_data_vp(cp, &data_vp, 1); /* 1 == don't take the cnode lock */
465 if (0 == error) {
466 if (data_vp) {
467 error = decmpfs_decompress_file(data_vp, VTOCMP(data_vp), -1, 1, 0);
468 vnode_rele(data_vp);
469 } else {
470 error = EINVAL;
471 }
472 }
473 if (error != 0)
474 return error;
475 }
476 } else {
477 /* open for read */
478 if (hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
479 if (VNODE_IS_RSRC(vp)) {
480 /* opening the resource fork of a compressed file, so nothing to do */
481 } else {
482 /* opening a compressed file for read, make sure it validates */
483 error = decmpfs_validate_compressed_file(vp, VTOCMP(vp));
484 if (error != 0)
485 return error;
486 }
487 }
488 }
489 #endif
490
491 /*
492 * Files marked append-only must be opened for appending.
493 */
494 if ((cp->c_bsdflags & APPEND) && !vnode_isdir(vp) &&
495 (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
496 return (EPERM);
497
498 if (vnode_issystem(vp))
499 return (EBUSY); /* file is in use by the kernel */
500
501 /* Don't allow journal to be opened externally. */
502 if (hfs_is_journal_file(hfsmp, cp))
503 return (EPERM);
504
505 bool have_lock = false;
506
507 #if CONFIG_PROTECT
508 if (ISSET(ap->a_mode, FENCRYPTED) && cp->c_cpentry && vnode_isreg(vp)) {
509 bool have_trunc_lock = false;
510
511 #if HFS_CONFIG_KEY_ROLL
512 again:
513 #endif
514
515 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
516 if (have_trunc_lock)
517 hfs_unlock_truncate(cp, 0);
518 return error;
519 }
520
521 have_lock = true;
522
523 if (cp->c_cpentry->cp_raw_open_count + 1
524 < cp->c_cpentry->cp_raw_open_count) {
525 // Overflow; too many raw opens on this file
526 hfs_unlock(cp);
527 if (have_trunc_lock)
528 hfs_unlock_truncate(cp, 0);
529 return ENFILE;
530 }
531
532 #if HFS_CONFIG_KEY_ROLL
533 if (cp_should_auto_roll(hfsmp, cp->c_cpentry)) {
534 if (!have_trunc_lock) {
535 hfs_unlock(cp);
536 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, 0);
537 have_trunc_lock = true;
538 goto again;
539 }
540
541 error = hfs_key_roll_start(cp);
542 if (error) {
543 hfs_unlock(cp);
544 hfs_unlock_truncate(cp, 0);
545 return error;
546 }
547 }
548 #endif
549
550 if (have_trunc_lock)
551 hfs_unlock_truncate(cp, 0);
552
553 ++cp->c_cpentry->cp_raw_open_count;
554 }
555 #endif
556
557 if (ISSET(hfsmp->hfs_flags, HFS_READ_ONLY)
558 || !vnode_isreg(vp)
559 #if NAMEDSTREAMS
560 || vnode_isnamedstream(vp)
561 #endif
562 || !hfsmp->jnl || vnode_isinuse(vp, 0)) {
563
564 #if CONFIG_PROTECT
565 if (have_lock)
566 hfs_unlock(cp);
567 #endif
568
569 return (0);
570 }
571
572 if (!have_lock && (error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
573 return (error);
574
575 #if QUOTA
576 /* If we're going to write to the file, initialize quotas. */
577 if ((ap->a_mode & FWRITE) && (hfsmp->hfs_flags & HFS_QUOTAS))
578 (void)hfs_getinoquota(cp);
579 #endif /* QUOTA */
580
581 /*
582 * On the first (non-busy) open of a fragmented
583 * file attempt to de-frag it, if it's less than hfs_defrag_max bytes.
584 * That field is initially set to 20MB.
585 */
586 fp = VTOF(vp);
587 if (fp->ff_blocks &&
588 fp->ff_extents[7].blockCount != 0 &&
589 fp->ff_size <= hfsmp->hfs_defrag_max) {
590
591 int no_mods = 0;
592 struct timeval now;
593 /*
594 * Wait until system bootup is done (3 min).
595 * And don't relocate a file that's been modified
596 * within the past minute -- this can lead to
597 * system thrashing.
598 */
599
600 if (hfsmp->hfs_defrag_nowait) {
601 /* If this is toggled, then issue the defrag if appropriate */
602 past_bootup = 1;
603 no_mods = 1;
604 }
605
606 if (!past_bootup) {
607 microuptime(&tv);
608 if (tv.tv_sec > (60*3)) {
609 past_bootup = 1;
610 }
611 }
612
613 microtime(&now);
614 if ((now.tv_sec - cp->c_mtime) > 60) {
615 no_mods = 1;
616 }
617
618 if (past_bootup && no_mods) {
619 (void) hfs_relocate(vp, hfsmp->nextAllocation + 4096,
620 vfs_context_ucred(ap->a_context),
621 vfs_context_proc(ap->a_context));
622 }
623 }
624
625 hfs_unlock(cp);
626
627 return (0);
628 }
629
630
631 /*
632 * Close a file/directory.
633 */
634 int
635 hfs_vnop_close(struct vnop_close_args *ap)
636 {
637 register struct vnode *vp = ap->a_vp;
638 register struct cnode *cp;
639 struct proc *p = vfs_context_proc(ap->a_context);
640 struct hfsmount *hfsmp;
641 int busy;
642 int tooktrunclock = 0;
643 int knownrefs = 0;
644
645 if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0)
646 return (0);
647 cp = VTOC(vp);
648 hfsmp = VTOHFS(vp);
649
650 #if CONFIG_PROTECT
651 if (cp->c_cpentry && ISSET(ap->a_fflag, FENCRYPTED) && vnode_isreg(vp)) {
652 hfs_assert(cp->c_cpentry->cp_raw_open_count > 0);
653 --cp->c_cpentry->cp_raw_open_count;
654 }
655 #endif
656
657 /*
658 * If the rsrc fork is a named stream, it can cause the data fork to
659 * stay around, preventing de-allocation of these blocks.
660 * Do checks for truncation on close. Purge extra extents if they exist.
661 * Make sure the vp is not a directory, and that it has a resource fork,
662 * and that resource fork is also a named stream.
663 */
664
665 if ((vnode_vtype(vp) == VREG) && (cp->c_rsrc_vp)
666 && (vnode_isnamedstream(cp->c_rsrc_vp))) {
667 uint32_t blks;
668
669 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
670 /*
671 * If there are extra blocks and there are only 2 refs on
672 * this vp (ourselves + rsrc fork holding ref on us), go ahead
673 * and try to truncate.
674 */
675 if ((blks < VTOF(vp)->ff_blocks) && (!vnode_isinuse(vp, 2))) {
676 // release cnode lock; must acquire truncate lock BEFORE cnode lock
677 hfs_unlock(cp);
678
679 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
680 tooktrunclock = 1;
681
682 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
683 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
684 // bail out if we can't re-acquire cnode lock
685 return 0;
686 }
687 // now re-test to make sure it's still valid
688 if (cp->c_rsrc_vp) {
689 knownrefs = 1 + vnode_isnamedstream(cp->c_rsrc_vp);
690 if (!vnode_isinuse(vp, knownrefs)){
691 // now we can truncate the file, if necessary
692 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
693 if (blks < VTOF(vp)->ff_blocks){
694 (void) hfs_truncate(vp, VTOF(vp)->ff_size, IO_NDELAY,
695 0, ap->a_context);
696 }
697 }
698 }
699 }
700 }
701
702
703 // if we froze the fs and we're exiting, then "thaw" the fs
704 if (hfsmp->hfs_freeze_state == HFS_FROZEN
705 && hfsmp->hfs_freezing_proc == p && proc_exiting(p)) {
706 hfs_thaw(hfsmp, p);
707 }
708
709 busy = vnode_isinuse(vp, 1);
710
711 if (busy) {
712 hfs_touchtimes(VTOHFS(vp), cp);
713 }
714 if (vnode_isdir(vp)) {
715 hfs_reldirhints(cp, busy);
716 } else if (vnode_issystem(vp) && !busy) {
717 vnode_recycle(vp);
718 }
719
720 if (tooktrunclock){
721 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
722 }
723 hfs_unlock(cp);
724
725 if (ap->a_fflag & FWASWRITTEN) {
726 hfs_sync_ejectable(hfsmp);
727 }
728
729 return (0);
730 }
731
732 static bool hfs_should_generate_document_id(hfsmount_t *hfsmp, cnode_t *cp)
733 {
734 return (!ISSET(hfsmp->hfs_flags, HFS_READ_ONLY)
735 && ISSET(cp->c_bsdflags, UF_TRACKED)
736 && cp->c_desc.cd_cnid != kHFSRootFolderID
737 && (S_ISDIR(cp->c_mode) || S_ISREG(cp->c_mode) || S_ISLNK(cp->c_mode)));
738 }
739
740 /*
741 * Get basic attributes.
742 */
743 int
744 hfs_vnop_getattr(struct vnop_getattr_args *ap)
745 {
746 #define VNODE_ATTR_TIMES \
747 (VNODE_ATTR_va_access_time|VNODE_ATTR_va_change_time|VNODE_ATTR_va_modify_time)
748 #define VNODE_ATTR_AUTH \
749 (VNODE_ATTR_va_mode | VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | \
750 VNODE_ATTR_va_flags | VNODE_ATTR_va_acl)
751
752 struct vnode *vp = ap->a_vp;
753 struct vnode_attr *vap = ap->a_vap;
754 struct vnode *rvp = NULLVP;
755 struct hfsmount *hfsmp;
756 struct cnode *cp;
757 uint64_t data_size;
758 enum vtype v_type;
759 int error = 0;
760 cp = VTOC(vp);
761
762 #if HFS_COMPRESSION
763 /* we need to inspect the decmpfs state of the file before we take the hfs cnode lock */
764 int compressed = 0;
765 int hide_size = 0;
766 off_t uncompressed_size = -1;
767 if (VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_alloc) || VATTR_IS_ACTIVE(vap, va_data_alloc) || VATTR_IS_ACTIVE(vap, va_total_size)) {
768 /* we only care about whether the file is compressed if asked for the uncompressed size */
769 if (VNODE_IS_RSRC(vp)) {
770 /* if it's a resource fork, decmpfs may want us to hide the size */
771 hide_size = hfs_hides_rsrc(ap->a_context, cp, 0);
772 } else {
773 /* if it's a data fork, we need to know if it was compressed so we can report the uncompressed size */
774 compressed = hfs_file_is_compressed(cp, 0);
775 }
776 if ((VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_size))) {
777 // if it's compressed
778 if (compressed || (!VNODE_IS_RSRC(vp) && cp->c_decmp && decmpfs_cnode_cmp_type(cp->c_decmp) >= CMP_MAX)) {
779 if (0 != hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &uncompressed_size, 0)) {
780 /* failed to get the uncompressed size, we'll check for this later */
781 uncompressed_size = -1;
782 } else {
783 // fake that it's compressed
784 compressed = 1;
785 }
786 }
787 }
788 }
789 #endif
790
791 /*
792 * Shortcut for vnode_authorize path. Each of the attributes
793 * in this set is updated atomically so we don't need to take
794 * the cnode lock to access them.
795 */
796 if ((vap->va_active & ~VNODE_ATTR_AUTH) == 0) {
797 /* Make sure file still exists. */
798 if (cp->c_flag & C_NOEXISTS)
799 return (ENOENT);
800
801 vap->va_uid = cp->c_uid;
802 vap->va_gid = cp->c_gid;
803 vap->va_mode = cp->c_mode;
804 vap->va_flags = cp->c_bsdflags;
805 vap->va_supported |= VNODE_ATTR_AUTH & ~VNODE_ATTR_va_acl;
806
807 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
808 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
809 VATTR_SET_SUPPORTED(vap, va_acl);
810 }
811
812 return (0);
813 }
814
815 hfsmp = VTOHFS(vp);
816 v_type = vnode_vtype(vp);
817
818 if (VATTR_IS_ACTIVE(vap, va_document_id)) {
819 uint32_t document_id;
820
821 if (cp->c_desc.cd_cnid == kHFSRootFolderID)
822 document_id = kHFSRootFolderID;
823 else {
824 /*
825 * This is safe without a lock because we're just reading
826 * a 32 bit aligned integer which should be atomic on all
827 * platforms we support.
828 */
829 document_id = hfs_get_document_id(cp);
830
831 if (!document_id && hfs_should_generate_document_id(hfsmp, cp)) {
832 uint32_t new_document_id;
833
834 error = hfs_generate_document_id(hfsmp, &new_document_id);
835 if (error)
836 return error;
837
838 error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
839 if (error)
840 return error;
841
842 bool want_docid_fsevent = false;
843
844 // Need to check again now that we have the lock
845 document_id = hfs_get_document_id(cp);
846 if (!document_id && hfs_should_generate_document_id(hfsmp, cp)) {
847 cp->c_attr.ca_finderextendeddirinfo.document_id = document_id = new_document_id;
848 want_docid_fsevent = true;
849 SET(cp->c_flag, C_MODIFIED);
850 }
851
852 hfs_unlock(cp);
853
854 if (want_docid_fsevent) {
855 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
856 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
857 FSE_ARG_INO, (ino64_t)0, // src inode #
858 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
859 FSE_ARG_INT32, document_id,
860 FSE_ARG_DONE);
861
862 if (need_fsevent(FSE_STAT_CHANGED, vp)) {
863 add_fsevent(FSE_STAT_CHANGED, ap->a_context,
864 FSE_ARG_VNODE, vp, FSE_ARG_DONE);
865 }
866 }
867 }
868 }
869
870 vap->va_document_id = document_id;
871 VATTR_SET_SUPPORTED(vap, va_document_id);
872 }
873
874 /*
875 * If time attributes are requested and we have cnode times
876 * that require updating, then acquire an exclusive lock on
877 * the cnode before updating the times. Otherwise we can
878 * just acquire a shared lock.
879 */
880 if ((vap->va_active & VNODE_ATTR_TIMES) &&
881 (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime)) {
882 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
883 return (error);
884 hfs_touchtimes(hfsmp, cp);
885
886 // downgrade to a shared lock since that's all we need from here on out
887 cp->c_lockowner = HFS_SHARED_OWNER;
888 lck_rw_lock_exclusive_to_shared(&cp->c_rwlock);
889
890 } else if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
891 return (error);
892 }
893
894 if (v_type == VDIR) {
895 data_size = (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE;
896
897 if (VATTR_IS_ACTIVE(vap, va_nlink)) {
898 int nlink;
899
900 /*
 901 		 * For directories, the va_nlink is essentially a count
902 * of the ".." references to a directory plus the "."
903 * reference and the directory itself. So for HFS+ this
904 * becomes the sub-directory count plus two.
905 *
906 * In the absence of a sub-directory count we use the
907 * directory's item count. This will be too high in
908 * most cases since it also includes files.
909 */
910 if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
911 (cp->c_attr.ca_recflags & kHFSHasFolderCountMask))
912 nlink = cp->c_attr.ca_dircount; /* implied ".." entries */
913 else
914 nlink = cp->c_entries;
915
916 /* Account for ourself and our "." entry */
917 nlink += 2;
918 /* Hide our private directories. */
919 if (cp->c_cnid == kHFSRootFolderID) {
920 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0) {
921 --nlink;
922 }
923 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0) {
924 --nlink;
925 }
926 }
927 VATTR_RETURN(vap, va_nlink, (u_int64_t)nlink);
928 }
929 if (VATTR_IS_ACTIVE(vap, va_nchildren)) {
930 int entries;
931
932 entries = cp->c_entries;
933 /* Hide our private files and directories. */
934 if (cp->c_cnid == kHFSRootFolderID) {
935 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0)
936 --entries;
937 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0)
938 --entries;
939 if (hfsmp->jnl || ((hfsmp->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))
940 entries -= 2; /* hide the journal files */
941 }
942 VATTR_RETURN(vap, va_nchildren, entries);
943 }
944 /*
945 * The va_dirlinkcount is the count of real directory hard links.
 946 	 * (i.e. it's not the sum of the implied "." and ".." references)
947 */
948 if (VATTR_IS_ACTIVE(vap, va_dirlinkcount)) {
949 VATTR_RETURN(vap, va_dirlinkcount, (uint32_t)cp->c_linkcount);
950 }
951 } else /* !VDIR */ {
952 data_size = VCTOF(vp, cp)->ff_size;
953
954 VATTR_RETURN(vap, va_nlink, (u_int64_t)cp->c_linkcount);
955 if (VATTR_IS_ACTIVE(vap, va_data_alloc)) {
956 u_int64_t blocks;
957
958 #if HFS_COMPRESSION
959 if (hide_size) {
960 VATTR_RETURN(vap, va_data_alloc, 0);
961 } else if (compressed) {
962 /* for compressed files, we report all allocated blocks as belonging to the data fork */
963 blocks = cp->c_blocks;
964 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
965 }
966 else
967 #endif
968 {
969 blocks = VCTOF(vp, cp)->ff_blocks;
970 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
971 }
972 }
973 }
974
975 /* conditional because 64-bit arithmetic can be expensive */
976 if (VATTR_IS_ACTIVE(vap, va_total_size)) {
977 if (v_type == VDIR) {
978 VATTR_RETURN(vap, va_total_size, (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE);
979 } else {
980 u_int64_t total_size = ~0ULL;
981 struct cnode *rcp;
982 #if HFS_COMPRESSION
983 if (hide_size) {
984 /* we're hiding the size of this file, so just return 0 */
985 total_size = 0;
986 } else if (compressed) {
987 if (uncompressed_size == -1) {
988 /*
989 * We failed to get the uncompressed size above,
990 * so we'll fall back to the standard path below
991 * since total_size is still -1
992 */
993 } else {
994 /* use the uncompressed size we fetched above */
995 total_size = uncompressed_size;
996 }
997 }
998 #endif
999 if (total_size == ~0ULL) {
1000 if (cp->c_datafork) {
1001 total_size = cp->c_datafork->ff_size;
1002 }
1003
1004 if (cp->c_blocks - VTOF(vp)->ff_blocks) {
1005 /* We deal with rsrc fork vnode iocount at the end of the function */
1006 error = hfs_vgetrsrc(hfsmp, vp, &rvp);
1007 if (error) {
1008 /*
1009 * Note that we call hfs_vgetrsrc with error_on_unlinked
1010 * set to FALSE. This is because we may be invoked via
1011 * fstat() on an open-unlinked file descriptor and we must
1012 * continue to support access to the rsrc fork until it disappears.
1013 * The code at the end of this function will be
1014 * responsible for releasing the iocount generated by
1015 * hfs_vgetrsrc. This is because we can't drop the iocount
1016 * without unlocking the cnode first.
1017 */
1018 goto out;
1019 }
1020
1021 rcp = VTOC(rvp);
1022 if (rcp && rcp->c_rsrcfork) {
1023 total_size += rcp->c_rsrcfork->ff_size;
1024 }
1025 }
1026 }
1027
1028 VATTR_RETURN(vap, va_total_size, total_size);
1029 }
1030 }
1031 if (VATTR_IS_ACTIVE(vap, va_total_alloc)) {
1032 if (v_type == VDIR) {
1033 VATTR_RETURN(vap, va_total_alloc, 0);
1034 } else {
1035 VATTR_RETURN(vap, va_total_alloc, (u_int64_t)cp->c_blocks * (u_int64_t)hfsmp->blockSize);
1036 }
1037 }
1038
1039 /*
1040 * If the VFS wants extended security data, and we know that we
1041 * don't have any (because it never told us it was setting any)
1042 * then we can return the supported bit and no data. If we do
1043 * have extended security, we can just leave the bit alone and
1044 * the VFS will use the fallback path to fetch it.
1045 */
1046 if (VATTR_IS_ACTIVE(vap, va_acl)) {
1047 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
1048 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
1049 VATTR_SET_SUPPORTED(vap, va_acl);
1050 }
1051 }
1052
1053 vap->va_access_time.tv_sec = cp->c_atime;
1054 vap->va_access_time.tv_nsec = 0;
1055 vap->va_create_time.tv_sec = cp->c_itime;
1056 vap->va_create_time.tv_nsec = 0;
1057 vap->va_modify_time.tv_sec = cp->c_mtime;
1058 vap->va_modify_time.tv_nsec = 0;
1059 vap->va_change_time.tv_sec = cp->c_ctime;
1060 vap->va_change_time.tv_nsec = 0;
1061 vap->va_backup_time.tv_sec = cp->c_btime;
1062 vap->va_backup_time.tv_nsec = 0;
1063
1064 /* See if we need to emit the date added field to the user */
1065 if (VATTR_IS_ACTIVE(vap, va_addedtime)) {
1066 u_int32_t dateadded = hfs_get_dateadded (cp);
1067 if (dateadded) {
1068 vap->va_addedtime.tv_sec = dateadded;
1069 vap->va_addedtime.tv_nsec = 0;
1070 VATTR_SET_SUPPORTED (vap, va_addedtime);
1071 }
1072 }
1073
1074 /* XXX is this really a good 'optimal I/O size'? */
1075 vap->va_iosize = hfsmp->hfs_logBlockSize;
1076 vap->va_uid = cp->c_uid;
1077 vap->va_gid = cp->c_gid;
1078 vap->va_mode = cp->c_mode;
1079 vap->va_flags = cp->c_bsdflags;
1080
1081 /*
1082 * Exporting file IDs from HFS Plus:
1083 *
1084 * For "normal" files the c_fileid is the same value as the
1085 * c_cnid. But for hard link files, they are different - the
1086 * c_cnid belongs to the active directory entry (ie the link)
1087 * and the c_fileid is for the actual inode (ie the data file).
1088 *
1089 * The stat call (getattr) uses va_fileid and the Carbon APIs,
1090 * which are hardlink-ignorant, will ask for va_linkid.
1091 */
1092 vap->va_fileid = (u_int64_t)cp->c_fileid;
1093 /*
1094 * We need to use the origin cache for both hardlinked files
1095 * and directories. Hardlinked directories have multiple cnids
1096 * and parents (one per link). Hardlinked files also have their
1097 * own parents and link IDs separate from the indirect inode number.
1098 * If we don't use the cache, we could end up vending the wrong ID
1099 * because the cnode will only reflect the link that was looked up most recently.
1100 */
1101 if (cp->c_flag & C_HARDLINK) {
1102 vap->va_linkid = (u_int64_t)hfs_currentcnid(cp);
1103 vap->va_parentid = (u_int64_t)hfs_currentparent(cp, /* have_lock: */ true);
1104 } else {
1105 vap->va_linkid = (u_int64_t)cp->c_cnid;
1106 vap->va_parentid = (u_int64_t)cp->c_parentcnid;
1107 }
1108
1109 vap->va_fsid = hfsmp->hfs_raw_dev;
1110 if (VATTR_IS_ACTIVE(vap, va_devid)) {
1111 VATTR_RETURN(vap, va_devid, hfsmp->hfs_raw_dev);
1112 }
1113 vap->va_filerev = 0;
1114 vap->va_encoding = cp->c_encoding;
1115 vap->va_rdev = (v_type == VBLK || v_type == VCHR) ? cp->c_rdev : 0;
1116 #if HFS_COMPRESSION
1117 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1118 if (hide_size)
1119 vap->va_data_size = 0;
1120 else if (compressed) {
1121 if (uncompressed_size == -1) {
1122 /* failed to get the uncompressed size above, so just return data_size */
1123 vap->va_data_size = data_size;
1124 } else {
1125 /* use the uncompressed size we fetched above */
1126 vap->va_data_size = uncompressed_size;
1127 }
1128 } else
1129 vap->va_data_size = data_size;
1130 VATTR_SET_SUPPORTED(vap, va_data_size);
1131 }
1132 #else
1133 vap->va_data_size = data_size;
1134 vap->va_supported |= VNODE_ATTR_va_data_size;
1135 #endif
1136
1137 #if CONFIG_PROTECT
1138 if (VATTR_IS_ACTIVE(vap, va_dataprotect_class)) {
1139 vap->va_dataprotect_class = cp->c_cpentry ? CP_CLASS(cp->c_cpentry->cp_pclass) : 0;
1140 VATTR_SET_SUPPORTED(vap, va_dataprotect_class);
1141 }
1142 #endif
1143 if (VATTR_IS_ACTIVE(vap, va_write_gencount)) {
1144 if (ubc_is_mapped_writable(vp)) {
1145 /*
1146 * Return 0 to the caller to indicate the file may be
1147 * changing. There is no need for us to increment the
1148 * generation counter here because it gets done as part of
1149 * page-out and also when the file is unmapped (to account
1150 * for changes we might not have seen).
1151 */
1152 vap->va_write_gencount = 0;
1153 } else {
1154 vap->va_write_gencount = hfs_get_gencount(cp);
1155 }
1156
1157 VATTR_SET_SUPPORTED(vap, va_write_gencount);
1158 }
1159
1160 /* Mark them all at once instead of individual VATTR_SET_SUPPORTED calls. */
1161 vap->va_supported |= VNODE_ATTR_va_access_time |
1162 VNODE_ATTR_va_create_time | VNODE_ATTR_va_modify_time |
1163 VNODE_ATTR_va_change_time| VNODE_ATTR_va_backup_time |
1164 VNODE_ATTR_va_iosize | VNODE_ATTR_va_uid |
1165 VNODE_ATTR_va_gid | VNODE_ATTR_va_mode |
1166 VNODE_ATTR_va_flags |VNODE_ATTR_va_fileid |
1167 VNODE_ATTR_va_linkid | VNODE_ATTR_va_parentid |
1168 VNODE_ATTR_va_fsid | VNODE_ATTR_va_filerev |
1169 VNODE_ATTR_va_encoding | VNODE_ATTR_va_rdev;
1170
 1171 	/* If this is the root, let VFS find out the mount name, which
 1172 	 * may be different from the real name. Otherwise, we need to take
 1173 	 * care of hardlinked files, which may need to be looked up if necessary.
1174 */
1175 if (VATTR_IS_ACTIVE(vap, va_name) && (cp->c_cnid != kHFSRootFolderID)) {
1176 struct cat_desc linkdesc;
1177 int lockflags;
1178 int uselinkdesc = 0;
1179 cnid_t nextlinkid = 0;
1180 cnid_t prevlinkid = 0;
1181
1182 /* Get the name for ATTR_CMN_NAME. We need to take special care for hardlinks
 1183 	 * here because the information for the link ID requested by getattrlist may be
 1184 	 * different from what's currently in the cnode. This is because the cnode
1185 * will be filled in with the information for the most recent link ID that went
1186 * through namei/lookup(). If there are competing lookups for hardlinks that point
1187 * to the same inode, one (or more) getattrlists could be vended incorrect name information.
1188 * Also, we need to beware of open-unlinked files which could have a namelen of 0.
1189 */
1190
1191 if ((cp->c_flag & C_HARDLINK) &&
1192 ((cp->c_desc.cd_namelen == 0) || (vap->va_linkid != cp->c_cnid))) {
1193 /*
1194 * If we have no name and our link ID is the raw inode number, then we may
1195 * have an open-unlinked file. Go to the next link in this case.
1196 */
1197 if ((cp->c_desc.cd_namelen == 0) && (vap->va_linkid == cp->c_fileid)) {
1198 if ((error = hfs_lookup_siblinglinks(hfsmp, vap->va_linkid, &prevlinkid, &nextlinkid))){
1199 goto out;
1200 }
1201 }
1202 else {
1203 /* just use link obtained from vap above */
1204 nextlinkid = vap->va_linkid;
1205 }
1206
1207 /* We need to probe the catalog for the descriptor corresponding to the link ID
1208 * stored in nextlinkid. Note that we don't know if we have the exclusive lock
1209 * for the cnode here, so we can't just update the descriptor. Instead,
1210 * we should just store the descriptor's value locally and then use it to pass
1211 * out the name value as needed below.
1212 */
1213 if (nextlinkid){
1214 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
1215 error = cat_findname(hfsmp, nextlinkid, &linkdesc);
1216 hfs_systemfile_unlock(hfsmp, lockflags);
1217 if (error == 0) {
1218 uselinkdesc = 1;
1219 }
1220 }
1221 }
1222
1223 /* By this point, we've either patched up the name above and the c_desc
1224 * points to the correct data, or it already did, in which case we just proceed
1225 * by copying the name into the vap. Note that we will never set va_name to
1226 * supported if nextlinkid is never initialized. This could happen in the degenerate
1227 * case above involving the raw inode number, where it has no nextlinkid. In this case
1228 * we will simply not mark the name bit as supported.
1229 */
1230 if (uselinkdesc) {
1231 strlcpy(vap->va_name, (const char*) linkdesc.cd_nameptr, MAXPATHLEN);
1232 VATTR_SET_SUPPORTED(vap, va_name);
1233 cat_releasedesc(&linkdesc);
1234 }
1235 else if (cp->c_desc.cd_namelen) {
1236 strlcpy(vap->va_name, (const char*) cp->c_desc.cd_nameptr, MAXPATHLEN);
1237 VATTR_SET_SUPPORTED(vap, va_name);
1238 }
1239 }
1240
1241 out:
1242 hfs_unlock(cp);
1243 /*
1244 * We need to vnode_put the rsrc fork vnode only *after* we've released
1245 * the cnode lock, since vnode_put can trigger an inactive call, which
1246 * will go back into HFS and try to acquire a cnode lock.
1247 */
1248 if (rvp) {
1249 vnode_put (rvp);
1250 }
1251
1252 return (error);
1253 }
1254
1255 int
1256 hfs_vnop_setattr(struct vnop_setattr_args *ap)
1257 {
1258 struct vnode_attr *vap = ap->a_vap;
1259 struct vnode *vp = ap->a_vp;
1260 struct cnode *cp = NULL;
1261 struct hfsmount *hfsmp;
1262 kauth_cred_t cred = vfs_context_ucred(ap->a_context);
1263 struct proc *p = vfs_context_proc(ap->a_context);
1264 int error = 0;
1265 uid_t nuid;
1266 gid_t ngid;
1267 time_t orig_ctime;
1268
1269 orig_ctime = VTOC(vp)->c_ctime;
1270
1271 #if HFS_COMPRESSION
1272 int decmpfs_reset_state = 0;
1273 /*
1274 we call decmpfs_update_attributes even if the file is not compressed
1275 because we want to update the incoming flags if the xattrs are invalid
1276 */
1277 error = decmpfs_update_attributes(vp, vap);
1278 if (error)
1279 return error;
1280 #endif
1281 //
1282 // if this is not a size-changing setattr and it is not just
1283 // an atime update, then check for a snapshot.
1284 //
1285 if (!VATTR_IS_ACTIVE(vap, va_data_size) && !(vap->va_active == VNODE_ATTR_va_access_time)) {
1286 nspace_snapshot_event(vp, orig_ctime, NAMESPACE_HANDLER_METADATA_MOD, NSPACE_REARM_NO_ARG);
1287 }
1288
1289 #if CONFIG_PROTECT
1290 /*
1291 * All metadata changes should be allowed except a size-changing setattr, which
1292 * has effects on file content and requires calling into cp_handle_vnop
1293 * to have content protection check.
1294 */
1295 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1296 if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
1297 return (error);
1298 }
1299 }
1300 #endif /* CONFIG_PROTECT */
1301
1302 hfsmp = VTOHFS(vp);
1303
1304 /* Don't allow modification of the journal. */
1305 if (hfs_is_journal_file(hfsmp, VTOC(vp))) {
1306 return (EPERM);
1307 }
1308
1309 //
 1310 	// Check if we'll need a document_id and if so, get it before we lock
 1311 	// the cnode to avoid any possible deadlock with the root vnode which has
1312 // to get locked to get the document id
1313 //
1314 u_int32_t document_id=0;
1315 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & UF_TRACKED) && !(VTOC(vp)->c_bsdflags & UF_TRACKED)) {
1316 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&(VTOC(vp)->c_attr.ca_finderinfo) + 16);
1317 //
1318 // If the document_id is not set, get a new one. It will be set
1319 // on the file down below once we hold the cnode lock.
1320 //
1321 if (fip->document_id == 0) {
1322 if (hfs_generate_document_id(hfsmp, &document_id) != 0) {
1323 document_id = 0;
1324 }
1325 }
1326 }
1327
1328
1329 /*
1330 * File size change request.
1331 * We are guaranteed that this is not a directory, and that
1332 * the filesystem object is writeable.
1333 *
1334 * NOTE: HFS COMPRESSION depends on the data_size being set *before* the bsd flags are updated
1335 */
1336 VATTR_SET_SUPPORTED(vap, va_data_size);
1337 if (VATTR_IS_ACTIVE(vap, va_data_size) && !vnode_islnk(vp)) {
1338 #if HFS_COMPRESSION
1339 /* keep the compressed state locked until we're done truncating the file */
1340 decmpfs_cnode *dp = VTOCMP(vp);
1341 if (!dp) {
1342 /*
1343 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1344 * is filled in; we need a decmpfs_cnode to lock out decmpfs state changes
1345 * on this file while it's truncating
1346 */
1347 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1348 if (!dp) {
1349 /* failed to allocate a decmpfs_cnode */
1350 return ENOMEM; /* what should this be? */
1351 }
1352 }
1353
1354 nspace_snapshot_event(vp, orig_ctime, vap->va_data_size == 0 ? NAMESPACE_HANDLER_TRUNCATE_OP|NAMESPACE_HANDLER_DELETE_OP : NAMESPACE_HANDLER_TRUNCATE_OP, NULL);
1355
1356 decmpfs_lock_compressed_data(dp, 1);
1357 if (hfs_file_is_compressed(VTOC(vp), 1)) {
1358 error = decmpfs_decompress_file(vp, dp, -1/*vap->va_data_size*/, 0, 1);
1359 if (error != 0) {
1360 decmpfs_unlock_compressed_data(dp, 1);
1361 return error;
1362 }
1363 }
1364 #endif
1365
1366 // Take truncate lock
1367 hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
1368
1369 // hfs_truncate will deal with the cnode lock
1370 error = hfs_truncate(vp, vap->va_data_size, vap->va_vaflags & 0xffff,
1371 0, ap->a_context);
1372
1373 hfs_unlock_truncate(VTOC(vp), HFS_LOCK_DEFAULT);
1374 #if HFS_COMPRESSION
1375 decmpfs_unlock_compressed_data(dp, 1);
1376 #endif
1377 if (error)
1378 return error;
1379 }
1380 if (cp == NULL) {
1381 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
1382 return (error);
1383 cp = VTOC(vp);
1384 }
1385
1386 /*
1387 * If it is just an access time update request by itself
1388 * we know the request is from kernel level code, and we
1389 * can delay it without being as worried about consistency.
1390 * This change speeds up mmaps, in the rare case that they
1391 * get caught behind a sync.
1392 */
1393
1394 if (vap->va_active == VNODE_ATTR_va_access_time) {
1395 cp->c_touch_acctime=TRUE;
1396 goto out;
1397 }
1398
1399
1400
1401 /*
1402 * Owner/group change request.
1403 * We are guaranteed that the new owner/group is valid and legal.
1404 */
1405 VATTR_SET_SUPPORTED(vap, va_uid);
1406 VATTR_SET_SUPPORTED(vap, va_gid);
1407 nuid = VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uid_t)VNOVAL;
1408 ngid = VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (gid_t)VNOVAL;
1409 if (((nuid != (uid_t)VNOVAL) || (ngid != (gid_t)VNOVAL)) &&
1410 ((error = hfs_chown(vp, nuid, ngid, cred, p)) != 0))
1411 goto out;
1412
1413 /*
1414 * Mode change request.
1415 * We are guaranteed that the mode value is valid and that in
1416 * conjunction with the owner and group, this change is legal.
1417 */
1418 VATTR_SET_SUPPORTED(vap, va_mode);
1419 if (VATTR_IS_ACTIVE(vap, va_mode) &&
1420 ((error = hfs_chmod(vp, (int)vap->va_mode, cred, p)) != 0))
1421 goto out;
1422
1423 /*
1424 * File flags change.
1425 * We are guaranteed that only flags allowed to change given the
1426 * current securelevel are being changed.
1427 */
1428 VATTR_SET_SUPPORTED(vap, va_flags);
1429 if (VATTR_IS_ACTIVE(vap, va_flags)) {
1430 u_int16_t *fdFlags;
1431
1432 #if HFS_COMPRESSION
1433 if ((cp->c_bsdflags ^ vap->va_flags) & UF_COMPRESSED) {
1434 /*
1435 * the UF_COMPRESSED was toggled, so reset our cached compressed state
1436 * but we don't want to actually do the update until we've released the cnode lock down below
1437 * NOTE: turning the flag off doesn't actually decompress the file, so that we can
1438 * turn off the flag and look at the "raw" file for debugging purposes
1439 */
1440 decmpfs_reset_state = 1;
1441 }
1442 #endif
1443 if ((vap->va_flags & UF_TRACKED) && !(cp->c_bsdflags & UF_TRACKED)) {
1444 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
1445
1446 //
1447 // we're marking this item UF_TRACKED. if the document_id is
1448 // not set, get a new one and put it on the file.
1449 //
1450 if (fip->document_id == 0) {
1451 if (document_id != 0) {
1452 // printf("SETATTR: assigning doc-id %d to %s (ino %d)\n", document_id, vp->v_name, cp->c_desc.cd_cnid);
1453 fip->document_id = (uint32_t)document_id;
1454 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
1455 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
1456 FSE_ARG_INO, (ino64_t)0, // src inode #
1457 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
1458 FSE_ARG_INT32, document_id,
1459 FSE_ARG_DONE);
1460 } else {
1461 // printf("hfs: could not acquire a new document_id for %s (ino %d)\n", vp->v_name, cp->c_desc.cd_cnid);
1462 }
1463 }
1464
1465 } else if (!(vap->va_flags & UF_TRACKED) && (cp->c_bsdflags & UF_TRACKED)) {
1466 //
1467 // UF_TRACKED is being cleared so clear the document_id
1468 //
1469 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
1470 if (fip->document_id) {
1471 // printf("SETATTR: clearing doc-id %d from %s (ino %d)\n", fip->document_id, vp->v_name, cp->c_desc.cd_cnid);
1472 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
1473 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
1474 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
1475 FSE_ARG_INO, (ino64_t)0, // dst inode #
1476 FSE_ARG_INT32, fip->document_id, // document id
1477 FSE_ARG_DONE);
1478 fip->document_id = 0;
1479 cp->c_bsdflags &= ~UF_TRACKED;
1480 }
1481 }
1482
1483 cp->c_bsdflags = vap->va_flags;
1484 cp->c_flag |= C_MODIFIED;
1485 cp->c_touch_chgtime = TRUE;
1486
1487
1488 /*
1489 * Mirror the UF_HIDDEN flag to the invisible bit of the Finder Info.
1490 *
1491 * The fdFlags for files and frFlags for folders are both 8 bytes
1492 * into the userInfo (the first 16 bytes of the Finder Info). They
1493 * are both 16-bit fields.
1494 */
1495 fdFlags = (u_int16_t *) &cp->c_finderinfo[8];
1496 if (vap->va_flags & UF_HIDDEN)
1497 *fdFlags |= OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1498 else
1499 *fdFlags &= ~OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1500 }
1501
1502 /*
1503 * Timestamp updates.
1504 */
1505 VATTR_SET_SUPPORTED(vap, va_create_time);
1506 VATTR_SET_SUPPORTED(vap, va_access_time);
1507 VATTR_SET_SUPPORTED(vap, va_modify_time);
1508 VATTR_SET_SUPPORTED(vap, va_backup_time);
1509 VATTR_SET_SUPPORTED(vap, va_change_time);
1510 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
1511 VATTR_IS_ACTIVE(vap, va_access_time) ||
1512 VATTR_IS_ACTIVE(vap, va_modify_time) ||
1513 VATTR_IS_ACTIVE(vap, va_backup_time)) {
1514 if (VATTR_IS_ACTIVE(vap, va_create_time))
1515 cp->c_itime = vap->va_create_time.tv_sec;
1516 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
1517 cp->c_atime = vap->va_access_time.tv_sec;
1518 cp->c_touch_acctime = FALSE;
1519 }
1520 if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
1521 cp->c_mtime = vap->va_modify_time.tv_sec;
1522 cp->c_touch_modtime = FALSE;
1523 cp->c_touch_chgtime = TRUE;
1524
1525 hfs_clear_might_be_dirty_flag(cp);
1526
1527 /*
1528 * The utimes system call can reset the modification
1529 * time but it doesn't know about HFS create times.
1530 * So we need to ensure that the creation time is
1531 * always at least as old as the modification time.
1532 */
1533 if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
1534 (cp->c_cnid != kHFSRootFolderID) &&
1535 !VATTR_IS_ACTIVE(vap, va_create_time) &&
1536 (cp->c_mtime < cp->c_itime)) {
1537 cp->c_itime = cp->c_mtime;
1538 }
1539 }
1540 if (VATTR_IS_ACTIVE(vap, va_backup_time))
1541 cp->c_btime = vap->va_backup_time.tv_sec;
1542 cp->c_flag |= C_MINOR_MOD;
1543 }
1544
1545 // Set the date added time
1546 VATTR_SET_SUPPORTED(vap, va_addedtime);
1547 if (VATTR_IS_ACTIVE(vap, va_addedtime)) {
1548 hfs_write_dateadded(&cp->c_attr, vap->va_addedtime.tv_sec);
1549 cp->c_flag &= ~C_NEEDS_DATEADDED;
1550 cp->c_touch_chgtime = true;
1551 }
1552
1553 /*
1554 * Set name encoding.
1555 */
1556 VATTR_SET_SUPPORTED(vap, va_encoding);
1557 if (VATTR_IS_ACTIVE(vap, va_encoding)) {
1558 cp->c_encoding = vap->va_encoding;
1559 cp->c_flag |= C_MODIFIED;
1560 hfs_setencodingbits(hfsmp, cp->c_encoding);
1561 }
1562
1563 if ((error = hfs_update(vp, 0)) != 0)
1564 goto out;
1565
1566 out:
1567 if (cp) {
1568 /* Purge origin cache for cnode, since caller now has correct link ID for it
1569 * We purge it here since it was acquired for us during lookup, and we no longer need it.
1570 */
1571 if ((cp->c_flag & C_HARDLINK) && (vnode_vtype(vp) != VDIR)){
1572 hfs_relorigin(cp, 0);
1573 }
1574
1575 hfs_unlock(cp);
1576 #if HFS_COMPRESSION
1577 if (decmpfs_reset_state) {
1578 /*
1579 * we've changed the UF_COMPRESSED flag, so reset the decmpfs state for this cnode
1580 * but don't do it while holding the hfs cnode lock
1581 */
1582 decmpfs_cnode *dp = VTOCMP(vp);
1583 if (!dp) {
1584 /*
1585 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1586 * is filled in; we need a decmpfs_cnode to prevent decmpfs state changes
1587 * on this file if it's locked
1588 */
1589 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1590 if (!dp) {
1591 /* failed to allocate a decmpfs_cnode */
1592 return ENOMEM; /* what should this be? */
1593 }
1594 }
1595 decmpfs_cnode_set_vnode_state(dp, FILE_TYPE_UNKNOWN, 0);
1596 }
1597 #endif
1598 }
1599
1600 #if CONFIG_PROTECT
1601 VATTR_SET_SUPPORTED(vap, va_dataprotect_class);
1602 if (!error && VATTR_IS_ACTIVE(vap, va_dataprotect_class))
1603 error = cp_vnode_setclass(vp, vap->va_dataprotect_class);
1604 #endif
1605
1606 return (error);
1607 }
1608
1609
1610 /*
1611 * Change the mode on a file.
1612 * cnode must be locked before calling.
1613 */
1614 int
1615 hfs_chmod(struct vnode *vp, int mode, __unused kauth_cred_t cred, __unused struct proc *p)
1616 {
1617 register struct cnode *cp = VTOC(vp);
1618
1619 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1620 return (0);
1621
1622 // Don't allow modification of the journal or journal_info_block
1623 if (hfs_is_journal_file(VTOHFS(vp), cp)) {
1624 return EPERM;
1625 }
1626
1627 #if OVERRIDE_UNKNOWN_PERMISSIONS
1628 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS) {
1629 return (0);
1630 };
1631 #endif
1632
1633 mode_t new_mode = (cp->c_mode & ~ALLPERMS) | (mode & ALLPERMS);
1634 if (new_mode != cp->c_mode) {
1635 cp->c_mode = new_mode;
1636 cp->c_flag |= C_MINOR_MOD;
1637 }
1638 cp->c_touch_chgtime = TRUE;
1639 return (0);
1640 }
1641
1642
1643 int
1644 hfs_write_access(struct vnode *vp, kauth_cred_t cred, struct proc *p, Boolean considerFlags)
1645 {
1646 struct cnode *cp = VTOC(vp);
1647 int retval = 0;
1648 int is_member;
1649
1650 /*
1651 * Disallow write attempts on read-only file systems;
1652 * unless the file is a socket, fifo, or a block or
1653 * character device resident on the file system.
1654 */
1655 switch (vnode_vtype(vp)) {
1656 case VDIR:
1657 case VLNK:
1658 case VREG:
1659 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
1660 return (EROFS);
1661 break;
1662 default:
1663 break;
1664 }
1665
1666 /* If immutable bit set, nobody gets to write it. */
1667 if (considerFlags && (cp->c_bsdflags & IMMUTABLE))
1668 return (EPERM);
1669
1670 /* Otherwise, user id 0 always gets access. */
1671 if (!suser(cred, NULL))
1672 return (0);
1673
1674 /* Otherwise, check the owner. */
1675 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
1676 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
1677
1678 /* Otherwise, check the groups. */
1679 if (kauth_cred_ismember_gid(cred, cp->c_gid, &is_member) == 0 && is_member) {
1680 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
1681 }
1682
1683 /* Otherwise, check everyone else. */
1684 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
1685 }
1686
1687
1688 /*
1689 * Perform chown operation on cnode cp;
 1690  * the cnode must be locked prior to call.
1691 */
1692 int
1693 #if !QUOTA
1694 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, __unused kauth_cred_t cred,
1695 __unused struct proc *p)
1696 #else
1697 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred,
1698 __unused struct proc *p)
1699 #endif
1700 {
1701 register struct cnode *cp = VTOC(vp);
1702 uid_t ouid;
1703 gid_t ogid;
1704 #if QUOTA
1705 int error = 0;
1706 register int i;
1707 int64_t change;
1708 #endif /* QUOTA */
1709
1710 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1711 return (ENOTSUP);
1712
1713 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS)
1714 return (0);
1715
1716 if (uid == (uid_t)VNOVAL)
1717 uid = cp->c_uid;
1718 if (gid == (gid_t)VNOVAL)
1719 gid = cp->c_gid;
1720
1721 #if 0 /* we are guaranteed that this is already the case */
1722 /*
1723 * If we don't own the file, are trying to change the owner
1724 * of the file, or are not a member of the target group,
1725 * the caller must be superuser or the call fails.
1726 */
1727 if ((kauth_cred_getuid(cred) != cp->c_uid || uid != cp->c_uid ||
1728 (gid != cp->c_gid &&
1729 (kauth_cred_ismember_gid(cred, gid, &is_member) || !is_member))) &&
1730 (error = suser(cred, 0)))
1731 return (error);
1732 #endif
1733
1734 ogid = cp->c_gid;
1735 ouid = cp->c_uid;
1736
1737 if (ouid == uid && ogid == gid) {
1738 // No change, just set change time
1739 cp->c_touch_chgtime = TRUE;
1740 return 0;
1741 }
1742
1743 #if QUOTA
1744 if ((error = hfs_getinoquota(cp)))
1745 return (error);
1746 if (ouid == uid) {
1747 dqrele(cp->c_dquot[USRQUOTA]);
1748 cp->c_dquot[USRQUOTA] = NODQUOT;
1749 }
1750 if (ogid == gid) {
1751 dqrele(cp->c_dquot[GRPQUOTA]);
1752 cp->c_dquot[GRPQUOTA] = NODQUOT;
1753 }
1754
1755 /*
1756 * Eventually we need to account for (fake) a block per directory:
1757 * if (vnode_isdir(vp))
1758 * change = VTOHFS(vp)->blockSize;
1759 * else
1760 */
1761
1762 change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
1763 (void) hfs_chkdq(cp, -change, cred, CHOWN);
1764 (void) hfs_chkiq(cp, -1, cred, CHOWN);
1765 for (i = 0; i < MAXQUOTAS; i++) {
1766 dqrele(cp->c_dquot[i]);
1767 cp->c_dquot[i] = NODQUOT;
1768 }
1769 #endif /* QUOTA */
1770 cp->c_gid = gid;
1771 cp->c_uid = uid;
1772 #if QUOTA
1773 if ((error = hfs_getinoquota(cp)) == 0) {
1774 if (ouid == uid) {
1775 dqrele(cp->c_dquot[USRQUOTA]);
1776 cp->c_dquot[USRQUOTA] = NODQUOT;
1777 }
1778 if (ogid == gid) {
1779 dqrele(cp->c_dquot[GRPQUOTA]);
1780 cp->c_dquot[GRPQUOTA] = NODQUOT;
1781 }
1782 if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
1783 if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
1784 goto good;
1785 else
1786 (void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
1787 }
1788 for (i = 0; i < MAXQUOTAS; i++) {
1789 dqrele(cp->c_dquot[i]);
1790 cp->c_dquot[i] = NODQUOT;
1791 }
1792 }
1793 cp->c_gid = ogid;
1794 cp->c_uid = ouid;
1795 if (hfs_getinoquota(cp) == 0) {
1796 if (ouid == uid) {
1797 dqrele(cp->c_dquot[USRQUOTA]);
1798 cp->c_dquot[USRQUOTA] = NODQUOT;
1799 }
1800 if (ogid == gid) {
1801 dqrele(cp->c_dquot[GRPQUOTA]);
1802 cp->c_dquot[GRPQUOTA] = NODQUOT;
1803 }
1804 (void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
1805 (void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
1806 (void) hfs_getinoquota(cp);
1807 }
1808 return (error);
1809 good:
1810 if (hfs_getinoquota(cp))
1811 panic("hfs_chown: lost quota");
1812 #endif /* QUOTA */
1813
1814 /*
1815 * Without quotas, we could probably make this a minor
1816 * modification.
1817 */
1818 cp->c_flag |= C_MODIFIED;
1819
1820 /*
1821 * According to the SUSv3 Standard, chown() shall mark
1822 * for update the st_ctime field of the file.
1823 * (No exceptions mentioned.)
1824 */
1825 cp->c_touch_chgtime = TRUE;
1826 return (0);
1827 }
1828
1829 #if HFS_COMPRESSION
1830 /*
1831 * Flush the resource fork if it exists. vp is the data fork and has
1832 * an iocount.
1833 */
1834 static int hfs_flush_rsrc(vnode_t vp, vfs_context_t ctx)
1835 {
1836 cnode_t *cp = VTOC(vp);
1837
1838 hfs_lock(cp, HFS_SHARED_LOCK, 0);
1839
1840 vnode_t rvp = cp->c_rsrc_vp;
1841
1842 if (!rvp) {
1843 hfs_unlock(cp);
1844 return 0;
1845 }
1846
1847 int vid = vnode_vid(rvp);
1848
1849 hfs_unlock(cp);
1850
1851 int error = vnode_getwithvid(rvp, vid);
1852
1853 if (error)
1854 return error == ENOENT ? 0 : error;
1855
1856 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, 0);
1857 hfs_lock_always(cp, HFS_EXCLUSIVE_LOCK);
1858 hfs_filedone(rvp, ctx, HFS_FILE_DONE_NO_SYNC);
1859 hfs_unlock(cp);
1860 hfs_unlock_truncate(cp, 0);
1861
1862 error = ubc_msync(rvp, 0, ubc_getsize(rvp), NULL,
1863 UBC_PUSHALL | UBC_SYNC);
1864
1865 vnode_put(rvp);
1866
1867 return error;
1868 }
1869 #endif // HFS_COMPRESSION
1870
1871 /*
1872 * hfs_vnop_exchange:
1873 *
1874 * Inputs:
1875 * 'from' vnode/cnode
1876 * 'to' vnode/cnode
1877 * options flag bits
1878 * vfs_context
1879 *
1880 * Discussion:
1881 * hfs_vnop_exchange is used to service the exchangedata(2) system call.
1882 * Per the requirements of that system call, this function "swaps" some
1883 * of the information that lives in one catalog record for some that
1884 * lives in another. Note that not everything is swapped; in particular,
1885 * the extent information stored in each cnode is kept local to that
1886 * cnode. This allows existing file descriptor references to continue
1887 * to operate on the same content, regardless of the location in the
1888 * namespace that the file may have moved to. See inline comments
1889 * in the function for more information.
1890 */
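/*
 * Illustrative sketch, not part of the original source: user space
 * normally reaches this vnop through the exchangedata(2) system call.
 * Assuming two ordinary files on the same HFS+ volume (the paths and
 * the zero options word below are hypothetical), a caller might look
 * roughly like this:
 *
 *     #include <unistd.h>
 *     #include <stdio.h>
 *
 *     // On success each path now refers to the other file's content,
 *     // while already-open descriptors keep operating on the content
 *     // they were opened on (see the discussion below).
 *     if (exchangedata("/tmp/a", "/tmp/b", 0) == -1)
 *         perror("exchangedata");
 */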
1891 int
1892 hfs_vnop_exchange(struct vnop_exchange_args *ap)
1893 {
1894 struct vnode *from_vp = ap->a_fvp;
1895 struct vnode *to_vp = ap->a_tvp;
1896 struct cnode *from_cp;
1897 struct cnode *to_cp;
1898 struct hfsmount *hfsmp;
1899 struct cat_desc tempdesc;
1900 struct cat_attr tempattr;
1901 const unsigned char *from_nameptr;
1902 const unsigned char *to_nameptr;
1903 char from_iname[32];
1904 char to_iname[32];
1905 uint32_t to_flag_special;
1906 uint32_t from_flag_special;
1907 cnid_t from_parid;
1908 cnid_t to_parid;
1909 int lockflags;
1910 int error = 0, started_tr = 0, got_cookie = 0;
1911 cat_cookie_t cookie;
1912 time_t orig_from_ctime, orig_to_ctime;
1913 bool have_cnode_locks = false, have_from_trunc_lock = false, have_to_trunc_lock = false;
1914
1915 /*
1916 * VFS does the following checks:
1917 * 1. Validate that both are files.
1918 * 2. Validate that both are on the same mount.
1919 * 3. Validate that they're not the same vnode.
1920 */
1921
1922 from_cp = VTOC(from_vp);
1923 to_cp = VTOC(to_vp);
1924 hfsmp = VTOHFS(from_vp);
1925
1926 orig_from_ctime = from_cp->c_ctime;
1927 orig_to_ctime = to_cp->c_ctime;
1928
1929 #if CONFIG_PROTECT
1930 /*
1931 * Do not allow exchangedata/F_MOVEDATAEXTENTS on data-protected filesystems
1932 * because the EAs will not be swapped. As a result, the persistent keys would not
1933 * match and the files will be garbage.
1934 */
1935 if (cp_fs_protected (vnode_mount(from_vp))) {
1936 return EINVAL;
1937 }
1938 #endif
1939
1940 #if HFS_COMPRESSION
1941 if (!ISSET(ap->a_options, FSOPT_EXCHANGE_DATA_ONLY)) {
1942 if ( hfs_file_is_compressed(from_cp, 0) ) {
1943 if ( 0 != ( error = decmpfs_decompress_file(from_vp, VTOCMP(from_vp), -1, 0, 1) ) ) {
1944 return error;
1945 }
1946 }
1947
1948 if ( hfs_file_is_compressed(to_cp, 0) ) {
1949 if ( 0 != ( error = decmpfs_decompress_file(to_vp, VTOCMP(to_vp), -1, 0, 1) ) ) {
1950 return error;
1951 }
1952 }
1953 }
1954 #endif // HFS_COMPRESSION
1955
1956 // Resource forks cannot be exchanged.
1957 if (VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp))
1958 return EINVAL;
1959
1960 /*
1961 * Normally, we want to notify the user handlers about the event,
1962 * except if it's a handler driving the event.
1963 */
1964 if ((ap->a_options & FSOPT_EXCHANGE_DATA_ONLY) == 0) {
1965 nspace_snapshot_event(from_vp, orig_from_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
1966 nspace_snapshot_event(to_vp, orig_to_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
1967 } else {
1968 /*
1969 * This is currently used by mtmd so we should tidy up the
1970 * file now because the data won't be used again in the
1971 * destination file.
1972 */
1973 hfs_lock_truncate(from_cp, HFS_EXCLUSIVE_LOCK, 0);
1974 hfs_lock_always(from_cp, HFS_EXCLUSIVE_LOCK);
1975 hfs_filedone(from_vp, ap->a_context, HFS_FILE_DONE_NO_SYNC);
1976 hfs_unlock(from_cp);
1977 hfs_unlock_truncate(from_cp, 0);
1978
1979 // Flush all the data from the source file
1980 error = ubc_msync(from_vp, 0, ubc_getsize(from_vp), NULL,
1981 UBC_PUSHALL | UBC_SYNC);
1982 if (error)
1983 goto exit;
1984
1985 #if HFS_COMPRESSION
1986 /*
1987 * If this is a compressed file, we need to do the same for
1988 * the resource fork.
1989 */
1990 if (ISSET(from_cp->c_bsdflags, UF_COMPRESSED)) {
1991 error = hfs_flush_rsrc(from_vp, ap->a_context);
1992 if (error)
1993 goto exit;
1994 }
1995 #endif
1996
1997 /*
1998 * We're doing a data-swap so we need to take the truncate
1999 * lock exclusively. We need an exclusive lock because we
2000 * will be completely truncating the source file and we must
2001 * make sure nobody else sneaks in and tries to issue I/O
2002 * whilst we don't have the cnode lock.
2003 *
2004 * After taking the truncate lock we do a quick check to
2005 * verify there are no other references (including mmap
2006 * references), but we must remember that this does not stop
2007 * anybody coming in later and taking a reference. We will
2008 * have the truncate lock exclusively so that will prevent
2009 * them from issuing any I/O.
2010 */
2011
2012 if (to_cp < from_cp) {
2013 hfs_lock_truncate(to_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2014 have_to_trunc_lock = true;
2015 }
2016
2017 hfs_lock_truncate(from_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2018 have_from_trunc_lock = true;
2019
2020 /*
2021 * Do an early check to verify the source is not in use by
2022 * anyone. We should be called from an FD opened as O_EVTONLY
2023 * so that doesn't count as a reference.
2024 */
2025 if (vnode_isinuse(from_vp, 0)) {
2026 error = EBUSY;
2027 goto exit;
2028 }
2029
2030 if (to_cp >= from_cp) {
2031 hfs_lock_truncate(to_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2032 have_to_trunc_lock = true;
2033 }
2034 }
2035
2036 if ((error = hfs_lockpair(from_cp, to_cp, HFS_EXCLUSIVE_LOCK)))
2037 goto exit;
2038 have_cnode_locks = true;
2039
2040 // Don't allow modification of the journal or journal_info_block
2041 if (hfs_is_journal_file(hfsmp, from_cp) ||
2042 hfs_is_journal_file(hfsmp, to_cp)) {
2043 error = EPERM;
2044 goto exit;
2045 }
2046
2047 /*
2048 * Ok, now that all of the pre-flighting is done, call the underlying
2049 * function if needed.
2050 */
2051 if (ISSET(ap->a_options, FSOPT_EXCHANGE_DATA_ONLY)) {
2052 #if HFS_COMPRESSION
2053 if (ISSET(from_cp->c_bsdflags, UF_COMPRESSED)) {
2054 error = hfs_move_compressed(from_cp, to_cp);
2055 goto exit;
2056 }
2057 #endif
2058
2059 error = hfs_move_data(from_cp, to_cp, 0);
2060 goto exit;
2061 }
2062
2063 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2064 goto exit;
2065 }
2066 started_tr = 1;
2067
2068 /*
2069 * Reserve some space in the Catalog file.
2070 */
2071 if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, vfs_context_proc(ap->a_context)))) {
2072 goto exit;
2073 }
2074 got_cookie = 1;
2075
2076 /* The backend code always tries to delete the virtual
2077 * extent id for exchanging files so we need to lock
2078 * the extents b-tree.
2079 */
2080 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
2081
2082 /* Account for the location of the catalog objects. */
2083 if (from_cp->c_flag & C_HARDLINK) {
2084 MAKE_INODE_NAME(from_iname, sizeof(from_iname),
2085 from_cp->c_attr.ca_linkref);
2086 from_nameptr = (unsigned char *)from_iname;
2087 from_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2088 from_cp->c_hint = 0;
2089 } else {
2090 from_nameptr = from_cp->c_desc.cd_nameptr;
2091 from_parid = from_cp->c_parentcnid;
2092 }
2093 if (to_cp->c_flag & C_HARDLINK) {
2094 MAKE_INODE_NAME(to_iname, sizeof(to_iname),
2095 to_cp->c_attr.ca_linkref);
2096 to_nameptr = (unsigned char *)to_iname;
2097 to_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2098 to_cp->c_hint = 0;
2099 } else {
2100 to_nameptr = to_cp->c_desc.cd_nameptr;
2101 to_parid = to_cp->c_parentcnid;
2102 }
2103
2104 /*
2105 * ExchangeFileIDs swaps the on-disk, or in-BTree extent information
2106 * attached to two different file IDs. It also swaps the extent
2107 * information that may live in the extents-overflow B-Tree.
2108 *
2109 * We do this in a transaction as this may require a lot of B-Tree nodes
2110 * to do completely, particularly if one of the files in question
2111 * has a lot of extents.
2112 *
2113 * For example, assume "file1" has fileID 50, and "file2" has fileID 52.
2114 * For the on-disk records, which are assumed to be synced, we will
2115 * first swap the resident inline-8 extents as part of the catalog records.
2116 * Then we will swap any extents overflow records for each file.
2117 *
2118 * When ExchangeFileIDs returns successfully, "file1" will have fileID 52,
2119 * and "file2" will have fileID 50. However, note that this is only
2120 * approximately half of the work that exchangedata(2) will need to
2121 * accomplish. In other words, we swap "too much" of the information
2122 * because if we only called ExchangeFileIDs, both the fileID and extent
2123 * information would be the invariants of this operation. We don't
2124 * actually want that; we want to conclude with "file1" having
2125 * file ID 50, and "file2" having fileID 52.
2126 *
2127 * The remainder of hfs_vnop_exchange will swap the file ID and other cnode
2128 * data back to the proper ownership, while still allowing the cnode to remain
2129 * pointing at the same set of extents that it did originally.
2130 */
2131 error = ExchangeFileIDs(hfsmp, from_nameptr, to_nameptr, from_parid,
2132 to_parid, from_cp->c_hint, to_cp->c_hint);
2133 hfs_systemfile_unlock(hfsmp, lockflags);
2134
2135 /*
2136 * Note that we don't need to exchange any extended attributes
2137 * since the attributes are keyed by file ID.
2138 */
2139
2140 if (error != E_NONE) {
2141 error = MacToVFSError(error);
2142 goto exit;
2143 }
2144
2145 /* Purge the vnodes from the name cache */
2146 if (from_vp)
2147 cache_purge(from_vp);
2148 if (to_vp)
2149 cache_purge(to_vp);
2150
2151 /* Bump both source and destination write counts before any swaps. */
2152 {
2153 hfs_incr_gencount (from_cp);
2154 hfs_incr_gencount (to_cp);
2155 }
2156
2157 /* Save a copy of "from" attributes before swapping. */
2158 bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
2159 bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));
2160
2161 /* Save whether or not each cnode is a hardlink or has EAs */
2162 from_flag_special = from_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
2163 to_flag_special = to_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
2164
2165 /* Drop the special bits from each cnode */
2166 from_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
2167 to_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
2168
2169 /*
2170 * Now complete the in-memory portion of the copy.
2171 *
2172 * ExchangeFileIDs swaps the on-disk records involved. We complete the
2173 * operation by swapping the in-memory contents of the two files here.
2174 * We swap the cnode descriptors, which contain name, BSD attributes,
2175 * timestamps, etc, about the file.
2176 *
2177 * NOTE: We do *NOT* swap the fileforks of the two cnodes. We have
2178 * already swapped the on-disk extent information. As long as we swap the
2179 * IDs, the in-line resident 8 extents that live in the filefork data
2180 * structure will point to the right data for the new file ID if we leave
2181 * them alone.
2182 *
2183 * As a result, any file descriptor that points to a particular
2184 * vnode (even though it should change names), will continue
2185 * to point to the same content.
2186 */
2187
2188 /* Copy the "to" -> "from" cnode */
2189 bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));
2190
2191 from_cp->c_hint = 0;
2192 /*
2193 * If 'to' was a hardlink, then we copied over its link ID/CNID/(namespace ID)
2194 * when we bcopy'd the descriptor above. However, the cnode attributes
2195 * are not bcopied. As a result, make sure to swap the file IDs of each item.
2196 *
2197 * Further, other hardlink attributes must be moved along in this swap:
2198 * the linkcount, the linkref, and the firstlink all need to move
2199 * along with the file IDs. See note below regarding the flags and
2200 * what moves vs. what does not.
2201 *
2202 * For Reference:
2203 * linkcount == total # of hardlinks.
2204 * linkref == the indirect inode pointer.
2205 * firstlink == the first hardlink in the chain (written to the raw inode).
2206 * These three are tied to the fileID and must move along with the rest of the data.
2207 */
2208 from_cp->c_fileid = to_cp->c_attr.ca_fileid;
2209
2210 from_cp->c_itime = to_cp->c_itime;
2211 from_cp->c_btime = to_cp->c_btime;
2212 from_cp->c_atime = to_cp->c_atime;
2213 from_cp->c_ctime = to_cp->c_ctime;
2214 from_cp->c_gid = to_cp->c_gid;
2215 from_cp->c_uid = to_cp->c_uid;
2216 from_cp->c_bsdflags = to_cp->c_bsdflags;
2217 from_cp->c_mode = to_cp->c_mode;
2218 from_cp->c_linkcount = to_cp->c_linkcount;
2219 from_cp->c_attr.ca_linkref = to_cp->c_attr.ca_linkref;
2220 from_cp->c_attr.ca_firstlink = to_cp->c_attr.ca_firstlink;
2221
2222 /*
2223 * The cnode flags need to stay with the cnode and not get transferred
2224 * over along with everything else because they describe the content; they are
2225 * not attributes that reflect changes specific to the file ID. In general,
2226 * fields that are tied to the file ID are the ones that will move.
2227 *
2228 * This reflects the fact that the file may have borrowed blocks, dirty metadata,
2229 * or other extents, which may not yet have been written to the catalog. If
2230 * they were, they would have been transferred in the ExchangeFileIDs call above.
2231 *
2232 * The flags that are special are:
2233 * C_HARDLINK, C_HASXATTRS
2234 *
2235 * These flags move with the item and file ID in the namespace since their
2236 * state is tied to that of the file ID.
2237 *
2238 * So to transfer the flags, we have to take the following steps
2239 * 1) Store in a localvar whether or not the special bits are set.
2240 * 2) Drop the special bits from the current flags
2241 * 3) swap the special flag bits to their destination
2242 */
2243 from_cp->c_flag |= to_flag_special | C_MODIFIED;
2244 from_cp->c_attr.ca_recflags = to_cp->c_attr.ca_recflags;
2245 bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);
2246
2247
2248 /* Copy the "from" -> "to" cnode */
2249 bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
2250 to_cp->c_hint = 0;
2251 /*
2252 * Pull the file ID from the tempattr we copied above. We can't assume
2253 * it is the same as the CNID.
2254 */
2255 to_cp->c_fileid = tempattr.ca_fileid;
2256 to_cp->c_itime = tempattr.ca_itime;
2257 to_cp->c_btime = tempattr.ca_btime;
2258 to_cp->c_atime = tempattr.ca_atime;
2259 to_cp->c_ctime = tempattr.ca_ctime;
2260 to_cp->c_gid = tempattr.ca_gid;
2261 to_cp->c_uid = tempattr.ca_uid;
2262 to_cp->c_bsdflags = tempattr.ca_flags;
2263 to_cp->c_mode = tempattr.ca_mode;
2264 to_cp->c_linkcount = tempattr.ca_linkcount;
2265 to_cp->c_attr.ca_linkref = tempattr.ca_linkref;
2266 to_cp->c_attr.ca_firstlink = tempattr.ca_firstlink;
2267
2268 /*
2269 * Only OR in the "from" flags into our cnode flags below.
2270 * Leave the rest of the flags alone.
2271 */
2272 to_cp->c_flag |= from_flag_special | C_MODIFIED;
2273
2274 to_cp->c_attr.ca_recflags = tempattr.ca_recflags;
2275 bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);
2276
2277
2278 /* Rehash the cnodes using their new file IDs */
2279 hfs_chash_rehash(hfsmp, from_cp, to_cp);
2280
2281 /*
2282 * When a file moves out of "Cleanup At Startup"
2283 * we can drop its NODUMP status.
2284 */
2285 if ((from_cp->c_bsdflags & UF_NODUMP) &&
2286 (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
2287 from_cp->c_bsdflags &= ~UF_NODUMP;
2288 from_cp->c_touch_chgtime = TRUE;
2289 }
2290 if ((to_cp->c_bsdflags & UF_NODUMP) &&
2291 (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
2292 to_cp->c_bsdflags &= ~UF_NODUMP;
2293 to_cp->c_touch_chgtime = TRUE;
2294 }
2295
2296 exit:
2297 if (got_cookie) {
2298 cat_postflight(hfsmp, &cookie, vfs_context_proc(ap->a_context));
2299 }
2300 if (started_tr) {
2301 hfs_end_transaction(hfsmp);
2302 }
2303
2304 if (have_cnode_locks)
2305 hfs_unlockpair(from_cp, to_cp);
2306
2307 if (have_from_trunc_lock)
2308 hfs_unlock_truncate(from_cp, 0);
2309
2310 if (have_to_trunc_lock)
2311 hfs_unlock_truncate(to_cp, 0);
2312
2313 return (error);
2314 }
2315
2316 #if HFS_COMPRESSION
2317 /*
2318 * This function is used specifically for the case when a namespace
2319 * handler is trying to steal data before it's deleted. Note that we
2320 * don't bother deleting the xattr from the source because it will get
2321 * deleted a short time later anyway.
2322 *
2323 * cnodes must be locked
2324 */
2325 static int hfs_move_compressed(cnode_t *from_cp, cnode_t *to_cp)
2326 {
2327 int ret;
2328 void *data = NULL;
2329
2330 CLR(from_cp->c_bsdflags, UF_COMPRESSED);
2331 SET(from_cp->c_flag, C_MODIFIED);
2332
2333 ret = hfs_move_data(from_cp, to_cp, HFS_MOVE_DATA_INCLUDE_RSRC);
2334 if (ret)
2335 goto exit;
2336
2337 /*
2338 * Transfer the xattr that decmpfs uses. Ideally, this code
2339 * should be with the other decmpfs code but it's file system
2340 * agnostic and this path is currently, and likely to remain, HFS+
2341 * specific. It's easier and more performant if we implement it
2342 * here.
2343 */
2344
2345 size_t size;
2346 data = hfs_malloc(size = MAX_DECMPFS_XATTR_SIZE);
2347
2348 ret = hfs_xattr_read(from_cp->c_vp, DECMPFS_XATTR_NAME, data, &size);
2349 if (ret)
2350 goto exit;
2351
2352 ret = hfs_xattr_write(to_cp->c_vp, DECMPFS_XATTR_NAME, data, size);
2353 if (ret)
2354 goto exit;
2355
2356 SET(to_cp->c_bsdflags, UF_COMPRESSED);
2357 SET(to_cp->c_flag, C_MODIFIED);
2358
2359 exit:
2360 hfs_free(data, MAX_DECMPFS_XATTR_SIZE);
2361
2362 return ret;
2363 }
2364 #endif // HFS_COMPRESSION
2365
2366 int
2367 hfs_vnop_mmap(struct vnop_mmap_args *ap)
2368 {
2369 struct vnode *vp = ap->a_vp;
2370 cnode_t *cp = VTOC(vp);
2371 int error;
2372
2373 if (VNODE_IS_RSRC(vp)) {
2374 /* allow pageins of the resource fork */
2375 } else {
2376 int compressed = hfs_file_is_compressed(cp, 1); /* 1 == don't take the cnode lock */
2377 time_t orig_ctime = cp->c_ctime;
2378
2379 if (!compressed && (cp->c_bsdflags & UF_COMPRESSED)) {
2380 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
2381 if (error != 0) {
2382 return error;
2383 }
2384 }
2385
2386 if (ap->a_fflags & PROT_WRITE) {
2387 nspace_snapshot_event(vp, orig_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
2388 }
2389 }
2390
2391 #if CONFIG_PROTECT
2392 error = cp_handle_vnop(vp, (ap->a_fflags & PROT_WRITE
2393 ? CP_WRITE_ACCESS : 0) | CP_READ_ACCESS, 0);
2394 if (error)
2395 return error;
2396 #endif
2397
2398 //
2399 // NOTE: we return ENOTSUP because we want the cluster layer
2400 // to actually do all the real work.
2401 //
2402 return (ENOTSUP);
2403 }
2404
2405 static errno_t hfs_vnop_mnomap(struct vnop_mnomap_args *ap)
2406 {
2407 vnode_t vp = ap->a_vp;
2408
2409 /*
2410 * Whilst the file was mapped, there may not have been any
2411 * page-outs so we need to increment the generation counter now.
2412 * Unfortunately this may lead to a change in the generation
2413 * counter when no actual change has been made, but there is
2414 * little we can do about that with our current architecture.
2415 */
2416 if (ubc_is_mapped_writable(vp)) {
2417 cnode_t *cp = VTOC(vp);
2418 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2419 hfs_incr_gencount(cp);
2420
2421 /*
2422 * We don't want to set the modification time here since a
2423 * change to that is not acceptable if no changes were made.
2424 * Instead we set a flag so that if we get any page-outs we
2425 * know to update the modification time. It's possible that
2426 * those page-outs weren't actually caused by changes made whilst
2427 * the file was mapped, but that's not easy to fix now.
2428 */
2429 SET(cp->c_flag, C_MIGHT_BE_DIRTY_FROM_MAPPING);
2430
2431 hfs_unlock(cp);
2432 }
2433
2434 return 0;
2435 }
2436
2437 /*
2438 * Mark the resource fork as needing a ubc_setsize when we drop the
2439 * cnode lock later.
2440 */
2441 static void hfs_rsrc_setsize(cnode_t *cp)
2442 {
2443 /*
2444 * We need to take an iocount if we don't have one. vnode_get
2445 * will return ENOENT if the vnode is terminating which is what we
2446 * want as it's not safe to call ubc_setsize in that case.
2447 */
2448 if (cp->c_rsrc_vp && !vnode_get(cp->c_rsrc_vp)) {
2449 // Shouldn't happen, but better safe...
2450 if (ISSET(cp->c_flag, C_NEED_RVNODE_PUT))
2451 vnode_put(cp->c_rsrc_vp);
2452 SET(cp->c_flag, C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE);
2453 }
2454 }
2455
2456 /*
2457 * hfs_move_data
2458 *
2459 * This is a non-symmetric variant of exchangedata. In this function,
2460 * the contents of the data fork (and optionally the resource fork)
2461 * are moved from from_cp to to_cp.
2462 *
2463 * The cnodes must be locked.
2464 *
2465 * The cnode pointed to by 'to_cp' *must* be empty prior to invoking
2466 * this function. We impose this restriction because we may not be
2467 * able to fully delete the entire file's contents in a single
2468 * transaction, particularly if it has a lot of extents. In the
2469 * normal file deletion codepath, the file is screened for two
2470 * conditions: 1) bigger than 400MB, and 2) more than 8 extents. If
2471 * so, the file is relocated to the hidden directory and the deletion
2472 * is broken up into multiple truncates. We can't do that here
2473 * because both files need to exist in the namespace. The main reason
2474 * this is imposed is that we may have to touch a whole lot of bitmap
2475 * blocks if there are many extents.
2476 *
2477 * Any data written to 'from_cp' after this call completes is not
2478 * guaranteed to be moved.
2479 *
2480 * Arguments:
2481 * cnode_t *from_cp : source file
2482 * cnode_t *to_cp : destination file; must be empty
2483 *
2484 * Returns:
2485 *
2486 * EBUSY - File has been deleted or is in use
2487 * EFBIG - Destination file was not empty
2488 * EIO - An I/O error
2489 * 0 - success
2490 * other - Other errors that can be returned from called functions
2491 */
2492 int hfs_move_data(cnode_t *from_cp, cnode_t *to_cp,
2493 hfs_move_data_options_t options)
2494 {
2495 hfsmount_t *hfsmp = VTOHFS(from_cp->c_vp);
2496 int error = 0;
2497 int lockflags = 0;
2498 bool return_EIO_on_error = false;
2499 const bool include_rsrc = ISSET(options, HFS_MOVE_DATA_INCLUDE_RSRC);
2500
2501 /* Verify that neither source/dest file is open-unlinked */
2502 if (ISSET(from_cp->c_flag, C_DELETED | C_NOEXISTS)
2503 || ISSET(to_cp->c_flag, C_DELETED | C_NOEXISTS)) {
2504 return EBUSY;
2505 }
2506
2507 /*
2508 * Verify the source file is not in use by anyone besides us.
2509 *
2510 * This function is typically invoked by a namespace handler
2511 * process responding to a temporarily stalled system call.
2512 * The FD that it is working off of is opened O_EVTONLY, so
2513 * it really has no active usecounts (the kusecount from O_EVTONLY
2514 * is subtracted from the total usecounts).
2515 *
2516 * As a result, we shouldn't have any active usecounts against
2517 * this vnode when we go to check it below.
2518 */
2519 if (vnode_isinuse(from_cp->c_vp, 0))
2520 return EBUSY;
2521
2522 if (include_rsrc && from_cp->c_rsrc_vp) {
2523 if (vnode_isinuse(from_cp->c_rsrc_vp, 0))
2524 return EBUSY;
2525
2526 /*
2527 * In the code below, if the destination file doesn't have a
2528 * c_rsrcfork then we don't create it, which means we cannot
2529 * transfer the ff_invalidranges and ff_unallocblocks fields. These
2530 * shouldn't be set because we flush the resource fork before
2531 * calling this function but there is a tiny window when we
2532 * did not have any locks...
2533 */
2534 if (!to_cp->c_rsrcfork
2535 && (!TAILQ_EMPTY(&from_cp->c_rsrcfork->ff_invalidranges)
2536 || from_cp->c_rsrcfork->ff_unallocblocks)) {
2537 /*
2538 * The file isn't really busy now but something did slip
2539 * in and tinker with the file while we didn't have any
2540 * locks, so this is the most meaningful return code for
2541 * the caller.
2542 */
2543 return EBUSY;
2544 }
2545 }
2546
2547 // Check the destination file is empty
2548 if (to_cp->c_datafork->ff_blocks
2549 || to_cp->c_datafork->ff_size
2550 || (include_rsrc
2551 && (to_cp->c_blocks
2552 || (to_cp->c_rsrcfork && to_cp->c_rsrcfork->ff_size)))) {
2553 return EFBIG;
2554 }
2555
2556 if ((error = hfs_start_transaction (hfsmp)))
2557 return error;
2558
2559 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE,
2560 HFS_EXCLUSIVE_LOCK);
2561
2562 // filefork_t is 128 bytes which should be OK
2563 filefork_t rfork_buf, *from_rfork = NULL;
2564
2565 if (include_rsrc) {
2566 from_rfork = from_cp->c_rsrcfork;
2567
2568 /*
2569 * Creating resource fork vnodes is expensive, so just get
2570 * the fork data if we need it.
2571 */
2572 if (!from_rfork && hfs_has_rsrc(from_cp)) {
2573 from_rfork = &rfork_buf;
2574
2575 from_rfork->ff_cp = from_cp;
2576 TAILQ_INIT(&from_rfork->ff_invalidranges);
2577
2578 error = cat_idlookup(hfsmp, from_cp->c_fileid, 0, 1, NULL, NULL,
2579 &from_rfork->ff_data);
2580
2581 if (error)
2582 goto exit;
2583 }
2584 }
2585
2586 /*
2587 * From here on, any failures mean that we might be leaving things
2588 * in a weird or inconsistent state. Ideally, we should back out
2589 * all the changes, but to do that properly we need to fix
2590 * MoveData. We'll save fixing that for another time. For now,
2591 * just return EIO in all cases to the caller so that they know.
2592 */
2593 return_EIO_on_error = true;
2594
2595 bool data_overflow_extents = overflow_extents(from_cp->c_datafork);
2596
2597 // Move the data fork
2598 if ((error = hfs_move_fork (from_cp->c_datafork, from_cp,
2599 to_cp->c_datafork, to_cp))) {
2600 goto exit;
2601 }
2602
2603 SET(from_cp->c_flag, C_NEED_DATA_SETSIZE);
2604 SET(to_cp->c_flag, C_NEED_DATA_SETSIZE);
2605
2606 // We move the resource fork later
2607
2608 /*
2609 * Note that because all we're doing is moving the extents around,
2610 * we can probably do this in a single transaction: Each extent
2611 * record (group of 8) is 64 bytes. An extent overflow B-Tree node
2612 * is typically 4k. This means each node can hold roughly ~60
2613 * extent records == (480 extents).
2614 *
2615 * If a file was massively fragmented and had 20k extents, this
2616 * means we'd roughly touch 20k/480 == 41 to 42 nodes, plus the
2617 * index nodes, for half of the operation (inserting or
2618 * deleting). So if we're manipulating 80-100 nodes, this is
2619 * basically 320k of data to write to the journal in a bad case.
2620 */
2621 if (data_overflow_extents) {
2622 if ((error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 0)))
2623 goto exit;
2624 }
2625
2626 if (from_rfork && overflow_extents(from_rfork)) {
2627 if ((error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 1)))
2628 goto exit;
2629 }
2630
2631 // Touch times
2632 from_cp->c_touch_acctime = TRUE;
2633 from_cp->c_touch_chgtime = TRUE;
2634 from_cp->c_touch_modtime = TRUE;
2635 hfs_touchtimes(hfsmp, from_cp);
2636
2637 to_cp->c_touch_acctime = TRUE;
2638 to_cp->c_touch_chgtime = TRUE;
2639 to_cp->c_touch_modtime = TRUE;
2640 hfs_touchtimes(hfsmp, to_cp);
2641
2642 struct cat_fork dfork_buf;
2643 const struct cat_fork *dfork, *rfork;
2644
2645 dfork = hfs_prepare_fork_for_update(to_cp->c_datafork, NULL,
2646 &dfork_buf, hfsmp->blockSize);
2647 rfork = hfs_prepare_fork_for_update(from_rfork, NULL,
2648 &rfork_buf.ff_data, hfsmp->blockSize);
2649
2650 // Update the catalog nodes, to_cp first
2651 if ((error = cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr,
2652 dfork, rfork))) {
2653 goto exit;
2654 }
2655
2656 CLR(to_cp->c_flag, C_MODIFIED | C_MINOR_MOD);
2657
2658 // Update in-memory resource fork data here
2659 if (from_rfork) {
2660 // Update c_blocks
2661 uint32_t moving = from_rfork->ff_blocks + from_rfork->ff_unallocblocks;
2662
2663 from_cp->c_blocks -= moving;
2664 to_cp->c_blocks += moving;
2665
2666 // Update to_cp's resource data if it has it
2667 filefork_t *to_rfork = to_cp->c_rsrcfork;
2668 if (to_rfork) {
2669 TAILQ_SWAP(&to_rfork->ff_invalidranges,
2670 &from_rfork->ff_invalidranges, rl_entry, rl_link);
2671 to_rfork->ff_data = from_rfork->ff_data;
2672
2673 // Deal with ubc_setsize
2674 hfs_rsrc_setsize(to_cp);
2675 }
2676
2677 // Wipe out the resource fork in from_cp
2678 rl_init(&from_rfork->ff_invalidranges);
2679 bzero(&from_rfork->ff_data, sizeof(from_rfork->ff_data));
2680
2681 // Deal with ubc_setsize
2682 hfs_rsrc_setsize(from_cp);
2683 }
2684
2685 // Currently unnecessary, but might be useful in future...
2686 dfork = hfs_prepare_fork_for_update(from_cp->c_datafork, NULL, &dfork_buf,
2687 hfsmp->blockSize);
2688 rfork = hfs_prepare_fork_for_update(from_rfork, NULL, &rfork_buf.ff_data,
2689 hfsmp->blockSize);
2690
2691 // Update from_cp
2692 if ((error = cat_update(hfsmp, &from_cp->c_desc, &from_cp->c_attr,
2693 dfork, rfork))) {
2694 goto exit;
2695 }
2696
2697 CLR(from_cp->c_flag, C_MODIFIED | C_MINOR_MOD);
2698
2699 exit:
2700 if (lockflags) {
2701 hfs_systemfile_unlock(hfsmp, lockflags);
2702 hfs_end_transaction(hfsmp);
2703 }
2704
2705 if (error && error != EIO && return_EIO_on_error) {
2706 printf("hfs_move_data: encountered error %d\n", error);
2707 error = EIO;
2708 }
2709
2710 return error;
2711 }
2712
2713 /*
2714 * Move all of the catalog and runtime data in srcfork to dstfork.
2715 *
2716 * This allows us to maintain the invalid ranges across the move data
2717 * operation so we don't need to force all of the pending IO right
2718 * now. In addition, we move all non overflow-extent extents into the
2719 * destination here.
2720 *
2721 * The destination fork must be empty and should have been checked
2722 * prior to calling this.
2723 */
2724 static int hfs_move_fork(filefork_t *srcfork, cnode_t *src_cp,
2725 filefork_t *dstfork, cnode_t *dst_cp)
2726 {
2727 // Move the invalid ranges
2728 TAILQ_SWAP(&dstfork->ff_invalidranges, &srcfork->ff_invalidranges,
2729 rl_entry, rl_link);
2730 rl_remove_all(&srcfork->ff_invalidranges);
2731
2732 // Move the fork data (copy whole structure)
2733 dstfork->ff_data = srcfork->ff_data;
2734 bzero(&srcfork->ff_data, sizeof(srcfork->ff_data));
2735
2736 // Update c_blocks
2737 src_cp->c_blocks -= dstfork->ff_blocks + dstfork->ff_unallocblocks;
2738 dst_cp->c_blocks += dstfork->ff_blocks + dstfork->ff_unallocblocks;
2739
2740 return 0;
2741 }
2742
2743 /*
2744 * cnode must be locked
2745 */
2746 int
2747 hfs_fsync(struct vnode *vp, int waitfor, hfs_fsync_mode_t fsyncmode, struct proc *p)
2748 {
2749 struct cnode *cp = VTOC(vp);
2750 struct filefork *fp = NULL;
2751 int retval = 0;
2752 struct hfsmount *hfsmp = VTOHFS(vp);
2753 struct timeval tv;
2754 int waitdata; /* attributes necessary for data retrieval */
2755 int wait; /* all other attributes (e.g. atime, etc.) */
2756 int took_trunc_lock = 0;
2757 int fsync_default = 1;
2758
2759 /*
2760 * Applications which only care about data integrity rather than full
2761 * file integrity may opt out of (delay) expensive metadata update
2762 * operations as a performance optimization.
2763 */
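/*
 * Illustrative sketch, not part of the original source: from user space
 * this distinction is roughly the one between a plain fsync(2) and the
 * stronger F_FULLFSYNC fcntl(2), which corresponds to the HFS_FSYNC_FULL
 * mode handled in this function and also asks the device to flush its
 * write cache. Assuming an already-open descriptor 'fd' (hypothetical;
 * error handling omitted):
 *
 *     #include <fcntl.h>
 *     #include <unistd.h>
 *
 *     fsync(fd);               // push this file's data to the device
 *     fcntl(fd, F_FULLFSYNC);  // additionally flush the device's cache
 */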
2764 wait = (waitfor == MNT_WAIT);
2765 waitdata = (waitfor == MNT_DWAIT) | wait;
2766
2767 if (always_do_fullfsync)
2768 fsyncmode = HFS_FSYNC_FULL;
2769 if (fsyncmode != HFS_FSYNC)
2770 fsync_default = 0;
2771
2772 /* HFS directories don't have any data blocks. */
2773 if (vnode_isdir(vp))
2774 goto metasync;
2775 fp = VTOF(vp);
2776
2777 /*
2778 * For system files flush the B-tree header and
2779 * for regular files write out any clusters
2780 */
2781 if (vnode_issystem(vp)) {
2782 if (VTOF(vp)->fcbBTCBPtr != NULL) {
2783 // XXXdbg
2784 if (hfsmp->jnl == NULL) {
2785 BTFlushPath(VTOF(vp));
2786 }
2787 }
2788 } else {
2789 hfs_unlock(cp);
2790 hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
2791 took_trunc_lock = 1;
2792
2793 if (fp->ff_unallocblocks != 0) {
2794 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2795
2796 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2797 }
2798
2799 /* Don't hold cnode lock when calling into cluster layer. */
2800 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
2801
2802 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2803 }
2804 /*
2805 * When MNT_WAIT is requested and the zero fill timeout
2806 * has expired then we must explicitly zero out any areas
2807 * that are currently marked invalid (holes).
2808 *
2809 * Files with NODUMP can bypass zero filling here.
2810 */
2811 if (fp && (((cp->c_flag & C_ALWAYS_ZEROFILL) && !TAILQ_EMPTY(&fp->ff_invalidranges)) ||
2812 ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
2813 ((cp->c_bsdflags & UF_NODUMP) == 0) &&
2814 (vnode_issystem(vp) ==0) &&
2815 cp->c_zftimeout != 0))) {
2816
2817 microuptime(&tv);
2818 if ((cp->c_flag & C_ALWAYS_ZEROFILL) == 0 && fsync_default && tv.tv_sec < (long)cp->c_zftimeout) {
2819 /* Remember that a force sync was requested. */
2820 cp->c_flag |= C_ZFWANTSYNC;
2821 goto datasync;
2822 }
2823 if (!TAILQ_EMPTY(&fp->ff_invalidranges)) {
2824 if (!took_trunc_lock || (cp->c_truncatelockowner == HFS_SHARED_OWNER)) {
2825 hfs_unlock(cp);
2826 if (took_trunc_lock) {
2827 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2828 }
2829 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2830 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2831 took_trunc_lock = 1;
2832 }
2833 hfs_flush_invalid_ranges(vp);
2834 hfs_unlock(cp);
2835 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
2836 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2837 }
2838 }
2839 datasync:
2840 if (took_trunc_lock) {
2841 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2842 took_trunc_lock = 0;
2843 }
2844
2845 if (!hfsmp->jnl)
2846 buf_flushdirtyblks(vp, waitdata, 0, "hfs_fsync");
2847 else if (fsync_default && vnode_islnk(vp)
2848 && vnode_hasdirtyblks(vp) && vnode_isrecycled(vp)) {
2849 /*
2850 * If it's a symlink that's dirty and is about to be recycled,
2851 * we need to flush the journal.
2852 */
2853 fsync_default = 0;
2854 }
2855
2856 metasync:
2857 if (vnode_isreg(vp) && vnode_issystem(vp)) {
2858 if (VTOF(vp)->fcbBTCBPtr != NULL) {
2859 microuptime(&tv);
2860 BTSetLastSync(VTOF(vp), tv.tv_sec);
2861 }
2862 cp->c_touch_acctime = FALSE;
2863 cp->c_touch_chgtime = FALSE;
2864 cp->c_touch_modtime = FALSE;
2865 } else if (!vnode_isswap(vp)) {
2866 retval = hfs_update(vp, HFS_UPDATE_FORCE);
2867
2868 /*
2869 * When MNT_WAIT is requested push out the catalog record for
2870 * this file. If they asked for a full fsync, we can skip this
2871 * because the journal_flush or hfs_metasync_all will push out
2872 * all of the metadata changes.
2873 */
2874 if ((retval == 0) && wait && fsync_default && cp->c_hint &&
2875 !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
2876 hfs_metasync(VTOHFS(vp), (daddr64_t)cp->c_hint, p);
2877 }
2878
2879 /*
2880 * If this was a full fsync, make sure all metadata
2881 * changes get to stable storage.
2882 */
2883 if (!fsync_default) {
2884 if (hfsmp->jnl) {
2885 if (fsyncmode == HFS_FSYNC_FULL)
2886 hfs_flush(hfsmp, HFS_FLUSH_FULL);
2887 else
2888 hfs_flush(hfsmp, HFS_FLUSH_JOURNAL_BARRIER);
2889 } else {
2890 retval = hfs_metasync_all(hfsmp);
2891 /* XXX need to pass context! */
2892 hfs_flush(hfsmp, HFS_FLUSH_CACHE);
2893 }
2894 }
2895 }
2896
2897 if (!hfs_is_dirty(cp) && !ISSET(cp->c_flag, C_DELETED))
2898 vnode_cleardirty(vp);
2899
2900 return (retval);
2901 }
2902
2903
2904 /* Sync an hfs catalog b-tree node */
2905 int
2906 hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p)
2907 {
2908 vnode_t vp;
2909 buf_t bp;
2910 int lockflags;
2911
2912 vp = HFSTOVCB(hfsmp)->catalogRefNum;
2913
2914 // XXXdbg - don't need to do this on a journaled volume
2915 if (hfsmp->jnl) {
2916 return 0;
2917 }
2918
2919 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
2920 /*
2921 * Look for a matching node that has been delayed
2922 * but is not part of a set (B_LOCKED).
2923 *
2924 * BLK_ONLYVALID causes buf_getblk to return a
2925 * buf_t for the daddr64_t specified only if it's
2926 * currently resident in the cache... the size
2927 * parameter to buf_getblk is ignored when this flag
2928 * is set
2929 */
2930 bp = buf_getblk(vp, node, 0, 0, 0, BLK_META | BLK_ONLYVALID);
2931
2932 if (bp) {
2933 if ((buf_flags(bp) & (B_LOCKED | B_DELWRI)) == B_DELWRI)
2934 (void) VNOP_BWRITE(bp);
2935 else
2936 buf_brelse(bp);
2937 }
2938
2939 hfs_systemfile_unlock(hfsmp, lockflags);
2940
2941 return (0);
2942 }
2943
2944
2945 /*
2946 * Sync all hfs B-trees. Use this instead of journal_flush for a volume
2947 * without a journal. Note that the volume bitmap does not get written;
2948 * we rely on fsck_hfs to fix that up (which it can do without any loss
2949 * of data).
2950 */
2951 int
2952 hfs_metasync_all(struct hfsmount *hfsmp)
2953 {
2954 int lockflags;
2955
2956 /* Lock all of the B-trees so we get a mutually consistent state */
2957 lockflags = hfs_systemfile_lock(hfsmp,
2958 SFL_CATALOG|SFL_EXTENTS|SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
2959
2960 /* Sync each of the B-trees */
2961 if (hfsmp->hfs_catalog_vp)
2962 hfs_btsync(hfsmp->hfs_catalog_vp, 0);
2963 if (hfsmp->hfs_extents_vp)
2964 hfs_btsync(hfsmp->hfs_extents_vp, 0);
2965 if (hfsmp->hfs_attribute_vp)
2966 hfs_btsync(hfsmp->hfs_attribute_vp, 0);
2967
2968 /* Wait for all of the writes to complete */
2969 if (hfsmp->hfs_catalog_vp)
2970 vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_metasync_all");
2971 if (hfsmp->hfs_extents_vp)
2972 vnode_waitforwrites(hfsmp->hfs_extents_vp, 0, 0, 0, "hfs_metasync_all");
2973 if (hfsmp->hfs_attribute_vp)
2974 vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs_metasync_all");
2975
2976 hfs_systemfile_unlock(hfsmp, lockflags);
2977
2978 return 0;
2979 }
2980
2981
2982 /*ARGSUSED 1*/
2983 static int
2984 hfs_btsync_callback(struct buf *bp, __unused void *dummy)
2985 {
2986 buf_clearflags(bp, B_LOCKED);
2987 (void) buf_bawrite(bp);
2988
2989 return(BUF_CLAIMED);
2990 }
2991
2992
2993 int
2994 hfs_btsync(struct vnode *vp, int sync_transaction)
2995 {
2996 struct cnode *cp = VTOC(vp);
2997 struct timeval tv;
2998 int flags = 0;
2999
3000 if (sync_transaction)
3001 flags |= BUF_SKIP_NONLOCKED;
3002 /*
3003 * Flush all dirty buffers associated with b-tree.
3004 */
3005 buf_iterate(vp, hfs_btsync_callback, flags, 0);
3006
3007 microuptime(&tv);
3008 if (vnode_issystem(vp) && (VTOF(vp)->fcbBTCBPtr != NULL))
3009 (void) BTSetLastSync(VTOF(vp), tv.tv_sec);
3010 cp->c_touch_acctime = FALSE;
3011 cp->c_touch_chgtime = FALSE;
3012 cp->c_touch_modtime = FALSE;
3013
3014 return 0;
3015 }
3016
3017 /*
3018 * Remove a directory.
3019 */
3020 int
3021 hfs_vnop_rmdir(struct vnop_rmdir_args *ap)
3022 {
3023 struct vnode *dvp = ap->a_dvp;
3024 struct vnode *vp = ap->a_vp;
3025 struct cnode *dcp = VTOC(dvp);
3026 struct cnode *cp = VTOC(vp);
3027 int error;
3028 time_t orig_ctime;
3029
3030 orig_ctime = VTOC(vp)->c_ctime;
3031
3032 if (!S_ISDIR(cp->c_mode)) {
3033 return (ENOTDIR);
3034 }
3035 if (dvp == vp) {
3036 return (EINVAL);
3037 }
3038
3039 nspace_snapshot_event(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
3040 cp = VTOC(vp);
3041
3042 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
3043 return (error);
3044 }
3045
3046 /* Check for a race with rmdir on the parent directory */
3047 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
3048 hfs_unlockpair (dcp, cp);
3049 return ENOENT;
3050 }
3051
3052 //
3053 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
3054 //
3055 if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
3056 uint32_t newid;
3057
3058 hfs_unlockpair(dcp, cp);
3059
3060 if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
3061 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3062 ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
3063 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
3064 FSE_ARG_DEV, VTOHFS(vp)->hfs_raw_dev,
3065 FSE_ARG_INO, (ino64_t)0, // src inode #
3066 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
3067 FSE_ARG_INT32, newid,
3068 FSE_ARG_DONE);
3069 } else {
3070 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rm...
3071 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3072 }
3073 }
3074
3075 error = hfs_removedir(dvp, vp, ap->a_cnp, 0, 0);
3076
3077 hfs_unlockpair(dcp, cp);
3078
3079 return (error);
3080 }
3081
3082 /*
3083 * Remove a directory
3084 *
3085 * Both dvp and vp cnodes are locked
3086 */
3087 int
3088 hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
3089 int skip_reserve, int only_unlink)
3090 {
3091 struct cnode *cp;
3092 struct cnode *dcp;
3093 struct hfsmount * hfsmp;
3094 struct cat_desc desc;
3095 int lockflags;
3096 int error = 0, started_tr = 0;
3097
3098 cp = VTOC(vp);
3099 dcp = VTOC(dvp);
3100 hfsmp = VTOHFS(vp);
3101
3102 if (dcp == cp) {
3103 return (EINVAL); /* cannot remove "." */
3104 }
3105 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
3106 return (0);
3107 }
3108 if (cp->c_entries != 0) {
3109 return (ENOTEMPTY);
3110 }
3111
3112 /*
3113 * If the directory is open or in use (e.g. opendir() or current working
3114 * directory for some process), wait for inactive/reclaim to actually
3115 * remove cnode from the catalog. Both inactive and reclaim codepaths are capable
3116 * of removing open-unlinked directories from the catalog, as well as getting rid
3117 * of EAs still on the element. So change only_unlink to true, so that it will get
3118 * cleaned up below.
3119 *
3120 * Otherwise, we can get into a weird old mess where the directory has C_DELETED,
3121 * but it really means C_NOEXISTS because the item was actually removed from the
3122 * catalog. Then when we try to remove the entry from the catalog later on, it won't
3123 * really be there anymore.
3124 */
3125 if (vnode_isinuse(vp, 0)) {
3126 only_unlink = 1;
3127 }
3128
3129 /* Deal with directory hardlinks */
3130 if (cp->c_flag & C_HARDLINK) {
3131 /*
3132 * Note that if we have a directory which was a hardlink at any point,
3133 * its actual directory data is stored in the directory inode in the hidden
3134 * directory rather than the leaf element(s) present in the namespace.
3135 *
3136 * If there are still other hardlinks to this directory,
3137 * then we'll just eliminate this particular link and the vnode will still exist.
3138 * If this is the last link to an empty directory, then we'll open-unlink the
3139 * directory and it will be only tagged with C_DELETED (as opposed to C_NOEXISTS).
3140 *
3141 * We could also return EBUSY here.
3142 */
3143
3144 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
3145 }
3146
3147 /*
3148 * In a few cases, we may want to allow the directory to persist in an
3149 * open-unlinked state. If the directory is being open-unlinked (still has usecount
3150 * references), or if it has EAs, or if it was being deleted as part of a rename,
3151 * then we go ahead and move it to the hidden directory.
3152 *
3153 * If the directory is being open-unlinked, then we want to keep the catalog entry
3154 * alive so that future EA calls and fchmod/fstat etc. do not cause issues later.
3155 *
3156 * If the directory had EAs, then we want to use the open-unlink trick so that the
3157 * EA removal is not done in one giant transaction. Otherwise, it could cause a panic
3158 * due to overflowing the journal.
3159 *
3160 * Finally, if it was deleted as part of a rename, we move it to the hidden directory
3161 * in order to maintain rename atomicity.
3162 *
3163 * Note that the allow_dirs argument to hfs_removefile specifies that it is
3164 * supposed to handle directories for this case.
3165 */
3166
3167 if (((hfsmp->hfs_attribute_vp != NULL) &&
3168 ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0)) ||
3169 (only_unlink != 0)) {
3170
3171 int ret = hfs_removefile(dvp, vp, cnp, 0, 0, 1, NULL, only_unlink);
3172 /*
3173 * Even though hfs_vnop_rename calls vnode_recycle for us on tvp we call
3174 * it here just in case we were invoked by rmdir() on a directory that had
3175 * EAs. To ensure that we start reclaiming the space as soon as possible,
3176 * we call vnode_recycle on the directory.
3177 */
3178 vnode_recycle(vp);
3179
3180 return ret;
3181
3182 }
3183
3184 dcp->c_flag |= C_DIR_MODIFICATION;
3185
3186 #if QUOTA
3187 if (hfsmp->hfs_flags & HFS_QUOTAS)
3188 (void)hfs_getinoquota(cp);
3189 #endif
3190 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3191 goto out;
3192 }
3193 started_tr = 1;
3194
3195 /*
3196 * Verify the directory is empty (and valid).
3197 * (Rmdir ".." won't be valid since
3198 * ".." will contain a reference to
3199 * the current directory and thus be
3200 * non-empty.)
3201 */
3202 if ((dcp->c_bsdflags & APPEND) || (cp->c_bsdflags & (IMMUTABLE | APPEND))) {
3203 error = EPERM;
3204 goto out;
3205 }
3206
3207 /* Remove the entry from the namei cache: */
3208 cache_purge(vp);
3209
3210 /*
3211 * Protect against a race with rename by using the component
3212 * name passed in and parent id from dvp (instead of using
3213 * the cp->c_desc which may have changed).
3214 */
3215 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3216 desc.cd_namelen = cnp->cn_namelen;
3217 desc.cd_parentcnid = dcp->c_fileid;
3218 desc.cd_cnid = cp->c_cnid;
3219 desc.cd_flags = CD_ISDIR;
3220 desc.cd_encoding = cp->c_encoding;
3221 desc.cd_hint = 0;
3222
3223 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
3224 error = 0;
3225 goto out;
3226 }
3227
3228 /* Remove entry from catalog */
3229 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3230
3231 if (!skip_reserve) {
3232 /*
3233 * Reserve some space in the Catalog file.
3234 */
3235 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
3236 hfs_systemfile_unlock(hfsmp, lockflags);
3237 goto out;
3238 }
3239 }
3240
3241 error = cat_delete(hfsmp, &desc, &cp->c_attr);
3242
3243 if (!error) {
3244 //
3245 // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
3246 // we don't need to touch the document_id as it's handled by the rename code.
3247 // otherwise it's a normal remove and we need to save the document id in the
3248 // per thread struct and clear it from the cnode.
3249 //
3250 struct doc_tombstone *ut;
3251 ut = doc_tombstone_get();
3252 if (!skip_reserve && (cp->c_bsdflags & UF_TRACKED)
3253 && doc_tombstone_should_save(ut, vp, cnp)) {
3254
3255 uint32_t doc_id = hfs_get_document_id(cp);
3256
3257 // this event is more of a "pending-delete"
3258 if (ut->t_lastop_document_id) {
3259 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
3260 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
3261 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
3262 FSE_ARG_INO, (ino64_t)0, // dst inode #
3263 FSE_ARG_INT32, doc_id,
3264 FSE_ARG_DONE);
3265 }
3266
3267 doc_tombstone_save(dvp, vp, cnp, doc_id, cp->c_fileid);
3268
3269 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
3270
3271 // clear this so it's never returned again
3272 fip->document_id = 0;
3273 cp->c_bsdflags &= ~UF_TRACKED;
3274 }
3275
3276 /* The parent lost a child */
3277 if (dcp->c_entries > 0)
3278 dcp->c_entries--;
3279 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
3280 dcp->c_dirchangecnt++;
3281 hfs_incr_gencount(dcp);
3282
3283 dcp->c_touch_chgtime = TRUE;
3284 dcp->c_touch_modtime = TRUE;
3285 dcp->c_flag |= C_MODIFIED;
3286
3287 hfs_update(dcp->c_vp, 0);
3288 }
3289
3290 hfs_systemfile_unlock(hfsmp, lockflags);
3291
3292 if (error)
3293 goto out;
3294
3295 #if QUOTA
3296 if (hfsmp->hfs_flags & HFS_QUOTAS)
3297 (void)hfs_chkiq(cp, -1, NOCRED, 0);
3298 #endif /* QUOTA */
3299
3300 hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));
3301
3302 /* Mark C_NOEXISTS since the catalog entry is now gone */
3303 cp->c_flag |= C_NOEXISTS;
3304
3305 out:
3306 dcp->c_flag &= ~C_DIR_MODIFICATION;
3307 wakeup((caddr_t)&dcp->c_flag);
3308
3309 if (started_tr) {
3310 hfs_end_transaction(hfsmp);
3311 }
3312
3313 return (error);
3314 }
3315
3316
3317 /*
3318 * Remove a file or link.
3319 */
3320 int
3321 hfs_vnop_remove(struct vnop_remove_args *ap)
3322 {
3323 struct vnode *dvp = ap->a_dvp;
3324 struct vnode *vp = ap->a_vp;
3325 struct cnode *dcp = VTOC(dvp);
3326 struct cnode *cp;
3327 struct vnode *rvp = NULL;
3328 int error=0, recycle_rsrc=0;
3329 int recycle_vnode = 0;
3330 uint32_t rsrc_vid = 0;
3331 time_t orig_ctime;
3332
3333 if (dvp == vp) {
3334 return (EINVAL);
3335 }
3336
3337 orig_ctime = VTOC(vp)->c_ctime;
3338 if (!vnode_isnamedstream(vp) && ((ap->a_flags & VNODE_REMOVE_SKIP_NAMESPACE_EVENT) == 0)) {
3339 error = nspace_snapshot_event(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
3340 if (error) {
3341 // XXXdbg - decide on a policy for handling namespace handler failures!
3342 // for now we just let them proceed.
3343 }
3344 }
3345 error = 0;
3346
3347 cp = VTOC(vp);
3348
3349 relock:
3350
3351 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
3352
3353 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
3354 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3355 if (rvp) {
3356 vnode_put (rvp);
3357 }
3358 return (error);
3359 }
3360 //
3361 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
3362 //
3363 if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
3364 uint32_t newid;
3365
3366 hfs_unlockpair(dcp, cp);
3367
3368 if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
3369 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3370 ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
3371 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
3372 FSE_ARG_DEV, VTOHFS(vp)->hfs_raw_dev,
3373 FSE_ARG_INO, (ino64_t)0, // src inode #
3374 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
3375 FSE_ARG_INT32, newid,
3376 FSE_ARG_DONE);
3377 } else {
3378 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rm...
3379 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3380 }
3381 }
3382
3383 /*
3384 * Lazily determine whether there is a valid resource fork
3385 * vnode attached to 'cp' if it is a regular file or symlink.
3386 * If the vnode does not exist, then we may proceed without having to
3387 * create it.
3388 *
3389 * If, however, it does exist, then we need to acquire an iocount on the
3390 * vnode after acquiring its vid. This ensures that if we have to do I/O
3391 * against it, it can't get recycled from underneath us in the middle
3392 * of this call.
3393 *
3394 * Note: this function may be invoked for directory hardlinks, so just skip these
3395 * steps if 'vp' is a directory.
3396 */
3397
3398 enum vtype vtype = vnode_vtype(vp);
3399 if ((vtype == VLNK) || (vtype == VREG)) {
3400 if ((cp->c_rsrc_vp) && (rvp == NULL)) {
3401 /* We need to acquire the rsrc vnode */
3402 rvp = cp->c_rsrc_vp;
3403 rsrc_vid = vnode_vid (rvp);
3404
3405 /* Unlock everything to acquire iocount on the rsrc vnode */
3406 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
3407 hfs_unlockpair (dcp, cp);
3408 /* Use the vid to maintain identity on rvp */
3409 if (vnode_getwithvid(rvp, rsrc_vid)) {
3410 /*
3411 * If this fails, then it was recycled or
3412 * reclaimed in the interim. Reset fields and
3413 * start over.
3414 */
3415 rvp = NULL;
3416 rsrc_vid = 0;
3417 }
3418 goto relock;
3419 }
3420 }
3421
3422 /*
3423 * Check to see if we raced rmdir for the parent directory
3424 * hfs_removefile already checks for a race on vp/cp
3425 */
3426 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
3427 error = ENOENT;
3428 goto rm_done;
3429 }
3430
3431 error = hfs_removefile(dvp, vp, ap->a_cnp, ap->a_flags, 0, 0, NULL, 0);
3432
3433 /*
3434 * If the remove succeeded in deleting the file, then we may need to mark
3435 * the resource fork for recycle so that it is reclaimed as quickly
3436 * as possible. If it were not recycled quickly, then this resource fork
3437 * vnode could keep a v_parent reference on the data fork, which prevents it
3438 * from going through reclaim (by giving it extra usecounts), except in the force-
3439 * unmount case.
3440 *
3441 * However, a caveat: we need to continue to supply resource fork
3442 * access to open-unlinked files even if the resource fork is not open. This is
3443 * a requirement for the compressed files work. Luckily, hfs_vgetrsrc will handle
3444 * this already if the data fork has been re-parented to the hidden directory.
3445 *
3446 * As a result, all we really need to do here is mark the resource fork vnode
3447 * for recycle. If it goes out of core, it can be brought in again if needed.
3448 * If the cnode was instead marked C_NOEXISTS, then there wouldn't be any
3449 * more work.
3450 */
3451 if (error == 0) {
3452 hfs_hotfile_deleted(vp);
3453
3454 if (rvp) {
3455 recycle_rsrc = 1;
3456 }
3457 /*
3458 * If the target was actually removed from the catalog, schedule it for
3459 * full reclamation/inactivation. We hold an iocount on it so it should just
3460 * get marked with MARKTERM
3461 */
3462 if (cp->c_flag & C_NOEXISTS) {
3463 recycle_vnode = 1;
3464 }
3465 }
3466
3467
3468 /*
3469 * Drop the truncate lock before unlocking the cnode
3470 * (which can potentially perform a vnode_put and
3471 * recycle the vnode which in turn might require the
3472 * truncate lock)
3473 */
3474 rm_done:
3475 hfs_unlockpair(dcp, cp);
3476 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3477
3478 if (recycle_rsrc) {
3479 /* inactive or reclaim on rvp will clean up the blocks from the rsrc fork */
3480 vnode_recycle(rvp);
3481 }
3482 if (recycle_vnode) {
3483 vnode_recycle (vp);
3484 }
3485
3486 if (rvp) {
3487 /* drop iocount on rsrc fork, was obtained at beginning of fxn */
3488 vnode_put(rvp);
3489 }
3490
3491 return (error);
3492 }
3493
3494
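/*
 * buf_iterate() callback used by hfs_removefile() below when removing a
 * journaled symlink: each dirty metadata buffer belonging to the link is
 * dropped from the active transaction via journal_kill_block() instead of
 * being written out.
 */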
3495 int
3496 hfs_removefile_callback(struct buf *bp, void *hfsmp) {
3497
3498 if ( !(buf_flags(bp) & B_META))
3499 panic("hfs: symlink bp @ %p is not marked meta-data!\n", bp);
3500 /*
3501 * it's part of the current transaction, kill it.
3502 */
3503 journal_kill_block(((struct hfsmount *)hfsmp)->jnl, bp);
3504
3505 return (BUF_CLAIMED);
3506 }
3507
3508 /*
3509 * hfs_removefile
3510 *
3511 * Similar to hfs_vnop_remove except there are additional options.
3512 * This function may be used to remove directories if they have
3513 * lots of EA's -- note the 'allow_dirs' argument.
3514 *
3515 * This function is able to delete blocks & fork data for the resource
3516 * fork even if it does not exist in core (and have a backing vnode).
3517 * It should infer the correct behavior based on the number of blocks
3518 * in the cnode and whether or not the resource fork pointer exists.
3519 * As a result, one need only pass in the 'vp' corresponding to the
3520 * data fork of this file (or main vnode in the case of a directory).
3521 * Passing in a resource fork will result in an error.
3522 *
3523 * Because we do not create any vnodes in this function, we are not at
3524 * risk of deadlocking against ourselves by double-locking.
3525 *
3526 * Requires cnode and truncate locks to be held.
3527 */
3528 int
3529 hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
3530 int flags, int skip_reserve, int allow_dirs,
3531 __unused struct vnode *rvp, int only_unlink)
3532 {
3533 struct cnode *cp;
3534 struct cnode *dcp;
3535 struct vnode *rsrc_vp = NULL;
3536 struct hfsmount *hfsmp;
3537 struct cat_desc desc;
3538 struct timeval tv;
3539 int dataforkbusy = 0;
3540 int rsrcforkbusy = 0;
3541 int lockflags;
3542 int error = 0;
3543 int started_tr = 0;
3544 int isbigfile = 0, defer_remove=0, isdir=0;
3545 int update_vh = 0;
3546
3547 cp = VTOC(vp);
3548 dcp = VTOC(dvp);
3549 hfsmp = VTOHFS(vp);
3550
3551 /* Check if we lost a race post lookup. */
3552 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
3553 return (0);
3554 }
3555
3556 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
3557 return 0;
3558 }
3559
3560 /* Make sure a remove is permitted */
3561 if (VNODE_IS_RSRC(vp)) {
3562 return (EPERM);
3563 }
3564 else {
3565 /*
3566 * We know it's a data fork.
3567 * Probe the cnode to see if we have a valid resource fork
3568 * in hand or not.
3569 */
3570 rsrc_vp = cp->c_rsrc_vp;
3571 }
3572
3573 /* Don't allow deleting the journal or journal_info_block. */
3574 if (hfs_is_journal_file(hfsmp, cp)) {
3575 return (EPERM);
3576 }
3577
3578 /*
3579 * Hard links require special handling.
3580 */
3581 if (cp->c_flag & C_HARDLINK) {
3582 if ((flags & VNODE_REMOVE_NODELETEBUSY) && vnode_isinuse(vp, 0)) {
3583 return (EBUSY);
3584 } else {
3585 /* A directory hard link with a link count of one is
3586 * treated as a regular directory. Therefore it should
3587 * only be removed using rmdir().
3588 */
3589 if ((vnode_isdir(vp) == 1) && (cp->c_linkcount == 1) &&
3590 (allow_dirs == 0)) {
3591 return (EPERM);
3592 }
3593 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
3594 }
3595 }
3596
3597 /* Directories should call hfs_rmdir! (unless they have a lot of attributes) */
3598 if (vnode_isdir(vp)) {
3599 if (allow_dirs == 0)
3600 return (EPERM); /* POSIX */
3601 isdir = 1;
3602 }
3603 /* Sanity check the parent ids. */
3604 if ((cp->c_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
3605 (cp->c_parentcnid != dcp->c_fileid)) {
3606 return (EINVAL);
3607 }
3608
3609 dcp->c_flag |= C_DIR_MODIFICATION;
3610
3611 // this guy is going away so mark him as such
3612 cp->c_flag |= C_DELETED;
3613
3614
3615 /* Remove our entry from the namei cache. */
3616 cache_purge(vp);
3617
3618 /*
3619 * If the caller was operating on a file (as opposed to a
3620 * directory with EAs), then we need to figure out
3621 * whether or not it has a valid resource fork vnode.
3622 *
3623 * If there was a valid resource fork vnode, then we need
3624 * to use hfs_truncate to eliminate its data. If there is
3625 * no vnode, then we hold the cnode lock which would
3626 * prevent it from being created. As a result,
3627 * we can use the data deletion functions which do not
3628 * require that a cnode/vnode pair exist.
3629 */
3630
3631 /* Check if this file is being used. */
3632 if (isdir == 0) {
3633 dataforkbusy = vnode_isinuse(vp, 0);
3634 /*
3635 * At this point, we know that 'vp' points to the
3636 * data fork because we checked it up front. And if
3637 * there is no rsrc fork, rsrc_vp will be NULL.
3638 */
3639 if (rsrc_vp && (cp->c_blocks - VTOF(vp)->ff_blocks)) {
3640 rsrcforkbusy = vnode_isinuse(rsrc_vp, 0);
3641 }
3642 }
3643
3644 /* Check if we have to break the deletion into multiple pieces. */
3645 if (isdir == 0)
3646 isbigfile = cp->c_datafork->ff_size >= HFS_BIGFILE_SIZE;
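/* Big files are deferred (staged through the hidden directory) so their space can be reclaimed in pieces rather than in one oversized transaction. */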
3647
3648 /* Check if the file has xattrs. If it does, we'll have to delete them in
3649 individual transactions in case there are too many. */
3650 if ((hfsmp->hfs_attribute_vp != NULL) &&
3651 (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) {
3652 defer_remove = 1;
3653 }
3654
3655 /* If we are explicitly told to only unlink the item and move it to the hidden dir, then do it */
3656 if (only_unlink) {
3657 defer_remove = 1;
3658 }
3659
3660 /*
3661 * Carbon semantics prohibit deleting busy files.
3662 * (enforced when VNODE_REMOVE_NODELETEBUSY is requested)
3663 */
3664 if (dataforkbusy || rsrcforkbusy) {
3665 if ((flags & VNODE_REMOVE_NODELETEBUSY) ||
3666 (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid == 0)) {
3667 error = EBUSY;
3668 goto out;
3669 }
3670 }
3671
3672 #if QUOTA
3673 if (hfsmp->hfs_flags & HFS_QUOTAS)
3674 (void)hfs_getinoquota(cp);
3675 #endif /* QUOTA */
3676
3677 /*
3678 * Do a ubc_setsize to indicate we need to wipe contents if:
3679 * 1) item is a regular file.
3680 * 2) Neither fork is busy AND we are not told to unlink this.
3681 *
3682 * We need to check for the defer_remove since it can be set without
3683 * having a busy data or rsrc fork
3684 */
3685 if (isdir == 0 && (!dataforkbusy || !rsrcforkbusy) && (defer_remove == 0)) {
3686 /*
3687 * A ubc_setsize can cause a pagein so defer it
3688 * until after the cnode lock is dropped. The
3689 * cnode lock cannot be dropped/reacquired here
3690 * since we might already hold the journal lock.
3691 */
3692 if (!dataforkbusy && cp->c_datafork->ff_blocks && !isbigfile) {
3693 cp->c_flag |= C_NEED_DATA_SETSIZE;
3694 }
3695 if (!rsrcforkbusy && rsrc_vp) {
3696 cp->c_flag |= C_NEED_RSRC_SETSIZE;
3697 }
3698 }
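/*
 * The deferred ubc_setsize for C_NEED_DATA_SETSIZE / C_NEED_RSRC_SETSIZE is
 * expected to be performed once the cnode lock is dropped (see hfs_unlock).
 */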
3699
3700 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3701 goto out;
3702 }
3703 started_tr = 1;
3704
3705 // XXXdbg - if we're journaled, kill any dirty symlink buffers
3706 if (hfsmp->jnl && vnode_islnk(vp) && (defer_remove == 0)) {
3707 buf_iterate(vp, hfs_removefile_callback, BUF_SKIP_NONLOCKED, (void *)hfsmp);
3708 }
3709
3710 /*
3711 * Prepare to truncate any non-busy forks. Busy forks will
3712 * get truncated when their vnode goes inactive.
3713 * Note that we will only enter this region if we
3714 * can avoid creating an open-unlinked file. If
3715 * either fork is busy, we will have to create an open-
3716 * unlinked file.
3717 *
3718 * Since we are deleting the file, we need to stagger the runtime
3719 * modifications to do things in such a way that a crash won't
3720 * result in us getting overlapped extents or any other
3721 * bad inconsistencies. As such, we call prepare_release_storage
3722 * which updates the UBC, updates quota information, and releases
3723 * any loaned blocks that belong to this file. No actual
3724 * truncation or bitmap manipulation is done until *AFTER*
3725 * the catalog record is removed.
3726 */
3727 if (isdir == 0 && (!dataforkbusy && !rsrcforkbusy) && (only_unlink == 0)) {
3728
3729 if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) {
3730
3731 error = hfs_prepare_release_storage (hfsmp, vp);
3732 if (error) {
3733 goto out;
3734 }
3735 update_vh = 1;
3736 }
3737
3738 /*
3739 * If the resource fork vnode does not exist, we can skip this step.
3740 */
3741 if (!rsrcforkbusy && rsrc_vp) {
3742 error = hfs_prepare_release_storage (hfsmp, rsrc_vp);
3743 if (error) {
3744 goto out;
3745 }
3746 update_vh = 1;
3747 }
3748 }
3749
3750 /*
3751 * Protect against a race with rename by using the component
3752 * name passed in and parent id from dvp (instead of using
3753 * the cp->c_desc which may have changed). Also, be aware that
3754 * because we allow directories to be passed in, we need to special case
3755 * this temporary descriptor in case we were handed a directory.
3756 */
3757 if (isdir) {
3758 desc.cd_flags = CD_ISDIR;
3759 }
3760 else {
3761 desc.cd_flags = 0;
3762 }
3763 desc.cd_encoding = cp->c_desc.cd_encoding;
3764 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3765 desc.cd_namelen = cnp->cn_namelen;
3766 desc.cd_parentcnid = dcp->c_fileid;
3767 desc.cd_hint = cp->c_desc.cd_hint;
3768 desc.cd_cnid = cp->c_cnid;
3769 microtime(&tv);
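/* 'tv' stamps the ctime/mtime updates applied to the parent and the target below. */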
3770
3771 /*
3772 * There are two cases to consider:
3773 * 1. File/Dir is busy/big/defer_remove ==> move/rename the file/dir
3774 * 2. File is not in use ==> remove the file
3775 *
3776 * We can get a directory in case 1 because it may have had lots of attributes,
3777 * which need to get removed here.
3778 */
3779 if (dataforkbusy || rsrcforkbusy || isbigfile || defer_remove) {
3780 char delname[32];
3781 struct cat_desc to_desc;
3782 struct cat_desc todir_desc;
3783
3784 /*
3785 * Orphan this file or directory (move to hidden directory).
3786 * Again, we need to take care that we treat directories as directories,
3787 * and files as files. Because directories with attributes can be passed in,
3788 * check to make sure that we have a directory or a file before filling in the
3789 * temporary descriptor's flags. We keep orphaned directories AND files in
3790 * the FILE_HARDLINKS private directory since we're generalizing over all
3791 * orphaned filesystem objects.
3792 */
3793 bzero(&todir_desc, sizeof(todir_desc));
3794 todir_desc.cd_parentcnid = 2;
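/* 2 == kHFSRootFolderID: the private directory lives in the volume root. */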
3795
3796 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
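/* The temporary name is derived from the file ID, so orphaned entries in the private directory stay unique. */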
3797 bzero(&to_desc, sizeof(to_desc));
3798 to_desc.cd_nameptr = (const u_int8_t *)delname;
3799 to_desc.cd_namelen = strlen(delname);
3800 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
3801 if (isdir) {
3802 to_desc.cd_flags = CD_ISDIR;
3803 }
3804 else {
3805 to_desc.cd_flags = 0;
3806 }
3807 to_desc.cd_cnid = cp->c_cnid;
3808
3809 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
3810 if (!skip_reserve) {
3811 if ((error = cat_preflight(hfsmp, CAT_RENAME, NULL, 0))) {
3812 hfs_systemfile_unlock(hfsmp, lockflags);
3813 goto out;
3814 }
3815 }
3816
3817 error = cat_rename(hfsmp, &desc, &todir_desc,
3818 &to_desc, (struct cat_desc *)NULL);
3819
3820 if (error == 0) {
3821 hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries++;
3822 if (isdir == 1) {
3823 INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
3824 }
3825 (void) cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
3826 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
3827
3828 /* Update the parent directory */
3829 if (dcp->c_entries > 0)
3830 dcp->c_entries--;
3831 if (isdir == 1) {
3832 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
3833 }
3834 dcp->c_dirchangecnt++;
3835 hfs_incr_gencount(dcp);
3836
3837 dcp->c_ctime = tv.tv_sec;
3838 dcp->c_mtime = tv.tv_sec;
3839 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
3840
3841 /* Update the file or directory's state */
3842 cp->c_flag |= C_DELETED;
3843 cp->c_ctime = tv.tv_sec;
3844 --cp->c_linkcount;
3845 (void) cat_update(hfsmp, &to_desc, &cp->c_attr, NULL, NULL);
3846 }
3847 hfs_systemfile_unlock(hfsmp, lockflags);
3848 if (error)
3849 goto out;
3850
3851 }
3852 else {
3853 /*
3854 * Nobody is using this item; we can safely remove everything.
3855 */
3856 struct filefork *temp_rsrc_fork = NULL;
3857 #if QUOTA
3858 off_t savedbytes;
3859 int blksize = hfsmp->blockSize;
3860 #endif
3861 u_int32_t fileid = cp->c_fileid;
3862
3863 /*
3864 * Figure out if we need to read the resource fork data into
3865 * core before wiping out the catalog record.
3866 *
3867 * 1) Must not be a directory
3868 * 2) cnode's c_rsrcfork ptr must be NULL.
3869 * 3) rsrc fork must have actual blocks
3870 */
3871 if ((isdir == 0) && (cp->c_rsrcfork == NULL) &&
3872 (cp->c_blocks - VTOF(vp)->ff_blocks)) {
3873 /*
3874 * The resource fork vnode & filefork did not exist.
3875 * Create a temporary one for use in this function only.
3876 */
3877 temp_rsrc_fork = hfs_mallocz(sizeof(struct filefork));
3878 temp_rsrc_fork->ff_cp = cp;
3879 rl_init(&temp_rsrc_fork->ff_invalidranges);
3880 }
3881
3882 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3883
3884 /* Look up the resource fork first, if necessary */
3885 if (temp_rsrc_fork) {
3886 error = cat_lookup (hfsmp, &desc, 1, 0, (struct cat_desc*) NULL,
3887 (struct cat_attr*) NULL, &temp_rsrc_fork->ff_data, NULL);
3888 if (error) {
3889 hfs_free(temp_rsrc_fork, sizeof(struct filefork));
3890 hfs_systemfile_unlock (hfsmp, lockflags);
3891 goto out;
3892 }
3893 }
3894
3895 if (!skip_reserve) {
3896 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
3897 if (temp_rsrc_fork) {
3898 hfs_free(temp_rsrc_fork, sizeof(struct filefork));
3899 }
3900 hfs_systemfile_unlock(hfsmp, lockflags);
3901 goto out;
3902 }
3903 }
3904
3905 error = cat_delete(hfsmp, &desc, &cp->c_attr);
3906
3907 if (error && error != ENXIO && error != ENOENT) {
3908 printf("hfs_removefile: deleting file %s (id=%d) vol=%s err=%d\n",
3909 cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, hfsmp->vcbVN, error);
3910 }
3911
3912 if (error == 0) {
3913 /* Update the parent directory */
3914 if (dcp->c_entries > 0)
3915 dcp->c_entries--;
3916 dcp->c_dirchangecnt++;
3917 hfs_incr_gencount(dcp);
3918
3919 dcp->c_ctime = tv.tv_sec;
3920 dcp->c_mtime = tv.tv_sec;
3921 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
3922 }
3923 hfs_systemfile_unlock(hfsmp, lockflags);
3924
3925 if (error) {
3926 if (temp_rsrc_fork) {
3927 hfs_free(temp_rsrc_fork, sizeof(struct filefork));
3928 }
3929 goto out;
3930 }
3931
3932 /*
3933 * Now that we've wiped out the catalog record, the file effectively doesn't
3934 * exist anymore. So update the quota records to reflect the loss of the
3935 * data fork and the resource fork.
3936 */
3937 #if QUOTA
3938 if (cp->c_datafork->ff_blocks > 0) {
3939 savedbytes = ((off_t)cp->c_datafork->ff_blocks * (off_t)blksize);
3940 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3941 }
3942
3943 /*
3944 * We may have just deleted the catalog record for a resource fork even
3945 * though it did not exist in core as a vnode. However, just because there
3946 * was a resource fork pointer in the cnode does not mean that it had any blocks.
3947 */
3948 if (temp_rsrc_fork || cp->c_rsrcfork) {
3949 if (cp->c_rsrcfork) {
3950 if (cp->c_rsrcfork->ff_blocks > 0) {
3951 savedbytes = ((off_t)cp->c_rsrcfork->ff_blocks * (off_t)blksize);
3952 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3953 }
3954 }
3955 else {
3956 /* we must have used a temporary fork */
3957 savedbytes = ((off_t)temp_rsrc_fork->ff_blocks * (off_t)blksize);
3958 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3959 }
3960 }
3961
3962 if (hfsmp->hfs_flags & HFS_QUOTAS) {
3963 (void)hfs_chkiq(cp, -1, NOCRED, 0);
3964 }
3965 #endif
3966
3967 if (vnode_islnk(vp) && cp->c_datafork->ff_symlinkptr) {
3968 hfs_free(cp->c_datafork->ff_symlinkptr, cp->c_datafork->ff_size);
3969 cp->c_datafork->ff_symlinkptr = NULL;
3970 }
3971
3972 /*
3973 * If we didn't get any errors deleting the catalog entry, then go ahead
3974 * and release the backing store now. The filefork pointers are still valid.
3975 */
3976 if (temp_rsrc_fork) {
3977 error = hfs_release_storage (hfsmp, cp->c_datafork, temp_rsrc_fork, fileid);
3978 }
3979 else {
3980 /* if cp->c_rsrcfork == NULL, hfs_release_storage will skip over it. */
3981 error = hfs_release_storage (hfsmp, cp->c_datafork, cp->c_rsrcfork, fileid);
3982 }
3983 if (error) {
3984 /*
3985 * If we encountered an error updating the extents and bitmap,
3986 * mark the volume inconsistent. At this point, the catalog record has
3987 * already been deleted, so we can't recover it at this point. We need
3988 * to proceed and update the volume header and mark the cnode C_NOEXISTS.
3989 * The subsequent fsck should be able to recover the free space for us.
3990 */
3991 hfs_mark_inconsistent(hfsmp, HFS_OP_INCOMPLETE);
3992 }
3993 else {
3994 /* reset update_vh to 0, since hfs_release_storage should have done it for us */
3995 update_vh = 0;
3996 }
3997
3998 /* Get rid of the temporary rsrc fork */
3999 if (temp_rsrc_fork) {
4000 hfs_free(temp_rsrc_fork, sizeof(struct filefork));
4001 }
4002
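/*
 * The catalog record is gone: the cnode is now fully non-existent
 * (C_NOEXISTS) rather than merely open-unlinked (C_DELETED).
 */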
4003 cp->c_flag |= C_NOEXISTS;
4004 cp->c_flag &= ~C_DELETED;
4005
4006 cp->c_touch_chgtime = TRUE;
4007 --cp->c_linkcount;
4008
4009 /*
4010 * We must never get a directory if we're in this else block. We could
4011 * accidentally drop the number of files in the volume header if we did.
4012 */
4013 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
4014
4015 }
4016
4017 //
4018 // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
4019 // we don't need to touch the document_id as it's handled by the rename code.
4020 // otherwise it's a normal remove and we need to save the document id in the
4021 // per thread struct and clear it from the cnode.
4022 //
4023 if (!error && !skip_reserve && (cp->c_bsdflags & UF_TRACKED)
4024 && cp->c_linkcount <= 1) {
4025 struct doc_tombstone *ut;
4026 ut = doc_tombstone_get();
4027 if (doc_tombstone_should_save(ut, vp, cnp)) {
4028 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4029 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4030 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
4031 FSE_ARG_INO, (ino64_t)0, // dst inode #
4032 FSE_ARG_INT32, hfs_get_document_id(cp), // document id
4033 FSE_ARG_DONE);
4034
4035 doc_tombstone_save(dvp, vp, cnp, hfs_get_document_id(cp),
4036 cp->c_fileid);
4037
4038 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
4039
4040 fip->document_id = 0;
4041 cp->c_bsdflags &= ~UF_TRACKED;
4042 }
4043 }
4044
4045 /*
4046 * All done with this cnode's descriptor...
4047 *
4048 * Note: all future catalog calls for this cnode must be by
4049 * fileid only. This is OK for HFS (which doesn't have file
4050 * thread records) since HFS doesn't support the removal of
4051 * busy files.
4052 */
4053 cat_releasedesc(&cp->c_desc);
4054
4055 out:
4056 if (error) {
4057 cp->c_flag &= ~C_DELETED;
4058 }
4059
4060 if (update_vh) {
4061 /*
4062 * If we bailed out earlier, we may need to update the volume header
4063 * to deal with the borrowed blocks accounting.
4064 */
4065 hfs_volupdate (hfsmp, VOL_UPDATE, 0);
4066 }
4067
4068 if (started_tr) {
4069 hfs_end_transaction(hfsmp);
4070 }
4071
4072 dcp->c_flag &= ~C_DIR_MODIFICATION;
4073 wakeup((caddr_t)&dcp->c_flag);
4074
4075 return (error);
4076 }
4077
4078
4079 void
4080 replace_desc(struct cnode *cp, struct cat_desc *cdp)
4081 {
4082 // fixes 4348457 and 4463138
4083 if (&cp->c_desc == cdp) {
4084 return;
4085 }
4086
4087 /* First release allocated name buffer */
4088 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
4089 const u_int8_t *name = cp->c_desc.cd_nameptr;
4090
4091 cp->c_desc.cd_nameptr = 0;
4092 cp->c_desc.cd_namelen = 0;
4093 cp->c_desc.cd_flags &= ~CD_HASBUF;
4094 vfs_removename((const char *)name);
4095 }
4096 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
4097
4098 /* Cnode now owns the name buffer */
4099 cdp->cd_nameptr = 0;
4100 cdp->cd_namelen = 0;
4101 cdp->cd_flags &= ~CD_HASBUF;
4102 }
4103
4104 /*
4105 * hfs_vnop_rename
4106 *
4107 * Just forwards the arguments from VNOP_RENAME into those of
4108 * VNOP_RENAMEX but zeros out the flags word.
4109 */
4110 int hfs_vnop_rename (struct vnop_rename_args *args) {
4111 struct vnop_renamex_args vrx;
4112
4113 vrx.a_desc = args->a_desc; // we aren't using it to switch into the vnop array, so fine as is.
4114 vrx.a_fdvp = args->a_fdvp;
4115 vrx.a_fvp = args->a_fvp;
4116 vrx.a_fcnp = args->a_fcnp;
4117 vrx.a_tdvp = args->a_tdvp;
4118 vrx.a_tvp = args->a_tvp;
4119 vrx.a_tcnp = args->a_tcnp;
4120 vrx.a_vap = NULL; // not used
4121 vrx.a_flags = 0; //zero out the flags.
4122 vrx.a_context = args->a_context;
4123
4124 return hfs_vnop_renamex (&vrx);
4125 }
4126
4127
4128
4129 /*
4130 * Rename a cnode.
4131 *
4132 * The VFS layer guarantees that:
4133 * - source and destination will either both be directories, or
4134 * both not be directories.
4135 * - all the vnodes are from the same file system
4136 *
4137 * When the target is a directory, HFS must ensure that it is empty.
4138 *
4139 * Note that this function requires up to 6 vnodes in order to work properly
4140 * if it is operating on files (and not on directories). This is because only
4141 * files can have resource forks, and we now require iocounts to be held on the
4142 * vnodes corresponding to the resource forks (if applicable) as well as
4143 * the files or directories undergoing rename. The problem with not holding
4144 * iocounts on the resource fork vnodes is that it can lead to a deadlock
4145 * situation: The rsrc fork of the source file may be recycled and reclaimed
4146 * in order to provide a vnode for the destination file's rsrc fork. Since
4147 * data and rsrc forks share the same cnode, we'd eventually try to lock the
4148 * source file's cnode in order to sync its rsrc fork to disk, but it's already
4149 * been locked. By taking the rsrc fork vnodes up front we ensure that they
4150 * cannot be recycled, and that the situation mentioned above cannot happen.
4151 */
4152 int
4153 hfs_vnop_renamex(struct vnop_renamex_args *ap)
4154 {
4155 struct vnode *tvp = ap->a_tvp;
4156 struct vnode *tdvp = ap->a_tdvp;
4157 struct vnode *fvp = ap->a_fvp;
4158 struct vnode *fdvp = ap->a_fdvp;
4159 /*
4160 * Note that we only need locals for the target/destination's
4161 * resource fork vnode (and only if necessary). We don't care if the
4162 * source has a resource fork vnode or not.
4163 */
4164 struct vnode *tvp_rsrc = NULLVP;
4165 uint32_t tvp_rsrc_vid = 0;
4166 struct componentname *tcnp = ap->a_tcnp;
4167 struct componentname *fcnp = ap->a_fcnp;
4168 struct proc *p = vfs_context_proc(ap->a_context);
4169 struct cnode *fcp;
4170 struct cnode *fdcp;
4171 struct cnode *tdcp;
4172 struct cnode *tcp;
4173 struct cnode *error_cnode;
4174 struct cat_desc from_desc;
4175 struct cat_desc to_desc;
4176 struct cat_desc out_desc;
4177 struct hfsmount *hfsmp;
4178 cat_cookie_t cookie;
4179 int tvp_deleted = 0;
4180 int started_tr = 0, got_cookie = 0;
4181 int took_trunc_lock = 0;
4182 int lockflags;
4183 int error;
4184 time_t orig_from_ctime, orig_to_ctime;
4185 int emit_rename = 1;
4186 int emit_delete = 1;
4187 int is_tracked = 0;
4188 int unlocked;
4189 vnode_t old_doc_vp = NULL;
4190 int rename_exclusive = 0;
4191
4192 orig_from_ctime = VTOC(fvp)->c_ctime;
4193 if (tvp && VTOC(tvp)) {
4194 orig_to_ctime = VTOC(tvp)->c_ctime;
4195 } else {
4196 orig_to_ctime = ~0;
4197 }
4198
4199 hfsmp = VTOHFS(tdvp);
4200
4201 /* Check the flags first, so we can avoid grabbing locks if necessary */
4202 if (ap->a_flags) {
4203 /* These are the only flags we support for now */
4204 if ((ap->a_flags & (VFS_RENAME_EXCL)) == 0) {
4205 return ENOTSUP;
4206 }
4207
4208 /* The rename flags are mutually exclusive for HFS+ */
4209 switch (ap->a_flags & VFS_RENAME_FLAGS_MASK) {
4210 case VFS_RENAME_EXCL:
4211 rename_exclusive = true;
4212 break;
4213 default:
4214 return ENOTSUP;
4215 }
4216 }
4217
4218 /*
4219 * Do special case checks here. If fvp == tvp then we need to check the
4220 * cnode with locks held.
4221 */
4222 if (fvp == tvp) {
4223 int is_hardlink = 0;
4224 /*
4225 * In this case, we do *NOT* ever emit a DELETE event.
4226 * We may not necessarily emit a RENAME event, either.
4227 */
4228 emit_delete = 0;
4229 if ((error = hfs_lock(VTOC(fvp), HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
4230 return error;
4231 }
4232 /* Check to see if the item is a hardlink or not */
4233 is_hardlink = (VTOC(fvp)->c_flag & C_HARDLINK);
4234 hfs_unlock (VTOC(fvp));
4235
4236 /*
4237 * If the item is not a hardlink, then case sensitivity must be off, otherwise
4238 * two names should not resolve to the same cnode unless they were case variants.
4239 */
4240 if (is_hardlink) {
4241 emit_rename = 0;
4242 /*
4243 * Hardlinks are a little trickier. We only want to emit a rename event
4244 * if the item is a hardlink, the parent directories are the same, case sensitivity
4245 * is off, and the case folded names are the same. See the fvp == tvp case below for more
4246 * info.
4247 */
4248
4249 if ((fdvp == tdvp) && ((hfsmp->hfs_flags & HFS_CASE_SENSITIVE) == 0)) {
4250 if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
4251 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
4252 /* Then in this case only it is ok to emit a rename */
4253 emit_rename = 1;
4254 }
4255 }
4256 }
4257 }
4258 if (emit_rename) {
4259 /* c_bsdflags should only be assessed while holding the cnode lock.
4260 * This is not done consistently throughout the code and can result
4261 * in race. This will be fixed via rdar://12181064
4262 */
4263 if (VTOC(fvp)->c_bsdflags & UF_TRACKED) {
4264 is_tracked = 1;
4265 }
4266 nspace_snapshot_event(fvp, orig_from_ctime, NAMESPACE_HANDLER_RENAME_OP, NULL);
4267 }
4268
4269 if (tvp && VTOC(tvp)) {
4270 if (emit_delete) {
4271 nspace_snapshot_event(tvp, orig_to_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
4272 }
4273 }
4274
4275 retry:
4276 /* When tvp exists, take the truncate lock for hfs_removefile(). */
4277 if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) {
4278 hfs_lock_truncate(VTOC(tvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4279 took_trunc_lock = 1;
4280 }
4281
4282 relock:
4283 error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL,
4284 HFS_EXCLUSIVE_LOCK, &error_cnode);
4285 if (error) {
4286 if (took_trunc_lock) {
4287 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
4288 took_trunc_lock = 0;
4289 }
4290
4291 /*
4292 * We hit an error path. If we were trying to re-acquire the locks
4293 * after coming through here once, we might have already obtained
4294 * an iocount on tvp's resource fork vnode. Drop that before dealing
4295 * with the failure. Note this is safe -- since we are in an
4296 * error handling path, we can't be holding the cnode locks.
4297 */
4298 if (tvp_rsrc) {
4299 vnode_put (tvp_rsrc);
4300 tvp_rsrc_vid = 0;
4301 tvp_rsrc = NULL;
4302 }
4303
4304 /*
4305 * tvp might no longer exist. If the cause of the lock failure
4306 * was tvp, then we can try again with tvp/tcp set to NULL.
4307 * This is ok because the vfs syscall will vnode_put the vnodes
4308 * after we return from hfs_vnop_rename.
4309 */
4310 if ((error == ENOENT) && (tvp != NULL) && (error_cnode == VTOC(tvp))) {
4311 tcp = NULL;
4312 tvp = NULL;
4313 goto retry;
4314 }
4315
4316 /* If we want to reintroduce notifications for failed renames, this
4317 is the place to do it. */
4318
4319 return (error);
4320 }
4321
4322 fdcp = VTOC(fdvp);
4323 fcp = VTOC(fvp);
4324 tdcp = VTOC(tdvp);
4325 tcp = tvp ? VTOC(tvp) : NULL;
4326
4327
4328 /*
4329 * If caller requested an exclusive rename (VFS_RENAME_EXCL) and 'tcp' exists
4330 * then we must fail the operation.
4331 */
4332 if (tcp && rename_exclusive) {
4333 error = EEXIST;
4334 goto out;
4335 }
4336
4337 //
4338 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
4339 //
4340 unlocked = 0;
4341 if ((fcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
4342 uint32_t newid;
4343
4344 hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
4345 unlocked = 1;
4346
4347 if (hfs_generate_document_id(hfsmp, &newid) == 0) {
4348 hfs_lock(fcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4349 ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id = newid;
4350 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4351 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4352 FSE_ARG_INO, (ino64_t)0, // src inode #
4353 FSE_ARG_INO, (ino64_t)fcp->c_fileid, // dst inode #
4354 FSE_ARG_INT32, newid,
4355 FSE_ARG_DONE);
4356 hfs_unlock(fcp);
4357 } else {
4358 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rename...
4359 }
4360
4361 //
4362 // check if we're going to need to fix tcp as well. if we aren't, go back and relock
4363 // everything. otherwise continue on and fix up tcp as well before relocking.
4364 //
4365 if (tcp == NULL || !(tcp->c_bsdflags & UF_TRACKED) || ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id != 0) {
4366 goto relock;
4367 }
4368 }
4369
4370 //
4371 // same thing for tcp if it's set
4372 //
4373 if (tcp && (tcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
4374 uint32_t newid;
4375
4376 if (!unlocked) {
4377 hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
4378 unlocked = 1;
4379 }
4380
4381 if (hfs_generate_document_id(hfsmp, &newid) == 0) {
4382 hfs_lock(tcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4383 ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id = newid;
4384 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4385 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4386 FSE_ARG_INO, (ino64_t)0, // src inode #
4387 FSE_ARG_INO, (ino64_t)tcp->c_fileid, // dst inode #
4388 FSE_ARG_INT32, newid,
4389 FSE_ARG_DONE);
4390 hfs_unlock(tcp);
4391 } else {
4392 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rename...
4393 }
4394
4395 // go back up and relock everything. next time through the if statement won't be true
4396 // and we'll skip over this block of code.
4397 goto relock;
4398 }
4399
4400
4401
4402 /*
4403 * Acquire iocounts on the destination's resource fork vnode
4404 * if necessary. If dst/src are files and the dst has a resource
4405 * fork vnode, then we need to try and acquire an iocount on the rsrc vnode.
4406 * If it does not exist, then we don't care and can skip it.
4407 */
4408 if ((vnode_isreg(fvp)) || (vnode_islnk(fvp))) {
4409 if ((tvp) && (tcp->c_rsrc_vp) && (tvp_rsrc == NULL)) {
4410 tvp_rsrc = tcp->c_rsrc_vp;
4411 /*
4412 * We can look at the vid here because we're holding the
4413 * cnode lock on the underlying cnode for this rsrc vnode.
4414 */
4415 tvp_rsrc_vid = vnode_vid (tvp_rsrc);
4416
4417 /* Unlock everything to acquire iocount on this rsrc vnode */
4418 if (took_trunc_lock) {
4419 hfs_unlock_truncate (VTOC(tvp), HFS_LOCK_DEFAULT);
4420 took_trunc_lock = 0;
4421 }
4422 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4423
4424 if (vnode_getwithvid (tvp_rsrc, tvp_rsrc_vid)) {
4425 /* iocount acquisition failed. Reset fields and start over.. */
4426 tvp_rsrc_vid = 0;
4427 tvp_rsrc = NULL;
4428 }
4429 goto retry;
4430 }
4431 }
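/*
 * From here on, all four cnodes are locked (via hfs_lockfour) and, if the
 * destination had a resource fork vnode, we hold an iocount on it in tvp_rsrc.
 */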
4432
4433
4434
4435 /* Ensure we didn't race src or dst parent directories with rmdir. */
4436 if (fdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
4437 error = ENOENT;
4438 goto out;
4439 }
4440
4441 if (tdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
4442 error = ENOENT;
4443 goto out;
4444 }
4445
4446
4447 /* Check for a race against unlink. The hfs_valid_cnode checks validate
4448 * the parent/child relationship with fdcp and tdcp, as well as the
4449 * component name of the target cnodes.
4450 */
4451 if ((fcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, fdvp, fcnp, fcp->c_fileid, NULL, &error)) {
4452 error = ENOENT;
4453 goto out;
4454 }
4455
4456 if (tcp && ((tcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, tdvp, tcnp, tcp->c_fileid, NULL, &error))) {
4457 //
4458 // hmm, the destination vnode isn't valid any more.
4459 // in this case we can just drop him and pretend he
4460 // never existed in the first place.
4461 //
4462 if (took_trunc_lock) {
4463 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
4464 took_trunc_lock = 0;
4465 }
4466 error = 0;
4467
4468 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4469
4470 tcp = NULL;
4471 tvp = NULL;
4472
4473 // retry the locking with tvp null'ed out
4474 goto retry;
4475 }
4476
4477 fdcp->c_flag |= C_DIR_MODIFICATION;
4478 if (fdvp != tdvp) {
4479 tdcp->c_flag |= C_DIR_MODIFICATION;
4480 }
4481
4482 /*
4483 * Disallow renaming of a directory hard link if the source and
4484 * destination parent directories are different, or a directory whose
4485 * descendant is a directory hard link and one of the ancestors
4486 * of the destination directory is a directory hard link.
4487 */
4488 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
4489 if (fcp->c_flag & C_HARDLINK) {
4490 error = EPERM;
4491 goto out;
4492 }
4493 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
4494 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4495 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
4496 error = EPERM;
4497 hfs_systemfile_unlock(hfsmp, lockflags);
4498 goto out;
4499 }
4500 hfs_systemfile_unlock(hfsmp, lockflags);
4501 }
4502 }
4503
4504 /*
4505 * The following edge case is caught here:
4506 * (to cannot be a descendent of from)
4507 *
4508 *       o fdvp
4509 *      /
4510 *     /
4511 *    o fvp
4512 *     \
4513 *      \
4514 *       o tdvp
4515 *      /
4516 *     /
4517 *    o tvp
4518 */
4519 if (tdcp->c_parentcnid == fcp->c_fileid) {
4520 error = EINVAL;
4521 goto out;
4522 }
4523
4524 /*
4525 * The following two edge cases are caught here:
4526 * (note tvp is not empty)
4527 *
4528 *       o tdvp               o tdvp
4529 *      /                    /
4530 *     /                    /
4531 *    o tvp            tvp o fdvp
4532 *     \                    \
4533 *      \                    \
4534 *       o fdvp               o fvp
4535 *      /
4536 *     /
4537 *    o fvp
4538 */
4539 if (tvp && vnode_isdir(tvp) && (tcp->c_entries != 0) && fvp != tvp) {
4540 error = ENOTEMPTY;
4541 goto out;
4542 }
4543
4544 /*
4545 * The following edge case is caught here:
4546 * (the from child and parent are the same)
4547 *
4548 * o tdvp
4549 * /
4550 * /
4551 * fdvp o fvp
4552 */
4553 if (fdvp == fvp) {
4554 error = EINVAL;
4555 goto out;
4556 }
4557
4558 /*
4559 * Make sure "from" vnode and its parent are changeable.
4560 */
4561 if ((fcp->c_bsdflags & (IMMUTABLE | APPEND)) || (fdcp->c_bsdflags & APPEND)) {
4562 error = EPERM;
4563 goto out;
4564 }
4565
4566 /*
4567 * If the destination parent directory is "sticky", then the
4568 * user must own the parent directory, or the destination of
4569 * the rename, otherwise the destination may not be changed
4570 * (except by root). This implements append-only directories.
4571 *
4572 * Note that checks for immutable and write access are done
4573 * by the call to hfs_removefile.
4574 */
4575 if (tvp && (tdcp->c_mode & S_ISTXT) &&
4576 (suser(vfs_context_ucred(ap->a_context), NULL)) &&
4577 (kauth_cred_getuid(vfs_context_ucred(ap->a_context)) != tdcp->c_uid) &&
4578 (hfs_owner_rights(hfsmp, tcp->c_uid, vfs_context_ucred(ap->a_context), p, false)) ) {
4579 error = EPERM;
4580 goto out;
4581 }
4582
4583 /* Don't allow modification of the journal or journal_info_block */
4584 if (hfs_is_journal_file(hfsmp, fcp) ||
4585 (tcp && hfs_is_journal_file(hfsmp, tcp))) {
4586 error = EPERM;
4587 goto out;
4588 }
4589
4590 #if QUOTA
4591 if (tvp)
4592 (void)hfs_getinoquota(tcp);
4593 #endif
4594 /* Preflighting done, take fvp out of the name space. */
4595 cache_purge(fvp);
4596
4597 #if CONFIG_SECLUDED_RENAME
4598 /*
4599 * Check for "secure" rename that imposes additional restrictions on the
4600 * source vnode. We wait until here to check in order to prevent a race
4601 * with other threads that manage to look up fvp, but their open or link
4602 * is blocked by our locks. At this point, with fvp out of the name cache,
4603 * and holding the lock on fdvp, no other thread can find fvp.
4604 *
4605 * TODO: Do we need to limit these checks to regular files only?
4606 */
4607 if (fcnp->cn_flags & CN_SECLUDE_RENAME) {
4608 if (vnode_isdir(fvp)) {
4609 error = EISDIR;
4610 goto out;
4611 }
4612
4613 /*
4614 * Neither fork of source may be open or memory mapped.
4615 * We also don't want it in use by any other system call.
4616 * The file must not have hard links.
4617 *
4618 * We can't simply use vnode_isinuse() because that does not
4619 * count opens with O_EVTONLY. We don't want a malicious
4620 * process using O_EVTONLY to subvert a secluded rename.
4621 */
4622 if (fcp->c_linkcount != 1) {
4623 error = EMLINK;
4624 goto out;
4625 }
4626
4627 if (fcp->c_rsrc_vp && (vnode_usecount(fcp->c_rsrc_vp) > 0 ||
4628 vnode_iocount(fcp->c_rsrc_vp) > 0)) {
4629 /* Resource fork is in use (including O_EVTONLY) */
4630 error = EBUSY;
4631 goto out;
4632 }
4633 if (fcp->c_vp && (vnode_usecount(fcp->c_vp) > (fcp->c_rsrc_vp ? 1 : 0) ||
4634 vnode_iocount(fcp->c_vp) > 1)) {
4635 /*
4636 * Data fork is in use, including O_EVTONLY, but not
4637 * including a reference from the resource fork.
4638 */
4639 error = EBUSY;
4640 goto out;
4641 }
4642 }
4643 #endif
4644
4645 bzero(&from_desc, sizeof(from_desc));
4646 from_desc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
4647 from_desc.cd_namelen = fcnp->cn_namelen;
4648 from_desc.cd_parentcnid = fdcp->c_fileid;
4649 from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
4650 from_desc.cd_cnid = fcp->c_cnid;
4651
4652 bzero(&to_desc, sizeof(to_desc));
4653 to_desc.cd_nameptr = (const u_int8_t *)tcnp->cn_nameptr;
4654 to_desc.cd_namelen = tcnp->cn_namelen;
4655 to_desc.cd_parentcnid = tdcp->c_fileid;
4656 to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
4657 to_desc.cd_cnid = fcp->c_cnid;
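/* Both descriptors refer to the same catalog record (fcp's cnid); only the parent and name differ. */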
4658
4659 if ((error = hfs_start_transaction(hfsmp)) != 0) {
4660 goto out;
4661 }
4662 started_tr = 1;
4663
4664 /* hfs_vnop_link() and hfs_vnop_rename() set kHFSHasChildLinkMask
4665 * inside a journal transaction and without holding a cnode lock.
4666 * As setting of this bit depends on being in journal transaction for
4667 * concurrency, check this bit again after we start journal transaction for rename
4668 * to ensure that this directory does not have any descendant that
4669 * is a directory hard link.
4670 */
4671 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
4672 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
4673 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4674 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
4675 error = EPERM;
4676 hfs_systemfile_unlock(hfsmp, lockflags);
4677 goto out;
4678 }
4679 hfs_systemfile_unlock(hfsmp, lockflags);
4680 }
4681 }
4682
4683 // if it's a hardlink then re-lookup the name so
4684 // that we get the correct cnid in from_desc (see
4685 // the comment in hfs_removefile for more details)
4686 //
4687 if (fcp->c_flag & C_HARDLINK) {
4688 struct cat_desc tmpdesc;
4689 cnid_t real_cnid;
4690
4691 tmpdesc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
4692 tmpdesc.cd_namelen = fcnp->cn_namelen;
4693 tmpdesc.cd_parentcnid = fdcp->c_fileid;
4694 tmpdesc.cd_hint = fdcp->c_childhint;
4695 tmpdesc.cd_flags = fcp->c_desc.cd_flags & CD_ISDIR;
4696 tmpdesc.cd_encoding = 0;
4697
4698 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4699
4700 if (cat_lookup(hfsmp, &tmpdesc, 0, 0, NULL, NULL, NULL, &real_cnid) != 0) {
4701 hfs_systemfile_unlock(hfsmp, lockflags);
4702 goto out;
4703 }
4704
4705 // use the real cnid instead of whatever happened to be there
4706 from_desc.cd_cnid = real_cnid;
4707 hfs_systemfile_unlock(hfsmp, lockflags);
4708 }
4709
4710 /*
4711 * Reserve some space in the Catalog file.
4712 */
4713 if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) {
4714 goto out;
4715 }
4716 got_cookie = 1;
4717
4718 /*
4719 * If the destination exists then it may need to be removed.
4720 *
4721 * Due to HFS's locking system, we should always move the
4722 * existing 'tvp' element to the hidden directory in hfs_vnop_rename.
4723 * Because the VNOP_LOOKUP call enters and exits the filesystem independently
4724 * of the actual vnop that it was trying to do (stat, link, readlink),
4725 * we must release the cnode lock of that element during the interim to
4726 * do MAC checking, vnode authorization, and other calls. In that time,
4727 * the item can be deleted (or renamed over). However, only in the rename
4728 * case is it inappropriate to return ENOENT from any of those calls. Either
4729 * the call should return information about the old element (stale), or get
4730 * information about the newer element that we are about to write in its place.
4731 *
4732 * HFS lookup has been modified to detect a rename and re-drive its
4733 * lookup internally. For other calls that have already succeeded in
4734 * their lookup call and are waiting to acquire the cnode lock in order
4735 * to proceed, that cnode lock will not fail due to the cnode being marked
4736 * C_NOEXISTS, because it won't have been marked as such. It will only
4737 * have C_DELETED. Thus, they will simply act on the stale open-unlinked
4738 * element. All future callers will get the new element.
4739 *
4740 * To implement this behavior, we pass the "only_unlink" argument to
4741 * hfs_removefile and hfs_removedir. This will result in the vnode acting
4742 * as though it is open-unlinked. Additionally, when we are done moving the
4743 * element to the hidden directory, we vnode_recycle the target so that it is
4744 * reclaimed as soon as possible. Reclaim and inactive are both
4745 * capable of clearing out unused blocks for an open-unlinked file or dir.
4746 */
4747 if (tvp) {
4748 //
4749 // if the destination has a document id, we need to preserve it
4750 //
4751 if (fvp != tvp) {
4752 uint32_t document_id;
4753 struct FndrExtendedDirInfo *ffip = (struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
4754 struct FndrExtendedDirInfo *tfip = (struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16);
4755
4756 if (ffip->document_id && tfip->document_id) {
4757 // both documents are tracked. only save a tombstone from tcp and do nothing else.
4758 doc_tombstone_save(tdvp, tvp, tcnp, hfs_get_document_id(tcp),
4759 tcp->c_fileid);
4760 } else {
4761 struct doc_tombstone *ut;
4762 ut = doc_tombstone_get();
4763
4764 document_id = tfip->document_id;
4765 tfip->document_id = 0;
4766
4767 if (document_id != 0) {
4768 // clear UF_TRACKED as well since tcp is now no longer tracked
4769 tcp->c_bsdflags &= ~UF_TRACKED;
4770 (void) cat_update(hfsmp, &tcp->c_desc, &tcp->c_attr, NULL, NULL);
4771 }
4772
4773 if (ffip->document_id == 0 && document_id != 0) {
4774 // printf("RENAME: preserving doc-id %d onto %s (from ino %d, to ino %d)\n", document_id, tcp->c_desc.cd_nameptr, tcp->c_desc.cd_cnid, fcp->c_desc.cd_cnid);
4775 fcp->c_bsdflags |= UF_TRACKED;
4776 ffip->document_id = document_id;
4777
4778 (void) cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
4779 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4780 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4781 FSE_ARG_INO, (ino64_t)tcp->c_fileid, // src inode #
4782 FSE_ARG_INO, (ino64_t)fcp->c_fileid, // dst inode #
4783 FSE_ARG_INT32, (uint32_t)ffip->document_id,
4784 FSE_ARG_DONE);
4785 }
4786 else if ((fcp->c_bsdflags & UF_TRACKED) && doc_tombstone_should_save(ut, fvp, fcnp)) {
4787
4788 if (ut->t_lastop_document_id) {
4789 doc_tombstone_clear(ut, NULL);
4790 }
4791 doc_tombstone_save(fdvp, fvp, fcnp,
4792 hfs_get_document_id(fcp), fcp->c_fileid);
4793
4794 //printf("RENAME: (dest-exists): saving tombstone doc-id %lld @ %s (ino %d)\n",
4795 // ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
4796 }
4797 }
4798 }
4799
4800 /*
4801 * When fvp matches tvp they could be case variants
4802 * or matching hard links.
4803 */
4804 if (fvp == tvp) {
4805 if (!(fcp->c_flag & C_HARDLINK)) {
4806 /*
4807 * If they're not hardlinks, then fvp == tvp must mean we
4808 * are using case-insensitive HFS because case-sensitive would
4809 * not use the same vnode for both. In this case we just update
4810 * the catalog for: a -> A
4811 */
4812 goto skip_rm; /* simple case variant */
4813
4814 }
4815 /* For all cases below, we must be using hardlinks */
4816 else if ((fdvp != tdvp) ||
4817 (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) {
4818 /*
4819 * If the parent directories are not the same, AND the two items
4820 * are hardlinks, posix says to do nothing:
4821 * dir1/fred <-> dir2/bob and the op was mv dir1/fred -> dir2/bob
4822 * We just return 0 in this case.
4823 *
4824 * If case sensitivity is on, and we are using hardlinks
4825 * then renaming is supposed to do nothing.
4826 * dir1/fred <-> dir2/FRED, and op == mv dir1/fred -> dir2/FRED
4827 */
4828 goto out; /* matching hardlinks, nothing to do */
4829
4830 } else if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
4831 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
4832 /*
4833 * If we get here, then the following must be true:
4834 * a) We are running case-insensitive HFS+.
4835 * b) Both paths 'fvp' and 'tvp' are in the same parent directory.
4836 * c) the two names are case-variants of each other.
4837 *
4838 * In this case, we are really only dealing with a single catalog record
4839 * whose name is being updated.
4840 *
4841 * op is dir1/fred -> dir1/FRED
4842 *
4843 * We need to special case the name matching, because if
4844 * dir1/fred <-> dir1/bob were the two links, and the
4845 * op was dir1/fred -> dir1/bob
4846 * That would fail/do nothing.
4847 */
4848 goto skip_rm; /* case-variant hardlink in the same dir */
4849 } else {
4850 goto out; /* matching hardlink, nothing to do */
4851 }
4852 }
4853
4854
4855 if (vnode_isdir(tvp)) {
4856 /*
4857 * hfs_removedir will eventually call hfs_removefile on the directory
4858 * we're working on, because only hfs_removefile does the renaming of the
4859 * item to the hidden directory. The directory will stay around in the
4860 * hidden directory with C_DELETED until it gets an inactive or a reclaim.
4861 * That way, we can destroy all of the EAs as needed and allow new ones to be
4862 * written.
4863 */
4864 error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE, 1);
4865 }
4866 else {
4867 error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0, NULL, 1);
4868
4869 /*
4870 * If the destination file had a resource fork vnode, then we need to get rid of
4871 * its blocks when there are no more references to it. Because the call to
4872 * hfs_removefile above always open-unlinks things, we need to force an inactive/reclaim
4873 * on the resource fork vnode, in order to prevent block leaks. Otherwise,
4874 * the resource fork vnode could prevent the data fork vnode from going out of scope
4875 * because it holds a v_parent reference on it. So we mark it for termination
4876 * with a call to vnode_recycle. hfs_vnop_reclaim has been modified so that it
4877 * can clean up the blocks of open-unlinked files and resource forks.
4878 *
4879 * We can safely call vnode_recycle on the resource fork because we took an iocount
4880 * reference on it at the beginning of the function.
4881 */
4882
4883 if ((error == 0) && (tcp->c_flag & C_DELETED) && (tvp_rsrc)) {
4884 vnode_recycle(tvp_rsrc);
4885 }
4886 }
4887
4888 if (error) {
4889 goto out;
4890 }
4891
4892 tvp_deleted = 1;
4893
4894 /* Mark 'tcp' as being deleted due to a rename */
4895 tcp->c_flag |= C_RENAMED;
4896
4897 /*
4898 * Aggressively mark tvp/tcp for termination to ensure that we recover all blocks
4899 * as quickly as possible.
4900 */
4901 vnode_recycle(tvp);
4902 } else {
4903 struct doc_tombstone *ut;
4904 ut = doc_tombstone_get();
4905
4906 //
4907 // There is nothing at the destination. If the file being renamed is
4908 // tracked, save a "tombstone" of the document_id. If the file is
4909 // not a tracked file, then see if it needs to inherit a tombstone.
4910 //
4911 // NOTE: we do not save a tombstone if the file being renamed begins
4912 // with "atmp"; this works around AutoCad's bizarre
4913 // 5-step unsafe save behavior
4914 //
4915 if (fcp->c_bsdflags & UF_TRACKED) {
4916 if (doc_tombstone_should_save(ut, fvp, fcnp)) {
4917 doc_tombstone_save(fdvp, fvp, fcnp, hfs_get_document_id(fcp),
4918 fcp->c_fileid);
4919
4920 //printf("RENAME: (no dest): saving tombstone doc-id %lld @ %s (ino %d)\n",
4921 // ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
4922 } else {
4923 // intentionally do nothing
4924 }
4925 } else if ( ut->t_lastop_document_id != 0
4926 && tdvp == ut->t_lastop_parent
4927 && vnode_vid(tdvp) == ut->t_lastop_parent_vid
4928 && strcmp((char *)ut->t_lastop_filename, (char *)tcnp->cn_nameptr) == 0) {
4929
4930 //printf("RENAME: %s (ino %d) inheriting doc-id %lld\n", tcnp->cn_nameptr, fcp->c_desc.cd_cnid, ut->t_lastop_document_id);
4931 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
4932 fcp->c_bsdflags |= UF_TRACKED;
4933 fip->document_id = ut->t_lastop_document_id;
4934 cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
4935
4936 doc_tombstone_clear(ut, &old_doc_vp);
4937 } else if (ut->t_lastop_document_id && doc_tombstone_should_save(ut, fvp, fcnp) && doc_tombstone_should_save(ut, tvp, tcnp)) {
4938 // no match, clear the tombstone
4939 //printf("RENAME: clearing the tombstone %lld @ %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
4940 doc_tombstone_clear(ut, NULL);
4941 }
4942
4943 }
4944 skip_rm:
4945 /*
4946 * All done with tvp and fvp.
4947 *
4948 * We also jump to this point if there was no destination observed during lookup and namei.
4949 * However, because only iocounts are held at the VFS layer, there is nothing preventing a
4950 * competing thread from racing us and creating a file or dir at the destination of this rename
4951 * operation. If this occurs, it may cause us to get a spurious EEXIST out of the cat_rename
4952 * call below. To preserve rename's atomicity, we need to signal VFS to re-drive the
4953 * namei/lookup and restart the rename operation. EEXIST is an allowable errno to be bubbled
4954 * out of the rename syscall, but not for this reason, since it is a synonym errno for ENOTEMPTY.
4955 * To signal VFS, we return ERECYCLE (which is also used for lookup restarts). This errno
4956 * will be swallowed and it will restart the operation.
4957 */
4958
4959 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
4960 error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);
4961 hfs_systemfile_unlock(hfsmp, lockflags);
4962
4963 if (error) {
4964 if (error == EEXIST) {
4965 error = ERECYCLE;
4966 }
4967 goto out;
4968 }
4969
4970 /* Invalidate negative cache entries in the destination directory */
4971 if (tdcp->c_flag & C_NEG_ENTRIES) {
4972 cache_purge_negatives(tdvp);
4973 tdcp->c_flag &= ~C_NEG_ENTRIES;
4974 }
4975
4976 /* Update cnode's catalog descriptor */
4977 replace_desc(fcp, &out_desc);
4978 fcp->c_parentcnid = tdcp->c_fileid;
4979 fcp->c_hint = 0;
4980
4981 /*
4982 * Now indicate this cnode needs to have date-added written to the
4983 * finderinfo, but only if moving to a different directory, or if
4984 * it doesn't already have it.
4985 */
4986 if (fdvp != tdvp || !ISSET(fcp->c_attr.ca_recflags, kHFSHasDateAddedMask))
4987 fcp->c_flag |= C_NEEDS_DATEADDED;
4988
4989 (void) hfs_update (fvp, 0);
4990
4991 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_RMDIR : VOL_RMFILE,
4992 (fdcp->c_cnid == kHFSRootFolderID));
4993 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_MKDIR : VOL_MKFILE,
4994 (tdcp->c_cnid == kHFSRootFolderID));
4995
4996 /* Update both parent directories. */
4997 if (fdvp != tdvp) {
4998 if (vnode_isdir(fvp)) {
4999 /* If the source directory has directory hard link
5000 * descendants, set the kHFSHasChildLinkBit in the
5001 * destination parent hierarchy
5002 */
5003 if ((fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) &&
5004 !(tdcp->c_attr.ca_recflags & kHFSHasChildLinkMask)) {
5005
5006 tdcp->c_attr.ca_recflags |= kHFSHasChildLinkMask;
5007
5008 error = cat_set_childlinkbit(hfsmp, tdcp->c_parentcnid);
5009 if (error) {
5010 printf ("hfs_vnop_rename: error updating parent chain for %u\n", tdcp->c_cnid);
5011 error = 0;
5012 }
5013 }
5014 INC_FOLDERCOUNT(hfsmp, tdcp->c_attr);
5015 DEC_FOLDERCOUNT(hfsmp, fdcp->c_attr);
5016 }
5017 tdcp->c_entries++;
5018 tdcp->c_dirchangecnt++;
5019 tdcp->c_flag |= C_MODIFIED;
5020 hfs_incr_gencount(tdcp);
5021
5022 if (fdcp->c_entries > 0)
5023 fdcp->c_entries--;
5024 fdcp->c_dirchangecnt++;
5025 fdcp->c_flag |= C_MODIFIED;
5026 fdcp->c_touch_chgtime = TRUE;
5027 fdcp->c_touch_modtime = TRUE;
5028
5029 if (ISSET(fcp->c_flag, C_HARDLINK)) {
5030 hfs_relorigin(fcp, fdcp->c_fileid);
5031 if (fdcp->c_fileid != fdcp->c_cnid)
5032 hfs_relorigin(fcp, fdcp->c_cnid);
5033 }
5034
5035 (void) hfs_update(fdvp, 0);
5036 }
5037 hfs_incr_gencount(fdcp);
5038
5039 tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
5040 tdcp->c_touch_chgtime = TRUE;
5041 tdcp->c_touch_modtime = TRUE;
5042
5043 (void) hfs_update(tdvp, 0);
5044
5045 /* Update the vnode's name now that the rename has completed. */
5046 vnode_update_identity(fvp, tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
5047 tcnp->cn_hash, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME));
5048
5049 /*
5050 * At this point, we may have a resource fork vnode attached to the
5051 * 'from' vnode. If it exists, we will want to update its name, because
5052 * it contains the old name + _PATH_RSRCFORKSPEC. ("/..namedfork/rsrc").
5053 *
5054 * Note that the only thing we need to update here is the name attached to
5055 * the vnode, since a resource fork vnode does not have a separate resource
5056 * cnode -- it's still 'fcp'.
5057 */
5058 if (fcp->c_rsrc_vp) {
5059 char* rsrc_path = NULL;
5060 int len;
5061
5062 /* Create a new temporary buffer that's going to hold the new name */
5063 rsrc_path = hfs_malloc(MAXPATHLEN);
5064 len = snprintf (rsrc_path, MAXPATHLEN, "%s%s", tcnp->cn_nameptr, _PATH_RSRCFORKSPEC);
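/* snprintf returns the untruncated length, so clamp it before passing the name to the name cache. */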
5065 len = MIN(len, MAXPATHLEN);
5066
5067 /*
5068 * vnode_update_identity will do the following for us:
5069 * 1) release reference on the existing rsrc vnode's name.
5070 * 2) copy/insert new name into the name cache
5071 * 3) attach the new name to the resource vnode
5072 * 4) update the vnode's vid
5073 */
5074 vnode_update_identity (fcp->c_rsrc_vp, fvp, rsrc_path, len, 0, (VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE));
5075
5076 /* Free the memory associated with the resource fork's name */
5077 hfs_free(rsrc_path, MAXPATHLEN);
5078 }
5079 out:
5080 if (got_cookie) {
5081 cat_postflight(hfsmp, &cookie, p);
5082 }
5083 if (started_tr) {
5084 hfs_end_transaction(hfsmp);
5085 }
5086
5087 fdcp->c_flag &= ~C_DIR_MODIFICATION;
5088 wakeup((caddr_t)&fdcp->c_flag);
5089 if (fdvp != tdvp) {
5090 tdcp->c_flag &= ~C_DIR_MODIFICATION;
5091 wakeup((caddr_t)&tdcp->c_flag);
5092 }
5093
5094 const ino64_t file_id = fcp->c_fileid;
5095
5096 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
5097
5098 if (took_trunc_lock) {
5099 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
5100 }
5101
5102 /* Now vnode_put the resource fork vnode if necessary */
5103 if (tvp_rsrc) {
5104 vnode_put(tvp_rsrc);
5105 tvp_rsrc = NULL;
5106 }
5107
5108 /* After tvp is removed the only acceptable error is EIO */
5109 if (error && tvp_deleted)
5110 error = EIO;
5111
5112 /* If we want to reintroduce notifications for renames, this is the
5113 place to do it. */
5114
5115 if (old_doc_vp) {
5116 cnode_t *ocp = VTOC(old_doc_vp);
5117 hfs_lock_always(ocp, HFS_EXCLUSIVE_LOCK);
5118 struct FndrExtendedFileInfo *ofip = (struct FndrExtendedFileInfo *)((char *)&ocp->c_attr.ca_finderinfo + 16);
5119
5120 const uint32_t doc_id = ofip->document_id;
5121 const ino64_t old_file_id = ocp->c_fileid;
5122
5123 // printf("clearing doc-id from ino %d\n", ocp->c_desc.cd_cnid);
5124 ofip->document_id = 0;
5125 ocp->c_bsdflags &= ~UF_TRACKED;
5126 ocp->c_flag |= C_MODIFIED;
5127
5128 hfs_unlock(ocp);
5129 vnode_put(old_doc_vp);
5130
5131 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
5132 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
5133 FSE_ARG_INO, old_file_id, // src inode #
5134 FSE_ARG_INO, file_id, // dst inode #
5135 FSE_ARG_INT32, doc_id,
5136 FSE_ARG_DONE);
5137 }
5138
5139 return (error);
5140 }
5141
5142
5143 /*
5144 * Make a directory.
5145 */
5146 int
5147 hfs_vnop_mkdir(struct vnop_mkdir_args *ap)
5148 {
5149 /***** HACK ALERT ********/
5150 ap->a_cnp->cn_flags |= MAKEENTRY;
5151 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
5152 }
5153
5154
5155 /*
5156 * Create a symbolic link.
5157 */
5158 int
5159 hfs_vnop_symlink(struct vnop_symlink_args *ap)
5160 {
5161 struct vnode **vpp = ap->a_vpp;
5162 struct vnode *dvp = ap->a_dvp;
5163 struct vnode *vp = NULL;
5164 struct cnode *cp = NULL;
5165 struct hfsmount *hfsmp;
5166 struct filefork *fp;
5167 struct buf *bp = NULL;
5168 char *datap;
5169 int started_tr = 0;
5170 u_int32_t len;
5171 int error;
5172
5173 /* HFS standard disks don't support symbolic links */
5174 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord)
5175 return (ENOTSUP);
5176
5177 /* Check for empty target name */
5178 if (ap->a_target[0] == 0)
5179 return (EINVAL);
5180
5181 hfsmp = VTOHFS(dvp);
5182 len = strlen(ap->a_target);
5183
5184 /* Check for free space */
5185 if (((u_int64_t)hfs_freeblks(hfsmp, 0) * (u_int64_t)hfsmp->blockSize) < len) {
5186 return (ENOSPC);
5187 }
5188
5189 /* Create the vnode */
5190 ap->a_vap->va_mode |= S_IFLNK;
5191 if ((error = hfs_makenode(dvp, vpp, ap->a_cnp, ap->a_vap, ap->a_context))) {
5192 goto out;
5193 }
5194 vp = *vpp;
5195 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
5196 goto out;
5197 }
5198 cp = VTOC(vp);
5199 fp = VTOF(vp);
5200
5201 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
5202 goto out;
5203 }
5204
5205 #if QUOTA
5206 (void)hfs_getinoquota(cp);
5207 #endif /* QUOTA */
5208
5209 if ((error = hfs_start_transaction(hfsmp)) != 0) {
5210 goto out;
5211 }
5212 started_tr = 1;
5213
5214 /*
5215 * Allocate space for the link.
5216 *
5217 * We're already inside a transaction at this point.
5218 *
5219 * Don't need the truncate lock since a symlink is treated as a system file.
5220 */
5221 error = hfs_truncate(vp, len, IO_NOZEROFILL, 0, ap->a_context);
5222
5223 /* On errors, remove the symlink file */
5224 if (error) {
5225 /*
5226 * End the transaction so we don't re-take the cnode lock
5227 * below while inside a transaction (lock order violation).
5228 */
5229 hfs_end_transaction(hfsmp);
5230
5231 /* hfs_removefile() requires holding the truncate lock */
5232 hfs_unlock(cp);
5233 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
5234 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
5235
5236 if (hfs_start_transaction(hfsmp) != 0) {
5237 started_tr = 0;
5238 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
5239 goto out;
5240 }
5241
5242 (void) hfs_removefile(dvp, vp, ap->a_cnp, 0, 0, 0, NULL, 0);
5243 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
5244 goto out;
5245 }
5246
5247 /* Write the link to disk */
5248 bp = buf_getblk(vp, (daddr64_t)0, roundup((int)fp->ff_size, hfsmp->hfs_physical_block_size),
5249 0, 0, BLK_META);
5250 if (hfsmp->jnl) {
5251 journal_modify_block_start(hfsmp->jnl, bp);
5252 }
5253 datap = (char *)buf_dataptr(bp);
5254 bzero(datap, buf_size(bp));
5255 bcopy(ap->a_target, datap, len);
5256
5257 if (hfsmp->jnl) {
5258 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
5259 } else {
5260 buf_bawrite(bp);
5261 }
5262 out:
5263 if (started_tr)
5264 hfs_end_transaction(hfsmp);
5265 if ((cp != NULL) && (vp != NULL)) {
5266 hfs_unlock(cp);
5267 }
5268 if (error) {
5269 if (vp) {
5270 vnode_put(vp);
5271 }
5272 *vpp = NULL;
5273 }
5274 return (error);
5275 }
5276
5277
5278 /* structures to hold a "." or ".." directory entry */
5279 struct hfs_stddotentry {
5280 u_int32_t d_fileno; /* unique file number */
5281 u_int16_t d_reclen; /* length of this structure */
5282 u_int8_t d_type; /* dirent file type */
5283 u_int8_t d_namlen; /* len of filename */
5284 char d_name[4]; /* "." or ".." */
5285 };
5286
5287 struct hfs_extdotentry {
5288 u_int64_t d_fileno; /* unique file number */
5289 u_int64_t d_seekoff; /* seek offset (optional, used by servers) */
5290 u_int16_t d_reclen; /* length of this structure */
5291 u_int16_t d_namlen; /* len of filename */
5292 u_int8_t d_type; /* dirent file type */
5293 u_char d_name[3]; /* "." or ".." */
5294 };
5295
5296 typedef union {
5297 struct hfs_stddotentry std;
5298 struct hfs_extdotentry ext;
5299 } hfs_dotentry_t;
5300
5301 /*
5302 * hfs_vnop_readdir reads directory entries into the buffer pointed
5303 * to by uio, in a filesystem independent format. Up to uio_resid
5304 * bytes of data can be transferred. The data in the buffer is a
5305 * series of packed dirent structures where each one contains the
5306 * following entries:
5307 *
5308 * u_int32_t d_fileno; // file number of entry
5309 * u_int16_t d_reclen; // length of this record
5310 * u_int8_t d_type; // file type
5311 * u_int8_t d_namlen; // length of string in d_name
5312 * char d_name[MAXNAMELEN+1]; // null terminated file name
5313 *
5314 * The current position (uio_offset) refers to the next block of
5315 * entries. The offset can only be set to a value previously
5316 * returned by hfs_vnop_readdir or zero. This offset does not have
5317 * to match the number of bytes returned (in uio_resid).
5318 *
5319 * In fact, the offset used by HFS is essentially an index (26 bits)
5320 * with a tag (6 bits). The tag is for associating the next request
5321 * with the current request. This enables us to have multiple threads
5322 * reading the directory while the directory is also being modified.
5323 *
5324 * Each tag/index pair is tied to a unique directory hint. The hint
5325 * contains information (filename) needed to build the catalog b-tree
5326 * key for finding the next set of entries.
5327 *
5328 * If the directory is marked as deleted-but-in-use (cp->c_flag & C_DELETED),
5329 * do NOT synthesize entries for "." and "..".
5330 */
5331 int
5332 hfs_vnop_readdir(struct vnop_readdir_args *ap)
5333 {
5334 struct vnode *vp = ap->a_vp;
5335 uio_t uio = ap->a_uio;
5336 struct cnode *cp = VTOC(vp);
5337 struct hfsmount *hfsmp = VTOHFS(vp);
5338 directoryhint_t *dirhint = NULL;
5339 directoryhint_t localhint;
5340 off_t offset;
5341 off_t startoffset;
5342 int error = 0;
5343 int eofflag = 0;
5344 user_addr_t user_start = 0;
5345 user_size_t user_len = 0;
5346 int index;
5347 unsigned int tag;
5348 int items;
5349 int lockflags;
5350 int extended;
5351 int nfs_cookies;
5352 cnid_t cnid_hint = 0;
5353 int bump_valence = 0;
5354
5355 items = 0;
5356 startoffset = offset = uio_offset(uio);
5357 extended = (ap->a_flags & VNODE_READDIR_EXTENDED);
5358 nfs_cookies = extended && (ap->a_flags & VNODE_READDIR_REQSEEKOFF);
5359
5360 /* Sanity check the uio data. */
5361 if (uio_iovcnt(uio) > 1)
5362 return (EINVAL);
5363
5364 if (VTOC(vp)->c_bsdflags & UF_COMPRESSED) {
5365 int compressed = hfs_file_is_compressed(VTOC(vp), 0); /* 0 == take the cnode lock */
5366 if (VTOCMP(vp) != NULL && !compressed) {
5367 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
5368 if (error) {
5369 return error;
5370 }
5371 }
5372 }
5373
5374 //
5375 // We have to lock the user's buffer here so that we won't
5376 // fault on it after we've acquired a shared lock on the
5377 // catalog file. The issue is that you can get a 3-way
5378 // deadlock if someone else starts a transaction and then
5379 // tries to lock the catalog file but can't because we're
5380 // here and we can't service our page fault because VM is
5381 // blocked trying to start a transaction as a result of
5382 // trying to free up pages for our page fault. It's messy
5383 // but it does happen on dual-processors that are paging
5384 // heavily (see radar 3082639 for more info). By locking
5385 // the buffer up-front we prevent ourselves from faulting
5386 // while holding the shared catalog file lock.
5387 //
5388 // Fortunately this and hfs_search() are the only two places
5389 // currently (10/30/02) that can fault on user data with a
5390 // shared lock on the catalog file.
5391 //
5392 if (hfsmp->jnl && uio_isuserspace(uio)) {
5393 user_start = uio_curriovbase(uio);
5394 user_len = uio_curriovlen(uio);
5395
5396 /* Bounds check the user buffer */
5397 if (user_len > (256 * 1024)) {
5398 /* only allow the user to wire down at most 256k */
5399 user_len = (256 * 1024);
5400 uio_setresid (uio, (user_ssize_t)(256 * 1024));
5401 }
5402
5403 if ((error = vslock(user_start, user_len)) != 0) {
5404 return error;
5405 }
5406 }
5407
5408 /* Note that the dirhint calls require an exclusive lock. */
5409 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
5410 if (user_start) {
5411 vsunlock(user_start, user_len, TRUE);
5412 }
5413 return error;
5414 }
5415
5416 /* Pick up cnid hint (if any). */
5417 if (nfs_cookies) {
5418 cnid_hint = (cnid_t)(uio_offset(uio) >> 32);
5419 uio_setoffset(uio, uio_offset(uio) & 0x00000000ffffffffLL);
5420 if (cnid_hint == INT_MAX) { /* searching past the last item */
5421 eofflag = 1;
5422 goto out;
5423 }
5424 }
5425 /*
5426 * Synthesize entries for "." and "..", unless the directory has
5427 * been deleted, but not closed yet (lazy delete in progress).
5428 */
5429 if (offset == 0 && !(cp->c_flag & C_DELETED)) {
5430
5431 size_t uiosize;
5432
5433 /*
5434 * We could use a union of the two types of dot entries (HFS / HFS+)
5435 * but it makes static analysis of this code difficult. The problem is that
5436 * the HFS standard dot entry is smaller than the HFS+ one, and we also ideally
5437 * want the uiomove to operate on a two-element adjacent array. If we used the
5438 * array of unions, we would have to do two separate uiomoves because the memory
5439 * for the hfs standard dot entries would not be adjacent to one another.
5440 * So just allocate the entries on the stack in separate cases.
5441 */
5442
5443 if (extended) {
5444 hfs_dotentry_t dotentry[2];
5445
5446 /* HFS Plus */
5447 struct hfs_extdotentry *entry = &dotentry[0].ext;
5448
5449 entry->d_fileno = cp->c_cnid;
5450 entry->d_reclen = sizeof(struct hfs_extdotentry);
5451 entry->d_type = DT_DIR;
5452 entry->d_namlen = 1;
5453 entry->d_name[0] = '.';
5454 entry->d_name[1] = '\0';
5455 entry->d_name[2] = '\0';
5456 entry->d_seekoff = 1;
5457
5458 ++entry;
5459 entry->d_fileno = cp->c_parentcnid;
5460 entry->d_reclen = sizeof(struct hfs_extdotentry);
5461 entry->d_type = DT_DIR;
5462 entry->d_namlen = 2;
5463 entry->d_name[0] = '.';
5464 entry->d_name[1] = '.';
5465 entry->d_name[2] = '\0';
5466 entry->d_seekoff = 2;
5467 uiosize = 2 * sizeof(struct hfs_extdotentry);
5468
5469 if ((error = uiomove((caddr_t)dotentry, uiosize, uio))) {
5470 goto out;
5471 }
5472
5473 } else {
5474 struct hfs_stddotentry hfs_std_dotentries[2];
5475
5476 /* HFS Standard */
5477 struct hfs_stddotentry *entry = &hfs_std_dotentries[0];
5478
5479 entry->d_fileno = cp->c_cnid;
5480 entry->d_reclen = sizeof(struct hfs_stddotentry);
5481 entry->d_type = DT_DIR;
5482 entry->d_namlen = 1;
5483 *(int *)&entry->d_name[0] = 0;
5484 entry->d_name[0] = '.';
5485
5486 ++entry;
5487 entry->d_fileno = cp->c_parentcnid;
5488 entry->d_reclen = sizeof(struct hfs_stddotentry);
5489 entry->d_type = DT_DIR;
5490 entry->d_namlen = 2;
5491 *(int *)&entry->d_name[0] = 0;
5492 entry->d_name[0] = '.';
5493 entry->d_name[1] = '.';
5494 uiosize = 2 * sizeof(struct hfs_stddotentry);
5495
5496 if ((error = uiomove((caddr_t)hfs_std_dotentries, uiosize, uio))) {
5497 goto out;
5498 }
5499 }
5500
5501 offset += 2;
5502 }
5503
5504 /*
5505 * Intentionally avoid checking the valence here. If we
5506 * have FS corruption that reports the valence is 0, even though it
5507 * has contents, we might artificially skip over iterating
5508 * this directory.
5509 */
5510
5511 /* Convert offset into a catalog directory index. */
5512 index = (offset & HFS_INDEX_MASK) - 2;
5513 tag = offset & ~HFS_INDEX_MASK;
5514
5515 /* Lock catalog during cat_findname and cat_getdirentries. */
5516 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
5517
5518 /* When called from NFS, try and resolve a cnid hint. */
5519 if (nfs_cookies && cnid_hint != 0) {
5520 if (cat_findname(hfsmp, cnid_hint, &localhint.dh_desc) == 0) {
5521 if ( localhint.dh_desc.cd_parentcnid == cp->c_fileid) {
5522 localhint.dh_index = index - 1;
5523 localhint.dh_time = 0;
5524 bzero(&localhint.dh_link, sizeof(localhint.dh_link));
5525 dirhint = &localhint; /* don't forget to release the descriptor */
5526 } else {
5527 cat_releasedesc(&localhint.dh_desc);
5528 }
5529 }
5530 }
5531
5532 /* Get a directory hint (cnode must be locked exclusive) */
5533 if (dirhint == NULL) {
5534 dirhint = hfs_getdirhint(cp, ((index - 1) & HFS_INDEX_MASK) | tag, 0);
5535
5536 /* Hide tag from catalog layer. */
5537 dirhint->dh_index &= HFS_INDEX_MASK;
5538 if (dirhint->dh_index == HFS_INDEX_MASK) {
5539 dirhint->dh_index = -1;
5540 }
5541 }
5542
5543 if (index == 0) {
5544 dirhint->dh_threadhint = cp->c_dirthreadhint;
5545 }
5546 else {
5547 /*
5548 * If we have a non-zero index, there is a possibility that during the last
5549 * call to hfs_vnop_readdir we hit EOF for this directory. If that is the case
5550 * then we don't want to return any new entries for the caller. Just return 0
5551 * items, mark the eofflag, and bail out. Because we won't have done any work, the
5552 * code at the end of the function will release the dirhint for us.
5553 *
5554 * Don't forget to unlock the catalog lock on the way out, too.
5555 */
5556 if (dirhint->dh_desc.cd_flags & CD_EOF) {
5557 error = 0;
5558 eofflag = 1;
5559 uio_setoffset(uio, startoffset);
5560 hfs_systemfile_unlock (hfsmp, lockflags);
5561
5562 goto seekoffcalc;
5563 }
5564 }
5565
5566 /* Pack the buffer with dirent entries. */
5567 error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, ap->a_flags, &items, &eofflag);
5568
5569 if (index == 0 && error == 0) {
5570 cp->c_dirthreadhint = dirhint->dh_threadhint;
5571 }
5572
5573 hfs_systemfile_unlock(hfsmp, lockflags);
5574
5575 if (error != 0) {
5576 goto out;
5577 }
5578
5579 /* Get index to the next item */
5580 index += items;
5581
5582 if (items >= (int)cp->c_entries) {
5583 eofflag = 1;
5584 }
5585
5586 /*
5587 * Detect valence FS corruption.
5588 *
5589 * We are holding the cnode lock exclusive, so there should not be
5590 * anybody modifying the valence field of this cnode. If we enter
5591 * this block, that means we observed filesystem corruption, because
5592 * this directory reported a valence of 0, yet we found at least one
5593 * item. In this case, we need to minimally self-heal this
5594 * directory to prevent userland from tripping over a directory
5595 * that appears empty (getattr of valence reports 0), but actually
5596 * has contents.
5597 *
5598 * We'll force the cnode update at the end of the function after
5599 * completing all of the normal getdirentries steps.
5600 */
5601 if ((cp->c_entries == 0) && (items > 0)) {
5602 /* disk corruption */
5603 cp->c_entries++;
5604 /* Mark the cnode as dirty. */
5605 cp->c_flag |= C_MODIFIED;
5606 printf("hfs_vnop_readdir: repairing valence to non-zero! \n");
5607 bump_valence++;
5608 }
5609
5610
5611 /* Convert catalog directory index back into an offset. */
5612 while (tag == 0)
5613 tag = (++cp->c_dirhinttag) << HFS_INDEX_BITS;
5614 uio_setoffset(uio, (index + 2) | tag);
5615 dirhint->dh_index |= tag;
5616
5617 seekoffcalc:
5618 cp->c_touch_acctime = TRUE;
5619
5620 if (ap->a_numdirent) {
5621 if (startoffset == 0)
5622 items += 2;
5623 *ap->a_numdirent = items;
5624 }
5625
5626 out:
5627 if (user_start) {
5628 vsunlock(user_start, user_len, TRUE);
5629 }
5630 /* If we didn't do anything then go ahead and dump the hint. */
5631 if ((dirhint != NULL) &&
5632 (dirhint != &localhint) &&
5633 (uio_offset(uio) == startoffset)) {
5634 hfs_reldirhint(cp, dirhint);
5635 eofflag = 1;
5636 }
5637 if (ap->a_eofflag) {
5638 *ap->a_eofflag = eofflag;
5639 }
5640 if (dirhint == &localhint) {
5641 cat_releasedesc(&localhint.dh_desc);
5642 }
5643
5644 if (bump_valence) {
5645 /* force the update before dropping the cnode lock*/
5646 hfs_update(vp, 0);
5647 }
5648
5649 hfs_unlock(cp);
5650
5651 return (error);
5652 }
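/*
 * Illustrative sketch, not part of this file: how the readdir offset
 * described above hfs_vnop_readdir() can be unpacked.  The 26-bit index /
 * 6-bit tag split and the cnid hint in the upper 32 bits mirror the code
 * above; the local mask and shift stand in for HFS_INDEX_MASK and
 * HFS_INDEX_BITS and are assumptions of this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_INDEX_BITS 26
#define SKETCH_INDEX_MASK ((1u << SKETCH_INDEX_BITS) - 1) /* 0x03ffffff */

static void
sketch_unpack_readdir_offset(uint64_t offset)
{
	uint32_t cnid_hint = (uint32_t)(offset >> 32); /* NFS cookies only */
	uint32_t low = (uint32_t)(offset & 0xffffffffu);
	uint32_t index = low & SKETCH_INDEX_MASK; /* entry index, incl. "." and ".." */
	uint32_t tag = low >> SKETCH_INDEX_BITS;  /* generation tag tied to a dirhint */

	printf("cnid_hint=%u index=%u tag=%u\n", cnid_hint, index, tag);
}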
5653
5654
5655 /*
5656 * Read contents of a symbolic link.
5657 */
5658 int
5659 hfs_vnop_readlink(struct vnop_readlink_args *ap)
5660 {
5661 struct vnode *vp = ap->a_vp;
5662 struct cnode *cp;
5663 struct filefork *fp;
5664 int error;
5665
5666 if (!vnode_islnk(vp))
5667 return (EINVAL);
5668
5669 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
5670 return (error);
5671 cp = VTOC(vp);
5672 fp = VTOF(vp);
5673
5674 /* Zero length sym links are not allowed */
5675 if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
5676 error = EINVAL;
5677 goto exit;
5678 }
5679
5680 /* Cache the path so we don't waste buffer cache resources */
5681 if (fp->ff_symlinkptr == NULL) {
5682 struct buf *bp = NULL;
5683
5684 fp->ff_symlinkptr = hfs_malloc(fp->ff_size);
5685 error = (int)buf_meta_bread(vp, (daddr64_t)0,
5686 roundup((int)fp->ff_size, VTOHFS(vp)->hfs_physical_block_size),
5687 vfs_context_ucred(ap->a_context), &bp);
5688 if (error) {
5689 if (bp)
5690 buf_brelse(bp);
5691 if (fp->ff_symlinkptr) {
5692 hfs_free(fp->ff_symlinkptr, fp->ff_size);
5693 fp->ff_symlinkptr = NULL;
5694 }
5695 goto exit;
5696 }
5697 bcopy((char *)buf_dataptr(bp), fp->ff_symlinkptr, (size_t)fp->ff_size);
5698
5699 if (VTOHFS(vp)->jnl && (buf_flags(bp) & B_LOCKED) == 0) {
5700 buf_markinvalid(bp); /* data no longer needed */
5701 }
5702 buf_brelse(bp);
5703 }
5704 error = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
5705
5706 /*
5707 * Keep track of blocks read
5708 */
5709 if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (error == 0)) {
5710
5711 /*
5712 * If this file hasn't been seen since the start of
5713 * the current sampling period then start over.
5714 */
5715 if (cp->c_atime < VTOHFS(vp)->hfc_timebase)
5716 VTOF(vp)->ff_bytesread = fp->ff_size;
5717 else
5718 VTOF(vp)->ff_bytesread += fp->ff_size;
5719
5720 // if (VTOF(vp)->ff_bytesread > fp->ff_size)
5721 // cp->c_touch_acctime = TRUE;
5722 }
5723
5724 exit:
5725 hfs_unlock(cp);
5726 return (error);
5727 }
5728
5729
5730 /*
5731 * Get configurable pathname variables.
5732 */
5733 int
5734 hfs_vnop_pathconf(struct vnop_pathconf_args *ap)
5735 {
5736
5737 #if CONFIG_HFS_STD
5738 int std_hfs = (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD);
5739 #endif
5740
5741 switch (ap->a_name) {
5742 case _PC_LINK_MAX:
5743 #if CONFIG_HFS_STD
5744 if (std_hfs) {
5745 *ap->a_retval = 1;
5746 } else
5747 #endif
5748 {
5749 *ap->a_retval = HFS_LINK_MAX;
5750 }
5751 break;
5752 case _PC_NAME_MAX:
5753 #if CONFIG_HFS_STD
5754 if (std_hfs) {
5755 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
5756 } else
5757 #endif
5758 {
5759 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
5760 }
5761 break;
5762 case _PC_PATH_MAX:
5763 *ap->a_retval = PATH_MAX; /* 1024 */
5764 break;
5765 case _PC_PIPE_BUF:
5766 *ap->a_retval = PIPE_BUF;
5767 break;
5768 case _PC_CHOWN_RESTRICTED:
5769 *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */
5770 break;
5771 case _PC_NO_TRUNC:
5772 *ap->a_retval = 200112; /* _POSIX_NO_TRUNC */
5773 break;
5774 case _PC_NAME_CHARS_MAX:
5775 #if CONFIG_HFS_STD
5776 if (std_hfs) {
5777 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
5778 } else
5779 #endif
5780 {
5781 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
5782 }
5783 break;
5784 case _PC_CASE_SENSITIVE:
5785 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE)
5786 *ap->a_retval = 1;
5787 else
5788 *ap->a_retval = 0;
5789 break;
5790 case _PC_CASE_PRESERVING:
5791 *ap->a_retval = 1;
5792 break;
5793 case _PC_FILESIZEBITS:
5794 /* number of bits to store max file size */
5795 #if CONFIG_HFS_STD
5796 if (std_hfs) {
5797 *ap->a_retval = 32;
5798 } else
5799 #endif
5800 {
5801 *ap->a_retval = 64;
5802 }
5803 break;
5804 case _PC_XATTR_SIZE_BITS:
5805 /* Number of bits to store maximum extended attribute size */
5806 *ap->a_retval = HFS_XATTR_SIZE_BITS;
5807 break;
5808 default:
5809 return (EINVAL);
5810 }
5811
5812 return (0);
5813 }
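/*
 * Illustrative userspace sketch, not part of this file: querying the limits
 * that hfs_vnop_pathconf() reports, via pathconf(2).  _PC_CASE_SENSITIVE is
 * an Apple extension; the path used here is an arbitrary example.
 */
#include <stdio.h>
#include <unistd.h>

static void
sketch_query_pathconf(const char *path)
{
	long name_max = pathconf(path, _PC_NAME_MAX);         /* 255 on HFS+ */
	long case_sens = pathconf(path, _PC_CASE_SENSITIVE);  /* 1 only on case-sensitive HFSX */

	printf("%s: NAME_MAX=%ld case-sensitive=%ld\n", path, name_max, case_sens);
}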
5814
5815 /*
5816 * Prepares a fork for cat_update by making sure ff_size and ff_blocks
5817 * are no bigger than the valid data on disk, thus reducing the chance
5818 * of exposing uninitialised data in the event of an unclean unmount.
5819 * fork_buf is where to put the temporary copy if required. (It can
5820 * be inside pfork.)
5821 */
5822 const struct cat_fork *
5823 hfs_prepare_fork_for_update(filefork_t *ff,
5824 const struct cat_fork *cf,
5825 struct cat_fork *cf_buf,
5826 uint32_t block_size)
5827 {
5828 if (!ff)
5829 return NULL;
5830
5831 if (!cf)
5832 cf = &ff->ff_data;
5833 if (!cf_buf)
5834 cf_buf = &ff->ff_data;
5835
5836 off_t max_size = ff->ff_size;
5837
5838 // Check first invalid range
5839 if (!TAILQ_EMPTY(&ff->ff_invalidranges))
5840 max_size = TAILQ_FIRST(&ff->ff_invalidranges)->rl_start;
5841
5842 if (!ff->ff_unallocblocks && ff->ff_size <= max_size)
5843 return cf; // Nothing to do
5844
5845 if (ff->ff_blocks < ff->ff_unallocblocks) {
5846 panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
5847 ff->ff_blocks, ff->ff_unallocblocks);
5848 }
5849
5850 struct cat_fork *out = cf_buf;
5851
5852 if (out != cf)
5853 bcopy(cf, out, sizeof(*cf));
5854
5855 // Adjust cf_blocks for cf_vblocks
5856 out->cf_blocks -= out->cf_vblocks;
5857
5858 /*
5859 * Here we trim the size with the updated cf_blocks. This is
5860 * probably unnecessary now because the invalid ranges should
5861 * catch this (but that wasn't always the case).
5862 */
5863 off_t alloc_bytes = hfs_blk_to_bytes(out->cf_blocks, block_size);
5864 if (out->cf_size > alloc_bytes)
5865 out->cf_size = alloc_bytes;
5866
5867 // Trim cf_size to first invalid range
5868 if (out->cf_size > max_size)
5869 out->cf_size = max_size;
5870
5871 return out;
5872 }
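/*
 * Illustrative sketch, not part of this file, of the clamping done in
 * hfs_prepare_fork_for_update() above: the catalog copy of the fork size is
 * limited both by the start of the first invalid (not-yet-written) range and
 * by the bytes actually allocated once unallocated ("borrowed") blocks are
 * excluded.  All names below are local to the sketch.
 */
#include <stdint.h>

static uint64_t
sketch_clamp_fork_size(uint64_t ff_size,
                       uint64_t first_invalid_start, /* == ff_size when no invalid ranges */
                       uint32_t alloc_blocks,        /* cf_blocks - cf_vblocks */
                       uint32_t block_size)
{
	uint64_t size = ff_size;
	uint64_t alloc_bytes = (uint64_t)alloc_blocks * block_size;

	if (size > alloc_bytes)
		size = alloc_bytes;            /* never claim more than is allocated */
	if (size > first_invalid_start)
		size = first_invalid_start;    /* never expose uninitialised data */
	return size;
}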
5873
5874 /*
5875 * Update a cnode's on-disk metadata.
5876 *
5877 * The cnode must be locked exclusive. See declaration for possible
5878 * options.
5879 */
5880 int
5881 hfs_update(struct vnode *vp, int options)
5882 {
5883 struct cnode *cp = VTOC(vp);
5884 struct proc *p;
5885 const struct cat_fork *dataforkp = NULL;
5886 const struct cat_fork *rsrcforkp = NULL;
5887 struct cat_fork datafork;
5888 struct cat_fork rsrcfork;
5889 struct hfsmount *hfsmp;
5890 int lockflags;
5891 int error;
5892 uint32_t tstate = 0;
5893
5894 if (ISSET(cp->c_flag, C_NOEXISTS))
5895 return 0;
5896
5897 p = current_proc();
5898 hfsmp = VTOHFS(vp);
5899
5900 if (((vnode_issystem(vp) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))) ||
5901 hfsmp->hfs_catalog_vp == NULL){
5902 return (0);
5903 }
5904 if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (cp->c_mode == 0)) {
5905 CLR(cp->c_flag, C_MODIFIED | C_MINOR_MOD | C_NEEDS_DATEADDED);
5906 cp->c_touch_acctime = 0;
5907 cp->c_touch_chgtime = 0;
5908 cp->c_touch_modtime = 0;
5909 return (0);
5910 }
5911 if (kdebug_enable) {
5912 if (cp->c_touch_acctime || cp->c_atime != cp->c_attr.ca_atimeondisk)
5913 tstate |= DBG_HFS_UPDATE_ACCTIME;
5914 if (cp->c_touch_modtime)
5915 tstate |= DBG_HFS_UPDATE_MODTIME;
5916 if (cp->c_touch_chgtime)
5917 tstate |= DBG_HFS_UPDATE_CHGTIME;
5918
5919 if (cp->c_flag & C_MODIFIED)
5920 tstate |= DBG_HFS_UPDATE_MODIFIED;
5921 if (ISSET(options, HFS_UPDATE_FORCE))
5922 tstate |= DBG_HFS_UPDATE_FORCE;
5923 if (cp->c_flag & C_NEEDS_DATEADDED)
5924 tstate |= DBG_HFS_UPDATE_DATEADDED;
5925 if (cp->c_flag & C_MINOR_MOD)
5926 tstate |= DBG_HFS_UPDATE_MINOR;
5927 }
5928 hfs_touchtimes(hfsmp, cp);
5929
5930 if (!ISSET(cp->c_flag, C_MODIFIED | C_MINOR_MOD)
5931 && !hfs_should_save_atime(cp)) {
5932 // Nothing to update
5933 return 0;
5934 }
5935
5936 KDBG(HFSDBG_UPDATE | DBG_FUNC_START, kdebug_vnode(vp), tstate);
5937
5938 bool check_txn = false;
5939
5940 if (!ISSET(options, HFS_UPDATE_FORCE) && !ISSET(cp->c_flag, C_MODIFIED)) {
5941 /*
5942 * This must be a minor modification. If the current
5943 * transaction already has an update for this node, then we
5944 * bundle in the modification.
5945 */
5946 if (hfsmp->jnl
5947 && journal_current_txn(hfsmp->jnl) == cp->c_update_txn) {
5948 check_txn = true;
5949 } else {
5950 tstate |= DBG_HFS_UPDATE_SKIPPED;
5951 error = 0;
5952 goto exit;
5953 }
5954 }
5955
5956 if ((error = hfs_start_transaction(hfsmp)) != 0)
5957 goto exit;
5958
5959 if (check_txn
5960 && journal_current_txn(hfsmp->jnl) != cp->c_update_txn) {
5961 hfs_end_transaction(hfsmp);
5962 tstate |= DBG_HFS_UPDATE_SKIPPED;
5963 error = 0;
5964 goto exit;
5965 }
5966
5967 if (cp->c_datafork)
5968 dataforkp = &cp->c_datafork->ff_data;
5969 if (cp->c_rsrcfork)
5970 rsrcforkp = &cp->c_rsrcfork->ff_data;
5971
5972 /*
5973 * Modify the values passed to cat_update based on whether or not
5974 * the file has invalid ranges or borrowed blocks.
5975 */
5976 dataforkp = hfs_prepare_fork_for_update(cp->c_datafork, NULL, &datafork, hfsmp->blockSize);
5977 rsrcforkp = hfs_prepare_fork_for_update(cp->c_rsrcfork, NULL, &rsrcfork, hfsmp->blockSize);
5978
5979 if (__builtin_expect(kdebug_enable & KDEBUG_TRACE, 0)) {
5980 long dbg_parms[NUMPARMS];
5981 int dbg_namelen;
5982
5983 dbg_namelen = NUMPARMS * sizeof(long);
5984 vn_getpath(vp, (char *)dbg_parms, &dbg_namelen);
5985
5986 if (dbg_namelen < (int)sizeof(dbg_parms))
5987 memset((char *)dbg_parms + dbg_namelen, 0, sizeof(dbg_parms) - dbg_namelen);
5988
5989 kdebug_lookup_gen_events(dbg_parms, dbg_namelen, (void *)vp, TRUE);
5990 }
5991
5992 /*
5993 * Lock the Catalog b-tree file.
5994 */
5995 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
5996
5997 error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);
5998
5999 if (hfsmp->jnl)
6000 cp->c_update_txn = journal_current_txn(hfsmp->jnl);
6001
6002 hfs_systemfile_unlock(hfsmp, lockflags);
6003
6004 CLR(cp->c_flag, C_MODIFIED | C_MINOR_MOD);
6005
6006 hfs_end_transaction(hfsmp);
6007
6008 exit:
6009
6010 KDBG(HFSDBG_UPDATE | DBG_FUNC_END, kdebug_vnode(vp), tstate, error);
6011
6012 return error;
6013 }
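/*
 * Illustrative sketch, not part of this file, of the "minor modification"
 * short-circuit in hfs_update() above: without HFS_UPDATE_FORCE and without
 * C_MODIFIED, the catalog record is only rewritten when the write can ride
 * along with the journal transaction that already contains this cnode.
 */
#include <stdbool.h>

static bool
sketch_minor_update_should_write(bool force, bool major_modified,
                                 bool journaled, bool same_txn_as_last_update)
{
	if (force || major_modified)
		return true;                              /* always write the record */
	return journaled && same_txn_as_last_update;  /* otherwise only piggyback */
}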
6014
6015 /*
6016 * Allocate a new node
6017 */
6018 int
6019 hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
6020 struct vnode_attr *vap, vfs_context_t ctx)
6021 {
6022 struct cnode *cp = NULL;
6023 struct cnode *dcp = NULL;
6024 struct vnode *tvp;
6025 struct hfsmount *hfsmp;
6026 struct cat_desc in_desc, out_desc;
6027 struct cat_attr attr;
6028 struct timeval tv;
6029 int lockflags;
6030 int error, started_tr = 0;
6031 enum vtype vnodetype;
6032 int mode;
6033 int newvnode_flags = 0;
6034 u_int32_t gnv_flags = 0;
6035 int protectable_target = 0;
6036 int nocache = 0;
6037 vnode_t old_doc_vp = NULL;
6038
6039 #if CONFIG_PROTECT
6040 struct cprotect *entry = NULL;
6041 int32_t cp_class = -1;
6042
6043 /*
6044 * By default, it's OK for AKS to override our target class preferences.
6045 */
6046 uint32_t keywrap_flags = CP_KEYWRAP_DIFFCLASS;
6047
6048 if (VATTR_IS_ACTIVE(vap, va_dataprotect_class)) {
6049 cp_class = (int32_t)vap->va_dataprotect_class;
6050 /*
6051 * Since the user specifically requested this target class be used,
6052 * we want to fail this creation operation if we cannot wrap to their
6053 * target class. The CP_KEYWRAP_DIFFCLASS bit says that it is OK to
6054 * use a different class than the one specified, so we turn that off
6055 * now.
6056 */
6057 keywrap_flags &= ~CP_KEYWRAP_DIFFCLASS;
6058 }
6059 int protected_mount = 0;
6060 #endif
6061
6062
6063 if ((error = hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
6064 return (error);
6065
6066 /* set the cnode pointer only after successfully acquiring lock */
6067 dcp = VTOC(dvp);
6068
6069 /* Don't allow creation of new entries in open-unlinked directories */
6070 if ((error = hfs_checkdeleted(dcp))) {
6071 hfs_unlock(dcp);
6072 return error;
6073 }
6074
6075 dcp->c_flag |= C_DIR_MODIFICATION;
6076
6077 hfsmp = VTOHFS(dvp);
6078
6079 *vpp = NULL;
6080 tvp = NULL;
6081 out_desc.cd_flags = 0;
6082 out_desc.cd_nameptr = NULL;
6083
6084 vnodetype = vap->va_type;
6085 if (vnodetype == VNON)
6086 vnodetype = VREG;
6087 mode = MAKEIMODE(vnodetype, vap->va_mode);
6088
6089 if (S_ISDIR (mode) || S_ISREG (mode)) {
6090 protectable_target = 1;
6091 }
6092
6093
6094 /* Check if we're out of usable disk space. */
6095 if ((hfs_freeblks(hfsmp, 1) == 0) && (vfs_context_suser(ctx) != 0)) {
6096 error = ENOSPC;
6097 goto exit;
6098 }
6099
6100 microtime(&tv);
6101
6102 /* Setup the default attributes */
6103 bzero(&attr, sizeof(attr));
6104 attr.ca_mode = mode;
6105 attr.ca_linkcount = 1;
6106 if (VATTR_IS_ACTIVE(vap, va_rdev)) {
6107 attr.ca_rdev = vap->va_rdev;
6108 }
6109 if (VATTR_IS_ACTIVE(vap, va_create_time)) {
6110 VATTR_SET_SUPPORTED(vap, va_create_time);
6111 attr.ca_itime = vap->va_create_time.tv_sec;
6112 } else {
6113 attr.ca_itime = tv.tv_sec;
6114 }
6115 #if CONFIG_HFS_STD
6116 if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
6117 attr.ca_itime += 3600; /* Same as what hfs_update does */
6118 }
6119 #endif
6120 attr.ca_atime = attr.ca_ctime = attr.ca_mtime = attr.ca_itime;
6121 attr.ca_atimeondisk = attr.ca_atime;
6122 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6123 VATTR_SET_SUPPORTED(vap, va_flags);
6124 attr.ca_flags = vap->va_flags;
6125 }
6126
6127 /*
6128 * HFS+ only: all files get ThreadExists
6129 * HFSX only: dirs get HasFolderCount
6130 */
6131 #if CONFIG_HFS_STD
6132 if (!(hfsmp->hfs_flags & HFS_STANDARD))
6133 #endif
6134 {
6135 if (vnodetype == VDIR) {
6136 if (hfsmp->hfs_flags & HFS_FOLDERCOUNT)
6137 attr.ca_recflags = kHFSHasFolderCountMask;
6138 } else {
6139 attr.ca_recflags = kHFSThreadExistsMask;
6140 }
6141 }
6142
6143 #if CONFIG_PROTECT
6144 if (cp_fs_protected(hfsmp->hfs_mp)) {
6145 protected_mount = 1;
6146 }
6147 /*
6148 * On a content-protected HFS+/HFSX filesystem, files and directories
6149 * cannot be created without atomically setting/creating the EA that
6150 * contains the protection class metadata and keys at the same time, in
6151 * the same transaction. As a result, pre-set the "EAs exist" flag
6152 * on the cat_attr for protectable catalog record creations. This will
6153 * cause the cnode creation routine in hfs_getnewvnode to mark the cnode
6154 * as having EAs.
6155 */
6156 if ((protected_mount) && (protectable_target)) {
6157 attr.ca_recflags |= kHFSHasAttributesMask;
6158 /* delay entering in the namecache */
6159 nocache = 1;
6160 }
6161 #endif
6162
6163
6164 /*
6165 * Add the date added to the item. See above, as
6166 * all of the dates are set to the itime.
6167 */
6168 hfs_write_dateadded (&attr, attr.ca_atime);
6169
6170 /* Initialize the gen counter to 1 */
6171 hfs_write_gencount(&attr, (uint32_t)1);
6172
6173 attr.ca_uid = vap->va_uid;
6174 attr.ca_gid = vap->va_gid;
6175 VATTR_SET_SUPPORTED(vap, va_mode);
6176 VATTR_SET_SUPPORTED(vap, va_uid);
6177 VATTR_SET_SUPPORTED(vap, va_gid);
6178
6179 #if QUOTA
6180 /* check to see if this node's creation would cause us to go over
6181 * quota. If so, abort this operation.
6182 */
6183 if (hfsmp->hfs_flags & HFS_QUOTAS) {
6184 if ((error = hfs_quotacheck(hfsmp, 1, attr.ca_uid, attr.ca_gid,
6185 vfs_context_ucred(ctx)))) {
6186 goto exit;
6187 }
6188 }
6189 #endif
6190
6191
6192 /* Tag symlinks with a type and creator. */
6193 if (vnodetype == VLNK) {
6194 struct FndrFileInfo *fip;
6195
6196 fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
6197 fip->fdType = SWAP_BE32(kSymLinkFileType);
6198 fip->fdCreator = SWAP_BE32(kSymLinkCreator);
6199 }
6200
6201 /* Setup the descriptor */
6202 in_desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
6203 in_desc.cd_namelen = cnp->cn_namelen;
6204 in_desc.cd_parentcnid = dcp->c_fileid;
6205 in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
6206 in_desc.cd_hint = dcp->c_childhint;
6207 in_desc.cd_encoding = 0;
6208
6209 #if CONFIG_PROTECT
6210 /*
6211 * To preserve file creation atomicity with regards to the content protection EA,
6212 * we must create the file in the catalog and then write out its EA in the same
6213 * transaction.
6214 *
6215 * We only denote the target class in this EA; key generation is not completed
6216 * until the file has been inserted into the catalog and will be done
6217 * in a separate transaction.
6218 */
6219 if ((protected_mount) && (protectable_target)) {
6220 error = cp_setup_newentry(hfsmp, dcp, cp_class, attr.ca_mode, &entry);
6221 if (error) {
6222 goto exit;
6223 }
6224 }
6225 #endif
6226
6227 if ((error = hfs_start_transaction(hfsmp)) != 0) {
6228 goto exit;
6229 }
6230 started_tr = 1;
6231
6232 // have to also lock the attribute file because cat_create() needs
6233 // to check that any fileID it wants to use does not have orphaned
6234 // attributes in it.
6235 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
6236 cnid_t new_id;
6237
6238 /* Reserve some space in the Catalog file. */
6239 if ((error = cat_preflight(hfsmp, CAT_CREATE, NULL, 0))) {
6240 hfs_systemfile_unlock(hfsmp, lockflags);
6241 goto exit;
6242 }
6243
6244 if ((error = cat_acquire_cnid(hfsmp, &new_id))) {
6245 hfs_systemfile_unlock (hfsmp, lockflags);
6246 goto exit;
6247 }
6248
6249 error = cat_create(hfsmp, new_id, &in_desc, &attr, &out_desc);
6250 if (error == 0) {
6251 /* Update the parent directory */
6252 dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
6253 dcp->c_entries++;
6254
6255 if (vnodetype == VDIR) {
6256 INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
6257 }
6258 dcp->c_dirchangecnt++;
6259 hfs_incr_gencount(dcp);
6260
6261 dcp->c_touch_chgtime = dcp->c_touch_modtime = true;
6262 dcp->c_flag |= C_MODIFIED;
6263
6264 hfs_update(dcp->c_vp, 0);
6265
6266 #if CONFIG_PROTECT
6267 /*
6268 * If we are creating a content protected file, now is when
6269 * we create the EA. We must create it in the same transaction
6270 * that creates the file. We can also guarantee that the file
6271 * MUST exist because we are still holding the catalog lock
6272 * at this point.
6273 */
6274 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
6275 error = cp_setxattr (NULL, entry, hfsmp, attr.ca_fileid, XATTR_CREATE);
6276
6277 if (error) {
6278 int delete_err;
6279 /*
6280 * If we fail the EA creation, then we need to delete the file.
6281 * Luckily, we are still holding all of the right locks.
6282 */
6283 delete_err = cat_delete (hfsmp, &out_desc, &attr);
6284 if (delete_err == 0) {
6285 /* Update the parent directory */
6286 if (dcp->c_entries > 0)
6287 dcp->c_entries--;
6288 dcp->c_dirchangecnt++;
6289 dcp->c_ctime = tv.tv_sec;
6290 dcp->c_mtime = tv.tv_sec;
6291 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
6292 }
6293
6294 /* Emit EINVAL if we fail to create EA*/
6295 error = EINVAL;
6296 }
6297 }
6298 #endif
6299 }
6300 hfs_systemfile_unlock(hfsmp, lockflags);
6301 if (error)
6302 goto exit;
6303
6304 uint32_t txn = hfsmp->jnl ? journal_current_txn(hfsmp->jnl) : 0;
6305
6306 /* Invalidate negative cache entries in the directory */
6307 if (dcp->c_flag & C_NEG_ENTRIES) {
6308 cache_purge_negatives(dvp);
6309 dcp->c_flag &= ~C_NEG_ENTRIES;
6310 }
6311
6312 hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
6313 (dcp->c_cnid == kHFSRootFolderID));
6314
6315 // XXXdbg
6316 // have to end the transaction here before we call hfs_getnewvnode()
6317 // because that can cause us to try and reclaim a vnode on a different
6318 // file system which could cause us to start a transaction which can
6319 // deadlock with someone on that other file system (since we could be
6320 // holding two transaction locks as well as various vnodes and we did
6321 // not obtain the locks on them in the proper order).
6322 //
6323 // NOTE: this means that if the quota check fails or we have to update
6324 // the change time on a block-special device that those changes
6325 // will happen as part of independent transactions.
6326 //
6327 if (started_tr) {
6328 hfs_end_transaction(hfsmp);
6329 started_tr = 0;
6330 }
6331
6332 #if CONFIG_PROTECT
6333 /*
6334 * At this point, we must have encountered success with writing the EA.
6335 * Destroy our temporary cprotect (which had no keys).
6336 */
6337
6338 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
6339 cp_entry_destroy (hfsmp, entry);
6340 entry = NULL;
6341 }
6342 #endif
6343 gnv_flags |= GNV_CREATE;
6344 if (nocache) {
6345 gnv_flags |= GNV_NOCACHE;
6346 }
6347
6348 /*
6349 * Create a vnode for the object just created.
6350 *
6351 * NOTE: Maintaining the cnode lock on the parent directory is important,
6352 * as it prevents race conditions where other threads want to look up entries
6353 * in the directory and/or add things as we are in the process of creating
6354 * the vnode below. However, this has the potential for causing a
6355 * double lock panic when dealing with shadow files on an HFS boot partition.
6356 * The panic could occur if we are not cleaning up after ourselves properly
6357 * when done with a shadow file or in the error cases. The error would occur if we
6358 * try to create a new vnode, and then end up reclaiming another shadow vnode to
6359 * create the new one. However, if everything is working properly, this should
6360 * be a non-issue as we would never enter that reclaim codepath.
6361 *
6362 * The cnode is locked on successful return.
6363 */
6364 error = hfs_getnewvnode(hfsmp, dvp, cnp, &out_desc, gnv_flags, &attr,
6365 NULL, &tvp, &newvnode_flags);
6366 if (error)
6367 goto exit;
6368
6369 cp = VTOC(tvp);
6370
6371 cp->c_update_txn = txn;
6372
6373 struct doc_tombstone *ut;
6374 ut = doc_tombstone_get();
6375 if ( ut->t_lastop_document_id != 0
6376 && ut->t_lastop_parent == dvp
6377 && ut->t_lastop_parent_vid == vnode_vid(dvp)
6378 && strcmp((char *)ut->t_lastop_filename, (const char *)cp->c_desc.cd_nameptr) == 0) {
6379 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
6380
6381 //printf("CREATE: preserving doc-id %lld on %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
6382 fip->document_id = (uint32_t)(ut->t_lastop_document_id & 0xffffffff);
6383
6384 cp->c_bsdflags |= UF_TRACKED;
6385 cp->c_flag |= C_MODIFIED;
6386
6387 if ((error = hfs_start_transaction(hfsmp)) == 0) {
6388 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
6389
6390 (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
6391
6392 hfs_systemfile_unlock (hfsmp, lockflags);
6393 (void) hfs_end_transaction(hfsmp);
6394 }
6395
6396 doc_tombstone_clear(ut, &old_doc_vp);
6397 } else if (ut->t_lastop_document_id != 0) {
6398 int len = cnp->cn_namelen;
6399 if (len == 0) {
6400 len = strlen(cnp->cn_nameptr);
6401 }
6402
6403 if (doc_tombstone_should_ignore_name(cnp->cn_nameptr, cnp->cn_namelen)) {
6404 // printf("CREATE: not clearing tombstone because %s is a temp name.\n", cnp->cn_nameptr);
6405 } else {
6406 // Clear the tombstone because the thread is not recreating the same path
6407 // printf("CREATE: clearing tombstone because %s is NOT a temp name.\n", cnp->cn_nameptr);
6408 doc_tombstone_clear(ut, NULL);
6409 }
6410 }
6411
6412 if ((hfsmp->hfs_flags & HFS_CS_HOTFILE_PIN) && (vnode_isfastdevicecandidate(dvp) && !vnode_isautocandidate(dvp))) {
6413
6414 //printf("hfs: flagging %s (fileid: %d) as VFASTDEVCANDIDATE (dvp name: %s)\n",
6415 // cnp->cn_nameptr ? cnp->cn_nameptr : "<NONAME>",
6416 // cp->c_fileid,
6417 // dvp->v_name ? dvp->v_name : "no-dir-name");
6418
6419 //
6420 // On new files we set the FastDevCandidate flag so that
6421 // any new blocks allocated to it will be pinned.
6422 //
6423 cp->c_attr.ca_recflags |= kHFSFastDevCandidateMask;
6424 vnode_setfastdevicecandidate(tvp);
6425
6426 //
6427 // properly inherit auto-cached flags
6428 //
6429 if (vnode_isautocandidate(dvp)) {
6430 cp->c_attr.ca_recflags |= kHFSAutoCandidateMask;
6431 vnode_setautocandidate(tvp);
6432 }
6433
6434
6435 //
6436 // We also want to add it to the hotfile adoption list so
6437 // that it will eventually land in the hotfile btree
6438 //
6439 (void) hfs_addhotfile(tvp);
6440 }
6441
6442 *vpp = tvp;
6443
6444 #if CONFIG_PROTECT
6445 /*
6446 * Now that we have a vnode-in-hand, generate keys for this namespace item.
6447 * If we fail to create the keys, then attempt to delete the item from the
6448 * namespace. If we can't delete the item, that's not desirable but also not fatal.
6449 * All of the places which deal with restoring/unwrapping keys must also be
6450 * prepared to encounter an entry that does not have keys.
6451 */
6452 if ((protectable_target) && (protected_mount)) {
6453 struct cprotect *keyed_entry = NULL;
6454
6455 if (cp->c_cpentry == NULL) {
6456 panic ("hfs_makenode: no cpentry for cnode (%p)", cp);
6457 }
6458
6459 error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), keywrap_flags, &keyed_entry);
6460 if (error == 0) {
6461 /*
6462 * Upon success, the keys were generated and written out.
6463 * Update the cp pointer in the cnode.
6464 */
6465 cp_replace_entry (hfsmp, cp, keyed_entry);
6466 if (nocache) {
6467 cache_enter (dvp, tvp, cnp);
6468 }
6469 }
6470 else {
6471 /* If key creation OR the setxattr failed, emit EPERM to userland */
6472 error = EPERM;
6473
6474 /*
6475 * Beware! This slightly violates the lock ordering for the
6476 * cnode/vnode 'tvp'. Ordinarily, you must acquire the truncate lock
6477 * which guards file size changes before acquiring the normal cnode lock
6478 * and calling hfs_removefile on an item.
6479 *
6480 * However, in this case, we are still holding the directory lock so
6481 * 'tvp' is not lookup-able and it was a newly created vnode so it
6482 * cannot have any content yet. The only reason we are initiating
6483 * the removefile is because we could not generate content protection keys
6484 * for this namespace item. Note also that we pass a '1' in the allow_dirs
6485 * argument for hfs_removefile because we may be creating a directory here.
6486 *
6487 * All this to say that while it is technically a violation it is
6488 * impossible to race with another thread for this cnode so it is safe.
6489 */
6490 int err = hfs_removefile (dvp, tvp, cnp, 0, 0, 1, NULL, 0);
6491 if (err) {
6492 printf("hfs_makenode: removefile failed (%d) for CP entry %p\n", err, tvp);
6493 }
6494
6495 /* Release the cnode lock and mark the vnode for termination */
6496 hfs_unlock (cp);
6497 err = vnode_recycle (tvp);
6498 if (err) {
6499 printf("hfs_makenode: vnode_recycle failed (%d) for CP entry %p\n", err, tvp);
6500 }
6501
6502 /* Drop the iocount on the new vnode to force reclamation/recycling */
6503 vnode_put (tvp);
6504 cp = NULL;
6505 *vpp = NULL;
6506 }
6507 }
6508 #endif
6509
6510 #if QUOTA
6511 /*
6512 * Once we create this vnode, we need to initialize its quota data
6513 * structures, if necessary. We know that it is OK to just go ahead and
6514 * initialize because we've already validated earlier (through the hfs_quotacheck
6515 * function) to see if creating this cnode/vnode would cause us to go over quota.
6516 */
6517 if (hfsmp->hfs_flags & HFS_QUOTAS) {
6518 if (cp) {
6519 /* cp could have been zeroed earlier */
6520 (void) hfs_getinoquota(cp);
6521 }
6522 }
6523 #endif
6524
6525 exit:
6526 cat_releasedesc(&out_desc);
6527
6528 #if CONFIG_PROTECT
6529 /*
6530 * We may have jumped here in error-handling various situations above.
6531 * If we haven't already dumped the temporary CP used to initialize
6532 * the file atomically, then free it now. cp_entry_destroy should null
6533 * out the pointer if it was called already.
6534 */
6535 if (entry) {
6536 cp_entry_destroy (hfsmp, entry);
6537 entry = NULL;
6538 }
6539 #endif
6540
6541 /*
6542 * Make sure we release cnode lock on dcp.
6543 */
6544 if (dcp) {
6545 dcp->c_flag &= ~C_DIR_MODIFICATION;
6546 wakeup((caddr_t)&dcp->c_flag);
6547
6548 hfs_unlock(dcp);
6549 }
6550 ino64_t file_id = 0;
6551 if (error == 0 && cp != NULL) {
6552 file_id = cp->c_fileid;
6553 hfs_unlock(cp);
6554 }
6555 if (started_tr) {
6556 hfs_end_transaction(hfsmp);
6557 started_tr = 0;
6558 }
6559
6560 if (old_doc_vp) {
6561 cnode_t *ocp = VTOC(old_doc_vp);
6562 hfs_lock_always(ocp, HFS_EXCLUSIVE_LOCK);
6563 struct FndrExtendedFileInfo *ofip = (struct FndrExtendedFileInfo *)((char *)&ocp->c_attr.ca_finderinfo + 16);
6564
6565 const uint32_t doc_id = ofip->document_id;
6566 const ino64_t old_file_id = ocp->c_fileid;
6567
6568 // printf("clearing doc-id from ino %d\n", ocp->c_desc.cd_cnid);
6569 ofip->document_id = 0;
6570 ocp->c_bsdflags &= ~UF_TRACKED;
6571 ocp->c_flag |= C_MODIFIED;
6572
6573 hfs_unlock(ocp);
6574 vnode_put(old_doc_vp);
6575
6576 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
6577 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
6578 FSE_ARG_INO, old_file_id, // src inode #
6579 FSE_ARG_INO, file_id, // dst inode #
6580 FSE_ARG_INT32, doc_id,
6581 FSE_ARG_DONE);
6582 }
6583
6584 return (error);
6585 }
6586
6587
6588 /*
6589 * hfs_vgetrsrc acquires a resource fork vnode corresponding to the
6590 * cnode that is found in 'vp'. The cnode should be locked upon entry
6591 * and will be returned locked, but it may be dropped temporarily.
6592 *
6593 * If the resource fork vnode does not exist, HFS will attempt to acquire an
6594 * empty (uninitialized) vnode from VFS so as to avoid deadlocks with
6595 * jetsam. If we let the normal getnewvnode code produce the vnode for us
6596 * we would be doing so while holding the cnode lock of our cnode.
6597 *
6598 * On success, *rvpp will hold the resource fork vnode with an
6599 * iocount. *Don't* forget the vnode_put.
6600 */
6601 int
6602 hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp)
6603 {
6604 struct vnode *rvp = NULLVP;
6605 struct vnode *empty_rvp = NULLVP;
6606 struct vnode *dvp = NULLVP;
6607 struct cnode *cp = VTOC(vp);
6608 int error;
6609 int vid;
6610
6611 if (vnode_vtype(vp) == VDIR) {
6612 return EINVAL;
6613 }
6614
6615 restart:
6616 /* Attempt to use existing vnode */
6617 if ((rvp = cp->c_rsrc_vp)) {
6618 vid = vnode_vid(rvp);
6619
6620 // vnode_getwithvid can block so we need to drop the cnode lock
6621 hfs_unlock(cp);
6622
6623 error = vnode_getwithvid(rvp, vid);
6624
6625 hfs_lock_always(cp, HFS_EXCLUSIVE_LOCK);
6626
6627 /*
6628 * When our lock was relinquished, the resource fork
6629 * could have been recycled. Check for this and try
6630 * again.
6631 */
6632 if (error == ENOENT)
6633 goto restart;
6634
6635 if (error) {
6636 const char * name = (const char *)VTOC(vp)->c_desc.cd_nameptr;
6637
6638 if (name)
6639 printf("hfs_vgetrsrc: couldn't get resource"
6640 " fork for %s, vol=%s, err=%d\n", name, hfsmp->vcbVN, error);
6641 return (error);
6642 }
6643 } else {
6644 struct cat_fork rsrcfork;
6645 struct componentname cn;
6646 struct cat_desc *descptr = NULL;
6647 struct cat_desc to_desc;
6648 char delname[32];
6649 int lockflags;
6650 int newvnode_flags = 0;
6651
6652 /*
6653 * In this case, we don't currently see a resource fork vnode attached
6654 * to this cnode. In most cases, we were called from a read-only VNOP
6655 * like getattr, so it should be safe to drop the cnode lock and then
6656 * re-acquire it.
6657 *
6658 * Here, we drop the lock so that we can acquire an empty/husk
6659 * vnode so that we don't deadlock against jetsam.
6660 *
6661 * It does not currently appear possible to hold the truncate lock via
6662 * FS re-entrancy when we get to this point. (8/2014)
6663 */
6664 hfs_unlock (cp);
6665
6666 error = vnode_create_empty (&empty_rvp);
6667
6668 hfs_lock_always (cp, HFS_EXCLUSIVE_LOCK);
6669
6670 if (error) {
6671 /* If acquiring the 'empty' vnode failed, then nothing to clean up */
6672 return error;
6673 }
6674
6675 /*
6676 * We could have raced with another thread here while we dropped our cnode
6677 * lock. See if the cnode now has a resource fork vnode and restart if appropriate.
6678 *
6679 * Note: We just released the cnode lock, so there is a possibility that the
6680 * cnode that we just acquired has been deleted or even removed from disk
6681 * completely, though this is unlikely. If the file is open-unlinked, the
6682 * check below will resolve it for us. If it has been completely
6683 * removed (even from the catalog!), then when we examine the catalog
6684 * directly, below, while holding the catalog lock, we will not find the
6685 * item and we can fail out properly.
6686 */
6687 if (cp->c_rsrc_vp) {
6688 /* Drop the empty vnode before restarting */
6689 vnode_put (empty_rvp);
6690 empty_rvp = NULL;
6691 rvp = NULL;
6692 goto restart;
6693 }
6694
6695 /*
6696 * hfs_vgetrsrc may be invoked for a cnode that has already been marked
6697 * C_DELETED. This is because we need to continue to provide rsrc
6698 * fork access to open-unlinked files. In this case, build a fake descriptor
6699 * like in hfs_removefile. If we don't do this, buildkey will fail in
6700 * cat_lookup because this cnode has no name in its descriptor.
6701 */
6702 if ((cp->c_flag & C_DELETED ) && (cp->c_desc.cd_namelen == 0)) {
6703 bzero (&to_desc, sizeof(to_desc));
6704 bzero (delname, 32);
6705 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
6706 to_desc.cd_nameptr = (const u_int8_t*) delname;
6707 to_desc.cd_namelen = strlen(delname);
6708 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
6709 to_desc.cd_flags = 0;
6710 to_desc.cd_cnid = cp->c_cnid;
6711
6712 descptr = &to_desc;
6713 }
6714 else {
6715 descptr = &cp->c_desc;
6716 }
6717
6718
6719 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
6720
6721 /*
6722 * We call cat_idlookup (instead of cat_lookup) below because we can't
6723 * trust the descriptor in the provided cnode for lookups at this point.
6724 * Between the time of the original lookup of this vnode and now, the
6725 * descriptor could have gotten swapped or replaced. If this occurred,
6726 * the parent/name combo originally desired may not necessarily be provided
6727 * if we use the descriptor. Even worse, if the vnode represents
6728 * a hardlink, we could have removed one of the links from the namespace
6729 * but left the descriptor alone, since hfs_unlink does not invalidate
6730 * the descriptor in the cnode if other links still point to the inode.
6731 *
6732 * Consider the following (slightly contrived) scenario:
6733 * /tmp/a <--> /tmp/b (hardlinks).
6734 * 1. Thread A: open rsrc fork on /tmp/b.
6735 * 1a. Thread A: does lookup, goes out to lunch right before calling getnamedstream.
6736 * 2. Thread B does 'mv /foo/b /tmp/b'
6737 * 2a. Thread B succeeds.
6738 * 3. Thread A comes back and wants rsrc fork info for /tmp/b.
6739 *
6740 * Even though the hardlink backing /tmp/b is now eliminated, the descriptor
6741 * is not removed/updated during the unlink process. So, if you were to
6742 * do a lookup on /tmp/b, you'd acquire an entirely different record's resource
6743 * fork.
6744 *
6745 * As a result, we use the fileid, which should be invariant for the lifetime
6746 * of the cnode (possibly barring calls to exchangedata).
6747 *
6748 * Addendum: We can't do the above for HFS standard since we aren't guaranteed to
6749 * have thread records for files. They were only required for directories. So
6750 * we need to do the lookup with the catalog name. This is OK since hardlinks were
6751 * never allowed on HFS standard.
6752 */
6753
6754 /* Get resource fork data */
6755 #if CONFIG_HFS_STD
6756 if (ISSET(hfsmp->hfs_flags, HFS_STANDARD)) {
6757 /*
6758 * HFS standard only:
6759 *
6760 * Get the resource fork for this item with a cat_lookup call, but do not
6761 * force a case lookup since HFS standard is case-insensitive only. We
6762 * don't want the descriptor; just the fork data here. If we tried to
6763 * do a ID lookup (via thread record -> catalog record), then we might fail
6764 * prematurely since, as noted above, thread records were not strictly required
6765 * on files in HFS.
6766 */
6767 error = cat_lookup (hfsmp, descptr, 1, 0, (struct cat_desc*)NULL,
6768 (struct cat_attr*)NULL, &rsrcfork, NULL);
6769 } else
6770 #endif
6771 {
6772 error = cat_idlookup (hfsmp, cp->c_fileid, 0, 1, NULL, NULL, &rsrcfork);
6773 }
6774
6775 hfs_systemfile_unlock(hfsmp, lockflags);
6776 if (error) {
6777 /* Drop our 'empty' vnode ! */
6778 vnode_put (empty_rvp);
6779 return (error);
6780 }
6781 /*
6782 * Supply hfs_getnewvnode with a component name.
6783 */
6784 cn.cn_pnbuf = NULL;
6785 if (descptr->cd_nameptr) {
6786 void *buf = hfs_malloc(MAXPATHLEN);
6787
6788 cn = (struct componentname){
6789 .cn_nameiop = LOOKUP,
6790 .cn_flags = ISLASTCN,
6791 .cn_pnlen = MAXPATHLEN,
6792 .cn_pnbuf = buf,
6793 .cn_nameptr = buf,
6794 .cn_namelen = snprintf(buf, MAXPATHLEN,
6795 "%s%s", descptr->cd_nameptr,
6796 _PATH_RSRCFORKSPEC)
6797 };
6798
6799 // Should never happen because cn.cn_nameptr won't ever be long...
6800 if (cn.cn_namelen >= MAXPATHLEN) {
6801 hfs_free(buf, MAXPATHLEN);
6802 /* Drop our 'empty' vnode ! */
6803 vnode_put (empty_rvp);
6804 return ENAMETOOLONG;
6805
6806 }
6807 }
6808 dvp = vnode_getparent(vp);
6809
6810 /*
6811 * We are about to call hfs_getnewvnode and pass in the vnode that we acquired
6812 * earlier when we were not holding any locks. The semantics of GNV_USE_VP require that
6813 * either hfs_getnewvnode consume the vnode and vend it back to us, properly initialized,
6814 * or it will consume/dispose of it properly if it errors out.
6815 */
6816 rvp = empty_rvp;
6817
6818 error = hfs_getnewvnode(hfsmp, dvp, cn.cn_pnbuf ? &cn : NULL,
6819 descptr, (GNV_WANTRSRC | GNV_SKIPLOCK | GNV_USE_VP),
6820 &cp->c_attr, &rsrcfork, &rvp, &newvnode_flags);
6821
6822 if (dvp)
6823 vnode_put(dvp);
6824 hfs_free(cn.cn_pnbuf, MAXPATHLEN);
6825 if (error)
6826 return (error);
6827 } /* End 'else' for rsrc fork not existing */
6828
6829 *rvpp = rvp;
6830 return (0);
6831 }
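/*
 * Illustrative userspace sketch, not part of this file: the resource fork
 * vnode that hfs_vgetrsrc() vends to the VFS is reachable from user space
 * through the "/..namedfork/rsrc" suffix (_PATH_RSRCFORKSPEC).  The file
 * name below is an arbitrary example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void
sketch_read_rsrc_fork(void)
{
	char buf[512];
	int fd = open("example.txt/..namedfork/rsrc", O_RDONLY);

	if (fd >= 0) {
		ssize_t n = read(fd, buf, sizeof(buf));
		printf("read %zd bytes from the resource fork\n", n);
		close(fd);
	}
}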
6832
6833 /*
6834 * Wrapper for special device reads
6835 */
6836 int
6837 hfsspec_read(struct vnop_read_args *ap)
6838 {
6839 /*
6840 * Set access flag.
6841 */
6842 cnode_t *cp = VTOC(ap->a_vp);
6843
6844 if (cp)
6845 cp->c_touch_acctime = TRUE;
6846
6847 return spec_read(ap);
6848 }
6849
6850 /*
6851 * Wrapper for special device writes
6852 */
6853 int
6854 hfsspec_write(struct vnop_write_args *ap)
6855 {
6856 /*
6857 * Set update and change flags.
6858 */
6859 cnode_t *cp = VTOC(ap->a_vp);
6860
6861 if (cp) {
6862 cp->c_touch_chgtime = TRUE;
6863 cp->c_touch_modtime = TRUE;
6864 }
6865
6866 return spec_write(ap);
6867 }
6868
6869 /*
6870 * Wrapper for special device close
6871 *
6872 * Update the times on the cnode then do device close.
6873 */
6874 int
6875 hfsspec_close(struct vnop_close_args *ap)
6876 {
6877 struct vnode *vp = ap->a_vp;
6878 cnode_t *cp = VTOC(vp);
6879
6880 if (cp && vnode_isinuse(ap->a_vp, 0)) {
6881 if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
6882 hfs_touchtimes(VTOHFS(vp), cp);
6883 hfs_unlock(cp);
6884 }
6885 }
6886 return spec_close(ap);
6887 }
6888
6889 #if FIFO
6890 /*
6891 * Wrapper for fifo reads
6892 */
6893 static int
6894 hfsfifo_read(struct vnop_read_args *ap)
6895 {
6896 /*
6897 * Set access flag.
6898 */
6899 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
6900 return fifo_read(ap);
6901 }
6902
6903 /*
6904 * Wrapper for fifo writes
6905 */
6906 static int
6907 hfsfifo_write(struct vnop_write_args *ap)
6908 {
6909 /*
6910 * Set update and change flags.
6911 */
6912 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
6913 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
6914 return fifo_write(ap);
6915 }
6916
6917 /*
6918 * Wrapper for fifo close
6919 *
6920 * Update the times on the cnode then do device close.
6921 */
6922 static int
6923 hfsfifo_close(struct vnop_close_args *ap)
6924 {
6925 struct vnode *vp = ap->a_vp;
6926 struct cnode *cp;
6927
6928 if (vnode_isinuse(ap->a_vp, 1)) {
6929 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
6930 cp = VTOC(vp);
6931 hfs_touchtimes(VTOHFS(vp), cp);
6932 hfs_unlock(cp);
6933 }
6934 }
6935 return fifo_close(ap);
6936 }
6937
6938
6939 #endif /* FIFO */
6940
6941 /*
6942 * Getter for the document_id
6943 * the document_id is stored in FndrExtendedFileInfo/FndrExtendedDirInfo
6944 */
6945 static u_int32_t
6946 hfs_get_document_id_internal(const uint8_t *finderinfo, mode_t mode)
6947 {
6948 const uint8_t *finfo = NULL;
6949 u_int32_t doc_id = 0;
6950
6951 /* overlay the FinderInfo to the correct pointer, and advance */
6952 finfo = finderinfo + 16;
6953
6954 if (S_ISDIR(mode) || S_ISREG(mode)) {
6955 const struct FndrExtendedFileInfo *extinfo = (const struct FndrExtendedFileInfo *)finfo;
6956 doc_id = extinfo->document_id;
6957 }
6958
6959 return doc_id;
6960 }
6961
6962
6963 /* getter(s) for document id */
6964 u_int32_t
6965 hfs_get_document_id(struct cnode *cp)
6966 {
6967 return (hfs_get_document_id_internal((u_int8_t*)cp->c_finderinfo,
6968 cp->c_attr.ca_mode));
6969 }
6970
6971 /* If you have finderinfo and mode, you can use this */
6972 u_int32_t
6973 hfs_get_document_id_from_blob(const uint8_t *finderinfo, mode_t mode)
6974 {
6975 return (hfs_get_document_id_internal(finderinfo, mode));
6976 }
6977
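/*
 * Illustrative sketch (not part of the kernel build): one way the
 * document_id returned by the getters above surfaces to user space is via
 * getattrlist(2) with ATTR_CMN_DOCUMENT_ID.  The reply layout and the
 * helper name below are assumptions made for this example only; see
 * sys/attr.h and the getattrlist(2) man page for the authoritative
 * attribute definitions.
 */
#if 0
#include <sys/attr.h>
#include <unistd.h>
#include <stdio.h>
#include <stdint.h>

struct doc_id_reply {
	uint32_t length;	/* total length returned by getattrlist */
	uint32_t document_id;	/* ATTR_CMN_DOCUMENT_ID payload */
} __attribute__((packed));

static int
print_document_id(const char *path)
{
	struct attrlist al = {
		.bitmapcount = ATTR_BIT_MAP_COUNT,
		.commonattr  = ATTR_CMN_DOCUMENT_ID,
	};
	struct doc_id_reply reply;

	if (getattrlist(path, &al, &reply, sizeof(reply), 0) != 0)
		return (-1);
	printf("%s: document_id %u\n", path, reply.document_id);
	return (0);
}
#endif
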
6978 /*
6979 * Synchronize a file's in-core state with that on disk.
6980 */
6981 int
6982 hfs_vnop_fsync(struct vnop_fsync_args *ap)
6983 {
6984 struct vnode* vp = ap->a_vp;
6985 int error;
6986
6987 /* Note: We check the hfs flag rather than the vfs mount flag because,
6988 * during a read-only to read-write update, hfs marks itself read-write
6989 * much earlier than the vfs does; checking it here avoids skipping writes
6990 * such as zeroing out unused nodes or creating the hotfiles b-tree.
6991 */
6992 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) {
6993 return 0;
6994 }
6995
6996 /*
6997 * No need to call cp_handle_vnop to resolve fsync(). Any dirty data
6998 * should have caused the keys to be unwrapped when the data was put
6999 * into the UBC, whether at mmap, pagein, or read/write time. If anything
7000 * did slip through, the strategy routine will resolve the keys for us.
7001 *
7002 * We also need to tolerate ENOENT lock errors, since the unlink
7003 * system call can issue VNOP_FSYNC during vclean.
7004 */
7005 error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
7006 if (error)
7007 return (0);
7008
7009 error = hfs_fsync(vp, ap->a_waitfor, 0, vfs_context_proc(ap->a_context));
7010
7011 hfs_unlock(VTOC(vp));
7012 return (error);
7013 }
7014
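/*
 * Illustrative sketch (not part of the kernel build): hfs_vnop_fsync is
 * typically reached from user space via fsync(2), or via fcntl(F_FULLFSYNC)
 * when the caller needs the data forced all the way to the media rather
 * than just to the drive.  The helper name below is made up for this
 * example.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
flush_file(int fd, int force_to_media)
{
	if (force_to_media) {
		/* Ask the filesystem to flush the device's write cache too. */
		return fcntl(fd, F_FULLFSYNC);
	}
	/* Plain fsync: data is pushed to the device, not necessarily to media. */
	return fsync(fd);
}
#endif
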
7015 int (**hfs_vnodeop_p)(void *);
7016
7017 #define VOPFUNC int (*)(void *)
7018
7019
7020 #if CONFIG_HFS_STD
7021 int (**hfs_std_vnodeop_p) (void *);
7022 static int hfs_readonly_op (__unused void* ap) { return (EROFS); }
7023
7024 /*
7025 * In 10.6 and later, HFS Standard is read-only and deprecated. The vnop table below
7026 * is used for HFS Standard to block out operations that would modify the file system.
7027 */
7028
7029 struct vnodeopv_entry_desc hfs_standard_vnodeop_entries[] = {
7030 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7031 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
7032 { &vnop_create_desc, (VOPFUNC)hfs_readonly_op }, /* create (READONLY) */
7033 { &vnop_mknod_desc, (VOPFUNC)hfs_readonly_op }, /* mknod (READONLY) */
7034 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
7035 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
7036 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7037 { &vnop_setattr_desc, (VOPFUNC)hfs_readonly_op }, /* setattr */
7038 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
7039 { &vnop_write_desc, (VOPFUNC)hfs_readonly_op }, /* write (READONLY) */
7040 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
7041 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
7042 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
7043 { &vnop_exchange_desc, (VOPFUNC)hfs_readonly_op }, /* exchange (READONLY) */
7044 { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */
7045 { &vnop_fsync_desc, (VOPFUNC)hfs_readonly_op}, /* fsync (READONLY) */
7046 { &vnop_remove_desc, (VOPFUNC)hfs_readonly_op }, /* remove (READONLY) */
7047 { &vnop_link_desc, (VOPFUNC)hfs_readonly_op }, /* link (READONLY) */
7048 { &vnop_rename_desc, (VOPFUNC)hfs_readonly_op }, /* rename (READONLY) */
7049 { &vnop_mkdir_desc, (VOPFUNC)hfs_readonly_op }, /* mkdir (READONLY) */
7050 { &vnop_rmdir_desc, (VOPFUNC)hfs_readonly_op }, /* rmdir (READONLY) */
7051 { &vnop_symlink_desc, (VOPFUNC)hfs_readonly_op }, /* symlink (READONLY) */
7052 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
7053 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
7054 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
7055 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7056 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7057 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
7058 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
7059 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7060 { &vnop_allocate_desc, (VOPFUNC)hfs_readonly_op }, /* allocate (READONLY) */
7061 #if CONFIG_SEARCHFS
7062 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
7063 #else
7064 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
7065 #endif
7066 { &vnop_bwrite_desc, (VOPFUNC)hfs_readonly_op }, /* bwrite (READONLY) */
7067 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
7068 { &vnop_pageout_desc, (VOPFUNC)hfs_readonly_op }, /* pageout (READONLY) */
7069 { &vnop_copyfile_desc, (VOPFUNC)hfs_readonly_op }, /* copyfile (READONLY) */
7070 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7071 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7072 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7073 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7074 { &vnop_setxattr_desc, (VOPFUNC)hfs_readonly_op}, /* set xattr (READONLY) */
7075 { &vnop_removexattr_desc, (VOPFUNC)hfs_readonly_op}, /* remove xattr (READONLY) */
7076 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7077 #if NAMEDSTREAMS
7078 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
7079 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_readonly_op },
7080 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_readonly_op },
7081 #endif
7082 { &vnop_getattrlistbulk_desc, (VOPFUNC)hfs_vnop_getattrlistbulk }, /* getattrlistbulk */
7083 { NULL, (VOPFUNC)NULL }
7084 };
7085
7086 struct vnodeopv_desc hfs_std_vnodeop_opv_desc =
7087 { &hfs_std_vnodeop_p, hfs_standard_vnodeop_entries };
7088 #endif
7089
7090 /* VNOP table for HFS+ */
7091 struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
7092 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7093 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
7094 { &vnop_create_desc, (VOPFUNC)hfs_vnop_create }, /* create */
7095 { &vnop_mknod_desc, (VOPFUNC)hfs_vnop_mknod }, /* mknod */
7096 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
7097 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
7098 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7099 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7100 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
7101 { &vnop_write_desc, (VOPFUNC)hfs_vnop_write }, /* write */
7102 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
7103 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
7104 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
7105 { &vnop_exchange_desc, (VOPFUNC)hfs_vnop_exchange }, /* exchange */
7106 { &vnop_mmap_desc, (VOPFUNC)hfs_vnop_mmap }, /* mmap */
7107 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7108 { &vnop_remove_desc, (VOPFUNC)hfs_vnop_remove }, /* remove */
7109 { &vnop_link_desc, (VOPFUNC)hfs_vnop_link }, /* link */
7110 { &vnop_rename_desc, (VOPFUNC)hfs_vnop_rename }, /* rename */
7111 { &vnop_renamex_desc, (VOPFUNC)hfs_vnop_renamex }, /* renamex (with flags) */
7112 { &vnop_mkdir_desc, (VOPFUNC)hfs_vnop_mkdir }, /* mkdir */
7113 { &vnop_rmdir_desc, (VOPFUNC)hfs_vnop_rmdir }, /* rmdir */
7114 { &vnop_symlink_desc, (VOPFUNC)hfs_vnop_symlink }, /* symlink */
7115 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
7116 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
7117 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
7118 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7119 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7120 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
7121 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
7122 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7123 { &vnop_allocate_desc, (VOPFUNC)hfs_vnop_allocate }, /* allocate */
7124 #if CONFIG_SEARCHFS
7125 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
7126 #else
7127 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
7128 #endif
7129 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite }, /* bwrite */
7130 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
7131 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* pageout */
7132 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7133 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7134 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7135 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7136 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7137 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7138 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7139 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7140 #if NAMEDSTREAMS
7141 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
7142 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_vnop_makenamedstream },
7143 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_vnop_removenamedstream },
7144 #endif
7145 { &vnop_getattrlistbulk_desc, (VOPFUNC)hfs_vnop_getattrlistbulk }, /* getattrlistbulk */
7146 { &vnop_mnomap_desc, (VOPFUNC)hfs_vnop_mnomap },
7147 { NULL, (VOPFUNC)NULL }
7148 };
7149
7150 struct vnodeopv_desc hfs_vnodeop_opv_desc =
7151 { &hfs_vnodeop_p, hfs_vnodeop_entries };
7152
7153
7154 /* Spec Op vnop table for HFS+ */
7155 int (**hfs_specop_p)(void *);
7156 struct vnodeopv_entry_desc hfs_specop_entries[] = {
7157 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7158 { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */
7159 { &vnop_create_desc, (VOPFUNC)spec_create }, /* create */
7160 { &vnop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */
7161 { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */
7162 { &vnop_close_desc, (VOPFUNC)hfsspec_close }, /* close */
7163 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7164 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7165 { &vnop_read_desc, (VOPFUNC)hfsspec_read }, /* read */
7166 { &vnop_write_desc, (VOPFUNC)hfsspec_write }, /* write */
7167 { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */
7168 { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */
7169 { &vnop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */
7170 { &vnop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */
7171 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7172 { &vnop_remove_desc, (VOPFUNC)spec_remove }, /* remove */
7173 { &vnop_link_desc, (VOPFUNC)spec_link }, /* link */
7174 { &vnop_rename_desc, (VOPFUNC)spec_rename }, /* rename */
7175 { &vnop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */
7176 { &vnop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */
7177 { &vnop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */
7178 { &vnop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */
7179 { &vnop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */
7180 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7181 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7182 { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */
7183 { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */
7184 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7185 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
7186 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
7187 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
7188 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7189 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7190 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7191 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7192 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7193 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7194 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7195 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
7196 };
7197 struct vnodeopv_desc hfs_specop_opv_desc =
7198 { &hfs_specop_p, hfs_specop_entries };
7199
7200 #if FIFO
7201 /* HFS+ FIFO VNOP table */
7202 int (**hfs_fifoop_p)(void *);
7203 struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
7204 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7205 { &vnop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */
7206 { &vnop_create_desc, (VOPFUNC)fifo_create }, /* create */
7207 { &vnop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */
7208 { &vnop_open_desc, (VOPFUNC)fifo_open }, /* open */
7209 { &vnop_close_desc, (VOPFUNC)hfsfifo_close }, /* close */
7210 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7211 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7212 { &vnop_read_desc, (VOPFUNC)hfsfifo_read }, /* read */
7213 { &vnop_write_desc, (VOPFUNC)hfsfifo_write }, /* write */
7214 { &vnop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */
7215 { &vnop_select_desc, (VOPFUNC)fifo_select }, /* select */
7216 { &vnop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */
7217 { &vnop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */
7218 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7219 { &vnop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */
7220 { &vnop_link_desc, (VOPFUNC)fifo_link }, /* link */
7221 { &vnop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */
7222 { &vnop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */
7223 { &vnop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */
7224 { &vnop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */
7225 { &vnop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */
7226 { &vnop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */
7227 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7228 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7229 { &vnop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */
7230 { &vnop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */
7231 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7232 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
7233 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
7234 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
7235 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7236 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7237 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7238 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7239 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7240 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7241 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7242 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7243 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
7244 };
7245 struct vnodeopv_desc hfs_fifoop_opv_desc =
7246 { &hfs_fifoop_p, hfs_fifoop_entries };
7247 #endif /* FIFO */
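
/*
 * Illustrative sketch (not part of this file's build): vnop tables such as
 * the ones above are handed to the VFS as a NULL-less array of
 * vnodeopv_desc pointers inside a vfs_fsentry and registered with
 * vfs_fsadd().  The vfsops reference, flag choices, and function name here
 * are placeholders for the example; the real registration for hfs lives
 * elsewhere in the code base.
 */
#if 0
#include <sys/mount.h>

extern struct vfsops hfs_vfsops;	/* assumed to be defined elsewhere */

static struct vnodeopv_desc *hfs_example_opv_desc_list[] = {
	&hfs_vnodeop_opv_desc,
	&hfs_specop_opv_desc,
#if FIFO
	&hfs_fifoop_opv_desc,
#endif
};

static vfstable_t hfs_example_vfsconf;

static int
hfs_example_register_with_vfs(void)
{
	struct vfs_fsentry vfe = {
		.vfe_vfsops   = &hfs_vfsops,
		.vfe_vopcnt   = sizeof(hfs_example_opv_desc_list) /
		                sizeof(hfs_example_opv_desc_list[0]),
		.vfe_opvdescs = hfs_example_opv_desc_list,
		.vfe_fsname   = "hfs",
		.vfe_flags    = VFS_TBLNOTYPENUM | VFS_TBL64BITREADY,
	};

	/* Hand the vnop tables and vfsops to the VFS layer. */
	return vfs_fsadd(&vfe, &hfs_example_vfsconf);
}
#endif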