apple/hfs.git — core/hfs_vnops.c (hfs-556.41.1)
1 /*
2 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <libkern/OSAtomic.h>
30 #include <stdbool.h>
31 #include <sys/systm.h>
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/dirent.h>
35 #include <sys/stat.h>
36 #include <sys/buf.h>
37 #include <sys/mount.h>
38 #include <sys/vnode_if.h>
39 #include <sys/malloc.h>
40 #include <sys/ubc.h>
41 #include <sys/paths.h>
42 #include <sys/quota.h>
43 #include <sys/time.h>
44 #include <sys/disk.h>
45 #include <sys/kauth.h>
46 #include <sys/fsctl.h>
47 #include <sys/xattr.h>
48 #include <sys/decmpfs.h>
49 #include <sys/mman.h>
50 #include <sys/doc_tombstone.h>
51 #include <sys/namei.h>
52 #include <string.h>
53 #include <sys/fsevents.h>
54
55 #include <miscfs/specfs/specdev.h>
56 #include <miscfs/fifofs/fifo.h>
57 #include <vfs/vfs_support.h>
58
59 #include <sys/kdebug.h>
60 #include <sys/sysctl.h>
62
63 #include "hfs.h"
64 #include "hfs_catalog.h"
65 #include "hfs_cnode.h"
66 #include "hfs_dbg.h"
67 #include "hfs_mount.h"
68 #include "hfs_quota.h"
69 #include "hfs_endian.h"
70 #include "hfs_kdebug.h"
71 #include "hfs_cprotect.h"
72
73 #if HFS_CONFIG_KEY_ROLL
74 #include "hfs_key_roll.h"
75 #endif
76
77 #include "BTreesInternal.h"
78 #include "FileMgrInternal.h"
79
80 /* Global vfs data structures for hfs */
81
82 /*
83  * Always F_FULLFSYNC? 1=yes, 0=no (the default is 'no' for various reasons).
84  * At some point this might need to move into VFS and we might
85 * need to provide an API to get at it, but for now, this is only used
86 * by HFS+.
87 */
88 int always_do_fullfsync = 0;
89 SYSCTL_DECL(_vfs_generic);
90 HFS_SYSCTL(INT, _vfs_generic, OID_AUTO, always_do_fullfsync, CTLFLAG_RW | CTLFLAG_LOCKED, &always_do_fullfsync, 0, "always F_FULLFSYNC when fsync is called")
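/*
 * A minimal usage sketch, assuming the sysctl above lands in the standard
 * vfs.generic namespace; the knob can then be read and toggled from userspace:
 *
 *     sysctl vfs.generic.always_do_fullfsync            # read current value
 *     sudo sysctl -w vfs.generic.always_do_fullfsync=1  # treat every fsync as F_FULLFSYNC
 */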
91
92 int hfs_makenode(struct vnode *dvp, struct vnode **vpp,
93 struct componentname *cnp, struct vnode_attr *vap,
94 vfs_context_t ctx);
95 int hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p);
96 int hfs_metasync_all(struct hfsmount *hfsmp);
97
98 int hfs_removedir(struct vnode *, struct vnode *, struct componentname *,
99 int, int);
100 int hfs_removefile(struct vnode *, struct vnode *, struct componentname *,
101 int, int, int, struct vnode *, int);
102
103 /* Used here and in cnode teardown -- for symlinks */
104 int hfs_removefile_callback(struct buf *bp, void *hfsmp);
105
106 enum {
107 HFS_MOVE_DATA_INCLUDE_RSRC = 1,
108 };
109 typedef uint32_t hfs_move_data_options_t;
110
111 static int hfs_move_data(cnode_t *from_cp, cnode_t *to_cp,
112 hfs_move_data_options_t options);
113 static int hfs_move_fork(filefork_t *srcfork, cnode_t *src,
114 filefork_t *dstfork, cnode_t *dst);
115
116
117 static int hfs_exchangedata_getxattr (struct vnode *vp, uint32_t name_selector, void **buffer, size_t *xattr_size);
118 static int hfs_exchangedata_setxattr (struct hfsmount *hfsmp, uint32_t fileid,
119 uint32_t name_selector, void *buffer, size_t xattr_size);
120
121 enum XATTR_NAME_ENTRIES {
122 quarantine = 0,
123 MAX_NUM_XATTR_NAMES //must be last
124 };
125
126
127 /* These are special EAs that follow the content in exchangedata(2). */
128 const char *XATTR_NAMES [MAX_NUM_XATTR_NAMES] = { "com.apple.quarantine" };
129
130 #define MAX_EXCHANGE_EA_SIZE 4096
131
132 #if HFS_COMPRESSION
133 static int hfs_move_compressed(cnode_t *from_vp, cnode_t *to_vp);
134 #endif
135
136 decmpfs_cnode* hfs_lazy_init_decmpfs_cnode (struct cnode *cp);
137
138 #if FIFO
139 static int hfsfifo_read(struct vnop_read_args *);
140 static int hfsfifo_write(struct vnop_write_args *);
141 static int hfsfifo_close(struct vnop_close_args *);
142
143 extern int (**fifo_vnodeop_p)(void *);
144 #endif /* FIFO */
145
146 int hfs_vnop_close(struct vnop_close_args*);
147 int hfs_vnop_exchange(struct vnop_exchange_args*);
148 int hfs_vnop_fsync(struct vnop_fsync_args*);
149 int hfs_vnop_mkdir(struct vnop_mkdir_args*);
150 int hfs_vnop_mknod(struct vnop_mknod_args*);
151 int hfs_vnop_getattr(struct vnop_getattr_args*);
152 int hfs_vnop_open(struct vnop_open_args*);
153 int hfs_vnop_readdir(struct vnop_readdir_args*);
154 int hfs_vnop_rename(struct vnop_rename_args*);
155 int hfs_vnop_renamex(struct vnop_renamex_args*);
156 int hfs_vnop_rmdir(struct vnop_rmdir_args*);
157 int hfs_vnop_symlink(struct vnop_symlink_args*);
158 int hfs_vnop_setattr(struct vnop_setattr_args*);
159 int hfs_vnop_readlink(struct vnop_readlink_args *);
160 int hfs_vnop_pathconf(struct vnop_pathconf_args *);
161 int hfs_vnop_mmap(struct vnop_mmap_args *ap);
162 int hfsspec_read(struct vnop_read_args *);
163 int hfsspec_write(struct vnop_write_args *);
164 int hfsspec_close(struct vnop_close_args *);
165
166 /* Options for hfs_removedir and hfs_removefile */
167 #define HFSRM_SKIP_RESERVE 0x01
168
169
170
171 /*****************************************************************************
172 *
173 * Common Operations on vnodes
174 *
175 *****************************************************************************/
176
177 /*
178 * Is the given cnode either the .journal or .journal_info_block file on
179 * a volume with an active journal? Many VNOPs use this to deny access
180 * to those files.
181 *
182 * Note: the .journal file on a volume with an external journal still
183 * returns true here, even though it does not actually hold the contents
184 * of the volume's journal.
185 */
186 bool
187 hfs_is_journal_file(struct hfsmount *hfsmp, struct cnode *cp)
188 {
189 if (hfsmp->jnl != NULL &&
190 (cp->c_fileid == hfsmp->hfs_jnlinfoblkid ||
191 cp->c_fileid == hfsmp->hfs_jnlfileid)) {
192 return true;
193 } else {
194 return false;
195 }
196 }
197
198 /*
199 * Create a regular file.
200 */
201 int
202 hfs_vnop_create(struct vnop_create_args *ap)
203 {
204 /*
205 * We leave handling of certain race conditions here to the caller
206 * which will have a better understanding of the semantics it
207 * requires. For example, if it turns out that the file exists,
208 * it would be wrong of us to return a reference to the existing
209 * file because the caller might not want that and it would be
210 * misleading to suggest the file had been created when it hadn't
211 * been. Note that our NFS server code does not set the
212 * VA_EXCLUSIVE flag so you cannot assume that callers don't want
213 * EEXIST errors if it's not set. The common case, where users
214 * are calling open with the O_CREAT mode, is handled in VFS; when
215 * we return EEXIST, it will loop and do the look-up again.
216 */
217 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
218 }
219
220 /*
221 * Make device special file.
222 */
223 int
224 hfs_vnop_mknod(struct vnop_mknod_args *ap)
225 {
226 struct vnode_attr *vap = ap->a_vap;
227 struct vnode *dvp = ap->a_dvp;
228 struct vnode **vpp = ap->a_vpp;
229 struct cnode *cp;
230 int error;
231
232 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord) {
233 return (ENOTSUP);
234 }
235
236 /* Create the vnode */
237 error = hfs_makenode(dvp, vpp, ap->a_cnp, vap, ap->a_context);
238 if (error)
239 return (error);
240
241 cp = VTOC(*vpp);
242 cp->c_touch_acctime = TRUE;
243 cp->c_touch_chgtime = TRUE;
244 cp->c_touch_modtime = TRUE;
245
246 if ((vap->va_rdev != VNOVAL) &&
247 (vap->va_type == VBLK || vap->va_type == VCHR))
248 cp->c_rdev = vap->va_rdev;
249
250 return (0);
251 }
252
253 #if HFS_COMPRESSION
254 /*
255 * hfs_ref_data_vp(): returns the data fork vnode for a given cnode.
256 * In the (hopefully rare) case where the data fork vnode is not
257 * present, it will use hfs_vget() to create a new vnode for the
258 * data fork.
259 *
260 * NOTE: If successful and a vnode is returned, the caller is responsible
261 * for releasing the returned vnode with vnode_rele().
262 */
263 static int
264 hfs_ref_data_vp(struct cnode *cp, struct vnode **data_vp, int skiplock)
265 {
266 int vref = 0;
267
268 if (!data_vp || !cp) /* sanity check incoming parameters */
269 return EINVAL;
270
271 	/* take the hfs cnode lock here unless the skiplock parameter tells us not to */
272
273 if (!skiplock) hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
274 struct vnode *c_vp = cp->c_vp;
275 if (c_vp) {
276 /* we already have a data vnode */
277 *data_vp = c_vp;
278 vref = vnode_ref(*data_vp);
279 if (!skiplock) hfs_unlock(cp);
280 if (vref == 0) {
281 return 0;
282 }
283 return EINVAL;
284 }
285 /* no data fork vnode in the cnode, so ask hfs for one. */
286
287 if (!cp->c_rsrc_vp) {
288 /* if we don't have either a c_vp or c_rsrc_vp, we can't really do anything useful */
289 *data_vp = NULL;
290 if (!skiplock) hfs_unlock(cp);
291 return EINVAL;
292 }
293
294 	if (0 == hfs_vget(VTOHFS(cp->c_rsrc_vp), cp->c_cnid, data_vp, 1, 0) &&
295 		NULL != *data_vp) {
296 vref = vnode_ref(*data_vp);
297 vnode_put(*data_vp);
298 if (!skiplock) hfs_unlock(cp);
299 if (vref == 0) {
300 return 0;
301 }
302 return EINVAL;
303 }
304 /* there was an error getting the vnode */
305 *data_vp = NULL;
306 if (!skiplock) hfs_unlock(cp);
307 return EINVAL;
308 }
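/*
 * A minimal usage sketch of the vnode_rele() contract described above,
 * mirroring how this file calls hfs_ref_data_vp(); the surrounding
 * variables are hypothetical:
 *
 *     struct vnode *data_vp = NULL;
 *
 *     if (hfs_ref_data_vp(cp, &data_vp, 0) == 0 && data_vp) {
 *         // ... operate on the data fork vnode ...
 *         vnode_rele(data_vp);  // drop the usecount taken by hfs_ref_data_vp()
 *     }
 */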
309
310 /*
311 * hfs_lazy_init_decmpfs_cnode(): returns the decmpfs_cnode for a cnode,
312 * allocating it if necessary; returns NULL if there was an allocation error.
313 * function is non-static so that it can be used from the FCNTL handler.
314 */
315 decmpfs_cnode *
316 hfs_lazy_init_decmpfs_cnode(struct cnode *cp)
317 {
318 if (!cp->c_decmp) {
319 decmpfs_cnode *dp = decmpfs_cnode_alloc();
320 decmpfs_cnode_init(dp);
321 if (!OSCompareAndSwapPtr(NULL, dp, (void * volatile *)&cp->c_decmp)) {
322 /* another thread got here first, so free the decmpfs_cnode we allocated */
323 decmpfs_cnode_destroy(dp);
324 decmpfs_cnode_free(dp);
325 }
326 }
327
328 return cp->c_decmp;
329 }
330
331 /*
332 * hfs_file_is_compressed(): returns 1 if the file is compressed, and 0 (zero) if not.
333 * if the file's compressed flag is set, makes sure that the decmpfs_cnode field
334 * is allocated by calling hfs_lazy_init_decmpfs_cnode(), then makes sure it is populated,
335 * or else fills it in via the decmpfs_file_is_compressed() function.
336 */
337 int
338 hfs_file_is_compressed(struct cnode *cp, int skiplock)
339 {
340 int ret = 0;
341
342 /* fast check to see if file is compressed. If flag is clear, just answer no */
343 if (!(cp->c_bsdflags & UF_COMPRESSED)) {
344 return 0;
345 }
346
347 decmpfs_cnode *dp = hfs_lazy_init_decmpfs_cnode(cp);
348 if (!dp) {
349 /* error allocating a decmpfs cnode, treat the file as uncompressed */
350 return 0;
351 }
352
353 /* flag was set, see if the decmpfs_cnode state is valid (zero == invalid) */
354 uint32_t decmpfs_state = decmpfs_cnode_get_vnode_state(dp);
355 switch(decmpfs_state) {
356 case FILE_IS_COMPRESSED:
357 case FILE_IS_CONVERTING: /* treat decompressing files as if they are compressed */
358 return 1;
359 case FILE_IS_NOT_COMPRESSED:
360 return 0;
361 /* otherwise the state is not cached yet */
362 }
363
364 /* decmpfs hasn't seen this file yet, so call decmpfs_file_is_compressed() to init the decmpfs_cnode struct */
365 struct vnode *data_vp = NULL;
366 if (0 == hfs_ref_data_vp(cp, &data_vp, skiplock)) {
367 if (data_vp) {
368 ret = decmpfs_file_is_compressed(data_vp, VTOCMP(data_vp)); // fill in decmpfs_cnode
369 vnode_rele(data_vp);
370 }
371 }
372 return ret;
373 }
374
375 /* hfs_uncompressed_size_of_compressed_file() - get the uncompressed size of the file.
376 * if the caller has passed a valid vnode (has a ref count > 0), then hfsmp and fid are not required.
377 * if the caller doesn't have a vnode, pass NULL in vp, and pass valid hfsmp and fid.
378  * the file's size is returned in size (required)
379 * if the indicated file is a directory (or something that doesn't have a data fork), then this call
380 * will return an error and the caller should fall back to treating the item as an uncompressed file
381 */
382 int
383 hfs_uncompressed_size_of_compressed_file(struct hfsmount *hfsmp, struct vnode *vp, cnid_t fid, off_t *size, int skiplock)
384 {
385 int ret = 0;
386 int putaway = 0; /* flag to remember if we used hfs_vget() */
387
388 if (!size) {
389 return EINVAL; /* no place to put the file size */
390 }
391
392 if (NULL == vp) {
393 if (!hfsmp || !fid) { /* make sure we have the required parameters */
394 return EINVAL;
395 }
396 if (0 != hfs_vget(hfsmp, fid, &vp, skiplock, 0)) { /* vnode is null, use hfs_vget() to get it */
397 vp = NULL;
398 } else {
399 			putaway = 1;	/* note that hfs_vget() was used to acquire the vnode */
400 }
401 }
402 /* this double check for compression (hfs_file_is_compressed)
403 * ensures the cached size is present in case decmpfs hasn't
404 * encountered this node yet.
405 */
406 if (vp) {
407 if (hfs_file_is_compressed(VTOC(vp), skiplock) ) {
408 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
409 } else if (VTOCMP(vp)) {
410 uint32_t cmp_type = decmpfs_cnode_cmp_type(VTOCMP(vp));
411
412 if (cmp_type == DATALESS_CMPFS_TYPE) {
413 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
414 ret = 0;
415 } else if (cmp_type >= CMP_MAX && VTOC(vp)->c_datafork) {
416 // if we don't recognize this type, just use the real data fork size
417 *size = VTOC(vp)->c_datafork->ff_size;
418 ret = 0;
419 } else
420 ret = EINVAL;
421 } else
422 ret = EINVAL;
423 }
424
425 if (putaway) { /* did we use hfs_vget() to get this vnode? */
426 vnode_put(vp); /* if so, release it and set it to null */
427 vp = NULL;
428 }
429 return ret;
430 }
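/*
 * A sketch of the two calling modes described in the block comment above;
 * the variable names are hypothetical:
 *
 *     off_t logical_size = 0;
 *     int err;
 *
 *     // Mode 1: caller already holds a vnode with a positive ref count
 *     err = hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &logical_size, 0);
 *
 *     // Mode 2: no vnode in hand; identify the file by mount and file ID
 *     err = hfs_uncompressed_size_of_compressed_file(hfsmp, NULL, fid, &logical_size, 0);
 *
 *     // On error, fall back to treating the item as an uncompressed file.
 */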
431
432 int
433 hfs_hides_rsrc(vfs_context_t ctx, struct cnode *cp, int skiplock)
434 {
435 if (ctx == decmpfs_ctx)
436 return 0;
437 if (!hfs_file_is_compressed(cp, skiplock))
438 return 0;
439 return decmpfs_hides_rsrc(ctx, cp->c_decmp);
440 }
441
442 int
443 hfs_hides_xattr(vfs_context_t ctx, struct cnode *cp, const char *name, int skiplock)
444 {
445 if (ctx == decmpfs_ctx)
446 return 0;
447 if (!hfs_file_is_compressed(cp, skiplock))
448 return 0;
449 return decmpfs_hides_xattr(ctx, cp->c_decmp, name);
450 }
451 #endif /* HFS_COMPRESSION */
452
453 /*
454 * Open a file/directory.
455 */
456 int
457 hfs_vnop_open(struct vnop_open_args *ap)
458 {
459 struct vnode *vp = ap->a_vp;
460 struct filefork *fp;
461 struct timeval tv;
462 int error;
463 static int past_bootup = 0;
464 struct cnode *cp = VTOC(vp);
465 struct hfsmount *hfsmp = VTOHFS(vp);
466
467 #if CONFIG_PROTECT
468 error = cp_handle_open(vp, ap->a_mode);
469 if (error)
470 return error;
471 #endif
472
473 #if HFS_COMPRESSION
474 if (ap->a_mode & FWRITE) {
475 /* open for write */
476 if ( hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
477 /* opening a compressed file for write, so convert it to decompressed */
478 struct vnode *data_vp = NULL;
479 error = hfs_ref_data_vp(cp, &data_vp, 1); /* 1 == don't take the cnode lock */
480 if (0 == error) {
481 if (data_vp) {
482 error = decmpfs_decompress_file(data_vp, VTOCMP(data_vp), -1, 1, 0);
483 vnode_rele(data_vp);
484 } else {
485 error = EINVAL;
486 }
487 }
488 if (error != 0)
489 return error;
490 }
491 } else {
492 /* open for read */
493 if (hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
494 if (VNODE_IS_RSRC(vp)) {
495 /* opening the resource fork of a compressed file, so nothing to do */
496 } else {
497 /* opening a compressed file for read, make sure it validates */
498 error = decmpfs_validate_compressed_file(vp, VTOCMP(vp));
499 if (error != 0)
500 return error;
501 }
502 }
503 }
504 #endif
505
506 /*
507 * Files marked append-only must be opened for appending.
508 */
509 if ((cp->c_bsdflags & APPEND) && !vnode_isdir(vp) &&
510 (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
511 return (EPERM);
512
513 if (vnode_issystem(vp))
514 return (EBUSY); /* file is in use by the kernel */
515
516 /* Don't allow journal to be opened externally. */
517 if (hfs_is_journal_file(hfsmp, cp))
518 return (EPERM);
519
520 bool have_lock = false;
521
522 #if CONFIG_PROTECT
523 if (ISSET(ap->a_mode, FENCRYPTED) && cp->c_cpentry && vnode_isreg(vp)) {
524 bool have_trunc_lock = false;
525
526 #if HFS_CONFIG_KEY_ROLL
527 again:
528 #endif
529
530 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
531 if (have_trunc_lock)
532 hfs_unlock_truncate(cp, 0);
533 return error;
534 }
535
536 have_lock = true;
537
538 if (cp->c_cpentry->cp_raw_open_count + 1
539 < cp->c_cpentry->cp_raw_open_count) {
540 // Overflow; too many raw opens on this file
541 hfs_unlock(cp);
542 if (have_trunc_lock)
543 hfs_unlock_truncate(cp, 0);
544 return ENFILE;
545 }
546
547 #if HFS_CONFIG_KEY_ROLL
548 if (cp_should_auto_roll(hfsmp, cp->c_cpentry)) {
549 if (!have_trunc_lock) {
550 hfs_unlock(cp);
551 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, 0);
552 have_trunc_lock = true;
553 goto again;
554 }
555
556 error = hfs_key_roll_start(cp);
557 if (error) {
558 hfs_unlock(cp);
559 hfs_unlock_truncate(cp, 0);
560 return error;
561 }
562 }
563 #endif
564
565 if (have_trunc_lock)
566 hfs_unlock_truncate(cp, 0);
567
568 ++cp->c_cpentry->cp_raw_open_count;
569 }
570 #endif
571
572 if (ISSET(hfsmp->hfs_flags, HFS_READ_ONLY)
573 || !vnode_isreg(vp)
574 #if NAMEDSTREAMS
575 || vnode_isnamedstream(vp)
576 #endif
577 || !hfsmp->jnl || vnode_isinuse(vp, 0)) {
578
579 #if CONFIG_PROTECT
580 if (have_lock)
581 hfs_unlock(cp);
582 #endif
583
584 return (0);
585 }
586
587 if (!have_lock && (error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
588 return (error);
589
590 #if QUOTA
591 /* If we're going to write to the file, initialize quotas. */
592 if ((ap->a_mode & FWRITE) && (hfsmp->hfs_flags & HFS_QUOTAS))
593 (void)hfs_getinoquota(cp);
594 #endif /* QUOTA */
595
596 /*
597 	 * On the first (non-busy) open of a fragmented file, attempt to
598 	 * de-frag it if it's smaller than hfs_defrag_max bytes.
599 	 * That field is initially set to 20MB.
600 */
601 fp = VTOF(vp);
602 if (fp->ff_blocks &&
603 fp->ff_extents[7].blockCount != 0 &&
604 fp->ff_size <= hfsmp->hfs_defrag_max) {
605
606 int no_mods = 0;
607 struct timeval now;
608 /*
609 * Wait until system bootup is done (3 min).
610 * And don't relocate a file that's been modified
611 * within the past minute -- this can lead to
612 * system thrashing.
613 */
614
615 if (hfsmp->hfs_defrag_nowait) {
616 /* If this is toggled, then issue the defrag if appropriate */
617 past_bootup = 1;
618 no_mods = 1;
619 }
620
621 if (!past_bootup) {
622 microuptime(&tv);
623 if (tv.tv_sec > (60*3)) {
624 past_bootup = 1;
625 }
626 }
627
628 microtime(&now);
629 if ((now.tv_sec - cp->c_mtime) > 60) {
630 no_mods = 1;
631 }
632
633 if (past_bootup && no_mods) {
634 (void) hfs_relocate(vp, hfsmp->nextAllocation + 4096,
635 vfs_context_ucred(ap->a_context),
636 vfs_context_proc(ap->a_context));
637 }
638 }
639
640 hfs_unlock(cp);
641
642 return (0);
643 }
644
645
646 /*
647 * Close a file/directory.
648 */
649 int
650 hfs_vnop_close(struct vnop_close_args *ap)
651 {
652 register struct vnode *vp = ap->a_vp;
653 register struct cnode *cp;
654 struct proc *p = vfs_context_proc(ap->a_context);
655 struct hfsmount *hfsmp;
656 int busy;
657 int tooktrunclock = 0;
658 int knownrefs = 0;
659
660 if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0)
661 return (0);
662 cp = VTOC(vp);
663 hfsmp = VTOHFS(vp);
664
665 #if CONFIG_PROTECT
666 if (cp->c_cpentry && ISSET(ap->a_fflag, FENCRYPTED) && vnode_isreg(vp)) {
667 hfs_assert(cp->c_cpentry->cp_raw_open_count > 0);
668 --cp->c_cpentry->cp_raw_open_count;
669 }
670 #endif
671
672 /*
673 * If the rsrc fork is a named stream, it can cause the data fork to
674 * stay around, preventing de-allocation of these blocks.
675 * Do checks for truncation on close. Purge extra extents if they exist.
676 * Make sure the vp is not a directory, and that it has a resource fork,
677 * and that resource fork is also a named stream.
678 */
679
680 if ((vnode_vtype(vp) == VREG) && (cp->c_rsrc_vp)
681 && (vnode_isnamedstream(cp->c_rsrc_vp))) {
682 uint32_t blks;
683
684 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
685 /*
686 * If there are extra blocks and there are only 2 refs on
687 * this vp (ourselves + rsrc fork holding ref on us), go ahead
688 * and try to truncate.
689 */
690 if ((blks < VTOF(vp)->ff_blocks) && (!vnode_isinuse(vp, 2))) {
691 // release cnode lock; must acquire truncate lock BEFORE cnode lock
692 hfs_unlock(cp);
693
694 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
695 tooktrunclock = 1;
696
697 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
698 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
699 // bail out if we can't re-acquire cnode lock
700 return 0;
701 }
702 // now re-test to make sure it's still valid
703 if (cp->c_rsrc_vp) {
704 knownrefs = 1 + vnode_isnamedstream(cp->c_rsrc_vp);
705 if (!vnode_isinuse(vp, knownrefs)){
706 // now we can truncate the file, if necessary
707 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
708 if (blks < VTOF(vp)->ff_blocks){
709 (void) hfs_truncate(vp, VTOF(vp)->ff_size, IO_NDELAY,
710 0, ap->a_context);
711 }
712 }
713 }
714 }
715 }
716
717
718 // if we froze the fs and we're exiting, then "thaw" the fs
719 if (hfsmp->hfs_freeze_state == HFS_FROZEN
720 && hfsmp->hfs_freezing_proc == p && proc_exiting(p)) {
721 hfs_thaw(hfsmp, p);
722 }
723
724 busy = vnode_isinuse(vp, 1);
725
726 if (busy) {
727 hfs_touchtimes(VTOHFS(vp), cp);
728 }
729 if (vnode_isdir(vp)) {
730 hfs_reldirhints(cp, busy);
731 } else if (vnode_issystem(vp) && !busy) {
732 vnode_recycle(vp);
733 }
734
735 if (tooktrunclock){
736 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
737 }
738 hfs_unlock(cp);
739
740 if (ap->a_fflag & FWASWRITTEN) {
741 hfs_sync_ejectable(hfsmp);
742 }
743
744 return (0);
745 }
746
747 static bool hfs_should_generate_document_id(hfsmount_t *hfsmp, cnode_t *cp)
748 {
749 return (!ISSET(hfsmp->hfs_flags, HFS_READ_ONLY)
750 && ISSET(cp->c_bsdflags, UF_TRACKED)
751 && cp->c_desc.cd_cnid != kHFSRootFolderID
752 && (S_ISDIR(cp->c_mode) || S_ISREG(cp->c_mode) || S_ISLNK(cp->c_mode)));
753 }
754
755 /*
756 * Get basic attributes.
757 */
758 int
759 hfs_vnop_getattr(struct vnop_getattr_args *ap)
760 {
761 #define VNODE_ATTR_TIMES \
762 (VNODE_ATTR_va_access_time|VNODE_ATTR_va_change_time|VNODE_ATTR_va_modify_time)
763 #define VNODE_ATTR_AUTH \
764 (VNODE_ATTR_va_mode | VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | \
765 VNODE_ATTR_va_flags | VNODE_ATTR_va_acl)
766
767 struct vnode *vp = ap->a_vp;
768 struct vnode_attr *vap = ap->a_vap;
769 struct vnode *rvp = NULLVP;
770 struct hfsmount *hfsmp;
771 struct cnode *cp;
772 uint64_t data_size;
773 enum vtype v_type;
774 int error = 0;
775 cp = VTOC(vp);
776
777 #if HFS_COMPRESSION
778 /* we need to inspect the decmpfs state of the file before we take the hfs cnode lock */
779 int compressed = 0;
780 int hide_size = 0;
781 off_t uncompressed_size = -1;
782 if (VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_alloc) || VATTR_IS_ACTIVE(vap, va_data_alloc) || VATTR_IS_ACTIVE(vap, va_total_size)) {
783 /* we only care about whether the file is compressed if asked for the uncompressed size */
784 if (VNODE_IS_RSRC(vp)) {
785 /* if it's a resource fork, decmpfs may want us to hide the size */
786 hide_size = hfs_hides_rsrc(ap->a_context, cp, 0);
787 } else {
788 /* if it's a data fork, we need to know if it was compressed so we can report the uncompressed size */
789 compressed = hfs_file_is_compressed(cp, 0);
790 }
791 if ((VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_size))) {
792 // if it's compressed
793 if (compressed || (!VNODE_IS_RSRC(vp) && cp->c_decmp && decmpfs_cnode_cmp_type(cp->c_decmp) >= CMP_MAX)) {
794 if (0 != hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &uncompressed_size, 0)) {
795 /* failed to get the uncompressed size, we'll check for this later */
796 uncompressed_size = -1;
797 } else {
798 // fake that it's compressed
799 compressed = 1;
800 }
801 }
802 }
803 }
804 #endif
805
806 /*
807 * Shortcut for vnode_authorize path. Each of the attributes
808 * in this set is updated atomically so we don't need to take
809 * the cnode lock to access them.
810 */
811 if ((vap->va_active & ~VNODE_ATTR_AUTH) == 0) {
812 /* Make sure file still exists. */
813 if (cp->c_flag & C_NOEXISTS)
814 return (ENOENT);
815
816 vap->va_uid = cp->c_uid;
817 vap->va_gid = cp->c_gid;
818 vap->va_mode = cp->c_mode;
819 vap->va_flags = cp->c_bsdflags;
820 vap->va_supported |= VNODE_ATTR_AUTH & ~VNODE_ATTR_va_acl;
821
822 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
823 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
824 VATTR_SET_SUPPORTED(vap, va_acl);
825 }
826
827 return (0);
828 }
829
830 hfsmp = VTOHFS(vp);
831 v_type = vnode_vtype(vp);
832
833 if (VATTR_IS_ACTIVE(vap, va_document_id)) {
834 uint32_t document_id;
835
836 if (cp->c_desc.cd_cnid == kHFSRootFolderID)
837 document_id = kHFSRootFolderID;
838 else {
839 /*
840 * This is safe without a lock because we're just reading
841 * a 32 bit aligned integer which should be atomic on all
842 * platforms we support.
843 */
844 document_id = hfs_get_document_id(cp);
845
846 if (!document_id && hfs_should_generate_document_id(hfsmp, cp)) {
847 uint32_t new_document_id;
848
849 error = hfs_generate_document_id(hfsmp, &new_document_id);
850 if (error)
851 return error;
852
853 error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
854 if (error)
855 return error;
856
857 bool want_docid_fsevent = false;
858
859 // Need to check again now that we have the lock
860 document_id = hfs_get_document_id(cp);
861 if (!document_id && hfs_should_generate_document_id(hfsmp, cp)) {
862 cp->c_attr.ca_finderextendeddirinfo.document_id = document_id = new_document_id;
863 want_docid_fsevent = true;
864 SET(cp->c_flag, C_MODIFIED);
865 }
866
867 hfs_unlock(cp);
868
869 if (want_docid_fsevent) {
870 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
871 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
872 FSE_ARG_INO, (ino64_t)0, // src inode #
873 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
874 FSE_ARG_INT32, document_id,
875 FSE_ARG_DONE);
876
877 if (need_fsevent(FSE_STAT_CHANGED, vp)) {
878 add_fsevent(FSE_STAT_CHANGED, ap->a_context,
879 FSE_ARG_VNODE, vp, FSE_ARG_DONE);
880 }
881 }
882 }
883 }
884
885 vap->va_document_id = document_id;
886 VATTR_SET_SUPPORTED(vap, va_document_id);
887 }
888
889 /*
890 * If time attributes are requested and we have cnode times
891 * that require updating, then acquire an exclusive lock on
892 * the cnode before updating the times. Otherwise we can
893 * just acquire a shared lock.
894 */
895 if ((vap->va_active & VNODE_ATTR_TIMES) &&
896 (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime)) {
897 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
898 return (error);
899 hfs_touchtimes(hfsmp, cp);
900
901 // downgrade to a shared lock since that's all we need from here on out
902 cp->c_lockowner = HFS_SHARED_OWNER;
903 lck_rw_lock_exclusive_to_shared(&cp->c_rwlock);
904
905 } else if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
906 return (error);
907 }
908
909 if (v_type == VDIR) {
910 data_size = (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE;
911
912 if (VATTR_IS_ACTIVE(vap, va_nlink)) {
913 int nlink;
914
915 /*
916 			 * For directories, the va_nlink is essentially a count
917 * of the ".." references to a directory plus the "."
918 * reference and the directory itself. So for HFS+ this
919 * becomes the sub-directory count plus two.
920 *
921 * In the absence of a sub-directory count we use the
922 * directory's item count. This will be too high in
923 * most cases since it also includes files.
924 */
925 if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
926 (cp->c_attr.ca_recflags & kHFSHasFolderCountMask))
927 nlink = cp->c_attr.ca_dircount; /* implied ".." entries */
928 else
929 nlink = cp->c_entries;
930
931 /* Account for ourself and our "." entry */
932 nlink += 2;
933 /* Hide our private directories. */
934 if (cp->c_cnid == kHFSRootFolderID) {
935 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0) {
936 --nlink;
937 }
938 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0) {
939 --nlink;
940 }
941 }
942 VATTR_RETURN(vap, va_nlink, (u_int64_t)nlink);
943 }
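/*
 * Worked example of the computation above: with the folder count available,
 * a directory holding 3 sub-directories (plus any number of plain files)
 * reports va_nlink == 3 + 2 == 5 -- one implied ".." per child, its own "."
 * entry, and the directory itself.
 */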
944 if (VATTR_IS_ACTIVE(vap, va_nchildren)) {
945 int entries;
946
947 entries = cp->c_entries;
948 /* Hide our private files and directories. */
949 if (cp->c_cnid == kHFSRootFolderID) {
950 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0)
951 --entries;
952 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0)
953 --entries;
954 if (hfsmp->jnl || ((hfsmp->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))
955 entries -= 2; /* hide the journal files */
956 }
957 VATTR_RETURN(vap, va_nchildren, entries);
958 }
959 /*
960 * The va_dirlinkcount is the count of real directory hard links.
961 		 * (i.e. it's not the sum of the implied "." and ".." references)
962 */
963 if (VATTR_IS_ACTIVE(vap, va_dirlinkcount)) {
964 VATTR_RETURN(vap, va_dirlinkcount, (uint32_t)cp->c_linkcount);
965 }
966 } else /* !VDIR */ {
967 data_size = VCTOF(vp, cp)->ff_size;
968
969 VATTR_RETURN(vap, va_nlink, (u_int64_t)cp->c_linkcount);
970 if (VATTR_IS_ACTIVE(vap, va_data_alloc)) {
971 u_int64_t blocks;
972
973 #if HFS_COMPRESSION
974 if (hide_size) {
975 VATTR_RETURN(vap, va_data_alloc, 0);
976 } else if (compressed) {
977 /* for compressed files, we report all allocated blocks as belonging to the data fork */
978 blocks = cp->c_blocks;
979 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
980 }
981 else
982 #endif
983 {
984 blocks = VCTOF(vp, cp)->ff_blocks;
985 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
986 }
987 }
988 }
989
990 /* conditional because 64-bit arithmetic can be expensive */
991 if (VATTR_IS_ACTIVE(vap, va_total_size)) {
992 if (v_type == VDIR) {
993 VATTR_RETURN(vap, va_total_size, (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE);
994 } else {
995 u_int64_t total_size = ~0ULL;
996 struct cnode *rcp;
997 #if HFS_COMPRESSION
998 if (hide_size) {
999 /* we're hiding the size of this file, so just return 0 */
1000 total_size = 0;
1001 } else if (compressed) {
1002 if (uncompressed_size == -1) {
1003 /*
1004 * We failed to get the uncompressed size above,
1005 * so we'll fall back to the standard path below
1006 * since total_size is still -1
1007 */
1008 } else {
1009 /* use the uncompressed size we fetched above */
1010 total_size = uncompressed_size;
1011 }
1012 }
1013 #endif
1014 if (total_size == ~0ULL) {
1015 if (cp->c_datafork) {
1016 total_size = cp->c_datafork->ff_size;
1017 }
1018
1019 if (cp->c_blocks - VTOF(vp)->ff_blocks) {
1020 /* We deal with rsrc fork vnode iocount at the end of the function */
1021 error = hfs_vgetrsrc(hfsmp, vp, &rvp);
1022 if (error) {
1023 /*
1024 * Note that we call hfs_vgetrsrc with error_on_unlinked
1025 * set to FALSE. This is because we may be invoked via
1026 * fstat() on an open-unlinked file descriptor and we must
1027 * continue to support access to the rsrc fork until it disappears.
1028 * The code at the end of this function will be
1029 * responsible for releasing the iocount generated by
1030 * hfs_vgetrsrc. This is because we can't drop the iocount
1031 * without unlocking the cnode first.
1032 */
1033 goto out;
1034 }
1035
1036 rcp = VTOC(rvp);
1037 if (rcp && rcp->c_rsrcfork) {
1038 total_size += rcp->c_rsrcfork->ff_size;
1039 }
1040 }
1041 }
1042
1043 VATTR_RETURN(vap, va_total_size, total_size);
1044 }
1045 }
1046 if (VATTR_IS_ACTIVE(vap, va_total_alloc)) {
1047 if (v_type == VDIR) {
1048 VATTR_RETURN(vap, va_total_alloc, 0);
1049 } else {
1050 VATTR_RETURN(vap, va_total_alloc, (u_int64_t)cp->c_blocks * (u_int64_t)hfsmp->blockSize);
1051 }
1052 }
1053
1054 /*
1055 * If the VFS wants extended security data, and we know that we
1056 * don't have any (because it never told us it was setting any)
1057 * then we can return the supported bit and no data. If we do
1058 * have extended security, we can just leave the bit alone and
1059 * the VFS will use the fallback path to fetch it.
1060 */
1061 if (VATTR_IS_ACTIVE(vap, va_acl)) {
1062 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
1063 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
1064 VATTR_SET_SUPPORTED(vap, va_acl);
1065 }
1066 }
1067
1068 vap->va_access_time.tv_sec = cp->c_atime;
1069 vap->va_access_time.tv_nsec = 0;
1070 vap->va_create_time.tv_sec = cp->c_itime;
1071 vap->va_create_time.tv_nsec = 0;
1072 vap->va_modify_time.tv_sec = cp->c_mtime;
1073 vap->va_modify_time.tv_nsec = 0;
1074 vap->va_change_time.tv_sec = cp->c_ctime;
1075 vap->va_change_time.tv_nsec = 0;
1076 vap->va_backup_time.tv_sec = cp->c_btime;
1077 vap->va_backup_time.tv_nsec = 0;
1078
1079 /* See if we need to emit the date added field to the user */
1080 if (VATTR_IS_ACTIVE(vap, va_addedtime)) {
1081 u_int32_t dateadded = hfs_get_dateadded (cp);
1082 if (dateadded) {
1083 vap->va_addedtime.tv_sec = dateadded;
1084 vap->va_addedtime.tv_nsec = 0;
1085 VATTR_SET_SUPPORTED (vap, va_addedtime);
1086 }
1087 }
1088
1089 /* XXX is this really a good 'optimal I/O size'? */
1090 vap->va_iosize = hfsmp->hfs_logBlockSize;
1091 vap->va_uid = cp->c_uid;
1092 vap->va_gid = cp->c_gid;
1093 vap->va_mode = cp->c_mode;
1094 vap->va_flags = cp->c_bsdflags;
1095
1096 /*
1097 * Exporting file IDs from HFS Plus:
1098 *
1099 * For "normal" files the c_fileid is the same value as the
1100 * c_cnid. But for hard link files, they are different - the
1101 * c_cnid belongs to the active directory entry (ie the link)
1102 * and the c_fileid is for the actual inode (ie the data file).
1103 *
1104 * The stat call (getattr) uses va_fileid and the Carbon APIs,
1105 * which are hardlink-ignorant, will ask for va_linkid.
1106 */
1107 vap->va_fileid = (u_int64_t)cp->c_fileid;
1108 /*
1109 * We need to use the origin cache for both hardlinked files
1110 * and directories. Hardlinked directories have multiple cnids
1111 * and parents (one per link). Hardlinked files also have their
1112 * own parents and link IDs separate from the indirect inode number.
1113 * If we don't use the cache, we could end up vending the wrong ID
1114 * because the cnode will only reflect the link that was looked up most recently.
1115 */
1116 if (cp->c_flag & C_HARDLINK) {
1117 vap->va_linkid = (u_int64_t)hfs_currentcnid(cp);
1118 vap->va_parentid = (u_int64_t)hfs_currentparent(cp, /* have_lock: */ true);
1119 } else {
1120 vap->va_linkid = (u_int64_t)cp->c_cnid;
1121 vap->va_parentid = (u_int64_t)cp->c_parentcnid;
1122 }
1123
1124 vap->va_fsid = hfsmp->hfs_raw_dev;
1125 if (VATTR_IS_ACTIVE(vap, va_devid)) {
1126 VATTR_RETURN(vap, va_devid, hfsmp->hfs_raw_dev);
1127 }
1128 vap->va_filerev = 0;
1129 vap->va_encoding = cp->c_encoding;
1130 vap->va_rdev = (v_type == VBLK || v_type == VCHR) ? cp->c_rdev : 0;
1131 #if HFS_COMPRESSION
1132 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1133 if (hide_size)
1134 vap->va_data_size = 0;
1135 else if (compressed) {
1136 if (uncompressed_size == -1) {
1137 /* failed to get the uncompressed size above, so just return data_size */
1138 vap->va_data_size = data_size;
1139 } else {
1140 /* use the uncompressed size we fetched above */
1141 vap->va_data_size = uncompressed_size;
1142 }
1143 } else
1144 vap->va_data_size = data_size;
1145 VATTR_SET_SUPPORTED(vap, va_data_size);
1146 }
1147 #else
1148 vap->va_data_size = data_size;
1149 vap->va_supported |= VNODE_ATTR_va_data_size;
1150 #endif
1151
1152 #if CONFIG_PROTECT
1153 if (VATTR_IS_ACTIVE(vap, va_dataprotect_class)) {
1154 vap->va_dataprotect_class = cp->c_cpentry ? CP_CLASS(cp->c_cpentry->cp_pclass) : 0;
1155 VATTR_SET_SUPPORTED(vap, va_dataprotect_class);
1156 }
1157 #endif
1158 if (VATTR_IS_ACTIVE(vap, va_write_gencount)) {
1159 if (ubc_is_mapped_writable(vp)) {
1160 /*
1161 * Return 0 to the caller to indicate the file may be
1162 * changing. There is no need for us to increment the
1163 * generation counter here because it gets done as part of
1164 * page-out and also when the file is unmapped (to account
1165 * for changes we might not have seen).
1166 */
1167 vap->va_write_gencount = 0;
1168 } else {
1169 vap->va_write_gencount = hfs_get_gencount(cp);
1170 }
1171
1172 VATTR_SET_SUPPORTED(vap, va_write_gencount);
1173 }
1174
1175 /* Mark them all at once instead of individual VATTR_SET_SUPPORTED calls. */
1176 vap->va_supported |= VNODE_ATTR_va_access_time |
1177 VNODE_ATTR_va_create_time | VNODE_ATTR_va_modify_time |
1178 VNODE_ATTR_va_change_time| VNODE_ATTR_va_backup_time |
1179 VNODE_ATTR_va_iosize | VNODE_ATTR_va_uid |
1180 VNODE_ATTR_va_gid | VNODE_ATTR_va_mode |
1181 VNODE_ATTR_va_flags |VNODE_ATTR_va_fileid |
1182 VNODE_ATTR_va_linkid | VNODE_ATTR_va_parentid |
1183 VNODE_ATTR_va_fsid | VNODE_ATTR_va_filerev |
1184 VNODE_ATTR_va_encoding | VNODE_ATTR_va_rdev;
1185
1186 	/* If this is the root, let VFS find out the mount name, which
1187 	 * may be different from the real name. Otherwise, we need to take care
1188 	 * of hardlinked files, which need to be looked up if necessary.
1189 	 */
1190 if (VATTR_IS_ACTIVE(vap, va_name) && (cp->c_cnid != kHFSRootFolderID)) {
1191 struct cat_desc linkdesc;
1192 int lockflags;
1193 int uselinkdesc = 0;
1194 cnid_t nextlinkid = 0;
1195 cnid_t prevlinkid = 0;
1196
1197 /* Get the name for ATTR_CMN_NAME. We need to take special care for hardlinks
1198 		 * here because the info for the link ID requested by getattrlist may be
1199 		 * different from what's currently in the cnode. This is because the cnode
1200 * will be filled in with the information for the most recent link ID that went
1201 * through namei/lookup(). If there are competing lookups for hardlinks that point
1202 * to the same inode, one (or more) getattrlists could be vended incorrect name information.
1203 * Also, we need to beware of open-unlinked files which could have a namelen of 0.
1204 */
1205
1206 if ((cp->c_flag & C_HARDLINK) &&
1207 ((cp->c_desc.cd_namelen == 0) || (vap->va_linkid != cp->c_cnid))) {
1208 /*
1209 * If we have no name and our link ID is the raw inode number, then we may
1210 * have an open-unlinked file. Go to the next link in this case.
1211 */
1212 if ((cp->c_desc.cd_namelen == 0) && (vap->va_linkid == cp->c_fileid)) {
1213 if ((error = hfs_lookup_siblinglinks(hfsmp, vap->va_linkid, &prevlinkid, &nextlinkid))){
1214 goto out;
1215 }
1216 }
1217 else {
1218 /* just use link obtained from vap above */
1219 nextlinkid = vap->va_linkid;
1220 }
1221
1222 /* We need to probe the catalog for the descriptor corresponding to the link ID
1223 * stored in nextlinkid. Note that we don't know if we have the exclusive lock
1224 * for the cnode here, so we can't just update the descriptor. Instead,
1225 * we should just store the descriptor's value locally and then use it to pass
1226 * out the name value as needed below.
1227 */
1228 if (nextlinkid){
1229 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
1230 error = cat_findname(hfsmp, nextlinkid, &linkdesc);
1231 hfs_systemfile_unlock(hfsmp, lockflags);
1232 if (error == 0) {
1233 uselinkdesc = 1;
1234 }
1235 }
1236 }
1237
1238 /* By this point, we've either patched up the name above and the c_desc
1239 * points to the correct data, or it already did, in which case we just proceed
1240 * by copying the name into the vap. Note that we will never set va_name to
1241 * supported if nextlinkid is never initialized. This could happen in the degenerate
1242 * case above involving the raw inode number, where it has no nextlinkid. In this case
1243 * we will simply not mark the name bit as supported.
1244 */
1245 if (uselinkdesc) {
1246 strlcpy(vap->va_name, (const char*) linkdesc.cd_nameptr, MAXPATHLEN);
1247 VATTR_SET_SUPPORTED(vap, va_name);
1248 cat_releasedesc(&linkdesc);
1249 }
1250 else if (cp->c_desc.cd_namelen) {
1251 strlcpy(vap->va_name, (const char*) cp->c_desc.cd_nameptr, MAXPATHLEN);
1252 VATTR_SET_SUPPORTED(vap, va_name);
1253 }
1254 }
1255
1256 out:
1257 hfs_unlock(cp);
1258 /*
1259 * We need to vnode_put the rsrc fork vnode only *after* we've released
1260 * the cnode lock, since vnode_put can trigger an inactive call, which
1261 * will go back into HFS and try to acquire a cnode lock.
1262 */
1263 if (rvp) {
1264 vnode_put (rvp);
1265 }
1266
1267 return (error);
1268 }
1269
1270 int
1271 hfs_set_bsd_flags(struct hfsmount *hfsmp, struct cnode *cp,
1272 u_int32_t new_bsd_flags, u_int32_t document_id,
1273 vfs_context_t ctx, int *compression_changedp)
1274 {
1275 u_int16_t *fdFlags;
1276
1277 if ((new_bsd_flags & UF_TRACKED) && !(cp->c_bsdflags & UF_TRACKED)) {
1278 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
1279
1280 //
1281 // we're marking this item UF_TRACKED. if the document_id is
1282 // not set, get a new one and put it on the file.
1283 //
1284 if (fip->document_id == 0) {
1285 if (document_id != 0) {
1286 // printf("SETATTR: assigning doc-id %d to %s (ino %d)\n", document_id, vp->v_name, cp->c_desc.cd_cnid);
1287 fip->document_id = (uint32_t)document_id;
1288 add_fsevent(FSE_DOCID_CHANGED, ctx,
1289 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
1290 FSE_ARG_INO, (ino64_t)0, // src inode #
1291 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
1292 FSE_ARG_INT32, document_id,
1293 FSE_ARG_DONE);
1294 } else {
1295 // printf("hfs: could not acquire a new document_id for %s (ino %d)\n", vp->v_name, cp->c_desc.cd_cnid);
1296 }
1297 }
1298
1299 } else if (!(new_bsd_flags & UF_TRACKED) && (cp->c_bsdflags & UF_TRACKED)) {
1300 //
1301 // UF_TRACKED is being cleared so clear the document_id
1302 //
1303 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
1304 if (fip->document_id) {
1305 // printf("SETATTR: clearing doc-id %d from %s (ino %d)\n", fip->document_id, vp->v_name, cp->c_desc.cd_cnid);
1306 add_fsevent(FSE_DOCID_CHANGED, ctx,
1307 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
1308 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
1309 FSE_ARG_INO, (ino64_t)0, // dst inode #
1310 FSE_ARG_INT32, fip->document_id, // document id
1311 FSE_ARG_DONE);
1312 fip->document_id = 0;
1313 cp->c_bsdflags &= ~UF_TRACKED;
1314 }
1315 }
1316
1317 #if HFS_COMPRESSION
1318 if ((cp->c_bsdflags ^ new_bsd_flags) & UF_COMPRESSED) {
1319 /*
1320 * the UF_COMPRESSED was toggled, so reset our cached compressed state
1321 * but we don't want to actually do the update until we've released the cnode lock down below
1322 * NOTE: turning the flag off doesn't actually decompress the file, so that we can
1323 * turn off the flag and look at the "raw" file for debugging purposes
1324 */
1325 *compression_changedp = 1;
1326 }
1327 #endif
1328
1329 cp->c_bsdflags = new_bsd_flags;
1330 cp->c_flag |= C_MODIFIED;
1331 cp->c_touch_chgtime = TRUE;
1332
1333 /*
1334 * Mirror the UF_HIDDEN flag to the invisible bit of the Finder Info.
1335 *
1336 * The fdFlags for files and frFlags for folders are both 8 bytes
1337 * into the userInfo (the first 16 bytes of the Finder Info). They
1338 * are both 16-bit fields.
1339 */
1340 fdFlags = (u_int16_t *) &cp->c_finderinfo[8];
1341 if (new_bsd_flags & UF_HIDDEN)
1342 *fdFlags |= OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1343 else
1344 *fdFlags &= ~OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1345
1346 return 0;
1347 }
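/*
 * For example, a userspace
 *
 *     chflags hidden /path/to/file     (chflags nohidden to undo)
 *
 * is expected to reach this function via hfs_vnop_setattr's va_flags handling,
 * so the kFinderInvisibleMask bit in the Finder Info is flipped along with
 * UF_HIDDEN and the two stay in sync.
 */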
1348
1349 int
1350 hfs_vnop_setattr(struct vnop_setattr_args *ap)
1351 {
1352 struct vnode_attr *vap = ap->a_vap;
1353 struct vnode *vp = ap->a_vp;
1354 struct cnode *cp = NULL;
1355 struct hfsmount *hfsmp;
1356 kauth_cred_t cred = vfs_context_ucred(ap->a_context);
1357 struct proc *p = vfs_context_proc(ap->a_context);
1358 int error = 0;
1359 uid_t nuid;
1360 gid_t ngid;
1361 time_t orig_ctime;
1362
1363 orig_ctime = VTOC(vp)->c_ctime;
1364
1365 #if HFS_COMPRESSION
1366 int decmpfs_reset_state = 0;
1367 /*
1368 we call decmpfs_update_attributes even if the file is not compressed
1369 because we want to update the incoming flags if the xattrs are invalid
1370 */
1371 error = decmpfs_update_attributes(vp, vap);
1372 if (error)
1373 return error;
1374 #endif
1375 //
1376 // if this is not a size-changing setattr and it is not just
1377 // an atime update, then check for a snapshot.
1378 //
1379 if (!VATTR_IS_ACTIVE(vap, va_data_size) && !(vap->va_active == VNODE_ATTR_va_access_time)) {
1380 nspace_snapshot_event(vp, orig_ctime, NAMESPACE_HANDLER_METADATA_MOD, NSPACE_REARM_NO_ARG);
1381 }
1382
1383 #if CONFIG_PROTECT
1384 /*
1385 * All metadata changes should be allowed except a size-changing setattr, which
1386 * has effects on file content and requires calling into cp_handle_vnop
1387 * to have content protection check.
1388 */
1389 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1390 if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
1391 return (error);
1392 }
1393 }
1394 #endif /* CONFIG_PROTECT */
1395
1396 hfsmp = VTOHFS(vp);
1397
1398 /* Don't allow modification of the journal. */
1399 if (hfs_is_journal_file(hfsmp, VTOC(vp))) {
1400 return (EPERM);
1401 }
1402
1403 //
1404 	// Check if we'll need a document_id and if so, get it before we lock
1405 	// the cnode to avoid any possible deadlock with the root vnode which has
1406 // to get locked to get the document id
1407 //
1408 u_int32_t document_id=0;
1409 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & UF_TRACKED) && !(VTOC(vp)->c_bsdflags & UF_TRACKED)) {
1410 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&(VTOC(vp)->c_attr.ca_finderinfo) + 16);
1411 //
1412 // If the document_id is not set, get a new one. It will be set
1413 // on the file down below once we hold the cnode lock.
1414 //
1415 if (fip->document_id == 0) {
1416 if (hfs_generate_document_id(hfsmp, &document_id) != 0) {
1417 document_id = 0;
1418 }
1419 }
1420 }
1421
1422
1423 /*
1424 * File size change request.
1425 * We are guaranteed that this is not a directory, and that
1426 * the filesystem object is writeable.
1427 *
1428 * NOTE: HFS COMPRESSION depends on the data_size being set *before* the bsd flags are updated
1429 */
1430 VATTR_SET_SUPPORTED(vap, va_data_size);
1431 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1432 if (!vnode_isreg(vp)) {
1433 if (vnode_isdir(vp)) {
1434 return EISDIR;
1435 }
1436 //otherwise return EINVAL
1437 return EINVAL;
1438 }
1439
1440 #if HFS_COMPRESSION
1441 /* keep the compressed state locked until we're done truncating the file */
1442 decmpfs_cnode *dp = VTOCMP(vp);
1443 if (!dp) {
1444 /*
1445 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1446 * is filled in; we need a decmpfs_cnode to lock out decmpfs state changes
1447 * on this file while it's truncating
1448 */
1449 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1450 if (!dp) {
1451 /* failed to allocate a decmpfs_cnode */
1452 return ENOMEM; /* what should this be? */
1453 }
1454 }
1455
1456 nspace_snapshot_event(vp, orig_ctime, vap->va_data_size == 0 ? NAMESPACE_HANDLER_TRUNCATE_OP|NAMESPACE_HANDLER_DELETE_OP : NAMESPACE_HANDLER_TRUNCATE_OP, NULL);
1457
1458 decmpfs_lock_compressed_data(dp, 1);
1459 if (hfs_file_is_compressed(VTOC(vp), 1)) {
1460 error = decmpfs_decompress_file(vp, dp, -1/*vap->va_data_size*/, 0, 1);
1461 if (error != 0) {
1462 decmpfs_unlock_compressed_data(dp, 1);
1463 return error;
1464 }
1465 }
1466 #endif
1467
1468 // Take truncate lock
1469 hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
1470
1471 // hfs_truncate will deal with the cnode lock
1472 error = hfs_truncate(vp, vap->va_data_size, vap->va_vaflags & 0xffff,
1473 0, ap->a_context);
1474
1475 hfs_unlock_truncate(VTOC(vp), HFS_LOCK_DEFAULT);
1476 #if HFS_COMPRESSION
1477 decmpfs_unlock_compressed_data(dp, 1);
1478 #endif
1479 if (error)
1480 return error;
1481 }
1482 if (cp == NULL) {
1483 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
1484 return (error);
1485 cp = VTOC(vp);
1486 }
1487
1488 /*
1489 * If it is just an access time update request by itself
1490 * we know the request is from kernel level code, and we
1491 * can delay it without being as worried about consistency.
1492 * This change speeds up mmaps, in the rare case that they
1493 * get caught behind a sync.
1494 */
1495
1496 if (vap->va_active == VNODE_ATTR_va_access_time) {
1497 cp->c_touch_acctime=TRUE;
1498 goto out;
1499 }
1500
1501
1502
1503 /*
1504 * Owner/group change request.
1505 * We are guaranteed that the new owner/group is valid and legal.
1506 */
1507 VATTR_SET_SUPPORTED(vap, va_uid);
1508 VATTR_SET_SUPPORTED(vap, va_gid);
1509 nuid = VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uid_t)VNOVAL;
1510 ngid = VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (gid_t)VNOVAL;
1511 if (((nuid != (uid_t)VNOVAL) || (ngid != (gid_t)VNOVAL)) &&
1512 ((error = hfs_chown(vp, nuid, ngid, cred, p)) != 0))
1513 goto out;
1514
1515 /*
1516 * Mode change request.
1517 * We are guaranteed that the mode value is valid and that in
1518 * conjunction with the owner and group, this change is legal.
1519 */
1520 VATTR_SET_SUPPORTED(vap, va_mode);
1521 if (VATTR_IS_ACTIVE(vap, va_mode) &&
1522 ((error = hfs_chmod(vp, (int)vap->va_mode, cred, p)) != 0))
1523 goto out;
1524
1525 /*
1526 * File flags change.
1527 * We are guaranteed that only flags allowed to change given the
1528 * current securelevel are being changed.
1529 */
1530 VATTR_SET_SUPPORTED(vap, va_flags);
1531 if (VATTR_IS_ACTIVE(vap, va_flags)) {
1532 if ((error = hfs_set_bsd_flags(hfsmp, cp, vap->va_flags, document_id,
1533 ap->a_context,
1534 &decmpfs_reset_state)) != 0) {
1535 goto out;
1536 }
1537 }
1538
1539 /*
1540 * Timestamp updates.
1541 */
1542 VATTR_SET_SUPPORTED(vap, va_create_time);
1543 VATTR_SET_SUPPORTED(vap, va_access_time);
1544 VATTR_SET_SUPPORTED(vap, va_modify_time);
1545 VATTR_SET_SUPPORTED(vap, va_backup_time);
1546 VATTR_SET_SUPPORTED(vap, va_change_time);
1547 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
1548 VATTR_IS_ACTIVE(vap, va_access_time) ||
1549 VATTR_IS_ACTIVE(vap, va_modify_time) ||
1550 VATTR_IS_ACTIVE(vap, va_backup_time)) {
1551 if (VATTR_IS_ACTIVE(vap, va_create_time))
1552 cp->c_itime = vap->va_create_time.tv_sec;
1553 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
1554 cp->c_atime = vap->va_access_time.tv_sec;
1555 cp->c_touch_acctime = FALSE;
1556 }
1557 if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
1558 cp->c_mtime = vap->va_modify_time.tv_sec;
1559 cp->c_touch_modtime = FALSE;
1560 cp->c_touch_chgtime = TRUE;
1561
1562 hfs_clear_might_be_dirty_flag(cp);
1563
1564 /*
1565 * The utimes system call can reset the modification
1566 * time but it doesn't know about HFS create times.
1567 * So we need to ensure that the creation time is
1568 * always at least as old as the modification time.
1569 */
1570 if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
1571 (cp->c_cnid != kHFSRootFolderID) &&
1572 !VATTR_IS_ACTIVE(vap, va_create_time) &&
1573 (cp->c_mtime < cp->c_itime)) {
1574 cp->c_itime = cp->c_mtime;
1575 }
1576 }
1577 if (VATTR_IS_ACTIVE(vap, va_backup_time))
1578 cp->c_btime = vap->va_backup_time.tv_sec;
1579 cp->c_flag |= C_MINOR_MOD;
1580 }
1581
1582 // Set the date added time
1583 VATTR_SET_SUPPORTED(vap, va_addedtime);
1584 if (VATTR_IS_ACTIVE(vap, va_addedtime)) {
1585 hfs_write_dateadded(&cp->c_attr, vap->va_addedtime.tv_sec);
1586 cp->c_flag &= ~C_NEEDS_DATEADDED;
1587 cp->c_touch_chgtime = true;
1588 }
1589
1590 /*
1591 * Set name encoding.
1592 */
1593 VATTR_SET_SUPPORTED(vap, va_encoding);
1594 if (VATTR_IS_ACTIVE(vap, va_encoding)) {
1595 cp->c_encoding = vap->va_encoding;
1596 cp->c_flag |= C_MODIFIED;
1597 hfs_setencodingbits(hfsmp, cp->c_encoding);
1598 }
1599
1600 if ((error = hfs_update(vp, 0)) != 0)
1601 goto out;
1602
1603 out:
1604 if (cp) {
1605 /* Purge origin cache for cnode, since caller now has correct link ID for it
1606 * We purge it here since it was acquired for us during lookup, and we no longer need it.
1607 */
1608 if ((cp->c_flag & C_HARDLINK) && (vnode_vtype(vp) != VDIR)){
1609 hfs_relorigin(cp, 0);
1610 }
1611
1612 hfs_unlock(cp);
1613 #if HFS_COMPRESSION
1614 if (decmpfs_reset_state) {
1615 /*
1616 * we've changed the UF_COMPRESSED flag, so reset the decmpfs state for this cnode
1617 * but don't do it while holding the hfs cnode lock
1618 */
1619 decmpfs_cnode *dp = VTOCMP(vp);
1620 if (!dp) {
1621 /*
1622 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1623 * is filled in; we need a decmpfs_cnode to prevent decmpfs state changes
1624 * on this file if it's locked
1625 */
1626 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1627 if (!dp) {
1628 /* failed to allocate a decmpfs_cnode */
1629 return ENOMEM; /* what should this be? */
1630 }
1631 }
1632 decmpfs_cnode_set_vnode_state(dp, FILE_TYPE_UNKNOWN, 0);
1633 }
1634 #endif
1635 }
1636
1637 #if CONFIG_PROTECT
1638 VATTR_SET_SUPPORTED(vap, va_dataprotect_class);
1639 if (!error && VATTR_IS_ACTIVE(vap, va_dataprotect_class))
1640 error = cp_vnode_setclass(vp, vap->va_dataprotect_class);
1641 #endif
1642
1643 return (error);
1644 }
1645
1646
1647 /*
1648 * Change the mode on a file.
1649 * cnode must be locked before calling.
1650 */
1651 int
1652 hfs_chmod(struct vnode *vp, int mode, __unused kauth_cred_t cred, __unused struct proc *p)
1653 {
1654 register struct cnode *cp = VTOC(vp);
1655
1656 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1657 return (0);
1658
1659 // Don't allow modification of the journal or journal_info_block
1660 if (hfs_is_journal_file(VTOHFS(vp), cp)) {
1661 return EPERM;
1662 }
1663
1664 #if OVERRIDE_UNKNOWN_PERMISSIONS
1665 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS) {
1666 return (0);
1667 };
1668 #endif
1669
1670 mode_t new_mode = (cp->c_mode & ~ALLPERMS) | (mode & ALLPERMS);
1671 if (new_mode != cp->c_mode) {
1672 cp->c_mode = new_mode;
1673 cp->c_flag |= C_MINOR_MOD;
1674 }
1675 cp->c_touch_chgtime = TRUE;
1676 return (0);
1677 }
1678
1679
1680 int
1681 hfs_write_access(struct vnode *vp, kauth_cred_t cred, struct proc *p, Boolean considerFlags)
1682 {
1683 struct cnode *cp = VTOC(vp);
1684 int retval = 0;
1685 int is_member;
1686
1687 /*
1688 * Disallow write attempts on read-only file systems;
1689 * unless the file is a socket, fifo, or a block or
1690 * character device resident on the file system.
1691 */
1692 switch (vnode_vtype(vp)) {
1693 case VDIR:
1694 case VLNK:
1695 case VREG:
1696 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
1697 return (EROFS);
1698 break;
1699 default:
1700 break;
1701 }
1702
1703 /* If immutable bit set, nobody gets to write it. */
1704 if (considerFlags && (cp->c_bsdflags & IMMUTABLE))
1705 return (EPERM);
1706
1707 /* Otherwise, user id 0 always gets access. */
1708 if (!suser(cred, NULL))
1709 return (0);
1710
1711 /* Otherwise, check the owner. */
1712 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
1713 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
1714
1715 /* Otherwise, check the groups. */
1716 if (kauth_cred_ismember_gid(cred, cp->c_gid, &is_member) == 0 && is_member) {
1717 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
1718 }
1719
1720 /* Otherwise, check everyone else. */
1721 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
1722 }
1723
1724
1725 /*
1726 * Perform chown operation on cnode cp;
1727  * cnode must be locked prior to call.
1728 */
1729 int
1730 #if !QUOTA
1731 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, __unused kauth_cred_t cred,
1732 __unused struct proc *p)
1733 #else
1734 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred,
1735 __unused struct proc *p)
1736 #endif
1737 {
1738 register struct cnode *cp = VTOC(vp);
1739 uid_t ouid;
1740 gid_t ogid;
1741 #if QUOTA
1742 int error = 0;
1743 register int i;
1744 int64_t change;
1745 #endif /* QUOTA */
1746
1747 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1748 return (ENOTSUP);
1749
1750 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS)
1751 return (0);
1752
1753 if (uid == (uid_t)VNOVAL)
1754 uid = cp->c_uid;
1755 if (gid == (gid_t)VNOVAL)
1756 gid = cp->c_gid;
1757
1758 #if 0 /* we are guaranteed that this is already the case */
1759 /*
1760 * If we don't own the file, are trying to change the owner
1761 * of the file, or are not a member of the target group,
1762 * the caller must be superuser or the call fails.
1763 */
1764 if ((kauth_cred_getuid(cred) != cp->c_uid || uid != cp->c_uid ||
1765 (gid != cp->c_gid &&
1766 (kauth_cred_ismember_gid(cred, gid, &is_member) || !is_member))) &&
1767 (error = suser(cred, 0)))
1768 return (error);
1769 #endif
1770
1771 ogid = cp->c_gid;
1772 ouid = cp->c_uid;
1773
1774 if (ouid == uid && ogid == gid) {
1775 // No change, just set change time
1776 cp->c_touch_chgtime = TRUE;
1777 return 0;
1778 }
1779
1780 #if QUOTA
1781 if ((error = hfs_getinoquota(cp)))
1782 return (error);
1783 if (ouid == uid) {
1784 dqrele(cp->c_dquot[USRQUOTA]);
1785 cp->c_dquot[USRQUOTA] = NODQUOT;
1786 }
1787 if (ogid == gid) {
1788 dqrele(cp->c_dquot[GRPQUOTA]);
1789 cp->c_dquot[GRPQUOTA] = NODQUOT;
1790 }
1791
1792 /*
1793 * Eventually need to account for (fake) a block per directory
1794 * if (vnode_isdir(vp))
1795 * change = VTOHFS(vp)->blockSize;
1796 * else
1797 */
1798
1799 change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
1800 (void) hfs_chkdq(cp, -change, cred, CHOWN);
1801 (void) hfs_chkiq(cp, -1, cred, CHOWN);
1802 for (i = 0; i < MAXQUOTAS; i++) {
1803 dqrele(cp->c_dquot[i]);
1804 cp->c_dquot[i] = NODQUOT;
1805 }
1806 #endif /* QUOTA */
1807 cp->c_gid = gid;
1808 cp->c_uid = uid;
1809 #if QUOTA
1810 if ((error = hfs_getinoquota(cp)) == 0) {
1811 if (ouid == uid) {
1812 dqrele(cp->c_dquot[USRQUOTA]);
1813 cp->c_dquot[USRQUOTA] = NODQUOT;
1814 }
1815 if (ogid == gid) {
1816 dqrele(cp->c_dquot[GRPQUOTA]);
1817 cp->c_dquot[GRPQUOTA] = NODQUOT;
1818 }
1819 if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
1820 if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
1821 goto good;
1822 else
1823 (void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
1824 }
1825 for (i = 0; i < MAXQUOTAS; i++) {
1826 dqrele(cp->c_dquot[i]);
1827 cp->c_dquot[i] = NODQUOT;
1828 }
1829 }
1830 cp->c_gid = ogid;
1831 cp->c_uid = ouid;
1832 if (hfs_getinoquota(cp) == 0) {
1833 if (ouid == uid) {
1834 dqrele(cp->c_dquot[USRQUOTA]);
1835 cp->c_dquot[USRQUOTA] = NODQUOT;
1836 }
1837 if (ogid == gid) {
1838 dqrele(cp->c_dquot[GRPQUOTA]);
1839 cp->c_dquot[GRPQUOTA] = NODQUOT;
1840 }
1841 (void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
1842 (void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
1843 (void) hfs_getinoquota(cp);
1844 }
1845 return (error);
1846 good:
1847 if (hfs_getinoquota(cp))
1848 panic("hfs_chown: lost quota");
1849 #endif /* QUOTA */
1850
1851 /*
1852 * Without quotas, we could probably make this a minor
1853 * modification.
1854 */
1855 cp->c_flag |= C_MODIFIED;
1856
1857 /*
1858 * According to the SUSv3 Standard, chown() shall mark
1859 * for update the st_ctime field of the file.
1860 * (No exceptions mentioned.)
1861 */
1862 cp->c_touch_chgtime = TRUE;
1863 return (0);
1864 }
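/*
 * Shape of the QUOTA transfer performed by hfs_chown() above, with the
 * error handling, dquot re-acquisition and rollback elided; a sketch
 * under the assumption that quotas are enabled ('change' is the byte
 * count moving between the old and new owners):
 */
#if 0 /* illustrative sketch -- not compiled */
static void chown_quota_shape(struct cnode *cp, kauth_cred_t cred, int64_t change)
{
	(void) hfs_chkdq(cp, -change, cred, CHOWN);	/* uncharge the old uid/gid */
	(void) hfs_chkiq(cp, -1, cred, CHOWN);		/* uncharge one inode       */

	/* ... cp->c_uid / cp->c_gid are switched to the new owner here ... */

	(void) hfs_chkdq(cp, change, cred, CHOWN);	/* charge the new uid/gid   */
	(void) hfs_chkiq(cp, 1, cred, CHOWN);		/* charge one inode         */
}
#endif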
1865
1866 #if HFS_COMPRESSION
1867 /*
1868 * Flush the resource fork if it exists. vp is the data fork and has
1869 * an iocount.
1870 */
1871 static int hfs_flush_rsrc(vnode_t vp, vfs_context_t ctx)
1872 {
1873 cnode_t *cp = VTOC(vp);
1874
1875 hfs_lock(cp, HFS_SHARED_LOCK, 0);
1876
1877 vnode_t rvp = cp->c_rsrc_vp;
1878
1879 if (!rvp) {
1880 hfs_unlock(cp);
1881 return 0;
1882 }
1883
1884 int vid = vnode_vid(rvp);
1885
1886 hfs_unlock(cp);
1887
1888 int error = vnode_getwithvid(rvp, vid);
1889
1890 if (error)
1891 return error == ENOENT ? 0 : error;
1892
1893 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, 0);
1894 hfs_lock_always(cp, HFS_EXCLUSIVE_LOCK);
1895 hfs_filedone(rvp, ctx, HFS_FILE_DONE_NO_SYNC);
1896 hfs_unlock(cp);
1897 hfs_unlock_truncate(cp, 0);
1898
1899 error = ubc_msync(rvp, 0, ubc_getsize(rvp), NULL,
1900 UBC_PUSHALL | UBC_SYNC);
1901
1902 vnode_put(rvp);
1903
1904 return error;
1905 }
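/*
 * The vid dance in hfs_flush_rsrc() above is the usual pattern for
 * taking an iocount on a sibling vnode without holding the cnode lock
 * across vnode_getwithvid(); a condensed sketch (hypothetical helper):
 */
#if 0 /* illustrative sketch -- not compiled */
static int take_rsrc_iocount(cnode_t *cp, vnode_t *rvpp)
{
	hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	vnode_t rvp = cp->c_rsrc_vp;
	if (!rvp) {
		hfs_unlock(cp);
		return ENOENT;
	}
	int vid = vnode_vid(rvp);		/* remember the vnode identity ... */
	hfs_unlock(cp);				/* ... drop the cnode lock ...     */
	int error = vnode_getwithvid(rvp, vid);	/* ... then revalidate and take    */
	if (!error)				/*     the iocount in one step     */
		*rvpp = rvp;
	return error;
}
#endif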
1906 #endif // HFS_COMPRESSION
1907
1908
1909 /* Helper Functions for exchangedata(2) */
1910
1911 /*
1912 * hfs_exchangedata_getxattr
1913 * arguments:
1914 * vp: vnode to extract the EA for
1915 * name_selector: the index into the array of EA name entries.
1916 * buffer: address for output buffer to store the output EA
1917 * NOTE: This function allocates the buffer; it is the caller's responsibility to free it.
1918 * xattr_size: output argument; will return the size of the EA, to correspond with the buffer.
1919 *
1920 * Return: 0 on success.
1921 * errno on error. If we return any error, the buffer is guaranteed to be NULL.
1922 *
1923 * Assumes CNODE lock held on cnode for 'vp'
1924 */
1925 static
1926 int hfs_exchangedata_getxattr (struct vnode *vp, uint32_t name_selector, void **buffer, size_t *xattr_size) {
1927 void *xattr_rawdata = NULL;
1928 void *extracted_xattr = NULL;
1929 uio_t uio;
1930 size_t memsize = MAX_EXCHANGE_EA_SIZE;
1931 size_t attrsize;
1932 int error = 0;
1933 struct hfsmount *hfsmp = NULL;
1934
1935 /* Sanity check inputs */
1936 if (name_selector > MAX_NUM_XATTR_NAMES) {
1937 return EINVAL;
1938 }
1939
1940 if (buffer == NULL || xattr_size == NULL) {
1941 return EINVAL;
1942 }
1943
1944 hfsmp = VTOHFS(vp);
1945
1946 //allocate 4k of memory to hold the EA. We don't use this for "large" EAs, and the default
1947 //EA B-tree size should produce inline attributes of size < 4K
1948 xattr_rawdata = hfs_malloc (MAX_EXCHANGE_EA_SIZE);
1949 if (!xattr_rawdata) {
1950 return ENOMEM;
1951 }
1952
1953 //now create the UIO
1954 uio = uio_create (1, 0, UIO_SYSSPACE, UIO_READ);
1955 if (!uio) {
1956 hfs_free (xattr_rawdata, memsize);
1957 return ENOMEM;
1958 }
1959 uio_addiov(uio, CAST_USER_ADDR_T(xattr_rawdata), memsize);
1960 attrsize = memsize;
1961
1962 struct vnop_getxattr_args vga = {
1963 .a_uio = uio,
1964 .a_name = XATTR_NAMES[name_selector],
1965 .a_size = &attrsize
1966 };
1967
1968 // this takes care of grabbing the systemfile locks for us.
1969 error = hfs_getxattr_internal (VTOC(vp), &vga, hfsmp, 0);
1970
1971 if (error) {
1972 /*
1973 * We could have gotten a variety of errors back from the XATTR tree:
1974 * is it too big? (bigger than 4k?) == ERANGE
1975 * was the EA not found? == ENOATTR
1976 */
1977 uio_free(uio);
1978 hfs_free (xattr_rawdata, memsize);
1979 return error;
1980 }
1981
1982 //free the UIO
1983 uio_free(uio);
1984
1985 //upon success, a_size/attrsize now contains the actual/exported EA size
1986 extracted_xattr = hfs_malloc (attrsize);
1987 memcpy (extracted_xattr, xattr_rawdata, attrsize);
1988 hfs_free (xattr_rawdata, memsize);
1989
1990 *xattr_size = attrsize;
1991 *buffer = extracted_xattr;
1992
1993 return error;
1994 }
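/*
 * Typical call pattern for the helper above, mirroring its use in
 * hfs_vnop_exchange() further below; the caller owns and must free the
 * returned buffer (hypothetical wrapper, illustrative only):
 */
#if 0 /* illustrative sketch -- not compiled */
static int quarantine_ea_fetch_example(struct vnode *vp)
{
	void  *ea = NULL;
	size_t ea_size = 0;

	int err = hfs_exchangedata_getxattr(vp, quarantine, &ea, &ea_size);
	if (err == ENOATTR)
		err = 0;			/* a missing quarantine EA is not fatal */

	/* ... use the EA ... */

	if (ea)
		hfs_free(ea, ea_size);		/* caller frees on success */
	return err;
}
#endif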
1995
1996
1997 /*
1998 * hfs_exchangedata_setxattr
1999 *
2000 * Note: This function takes fileIDs in as inputs, because exchangedata does
2001 * swizzly things with the two cnodes (See big block comment in hfs_vnop_exchange)
2002 * so we operate with FileIDs more or less directly on the XATTR b-tree.
2003 *
2004 * arguments:
2005 * hfsmp: the mount we're working on
2006 * fileid: the fileID of the EA to store into the tree.
2007 * name_selector: selector into the EA name array.
2008 * buffer: pointer to the memory of the EA to write.
2009 * xattr_size: size of the EA to write.
2010 *
2011 * Returns 0 on success
2012 * errno on failure
2013 *
2014 * Assumes that a transaction has already begun when this is called
2015 */
2016
2017 static
2018 int hfs_exchangedata_setxattr (struct hfsmount *hfsmp, uint32_t fileid,
2019 uint32_t name_selector, void *buffer, size_t xattr_size) {
2020
2021 int error = 0;
2022
2023
2024 /* Sanity check arguments */
2025 if (name_selector > MAX_NUM_XATTR_NAMES) {
2026 return EINVAL;
2027 }
2028
2029 if (buffer == NULL || xattr_size == 0 || fileid < kHFSFirstUserCatalogNodeID ) {
2030 return EINVAL;
2031 }
2032
2033 // is the size too big?
2034 if (xattr_size > hfsmp->hfs_max_inline_attrsize) {
2035 return EINVAL;
2036 }
2037
2038 /* setup the arguments to setxattr*/
2039 struct vnop_setxattr_args vsa = {
2040 .a_desc = NULL,
2041 .a_vp = NULL,
2042 .a_name = XATTR_NAMES[name_selector],
2043 .a_uio = NULL, // we use the data_ptr argument to setxattr_internal instead
2044 .a_options = 0,
2045 .a_context = NULL // no context needed, only done from within exchangedata
2046 };
2047
2048 /*
2049 * Since we must be in a transaction to guard the exchangedata operation, this will start
2050 * a nested transaction within the exchangedata one.
2051 */
2052 error = hfs_setxattr_internal (NULL, (caddr_t) buffer, xattr_size, &vsa, hfsmp, fileid);
2053
2054 return error;
2055
2056 }
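/*
 * Together, the two helpers above implement the "quarantine EA follows
 * the file content" hand-off used later in hfs_vnop_exchange(): remove
 * the EA from one file ID, then set it on the counterpart inside the
 * caller's transaction. A condensed sketch (hypothetical helper):
 */
#if 0 /* illustrative sketch -- not compiled */
static void quarantine_ea_handoff(struct hfsmount *hfsmp, uint32_t src_fileid,
    uint32_t dst_fileid, void *ea_buf, size_t ea_size)
{
	/* drop the EA from the source file ID ... */
	if (hfs_removexattr_by_id(hfsmp, src_fileid, XATTR_NAMES[quarantine]) == 0) {
		/* ... and re-attach it under the counterpart file ID */
		hfs_exchangedata_setxattr(hfsmp, dst_fileid, quarantine,
		    ea_buf, ea_size);
	}
}
#endif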
2057
2058 /*
2059 * hfs_vnop_exchange:
2060 *
2061 * Inputs:
2062 * 'from' vnode/cnode
2063 * 'to' vnode/cnode
2064 * options flag bits
2065 * vfs_context
2066 *
2067 * Discussion:
2068 * hfs_vnop_exchange is used to service the exchangedata(2) system call.
2069 * Per the requirements of that system call, this function "swaps" some
2070 * of the information that lives in one catalog record for some that
2071 * lives in another. Note that not everything is swapped; in particular,
2072 * the extent information stored in each cnode is kept local to that
2073 * cnode. This allows existing file descriptor references to continue
2074 * to operate on the same content, regardless of the location in the
2075 * namespace that the file may have moved to. See inline comments
2076 * in the function for more information.
2077 */
2078 int
2079 hfs_vnop_exchange(struct vnop_exchange_args *ap)
2080 {
2081 struct vnode *from_vp = ap->a_fvp;
2082 struct vnode *to_vp = ap->a_tvp;
2083 struct cnode *from_cp;
2084 struct cnode *to_cp;
2085 struct hfsmount *hfsmp;
2086 struct cat_desc tempdesc;
2087 struct cat_attr tempattr;
2088 const unsigned char *from_nameptr;
2089 const unsigned char *to_nameptr;
2090 char from_iname[32];
2091 char to_iname[32];
2092 uint32_t to_flag_special;
2093 uint32_t from_flag_special;
2094
2095 uint16_t to_recflags_special;
2096 uint16_t from_recflags_special;
2097
2098 cnid_t from_parid;
2099 cnid_t to_parid;
2100 int lockflags;
2101 int error = 0, started_tr = 0, got_cookie = 0;
2102 cat_cookie_t cookie;
2103 time_t orig_from_ctime, orig_to_ctime;
2104 bool have_cnode_locks = false, have_from_trunc_lock = false, have_to_trunc_lock = false;
2105
2106 /* For the quarantine EA */
2107 void *from_xattr = NULL;
2108 void *to_xattr = NULL;
2109 size_t from_attrsize = 0;
2110 size_t to_attrsize = 0;
2111
2112
2113 /*
2114 * VFS does the following checks:
2115 * 1. Validate that both are files.
2116 * 2. Validate that both are on the same mount.
2117 * 3. Validate that they're not the same vnode.
2118 */
2119
2120 from_cp = VTOC(from_vp);
2121 to_cp = VTOC(to_vp);
2122 hfsmp = VTOHFS(from_vp);
2123
2124 orig_from_ctime = from_cp->c_ctime;
2125 orig_to_ctime = to_cp->c_ctime;
2126
2127 #if CONFIG_PROTECT
2128 /*
2129 * Do not allow exchangedata/F_MOVEDATAEXTENTS on data-protected filesystems
2130 * because the EAs will not be swapped. As a result, the persistent keys would not
2131 * match and the files will be garbage.
2132 */
2133 if (cp_fs_protected (vnode_mount(from_vp))) {
2134 return EINVAL;
2135 }
2136 #endif
2137
2138 #if HFS_COMPRESSION
2139 if (!ISSET(ap->a_options, FSOPT_EXCHANGE_DATA_ONLY)) {
2140 if ( hfs_file_is_compressed(from_cp, 0) ) {
2141 if ( 0 != ( error = decmpfs_decompress_file(from_vp, VTOCMP(from_vp), -1, 0, 1) ) ) {
2142 return error;
2143 }
2144 }
2145
2146 if ( hfs_file_is_compressed(to_cp, 0) ) {
2147 if ( 0 != ( error = decmpfs_decompress_file(to_vp, VTOCMP(to_vp), -1, 0, 1) ) ) {
2148 return error;
2149 }
2150 }
2151 }
2152 #endif // HFS_COMPRESSION
2153
2154 // Resource forks cannot be exchanged.
2155 if (VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp))
2156 return EINVAL;
2157
2158 /*
2159 * Normally, we want to notify the user handlers about the event,
2160 * except if it's a handler driving the event.
2161 */
2162 if ((ap->a_options & FSOPT_EXCHANGE_DATA_ONLY) == 0) {
2163 nspace_snapshot_event(from_vp, orig_from_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
2164 nspace_snapshot_event(to_vp, orig_to_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
2165 } else {
2166 /*
2167 * This is currently used by mtmd so we should tidy up the
2168 * file now because the data won't be used again in the
2169 * destination file.
2170 */
2171 hfs_lock_truncate(from_cp, HFS_EXCLUSIVE_LOCK, 0);
2172 hfs_lock_always(from_cp, HFS_EXCLUSIVE_LOCK);
2173 hfs_filedone(from_vp, ap->a_context, HFS_FILE_DONE_NO_SYNC);
2174 hfs_unlock(from_cp);
2175 hfs_unlock_truncate(from_cp, 0);
2176
2177 // Flush all the data from the source file
2178 error = ubc_msync(from_vp, 0, ubc_getsize(from_vp), NULL,
2179 UBC_PUSHALL | UBC_SYNC);
2180 if (error)
2181 goto exit;
2182
2183 #if HFS_COMPRESSION
2184 /*
2185 * If this is a compressed file, we need to do the same for
2186 * the resource fork.
2187 */
2188 if (ISSET(from_cp->c_bsdflags, UF_COMPRESSED)) {
2189 error = hfs_flush_rsrc(from_vp, ap->a_context);
2190 if (error)
2191 goto exit;
2192 }
2193 #endif
2194
2195 /*
2196 * We're doing a data-swap so we need to take the truncate
2197 * lock exclusively. We need an exclusive lock because we
2198 * will be completely truncating the source file and we must
2199 * make sure nobody else sneaks in and tries to issue I/O
2200 * whilst we don't have the cnode lock.
2201 *
2202 * After taking the truncate lock we do a quick check to
2203 * verify there are no other references (including mmap
2204 * references), but we must remember that this does not stop
2205 * anybody coming in later and taking a reference. We will
2206 * have the truncate lock exclusively so that will prevent
2207 * them from issuing any I/O.
2208 */
2209
2210 if (to_cp < from_cp) {
2211 hfs_lock_truncate(to_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2212 have_to_trunc_lock = true;
2213 }
2214
2215 hfs_lock_truncate(from_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2216 have_from_trunc_lock = true;
2217
2218 /*
2219 * Do an early check to verify the source is not in use by
2220 * anyone. We should be called from an FD opened as O_EVTONLY
2221 * so that doesn't count as a reference.
2222 */
2223 if (vnode_isinuse(from_vp, 0)) {
2224 error = EBUSY;
2225 goto exit;
2226 }
2227
2228 if (to_cp >= from_cp) {
2229 hfs_lock_truncate(to_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2230 have_to_trunc_lock = true;
2231 }
2232 }
2233
2234 if ((error = hfs_lockpair(from_cp, to_cp, HFS_EXCLUSIVE_LOCK)))
2235 goto exit;
2236 have_cnode_locks = true;
2237
2238 // Don't allow modification of the journal or journal_info_block
2239 if (hfs_is_journal_file(hfsmp, from_cp) ||
2240 hfs_is_journal_file(hfsmp, to_cp)) {
2241 error = EPERM;
2242 goto exit;
2243 }
2244
2245 /*
2246 * If doing a data move, then call the underlying function.
2247 */
2248 if (ISSET(ap->a_options, FSOPT_EXCHANGE_DATA_ONLY)) {
2249 #if HFS_COMPRESSION
2250 if (ISSET(from_cp->c_bsdflags, UF_COMPRESSED)) {
2251 error = hfs_move_compressed(from_cp, to_cp);
2252 goto exit;
2253 }
2254 #endif
2255
2256 error = hfs_move_data(from_cp, to_cp, 0);
2257 goto exit;
2258 }
2259
2260 /*
2261 * If we're doing a normal exchangedata, then get the source/dst quarantine
2262 * EAs as needed. We do it here before we start the transaction.
2263 */
2264
2265 //get the EA for the 'from' vnode if it exists.
2266 error = hfs_exchangedata_getxattr (from_vp, quarantine, &from_xattr, &from_attrsize);
2267 if (error) {
2268 if (error == ENOATTR) {
2269 //it's OK for the quarantine EA to not exist
2270 error = 0;
2271 }
2272 else {
2273 goto exit;
2274 }
2275 }
2276
2277
2278 //get the EA from the 'to' vnode if it exists
2279 error = hfs_exchangedata_getxattr (to_vp, quarantine, &to_xattr, &to_attrsize);
2280 if (error) {
2281 if (error == ENOATTR) {
2282 //it's OK for the quarantine EA to not exist
2283 error = 0;
2284 }
2285 else {
2286 goto exit;
2287 }
2288 }
2289
2290
2291 /* Start a transaction; we have to do all of this atomically */
2292 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2293 goto exit;
2294 }
2295 started_tr = 1;
2296
2297 /*
2298 * Reserve some space in the Catalog file.
2299 */
2300 if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, vfs_context_proc(ap->a_context)))) {
2301 goto exit;
2302 }
2303 got_cookie = 1;
2304
2305 /* The backend code always tries to delete the virtual
2306 * extent id for exchanging files so we need to lock
2307 * the extents b-tree.
2308 */
2309 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
2310
2311 /* Account for the location of the catalog objects. */
2312 if (from_cp->c_flag & C_HARDLINK) {
2313 MAKE_INODE_NAME(from_iname, sizeof(from_iname),
2314 from_cp->c_attr.ca_linkref);
2315 from_nameptr = (unsigned char *)from_iname;
2316 from_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2317 from_cp->c_hint = 0;
2318 } else {
2319 from_nameptr = from_cp->c_desc.cd_nameptr;
2320 from_parid = from_cp->c_parentcnid;
2321 }
2322 if (to_cp->c_flag & C_HARDLINK) {
2323 MAKE_INODE_NAME(to_iname, sizeof(to_iname),
2324 to_cp->c_attr.ca_linkref);
2325 to_nameptr = (unsigned char *)to_iname;
2326 to_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2327 to_cp->c_hint = 0;
2328 } else {
2329 to_nameptr = to_cp->c_desc.cd_nameptr;
2330 to_parid = to_cp->c_parentcnid;
2331 }
2332
2333 /*
2334 * ExchangeFileIDs swaps the on-disk, or in-BTree extent information
2335 * attached to two different file IDs. It also swaps the extent
2336 * information that may live in the extents-overflow B-Tree.
2337 *
2338 * We do this in a transaction as this may require a lot of B-Tree nodes
2339 * to do completely, particularly if one of the files in question
2340 * has a lot of extents.
2341 *
2342 * For example, assume "file1" has fileID 50, and "file2" has fileID 52.
2343 * For the on-disk records, which are assumed to be synced, we will
2344 * first swap the resident inline-8 extents as part of the catalog records.
2345 * Then we will swap any extents overflow records for each file.
2346 *
2347 * When ExchangeFileIDs returns successfully, "file1" will have fileID 52,
2348 * and "file2" will have fileID 50. However, note that this is only
2349 * approximately half of the work that exchangedata(2) will need to
2350 * accomplish. In other words, we swap "too much" of the information
2351 * because if we only called ExchangeFileIDs, both the fileID and extent
2352 * information would be the invariants of this operation. We don't
2353 * actually want that; we want to conclude with "file1" having
2354 * file ID 50, and "file2" having fileID 52.
2355 *
2356 * The remainder of hfs_vnop_exchange will swap the file ID and other cnode
2357 * data back to the proper ownership, while still allowing the cnode to remain
2358 * pointing at the same set of extents that it did originally.
2359 */
2360 error = ExchangeFileIDs(hfsmp, from_nameptr, to_nameptr, from_parid,
2361 to_parid, from_cp->c_hint, to_cp->c_hint);
2362 hfs_systemfile_unlock(hfsmp, lockflags);
2363
2364 if (error != E_NONE) {
2365 error = MacToVFSError(error);
2366 goto exit;
2367 }
2368
2369 /*
2370 * Now, we have to swap the quarantine EA.
2371 *
2372 * Ordinarily, we would not have to swap/exchange any extended attributes,
2373 * since they are keyed by the file ID, and this function is supposed
2374 * to manipulate the main data stream/fork only.
2375 *
2376 * However, we want the quarantine EA to follow the file content.
2377 */
2378
2379 int from_xattr_status = 0;
2380 if (from_xattr) {
2381 /*
2382 * Caution!
2383 * We've crossed a point of no return here, because if we
2384 * have successfully swapped the file content above, we need to continue here
2385 * to swap the rest of the cnode content, which is not subject to failure.
2386 * Failing the whole function because of the xattr swap would result in perceived
2387 * data loss to the caller, so we swallow the error here.
2388 */
2389 from_xattr_status = hfs_removexattr_by_id (hfsmp, from_cp->c_fileid, XATTR_NAMES[quarantine]);
2390 if (from_xattr_status == 0) {
2391 int xattr_lockflags;
2392 int remaining_eas;
2393 /*
2394 * Check to see if we need to remove the xattr bit from the catalog record flags while
2395 * 'from_cp' still tracks with its original file ID. Once the cnodes' contents are swapped
2396 * and they are ready to be re-hashed, we will OR in the bit if we know that we moved the
2397 * EA to the counterpart.
2398 */
2399 xattr_lockflags = hfs_systemfile_lock (hfsmp, SFL_ATTRIBUTE, HFS_SHARED_LOCK);
2400 remaining_eas = file_attribute_exist (hfsmp, from_cp->c_fileid);
2401 if (remaining_eas == 0) {
2402 from_cp->c_attr.ca_recflags &= ~kHFSHasAttributesMask;
2403 //the cnode will be pushed out to disk LATER on.
2404 }
2405 hfs_systemfile_unlock (hfsmp, xattr_lockflags);
2406
2407 }
2408 }
2409
2410 //and the same for to_xattr
2411 if (to_xattr) {
2412 int xattr_status = hfs_removexattr_by_id (hfsmp, to_cp->c_fileid, XATTR_NAMES[quarantine]);
2413
2414 if (xattr_status == 0) {
2415 int xattr_lockflags;
2416 int remaining_eas;
2417 /*
2418 * Check to see if we need to remove the xattr bit from the catalog record flags while
2419 * 'to_cp' still tracks with its original file ID. Once the cnodes' contents are swapped
2420 * and they are ready to be re-hashed, we will OR in the bit if we know that we moved the
2421 * EA to the counterpart.
2422 */
2423 xattr_lockflags = hfs_systemfile_lock (hfsmp, SFL_ATTRIBUTE, HFS_SHARED_LOCK);
2424 remaining_eas = file_attribute_exist (hfsmp, to_cp->c_fileid);
2425 if (remaining_eas == 0) {
2426 to_cp->c_attr.ca_recflags &= ~kHFSHasAttributesMask;
2427 //the cnode will be pushed out to disk LATER on.
2428 }
2429 hfs_systemfile_unlock (hfsmp, xattr_lockflags);
2430
2431 /* Now move the EA to the counterparty fileID. We piggyback on the larger transaction here */
2432 hfs_exchangedata_setxattr (hfsmp, from_cp->c_fileid, quarantine, to_xattr, to_attrsize);
2433 }
2434 }
2435
2436 if (from_xattr && from_xattr_status == 0) {
2437 /*
2438 * if the from EA got removed properly, then attach it to the 'to' file. We do it at this point
2439 * to ensure that it got removed properly above before re-setting it again.
2440 */
2441 hfs_exchangedata_setxattr (hfsmp, to_cp->c_fileid, quarantine, from_xattr, from_attrsize);
2442 }
2443
2444
2445 /* Purge the vnodes from the name cache */
2446 if (from_vp)
2447 cache_purge(from_vp);
2448 if (to_vp)
2449 cache_purge(to_vp);
2450
2451 /* Bump both source and destination write counts before any swaps. */
2452 {
2453 hfs_incr_gencount (from_cp);
2454 hfs_incr_gencount (to_cp);
2455 }
2456
2457 /* Save a copy of "from" attributes before swapping. */
2458 bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
2459 bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));
2460
2461 /* Save whether or not each cnode is a hardlink or has EAs */
2462 from_flag_special = from_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
2463 from_recflags_special = (from_cp->c_attr.ca_recflags & kHFSHasAttributesMask);
2464
2465 to_flag_special = to_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
2466 to_recflags_special = (to_cp->c_attr.ca_recflags & kHFSHasAttributesMask);
2467
2468 /* Drop the special bits from each cnode */
2469 from_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
2470 to_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
2471 from_cp->c_attr.ca_recflags &= ~(kHFSHasAttributesMask);
2472 to_cp->c_attr.ca_recflags &= ~(kHFSHasAttributesMask);
2473
2474 /*
2475 * Now complete the in-memory portion of the copy.
2476 *
2477 * ExchangeFileIDs swaps the on-disk records involved. We complete the
2478 * operation by swapping the in-memory contents of the two files here.
2479 * We swap the cnode descriptors, which contain name, BSD attributes,
2480 * timestamps, etc, about the file.
2481 *
2482 * NOTE: We do *NOT* swap the fileforks of the two cnodes. We have
2483 * already swapped the on-disk extent information. As long as we swap the
2484 * IDs, the in-line resident 8 extents that live in the filefork data
2485 * structure will point to the right data for the new file ID if we leave
2486 * them alone.
2487 *
2488 * As a result, any file descriptor that points to a particular
2489 * vnode (even though it should change names), will continue
2490 * to point to the same content.
2491 */
2492
2493 /* Copy the "to" -> "from" cnode */
2494 bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));
2495
2496 from_cp->c_hint = 0;
2497 /*
2498 * If 'to' was a hardlink, then we copied over its link ID/CNID/(namespace ID)
2499 * when we bcopy'd the descriptor above. However, the cnode attributes
2500 * are not bcopied. As a result, make sure to swap the file IDs of each item.
2501 *
2502 * Further, other hardlink attributes must be moved along in this swap:
2503 * the linkcount, the linkref, and the firstlink all need to move
2504 * along with the file IDs. See note below regarding the flags and
2505 * what moves vs. what does not.
2506 *
2507 * For Reference:
2508 * linkcount == total # of hardlinks.
2509 * linkref == the indirect inode pointer.
2510 * firstlink == the first hardlink in the chain (written to the raw inode).
2511 * These three are tied to the fileID and must move along with the rest of the data.
2512 */
2513 from_cp->c_fileid = to_cp->c_attr.ca_fileid;
2514
2515 from_cp->c_itime = to_cp->c_itime;
2516 from_cp->c_btime = to_cp->c_btime;
2517 from_cp->c_atime = to_cp->c_atime;
2518 from_cp->c_ctime = to_cp->c_ctime;
2519 from_cp->c_gid = to_cp->c_gid;
2520 from_cp->c_uid = to_cp->c_uid;
2521 from_cp->c_bsdflags = to_cp->c_bsdflags;
2522 from_cp->c_mode = to_cp->c_mode;
2523 from_cp->c_linkcount = to_cp->c_linkcount;
2524 from_cp->c_attr.ca_linkref = to_cp->c_attr.ca_linkref;
2525 from_cp->c_attr.ca_firstlink = to_cp->c_attr.ca_firstlink;
2526
2527 /*
2528 * The cnode flags need to stay with the cnode and not get transferred
2529 * over along with everything else because they describe the content; they are
2530 * not attributes that reflect changes specific to the file ID. In general,
2531 * fields that are tied to the file ID are the ones that will move.
2532 *
2533 * This reflects the fact that the file may have borrowed blocks, dirty metadata,
2534 * or other extents, which may not yet have been written to the catalog. If
2535 * they were, they would have been transferred in the ExchangeFileIDs call above...
2536 *
2537 * The flags that are special are:
2538 * C_HARDLINK, C_HASXATTRS
2539 *
2540 * and the c_attr recflag:
2541 * kHFSHasAttributesMask
2542 *
2543 * These flags move with the item and file ID in the namespace since their
2544 * state is tied to that of the file ID.
2545 *
2546 * So to transfer the flags, we have to take the following steps
2547 * 1) Store in a localvar whether or not the special bits are set.
2548 * 2) Drop the special bits from the current flags
2549 * 3) swap the special flag bits to their destination
2550 */
2551 from_cp->c_flag |= to_flag_special | C_MODIFIED;
2552 from_cp->c_attr.ca_recflags = to_cp->c_attr.ca_recflags;
2553 from_cp->c_attr.ca_recflags |= to_recflags_special;
2554 if (from_xattr) {
2555 /*
2556 * NOTE:
2557 * This is counter-intuitive and part of the complexity of exchangedata.
2558 * if 'from_cp' originally had a quarantine EA, then ensure that the cnode
2559 * pointed to by 'from_cp' CONTINUES to keep the "has EAs" bit. This is because
2560 * the cnode is about to be re-hashed with a new ID, but the file CONTENT
2561 * (i.e. the file fork) stayed put. And we want the quarantine EA to follow
2562 * the content. The check above is correct.
2563 */
2564 from_cp->c_attr.ca_recflags |= kHFSHasAttributesMask;
2565 }
2566
2567 bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);
2568
2569
2570 /* Copy the "from" -> "to" cnode */
2571 bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
2572 to_cp->c_hint = 0;
2573 /*
2574 * Pull the file ID from the tempattr we copied above. We can't assume
2575 * it is the same as the CNID.
2576 */
2577 to_cp->c_fileid = tempattr.ca_fileid;
2578 to_cp->c_itime = tempattr.ca_itime;
2579 to_cp->c_btime = tempattr.ca_btime;
2580 to_cp->c_atime = tempattr.ca_atime;
2581 to_cp->c_ctime = tempattr.ca_ctime;
2582 to_cp->c_gid = tempattr.ca_gid;
2583 to_cp->c_uid = tempattr.ca_uid;
2584 to_cp->c_bsdflags = tempattr.ca_flags;
2585 to_cp->c_mode = tempattr.ca_mode;
2586 to_cp->c_linkcount = tempattr.ca_linkcount;
2587 to_cp->c_attr.ca_linkref = tempattr.ca_linkref;
2588 to_cp->c_attr.ca_firstlink = tempattr.ca_firstlink;
2589
2590 /*
2591 * Only OR in the "from" flags into our cnode flags below.
2592 * Leave the rest of the flags alone.
2593 */
2594 to_cp->c_flag |= from_flag_special | C_MODIFIED;
2595 to_cp->c_attr.ca_recflags = tempattr.ca_recflags;
2596 to_cp->c_attr.ca_recflags |= from_recflags_special;
2597
2598 if (to_xattr) {
2599 /*
2600 * NOTE:
2601 * This is counter-intuitive and part of the complexity of exchangedata.
2602 * if 'to_cp' originally had a quarantine EA, then ensure that the cnode
2603 * pointed to by 'to_cp' CONTINUES to keep the "has EAs" bit. This is because
2604 * the cnode is about to be re-hashed with a new ID, but the file CONTENT
2605 * (i.e. the file fork) stayed put. And we want the quarantine EA to follow
2606 * the content. The check above is correct.
2607 */
2608 to_cp->c_attr.ca_recflags |= kHFSHasAttributesMask;
2609 }
2610
2611 bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);
2612
2613
2614 /* Rehash the cnodes using their new file IDs */
2615 hfs_chash_rehash(hfsmp, from_cp, to_cp);
2616
2617 /*
2618 * When a file moves out of "Cleanup At Startup"
2619 * we can drop its NODUMP status.
2620 */
2621 if ((from_cp->c_bsdflags & UF_NODUMP) &&
2622 (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
2623 from_cp->c_bsdflags &= ~UF_NODUMP;
2624 from_cp->c_touch_chgtime = TRUE;
2625 }
2626 if ((to_cp->c_bsdflags & UF_NODUMP) &&
2627 (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
2628 to_cp->c_bsdflags &= ~UF_NODUMP;
2629 to_cp->c_touch_chgtime = TRUE;
2630 }
2631
2632 exit:
2633 if (got_cookie) {
2634 cat_postflight(hfsmp, &cookie, vfs_context_proc(ap->a_context));
2635 }
2636 if (started_tr) {
2637 hfs_end_transaction(hfsmp);
2638 }
2639
2640 if (have_cnode_locks)
2641 hfs_unlockpair(from_cp, to_cp);
2642
2643 if (have_from_trunc_lock)
2644 hfs_unlock_truncate(from_cp, 0);
2645
2646 if (have_to_trunc_lock)
2647 hfs_unlock_truncate(to_cp, 0);
2648
2649 /* Free the memory used by the EAs */
2650 if (from_xattr) {
2651 hfs_free (from_xattr, from_attrsize);
2652 from_xattr = NULL;
2653 }
2654
2655 if (to_xattr) {
2656 hfs_free (to_xattr, to_attrsize);
2657 to_xattr = NULL;
2658 }
2659
2660 return (error);
2661 }
2662
2663 #if HFS_COMPRESSION
2664 /*
2665 * This function is used specifically for the case when a namespace
2666 * handler is trying to steal data before it's deleted. Note that we
2667 * don't bother deleting the xattr from the source because it will get
2668 * deleted a short time later anyway.
2669 *
2670 * cnodes must be locked
2671 */
2672 static int hfs_move_compressed(cnode_t *from_cp, cnode_t *to_cp)
2673 {
2674 int ret;
2675 void *data = NULL;
2676
2677 CLR(from_cp->c_bsdflags, UF_COMPRESSED);
2678 SET(from_cp->c_flag, C_MODIFIED);
2679
2680 ret = hfs_move_data(from_cp, to_cp, HFS_MOVE_DATA_INCLUDE_RSRC);
2681 if (ret)
2682 goto exit;
2683
2684 /*
2685 * Transfer the xattr that decmpfs uses. Ideally, this code
2686 * should be with the other decmpfs code but it's file system
2687 * agnostic and this path is currently, and likely to remain, HFS+
2688 * specific. It's easier and more performant if we implement it
2689 * here.
2690 */
2691
2692 size_t size;
2693 data = hfs_malloc(size = MAX_DECMPFS_XATTR_SIZE);
2694
2695 ret = hfs_xattr_read(from_cp->c_vp, DECMPFS_XATTR_NAME, data, &size);
2696 if (ret)
2697 goto exit;
2698
2699 ret = hfs_xattr_write(to_cp->c_vp, DECMPFS_XATTR_NAME, data, size);
2700 if (ret)
2701 goto exit;
2702
2703 SET(to_cp->c_bsdflags, UF_COMPRESSED);
2704 SET(to_cp->c_flag, C_MODIFIED);
2705
2706 exit:
2707 hfs_free(data, MAX_DECMPFS_XATTR_SIZE);
2708
2709 return ret;
2710 }
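/*
 * The xattr hand-off in hfs_move_compressed() above, in miniature: read
 * the decmpfs header EA from the source vnode into a scratch buffer and
 * write it out under the same name on the destination (hypothetical
 * helper, error paths condensed):
 */
#if 0 /* illustrative sketch -- not compiled */
static int copy_decmpfs_xattr(vnode_t src_vp, vnode_t dst_vp)
{
	size_t size = MAX_DECMPFS_XATTR_SIZE;
	void *data = hfs_malloc(size);

	int ret = hfs_xattr_read(src_vp, DECMPFS_XATTR_NAME, data, &size);
	if (!ret)
		ret = hfs_xattr_write(dst_vp, DECMPFS_XATTR_NAME, data, size);

	hfs_free(data, MAX_DECMPFS_XATTR_SIZE);
	return ret;
}
#endif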
2711 #endif // HFS_COMPRESSION
2712
2713 int
2714 hfs_vnop_mmap(struct vnop_mmap_args *ap)
2715 {
2716 struct vnode *vp = ap->a_vp;
2717 cnode_t *cp = VTOC(vp);
2718 int error;
2719
2720 if (VNODE_IS_RSRC(vp)) {
2721 /* allow pageins of the resource fork */
2722 } else {
2723 int compressed = hfs_file_is_compressed(cp, 1); /* 1 == don't take the cnode lock */
2724 time_t orig_ctime = cp->c_ctime;
2725
2726 if (!compressed && (cp->c_bsdflags & UF_COMPRESSED)) {
2727 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
2728 if (error != 0) {
2729 return error;
2730 }
2731 }
2732
2733 if (ap->a_fflags & PROT_WRITE) {
2734 nspace_snapshot_event(vp, orig_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
2735 }
2736 }
2737
2738 #if CONFIG_PROTECT
2739 error = cp_handle_vnop(vp, (ap->a_fflags & PROT_WRITE
2740 ? CP_WRITE_ACCESS : 0) | CP_READ_ACCESS, 0);
2741 if (error)
2742 return error;
2743 #endif
2744
2745 //
2746 // NOTE: we return ENOTSUP because we want the cluster layer
2747 // to actually do all the real work.
2748 //
2749 return (ENOTSUP);
2750 }
2751
2752 static errno_t hfs_vnop_mnomap(struct vnop_mnomap_args *ap)
2753 {
2754 vnode_t vp = ap->a_vp;
2755
2756 /*
2757 * Whilst the file was mapped, there may not have been any
2758 * page-outs so we need to increment the generation counter now.
2759 * Unfortunately this may lead to a change in the generation
2760 * counter when no actual change has been made, but there is
2761 * little we can do about that with our current architecture.
2762 */
2763 if (ubc_is_mapped_writable(vp)) {
2764 cnode_t *cp = VTOC(vp);
2765 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2766 hfs_incr_gencount(cp);
2767
2768 /*
2769 * We don't want to set the modification time here since a
2770 * change to that is not acceptable if no changes were made.
2771 * Instead we set a flag so that if we get any page-outs we
2772 * know to update the modification time. It's possible that
2773 * those page-outs weren't actually due to changes made whilst the
2774 * file was mapped, but that's not easy to fix now.
2775 */
2776 SET(cp->c_flag, C_MIGHT_BE_DIRTY_FROM_MAPPING);
2777
2778 hfs_unlock(cp);
2779 }
2780
2781 return 0;
2782 }
2783
2784 /*
2785 * Mark the resource fork as needing a ubc_setsize when we drop the
2786 * cnode lock later.
2787 */
2788 static void hfs_rsrc_setsize(cnode_t *cp)
2789 {
2790 /*
2791 * We need to take an iocount if we don't have one. vnode_get
2792 * will return ENOENT if the vnode is terminating which is what we
2793 * want as it's not safe to call ubc_setsize in that case.
2794 */
2795 if (cp->c_rsrc_vp && !vnode_get(cp->c_rsrc_vp)) {
2796 // Shouldn't happen, but better safe...
2797 if (ISSET(cp->c_flag, C_NEED_RVNODE_PUT))
2798 vnode_put(cp->c_rsrc_vp);
2799 SET(cp->c_flag, C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE);
2800 }
2801 }
2802
2803 /*
2804 * hfs_move_data
2805 *
2806 * This is a non-symmetric variant of exchangedata. In this function,
2807 * the contents of the data fork (and optionally the resource fork)
2808 * are moved from from_cp to to_cp.
2809 *
2810 * The cnodes must be locked.
2811 *
2812 * The cnode pointed to by 'to_cp' *must* be empty prior to invoking
2813 * this function. We impose this restriction because we may not be
2814 * able to fully delete the entire file's contents in a single
2815 * transaction, particularly if it has a lot of extents. In the
2816 * normal file deletion codepath, the file is screened for two
2817 * conditions: 1) bigger than 400MB, and 2) more than 8 extents. If
2818 * so, the file is relocated to the hidden directory and the deletion
2819 * is broken up into multiple truncates. We can't do that here
2820 * because both files need to exist in the namespace. The main reason
2821 * this is imposed is that we may have to touch a whole lot of bitmap
2822 * blocks if there are many extents.
2823 *
2824 * Any data written to 'from_cp' after this call completes is not
2825 * guaranteed to be moved.
2826 *
2827 * Arguments:
2828 * cnode_t *from_cp : source file
2829 * cnode_t *to_cp : destination file; must be empty
2830 *
2831 * Returns:
2832 *
2833 * EBUSY - File has been deleted or is in use
2834 * EFBIG - Destination file was not empty
2835 * EIO - An I/O error
2836 * 0 - success
2837 * other - Other errors that can be returned from called functions
2838 */
2839 int hfs_move_data(cnode_t *from_cp, cnode_t *to_cp,
2840 hfs_move_data_options_t options)
2841 {
2842 hfsmount_t *hfsmp = VTOHFS(from_cp->c_vp);
2843 int error = 0;
2844 int lockflags = 0;
2845 bool return_EIO_on_error = false;
2846 const bool include_rsrc = ISSET(options, HFS_MOVE_DATA_INCLUDE_RSRC);
2847
2848 /* Verify that neither source/dest file is open-unlinked */
2849 if (ISSET(from_cp->c_flag, C_DELETED | C_NOEXISTS)
2850 || ISSET(to_cp->c_flag, C_DELETED | C_NOEXISTS)) {
2851 return EBUSY;
2852 }
2853
2854 /*
2855 * Verify the source file is not in use by anyone besides us.
2856 *
2857 * This function is typically invoked by a namespace handler
2858 * process responding to a temporarily stalled system call.
2859 * The FD that it is working off of is opened O_EVTONLY, so
2860 * it really has no active usecounts (the kusecount from O_EVTONLY
2861 * is subtracted from the total usecounts).
2862 *
2863 * As a result, we shouldn't have any active usecounts against
2864 * this vnode when we go to check it below.
2865 */
2866 if (vnode_isinuse(from_cp->c_vp, 0))
2867 return EBUSY;
2868
2869 if (include_rsrc && from_cp->c_rsrc_vp) {
2870 if (vnode_isinuse(from_cp->c_rsrc_vp, 0))
2871 return EBUSY;
2872
2873 /*
2874 * In the code below, if the destination file doesn't have a
2875 * c_rsrcfork then we don't create it, which means we cannot
2876 * transfer the ff_invalidranges and cf_vblocks fields. These
2877 * shouldn't be set because we flush the resource fork before
2878 * calling this function but there is a tiny window when we
2879 * did not have any locks...
2880 */
2881 if (!to_cp->c_rsrcfork
2882 && (!TAILQ_EMPTY(&from_cp->c_rsrcfork->ff_invalidranges)
2883 || from_cp->c_rsrcfork->ff_unallocblocks)) {
2884 /*
2885 * The file isn't really busy now but something did slip
2886 * in and tinker with the file while we didn't have any
2887 * locks, so this is the most meaningful return code for
2888 * the caller.
2889 */
2890 return EBUSY;
2891 }
2892 }
2893
2894 // Check the destination file is empty
2895 if (to_cp->c_datafork->ff_blocks
2896 || to_cp->c_datafork->ff_size
2897 || (include_rsrc
2898 && (to_cp->c_blocks
2899 || (to_cp->c_rsrcfork && to_cp->c_rsrcfork->ff_size)))) {
2900 return EFBIG;
2901 }
2902
2903 if ((error = hfs_start_transaction (hfsmp)))
2904 return error;
2905
2906 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE,
2907 HFS_EXCLUSIVE_LOCK);
2908
2909 // filefork_t is 128 bytes which should be OK
2910 filefork_t rfork_buf, *from_rfork = NULL;
2911
2912 if (include_rsrc) {
2913 from_rfork = from_cp->c_rsrcfork;
2914
2915 /*
2916 * Creating resource fork vnodes is expensive, so just get
2917 * the fork data if we need it.
2918 */
2919 if (!from_rfork && hfs_has_rsrc(from_cp)) {
2920 from_rfork = &rfork_buf;
2921
2922 from_rfork->ff_cp = from_cp;
2923 TAILQ_INIT(&from_rfork->ff_invalidranges);
2924
2925 error = cat_idlookup(hfsmp, from_cp->c_fileid, 0, 1, NULL, NULL,
2926 &from_rfork->ff_data);
2927
2928 if (error)
2929 goto exit;
2930 }
2931 }
2932
2933 /*
2934 * From here on, any failures mean that we might be leaving things
2935 * in a weird or inconsistent state. Ideally, we should back out
2936 * all the changes, but to do that properly we need to fix
2937 * MoveData. We'll save fixing that for another time. For now,
2938 * just return EIO in all cases to the caller so that they know.
2939 */
2940 return_EIO_on_error = true;
2941
2942 bool data_overflow_extents = overflow_extents(from_cp->c_datafork);
2943
2944 // Move the data fork
2945 if ((error = hfs_move_fork (from_cp->c_datafork, from_cp,
2946 to_cp->c_datafork, to_cp))) {
2947 goto exit;
2948 }
2949
2950 SET(from_cp->c_flag, C_NEED_DATA_SETSIZE);
2951 SET(to_cp->c_flag, C_NEED_DATA_SETSIZE);
2952
2953 // We move the resource fork later
2954
2955 /*
2956 * Note that because all we're doing is moving the extents around,
2957 * we can probably do this in a single transaction: Each extent
2958 * record (group of 8) is 64 bytes. An extent overflow B-Tree node
2959 * is typically 4k. This means each node can hold roughly ~60
2960 * extent records == (480 extents).
2961 *
2962 * If a file was massively fragmented and had 20k extents, this
2963 * means we'd roughly touch 20k/480 == 41 to 42 nodes, plus the
2964 * index nodes, for half of the operation. (inserting or
2965 * deleting). So if we're manipulating 80-100 nodes, this is
2966 * basically 320k of data to write to the journal in a bad case.
2967 */
2968 if (data_overflow_extents) {
2969 if ((error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 0)))
2970 goto exit;
2971 }
2972
2973 if (from_rfork && overflow_extents(from_rfork)) {
2974 if ((error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 1)))
2975 goto exit;
2976 }
2977
2978 // Touch times
2979 from_cp->c_touch_acctime = TRUE;
2980 from_cp->c_touch_chgtime = TRUE;
2981 from_cp->c_touch_modtime = TRUE;
2982 hfs_touchtimes(hfsmp, from_cp);
2983
2984 to_cp->c_touch_acctime = TRUE;
2985 to_cp->c_touch_chgtime = TRUE;
2986 to_cp->c_touch_modtime = TRUE;
2987 hfs_touchtimes(hfsmp, to_cp);
2988
2989 struct cat_fork dfork_buf;
2990 const struct cat_fork *dfork, *rfork;
2991
2992 dfork = hfs_prepare_fork_for_update(to_cp->c_datafork, NULL,
2993 &dfork_buf, hfsmp->blockSize);
2994 rfork = hfs_prepare_fork_for_update(from_rfork, NULL,
2995 &rfork_buf.ff_data, hfsmp->blockSize);
2996
2997 // Update the catalog nodes, to_cp first
2998 if ((error = cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr,
2999 dfork, rfork))) {
3000 goto exit;
3001 }
3002
3003 CLR(to_cp->c_flag, C_MODIFIED | C_MINOR_MOD);
3004
3005 // Update in-memory resource fork data here
3006 if (from_rfork) {
3007 // Update c_blocks
3008 uint32_t moving = from_rfork->ff_blocks + from_rfork->ff_unallocblocks;
3009
3010 from_cp->c_blocks -= moving;
3011 to_cp->c_blocks += moving;
3012
3013 // Update to_cp's resource data if it has it
3014 filefork_t *to_rfork = to_cp->c_rsrcfork;
3015 if (to_rfork) {
3016 TAILQ_SWAP(&to_rfork->ff_invalidranges,
3017 &from_rfork->ff_invalidranges, rl_entry, rl_link);
3018 to_rfork->ff_data = from_rfork->ff_data;
3019
3020 // Deal with ubc_setsize
3021 hfs_rsrc_setsize(to_cp);
3022 }
3023
3024 // Wipe out the resource fork in from_cp
3025 rl_init(&from_rfork->ff_invalidranges);
3026 bzero(&from_rfork->ff_data, sizeof(from_rfork->ff_data));
3027
3028 // Deal with ubc_setsize
3029 hfs_rsrc_setsize(from_cp);
3030 }
3031
3032 // Currently unnecessary, but might be useful in future...
3033 dfork = hfs_prepare_fork_for_update(from_cp->c_datafork, NULL, &dfork_buf,
3034 hfsmp->blockSize);
3035 rfork = hfs_prepare_fork_for_update(from_rfork, NULL, &rfork_buf.ff_data,
3036 hfsmp->blockSize);
3037
3038 // Update from_cp
3039 if ((error = cat_update(hfsmp, &from_cp->c_desc, &from_cp->c_attr,
3040 dfork, rfork))) {
3041 goto exit;
3042 }
3043
3044 CLR(from_cp->c_flag, C_MODIFIED | C_MINOR_MOD);
3045
3046 exit:
3047 if (lockflags) {
3048 hfs_systemfile_unlock(hfsmp, lockflags);
3049 hfs_end_transaction(hfsmp);
3050 }
3051
3052 if (error && error != EIO && return_EIO_on_error) {
3053 printf("hfs_move_data: encountered error %d\n", error);
3054 error = EIO;
3055 }
3056
3057 return error;
3058 }
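/*
 * Caller-side shape for hfs_move_data() above: both cnodes locked by
 * the caller, the destination verified empty, and the resource fork
 * optionally included (hypothetical wrapper, illustrative only):
 */
#if 0 /* illustrative sketch -- not compiled */
static int move_data_example(cnode_t *from_cp, cnode_t *to_cp, bool with_rsrc)
{
	/* both cnodes must already be locked exclusive by the caller */
	return hfs_move_data(from_cp, to_cp,
	    with_rsrc ? HFS_MOVE_DATA_INCLUDE_RSRC : 0);
}
#endif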
3059
3060 /*
3061 * Move all of the catalog and runtime data in srcfork to dstfork.
3062 *
3063 * This allows us to maintain the invalid ranges across the move data
3064 * operation so we don't need to force all of the pending IO right
3065 * now. In addition, we move all non overflow-extent extents into the
3066 * destination here.
3067 *
3068 * The destination fork must be empty and should have been checked
3069 * prior to calling this.
3070 */
3071 static int hfs_move_fork(filefork_t *srcfork, cnode_t *src_cp,
3072 filefork_t *dstfork, cnode_t *dst_cp)
3073 {
3074 // Move the invalid ranges
3075 TAILQ_SWAP(&dstfork->ff_invalidranges, &srcfork->ff_invalidranges,
3076 rl_entry, rl_link);
3077 rl_remove_all(&srcfork->ff_invalidranges);
3078
3079 // Move the fork data (copy whole structure)
3080 dstfork->ff_data = srcfork->ff_data;
3081 bzero(&srcfork->ff_data, sizeof(srcfork->ff_data));
3082
3083 // Update c_blocks
3084 src_cp->c_blocks -= dstfork->ff_blocks + dstfork->ff_unallocblocks;
3085 dst_cp->c_blocks += dstfork->ff_blocks + dstfork->ff_unallocblocks;
3086
3087 return 0;
3088 }
3089
3090 /*
3091 * cnode must be locked
3092 */
3093 int
3094 hfs_fsync(struct vnode *vp, int waitfor, hfs_fsync_mode_t fsyncmode, struct proc *p)
3095 {
3096 struct cnode *cp = VTOC(vp);
3097 struct filefork *fp = NULL;
3098 int retval = 0;
3099 struct hfsmount *hfsmp = VTOHFS(vp);
3100 struct timeval tv;
3101 int waitdata; /* attributes necessary for data retrieval */
3102 int wait; /* all other attributes (e.g. atime, etc.) */
3103 int took_trunc_lock = 0;
3104 int fsync_default = 1;
3105
3106 /*
3107 * Applications which only care about data integrity rather than full
3108 * file integrity may opt out of (delay) expensive metadata update
3109 * operations as a performance optimization.
3110 */
3111 wait = (waitfor == MNT_WAIT);
3112 waitdata = (waitfor == MNT_DWAIT) | wait;
3113
3114 if (always_do_fullfsync)
3115 fsyncmode = HFS_FSYNC_FULL;
3116 if (fsyncmode != HFS_FSYNC)
3117 fsync_default = 0;
3118
3119 /* HFS directories don't have any data blocks. */
3120 if (vnode_isdir(vp))
3121 goto metasync;
3122 fp = VTOF(vp);
3123
3124 /*
3125 * For system files flush the B-tree header and
3126 * for regular files write out any clusters
3127 */
3128 if (vnode_issystem(vp)) {
3129 if (VTOF(vp)->fcbBTCBPtr != NULL) {
3130 // XXXdbg
3131 if (hfsmp->jnl == NULL) {
3132 BTFlushPath(VTOF(vp));
3133 }
3134 }
3135 } else {
3136 hfs_unlock(cp);
3137 hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
3138 took_trunc_lock = 1;
3139
3140 if (fp->ff_unallocblocks != 0) {
3141 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3142
3143 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
3144 }
3145
3146 /* Don't hold cnode lock when calling into cluster layer. */
3147 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
3148
3149 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
3150 }
3151 /*
3152 * When MNT_WAIT is requested and the zero fill timeout
3153 * has expired then we must explicitly zero out any areas
3154 * that are currently marked invalid (holes).
3155 *
3156 * Files with NODUMP can bypass zero filling here.
3157 */
3158 if (fp && (((cp->c_flag & C_ALWAYS_ZEROFILL) && !TAILQ_EMPTY(&fp->ff_invalidranges)) ||
3159 ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
3160 ((cp->c_bsdflags & UF_NODUMP) == 0) &&
3161 (vnode_issystem(vp) ==0) &&
3162 cp->c_zftimeout != 0))) {
3163
3164 microuptime(&tv);
3165 if ((cp->c_flag & C_ALWAYS_ZEROFILL) == 0 && fsync_default && tv.tv_sec < (long)cp->c_zftimeout) {
3166 /* Remember that a force sync was requested. */
3167 cp->c_flag |= C_ZFWANTSYNC;
3168 goto datasync;
3169 }
3170 if (!TAILQ_EMPTY(&fp->ff_invalidranges)) {
3171 if (!took_trunc_lock || (cp->c_truncatelockowner == HFS_SHARED_OWNER)) {
3172 hfs_unlock(cp);
3173 if (took_trunc_lock) {
3174 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3175 }
3176 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
3177 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
3178 took_trunc_lock = 1;
3179 }
3180 hfs_flush_invalid_ranges(vp);
3181 hfs_unlock(cp);
3182 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
3183 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
3184 }
3185 }
3186 datasync:
3187 if (took_trunc_lock) {
3188 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3189 took_trunc_lock = 0;
3190 }
3191
3192 if (!hfsmp->jnl)
3193 buf_flushdirtyblks(vp, waitdata, 0, "hfs_fsync");
3194 else if (fsync_default && vnode_islnk(vp)
3195 && vnode_hasdirtyblks(vp) && vnode_isrecycled(vp)) {
3196 /*
3197 * If it's a symlink that's dirty and is about to be recycled,
3198 * we need to flush the journal.
3199 */
3200 fsync_default = 0;
3201 }
3202
3203 metasync:
3204 if (vnode_isreg(vp) && vnode_issystem(vp)) {
3205 if (VTOF(vp)->fcbBTCBPtr != NULL) {
3206 microuptime(&tv);
3207 BTSetLastSync(VTOF(vp), tv.tv_sec);
3208 }
3209 cp->c_touch_acctime = FALSE;
3210 cp->c_touch_chgtime = FALSE;
3211 cp->c_touch_modtime = FALSE;
3212 } else if (!vnode_isswap(vp)) {
3213 retval = hfs_update(vp, HFS_UPDATE_FORCE);
3214
3215 /*
3216 * When MNT_WAIT is requested push out the catalog record for
3217 * this file. If they asked for a full fsync, we can skip this
3218 * because the journal_flush or hfs_metasync_all will push out
3219 * all of the metadata changes.
3220 */
3221 if ((retval == 0) && wait && fsync_default && cp->c_hint &&
3222 !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
3223 hfs_metasync(VTOHFS(vp), (daddr64_t)cp->c_hint, p);
3224 }
3225
3226 /*
3227 * If this was a full fsync, make sure all metadata
3228 * changes get to stable storage.
3229 */
3230 if (!fsync_default) {
3231 if (hfsmp->jnl) {
3232 if (fsyncmode == HFS_FSYNC_FULL)
3233 hfs_flush(hfsmp, HFS_FLUSH_FULL);
3234 else
3235 hfs_flush(hfsmp, HFS_FLUSH_JOURNAL_BARRIER);
3236 } else {
3237 retval = hfs_metasync_all(hfsmp);
3238 /* XXX need to pass context! */
3239 hfs_flush(hfsmp, HFS_FLUSH_CACHE);
3240 }
3241 }
3242 }
3243
3244 if (!hfs_is_dirty(cp) && !ISSET(cp->c_flag, C_DELETED))
3245 vnode_cleardirty(vp);
3246
3247 return (retval);
3248 }
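/*
 * The flush escalation at the end of hfs_fsync() above, restated: a
 * full fsync forces the device cache to be flushed, any other
 * non-default mode only issues a journal barrier, and the plain case
 * relies on hfs_metasync()/normal journal behaviour (hypothetical
 * helper, journaled volume assumed):
 */
#if 0 /* illustrative sketch -- not compiled */
static void fsync_flush_choice(struct hfsmount *hfsmp, hfs_fsync_mode_t mode)
{
	if (mode == HFS_FSYNC_FULL)
		hfs_flush(hfsmp, HFS_FLUSH_FULL);		/* journal + device cache */
	else if (mode != HFS_FSYNC)
		hfs_flush(hfsmp, HFS_FLUSH_JOURNAL_BARRIER);	/* ordering only          */
	/* HFS_FSYNC: no explicit device flush here */
}
#endif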
3249
3250
3251 /* Sync an hfs catalog b-tree node */
3252 int
3253 hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p)
3254 {
3255 vnode_t vp;
3256 buf_t bp;
3257 int lockflags;
3258
3259 vp = HFSTOVCB(hfsmp)->catalogRefNum;
3260
3261 // XXXdbg - don't need to do this on a journaled volume
3262 if (hfsmp->jnl) {
3263 return 0;
3264 }
3265
3266 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
3267 /*
3268 * Look for a matching node that has been delayed
3269 * but is not part of a set (B_LOCKED).
3270 *
3271 * BLK_ONLYVALID causes buf_getblk to return a
3272 * buf_t for the daddr64_t specified only if it's
3273 * currently resident in the cache... the size
3274 * parameter to buf_getblk is ignored when this flag
3275 * is set
3276 */
3277 bp = buf_getblk(vp, node, 0, 0, 0, BLK_META | BLK_ONLYVALID);
3278
3279 if (bp) {
3280 if ((buf_flags(bp) & (B_LOCKED | B_DELWRI)) == B_DELWRI)
3281 (void) VNOP_BWRITE(bp);
3282 else
3283 buf_brelse(bp);
3284 }
3285
3286 hfs_systemfile_unlock(hfsmp, lockflags);
3287
3288 return (0);
3289 }
3290
3291
3292 /*
3293 * Sync all hfs B-trees. Use this instead of journal_flush for a volume
3294 * without a journal. Note that the volume bitmap does not get written;
3295 * we rely on fsck_hfs to fix that up (which it can do without any loss
3296 * of data).
3297 */
3298 int
3299 hfs_metasync_all(struct hfsmount *hfsmp)
3300 {
3301 int lockflags;
3302
3303 /* Lock all of the B-trees so we get a mutually consistent state */
3304 lockflags = hfs_systemfile_lock(hfsmp,
3305 SFL_CATALOG|SFL_EXTENTS|SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
3306
3307 /* Sync each of the B-trees */
3308 if (hfsmp->hfs_catalog_vp)
3309 hfs_btsync(hfsmp->hfs_catalog_vp, 0);
3310 if (hfsmp->hfs_extents_vp)
3311 hfs_btsync(hfsmp->hfs_extents_vp, 0);
3312 if (hfsmp->hfs_attribute_vp)
3313 hfs_btsync(hfsmp->hfs_attribute_vp, 0);
3314
3315 /* Wait for all of the writes to complete */
3316 if (hfsmp->hfs_catalog_vp)
3317 vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_metasync_all");
3318 if (hfsmp->hfs_extents_vp)
3319 vnode_waitforwrites(hfsmp->hfs_extents_vp, 0, 0, 0, "hfs_metasync_all");
3320 if (hfsmp->hfs_attribute_vp)
3321 vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs_metasync_all");
3322
3323 hfs_systemfile_unlock(hfsmp, lockflags);
3324
3325 return 0;
3326 }
3327
3328
3329 /*ARGSUSED 1*/
3330 static int
3331 hfs_btsync_callback(struct buf *bp, __unused void *dummy)
3332 {
3333 buf_clearflags(bp, B_LOCKED);
3334 (void) buf_bawrite(bp);
3335
3336 return(BUF_CLAIMED);
3337 }
3338
3339
3340 int
3341 hfs_btsync(struct vnode *vp, int sync_transaction)
3342 {
3343 struct cnode *cp = VTOC(vp);
3344 struct timeval tv;
3345 int flags = 0;
3346
3347 if (sync_transaction)
3348 flags |= BUF_SKIP_NONLOCKED;
3349 /*
3350 * Flush all dirty buffers associated with b-tree.
3351 */
3352 buf_iterate(vp, hfs_btsync_callback, flags, 0);
3353
3354 microuptime(&tv);
3355 if (vnode_issystem(vp) && (VTOF(vp)->fcbBTCBPtr != NULL))
3356 (void) BTSetLastSync(VTOF(vp), tv.tv_sec);
3357 cp->c_touch_acctime = FALSE;
3358 cp->c_touch_chgtime = FALSE;
3359 cp->c_touch_modtime = FALSE;
3360
3361 return 0;
3362 }
3363
3364 /*
3365 * Remove a directory.
3366 */
3367 int
3368 hfs_vnop_rmdir(struct vnop_rmdir_args *ap)
3369 {
3370 struct vnode *dvp = ap->a_dvp;
3371 struct vnode *vp = ap->a_vp;
3372 struct cnode *dcp = VTOC(dvp);
3373 struct cnode *cp = VTOC(vp);
3374 int error;
3375 time_t orig_ctime;
3376
3377 orig_ctime = VTOC(vp)->c_ctime;
3378
3379 if (!S_ISDIR(cp->c_mode)) {
3380 return (ENOTDIR);
3381 }
3382 if (dvp == vp) {
3383 return (EINVAL);
3384 }
3385
3386 nspace_snapshot_event(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
3387 cp = VTOC(vp);
3388
3389 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
3390 return (error);
3391 }
3392
3393 /* Check for a race with rmdir on the parent directory */
3394 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
3395 hfs_unlockpair (dcp, cp);
3396 return ENOENT;
3397 }
3398
3399 //
3400 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
3401 //
3402 if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
3403 uint32_t newid;
3404
3405 hfs_unlockpair(dcp, cp);
3406
3407 if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
3408 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3409 ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
3410 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
3411 FSE_ARG_DEV, VTOHFS(vp)->hfs_raw_dev,
3412 FSE_ARG_INO, (ino64_t)0, // src inode #
3413 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
3414 FSE_ARG_INT32, newid,
3415 FSE_ARG_DONE);
3416 } else {
3417 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rm...
3418 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3419 }
3420 }
3421
3422 error = hfs_removedir(dvp, vp, ap->a_cnp, 0, 0);
3423
3424 hfs_unlockpair(dcp, cp);
3425
3426 return (error);
3427 }
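/*
 * The document_id read in hfs_vnop_rmdir() above lives in the second
 * half of the 32-byte Finder info, which is what the pointer casts are
 * digging out; a minimal restatement (hypothetical helper):
 */
#if 0 /* illustrative sketch -- not compiled */
static uint32_t read_document_id(struct cnode *cp)
{
	struct FndrExtendedDirInfo *extinfo =
	    (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);

	return extinfo->document_id;	/* 0 means "not yet assigned" */
}
#endif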
3428
3429 /*
3430 * Remove a directory
3431 *
3432 * Both dvp and vp cnodes are locked
3433 */
3434 int
3435 hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
3436 int skip_reserve, int only_unlink)
3437 {
3438 struct cnode *cp;
3439 struct cnode *dcp;
3440 struct hfsmount * hfsmp;
3441 struct cat_desc desc;
3442 int lockflags;
3443 int error = 0, started_tr = 0;
3444
3445 cp = VTOC(vp);
3446 dcp = VTOC(dvp);
3447 hfsmp = VTOHFS(vp);
3448
3449 if (dcp == cp) {
3450 return (EINVAL); /* cannot remove "." */
3451 }
3452 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
3453 return (0);
3454 }
3455 if (cp->c_entries != 0) {
3456 return (ENOTEMPTY);
3457 }
3458
3459 /*
3460 * If the directory is open or in use (e.g. opendir() or current working
3461 * directory for some process); wait for inactive/reclaim to actually
3462 * remove cnode from the catalog. Both inactive and reclaim codepaths are capable
3463 * of removing open-unlinked directories from the catalog, as well as getting rid
3464 * of EAs still on the element. So change only_unlink to true, so that it will get
3465 * cleaned up below.
3466 *
3467 * Otherwise, we can get into a weird old mess where the directory has C_DELETED,
3468 * but it really means C_NOEXISTS because the item was actually removed from the
3469 * catalog. Then when we try to remove the entry from the catalog later on, it won't
3470 * really be there anymore.
3471 */
3472 if (vnode_isinuse(vp, 0)) {
3473 only_unlink = 1;
3474 }
3475
3476 /* Deal with directory hardlinks */
3477 if (cp->c_flag & C_HARDLINK) {
3478 /*
3479 * Note that if we have a directory which was a hardlink at any point,
3480 * its actual directory data is stored in the directory inode in the hidden
3481 * directory rather than the leaf element(s) present in the namespace.
3482 *
3483 * If there are still other hardlinks to this directory,
3484 * then we'll just eliminate this particular link and the vnode will still exist.
3485 * If this is the last link to an empty directory, then we'll open-unlink the
3486 * directory and it will be only tagged with C_DELETED (as opposed to C_NOEXISTS).
3487 *
3488 * We could also return EBUSY here.
3489 */
3490
3491 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
3492 }
3493
3494 /*
3495 * In a few cases, we may want to allow the directory to persist in an
3496 * open-unlinked state. If the directory is being open-unlinked (still has usecount
3497 * references), or if it has EAs, or if it was being deleted as part of a rename,
3498 * then we go ahead and move it to the hidden directory.
3499 *
3500 * If the directory is being open-unlinked, then we want to keep the catalog entry
3501 * alive so that future EA calls and fchmod/fstat etc. do not cause issues later.
3502 *
3503 * If the directory had EAs, then we want to use the open-unlink trick so that the
3504 * EA removal is not done in one giant transaction. Otherwise, it could cause a panic
3505 * due to overflowing the journal.
3506 *
3507 * Finally, if it was deleted as part of a rename, we move it to the hidden directory
3508 * in order to maintain rename atomicity.
3509 *
3510 * Note that the allow_dirs argument to hfs_removefile specifies that it is
3511 * supposed to handle directories for this case.
3512 */
3513
3514 if (((hfsmp->hfs_attribute_vp != NULL) &&
3515 ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0)) ||
3516 (only_unlink != 0)) {
3517
3518 int ret = hfs_removefile(dvp, vp, cnp, 0, 0, 1, NULL, only_unlink);
3519 /*
3520 * Even though hfs_vnop_rename calls vnode_recycle for us on tvp, we call
3521 * it here as well in case we were invoked by rmdir() on a directory that had
3522 * EAs. To ensure that we start reclaiming the space as soon as possible,
3523 * we call vnode_recycle on the directory.
3524 */
3525 vnode_recycle(vp);
3526
3527 return ret;
3528
3529 }
3530
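/*
 * Mark the parent as undergoing a directory modification; the flag is
 * cleared (and any waiters woken) in the 'out' path below.
 */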
3531 dcp->c_flag |= C_DIR_MODIFICATION;
3532
3533 #if QUOTA
3534 if (hfsmp->hfs_flags & HFS_QUOTAS)
3535 (void)hfs_getinoquota(cp);
3536 #endif
3537 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3538 goto out;
3539 }
3540 started_tr = 1;
3541
3542 /*
3543 * Verify the directory is empty (and valid).
3544 * (Rmdir ".." won't be valid since
3545 * ".." will contain a reference to
3546 * the current directory and thus be
3547 * non-empty.)
3548 */
3549 if ((dcp->c_bsdflags & APPEND) || (cp->c_bsdflags & (IMMUTABLE | APPEND))) {
3550 error = EPERM;
3551 goto out;
3552 }
3553
3554 /* Remove the entry from the namei cache: */
3555 cache_purge(vp);
3556
3557 /*
3558 * Protect against a race with rename by using the component
3559 * name passed in and parent id from dvp (instead of using
3560 * the cp->c_desc which may have changed).
3561 */
3562 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3563 desc.cd_namelen = cnp->cn_namelen;
3564 desc.cd_parentcnid = dcp->c_fileid;
3565 desc.cd_cnid = cp->c_cnid;
3566 desc.cd_flags = CD_ISDIR;
3567 desc.cd_encoding = cp->c_encoding;
3568 desc.cd_hint = 0;
3569
3570 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
3571 error = 0;
3572 goto out;
3573 }
3574
3575 /* Remove entry from catalog */
3576 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3577
3578 if (!skip_reserve) {
3579 /*
3580 * Reserve some space in the Catalog file.
3581 */
3582 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
3583 hfs_systemfile_unlock(hfsmp, lockflags);
3584 goto out;
3585 }
3586 }
3587
3588 error = cat_delete(hfsmp, &desc, &cp->c_attr);
3589
3590 if (!error) {
3591 //
3592 // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
3593 // we don't need to touch the document_id as it's handled by the rename code.
3594 // otherwise it's a normal remove and we need to save the document id in the
3595 // per thread struct and clear it from the cnode.
3596 //
3597 struct doc_tombstone *ut;
3598 ut = doc_tombstone_get();
3599 if (!skip_reserve && (cp->c_bsdflags & UF_TRACKED)
3600 && doc_tombstone_should_save(ut, vp, cnp)) {
3601
3602 uint32_t doc_id = hfs_get_document_id(cp);
3603
3604 // this event is more of a "pending-delete"
3605 if (ut->t_lastop_document_id) {
3606 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
3607 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
3608 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
3609 FSE_ARG_INO, (ino64_t)0, // dst inode #
3610 FSE_ARG_INT32, doc_id,
3611 FSE_ARG_DONE);
3612 }
3613
3614 doc_tombstone_save(dvp, vp, cnp, doc_id, cp->c_fileid);
3615
3616 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
3617
3618 // clear this so it's never returned again
3619 fip->document_id = 0;
3620 cp->c_bsdflags &= ~UF_TRACKED;
3621 }
3622
3623 /* The parent lost a child */
3624 if (dcp->c_entries > 0)
3625 dcp->c_entries--;
3626 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
3627 dcp->c_dirchangecnt++;
3628 hfs_incr_gencount(dcp);
3629
3630 dcp->c_touch_chgtime = TRUE;
3631 dcp->c_touch_modtime = TRUE;
3632 dcp->c_flag |= C_MODIFIED;
3633
3634 hfs_update(dcp->c_vp, 0);
3635 }
3636
3637 hfs_systemfile_unlock(hfsmp, lockflags);
3638
3639 if (error)
3640 goto out;
3641
3642 #if QUOTA
3643 if (hfsmp->hfs_flags & HFS_QUOTAS)
3644 (void)hfs_chkiq(cp, -1, NOCRED, 0);
3645 #endif /* QUOTA */
3646
3647 hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));
3648
3649 /* Mark C_NOEXISTS since the catalog entry is now gone */
3650 cp->c_flag |= C_NOEXISTS;
3651
3652 out:
3653 dcp->c_flag &= ~C_DIR_MODIFICATION;
3654 wakeup((caddr_t)&dcp->c_flag);
3655
3656 if (started_tr) {
3657 hfs_end_transaction(hfsmp);
3658 }
3659
3660 return (error);
3661 }
3662
3663
3664 /*
3665 * Remove a file or link.
3666 */
3667 int
3668 hfs_vnop_remove(struct vnop_remove_args *ap)
3669 {
3670 struct vnode *dvp = ap->a_dvp;
3671 struct vnode *vp = ap->a_vp;
3672 struct cnode *dcp = VTOC(dvp);
3673 struct cnode *cp;
3674 struct vnode *rvp = NULL;
3675 int error=0, recycle_rsrc=0;
3676 int recycle_vnode = 0;
3677 uint32_t rsrc_vid = 0;
3678 time_t orig_ctime;
3679
3680 if (dvp == vp) {
3681 return (EINVAL);
3682 }
3683
3684 orig_ctime = VTOC(vp)->c_ctime;
3685 if (!vnode_isnamedstream(vp) && ((ap->a_flags & VNODE_REMOVE_SKIP_NAMESPACE_EVENT) == 0)) {
3686 error = nspace_snapshot_event(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
3687 if (error) {
3688 // XXXdbg - decide on a policy for handling namespace handler failures!
3689 // for now we just let them proceed.
3690 }
3691 }
3692 error = 0;
3693
3694 cp = VTOC(vp);
3695
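/*
 * Take the truncate lock first, then the cnode pair. We may jump back here
 * after dropping everything in order to get an iocount on the resource fork
 * vnode below.
 */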
3696 relock:
3697
3698 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
3699
3700 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
3701 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3702 if (rvp) {
3703 vnode_put (rvp);
3704 }
3705 return (error);
3706 }
3707 //
3708 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
3709 //
3710 if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
3711 uint32_t newid;
3712
3713 hfs_unlockpair(dcp, cp);
3714
3715 if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
3716 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3717 ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
3718 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
3719 FSE_ARG_DEV, VTOHFS(vp)->hfs_raw_dev,
3720 FSE_ARG_INO, (ino64_t)0, // src inode #
3721 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
3722 FSE_ARG_INT32, newid,
3723 FSE_ARG_DONE);
3724 } else {
3725 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rm...
3726 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3727 }
3728 }
3729
3730 /*
3731 * Lazily determine whether there is a valid resource fork
3732 * vnode attached to 'cp', if it is a regular file or symlink.
3733 * If the vnode does not exist, then we may proceed without having to
3734 * create it.
3735 *
3736 * If, however, it does exist, then we need to acquire an iocount on the
3737 * vnode after acquiring its vid. This ensures that if we have to do I/O
3738 * against it, it can't get recycled from underneath us in the middle
3739 * of this call.
3740 *
3741 * Note: this function may be invoked for directory hardlinks, so just skip these
3742 * steps if 'vp' is a directory.
3743 */
3744
3745 enum vtype vtype = vnode_vtype(vp);
3746 if ((vtype == VLNK) || (vtype == VREG)) {
3747 if ((cp->c_rsrc_vp) && (rvp == NULL)) {
3748 /* We need to acquire the rsrc vnode */
3749 rvp = cp->c_rsrc_vp;
3750 rsrc_vid = vnode_vid (rvp);
3751
3752 /* Unlock everything to acquire iocount on the rsrc vnode */
3753 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
3754 hfs_unlockpair (dcp, cp);
3755 /* Use the vid to maintain identity on rvp */
3756 if (vnode_getwithvid(rvp, rsrc_vid)) {
3757 /*
3758 * If this fails, then it was recycled or
3759 * reclaimed in the interim. Reset fields and
3760 * start over.
3761 */
3762 rvp = NULL;
3763 rsrc_vid = 0;
3764 }
3765 goto relock;
3766 }
3767 }
3768
3769 /*
3770 * Check to see if we raced rmdir for the parent directory.
3771 * hfs_removefile already checks for a race on vp/cp.
3772 */
3773 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
3774 error = ENOENT;
3775 goto rm_done;
3776 }
3777
3778 error = hfs_removefile(dvp, vp, ap->a_cnp, ap->a_flags, 0, 0, NULL, 0);
3779
3780 /*
3781 * If the remove succeeded in deleting the file, then we may need to mark
3782 * the resource fork for recycle so that it is reclaimed as quickly
3783 * as possible. If it were not recycled quickly, then this resource fork
3784 * vnode could keep a v_parent reference on the data fork, which prevents it
3785 * from going through reclaim (by giving it extra usecounts), except in the force-
3786 * unmount case.
3787 *
3788 * However, a caveat: we need to continue to supply resource fork
3789 * access to open-unlinked files even if the resource fork is not open. This is
3790 * a requirement for the compressed files work. Luckily, hfs_vgetrsrc will handle
3791 * this already if the data fork has been re-parented to the hidden directory.
3792 *
3793 * As a result, all we really need to do here is mark the resource fork vnode
3794 * for recycle. If it goes out of core, it can be brought in again if needed.
3795 * If the cnode was instead marked C_NOEXISTS, then there wouldn't be any
3796 * more work.
3797 */
3798 if (error == 0) {
3799 hfs_hotfile_deleted(vp);
3800
3801 if (rvp) {
3802 recycle_rsrc = 1;
3803 }
3804 /*
3805 * If the target was actually removed from the catalog, schedule it for
3806 * full reclamation/inactivation. We hold an iocount on it, so it should just
3807 * get marked with MARKTERM.
3808 */
3809 if (cp->c_flag & C_NOEXISTS) {
3810 recycle_vnode = 1;
3811 }
3812 }
3813
3814
3815 /*
3816 * Drop the truncate lock before unlocking the cnode
3817 * (which can potentially perform a vnode_put and
3818 * recycle the vnode which in turn might require the
3819 * truncate lock)
3820 */
3821 rm_done:
3822 hfs_unlockpair(dcp, cp);
3823 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3824
3825 if (recycle_rsrc) {
3826 /* inactive or reclaim on rvp will clean up the blocks from the rsrc fork */
3827 vnode_recycle(rvp);
3828 }
3829 if (recycle_vnode) {
3830 vnode_recycle (vp);
3831 }
3832
3833 if (rvp) {
3834 /* drop iocount on rsrc fork, was obtained at beginning of fxn */
3835 vnode_put(rvp);
3836 }
3837
3838 return (error);
3839 }
3840
3841
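/*
 * buf_iterate() callback used by hfs_removefile() when deleting a journaled
 * symlink: each dirty metadata buffer belonging to the link is killed in the
 * current transaction so it is not written back after the link is gone.
 */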
3842 int
3843 hfs_removefile_callback(struct buf *bp, void *hfsmp) {
3844
3845 if ( !(buf_flags(bp) & B_META))
3846 panic("hfs: symlink bp @ %p is not marked meta-data!\n", bp);
3847 /*
3848 * it's part of the current transaction, kill it.
3849 */
3850 journal_kill_block(((struct hfsmount *)hfsmp)->jnl, bp);
3851
3852 return (BUF_CLAIMED);
3853 }
3854
3855 /*
3856 * hfs_removefile
3857 *
3858 * Similar to hfs_vnop_remove except there are additional options.
3859 * This function may be used to remove directories if they have
3860 * lots of EA's -- note the 'allow_dirs' argument.
3861 *
3862 * This function is able to delete blocks & fork data for the resource
3863 * fork even if it does not exist in core (and has no backing vnode).
3864 * It should infer the correct behavior based on the number of blocks
3865 * in the cnode and whether or not the resource fork pointer exists.
3866 * As a result, one need only pass in the 'vp' corresponding to the
3867 * data fork of this file (or main vnode in the case of a directory).
3868 * Passing in a resource fork will result in an error.
3869 *
3870 * Because we do not create any vnodes in this function, we are not at
3871 * risk of deadlocking against ourselves by double-locking.
3872 *
3873 * Requires cnode and truncate locks to be held.
3874 */
3875 int
3876 hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
3877 int flags, int skip_reserve, int allow_dirs,
3878 __unused struct vnode *rvp, int only_unlink)
3879 {
3880 struct cnode *cp;
3881 struct cnode *dcp;
3882 struct vnode *rsrc_vp = NULL;
3883 struct hfsmount *hfsmp;
3884 struct cat_desc desc;
3885 struct timeval tv;
3886 int dataforkbusy = 0;
3887 int rsrcforkbusy = 0;
3888 int lockflags;
3889 int error = 0;
3890 int started_tr = 0;
3891 int isbigfile = 0, defer_remove=0, isdir=0;
3892 int update_vh = 0;
3893
3894 cp = VTOC(vp);
3895 dcp = VTOC(dvp);
3896 hfsmp = VTOHFS(vp);
3897
3898 /* Check if we lost a race post lookup. */
3899 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
3900 return (0);
3901 }
3902
3903 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
3904 return 0;
3905 }
3906
3907 /* Make sure a remove is permitted */
3908 if (VNODE_IS_RSRC(vp)) {
3909 return (EPERM);
3910 }
3911 else {
3912 /*
3913 * We know it's a data fork.
3914 * Probe the cnode to see if we have a valid resource fork
3915 * in hand or not.
3916 */
3917 rsrc_vp = cp->c_rsrc_vp;
3918 }
3919
3920 /* Don't allow deleting the journal or journal_info_block. */
3921 if (hfs_is_journal_file(hfsmp, cp)) {
3922 return (EPERM);
3923 }
3924
3925 /*
3926 * Hard links require special handling.
3927 */
3928 if (cp->c_flag & C_HARDLINK) {
3929 if ((flags & VNODE_REMOVE_NODELETEBUSY) && vnode_isinuse(vp, 0)) {
3930 return (EBUSY);
3931 } else {
3932 /* A directory hard link with a link count of one is
3933 * treated as a regular directory. Therefore it should
3934 * only be removed using rmdir().
3935 */
3936 if ((vnode_isdir(vp) == 1) && (cp->c_linkcount == 1) &&
3937 (allow_dirs == 0)) {
3938 return (EPERM);
3939 }
3940 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
3941 }
3942 }
3943
3944 /* Directories should call hfs_rmdir! (unless they have a lot of attributes) */
3945 if (vnode_isdir(vp)) {
3946 if (allow_dirs == 0)
3947 return (EPERM); /* POSIX */
3948 isdir = 1;
3949 }
3950 /* Sanity check the parent ids. */
3951 if ((cp->c_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
3952 (cp->c_parentcnid != dcp->c_fileid)) {
3953 return (EINVAL);
3954 }
3955
3956 dcp->c_flag |= C_DIR_MODIFICATION;
3957
3958 // this guy is going away so mark him as such
3959 cp->c_flag |= C_DELETED;
3960
3961
3962 /* Remove our entry from the namei cache. */
3963 cache_purge(vp);
3964
3965 /*
3966 * If the caller was operating on a file (as opposed to a
3967 * directory with EAs), then we need to figure out
3968 * whether or not it has a valid resource fork vnode.
3969 *
3970 * If there was a valid resource fork vnode, then we need
3971 * to use hfs_truncate to eliminate its data. If there is
3972 * no vnode, then we hold the cnode lock which would
3973 * prevent it from being created. As a result,
3974 * we can use the data deletion functions which do not
3975 * require that a cnode/vnode pair exist.
3976 */
3977
3978 /* Check if this file is being used. */
3979 if (isdir == 0) {
3980 dataforkbusy = vnode_isinuse(vp, 0);
3981 /*
3982 * At this point, we know that 'vp' points to
3983 * a data fork because we checked it up front. And if
3984 * there is no rsrc fork, rsrc_vp will be NULL.
3985 */
3986 if (rsrc_vp && (cp->c_blocks - VTOF(vp)->ff_blocks)) {
3987 rsrcforkbusy = vnode_isinuse(rsrc_vp, 0);
3988 }
3989 }
3990
3991 /* Check if we have to break the deletion into multiple pieces. */
3992 if (isdir == 0)
3993 isbigfile = cp->c_datafork->ff_size >= HFS_BIGFILE_SIZE;
3994
3995 /* Check if the file has xattrs. If it does, we'll have to delete them in
3996 individual transactions in case there are too many. */
3997 if ((hfsmp->hfs_attribute_vp != NULL) &&
3998 (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) {
3999 defer_remove = 1;
4000 }
4001
4002 /* If we are explicitly told to only unlink the item and move it to the hidden dir, then do it */
4003 if (only_unlink) {
4004 defer_remove = 1;
4005 }
4006
4007 /*
4008 * Carbon semantics prohibit deleting busy files.
4009 * (enforced when VNODE_REMOVE_NODELETEBUSY is requested)
4010 */
4011 if (dataforkbusy || rsrcforkbusy) {
4012 if ((flags & VNODE_REMOVE_NODELETEBUSY) ||
4013 (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid == 0)) {
4014 error = EBUSY;
4015 goto out;
4016 }
4017 }
4018
4019 #if QUOTA
4020 if (hfsmp->hfs_flags & HFS_QUOTAS)
4021 (void)hfs_getinoquota(cp);
4022 #endif /* QUOTA */
4023
4024 /*
4025 * Do a ubc_setsize to indicate we need to wipe contents if:
4026 * 1) item is a regular file.
4027 * 2) Neither fork is busy AND we are not told to unlink this.
4028 *
4029 * We need to check for the defer_remove since it can be set without
4030 * having a busy data or rsrc fork
4031 */
4032 if (isdir == 0 && (!dataforkbusy || !rsrcforkbusy) && (defer_remove == 0)) {
4033 /*
4034 * A ubc_setsize can cause a pagein so defer it
4035 * until after the cnode lock is dropped. The
4036 * cnode lock cannot be dropped/reacquired here
4037 * since we might already hold the journal lock.
4038 */
4039 if (!dataforkbusy && cp->c_datafork->ff_blocks && !isbigfile) {
4040 cp->c_flag |= C_NEED_DATA_SETSIZE;
4041 }
4042 if (!rsrcforkbusy && rsrc_vp) {
4043 cp->c_flag |= C_NEED_RSRC_SETSIZE;
4044 }
4045 }
4046
4047 if ((error = hfs_start_transaction(hfsmp)) != 0) {
4048 goto out;
4049 }
4050 started_tr = 1;
4051
4052 // XXXdbg - if we're journaled, kill any dirty symlink buffers
4053 if (hfsmp->jnl && vnode_islnk(vp) && (defer_remove == 0)) {
4054 buf_iterate(vp, hfs_removefile_callback, BUF_SKIP_NONLOCKED, (void *)hfsmp);
4055 }
4056
4057 /*
4058 * Prepare to truncate any non-busy forks. Busy forks will
4059 * get truncated when their vnode goes inactive.
4060 * Note that we will only enter this region if we
4061 * can avoid creating an open-unlinked file. If
4062 * either fork is busy, we will have to create an open
4063 * unlinked file.
4064 *
4065 * Since we are deleting the file, we need to stagger the runtime
4066 * modifications to do things in such a way that a crash won't
4067 * result in us getting overlapped extents or any other
4068 * bad inconsistencies. As such, we call prepare_release_storage
4069 * which updates the UBC, updates quota information, and releases
4070 * any loaned blocks that belong to this file. No actual
4071 * truncation or bitmap manipulation is done until *AFTER*
4072 * the catalog record is removed.
4073 */
4074 if (isdir == 0 && (!dataforkbusy && !rsrcforkbusy) && (only_unlink == 0)) {
4075
4076 if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) {
4077
4078 error = hfs_prepare_release_storage (hfsmp, vp);
4079 if (error) {
4080 goto out;
4081 }
4082 update_vh = 1;
4083 }
4084
4085 /*
4086 * If the resource fork vnode does not exist, we can skip this step.
4087 */
4088 if (!rsrcforkbusy && rsrc_vp) {
4089 error = hfs_prepare_release_storage (hfsmp, rsrc_vp);
4090 if (error) {
4091 goto out;
4092 }
4093 update_vh = 1;
4094 }
4095 }
4096
4097 /*
4098 * Protect against a race with rename by using the component
4099 * name passed in and parent id from dvp (instead of using
4100 * the cp->c_desc which may have changed). Also, be aware that
4101 * because we allow directories to be passed in, we need to special case
4102 * this temporary descriptor in case we were handed a directory.
4103 */
4104 if (isdir) {
4105 desc.cd_flags = CD_ISDIR;
4106 }
4107 else {
4108 desc.cd_flags = 0;
4109 }
4110 desc.cd_encoding = cp->c_desc.cd_encoding;
4111 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
4112 desc.cd_namelen = cnp->cn_namelen;
4113 desc.cd_parentcnid = dcp->c_fileid;
4114 desc.cd_hint = cp->c_desc.cd_hint;
4115 desc.cd_cnid = cp->c_cnid;
4116 microtime(&tv);
4117
4118 /*
4119 * There are two cases to consider:
4120 * 1. File/Dir is busy/big/defer_remove ==> move/rename the file/dir
4121 * 2. File is not in use ==> remove the file
4122 *
4123 * We can get a directory in case 1 because it may have had lots of attributes,
4124 * which need to get removed here.
4125 */
4126 if (dataforkbusy || rsrcforkbusy || isbigfile || defer_remove) {
4127 char delname[32];
4128 struct cat_desc to_desc;
4129 struct cat_desc todir_desc;
4130
4131 /*
4132 * Orphan this file or directory (move to hidden directory).
4133 * Again, we need to take care that we treat directories as directories,
4134 * and files as files. Because directories with attributes can be passed in,
4135 * check to make sure that we have a directory or a file before filling in the
4136 * temporary descriptor's flags. We keep orphaned directories AND files in
4137 * the FILE_HARDLINKS private directory since we're generalizing over all
4138 * orphaned filesystem objects.
4139 */
4140 bzero(&todir_desc, sizeof(todir_desc));
4141 todir_desc.cd_parentcnid = 2;
4142
4143 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
4144 bzero(&to_desc, sizeof(to_desc));
4145 to_desc.cd_nameptr = (const u_int8_t *)delname;
4146 to_desc.cd_namelen = strlen(delname);
4147 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
4148 if (isdir) {
4149 to_desc.cd_flags = CD_ISDIR;
4150 }
4151 else {
4152 to_desc.cd_flags = 0;
4153 }
4154 to_desc.cd_cnid = cp->c_cnid;
4155
4156 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
4157 if (!skip_reserve) {
4158 if ((error = cat_preflight(hfsmp, CAT_RENAME, NULL, 0))) {
4159 hfs_systemfile_unlock(hfsmp, lockflags);
4160 goto out;
4161 }
4162 }
4163
4164 error = cat_rename(hfsmp, &desc, &todir_desc,
4165 &to_desc, (struct cat_desc *)NULL);
4166
4167 if (error == 0) {
4168 hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries++;
4169 if (isdir == 1) {
4170 INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
4171 }
4172 (void) cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
4173 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
4174
4175 /* Update the parent directory */
4176 if (dcp->c_entries > 0)
4177 dcp->c_entries--;
4178 if (isdir == 1) {
4179 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
4180 }
4181 dcp->c_dirchangecnt++;
4182 hfs_incr_gencount(dcp);
4183
4184 dcp->c_ctime = tv.tv_sec;
4185 dcp->c_mtime = tv.tv_sec;
4186 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
4187
4188 /* Update the file or directory's state */
4189 cp->c_flag |= C_DELETED;
4190 cp->c_ctime = tv.tv_sec;
4191 --cp->c_linkcount;
4192 (void) cat_update(hfsmp, &to_desc, &cp->c_attr, NULL, NULL);
4193 }
4194 hfs_systemfile_unlock(hfsmp, lockflags);
4195 if (error)
4196 goto out;
4197
4198 }
4199 else {
4200 /*
4201 * Nobody is using this item; we can safely remove everything.
4202 */
4203 struct filefork *temp_rsrc_fork = NULL;
4204 #if QUOTA
4205 off_t savedbytes;
4206 int blksize = hfsmp->blockSize;
4207 #endif
4208 u_int32_t fileid = cp->c_fileid;
4209
4210 /*
4211 * Figure out if we need to read the resource fork data into
4212 * core before wiping out the catalog record.
4213 *
4214 * 1) Must not be a directory
4215 * 2) cnode's c_rsrcfork ptr must be NULL.
4216 * 3) rsrc fork must have actual blocks
4217 */
4218 if ((isdir == 0) && (cp->c_rsrcfork == NULL) &&
4219 (cp->c_blocks - VTOF(vp)->ff_blocks)) {
4220 /*
4221 * The resource fork vnode & filefork did not exist.
4222 * Create a temporary one for use in this function only.
4223 */
4224 temp_rsrc_fork = hfs_zalloc(HFS_FILEFORK_ZONE);
4225 bzero(temp_rsrc_fork, sizeof(struct filefork));
4226 temp_rsrc_fork->ff_cp = cp;
4227 rl_init(&temp_rsrc_fork->ff_invalidranges);
4228 }
4229
4230 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
4231
4232 /* Look up the resource fork first, if necessary */
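/*
 * This fills in the temporary fork's ff_data from the catalog so that
 * hfs_release_storage() can free the resource fork's extents after the
 * catalog record has been deleted.
 */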
4233 if (temp_rsrc_fork) {
4234 error = cat_lookup (hfsmp, &desc, 1, 0, (struct cat_desc*) NULL,
4235 (struct cat_attr*) NULL, &temp_rsrc_fork->ff_data, NULL);
4236 if (error) {
4237 hfs_zfree(temp_rsrc_fork, HFS_FILEFORK_ZONE);
4238 hfs_systemfile_unlock (hfsmp, lockflags);
4239 goto out;
4240 }
4241 }
4242
4243 if (!skip_reserve) {
4244 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
4245 if (temp_rsrc_fork) {
4246 hfs_zfree(temp_rsrc_fork, HFS_FILEFORK_ZONE);
4247 }
4248 hfs_systemfile_unlock(hfsmp, lockflags);
4249 goto out;
4250 }
4251 }
4252
4253 error = cat_delete(hfsmp, &desc, &cp->c_attr);
4254
4255 if (error && error != ENXIO && error != ENOENT) {
4256 printf("hfs_removefile: deleting file %s (id=%d) vol=%s err=%d\n",
4257 cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, hfsmp->vcbVN, error);
4258 }
4259
4260 if (error == 0) {
4261 /* Update the parent directory */
4262 if (dcp->c_entries > 0)
4263 dcp->c_entries--;
4264 dcp->c_dirchangecnt++;
4265 hfs_incr_gencount(dcp);
4266
4267 dcp->c_ctime = tv.tv_sec;
4268 dcp->c_mtime = tv.tv_sec;
4269 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
4270 }
4271 hfs_systemfile_unlock(hfsmp, lockflags);
4272
4273 if (error) {
4274 if (temp_rsrc_fork) {
4275 hfs_zfree(temp_rsrc_fork, HFS_FILEFORK_ZONE);
4276 }
4277 goto out;
4278 }
4279
4280 /*
4281 * Now that we've wiped out the catalog record, the file effectively doesn't
4282 * exist anymore. So update the quota records to reflect the loss of the
4283 * data fork and the resource fork.
4284 */
4285 #if QUOTA
4286 if (cp->c_datafork->ff_blocks > 0) {
4287 savedbytes = ((off_t)cp->c_datafork->ff_blocks * (off_t)blksize);
4288 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
4289 }
4290
4291 /*
4292 * We may have just deleted the catalog record for a resource fork even
4293 * though it did not exist in core as a vnode. However, just because there
4294 * was a resource fork pointer in the cnode does not mean that it had any blocks.
4295 */
4296 if (temp_rsrc_fork || cp->c_rsrcfork) {
4297 if (cp->c_rsrcfork) {
4298 if (cp->c_rsrcfork->ff_blocks > 0) {
4299 savedbytes = ((off_t)cp->c_rsrcfork->ff_blocks * (off_t)blksize);
4300 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
4301 }
4302 }
4303 else {
4304 /* we must have used a temporary fork */
4305 savedbytes = ((off_t)temp_rsrc_fork->ff_blocks * (off_t)blksize);
4306 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
4307 }
4308 }
4309
4310 if (hfsmp->hfs_flags & HFS_QUOTAS) {
4311 (void)hfs_chkiq(cp, -1, NOCRED, 0);
4312 }
4313 #endif
4314
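/* Release any cached in-memory copy of the symlink target. */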
4315 if (vnode_islnk(vp) && cp->c_datafork->ff_symlinkptr) {
4316 hfs_free(cp->c_datafork->ff_symlinkptr, cp->c_datafork->ff_size);
4317 cp->c_datafork->ff_symlinkptr = NULL;
4318 }
4319
4320 /*
4321 * If we didn't get any errors deleting the catalog entry, then go ahead
4322 * and release the backing store now. The filefork pointers are still valid.
4323 */
4324 if (temp_rsrc_fork) {
4325 error = hfs_release_storage (hfsmp, cp->c_datafork, temp_rsrc_fork, fileid);
4326 }
4327 else {
4328 /* if cp->c_rsrcfork == NULL, hfs_release_storage will skip over it. */
4329 error = hfs_release_storage (hfsmp, cp->c_datafork, cp->c_rsrcfork, fileid);
4330 }
4331 if (error) {
4332 /*
4333 * If we encountered an error updating the extents and bitmap,
4334 * mark the volume inconsistent. At this point, the catalog record has
4335 * already been deleted, so we can't recover it at this point. We need
4336 * to proceed and update the volume header and mark the cnode C_NOEXISTS.
4337 * The subsequent fsck should be able to recover the free space for us.
4338 */
4339 hfs_mark_inconsistent(hfsmp, HFS_OP_INCOMPLETE);
4340 }
4341 else {
4342 /* reset update_vh to 0, since hfs_release_storage should have done it for us */
4343 update_vh = 0;
4344 }
4345
4346 /* Get rid of the temporary rsrc fork */
4347 if (temp_rsrc_fork) {
4348 hfs_zfree(temp_rsrc_fork, HFS_FILEFORK_ZONE);
4349 }
4350
4351 cp->c_flag |= C_NOEXISTS;
4352 cp->c_flag &= ~C_DELETED;
4353
4354 cp->c_touch_chgtime = TRUE;
4355 --cp->c_linkcount;
4356
4357 /*
4358 * We must never get a directory if we're in this else block. We could
4359 * accidentally drop the number of files in the volume header if we did.
4360 */
4361 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
4362
4363 }
4364
4365 //
4366 // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
4367 // we don't need to touch the document_id as it's handled by the rename code.
4368 // otherwise it's a normal remove and we need to save the document id in the
4369 // per thread struct and clear it from the cnode.
4370 //
4371 if (!error && !skip_reserve && (cp->c_bsdflags & UF_TRACKED)
4372 && cp->c_linkcount <= 1) {
4373 struct doc_tombstone *ut;
4374 ut = doc_tombstone_get();
4375 if (doc_tombstone_should_save(ut, vp, cnp)) {
4376 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4377 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4378 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
4379 FSE_ARG_INO, (ino64_t)0, // dst inode #
4380 FSE_ARG_INT32, hfs_get_document_id(cp), // document id
4381 FSE_ARG_DONE);
4382
4383 doc_tombstone_save(dvp, vp, cnp, hfs_get_document_id(cp),
4384 cp->c_fileid);
4385
4386 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
4387
4388 fip->document_id = 0;
4389 cp->c_bsdflags &= ~UF_TRACKED;
4390 }
4391 }
4392
4393 /*
4394 * All done with this cnode's descriptor...
4395 *
4396 * Note: all future catalog calls for this cnode must be by
4397 * fileid only. This is OK for HFS (which doesn't have file
4398 * thread records) since HFS doesn't support the removal of
4399 * busy files.
4400 */
4401 cat_releasedesc(&cp->c_desc);
4402
4403 out:
4404 if (error) {
4405 cp->c_flag &= ~C_DELETED;
4406 }
4407
4408 if (update_vh) {
4409 /*
4410 * If we bailed out earlier, we may need to update the volume header
4411 * to deal with the borrowed blocks accounting.
4412 */
4413 hfs_volupdate (hfsmp, VOL_UPDATE, 0);
4414 }
4415
4416 if (started_tr) {
4417 hfs_end_transaction(hfsmp);
4418 }
4419
4420 dcp->c_flag &= ~C_DIR_MODIFICATION;
4421 wakeup((caddr_t)&dcp->c_flag);
4422
4423 return (error);
4424 }
4425
4426
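/*
 * Replace the cnode's catalog descriptor with *cdp. Any name buffer owned by
 * the old descriptor is released, and ownership of cdp's name buffer is
 * transferred to the cnode.
 */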
4427 void
4428 replace_desc(struct cnode *cp, struct cat_desc *cdp)
4429 {
4430 // fixes 4348457 and 4463138
4431 if (&cp->c_desc == cdp) {
4432 return;
4433 }
4434
4435 /* First release allocated name buffer */
4436 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
4437 const u_int8_t *name = cp->c_desc.cd_nameptr;
4438
4439 cp->c_desc.cd_nameptr = 0;
4440 cp->c_desc.cd_namelen = 0;
4441 cp->c_desc.cd_flags &= ~CD_HASBUF;
4442 vfs_removename((const char *)name);
4443 }
4444 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
4445
4446 /* Cnode now owns the name buffer */
4447 cdp->cd_nameptr = 0;
4448 cdp->cd_namelen = 0;
4449 cdp->cd_flags &= ~CD_HASBUF;
4450 }
4451
4452 /*
4453 * hfs_vnop_rename
4454 *
4455 * Just forwards the arguments from VNOP_RENAME into those of
4456 * VNOP_RENAMEX but zeros out the flags word.
4457 */
4458 int hfs_vnop_rename (struct vnop_rename_args *args) {
4459 struct vnop_renamex_args vrx;
4460
4461 vrx.a_desc = args->a_desc; // we aren't using it to switch into the vnop array, so fine as is.
4462 vrx.a_fdvp = args->a_fdvp;
4463 vrx.a_fvp = args->a_fvp;
4464 vrx.a_fcnp = args->a_fcnp;
4465 vrx.a_tdvp = args->a_tdvp;
4466 vrx.a_tvp = args->a_tvp;
4467 vrx.a_tcnp = args->a_tcnp;
4468 vrx.a_vap = NULL; // not used
4469 vrx.a_flags = 0; //zero out the flags.
4470 vrx.a_context = args->a_context;
4471
4472 return hfs_vnop_renamex (&vrx);
4473 }
4474
4475
4476
4477 /*
4478 * Rename a cnode.
4479 *
4480 * The VFS layer guarantees that:
4481 * - source and destination will either both be directories, or
4482 * both not be directories.
4483 * - all the vnodes are from the same file system
4484 *
4485 * When the target is a directory, HFS must ensure that it is empty.
4486 *
4487 * Note that this function requires up to 6 vnodes in order to work properly
4488 * if it is operating on files (and not on directories). This is because only
4489 * files can have resource forks, and we now require iocounts to be held on the
4490 * vnodes corresponding to the resource forks (if applicable) as well as
4491 * the files or directories undergoing rename. The problem with not holding
4492 * iocounts on the resource fork vnodes is that it can lead to a deadlock
4493 * situation: The rsrc fork of the source file may be recycled and reclaimed
4494 * in order to provide a vnode for the destination file's rsrc fork. Since
4495 * data and rsrc forks share the same cnode, we'd eventually try to lock the
4496 * source file's cnode in order to sync its rsrc fork to disk, but it's already
4497 * been locked. By taking the rsrc fork vnodes up front we ensure that they
4498 * cannot be recycled, and that the situation mentioned above cannot happen.
4499 */
4500 int
4501 hfs_vnop_renamex(struct vnop_renamex_args *ap)
4502 {
4503 struct vnode *tvp = ap->a_tvp;
4504 struct vnode *tdvp = ap->a_tdvp;
4505 struct vnode *fvp = ap->a_fvp;
4506 struct vnode *fdvp = ap->a_fdvp;
4507 /*
4508 * Note that we only need locals for the target/destination's
4509 * resource fork vnode (and only if necessary). We don't care if the
4510 * source has a resource fork vnode or not.
4511 */
4512 struct vnode *tvp_rsrc = NULLVP;
4513 uint32_t tvp_rsrc_vid = 0;
4514 struct componentname *tcnp = ap->a_tcnp;
4515 struct componentname *fcnp = ap->a_fcnp;
4516 struct proc *p = vfs_context_proc(ap->a_context);
4517 struct cnode *fcp;
4518 struct cnode *fdcp;
4519 struct cnode *tdcp;
4520 struct cnode *tcp;
4521 struct cnode *error_cnode;
4522 struct cat_desc from_desc;
4523 struct cat_desc to_desc;
4524 struct cat_desc out_desc;
4525 struct hfsmount *hfsmp;
4526 cat_cookie_t cookie;
4527 int tvp_deleted = 0;
4528 int started_tr = 0, got_cookie = 0;
4529 int took_trunc_lock = 0;
4530 int lockflags;
4531 int error;
4532 time_t orig_from_ctime, orig_to_ctime;
4533 int emit_rename = 1;
4534 int emit_delete = 1;
4535 int is_tracked = 0;
4536 int unlocked;
4537 vnode_t old_doc_vp = NULL;
4538 int rename_exclusive = 0;
4539
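/* Capture the original ctimes up front for the namespace snapshot events below. */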
4540 orig_from_ctime = VTOC(fvp)->c_ctime;
4541 if (tvp && VTOC(tvp)) {
4542 orig_to_ctime = VTOC(tvp)->c_ctime;
4543 } else {
4544 orig_to_ctime = ~0;
4545 }
4546
4547 hfsmp = VTOHFS(tdvp);
4548
4549 /* Check the flags first, so we can avoid grabbing locks unnecessarily */
4550 if (ap->a_flags) {
4551 /* These are the only flags we support for now */
4552 if ((ap->a_flags & (VFS_RENAME_EXCL)) == 0) {
4553 return ENOTSUP;
4554 }
4555
4556 /* The rename flags are mutually exclusive for HFS+ */
4557 switch (ap->a_flags & VFS_RENAME_FLAGS_MASK) {
4558 case VFS_RENAME_EXCL:
4559 rename_exclusive = true;
4560 break;
4561 default:
4562 return ENOTSUP;
4563 }
4564 }
4565
4566 /*
4567 * Do special case checks here. If fvp == tvp then we need to check the
4568 * cnode with locks held.
4569 */
4570 if (fvp == tvp) {
4571 int is_hardlink = 0;
4572 /*
4573 * In this case, we do *NOT* ever emit a DELETE event.
4574 * We may not necessarily emit a RENAME event
4575 */
4576 emit_delete = 0;
4577 if ((error = hfs_lock(VTOC(fvp), HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
4578 return error;
4579 }
4580 /* Check to see if the item is a hardlink or not */
4581 is_hardlink = (VTOC(fvp)->c_flag & C_HARDLINK);
4582 hfs_unlock (VTOC(fvp));
4583
4584 /*
4585 * If the item is not a hardlink, then case sensitivity must be off, otherwise
4586 * two names should not resolve to the same cnode unless they were case variants.
4587 */
4588 if (is_hardlink) {
4589 emit_rename = 0;
4590 /*
4591 * Hardlinks are a little trickier. We only want to emit a rename event
4592 * if the item is a hardlink, the parent directories are the same, case sensitivity
4593 * is off, and the case folded names are the same. See the fvp == tvp case below for more
4594 * info.
4595 */
4596
4597 if ((fdvp == tdvp) && ((hfsmp->hfs_flags & HFS_CASE_SENSITIVE) == 0)) {
4598 if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
4599 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
4600 /* Then in this case only it is ok to emit a rename */
4601 emit_rename = 1;
4602 }
4603 }
4604 }
4605 }
4606 if (emit_rename) {
4607 /* c_bsdflags should only be assessed while holding the cnode lock.
4608 * This is not done consistently throughout the code and can result
4609 * in a race. This will be fixed via rdar://12181064
4610 */
4611 if (VTOC(fvp)->c_bsdflags & UF_TRACKED) {
4612 is_tracked = 1;
4613 }
4614 nspace_snapshot_event(fvp, orig_from_ctime, NAMESPACE_HANDLER_RENAME_OP, NULL);
4615 }
4616
4617 if (tvp && VTOC(tvp)) {
4618 if (emit_delete) {
4619 nspace_snapshot_event(tvp, orig_to_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
4620 }
4621 }
4622
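/*
 * We come back to 'retry' whenever the locks had to be dropped, e.g. to get
 * an iocount on tvp's resource fork or after tvp turned out to be gone.
 */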
4623 retry:
4624 /* When tvp exists, take the truncate lock for hfs_removefile(). */
4625 if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) {
4626 hfs_lock_truncate(VTOC(tvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4627 took_trunc_lock = 1;
4628 }
4629
4630 relock:
4631 error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL,
4632 HFS_EXCLUSIVE_LOCK, &error_cnode);
4633 if (error) {
4634 if (took_trunc_lock) {
4635 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
4636 took_trunc_lock = 0;
4637 }
4638
4639 /*
4640 * We hit an error path. If we were trying to re-acquire the locks
4641 * after coming through here once, we might have already obtained
4642 * an iocount on tvp's resource fork vnode. Drop that before dealing
4643 * with the failure. Note this is safe -- since we are in an
4644 * error handling path, we can't be holding the cnode locks.
4645 */
4646 if (tvp_rsrc) {
4647 vnode_put (tvp_rsrc);
4648 tvp_rsrc_vid = 0;
4649 tvp_rsrc = NULL;
4650 }
4651
4652 /*
4653 * tvp might no longer exist. If the cause of the lock failure
4654 * was tvp, then we can try again with tvp/tcp set to NULL.
4655 * This is ok because the vfs syscall will vnode_put the vnodes
4656 * after we return from hfs_vnop_rename.
4657 */
4658 if ((error == ENOENT) && (tvp != NULL) && (error_cnode == VTOC(tvp))) {
4659 tcp = NULL;
4660 tvp = NULL;
4661 goto retry;
4662 }
4663
4664 /* If we want to reintroduce notifications for failed renames, this
4665 is the place to do it. */
4666
4667 return (error);
4668 }
4669
4670 fdcp = VTOC(fdvp);
4671 fcp = VTOC(fvp);
4672 tdcp = VTOC(tdvp);
4673 tcp = tvp ? VTOC(tvp) : NULL;
4674
4675 //
4676 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
4677 //
4678 unlocked = 0;
4679 if ((fcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
4680 uint32_t newid;
4681
4682 hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
4683 unlocked = 1;
4684
4685 if (hfs_generate_document_id(hfsmp, &newid) == 0) {
4686 hfs_lock(fcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4687 ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id = newid;
4688 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4689 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4690 FSE_ARG_INO, (ino64_t)0, // src inode #
4691 FSE_ARG_INO, (ino64_t)fcp->c_fileid, // dst inode #
4692 FSE_ARG_INT32, newid,
4693 FSE_ARG_DONE);
4694 hfs_unlock(fcp);
4695 } else {
4696 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rename...
4697 }
4698
4699 //
4700 // check if we're going to need to fix tcp as well. if we aren't, go back and relock
4701 // everything. otherwise continue on and fix up tcp as well before relocking.
4702 //
4703 if (tcp == NULL || !(tcp->c_bsdflags & UF_TRACKED) || ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id != 0) {
4704 goto relock;
4705 }
4706 }
4707
4708 //
4709 // same thing for tcp if it's set
4710 //
4711 if (tcp && (tcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
4712 uint32_t newid;
4713
4714 if (!unlocked) {
4715 hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
4716 unlocked = 1;
4717 }
4718
4719 if (hfs_generate_document_id(hfsmp, &newid) == 0) {
4720 hfs_lock(tcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4721 ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id = newid;
4722 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4723 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4724 FSE_ARG_INO, (ino64_t)0, // src inode #
4725 FSE_ARG_INO, (ino64_t)tcp->c_fileid, // dst inode #
4726 FSE_ARG_INT32, newid,
4727 FSE_ARG_DONE);
4728 hfs_unlock(tcp);
4729 } else {
4730 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rename...
4731 }
4732
4733 // go back up and relock everything. next time through the if statement won't be true
4734 // and we'll skip over this block of code.
4735 goto relock;
4736 }
4737
4738
4739
4740 /*
4741 * Acquire iocounts on the destination's resource fork vnode
4742 * if necessary. If dst/src are files and the dst has a resource
4743 * fork vnode, then we need to try and acquire an iocount on the rsrc vnode.
4744 * If it does not exist, then we don't care and can skip it.
4745 */
4746 if ((vnode_isreg(fvp)) || (vnode_islnk(fvp))) {
4747 if ((tvp) && (tcp->c_rsrc_vp) && (tvp_rsrc == NULL)) {
4748 tvp_rsrc = tcp->c_rsrc_vp;
4749 /*
4750 * We can look at the vid here because we're holding the
4751 * cnode lock on the underlying cnode for this rsrc vnode.
4752 */
4753 tvp_rsrc_vid = vnode_vid (tvp_rsrc);
4754
4755 /* Unlock everything to acquire iocount on this rsrc vnode */
4756 if (took_trunc_lock) {
4757 hfs_unlock_truncate (VTOC(tvp), HFS_LOCK_DEFAULT);
4758 took_trunc_lock = 0;
4759 }
4760 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4761
4762 if (vnode_getwithvid (tvp_rsrc, tvp_rsrc_vid)) {
4763 /* iocount acquisition failed. Reset fields and start over.. */
4764 tvp_rsrc_vid = 0;
4765 tvp_rsrc = NULL;
4766 }
4767 goto retry;
4768 }
4769 }
4770
4771
4772
4773 /* Ensure we didn't race src or dst parent directories with rmdir. */
4774 if (fdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
4775 error = ENOENT;
4776 goto out;
4777 }
4778
4779 if (tdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
4780 error = ENOENT;
4781 goto out;
4782 }
4783
4784
4785 /* Check for a race against unlink. The hfs_valid_cnode checks validate
4786 * the parent/child relationship with fdcp and tdcp, as well as the
4787 * component name of the target cnodes.
4788 */
4789 if ((fcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, fdvp, fcnp, fcp->c_fileid, NULL, &error)) {
4790 error = ENOENT;
4791 goto out;
4792 }
4793
4794 if (tcp && ((tcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, tdvp, tcnp, tcp->c_fileid, NULL, &error))) {
4795 //
4796 // hmm, the destination vnode isn't valid any more.
4797 // in this case we can just drop him and pretend he
4798 // never existed in the first place.
4799 //
4800 if (took_trunc_lock) {
4801 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
4802 took_trunc_lock = 0;
4803 }
4804 error = 0;
4805
4806 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4807
4808 tcp = NULL;
4809 tvp = NULL;
4810
4811 // retry the locking with tvp null'ed out
4812 goto retry;
4813 }
4814
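/*
 * Flag the source parent (and the destination parent, if different) as
 * undergoing a directory modification.
 */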
4815 fdcp->c_flag |= C_DIR_MODIFICATION;
4816 if (fdvp != tdvp) {
4817 tdcp->c_flag |= C_DIR_MODIFICATION;
4818 }
4819
4820 /*
4821 * Disallow renaming of a directory hard link if the source and
4822 * destination parent directories are different, or a directory whose
4823 * descendant is a directory hard link and one of the ancestors
4824 * of the destination directory is a directory hard link.
4825 */
4826 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
4827 if (fcp->c_flag & C_HARDLINK) {
4828 error = EPERM;
4829 goto out;
4830 }
4831 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
4832 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4833 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
4834 error = EPERM;
4835 hfs_systemfile_unlock(hfsmp, lockflags);
4836 goto out;
4837 }
4838 hfs_systemfile_unlock(hfsmp, lockflags);
4839 }
4840 }
4841
4842 /*
4843 * The following edge case is caught here:
4844 * (to cannot be a descendant of from)
4845 *
4846 *       o fdvp
4847 *      /
4848 *     /
4849 *    o fvp
4850 *     \
4851 *      \
4852 *       o tdvp
4853 *      /
4854 *     /
4855 *    o tvp
4856 */
4857 if (tdcp->c_parentcnid == fcp->c_fileid) {
4858 error = EINVAL;
4859 goto out;
4860 }
4861
4862 /*
4863 * The following two edge cases are caught here:
4864 * (note tvp is not empty)
4865 *
4866 *       o tdvp               o tdvp
4867 *      /                    /
4868 *     /                    /
4869 *    o tvp            tvp o fdvp
4870 *     \                    \
4871 *      \                    \
4872 *       o fdvp               o fvp
4873 *      /
4874 *     /
4875 *    o fvp
4876 */
4877 if (tvp && vnode_isdir(tvp) && (tcp->c_entries != 0) && fvp != tvp) {
4878 error = ENOTEMPTY;
4879 goto out;
4880 }
4881
4882 /*
4883 * The following edge case is caught here:
4884 * (the from child and parent are the same)
4885 *
4886 *        o tdvp
4887 *       /
4888 *      /
4889 *  fdvp o fvp
4890 */
4891 if (fdvp == fvp) {
4892 error = EINVAL;
4893 goto out;
4894 }
4895
4896 /*
4897 * Make sure "from" vnode and its parent are changeable.
4898 */
4899 if ((fcp->c_bsdflags & (IMMUTABLE | APPEND)) || (fdcp->c_bsdflags & APPEND)) {
4900 error = EPERM;
4901 goto out;
4902 }
4903
4904 /*
4905 * If the destination parent directory is "sticky", then the
4906 * user must own the parent directory, or the destination of
4907 * the rename, otherwise the destination may not be changed
4908 * (except by root). This implements append-only directories.
4909 *
4910 * Note that checks for immutable and write access are done
4911 * by the call to hfs_removefile.
4912 */
4913 if (tvp && (tdcp->c_mode & S_ISTXT) &&
4914 (suser(vfs_context_ucred(ap->a_context), NULL)) &&
4915 (kauth_cred_getuid(vfs_context_ucred(ap->a_context)) != tdcp->c_uid) &&
4916 (hfs_owner_rights(hfsmp, tcp->c_uid, vfs_context_ucred(ap->a_context), p, false)) ) {
4917 error = EPERM;
4918 goto out;
4919 }
4920
4921 /* Don't allow modification of the journal or journal_info_block */
4922 if (hfs_is_journal_file(hfsmp, fcp) ||
4923 (tcp && hfs_is_journal_file(hfsmp, tcp))) {
4924 error = EPERM;
4925 goto out;
4926 }
4927
4928 #if QUOTA
4929 if (tvp)
4930 (void)hfs_getinoquota(tcp);
4931 #endif
4932 /* Preflighting done, take fvp out of the name space. */
4933 cache_purge(fvp);
4934
4935 #if CONFIG_SECLUDED_RENAME
4936 /*
4937 * Check for "secure" rename that imposes additional restrictions on the
4938 * source vnode. We wait until here to check in order to prevent a race
4939 * with other threads that manage to look up fvp, but their open or link
4940 * is blocked by our locks. At this point, with fvp out of the name cache,
4941 * and holding the lock on fdvp, no other thread can find fvp.
4942 *
4943 * TODO: Do we need to limit these checks to regular files only?
4944 */
4945 if (fcnp->cn_flags & CN_SECLUDE_RENAME) {
4946 if (vnode_isdir(fvp)) {
4947 error = EISDIR;
4948 goto out;
4949 }
4950
4951 /*
4952 * Neither fork of source may be open or memory mapped.
4953 * We also don't want it in use by any other system call.
4954 * The file must not have hard links.
4955 *
4956 * We can't simply use vnode_isinuse() because that does not
4957 * count opens with O_EVTONLY. We don't want a malicious
4958 * process using O_EVTONLY to subvert a secluded rename.
4959 */
4960 if (fcp->c_linkcount != 1) {
4961 error = EMLINK;
4962 goto out;
4963 }
4964
4965 if (fcp->c_rsrc_vp && (vnode_usecount(fcp->c_rsrc_vp) > 0 ||
4966 vnode_iocount(fcp->c_rsrc_vp) > 0)) {
4967 /* Resource fork is in use (including O_EVTONLY) */
4968 error = EBUSY;
4969 goto out;
4970 }
4971 if (fcp->c_vp && (vnode_usecount(fcp->c_vp) > (fcp->c_rsrc_vp ? 1 : 0) ||
4972 vnode_iocount(fcp->c_vp) > 1)) {
4973 /*
4974 * Data fork is in use, including O_EVTONLY, but not
4975 * including a reference from the resource fork.
4976 */
4977 error = EBUSY;
4978 goto out;
4979 }
4980 }
4981 #endif
4982
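/*
 * Build catalog descriptors for the source and destination names. Both start
 * out referencing fcp's cnid; for hardlinks the source cnid is corrected to
 * the link's real cnid further below.
 */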
4983 bzero(&from_desc, sizeof(from_desc));
4984 from_desc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
4985 from_desc.cd_namelen = fcnp->cn_namelen;
4986 from_desc.cd_parentcnid = fdcp->c_fileid;
4987 from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
4988 from_desc.cd_cnid = fcp->c_cnid;
4989
4990 bzero(&to_desc, sizeof(to_desc));
4991 to_desc.cd_nameptr = (const u_int8_t *)tcnp->cn_nameptr;
4992 to_desc.cd_namelen = tcnp->cn_namelen;
4993 to_desc.cd_parentcnid = tdcp->c_fileid;
4994 to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
4995 to_desc.cd_cnid = fcp->c_cnid;
4996
4997 if ((error = hfs_start_transaction(hfsmp)) != 0) {
4998 goto out;
4999 }
5000 started_tr = 1;
5001
5002 /* hfs_vnop_link() and hfs_vnop_rename() set kHFSHasChildLinkMask
5003 * inside a journal transaction and without holding a cnode lock.
5004 * Since setting this bit relies on being in a journal transaction for
5005 * concurrency, check this bit again after we start the journal transaction for the rename
5006 * to ensure that this directory does not have any descendant that
5007 * is a directory hard link.
5008 */
5009 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
5010 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
5011 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
5012 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
5013 error = EPERM;
5014 hfs_systemfile_unlock(hfsmp, lockflags);
5015 goto out;
5016 }
5017 hfs_systemfile_unlock(hfsmp, lockflags);
5018 }
5019 }
5020
5021 // if it's a hardlink then re-lookup the name so
5022 // that we get the correct cnid in from_desc (see
5023 // the comment in hfs_removefile for more details)
5024 //
5025 if (fcp->c_flag & C_HARDLINK) {
5026 struct cat_desc tmpdesc;
5027 cnid_t real_cnid;
5028
5029 tmpdesc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
5030 tmpdesc.cd_namelen = fcnp->cn_namelen;
5031 tmpdesc.cd_parentcnid = fdcp->c_fileid;
5032 tmpdesc.cd_hint = fdcp->c_childhint;
5033 tmpdesc.cd_flags = fcp->c_desc.cd_flags & CD_ISDIR;
5034 tmpdesc.cd_encoding = 0;
5035
5036 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
5037
5038 if (cat_lookup(hfsmp, &tmpdesc, 0, 0, NULL, NULL, NULL, &real_cnid) != 0) {
5039 hfs_systemfile_unlock(hfsmp, lockflags);
5040 goto out;
5041 }
5042
5043 // use the real cnid instead of whatever happened to be there
5044 from_desc.cd_cnid = real_cnid;
5045 hfs_systemfile_unlock(hfsmp, lockflags);
5046 }
5047
5048 /*
5049 * Reserve some space in the Catalog file.
5050 */
5051 if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) {
5052 goto out;
5053 }
5054 got_cookie = 1;
5055
5056 /*
5057 * If the destination exists then it may need to be removed.
5058 *
5059 * Due to HFS's locking system, we should always move the
5060 * existing 'tvp' element to the hidden directory in hfs_vnop_rename.
5061 * Because the VNOP_LOOKUP call enters and exits the filesystem independently
5062 * of the actual vnop that it was trying to do (stat, link, readlink),
5063 * we must release the cnode lock of that element during the interim to
5064 * do MAC checking, vnode authorization, and other calls. In that time,
5065 * the item can be deleted (or renamed over). However, only in the rename
5066 * case is it inappropriate to return ENOENT from any of those calls. Either
5067 * the call should return information about the old element (stale), or get
5068 * information about the newer element that we are about to write in its place.
5069 *
5070 * HFS lookup has been modified to detect a rename and re-drive its
5071 * lookup internally. For other calls that have already succeeded in
5072 * their lookup call and are waiting to acquire the cnode lock in order
5073 * to proceed, that cnode lock will not fail due to the cnode being marked
5074 * C_NOEXISTS, because it won't have been marked as such. It will only
5075 * have C_DELETED. Thus, they will simply act on the stale open-unlinked
5076 * element. All future callers will get the new element.
5077 *
5078 * To implement this behavior, we pass the "only_unlink" argument to
5079 * hfs_removefile and hfs_removedir. This will result in the vnode acting
5080 * as though it is open-unlinked. Additionally, when we are done moving the
5081 * element to the hidden directory, we vnode_recycle the target so that it is
5082 * reclaimed as soon as possible. Reclaim and inactive are both
5083 * capable of clearing out unused blocks for an open-unlinked file or dir.
5084 */
5085 if (tvp) {
5086 //
5087 // if the destination has a document id, we need to preserve it
5088 //
5089 if (fvp != tvp) {
5090 uint32_t document_id;
5091 struct FndrExtendedDirInfo *ffip = (struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
5092 struct FndrExtendedDirInfo *tfip = (struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16);
5093
5094 if (ffip->document_id && tfip->document_id) {
5095 // Both documents are tracked. `tvp` is deleted and `fvp` is
5096 // renamed on top of it. Send FSE_DOCID_CHANGED for both inodes,
5097 // clear tombstone of `old_doc_vp` and save tombstone of `fvp`.
5098 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
5099 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
5100 FSE_ARG_INO, (ino64_t)tcp->c_fileid, // src inode #
5101 FSE_ARG_INO, (ino64_t)0ULL, // dst inode #
5102 FSE_ARG_INT32, (uint32_t)tfip->document_id,
5103 FSE_ARG_DONE);
5104 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
5105 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
5106 FSE_ARG_INO, (ino64_t)fcp->c_fileid, // src inode #
5107 FSE_ARG_INO, (ino64_t)tcp->c_fileid, // dst inode #
5108 FSE_ARG_INT32, (uint32_t)ffip->document_id,
5109 FSE_ARG_DONE);
5110 doc_tombstone_clear(doc_tombstone_get(), &old_doc_vp);
5111 doc_tombstone_save(tdvp, fvp, tcnp, hfs_get_document_id(fcp),
5112 fcp->c_fileid);
5113 } else {
5114 struct doc_tombstone *ut;
5115 ut = doc_tombstone_get();
5116
5117 document_id = tfip->document_id;
5118 tfip->document_id = 0;
5119
5120 if (document_id != 0) {
5121 // clear UF_TRACKED as well since tcp is now no longer tracked
5122 tcp->c_bsdflags &= ~UF_TRACKED;
5123 (void) cat_update(hfsmp, &tcp->c_desc, &tcp->c_attr, NULL, NULL);
5124 }
5125
5126 if (ffip->document_id == 0 && document_id != 0) {
5127 // printf("RENAME: preserving doc-id %d onto %s (from ino %d, to ino %d)\n", document_id, tcp->c_desc.cd_nameptr, tcp->c_desc.cd_cnid, fcp->c_desc.cd_cnid);
5128 fcp->c_bsdflags |= UF_TRACKED;
5129 ffip->document_id = document_id;
5130
5131 (void) cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
5132 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
5133 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
5134 FSE_ARG_INO, (ino64_t)tcp->c_fileid, // src inode #
5135 FSE_ARG_INO, (ino64_t)fcp->c_fileid, // dst inode #
5136 FSE_ARG_INT32, (uint32_t)ffip->document_id,
5137 FSE_ARG_DONE);
5138 }
5139 else if ((fcp->c_bsdflags & UF_TRACKED) && doc_tombstone_should_save(ut, fvp, fcnp)) {
5140
5141 if (ut->t_lastop_document_id) {
5142 doc_tombstone_clear(ut, NULL);
5143 }
5144 doc_tombstone_save(fdvp, fvp, fcnp,
5145 hfs_get_document_id(fcp), fcp->c_fileid);
5146
5147 //printf("RENAME: (dest-exists): saving tombstone doc-id %lld @ %s (ino %d)\n",
5148 // ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
5149 }
5150 }
5151 }
5152
5153 /*
5154 * When fvp matches tvp they could be case variants
5155 * or matching hard links.
5156 * If the caller requested an exclusive rename (VFS_RENAME_EXCL),
5157 * we still allow the rename even though the target exists, when the following
5158 * conditions are met:
5159 * 1. the volume is case insensitive
5160 * 2. source and target directories are the same
5161 * 3. source and target files are the same
5162 * 4. name only differs in case
5163 */
5164 if (fvp == tvp) {
5165 if (!rename_exclusive && !(fcp->c_flag & C_HARDLINK)) {
5166 /*
5167 * If they're not hardlinks, then fvp == tvp must mean we
5168 * are using case-insensitive HFS because case-sensitive would
5169 * not use the same vnode for both. In this case we just update
5170 * the catalog for: a -> A
5171 */
5172 goto skip_rm; /* simple case variant */
5173
5174 }
5175 /* For all cases below, we must be using hardlinks */
5176 else if ((fdvp != tdvp) ||
5177 (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) {
5178 /*
5179 * If the parent directories are not the same, AND the two items
5180 * are hardlinks, posix says to do nothing:
5181 * dir1/fred <-> dir2/bob and the op was mv dir1/fred -> dir2/bob
5182 * We just return 0 in this case.
5183 *
5184 * If case sensitivity is on, and we are using hardlinks
5185 * then renaming is supposed to do nothing.
5186 * dir1/fred <-> dir2/FRED, and op == mv dir1/fred -> dir2/FRED
5187 */
5188 goto out; /* matching hardlinks, nothing to do */
5189
5190 } else if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
5191 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
5192 /*
5193 * If we get here, then the following must be true:
5194 * a) We are running case-insensitive HFS+.
5195 * b) Both paths 'fvp' and 'tvp' are in the same parent directory.
5196 * c) the two names are case-variants of each other.
5197 *
5198 * In this case, we are really only dealing with a single catalog record
5199 * whose name is being updated.
5200 *
5201 * op is dir1/fred -> dir1/FRED
5202 *
5203 * We need to special case the name matching, because if
5204 * dir1/fred <-> dir1/bob were the two links, and the
5205 * op was dir1/fred -> dir1/bob
5206 * That would fail/do nothing.
5207 */
5208
5209 goto skip_rm; /* case-variant hardlink in the same dir */
5210 } else if (rename_exclusive) {
5211 error = EEXIST;
5212 goto out;
5213 } else {
5214 goto out; /* matching hardlink, nothing to do */
5215 }
5216 }
5217
5218
5219 if (vnode_isdir(tvp)) {
5220 /*
5221 * hfs_removedir will eventually call hfs_removefile on the directory
5222 * we're working on, because only hfs_removefile does the renaming of the
5223 * item to the hidden directory. The directory will stay around in the
5224 * hidden directory with C_DELETED until it gets an inactive or a reclaim.
5225 * That way, we can destroy all of the EAs as needed and allow new ones to be
5226 * written.
5227 */
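/*
 * HFSRM_SKIP_RESERVE indicates that the removal path should reuse the
 * catalog reservation this rename already holds (released via
 * cat_postflight in the 'out:' path below) rather than taking its own.
 */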
5228 error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE, 1);
5229 }
5230 else {
5231 error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0, NULL, 1);
5232
5233 /*
5234 * If the destination file had a resource fork vnode, then we need to get rid of
5235 * its blocks when there are no more references to it. Because the call to
5236 * hfs_removefile above always open-unlinks things, we need to force an inactive/reclaim
5237 * on the resource fork vnode, in order to prevent block leaks. Otherwise,
5238 * the resource fork vnode could prevent the data fork vnode from going out of scope
5239 * because it holds a v_parent reference on it. So we mark it for termination
5240 * with a call to vnode_recycle. hfs_vnop_reclaim has been modified so that it
5241 * can clean up the blocks of open-unlinked files and resource forks.
5242 *
5243 * We can safely call vnode_recycle on the resource fork because we took an iocount
5244 * reference on it at the beginning of the function.
5245 */
5246
5247 if ((error == 0) && (tcp->c_flag & C_DELETED) && (tvp_rsrc)) {
5248 vnode_recycle(tvp_rsrc);
5249 }
5250 }
5251
5252 if (error) {
5253 goto out;
5254 }
5255
5256 tvp_deleted = 1;
5257
5258 /* Mark 'tcp' as being deleted due to a rename */
5259 tcp->c_flag |= C_RENAMED;
5260
5261 /*
5262 * Aggressively mark tvp/tcp for termination to ensure that we recover all blocks
5263 * as quickly as possible.
5264 */
5265 vnode_recycle(tvp);
5266 } else {
5267 struct doc_tombstone *ut;
5268 ut = doc_tombstone_get();
5269
5270 //
5271 // There is nothing at the destination. If the file being renamed is
5272 // tracked, save a "tombstone" of the document_id. If the file is
5273 // not a tracked file, then see if it needs to inherit a tombstone.
5274 //
5275 // NOTE: we do not save a tombstone if the file being renamed begins
5276 // with "atmp", which is done to work around AutoCAD's bizarre
5277 // 5-step unsafe save behavior
5278 //
5279 if (fcp->c_bsdflags & UF_TRACKED) {
5280 if (doc_tombstone_should_save(ut, fvp, fcnp)) {
5281 doc_tombstone_save(fdvp, fvp, fcnp, hfs_get_document_id(fcp),
5282 fcp->c_fileid);
5283
5284 //printf("RENAME: (no dest): saving tombstone doc-id %lld @ %s (ino %d)\n",
5285 // ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
5286 } else {
5287 // intentionally do nothing
5288 }
5289 } else if ( ut->t_lastop_document_id != 0
5290 && tdvp == ut->t_lastop_parent
5291 && vnode_vid(tdvp) == ut->t_lastop_parent_vid
5292 && strcmp((char *)ut->t_lastop_filename, (char *)tcnp->cn_nameptr) == 0) {
5293
5294 //printf("RENAME: %s (ino %d) inheriting doc-id %lld\n", tcnp->cn_nameptr, fcp->c_desc.cd_cnid, ut->t_lastop_document_id);
5295 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
5296 fcp->c_bsdflags |= UF_TRACKED;
5297 fip->document_id = ut->t_lastop_document_id;
5298 cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
5299
5300 doc_tombstone_clear(ut, &old_doc_vp);
5301 } else if (ut->t_lastop_document_id && doc_tombstone_should_save(ut, fvp, fcnp) && doc_tombstone_should_save(ut, tvp, tcnp)) {
5302 // no match, clear the tombstone
5303 //printf("RENAME: clearing the tombstone %lld @ %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
5304 doc_tombstone_clear(ut, NULL);
5305 }
5306
5307 }
5308 skip_rm:
5309 /*
5310 * All done with tvp and fvp.
5311 *
5312 * We also jump to this point if there was no destination observed during lookup and namei.
5313 * However, because only iocounts are held at the VFS layer, there is nothing preventing a
5314 * competing thread from racing us and creating a file or dir at the destination of this rename
5315 * operation. If this occurs, it may cause us to get a spurious EEXIST out of the cat_rename
5316 * call below. To preserve rename's atomicity, we need to signal VFS to re-drive the
5317 * namei/lookup and restart the rename operation. EEXIST is an allowable errno to be bubbled
5318 * out of the rename syscall, but not for this reason, since it is a synonym errno for ENOTEMPTY.
5319 * To signal VFS, we return ERECYCLE (which is also used for lookup restarts). This errno
5320 * will be swallowed and it will restart the operation.
5321 */
5322
5323 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
5324 error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);
5325 hfs_systemfile_unlock(hfsmp, lockflags);
5326
5327 if (error) {
5328 if (error == EEXIST) {
5329 error = ERECYCLE;
5330 }
5331 goto out;
5332 }
5333
5334 /* Invalidate negative cache entries in the destination directory */
5335 if (tdcp->c_flag & C_NEG_ENTRIES) {
5336 cache_purge_negatives(tdvp);
5337 tdcp->c_flag &= ~C_NEG_ENTRIES;
5338 }
5339
5340 /* Update cnode's catalog descriptor */
5341 replace_desc(fcp, &out_desc);
5342 fcp->c_parentcnid = tdcp->c_fileid;
5343 fcp->c_hint = 0;
5344
5345 /*
5346 * Now indicate this cnode needs to have date-added written to the
5347 * finderinfo, but only if moving to a different directory, or if
5348 * it doesn't already have it.
5349 */
5350 if (fdvp != tdvp || !ISSET(fcp->c_attr.ca_recflags, kHFSHasDateAddedMask))
5351 fcp->c_flag |= C_NEEDS_DATEADDED;
5352
5353 (void) hfs_update (fvp, 0);
5354
5355 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_RMDIR : VOL_RMFILE,
5356 (fdcp->c_cnid == kHFSRootFolderID));
5357 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_MKDIR : VOL_MKFILE,
5358 (tdcp->c_cnid == kHFSRootFolderID));
5359
5360 /* Update both parent directories. */
5361 if (fdvp != tdvp) {
5362 if (vnode_isdir(fvp)) {
5363 /* If the source directory has directory hard link
5364 * descendants, set the kHFSHasChildLinkBit in the
5365 * destination parent hierarchy
5366 */
5367 if ((fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) &&
5368 !(tdcp->c_attr.ca_recflags & kHFSHasChildLinkMask)) {
5369
5370 tdcp->c_attr.ca_recflags |= kHFSHasChildLinkMask;
5371
5372 error = cat_set_childlinkbit(hfsmp, tdcp->c_parentcnid);
5373 if (error) {
5374 printf ("hfs_vnop_rename: error updating parent chain for %u\n", tdcp->c_cnid);
5375 error = 0;
5376 }
5377 }
5378 INC_FOLDERCOUNT(hfsmp, tdcp->c_attr);
5379 DEC_FOLDERCOUNT(hfsmp, fdcp->c_attr);
5380 }
5381 tdcp->c_entries++;
5382 tdcp->c_dirchangecnt++;
5383 tdcp->c_flag |= C_MODIFIED;
5384 hfs_incr_gencount(tdcp);
5385
5386 if (fdcp->c_entries > 0)
5387 fdcp->c_entries--;
5388 fdcp->c_dirchangecnt++;
5389 fdcp->c_flag |= C_MODIFIED;
5390 fdcp->c_touch_chgtime = TRUE;
5391 fdcp->c_touch_modtime = TRUE;
5392
5393 if (ISSET(fcp->c_flag, C_HARDLINK)) {
5394 hfs_relorigin(fcp, fdcp->c_fileid);
5395 if (fdcp->c_fileid != fdcp->c_cnid)
5396 hfs_relorigin(fcp, fdcp->c_cnid);
5397 }
5398
5399 (void) hfs_update(fdvp, 0);
5400 }
5401 hfs_incr_gencount(fdcp);
5402
5403 tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
5404 tdcp->c_touch_chgtime = TRUE;
5405 tdcp->c_touch_modtime = TRUE;
5406
5407 (void) hfs_update(tdvp, 0);
5408
5409 /* Update the vnode's name now that the rename has completed. */
5410 vnode_update_identity(fvp, tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
5411 tcnp->cn_hash, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME));
5412
5413 /*
5414 * At this point, we may have a resource fork vnode attached to the
5415 * 'from' vnode. If it exists, we will want to update its name, because
5416 * it contains the old name + _PATH_RSRCFORKSPEC ("/..namedfork/rsrc").
5417 *
5418 * Note that the only thing we need to update here is the name attached to
5419 * the vnode, since a resource fork vnode does not have a separate resource
5420 * cnode -- it's still 'fcp'.
5421 */
5422 if (fcp->c_rsrc_vp) {
5423 char* rsrc_path = NULL;
5424 int len;
5425
5426 /* Create a new temporary buffer that's going to hold the new name */
5427 rsrc_path = hfs_malloc(MAXPATHLEN);
5428 len = snprintf (rsrc_path, MAXPATHLEN, "%s%s", tcnp->cn_nameptr, _PATH_RSRCFORKSPEC);
5429 len = MIN(len, MAXPATHLEN);
5430
5431 /*
5432 * vnode_update_identity will do the following for us:
5433 * 1) release reference on the existing rsrc vnode's name.
5434 * 2) copy/insert new name into the name cache
5435 * 3) attach the new name to the resource vnode
5436 * 4) update the vnode's vid
5437 */
5438 vnode_update_identity (fcp->c_rsrc_vp, fvp, rsrc_path, len, 0, (VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE));
5439
5440 /* Free the memory associated with the resource fork's name */
5441 hfs_free(rsrc_path, MAXPATHLEN);
5442 }
5443 out:
5444 if (got_cookie) {
5445 cat_postflight(hfsmp, &cookie, p);
5446 }
5447 if (started_tr) {
5448 hfs_end_transaction(hfsmp);
5449 }
5450
5451 fdcp->c_flag &= ~C_DIR_MODIFICATION;
5452 wakeup((caddr_t)&fdcp->c_flag);
5453 if (fdvp != tdvp) {
5454 tdcp->c_flag &= ~C_DIR_MODIFICATION;
5455 wakeup((caddr_t)&tdcp->c_flag);
5456 }
5457
5458 const ino64_t file_id = fcp->c_fileid;
5459
5460 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
5461
5462 if (took_trunc_lock) {
5463 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
5464 }
5465
5466 /* Now vnode_put the resource fork's vnode if necessary */
5467 if (tvp_rsrc) {
5468 vnode_put(tvp_rsrc);
5469 tvp_rsrc = NULL;
5470 }
5471
5472 /* After tvp is removed the only acceptable error is EIO */
5473 if (error && tvp_deleted)
5474 error = EIO;
5475
5476 /* If we want to reintroduce notifications for renames, this is the
5477 place to do it. */
5478
5479 if (old_doc_vp) {
5480 cnode_t *ocp = VTOC(old_doc_vp);
5481 hfs_lock_always(ocp, HFS_EXCLUSIVE_LOCK);
5482 struct FndrExtendedFileInfo *ofip = (struct FndrExtendedFileInfo *)((char *)&ocp->c_attr.ca_finderinfo + 16);
5483
5484 const uint32_t doc_id = ofip->document_id;
5485 const ino64_t old_file_id = ocp->c_fileid;
5486
5487 // printf("clearing doc-id from ino %d\n", ocp->c_desc.cd_cnid);
5488 ofip->document_id = 0;
5489 ocp->c_bsdflags &= ~UF_TRACKED;
5490 ocp->c_flag |= C_MODIFIED;
5491
5492 hfs_unlock(ocp);
5493 vnode_put(old_doc_vp);
5494
5495 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
5496 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
5497 FSE_ARG_INO, old_file_id, // src inode #
5498 FSE_ARG_INO, file_id, // dst inode #
5499 FSE_ARG_INT32, doc_id,
5500 FSE_ARG_DONE);
5501 }
5502
5503 return (error);
5504 }
5505
5506
5507 /*
5508 * Make a directory.
5509 */
5510 int
5511 hfs_vnop_mkdir(struct vnop_mkdir_args *ap)
5512 {
5513 /***** HACK ALERT ********/
5514 ap->a_cnp->cn_flags |= MAKEENTRY;
5515 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
5516 }
5517
5518
5519 /*
5520 * Create a symbolic link.
5521 */
5522 int
5523 hfs_vnop_symlink(struct vnop_symlink_args *ap)
5524 {
5525 struct vnode **vpp = ap->a_vpp;
5526 struct vnode *dvp = ap->a_dvp;
5527 struct vnode *vp = NULL;
5528 struct cnode *cp = NULL;
5529 struct hfsmount *hfsmp;
5530 struct filefork *fp;
5531 struct buf *bp = NULL;
5532 char *datap;
5533 int started_tr = 0;
5534 u_int32_t len;
5535 int error;
5536
5537 /* HFS standard disks don't support symbolic links */
5538 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord)
5539 return (ENOTSUP);
5540
5541 /* Check for empty target name */
5542 if (ap->a_target[0] == 0)
5543 return (EINVAL);
5544
5545 hfsmp = VTOHFS(dvp);
5546
5547 len = strlen(ap->a_target);
5548 if (len > MAXPATHLEN)
5549 return (ENAMETOOLONG);
5550
5551 /* Check for free space */
5552 if (((u_int64_t)hfs_freeblks(hfsmp, 0) * (u_int64_t)hfsmp->blockSize) < len) {
5553 return (ENOSPC);
5554 }
5555
5556 /* Create the vnode */
5557 ap->a_vap->va_mode |= S_IFLNK;
5558 if ((error = hfs_makenode(dvp, vpp, ap->a_cnp, ap->a_vap, ap->a_context))) {
5559 goto out;
5560 }
5561 vp = *vpp;
5562 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
5563 goto out;
5564 }
5565 cp = VTOC(vp);
5566 fp = VTOF(vp);
5567
5568 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
5569 goto out;
5570 }
5571
5572 #if QUOTA
5573 (void)hfs_getinoquota(cp);
5574 #endif /* QUOTA */
5575
5576 if ((error = hfs_start_transaction(hfsmp)) != 0) {
5577 goto out;
5578 }
5579 started_tr = 1;
5580
5581 /*
5582 * Allocate space for the link.
5583 *
5584 * We are already inside a transaction at this point.
5585 *
5586 * We don't need the truncate lock since a symlink is treated as a system file.
5587 */
5588 error = hfs_truncate(vp, len, IO_NOZEROFILL, 0, ap->a_context);
5589
5590 /* On errors, remove the symlink file */
5591 if (error) {
5592 /*
5593 * End the transaction so we don't re-take the cnode lock
5594 * below while inside a transaction (lock order violation).
5595 */
5596 hfs_end_transaction(hfsmp);
5597
5598 /* hfs_removefile() requires holding the truncate lock */
5599 hfs_unlock(cp);
5600 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
5601 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
5602
5603 if (hfs_start_transaction(hfsmp) != 0) {
5604 started_tr = 0;
5605 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
5606 goto out;
5607 }
5608
5609 (void) hfs_removefile(dvp, vp, ap->a_cnp, 0, 0, 0, NULL, 0);
5610 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
5611 goto out;
5612 }
5613
5614 /* Write the link to disk */
5615 bp = buf_getblk(vp, (daddr64_t)0, roundup((int)fp->ff_size, hfsmp->hfs_physical_block_size),
5616 0, 0, BLK_META);
5617 if (hfsmp->jnl) {
5618 journal_modify_block_start(hfsmp->jnl, bp);
5619 }
5620 datap = (char *)buf_dataptr(bp);
5621 bzero(datap, buf_size(bp));
5622 bcopy(ap->a_target, datap, len);
5623
5624 if (hfsmp->jnl) {
5625 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
5626 } else {
5627 buf_bawrite(bp);
5628 }
5629 out:
5630 if (started_tr)
5631 hfs_end_transaction(hfsmp);
5632 if ((cp != NULL) && (vp != NULL)) {
5633 hfs_unlock(cp);
5634 }
5635 if (error) {
5636 if (vp) {
5637 vnode_put(vp);
5638 }
5639 *vpp = NULL;
5640 }
5641 return (error);
5642 }
5643
5644
5645 /* structures to hold a "." or ".." directory entry */
5646 struct hfs_stddotentry {
5647 u_int32_t d_fileno; /* unique file number */
5648 u_int16_t d_reclen; /* length of this structure */
5649 u_int8_t d_type; /* dirent file type */
5650 u_int8_t d_namlen; /* len of filename */
5651 char d_name[4]; /* "." or ".." */
5652 };
5653
5654 struct hfs_extdotentry {
5655 u_int64_t d_fileno; /* unique file number */
5656 u_int64_t d_seekoff; /* seek offset (optional, used by servers) */
5657 u_int16_t d_reclen; /* length of this structure */
5658 u_int16_t d_namlen; /* len of filename */
5659 u_int8_t d_type; /* dirent file type */
5660 u_char d_name[3]; /* "." or ".." */
5661 };
5662
5663 typedef union {
5664 struct hfs_stddotentry std;
5665 struct hfs_extdotentry ext;
5666 } hfs_dotentry_t;
5667
5668 /*
5669 * hfs_vnop_readdir reads directory entries into the buffer pointed
5670 * to by uio, in a filesystem independent format. Up to uio_resid
5671 * bytes of data can be transferred. The data in the buffer is a
5672 * series of packed dirent structures where each one contains the
5673 * following entries:
5674 *
5675 * u_int32_t d_fileno; // file number of entry
5676 * u_int16_t d_reclen; // length of this record
5677 * u_int8_t d_type; // file type
5678 * u_int8_t d_namlen; // length of string in d_name
5679 * char d_name[MAXNAMELEN+1]; // null terminated file name
5680 *
5681 * The current position (uio_offset) refers to the next block of
5682 * entries. The offset can only be set to a value previously
5683 * returned by hfs_vnop_readdir or zero. This offset does not have
5684 * to match the number of bytes returned (in uio_resid).
5685 *
5686 * In fact, the offset used by HFS is essentially an index (26 bits)
5687 * with a tag (6 bits). The tag is for associating the next request
5688 * with the current request. This enables us to have multiple threads
5689 * reading the directory while the directory is also being modified.
5690 *
5691 * Each tag/index pair is tied to a unique directory hint. The hint
5692 * contains information (filename) needed to build the catalog b-tree
5693 * key for finding the next set of entries.
5694 *
5695 * If the directory is marked as deleted-but-in-use (cp->c_flag & C_DELETED),
5696 * do NOT synthesize entries for "." and "..".
5697 */
5698 int
5699 hfs_vnop_readdir(struct vnop_readdir_args *ap)
5700 {
5701 struct vnode *vp = ap->a_vp;
5702 uio_t uio = ap->a_uio;
5703 struct cnode *cp = VTOC(vp);
5704 struct hfsmount *hfsmp = VTOHFS(vp);
5705 directoryhint_t *dirhint = NULL;
5706 directoryhint_t localhint;
5707 off_t offset;
5708 off_t startoffset;
5709 int error = 0;
5710 int eofflag = 0;
5711 user_addr_t user_start = 0;
5712 user_size_t user_len = 0;
5713 user_size_t user_original_resid = 0;
5714 int index;
5715 unsigned int tag;
5716 int items;
5717 int lockflags;
5718 int extended;
5719 int nfs_cookies;
5720 cnid_t cnid_hint = 0;
5721 int bump_valence = 0;
5722
5723 items = 0;
5724 startoffset = offset = uio_offset(uio);
5725 extended = (ap->a_flags & VNODE_READDIR_EXTENDED);
5726 nfs_cookies = extended && (ap->a_flags & VNODE_READDIR_REQSEEKOFF);
5727
5728 /* Sanity check the uio data. */
5729 if (uio_iovcnt(uio) > 1)
5730 return (EINVAL);
5731
5732 if (VTOC(vp)->c_bsdflags & UF_COMPRESSED) {
5733 int compressed = hfs_file_is_compressed(VTOC(vp), 0); /* 0 == take the cnode lock */
5734 if (VTOCMP(vp) != NULL && !compressed) {
5735 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
5736 if (error) {
5737 return error;
5738 }
5739 }
5740 }
5741
5742 //
5743 // We have to lock the user's buffer here so that we won't
5744 // fault on it after we've acquired a shared lock on the
5745 // catalog file. The issue is that you can get a 3-way
5746 // deadlock if someone else starts a transaction and then
5747 // tries to lock the catalog file but can't because we're
5748 // here and we can't service our page fault because VM is
5749 // blocked trying to start a transaction as a result of
5750 // trying to free up pages for our page fault. It's messy
5751 // but it does happen on dual-processors that are paging
5752 // heavily (see radar 3082639 for more info). By locking
5753 // the buffer up-front we prevent ourselves from faulting
5754 // while holding the shared catalog file lock.
5755 //
5756 // Fortunately this and hfs_search() are the only two places
5757 // currently (10/30/02) that can fault on user data with a
5758 // shared lock on the catalog file.
5759 //
5760 if (hfsmp->jnl && uio_isuserspace(uio)) {
5761 user_start = uio_curriovbase(uio);
5762 user_len = uio_curriovlen(uio);
5763
5764 /* Bounds check the user buffer */
5765 if (user_len > (256 * 1024)) {
5766 /* only allow the user to wire down at most 256k */
5767 user_len = (256 * 1024);
5768 user_original_resid = uio_resid(uio);
5769 uio_setresid (uio, (user_ssize_t)(256 * 1024));
5770 }
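/*
 * The caller's original resid was stashed above so it can be restored
 * once the clamped 256k window has been consumed (see the
 * user_original_resid fixups later in this function).
 */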
5771
5772 if ((error = vslock(user_start, user_len)) != 0) {
5773 if (user_original_resid > 0) {
5774 uio_setresid(uio, user_original_resid);
5775 user_original_resid = 0;
5776 }
5777 return error;
5778 }
5779 }
5780
5781 /* Note that the dirhint calls require an exclusive lock. */
5782 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
5783 if (user_start) {
5784 if (user_original_resid > 0) {
5785 uio_setresid(uio, user_original_resid);
5786 user_original_resid = 0;
5787 }
5788 vsunlock(user_start, user_len, TRUE);
5789 }
5790 return error;
5791 }
5792
5793 /* Pick up cnid hint (if any). */
5794 if (nfs_cookies) {
5795 cnid_hint = (cnid_t)(uio_offset(uio) >> 32);
5796 uio_setoffset(uio, uio_offset(uio) & 0x00000000ffffffffLL);
5797 if (cnid_hint == INT_MAX) { /* searching past the last item */
5798 eofflag = 1;
5799 goto out;
5800 }
5801 }
5802 /*
5803 * Synthesize entries for "." and "..", unless the directory has
5804 * been deleted, but not closed yet (lazy delete in progress).
5805 */
5806 if (offset == 0 && !(cp->c_flag & C_DELETED)) {
5807
5808 size_t uiosize;
5809
5810 /*
5811 * We could use a union of the two types of dot entries (HFS / HFS+)
5812 * but it makes static analysis of this code difficult. The problem is that
5813 * the HFS standard dot entry is smaller than the HFS+ one, and we also ideally
5814 * want the uiomove to operate on a two-element adjacent array. If we used the
5815 * array of unions, we would have to do two separate uiomoves because the memory
5816 * for the hfs standard dot entries would not be adjacent to one another.
5817 * So just allocate the entries on the stack in separate cases.
5818 */
5819
5820 if (extended) {
5821 hfs_dotentry_t dotentry[2];
5822
5823 /* HFS Plus */
5824 struct hfs_extdotentry *entry = &dotentry[0].ext;
5825
5826 entry->d_fileno = cp->c_cnid;
5827 entry->d_reclen = sizeof(struct hfs_extdotentry);
5828 entry->d_type = DT_DIR;
5829 entry->d_namlen = 1;
5830 entry->d_name[0] = '.';
5831 entry->d_name[1] = '\0';
5832 entry->d_name[2] = '\0';
5833 entry->d_seekoff = 1;
5834
5835 ++entry;
5836 entry->d_fileno = cp->c_parentcnid;
5837 entry->d_reclen = sizeof(struct hfs_extdotentry);
5838 entry->d_type = DT_DIR;
5839 entry->d_namlen = 2;
5840 entry->d_name[0] = '.';
5841 entry->d_name[1] = '.';
5842 entry->d_name[2] = '\0';
5843 entry->d_seekoff = 2;
5844 uiosize = 2 * sizeof(struct hfs_extdotentry);
5845
5846 if ((error = uiomove((caddr_t)dotentry, uiosize, uio))) {
5847 goto out;
5848 }
5849
5850 } else {
5851 struct hfs_stddotentry hfs_std_dotentries[2];
5852
5853 /* HFS Standard */
5854 struct hfs_stddotentry *entry = &hfs_std_dotentries[0];
5855
5856 entry->d_fileno = cp->c_cnid;
5857 entry->d_reclen = sizeof(struct hfs_stddotentry);
5858 entry->d_type = DT_DIR;
5859 entry->d_namlen = 1;
5860 *(int *)&entry->d_name[0] = 0;
5861 entry->d_name[0] = '.';
5862
5863 ++entry;
5864 entry->d_fileno = cp->c_parentcnid;
5865 entry->d_reclen = sizeof(struct hfs_stddotentry);
5866 entry->d_type = DT_DIR;
5867 entry->d_namlen = 2;
5868 *(int *)&entry->d_name[0] = 0;
5869 entry->d_name[0] = '.';
5870 entry->d_name[1] = '.';
5871 uiosize = 2 * sizeof(struct hfs_stddotentry);
5872
5873 if ((error = uiomove((caddr_t)hfs_std_dotentries, uiosize, uio))) {
5874 goto out;
5875 }
5876 }
5877
5878 offset += 2;
5879 }
5880
5881 /*
5882 * Intentionally avoid checking the valence here. If we
5883 * have FS corruption that reports the valence is 0, even though it
5884 * has contents, we might artificially skip over iterating
5885 * this directory.
5886 */
5887
5888 /* Convert offset into a catalog directory index. */
5889 index = (offset & HFS_INDEX_MASK) - 2;
5890 tag = offset & ~HFS_INDEX_MASK;
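/*
 * The "- 2" accounts for the synthesized "." and ".." entries, which
 * occupy the first two offsets of every directory.
 */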
5891
5892 /* Lock catalog during cat_findname and cat_getdirentries. */
5893 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
5894
5895 /* When called from NFS, try and resolve a cnid hint. */
5896 if (nfs_cookies && cnid_hint != 0) {
5897 if (cat_findname(hfsmp, cnid_hint, &localhint.dh_desc) == 0) {
5898 if ( localhint.dh_desc.cd_parentcnid == cp->c_fileid) {
5899 localhint.dh_index = index - 1;
5900 localhint.dh_time = 0;
5901 bzero(&localhint.dh_link, sizeof(localhint.dh_link));
5902 dirhint = &localhint; /* don't forget to release the descriptor */
5903 } else {
5904 cat_releasedesc(&localhint.dh_desc);
5905 }
5906 }
5907 }
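/*
 * If the cnid hint resolved to an entry in this directory, localhint now
 * carries the descriptor (name) needed to rebuild the catalog b-tree key
 * and resume iteration at that entry.
 */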
5908
5909 /* Get a directory hint (cnode must be locked exclusive) */
5910 if (dirhint == NULL) {
5911 dirhint = hfs_getdirhint(cp, ((index - 1) & HFS_INDEX_MASK) | tag, 0);
5912
5913 /* Hide tag from catalog layer. */
5914 dirhint->dh_index &= HFS_INDEX_MASK;
5915 if (dirhint->dh_index == HFS_INDEX_MASK) {
5916 dirhint->dh_index = -1;
5917 }
5918 }
5919
5920 if (index == 0) {
5921 dirhint->dh_threadhint = cp->c_dirthreadhint;
5922 }
5923 else {
5924 /*
5925 * If we have a non-zero index, there is a possibility that during the last
5926 * call to hfs_vnop_readdir we hit EOF for this directory. If that is the case
5927 * then we don't want to return any new entries for the caller. Just return 0
5928 * items, mark the eofflag, and bail out. Because we won't have done any work, the
5929 * code at the end of the function will release the dirhint for us.
5930 *
5931 * Don't forget to unlock the catalog lock on the way out, too.
5932 */
5933 if (dirhint->dh_desc.cd_flags & CD_EOF) {
5934 error = 0;
5935 eofflag = 1;
5936 uio_setoffset(uio, startoffset);
5937 if (user_original_resid > 0) {
5938 uio_setresid(uio, user_original_resid);
5939 user_original_resid = 0;
5940 }
5941 hfs_systemfile_unlock (hfsmp, lockflags);
5942
5943 goto seekoffcalc;
5944 }
5945 }
5946
5947 /* Pack the buffer with dirent entries. */
5948 error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, ap->a_flags, &items, &eofflag);
5949
5950 if (user_original_resid > 0) {
5951 user_original_resid = user_original_resid - ((user_ssize_t)256*1024 - uio_resid(uio));
5952 uio_setresid(uio, user_original_resid);
5953 user_original_resid = 0;
5954 }
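/*
 * Subtract whatever portion of the 256k window was consumed from the
 * stashed resid and hand the remainder back to the caller's uio.
 */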
5955
5956 if (index == 0 && error == 0) {
5957 cp->c_dirthreadhint = dirhint->dh_threadhint;
5958 }
5959
5960 hfs_systemfile_unlock(hfsmp, lockflags);
5961
5962 if (error != 0) {
5963 goto out;
5964 }
5965
5966 /* Get index to the next item */
5967 index += items;
5968
5969 if (items >= (int)cp->c_entries) {
5970 eofflag = 1;
5971 }
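/* We returned at least as many items as the directory's valence, so report EOF. */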
5972
5973 /*
5974 * Detect valence FS corruption.
5975 *
5976 * We are holding the cnode lock exclusive, so there should not be
5977 * anybody modifying the valence field of this cnode. If we enter
5978 * this block, that means we observed filesystem corruption, because
5979 * this directory reported a valence of 0, yet we found at least one
5980 * item. In this case, we need to minimally self-heal this
5981 * directory to prevent userland from tripping over a directory
5982 * that appears empty (getattr of valence reports 0), but actually
5983 * has contents.
5984 *
5985 * We'll force the cnode update at the end of the function after
5986 * completing all of the normal getdirentries steps.
5987 */
5988 if ((cp->c_entries == 0) && (items > 0)) {
5989 /* disk corruption */
5990 cp->c_entries++;
5991 /* Mark the cnode as dirty. */
5992 cp->c_flag |= C_MODIFIED;
5993 printf("hfs_vnop_readdir: repairing valence to non-zero! \n");
5994 bump_valence++;
5995 }
5996
5997
5998 /* Convert catalog directory index back into an offset. */
5999 while (tag == 0)
6000 tag = (++cp->c_dirhinttag) << HFS_INDEX_BITS;
6001 uio_setoffset(uio, (index + 2) | tag);
6002 dirhint->dh_index |= tag;
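/*
 * The tag occupies the bits above HFS_INDEX_BITS (see the block comment
 * at the top of this function), so concurrent readers of the same
 * directory each get a distinct hint for their next request.
 */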
6003
6004 seekoffcalc:
6005 cp->c_touch_acctime = TRUE;
6006
6007 if (ap->a_numdirent) {
6008 if (startoffset == 0)
6009 items += 2;
6010 *ap->a_numdirent = items;
6011 }
6012
6013 out:
6014 if (user_start) {
6015 if (user_original_resid > 0) {
6016 uio_setresid(uio, user_original_resid);
6017 user_original_resid = 0;
6018 }
6019 vsunlock(user_start, user_len, TRUE);
6020 }
6021 /* If we didn't do anything then go ahead and dump the hint. */
6022 if ((dirhint != NULL) &&
6023 (dirhint != &localhint) &&
6024 (uio_offset(uio) == startoffset)) {
6025 hfs_reldirhint(cp, dirhint);
6026 eofflag = 1;
6027 }
6028 if (ap->a_eofflag) {
6029 *ap->a_eofflag = eofflag;
6030 }
6031 if (dirhint == &localhint) {
6032 cat_releasedesc(&localhint.dh_desc);
6033 }
6034
6035 if (bump_valence) {
6036 /* force the update before dropping the cnode lock*/
6037 hfs_update(vp, 0);
6038 }
6039
6040 hfs_unlock(cp);
6041
6042 return (error);
6043 }
6044
6045
6046 /*
6047 * Read contents of a symbolic link.
6048 */
6049 int
6050 hfs_vnop_readlink(struct vnop_readlink_args *ap)
6051 {
6052 struct vnode *vp = ap->a_vp;
6053 struct cnode *cp;
6054 struct filefork *fp;
6055 int error;
6056
6057 if (!vnode_islnk(vp))
6058 return (EINVAL);
6059
6060 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
6061 return (error);
6062 cp = VTOC(vp);
6063 fp = VTOF(vp);
6064
6065 /* Zero length sym links are not allowed */
6066 if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
6067 error = EINVAL;
6068 goto exit;
6069 }
6070
6071 /* Cache the path so we don't waste buffer cache resources */
6072 if (fp->ff_symlinkptr == NULL) {
6073 struct buf *bp = NULL;
6074
6075 fp->ff_symlinkptr = hfs_malloc(fp->ff_size);
6076 error = (int)buf_meta_bread(vp, (daddr64_t)0,
6077 roundup((int)fp->ff_size, VTOHFS(vp)->hfs_physical_block_size),
6078 vfs_context_ucred(ap->a_context), &bp);
6079 if (error) {
6080 if (bp)
6081 buf_brelse(bp);
6082 if (fp->ff_symlinkptr) {
6083 hfs_free(fp->ff_symlinkptr, fp->ff_size);
6084 fp->ff_symlinkptr = NULL;
6085 }
6086 goto exit;
6087 }
6088 bcopy((char *)buf_dataptr(bp), fp->ff_symlinkptr, (size_t)fp->ff_size);
6089
6090 if (VTOHFS(vp)->jnl && (buf_flags(bp) & B_LOCKED) == 0) {
6091 buf_markinvalid(bp); /* data no longer needed */
6092 }
6093 buf_brelse(bp);
6094 }
6095 error = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
6096
6097 /*
6098 * Keep track of blocks read
6099 */
6100 if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (error == 0)) {
6101
6102 /*
6103 * If this file hasn't been seen since the start of
6104 * the current sampling period then start over.
6105 */
6106 if (cp->c_atime < VTOHFS(vp)->hfc_timebase)
6107 VTOF(vp)->ff_bytesread = fp->ff_size;
6108 else
6109 VTOF(vp)->ff_bytesread += fp->ff_size;
6110
6111 // if (VTOF(vp)->ff_bytesread > fp->ff_size)
6112 // cp->c_touch_acctime = TRUE;
6113 }
6114
6115 exit:
6116 hfs_unlock(cp);
6117 return (error);
6118 }
6119
6120
6121 /*
6122 * Get configurable pathname variables.
6123 */
6124 int
6125 hfs_vnop_pathconf(struct vnop_pathconf_args *ap)
6126 {
6127
6128 #if CONFIG_HFS_STD
6129 int std_hfs = (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD);
6130 #endif
6131
6132 switch (ap->a_name) {
6133 case _PC_LINK_MAX:
6134 #if CONFIG_HFS_STD
6135 if (std_hfs) {
6136 *ap->a_retval = 1;
6137 } else
6138 #endif
6139 {
6140 *ap->a_retval = HFS_LINK_MAX;
6141 }
6142 break;
6143 case _PC_NAME_MAX:
6144 #if CONFIG_HFS_STD
6145 if (std_hfs) {
6146 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
6147 } else
6148 #endif
6149 {
6150 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
6151 }
6152 break;
6153 case _PC_PATH_MAX:
6154 *ap->a_retval = PATH_MAX; /* 1024 */
6155 break;
6156 case _PC_PIPE_BUF:
6157 *ap->a_retval = PIPE_BUF;
6158 break;
6159 case _PC_CHOWN_RESTRICTED:
6160 *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */
6161 break;
6162 case _PC_NO_TRUNC:
6163 *ap->a_retval = 200112; /* _POSIX_NO_TRUNC */
6164 break;
6165 case _PC_NAME_CHARS_MAX:
6166 #if CONFIG_HFS_STD
6167 if (std_hfs) {
6168 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
6169 } else
6170 #endif
6171 {
6172 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
6173 }
6174 break;
6175 case _PC_CASE_SENSITIVE:
6176 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE)
6177 *ap->a_retval = 1;
6178 else
6179 *ap->a_retval = 0;
6180 break;
6181 case _PC_CASE_PRESERVING:
6182 *ap->a_retval = 1;
6183 break;
6184 case _PC_FILESIZEBITS:
6185 /* number of bits to store max file size */
6186 #if CONFIG_HFS_STD
6187 if (std_hfs) {
6188 *ap->a_retval = 32;
6189 } else
6190 #endif
6191 {
6192 *ap->a_retval = 64;
6193 }
6194 break;
6195 case _PC_XATTR_SIZE_BITS:
6196 /* Number of bits to store maximum extended attribute size */
6197 *ap->a_retval = HFS_XATTR_SIZE_BITS;
6198 break;
6199 default:
6200 return (EINVAL);
6201 }
6202
6203 return (0);
6204 }
6205
6206 /*
6207 * Prepares a fork for cat_update by making sure ff_size and ff_blocks
6208 * are no bigger than the valid data on disk, thus reducing the chance
6209 * of exposing uninitialised data in the event of an unclean unmount.
6210 * cf_buf is where to put the temporary copy if required. (It can
6211 * be inside the filefork itself.)
6212 */
6213 const struct cat_fork *
6214 hfs_prepare_fork_for_update(filefork_t *ff,
6215 const struct cat_fork *cf,
6216 struct cat_fork *cf_buf,
6217 uint32_t block_size)
6218 {
6219 if (!ff)
6220 return NULL;
6221
6222 if (!cf)
6223 cf = &ff->ff_data;
6224 if (!cf_buf)
6225 cf_buf = &ff->ff_data;
6226
6227 off_t max_size = ff->ff_size;
6228
6229 // Check first invalid range
6230 if (!TAILQ_EMPTY(&ff->ff_invalidranges))
6231 max_size = TAILQ_FIRST(&ff->ff_invalidranges)->rl_start;
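/*
 * Bytes at or beyond the start of the first invalid range have not been
 * written to disk yet, so the on-disk size must not extend past it.
 */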
6232
6233 if (!ff->ff_unallocblocks && ff->ff_size <= max_size)
6234 return cf; // Nothing to do
6235
6236 if (ff->ff_blocks < ff->ff_unallocblocks) {
6237 panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
6238 ff->ff_blocks, ff->ff_unallocblocks);
6239 }
6240
6241 struct cat_fork *out = cf_buf;
6242
6243 if (out != cf)
6244 bcopy(cf, out, sizeof(*cf));
6245
6246 // Adjust cf_blocks for cf_vblocks
6247 out->cf_blocks -= out->cf_vblocks;
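/*
 * cf_vblocks counts blocks that have only been borrowed (delayed
 * allocations not yet committed on disk), so exclude them from the
 * on-disk record.
 */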
6248
6249 /*
6250 * Here we trim the size with the updated cf_blocks. This is
6251 * probably unnecessary now because the invalid ranges should
6252 * catch this (but that wasn't always the case).
6253 */
6254 off_t alloc_bytes = hfs_blk_to_bytes(out->cf_blocks, block_size);
6255 if (out->cf_size > alloc_bytes)
6256 out->cf_size = alloc_bytes;
6257
6258 // Trim cf_size to first invalid range
6259 if (out->cf_size > max_size)
6260 out->cf_size = max_size;
6261
6262 return out;
6263 }
6264
6265 /*
6266 * Update a cnode's on-disk metadata.
6267 *
6268 * The cnode must be locked exclusive. See declaration for possible
6269 * options.
6270 */
6271 int
6272 hfs_update(struct vnode *vp, int options)
6273 {
6274 struct cnode *cp = VTOC(vp);
6275 struct proc *p;
6276 const struct cat_fork *dataforkp = NULL;
6277 const struct cat_fork *rsrcforkp = NULL;
6278 struct cat_fork datafork;
6279 struct cat_fork rsrcfork;
6280 struct hfsmount *hfsmp;
6281 int lockflags;
6282 int error;
6283 uint32_t tstate = 0;
6284
6285 if (ISSET(cp->c_flag, C_NOEXISTS))
6286 return 0;
6287
6288 p = current_proc();
6289 hfsmp = VTOHFS(vp);
6290
6291 if (((vnode_issystem(vp) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))) ||
6292 hfsmp->hfs_catalog_vp == NULL){
6293 return (0);
6294 }
6295 if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (cp->c_mode == 0)) {
6296 CLR(cp->c_flag, C_MODIFIED | C_MINOR_MOD | C_NEEDS_DATEADDED);
6297 cp->c_touch_acctime = 0;
6298 cp->c_touch_chgtime = 0;
6299 cp->c_touch_modtime = 0;
6300 return (0);
6301 }
6302 if (kdebug_enable) {
6303 if (cp->c_touch_acctime || cp->c_atime != cp->c_attr.ca_atimeondisk)
6304 tstate |= DBG_HFS_UPDATE_ACCTIME;
6305 if (cp->c_touch_modtime)
6306 tstate |= DBG_HFS_UPDATE_MODTIME;
6307 if (cp->c_touch_chgtime)
6308 tstate |= DBG_HFS_UPDATE_CHGTIME;
6309
6310 if (cp->c_flag & C_MODIFIED)
6311 tstate |= DBG_HFS_UPDATE_MODIFIED;
6312 if (ISSET(options, HFS_UPDATE_FORCE))
6313 tstate |= DBG_HFS_UPDATE_FORCE;
6314 if (cp->c_flag & C_NEEDS_DATEADDED)
6315 tstate |= DBG_HFS_UPDATE_DATEADDED;
6316 if (cp->c_flag & C_MINOR_MOD)
6317 tstate |= DBG_HFS_UPDATE_MINOR;
6318 }
6319 hfs_touchtimes(hfsmp, cp);
6320
6321 if (!ISSET(cp->c_flag, C_MODIFIED | C_MINOR_MOD)
6322 && !hfs_should_save_atime(cp)) {
6323 // Nothing to update
6324 return 0;
6325 }
6326
6327 KDBG(HFSDBG_UPDATE | DBG_FUNC_START, kdebug_vnode(vp), tstate);
6328
6329 bool check_txn = false;
6330
6331 if (!ISSET(options, HFS_UPDATE_FORCE) && !ISSET(cp->c_flag, C_MODIFIED)) {
6332 /*
6333 * This must be a minor modification. If the current
6334 * transaction already has an update for this node, then we
6335 * bundle in the modification.
6336 */
6337 if (hfsmp->jnl
6338 && journal_current_txn(hfsmp->jnl) == cp->c_update_txn) {
6339 check_txn = true;
6340 } else {
6341 tstate |= DBG_HFS_UPDATE_SKIPPED;
6342 error = 0;
6343 goto exit;
6344 }
6345 }
6346
6347 if ((error = hfs_start_transaction(hfsmp)) != 0)
6348 goto exit;
6349
6350 if (check_txn
6351 && journal_current_txn(hfsmp->jnl) != cp->c_update_txn) {
6352 hfs_end_transaction(hfsmp);
6353 tstate |= DBG_HFS_UPDATE_SKIPPED;
6354 error = 0;
6355 goto exit;
6356 }
6357
6358 if (cp->c_datafork)
6359 dataforkp = &cp->c_datafork->ff_data;
6360 if (cp->c_rsrcfork)
6361 rsrcforkp = &cp->c_rsrcfork->ff_data;
6362
6363 /*
6364 * Modify the values passed to cat_update based on whether or not
6365 * the file has invalid ranges or borrowed blocks.
6366 */
6367 dataforkp = hfs_prepare_fork_for_update(cp->c_datafork, NULL, &datafork, hfsmp->blockSize);
6368 rsrcforkp = hfs_prepare_fork_for_update(cp->c_rsrcfork, NULL, &rsrcfork, hfsmp->blockSize);
6369
6370 if (__builtin_expect(kdebug_enable & KDEBUG_TRACE, 0)) {
6371 long dbg_parms[NUMPARMS];
6372 int err;
6373 #ifdef VN_GETPATH_NEW
6374 size_t dbg_namelen;
6375 #else // VN_GETPATH_NEW
6376 int dbg_namelen;
6377 #endif // VN_GETPATH_NEW
6378
6379 dbg_namelen = NUMPARMS * sizeof(long);
6380 err = vn_getpath_ext(vp, NULLVP, (char *)dbg_parms, &dbg_namelen, 0);
6381
6382 if (!err && (dbg_namelen < (int)sizeof(dbg_parms)))
6383 memset((char *)dbg_parms + dbg_namelen, 0, sizeof(dbg_parms) - dbg_namelen);
6384
6385 kdebug_lookup_gen_events(dbg_parms, dbg_namelen, (void *)vp, TRUE);
6386 }
6387
6388 /*
6389 * Lock the Catalog b-tree file.
6390 */
6391 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
6392
6393 error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);
6394
6395 if (hfsmp->jnl)
6396 cp->c_update_txn = journal_current_txn(hfsmp->jnl);
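/*
 * Record which journal transaction carried this catalog update; a later
 * minor modification is only written out immediately if it lands in the
 * same transaction (see the check_txn handling above).
 */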
6397
6398 hfs_systemfile_unlock(hfsmp, lockflags);
6399
6400 CLR(cp->c_flag, C_MODIFIED | C_MINOR_MOD);
6401
6402 hfs_end_transaction(hfsmp);
6403
6404 exit:
6405
6406 KDBG(HFSDBG_UPDATE | DBG_FUNC_END, kdebug_vnode(vp), tstate, error);
6407
6408 return error;
6409 }
6410
6411 /*
6412 * Allocate a new node
6413 */
6414 int
6415 hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
6416 struct vnode_attr *vap, vfs_context_t ctx)
6417 {
6418 struct cnode *cp = NULL;
6419 struct cnode *dcp = NULL;
6420 struct vnode *tvp;
6421 struct hfsmount *hfsmp;
6422 struct cat_desc in_desc, out_desc;
6423 struct cat_attr attr;
6424 struct timeval tv;
6425 int lockflags;
6426 int error, started_tr = 0;
6427 enum vtype vnodetype;
6428 int mode;
6429 int newvnode_flags = 0;
6430 u_int32_t gnv_flags = 0;
6431 int protectable_target = 0;
6432 int nocache = 0;
6433 vnode_t old_doc_vp = NULL;
6434
6435 #if CONFIG_PROTECT
6436 struct cprotect *entry = NULL;
6437 int32_t cp_class = -1;
6438
6439 /*
6440 * By default, it's OK for AKS to override our target class preferences.
6441 */
6442 uint32_t keywrap_flags = CP_KEYWRAP_DIFFCLASS;
6443
6444 if (VATTR_IS_ACTIVE(vap, va_dataprotect_class)) {
6445 cp_class = (int32_t)vap->va_dataprotect_class;
6446 /*
6447 * Since the user specifically requested this target class be used,
6448 * we want to fail this creation operation if we cannot wrap to their
6449 * target class. The CP_KEYWRAP_DIFFCLASS bit says that it is OK to
6450 * use a different class than the one specified, so we turn that off
6451 * now.
6452 */
6453 keywrap_flags &= ~CP_KEYWRAP_DIFFCLASS;
6454 }
6455 int protected_mount = 0;
6456 #endif
6457
6458
6459 if ((error = hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
6460 return (error);
6461
6462 /* set the cnode pointer only after successfully acquiring lock */
6463 dcp = VTOC(dvp);
6464
6465 /* Don't allow creation of new entries in open-unlinked directories */
6466 if ((error = hfs_checkdeleted(dcp))) {
6467 hfs_unlock(dcp);
6468 return error;
6469 }
6470
6471 dcp->c_flag |= C_DIR_MODIFICATION;
6472
6473 hfsmp = VTOHFS(dvp);
6474
6475 *vpp = NULL;
6476 tvp = NULL;
6477 out_desc.cd_flags = 0;
6478 out_desc.cd_nameptr = NULL;
6479
6480 vnodetype = vap->va_type;
6481 if (vnodetype == VNON)
6482 vnodetype = VREG;
6483 mode = MAKEIMODE(vnodetype, vap->va_mode);
6484
6485 if (S_ISDIR (mode) || S_ISREG (mode)) {
6486 protectable_target = 1;
6487 }
6488
6489
6490 /* Check if we're out of usable disk space. */
6491 if ((hfs_freeblks(hfsmp, 1) == 0) && (vfs_context_suser(ctx) != 0)) {
6492 error = ENOSPC;
6493 goto exit;
6494 }
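/*
 * hfs_freeblks() with the "wantreserve" flag excludes the reserved block
 * pool, and vfs_context_suser() is non-zero for unprivileged callers, so
 * only the superuser may keep creating entries out of the reserve.
 */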
6495
6496 microtime(&tv);
6497
6498 /* Setup the default attributes */
6499 bzero(&attr, sizeof(attr));
6500 attr.ca_mode = mode;
6501 attr.ca_linkcount = 1;
6502 if (VATTR_IS_ACTIVE(vap, va_rdev)) {
6503 attr.ca_rdev = vap->va_rdev;
6504 }
6505 if (VATTR_IS_ACTIVE(vap, va_create_time)) {
6506 VATTR_SET_SUPPORTED(vap, va_create_time);
6507 attr.ca_itime = vap->va_create_time.tv_sec;
6508 } else {
6509 attr.ca_itime = tv.tv_sec;
6510 }
6511 #if CONFIG_HFS_STD
6512 if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
6513 attr.ca_itime += 3600; /* Same as what hfs_update does */
6514 }
6515 #endif
6516 attr.ca_atime = attr.ca_ctime = attr.ca_mtime = attr.ca_itime;
6517 attr.ca_atimeondisk = attr.ca_atime;
6518 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6519 VATTR_SET_SUPPORTED(vap, va_flags);
6520 attr.ca_flags = vap->va_flags;
6521 }
6522
6523 /*
6524 * HFS+ only: all files get ThreadExists
6525 * HFSX only: dirs get HasFolderCount
6526 */
6527 #if CONFIG_HFS_STD
6528 if (!(hfsmp->hfs_flags & HFS_STANDARD))
6529 #endif
6530 {
6531 if (vnodetype == VDIR) {
6532 if (hfsmp->hfs_flags & HFS_FOLDERCOUNT)
6533 attr.ca_recflags = kHFSHasFolderCountMask;
6534 } else {
6535 attr.ca_recflags = kHFSThreadExistsMask;
6536 }
6537 }
6538
6539 #if CONFIG_PROTECT
6540 if (cp_fs_protected(hfsmp->hfs_mp)) {
6541 protected_mount = 1;
6542 }
6543 /*
6544 * On a content-protected HFS+/HFSX filesystem, files and directories
6545 * cannot be created without atomically setting/creating the EA that
6546 * contains the protection class metadata and keys at the same time, in
6547 * the same transaction. As a result, pre-set the "EAs exist" flag
6548 * on the cat_attr for protectable catalog record creations. This will
6549 * cause the cnode creation routine in hfs_getnewvnode to mark the cnode
6550 * as having EAs.
6551 */
6552 if ((protected_mount) && (protectable_target)) {
6553 attr.ca_recflags |= kHFSHasAttributesMask;
6554 /* delay entering in the namecache */
6555 nocache = 1;
6556 }
6557 #endif
6558
6559
6560 /*
6561 * Add the date added to the item. See above, as
6562 * all of the dates are set to the itime.
6563 */
6564 hfs_write_dateadded (&attr, attr.ca_atime);
6565
6566 /* Initialize the gen counter to 1 */
6567 hfs_write_gencount(&attr, (uint32_t)1);
6568
6569 attr.ca_uid = vap->va_uid;
6570 attr.ca_gid = vap->va_gid;
6571 VATTR_SET_SUPPORTED(vap, va_mode);
6572 VATTR_SET_SUPPORTED(vap, va_uid);
6573 VATTR_SET_SUPPORTED(vap, va_gid);
6574
6575 #if QUOTA
6576 /* check to see if this node's creation would cause us to go over
6577 * quota. If so, abort this operation.
6578 */
6579 if (hfsmp->hfs_flags & HFS_QUOTAS) {
6580 if ((error = hfs_quotacheck(hfsmp, 1, attr.ca_uid, attr.ca_gid,
6581 vfs_context_ucred(ctx)))) {
6582 goto exit;
6583 }
6584 }
6585 #endif
6586
6587
6588 /* Tag symlinks with a type and creator. */
6589 if (vnodetype == VLNK) {
6590 struct FndrFileInfo *fip;
6591
6592 fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
6593 fip->fdType = SWAP_BE32(kSymLinkFileType);
6594 fip->fdCreator = SWAP_BE32(kSymLinkCreator);
6595 }
6596
6597 /* Setup the descriptor */
6598 in_desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
6599 in_desc.cd_namelen = cnp->cn_namelen;
6600 in_desc.cd_parentcnid = dcp->c_fileid;
6601 in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
6602 in_desc.cd_hint = dcp->c_childhint;
6603 in_desc.cd_encoding = 0;
6604
6605 #if CONFIG_PROTECT
6606 /*
6607 * To preserve file creation atomicity with regards to the content protection EA,
6608 * we must create the file in the catalog and then write out its EA in the same
6609 * transaction.
6610 *
6611 * We only denote the target class in this EA; key generation is not completed
6612 * until the file has been inserted into the catalog and will be done
6613 * in a separate transaction.
6614 */
6615 if ((protected_mount) && (protectable_target)) {
6616 error = cp_setup_newentry(hfsmp, dcp, cp_class, attr.ca_mode, &entry);
6617 if (error) {
6618 goto exit;
6619 }
6620 }
6621 #endif
6622
6623 if ((error = hfs_start_transaction(hfsmp)) != 0) {
6624 goto exit;
6625 }
6626 started_tr = 1;
6627
6628 // have to also lock the attribute file because cat_create() needs
6629 // to check that any fileID it wants to use does not have orphaned
6630 // attributes in it.
6631 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
6632 cnid_t new_id;
6633
6634 /* Reserve some space in the Catalog file. */
6635 if ((error = cat_preflight(hfsmp, CAT_CREATE, NULL, 0))) {
6636 hfs_systemfile_unlock(hfsmp, lockflags);
6637 goto exit;
6638 }
6639
6640 if ((error = cat_acquire_cnid(hfsmp, &new_id))) {
6641 hfs_systemfile_unlock (hfsmp, lockflags);
6642 goto exit;
6643 }
6644
6645 error = cat_create(hfsmp, new_id, &in_desc, &attr, &out_desc);
6646 if (error == 0) {
6647 /* Update the parent directory */
6648 dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
6649 dcp->c_entries++;
6650
6651 if (vnodetype == VDIR) {
6652 INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
6653 }
6654 dcp->c_dirchangecnt++;
6655 hfs_incr_gencount(dcp);
6656
6657 dcp->c_touch_chgtime = dcp->c_touch_modtime = true;
6658 dcp->c_flag |= C_MODIFIED;
6659
6660 hfs_update(dcp->c_vp, 0);
6661
6662 #if CONFIG_PROTECT
6663 /*
6664 * If we are creating a content protected file, now is when
6665 * we create the EA. We must create it in the same transaction
6666 * that creates the file. We can also guarantee that the file
6667 * MUST exist because we are still holding the catalog lock
6668 * at this point.
6669 */
6670 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
6671 error = cp_setxattr (NULL, entry, hfsmp, attr.ca_fileid, XATTR_CREATE);
6672
6673 if (error) {
6674 int delete_err;
6675 /*
6676 * If we fail the EA creation, then we need to delete the file.
6677 * Luckily, we are still holding all of the right locks.
6678 */
6679 delete_err = cat_delete (hfsmp, &out_desc, &attr);
6680 if (delete_err == 0) {
6681 /* Update the parent directory */
6682 if (dcp->c_entries > 0)
6683 dcp->c_entries--;
6684 dcp->c_dirchangecnt++;
6685 dcp->c_ctime = tv.tv_sec;
6686 dcp->c_mtime = tv.tv_sec;
6687 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
6688 }
6689
6690 /* Emit EINVAL if we fail to create EA*/
6691 error = EINVAL;
6692 }
6693 }
6694 #endif
6695 }
6696 hfs_systemfile_unlock(hfsmp, lockflags);
6697 if (error)
6698 goto exit;
6699
6700 uint32_t txn = hfsmp->jnl ? journal_current_txn(hfsmp->jnl) : 0;
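/*
 * Capture the current journal transaction now, while it is still open;
 * it is stored into the new cnode's c_update_txn after hfs_getnewvnode(),
 * which must run outside the transaction (see the note below).
 */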
6701
6702 /* Invalidate negative cache entries in the directory */
6703 if (dcp->c_flag & C_NEG_ENTRIES) {
6704 cache_purge_negatives(dvp);
6705 dcp->c_flag &= ~C_NEG_ENTRIES;
6706 }
6707
6708 hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
6709 (dcp->c_cnid == kHFSRootFolderID));
6710
6711 // XXXdbg
6712 // have to end the transaction here before we call hfs_getnewvnode()
6713 // because that can cause us to try and reclaim a vnode on a different
6714 // file system which could cause us to start a transaction which can
6715 // deadlock with someone on that other file system (since we could be
6716 // holding two transaction locks as well as various vnodes and we did
6717 // not obtain the locks on them in the proper order).
6718 //
6719 // NOTE: this means that if the quota check fails or we have to update
6720 // the change time on a block-special device that those changes
6721 // will happen as part of independent transactions.
6722 //
6723 if (started_tr) {
6724 hfs_end_transaction(hfsmp);
6725 started_tr = 0;
6726 }
6727
6728 #if CONFIG_PROTECT
6729 /*
6730 * At this point, we must have encountered success with writing the EA.
6731 * Destroy our temporary cprotect (which had no keys).
6732 */
6733
6734 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
6735 cp_entry_destroy (hfsmp, entry);
6736 entry = NULL;
6737 }
6738 #endif
6739 gnv_flags |= GNV_CREATE;
6740 if (nocache) {
6741 gnv_flags |= GNV_NOCACHE;
6742 }
6743
6744 /*
6745 * Create a vnode for the object just created.
6746 *
6747 * NOTE: Maintaining the cnode lock on the parent directory is important,
6748 * as it prevents race conditions where other threads want to look up entries
6749 * in the directory and/or add things as we are in the process of creating
6750 * the vnode below. However, this has the potential for causing a
6751 * double lock panic when dealing with shadow files on a HFS boot partition.
6752 * The panic could occur if we are not cleaning up after ourselves properly
6753 * when done with a shadow file or in the error cases. The error would occur if we
6754 * try to create a new vnode, and then end up reclaiming another shadow vnode to
6755 * create the new one. However, if everything is working properly, this should
6756 * be a non-issue as we would never enter that reclaim codepath.
6757 *
6758 * The cnode is locked on successful return.
6759 */
6760 error = hfs_getnewvnode(hfsmp, dvp, cnp, &out_desc, gnv_flags, &attr,
6761 NULL, &tvp, &newvnode_flags);
6762 if (error)
6763 goto exit;
6764
6765 cp = VTOC(tvp);
6766
6767 cp->c_update_txn = txn;
6768
6769 struct doc_tombstone *ut;
6770 ut = doc_tombstone_get();
6771 if ( ut->t_lastop_document_id != 0
6772 && ut->t_lastop_parent == dvp
6773 && ut->t_lastop_parent_vid == vnode_vid(dvp)
6774 && strcmp((char *)ut->t_lastop_filename, (const char *)cp->c_desc.cd_nameptr) == 0) {
6775 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
6776
6777 //printf("CREATE: preserving doc-id %lld on %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
6778 fip->document_id = (uint32_t)(ut->t_lastop_document_id & 0xffffffff);
6779
6780 cp->c_bsdflags |= UF_TRACKED;
6781 cp->c_flag |= C_MODIFIED;
6782
6783 if ((error = hfs_start_transaction(hfsmp)) == 0) {
6784 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
6785
6786 (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
6787
6788 hfs_systemfile_unlock (hfsmp, lockflags);
6789 (void) hfs_end_transaction(hfsmp);
6790 }
6791
6792 doc_tombstone_clear(ut, &old_doc_vp);
6793 } else if (ut->t_lastop_document_id != 0) {
6794 int len = cnp->cn_namelen;
6795 if (len == 0) {
6796 len = strlen(cnp->cn_nameptr);
6797 }
6798
6799 if (doc_tombstone_should_ignore_name(cnp->cn_nameptr, cnp->cn_namelen)) {
6800 // printf("CREATE: not clearing tombstone because %s is a temp name.\n", cnp->cn_nameptr);
6801 } else {
6802 // Clear the tombstone because the thread is not recreating the same path
6803 // printf("CREATE: clearing tombstone because %s is NOT a temp name.\n", cnp->cn_nameptr);
6804 doc_tombstone_clear(ut, NULL);
6805 }
6806 }
6807
6808 if ((hfsmp->hfs_flags & HFS_CS_HOTFILE_PIN) && (vnode_isfastdevicecandidate(dvp) && !vnode_isautocandidate(dvp))) {
6809
6810 //printf("hfs: flagging %s (fileid: %d) as VFASTDEVCANDIDATE (dvp name: %s)\n",
6811 // cnp->cn_nameptr ? cnp->cn_nameptr : "<NONAME>",
6812 // cp->c_fileid,
6813 // dvp->v_name ? dvp->v_name : "no-dir-name");
6814
6815 //
6816 // On new files we set the FastDevCandidate flag so that
6817 // any new blocks allocated to it will be pinned.
6818 //
6819 cp->c_attr.ca_recflags |= kHFSFastDevCandidateMask;
6820 vnode_setfastdevicecandidate(tvp);
6821
6822 //
6823 // properly inherit auto-cached flags
6824 //
6825 if (vnode_isautocandidate(dvp)) {
6826 cp->c_attr.ca_recflags |= kHFSAutoCandidateMask;
6827 vnode_setautocandidate(tvp);
6828 }
6829
6830
6831 //
6832 // We also want to add it to the hotfile adoption list so
6833 // that it will eventually land in the hotfile btree
6834 //
6835 (void) hfs_addhotfile(tvp);
6836 }
6837
6838 *vpp = tvp;
6839
6840 #if CONFIG_PROTECT
6841 /*
6842 * Now that we have a vnode-in-hand, generate keys for this namespace item.
6843 * If we fail to create the keys, then attempt to delete the item from the
6844 * namespace. If we can't delete the item, that's not desirable but also not fatal.
6845 * All of the places which deal with restoring/unwrapping keys must also be
6846 * prepared to encounter an entry that does not have keys.
6847 */
6848 if ((protectable_target) && (protected_mount)) {
6849 struct cprotect *keyed_entry = NULL;
6850
6851 if (cp->c_cpentry == NULL) {
6852 panic ("hfs_makenode: no cpentry for cnode (%p)", cp);
6853 }
6854
6855 error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), keywrap_flags, &keyed_entry);
6856 if (error == 0) {
6857 /*
6858 * Upon success, the keys were generated and written out.
6859 * Update the cp pointer in the cnode.
6860 */
6861 cp_replace_entry (hfsmp, cp, keyed_entry);
6862 if (nocache) {
6863 cache_enter (dvp, tvp, cnp);
6864 }
6865 }
6866 else {
6867 /* If key creation OR the setxattr failed, emit EPERM to userland */
6868 error = EPERM;
6869
6870 /*
6871 * Beware! This slightly violates the lock ordering for the
6872 * cnode/vnode 'tvp'. Ordinarily, you must acquire the truncate lock
6873 * which guards file size changes before acquiring the normal cnode lock
6874 * and calling hfs_removefile on an item.
6875 *
6876 * However, in this case, we are still holding the directory lock so
6877 * 'tvp' is not lookup-able and it was a newly created vnode so it
6878 * cannot have any content yet. The only reason we are initiating
6879 * the removefile is because we could not generate content protection keys
6880 * for this namespace item. Note also that we pass a '1' in the allow_dirs
6881 * argument for hfs_removefile because we may be creating a directory here.
6882 *
6883 * All this to say that while it is technically a violation it is
6884 * impossible to race with another thread for this cnode so it is safe.
6885 */
6886 int err = hfs_removefile (dvp, tvp, cnp, 0, 0, 1, NULL, 0);
6887 if (err) {
6888 printf("hfs_makenode: removefile failed (%d) for CP entry %p\n", err, tvp);
6889 }
6890
6891 /* Release the cnode lock and mark the vnode for termination */
6892 hfs_unlock (cp);
6893 err = vnode_recycle (tvp);
6894 if (err) {
6895 printf("hfs_makenode: vnode_recycle failed (%d) for CP entry %p\n", err, tvp);
6896 }
6897
6898 /* Drop the iocount on the new vnode to force reclamation/recycling */
6899 vnode_put (tvp);
6900 cp = NULL;
6901 *vpp = NULL;
6902 }
6903 }
6904 #endif
6905
6906 #if QUOTA
6907 /*
6908 * Once we create this vnode, we need to initialize its quota data
6909 * structures, if necessary. We know that it is OK to just go ahead and
6910 * initialize because we've already validated earlier (through the hfs_quotacheck
6911 * function) to see if creating this cnode/vnode would cause us to go over quota.
6912 */
6913 if (hfsmp->hfs_flags & HFS_QUOTAS) {
6914 if (cp) {
6915 /* cp could have been zeroed earlier */
6916 (void) hfs_getinoquota(cp);
6917 }
6918 }
6919 #endif
6920
6921 exit:
6922 cat_releasedesc(&out_desc);
6923
6924 #if CONFIG_PROTECT
6925 /*
6926 * We may have jumped here in error-handling various situations above.
6927 * If we haven't already dumped the temporary CP used to initialize
6928 * the file atomically, then free it now. cp_entry_destroy should null
6929 * out the pointer if it was called already.
6930 */
6931 if (entry) {
6932 cp_entry_destroy (hfsmp, entry);
6933 entry = NULL;
6934 }
6935 #endif
6936
6937 /*
6938 * Make sure we release cnode lock on dcp.
6939 */
6940 if (dcp) {
6941 dcp->c_flag &= ~C_DIR_MODIFICATION;
6942 wakeup((caddr_t)&dcp->c_flag);
6943
6944 hfs_unlock(dcp);
6945 }
6946 ino64_t file_id = 0;
6947 if (error == 0 && cp != NULL) {
6948 file_id = cp->c_fileid;
6949 hfs_unlock(cp);
6950 }
6951 if (started_tr) {
6952 hfs_end_transaction(hfsmp);
6953 started_tr = 0;
6954 }
6955
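/*
* An older item previously carried this document id: clear the id and the
* UF_TRACKED flag on that cnode, mark it modified, and post an
* FSE_DOCID_CHANGED event recording the move from the old file id to the
* id of the file we just created.
*/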
6956 if (old_doc_vp) {
6957 cnode_t *ocp = VTOC(old_doc_vp);
6958 hfs_lock_always(ocp, HFS_EXCLUSIVE_LOCK);
6959 struct FndrExtendedFileInfo *ofip = (struct FndrExtendedFileInfo *)((char *)&ocp->c_attr.ca_finderinfo + 16);
6960
6961 const uint32_t doc_id = ofip->document_id;
6962 const ino64_t old_file_id = ocp->c_fileid;
6963
6964 // printf("clearing doc-id from ino %d\n", ocp->c_desc.cd_cnid);
6965 ofip->document_id = 0;
6966 ocp->c_bsdflags &= ~UF_TRACKED;
6967 ocp->c_flag |= C_MODIFIED;
6968
6969 hfs_unlock(ocp);
6970 vnode_put(old_doc_vp);
6971
6972 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
6973 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
6974 FSE_ARG_INO, old_file_id, // src inode #
6975 FSE_ARG_INO, file_id, // dst inode #
6976 FSE_ARG_INT32, doc_id,
6977 FSE_ARG_DONE);
6978 }
6979
6980 return (error);
6981 }
6982
6983
6984 /*
6985 * hfs_vgetrsrc acquires a resource fork vnode corresponding to the
6986 * cnode that is found in 'vp'. The cnode should be locked upon entry
6987 * and will be returned locked, but it may be dropped temporarily.
6988 *
6989 * If the resource fork vnode does not exist, HFS will attempt to acquire an
6990 * empty (uninitialized) vnode from VFS so as to avoid deadlocks with
6991 * jetsam. If we let the normal getnewvnode code produce the vnode for us
6992 * we would be doing so while holding the cnode lock of our cnode.
6993 *
6994 * On success, *rvpp will hold the resource fork vnode with an
6995 * iocount. *Don't* forget the vnode_put.
6996 */
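/*
* Minimal caller sketch (illustrative only, not invoked anywhere in this
* file), assuming 'vp' is a data-fork vnode on which the caller already
* holds an iocount; it follows the contract documented above: take the
* cnode lock, call hfs_vgetrsrc, and vnode_put the resource vnode when done.
*
*	struct vnode *rvp = NULLVP;
*	struct cnode *cp = VTOC(vp);
*	int error;
*
*	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) == 0) {
*		error = hfs_vgetrsrc(VTOHFS(vp), vp, &rvp);
*		hfs_unlock(cp);
*	}
*	if (error == 0) {
*		// ... use rvp ...
*		vnode_put(rvp);		// drop the iocount granted by hfs_vgetrsrc
*	}
*/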
6997 int
6998 hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp)
6999 {
7000 struct vnode *rvp = NULLVP;
7001 struct vnode *empty_rvp = NULLVP;
7002 struct vnode *dvp = NULLVP;
7003 struct cnode *cp = VTOC(vp);
7004 int error;
7005 int vid;
7006
7007 if (vnode_vtype(vp) == VDIR) {
7008 return EINVAL;
7009 }
7010
7011 restart:
7012 /* Attempt to use existing vnode */
7013 if ((rvp = cp->c_rsrc_vp)) {
7014 vid = vnode_vid(rvp);
7015
7016 // vnode_getwithvid can block so we need to drop the cnode lock
7017 hfs_unlock(cp);
7018
7019 error = vnode_getwithvid(rvp, vid);
7020
7021 hfs_lock_always(cp, HFS_EXCLUSIVE_LOCK);
7022
7023 /*
7024 * When our lock was relinquished, the resource fork
7025 * could have been recycled. Check for this and try
7026 * again.
7027 */
7028 if (error == ENOENT)
7029 goto restart;
7030
7031 if (error) {
7032 const char * name = (const char *)VTOC(vp)->c_desc.cd_nameptr;
7033
7034 if (name)
7035 printf("hfs_vgetrsrc: couldn't get resource"
7036 " fork for %s, vol=%s, err=%d\n", name, hfsmp->vcbVN, error);
7037 return (error);
7038 }
7039 } else {
7040 struct cat_fork rsrcfork;
7041 struct componentname cn;
7042 struct cat_desc *descptr = NULL;
7043 struct cat_desc to_desc;
7044 char delname[32];
7045 int lockflags;
7046 int newvnode_flags = 0;
7047
7048 /*
7049 * In this case, we don't currently see a resource fork vnode attached
7050 * to this cnode. In most cases, we were called from a read-only VNOP
7051 * like getattr, so it should be safe to drop the cnode lock and then
7052 * re-acquire it.
7053 *
7054 * Here, we drop the lock so that we can acquire an empty/husk
7055 * vnode without deadlocking against jetsam.
7056 *
7057 * It does not currently appear possible to hold the truncate lock via
7058 * FS re-entrancy when we get to this point. (8/2014)
7059 */
7060 hfs_unlock (cp);
7061
7062 error = vnode_create_empty (&empty_rvp);
7063
7064 hfs_lock_always (cp, HFS_EXCLUSIVE_LOCK);
7065
7066 if (error) {
7067 /* If acquiring the 'empty' vnode failed, then nothing to clean up */
7068 return error;
7069 }
7070
7071 /*
7072 * We could have raced with another thread here while we dropped our cnode
7073 * lock. See if the cnode now has a resource fork vnode and restart if appropriate.
7074 *
7075 * Note: We just released the cnode lock, so there is a possibility that the
7076 * cnode that we just acquired has been deleted or even removed from disk
7077 * completely, though this is unlikely. If the file is open-unlinked, the
7078 * check below will resolve it for us. If it has been completely
7079 * removed (even from the catalog!), then when we examine the catalog
7080 * directly, below, while holding the catalog lock, we will not find the
7081 * item and we can fail out properly.
7082 */
7083 if (cp->c_rsrc_vp) {
7084 /* Drop the empty vnode before restarting */
7085 vnode_put (empty_rvp);
7086 empty_rvp = NULL;
7087 rvp = NULL;
7088 goto restart;
7089 }
7090
7091 /*
7092 * hfs_vgetrsrc may be invoked for a cnode that has already been marked
7093 * C_DELETED. This is because we need to continue to provide rsrc
7094 * fork access to open-unlinked files. In this case, build a fake descriptor
7095 * like in hfs_removefile. If we don't do this, buildkey will fail in
7096 * cat_lookup because this cnode has no name in its descriptor.
7097 */
7098 if ((cp->c_flag & C_DELETED ) && (cp->c_desc.cd_namelen == 0)) {
7099 bzero (&to_desc, sizeof(to_desc));
7100 bzero (delname, 32);
7101 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
7102 to_desc.cd_nameptr = (const u_int8_t*) delname;
7103 to_desc.cd_namelen = strlen(delname);
7104 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
7105 to_desc.cd_flags = 0;
7106 to_desc.cd_cnid = cp->c_cnid;
7107
7108 descptr = &to_desc;
7109 }
7110 else {
7111 descptr = &cp->c_desc;
7112 }
7113
7114
7115 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
7116
7117 /*
7118 * We call cat_idlookup (instead of cat_lookup) below because we can't
7119 * trust the descriptor in the provided cnode for lookups at this point.
7120 * Between the time of the original lookup of this vnode and now, the
7121 * descriptor could have gotten swapped or replaced. If this occurred,
7122 * the parent/name combo originally desired may not necessarily be provided
7123 * if we use the descriptor. Even worse, if the vnode represents
7124 * a hardlink, we could have removed one of the links from the namespace
7125 * but left the descriptor alone, since hfs_unlink does not invalidate
7126 * the descriptor in the cnode if other links still point to the inode.
7127 *
7128 * Consider the following (slightly contrived) scenario:
7129 * /tmp/a <--> /tmp/b (hardlinks).
7130 * 1. Thread A: open rsrc fork on /tmp/b.
7131 * 1a. Thread A: does lookup, goes out to lunch right before calling getnamedstream.
7132 * 2. Thread B does 'mv /foo/b /tmp/b'.
7133 * 2a. Thread B succeeds.
7134 * 3. Thread A comes back and wants rsrc fork info for /tmp/b.
7135 *
7136 * Even though the hardlink backing /tmp/b is now eliminated, the descriptor
7137 * is not removed/updated during the unlink process. So, if you were to
7138 * do a lookup on /tmp/b, you'd acquire an entirely different record's resource
7139 * fork.
7140 *
7141 * As a result, we use the fileid, which should be invariant for the lifetime
7142 * of the cnode (possibly barring calls to exchangedata).
7143 *
7144 * Addendum: We can't do the above for HFS standard since we aren't guaranteed to
7145 * have thread records for files. They were only required for directories. So
7146 * we need to do the lookup with the catalog name. This is OK since hardlinks were
7147 * never allowed on HFS standard.
7148 */
7149
7150 /* Get resource fork data */
7151 #if CONFIG_HFS_STD
7152 if (ISSET(hfsmp->hfs_flags, HFS_STANDARD)) {
7153 /*
7154 * HFS standard only:
7155 *
7156 * Get the resource fork for this item with a cat_lookup call, but do not
7157 * force a case lookup since HFS standard is case-insensitive only. We
7158 * don't want the descriptor; just the fork data here. If we tried to
7159 * do a ID lookup (via thread record -> catalog record), then we might fail
7160 * prematurely since, as noted above, thread records were not strictly required
7161 * on files in HFS.
7162 */
7163 error = cat_lookup (hfsmp, descptr, 1, 0, (struct cat_desc*)NULL,
7164 (struct cat_attr*)NULL, &rsrcfork, NULL);
7165 } else
7166 #endif
7167 {
7168 error = cat_idlookup (hfsmp, cp->c_fileid, 0, 1, NULL, NULL, &rsrcfork);
7169 }
7170
7171 hfs_systemfile_unlock(hfsmp, lockflags);
7172 if (error) {
7173 /* Drop our 'empty' vnode ! */
7174 vnode_put (empty_rvp);
7175 return (error);
7176 }
7177 /*
7178 * Supply hfs_getnewvnode with a component name.
7179 */
7180 cn.cn_pnbuf = NULL;
7181 if (descptr->cd_nameptr) {
7182 void *buf = hfs_malloc(MAXPATHLEN);
7183
7184 cn = (struct componentname){
7185 .cn_nameiop = LOOKUP,
7186 .cn_flags = ISLASTCN,
7187 .cn_pnlen = MAXPATHLEN,
7188 .cn_pnbuf = buf,
7189 .cn_nameptr = buf,
7190 .cn_namelen = snprintf(buf, MAXPATHLEN,
7191 "%s%s", descptr->cd_nameptr,
7192 _PATH_RSRCFORKSPEC)
7193 };
7194
7195 // Should never happen because cn.cn_nameptr won't ever be long...
7196 if (cn.cn_namelen >= MAXPATHLEN) {
7197 hfs_free(buf, MAXPATHLEN);
7198 /* Drop our 'empty' vnode ! */
7199 vnode_put (empty_rvp);
7200 return ENAMETOOLONG;
7201
7202 }
7203 }
7204 dvp = vnode_getparent(vp);
7205
7206 /*
7207 * We are about to call hfs_getnewvnode and pass in the vnode that we acquired
7208 * earlier when we were not holding any locks. The semantics of GNV_USE_VP require that
7209 * hfs_getnewvnode either consume the vnode and vend it back to us, properly initialized,
7210 * or dispose of it properly if it errors out.
7211 */
7212 rvp = empty_rvp;
7213
7214 error = hfs_getnewvnode(hfsmp, dvp, cn.cn_pnbuf ? &cn : NULL,
7215 descptr, (GNV_WANTRSRC | GNV_SKIPLOCK | GNV_USE_VP),
7216 &cp->c_attr, &rsrcfork, &rvp, &newvnode_flags);
7217
7218 if (dvp)
7219 vnode_put(dvp);
7220 hfs_free(cn.cn_pnbuf, MAXPATHLEN);
7221 if (error)
7222 return (error);
7223 } /* End 'else' for rsrc fork not existing */
7224
7225 *rvpp = rvp;
7226 return (0);
7227 }
7228
7229 /*
7230 * Wrapper for special device reads
7231 */
7232 int
7233 hfsspec_read(struct vnop_read_args *ap)
7234 {
7235 /*
7236 * Set access flag.
7237 */
7238 cnode_t *cp = VTOC(ap->a_vp);
7239
7240 if (cp)
7241 cp->c_touch_acctime = TRUE;
7242
7243 return spec_read(ap);
7244 }
7245
7246 /*
7247 * Wrapper for special device writes
7248 */
7249 int
7250 hfsspec_write(struct vnop_write_args *ap)
7251 {
7252 /*
7253 * Set update and change flags.
7254 */
7255 cnode_t *cp = VTOC(ap->a_vp);
7256
7257 if (cp) {
7258 cp->c_touch_chgtime = TRUE;
7259 cp->c_touch_modtime = TRUE;
7260 }
7261
7262 return spec_write(ap);
7263 }
7264
7265 /*
7266 * Wrapper for special device close
7267 *
7268 * Update the times on the cnode then do device close.
7269 */
7270 int
7271 hfsspec_close(struct vnop_close_args *ap)
7272 {
7273 struct vnode *vp = ap->a_vp;
7274 cnode_t *cp = VTOC(vp);
7275
7276 if (cp && vnode_isinuse(ap->a_vp, 0)) {
7277 if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
7278 hfs_touchtimes(VTOHFS(vp), cp);
7279 hfs_unlock(cp);
7280 }
7281 }
7282 return spec_close(ap);
7283 }
7284
7285 #if FIFO
7286 /*
7287 * Wrapper for fifo reads
7288 */
7289 static int
7290 hfsfifo_read(struct vnop_read_args *ap)
7291 {
7292 /*
7293 * Set access flag.
7294 */
7295 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
7296 return fifo_read(ap);
7297 }
7298
7299 /*
7300 * Wrapper for fifo writes
7301 */
7302 static int
7303 hfsfifo_write(struct vnop_write_args *ap)
7304 {
7305 /*
7306 * Set update and change flags.
7307 */
7308 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
7309 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
7310 return fifo_write(ap);
7311 }
7312
7313 /*
7314 * Wrapper for fifo close
7315 *
7316 * Update the times on the cnode then do device close.
7317 */
7318 static int
7319 hfsfifo_close(struct vnop_close_args *ap)
7320 {
7321 struct vnode *vp = ap->a_vp;
7322 struct cnode *cp;
7323
7324 if (vnode_isinuse(ap->a_vp, 1)) {
7325 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
7326 cp = VTOC(vp);
7327 hfs_touchtimes(VTOHFS(vp), cp);
7328 hfs_unlock(cp);
7329 }
7330 }
7331 return fifo_close(ap);
7332 }
7333
7334
7335 #endif /* FIFO */
7336
7337 /*
7338 * Getter for the document_id.
7339 * The document_id is stored in FndrExtendedFileInfo/FndrExtendedDirInfo.
7340 */
7341 static u_int32_t
7342 hfs_get_document_id_internal(const uint8_t *finderinfo, mode_t mode)
7343 {
7344 const uint8_t *finfo = NULL;
7345 u_int32_t doc_id = 0;
7346
7347 /* overlay the FinderInfo to the correct pointer, and advance */
7348 finfo = finderinfo + 16;
7349
7350 if (S_ISDIR(mode) || S_ISREG(mode)) {
7351 const struct FndrExtendedFileInfo *extinfo = (const struct FndrExtendedFileInfo *)finfo;
7352 doc_id = extinfo->document_id;
7353 }
7354
7355 return doc_id;
7356 }
7357
7358
7359 /* getter(s) for document id */
7360 u_int32_t
7361 hfs_get_document_id(struct cnode *cp)
7362 {
7363 return (hfs_get_document_id_internal((u_int8_t*)cp->c_finderinfo,
7364 cp->c_attr.ca_mode));
7365 }
7366
7367 /* If you have finderinfo and mode, you can use this */
7368 u_int32_t
7369 hfs_get_document_id_from_blob(const uint8_t *finderinfo, mode_t mode)
7370 {
7371 return (hfs_get_document_id_internal(finderinfo, mode));
7372 }
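
/*
* Illustrative sketch only: a hypothetical caller holding a 32-byte Finder
* info blob (16 bytes of FinderInfo plus 16 bytes of extended info, as laid
* out above) and a BSD mode could read the document id like this ('finfo'
* and 'mode' are stand-in locals, not names used elsewhere in this file):
*
*	u_int8_t finfo[32];
*	mode_t mode;	// mode of an S_IFREG or S_IFDIR item
*	u_int32_t doc_id = hfs_get_document_id_from_blob(finfo, mode);
*	if (doc_id != 0) {
*		// the item carries a document id in its extended Finder info
*	}
*/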
7373
7374 /*
7375 * Synchronize a file's in-core state with that on disk.
7376 */
7377 int
7378 hfs_vnop_fsync(struct vnop_fsync_args *ap)
7379 {
7380 struct vnode* vp = ap->a_vp;
7381 int error;
7382
7383 /* Note: We check the hfs flags instead of the vfs mount flag because,
7384 * during a read-write update, hfs marks itself read-write much earlier
7385 * than the vfs does; relying on the hfs flags therefore avoids skipping
7386 * writes such as zeroing out unused nodes, creating the hotfiles btree, etc.
7387 */
7388 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) {
7389 return 0;
7390 }
7391
7392 /*
7393 * No need to call cp_handle_vnop to resolve fsync(). Any dirty data
7394 * should have caused the keys to be unwrapped at the time the data was
7395 * put into the UBC, either at mmap/pagein/read-write time. If anything did
7396 * slip by, then the strategy routine will auto-resolve it for us.
7397 *
7398 * We also need to allow ENOENT lock errors since unlink
7399 * system call can call VNOP_FSYNC during vclean.
7400 */
7401 error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
7402 if (error)
7403 return (0);
7404
7405 error = hfs_fsync(vp, ap->a_waitfor, 0, vfs_context_proc(ap->a_context));
7406
7407 hfs_unlock(VTOC(vp));
7408 return (error);
7409 }
7410
7411 int (**hfs_vnodeop_p)(void *);
7412
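/* Generic vnode-operation function-pointer type used to cast the entries in the VNOP tables below. */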
7413 #define VOPFUNC int (*)(void *)
7414
7415
7416 #if CONFIG_HFS_STD
7417 int (**hfs_std_vnodeop_p) (void *);
7418 static int hfs_readonly_op (__unused void* ap) { return (EROFS); }
7419
7420 /*
7421 * In 10.6 and forward, HFS Standard is read-only and deprecated. The vnop table below
7422 * is for use with HFS standard to block out operations that would modify the file system.
7423 */
7424
7425 const struct vnodeopv_entry_desc hfs_standard_vnodeop_entries[] = {
7426 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7427 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
7428 { &vnop_create_desc, (VOPFUNC)hfs_readonly_op }, /* create (READONLY) */
7429 { &vnop_mknod_desc, (VOPFUNC)hfs_readonly_op }, /* mknod (READONLY) */
7430 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
7431 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
7432 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7433 { &vnop_setattr_desc, (VOPFUNC)hfs_readonly_op }, /* setattr */
7434 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
7435 { &vnop_write_desc, (VOPFUNC)hfs_readonly_op }, /* write (READONLY) */
7436 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
7437 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
7438 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
7439 { &vnop_exchange_desc, (VOPFUNC)hfs_readonly_op }, /* exchange (READONLY)*/
7440 { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */
7441 { &vnop_fsync_desc, (VOPFUNC)hfs_readonly_op}, /* fsync (READONLY) */
7442 { &vnop_remove_desc, (VOPFUNC)hfs_readonly_op }, /* remove (READONLY) */
7443 { &vnop_link_desc, (VOPFUNC)hfs_readonly_op }, /* link (READONLY) */
7444 { &vnop_rename_desc, (VOPFUNC)hfs_readonly_op }, /* rename (READONLY)*/
7445 { &vnop_mkdir_desc, (VOPFUNC)hfs_readonly_op }, /* mkdir (READONLY) */
7446 { &vnop_rmdir_desc, (VOPFUNC)hfs_readonly_op }, /* rmdir (READONLY) */
7447 { &vnop_symlink_desc, (VOPFUNC)hfs_readonly_op }, /* symlink (READONLY) */
7448 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
7449 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
7450 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
7451 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7452 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7453 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
7454 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
7455 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7456 { &vnop_allocate_desc, (VOPFUNC)hfs_readonly_op }, /* allocate (READONLY) */
7457 #if CONFIG_SEARCHFS
7458 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
7459 #else
7460 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
7461 #endif
7462 { &vnop_bwrite_desc, (VOPFUNC)hfs_readonly_op }, /* bwrite (READONLY) */
7463 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
7464 { &vnop_pageout_desc,(VOPFUNC) hfs_readonly_op }, /* pageout (READONLY) */
7465 { &vnop_copyfile_desc, (VOPFUNC)hfs_readonly_op }, /* copyfile (READONLY)*/
7466 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7467 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7468 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7469 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7470 { &vnop_setxattr_desc, (VOPFUNC)hfs_readonly_op}, /* set xattr (READONLY) */
7471 { &vnop_removexattr_desc, (VOPFUNC)hfs_readonly_op}, /* remove xattr (READONLY) */
7472 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7473 #if NAMEDSTREAMS
7474 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
7475 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_readonly_op },
7476 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_readonly_op },
7477 #endif
7478 { &vnop_getattrlistbulk_desc, (VOPFUNC)hfs_vnop_getattrlistbulk }, /* getattrlistbulk */
7479 { NULL, (VOPFUNC)NULL }
7480 };
7481
7482 const struct vnodeopv_desc hfs_std_vnodeop_opv_desc =
7483 { &hfs_std_vnodeop_p, hfs_standard_vnodeop_entries };
7484 #endif
7485
7486 /* VNOP table for HFS+ */
7487 const struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
7488 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7489 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
7490 { &vnop_create_desc, (VOPFUNC)hfs_vnop_create }, /* create */
7491 { &vnop_mknod_desc, (VOPFUNC)hfs_vnop_mknod }, /* mknod */
7492 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
7493 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
7494 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7495 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7496 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
7497 { &vnop_write_desc, (VOPFUNC)hfs_vnop_write }, /* write */
7498 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
7499 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
7500 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
7501 { &vnop_exchange_desc, (VOPFUNC)hfs_vnop_exchange }, /* exchange */
7502 { &vnop_mmap_desc, (VOPFUNC)hfs_vnop_mmap }, /* mmap */
7503 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7504 { &vnop_remove_desc, (VOPFUNC)hfs_vnop_remove }, /* remove */
7505 { &vnop_link_desc, (VOPFUNC)hfs_vnop_link }, /* link */
7506 { &vnop_rename_desc, (VOPFUNC)hfs_vnop_rename }, /* rename */
7507 { &vnop_renamex_desc, (VOPFUNC)hfs_vnop_renamex }, /* renamex (with flags) */
7508 { &vnop_mkdir_desc, (VOPFUNC)hfs_vnop_mkdir }, /* mkdir */
7509 { &vnop_rmdir_desc, (VOPFUNC)hfs_vnop_rmdir }, /* rmdir */
7510 { &vnop_symlink_desc, (VOPFUNC)hfs_vnop_symlink }, /* symlink */
7511 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
7512 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
7513 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
7514 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7515 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7516 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
7517 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
7518 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7519 { &vnop_allocate_desc, (VOPFUNC)hfs_vnop_allocate }, /* allocate */
7520 #if CONFIG_SEARCHFS
7521 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
7522 #else
7523 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
7524 #endif
7525 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite }, /* bwrite */
7526 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
7527 { &vnop_pageout_desc,(VOPFUNC) hfs_vnop_pageout }, /* pageout */
7528 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7529 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7530 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7531 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7532 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7533 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7534 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7535 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7536 #if NAMEDSTREAMS
7537 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
7538 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_vnop_makenamedstream },
7539 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_vnop_removenamedstream },
7540 #endif
7541 { &vnop_getattrlistbulk_desc, (VOPFUNC)hfs_vnop_getattrlistbulk }, /* getattrlistbulk */
7542 { &vnop_mnomap_desc, (VOPFUNC)hfs_vnop_mnomap },
7543 { NULL, (VOPFUNC)NULL }
7544 };
7545
7546 const struct vnodeopv_desc hfs_vnodeop_opv_desc =
7547 { &hfs_vnodeop_p, hfs_vnodeop_entries };
7548
7549
7550 /* Spec Op vnop table for HFS+ */
7551 int (**hfs_specop_p)(void *);
7552 const struct vnodeopv_entry_desc hfs_specop_entries[] = {
7553 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7554 { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */
7555 { &vnop_create_desc, (VOPFUNC)spec_create }, /* create */
7556 { &vnop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */
7557 { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */
7558 { &vnop_close_desc, (VOPFUNC)hfsspec_close }, /* close */
7559 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7560 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7561 { &vnop_read_desc, (VOPFUNC)hfsspec_read }, /* read */
7562 { &vnop_write_desc, (VOPFUNC)hfsspec_write }, /* write */
7563 { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */
7564 { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */
7565 { &vnop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */
7566 { &vnop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */
7567 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7568 { &vnop_remove_desc, (VOPFUNC)spec_remove }, /* remove */
7569 { &vnop_link_desc, (VOPFUNC)spec_link }, /* link */
7570 { &vnop_rename_desc, (VOPFUNC)spec_rename }, /* rename */
7571 { &vnop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */
7572 { &vnop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */
7573 { &vnop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */
7574 { &vnop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */
7575 { &vnop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */
7576 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7577 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7578 { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */
7579 { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */
7580 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7581 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
7582 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
7583 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
7584 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7585 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7586 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7587 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7588 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7589 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7590 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7591 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
7592 };
7593 const struct vnodeopv_desc hfs_specop_opv_desc =
7594 { &hfs_specop_p, hfs_specop_entries };
7595
7596 #if FIFO
7597 /* HFS+ FIFO VNOP table */
7598 int (**hfs_fifoop_p)(void *);
7599 const struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
7600 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7601 { &vnop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */
7602 { &vnop_create_desc, (VOPFUNC)fifo_create }, /* create */
7603 { &vnop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */
7604 { &vnop_open_desc, (VOPFUNC)fifo_open }, /* open */
7605 { &vnop_close_desc, (VOPFUNC)hfsfifo_close }, /* close */
7606 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7607 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7608 { &vnop_read_desc, (VOPFUNC)hfsfifo_read }, /* read */
7609 { &vnop_write_desc, (VOPFUNC)hfsfifo_write }, /* write */
7610 { &vnop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */
7611 { &vnop_select_desc, (VOPFUNC)fifo_select }, /* select */
7612 { &vnop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */
7613 { &vnop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */
7614 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7615 { &vnop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */
7616 { &vnop_link_desc, (VOPFUNC)fifo_link }, /* link */
7617 { &vnop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */
7618 { &vnop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */
7619 { &vnop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */
7620 { &vnop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */
7621 { &vnop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */
7622 { &vnop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */
7623 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7624 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7625 { &vnop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */
7626 { &vnop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */
7627 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7628 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
7629 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
7630 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
7631 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7632 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7633 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7634 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7635 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7636 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7637 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7638 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7639 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
7640 };
7641 const struct vnodeopv_desc hfs_fifoop_opv_desc =
7642 { &hfs_fifoop_p, hfs_fifoop_entries };
7643 #endif /* FIFO */