[apple/hfs.git] core/hfs_vnops.c (hfs-366.50.19)
1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <libkern/OSAtomic.h>
30 #include <stdbool.h>
31 #include <sys/systm.h>
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/dirent.h>
35 #include <sys/stat.h>
36 #include <sys/buf.h>
37 #include <sys/mount.h>
38 #include <sys/vnode_if.h>
39 #include <sys/malloc.h>
40 #include <sys/ubc.h>
41 #include <sys/paths.h>
42 #include <sys/quota.h>
43 #include <sys/time.h>
44 #include <sys/disk.h>
45 #include <sys/kauth.h>
46 #include <sys/fsctl.h>
47 #include <sys/xattr.h>
48 #include <sys/decmpfs.h>
49 #include <sys/mman.h>
50 #include <sys/doc_tombstone.h>
51 #include <sys/namei.h>
52 #include <string.h>
53 #include <sys/fsevents.h>
54
55 #include <miscfs/specfs/specdev.h>
56 #include <miscfs/fifofs/fifo.h>
57 #include <vfs/vfs_support.h>
58 #include <machine/spl.h>
59
60 #include <sys/kdebug.h>
61 #include <sys/sysctl.h>
62 #include <stdbool.h>
63
64 #include "hfs.h"
65 #include "hfs_catalog.h"
66 #include "hfs_cnode.h"
67 #include "hfs_dbg.h"
68 #include "hfs_mount.h"
69 #include "hfs_quota.h"
70 #include "hfs_endian.h"
71 #include "hfs_kdebug.h"
72 #include "hfs_cprotect.h"
73
74 #if HFS_CONFIG_KEY_ROLL
75 #include "hfs_key_roll.h"
76 #endif
77
78 #include "BTreesInternal.h"
79 #include "FileMgrInternal.h"
80
81 /* Global vfs data structures for hfs */
82
83 /*
84 * Always F_FULLFSYNC? 1=yes, 0=no (the default, for various reasons, is
85 * 'no'). At some point this might need to move into VFS and we might
86 * need to provide an API to get at it, but for now, this is only used
87 * by HFS+.
88 */
89 int always_do_fullfsync = 0;
90 SYSCTL_DECL(_vfs_generic);
91 HFS_SYSCTL(INT, _vfs_generic, OID_AUTO, always_do_fullfsync, CTLFLAG_RW | CTLFLAG_LOCKED, &always_do_fullfsync, 0, "always F_FULLFSYNC when fsync is called")
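/*
 * Illustrative sketch (not part of this file's build): a user-space tool
 * could read or toggle this knob through sysctlbyname(3), assuming the OID
 * is exported as "vfs.generic.always_do_fullfsync" as declared above.  The
 * #if 0 keeps the example out of the kernel build.
 */
#if 0
#include <sys/sysctl.h>
#include <stdio.h>

static int toggle_always_fullfsync(int enable)
{
	int old = 0;
	size_t oldlen = sizeof(old);

	/* Read the current value and install the new one in a single call. */
	if (sysctlbyname("vfs.generic.always_do_fullfsync",
	                 &old, &oldlen, &enable, sizeof(enable)) != 0) {
		perror("sysctlbyname");
		return -1;
	}
	printf("always_do_fullfsync: %d -> %d\n", old, enable);
	return 0;
}
#endif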
92
93 int hfs_makenode(struct vnode *dvp, struct vnode **vpp,
94 struct componentname *cnp, struct vnode_attr *vap,
95 vfs_context_t ctx);
96 int hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p);
97 int hfs_metasync_all(struct hfsmount *hfsmp);
98
99 int hfs_removedir(struct vnode *, struct vnode *, struct componentname *,
100 int, int);
101 int hfs_removefile(struct vnode *, struct vnode *, struct componentname *,
102 int, int, int, struct vnode *, int);
103
104 /* Used here and in cnode teardown -- for symlinks */
105 int hfs_removefile_callback(struct buf *bp, void *hfsmp);
106
107 enum {
108 HFS_MOVE_DATA_INCLUDE_RSRC = 1,
109 };
110 typedef uint32_t hfs_move_data_options_t;
111
112 static int hfs_move_data(cnode_t *from_cp, cnode_t *to_cp,
113 hfs_move_data_options_t options);
114 static int hfs_move_fork(filefork_t *srcfork, cnode_t *src,
115 filefork_t *dstfork, cnode_t *dst);
116
117 #if HFS_COMPRESSION
118 static int hfs_move_compressed(cnode_t *from_vp, cnode_t *to_vp);
119 #endif
120
121 decmpfs_cnode* hfs_lazy_init_decmpfs_cnode (struct cnode *cp);
122
123 #if FIFO
124 static int hfsfifo_read(struct vnop_read_args *);
125 static int hfsfifo_write(struct vnop_write_args *);
126 static int hfsfifo_close(struct vnop_close_args *);
127
128 extern int (**fifo_vnodeop_p)(void *);
129 #endif /* FIFO */
130
131 int hfs_vnop_close(struct vnop_close_args*);
132 int hfs_vnop_exchange(struct vnop_exchange_args*);
133 int hfs_vnop_fsync(struct vnop_fsync_args*);
134 int hfs_vnop_mkdir(struct vnop_mkdir_args*);
135 int hfs_vnop_mknod(struct vnop_mknod_args*);
136 int hfs_vnop_getattr(struct vnop_getattr_args*);
137 int hfs_vnop_open(struct vnop_open_args*);
138 int hfs_vnop_readdir(struct vnop_readdir_args*);
139 int hfs_vnop_rename(struct vnop_rename_args*);
140 int hfs_vnop_renamex(struct vnop_renamex_args*);
141 int hfs_vnop_rmdir(struct vnop_rmdir_args*);
142 int hfs_vnop_symlink(struct vnop_symlink_args*);
143 int hfs_vnop_setattr(struct vnop_setattr_args*);
144 int hfs_vnop_readlink(struct vnop_readlink_args *);
145 int hfs_vnop_pathconf(struct vnop_pathconf_args *);
146 int hfs_vnop_mmap(struct vnop_mmap_args *ap);
147 int hfsspec_read(struct vnop_read_args *);
148 int hfsspec_write(struct vnop_write_args *);
149 int hfsspec_close(struct vnop_close_args *);
150
151 /* Options for hfs_removedir and hfs_removefile */
152 #define HFSRM_SKIP_RESERVE 0x01
153
154
155
156 /*****************************************************************************
157 *
158 * Common Operations on vnodes
159 *
160 *****************************************************************************/
161
162 /*
163 * Is the given cnode either the .journal or .journal_info_block file on
164 * a volume with an active journal? Many VNOPs use this to deny access
165 * to those files.
166 *
167 * Note: this still returns true for the .journal file on a volume with
168 * an external journal, even though that file does not actually hold the
169 * contents of the volume's journal.
170 */
171 static _Bool
172 hfs_is_journal_file(struct hfsmount *hfsmp, struct cnode *cp)
173 {
174 if (hfsmp->jnl != NULL &&
175 (cp->c_fileid == hfsmp->hfs_jnlinfoblkid ||
176 cp->c_fileid == hfsmp->hfs_jnlfileid)) {
177 return true;
178 } else {
179 return false;
180 }
181 }
182
183 /*
184 * Create a regular file.
185 */
186 int
187 hfs_vnop_create(struct vnop_create_args *ap)
188 {
189 /*
190 * We leave handling of certain race conditions here to the caller
191 * which will have a better understanding of the semantics it
192 * requires. For example, if it turns out that the file exists,
193 * it would be wrong of us to return a reference to the existing
194 * file because the caller might not want that and it would be
195 * misleading to suggest the file had been created when it hadn't
196 * been. Note that our NFS server code does not set the
197 * VA_EXCLUSIVE flag so you cannot assume that callers don't want
198 * EEXIST errors if it's not set. The common case, where users
199 * are calling open with the O_CREAT mode, is handled in VFS; when
200 * we return EEXIST, it will loop and do the look-up again.
201 */
202 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
203 }
204
205 /*
206 * Make device special file.
207 */
208 int
209 hfs_vnop_mknod(struct vnop_mknod_args *ap)
210 {
211 struct vnode_attr *vap = ap->a_vap;
212 struct vnode *dvp = ap->a_dvp;
213 struct vnode **vpp = ap->a_vpp;
214 struct cnode *cp;
215 int error;
216
217 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord) {
218 return (ENOTSUP);
219 }
220
221 /* Create the vnode */
222 error = hfs_makenode(dvp, vpp, ap->a_cnp, vap, ap->a_context);
223 if (error)
224 return (error);
225
226 cp = VTOC(*vpp);
227 cp->c_touch_acctime = TRUE;
228 cp->c_touch_chgtime = TRUE;
229 cp->c_touch_modtime = TRUE;
230
231 if ((vap->va_rdev != VNOVAL) &&
232 (vap->va_type == VBLK || vap->va_type == VCHR))
233 cp->c_rdev = vap->va_rdev;
234
235 return (0);
236 }
237
238 #if HFS_COMPRESSION
239 /*
240 * hfs_ref_data_vp(): returns the data fork vnode for a given cnode.
241 * In the (hopefully rare) case where the data fork vnode is not
242 * present, it will use hfs_vget() to create a new vnode for the
243 * data fork.
244 *
245 * NOTE: If successful and a vnode is returned, the caller is responsible
246 * for releasing the returned vnode with vnode_rele().
247 */
248 static int
249 hfs_ref_data_vp(struct cnode *cp, struct vnode **data_vp, int skiplock)
250 {
251 int vref = 0;
252
253 if (!data_vp || !cp) /* sanity check incoming parameters */
254 return EINVAL;
255
256 /* maybe we should take the hfs cnode lock here, and if so, use the skiplock parameter to tell us not to */
257
258 if (!skiplock) hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
259 struct vnode *c_vp = cp->c_vp;
260 if (c_vp) {
261 /* we already have a data vnode */
262 *data_vp = c_vp;
263 vref = vnode_ref(*data_vp);
264 if (!skiplock) hfs_unlock(cp);
265 if (vref == 0) {
266 return 0;
267 }
268 return EINVAL;
269 }
270 /* no data fork vnode in the cnode, so ask hfs for one. */
271
272 if (!cp->c_rsrc_vp) {
273 /* if we don't have either a c_vp or c_rsrc_vp, we can't really do anything useful */
274 *data_vp = NULL;
275 if (!skiplock) hfs_unlock(cp);
276 return EINVAL;
277 }
278
279 if (0 == hfs_vget(VTOHFS(cp->c_rsrc_vp), cp->c_cnid, data_vp, 1, 0) &&
280 0 != data_vp) {
281 vref = vnode_ref(*data_vp);
282 vnode_put(*data_vp);
283 if (!skiplock) hfs_unlock(cp);
284 if (vref == 0) {
285 return 0;
286 }
287 return EINVAL;
288 }
289 /* there was an error getting the vnode */
290 *data_vp = NULL;
291 if (!skiplock) hfs_unlock(cp);
292 return EINVAL;
293 }
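/*
 * Illustrative sketch (hypothetical caller, not compiled): the contract for
 * hfs_ref_data_vp() is that a successful return hands back a data-fork vnode
 * holding a usecount from vnode_ref(), which the caller must drop with
 * vnode_rele() when done.  hfs_file_is_compressed() below follows the same
 * pattern.
 */
#if 0
static int example_with_data_fork(struct cnode *cp, int skiplock)
{
	struct vnode *data_vp = NULL;
	int error;

	error = hfs_ref_data_vp(cp, &data_vp, skiplock);
	if (error)
		return error;

	if (data_vp) {
		/* ... operate on the data fork vnode ... */
		vnode_rele(data_vp);	/* balance the vnode_ref() taken above */
	}
	return 0;
}
#endif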
294
295 /*
296 * hfs_lazy_init_decmpfs_cnode(): returns the decmpfs_cnode for a cnode,
297 * allocating it if necessary; returns NULL if there was an allocation error.
298 * The function is non-static so that it can be used from the FCNTL handler.
299 */
300 decmpfs_cnode *
301 hfs_lazy_init_decmpfs_cnode(struct cnode *cp)
302 {
303 if (!cp->c_decmp) {
304 decmpfs_cnode *dp = decmpfs_cnode_alloc();
305 decmpfs_cnode_init(dp);
306 if (!OSCompareAndSwapPtr(NULL, dp, (void * volatile *)&cp->c_decmp)) {
307 /* another thread got here first, so free the decmpfs_cnode we allocated */
308 decmpfs_cnode_destroy(dp);
309 decmpfs_cnode_free(dp);
310 }
311 }
312
313 return cp->c_decmp;
314 }
315
316 /*
317 * hfs_file_is_compressed(): returns 1 if the file is compressed, and 0 (zero) if not.
318 * if the file's compressed flag is set, makes sure that the decmpfs_cnode field
319 * is allocated by calling hfs_lazy_init_decmpfs_cnode(), then makes sure it is populated,
320 * or else fills it in via the decmpfs_file_is_compressed() function.
321 */
322 int
323 hfs_file_is_compressed(struct cnode *cp, int skiplock)
324 {
325 int ret = 0;
326
327 /* fast check to see if file is compressed. If flag is clear, just answer no */
328 if (!(cp->c_bsdflags & UF_COMPRESSED)) {
329 return 0;
330 }
331
332 decmpfs_cnode *dp = hfs_lazy_init_decmpfs_cnode(cp);
333 if (!dp) {
334 /* error allocating a decmpfs cnode, treat the file as uncompressed */
335 return 0;
336 }
337
338 /* flag was set, see if the decmpfs_cnode state is valid (zero == invalid) */
339 uint32_t decmpfs_state = decmpfs_cnode_get_vnode_state(dp);
340 switch(decmpfs_state) {
341 case FILE_IS_COMPRESSED:
342 case FILE_IS_CONVERTING: /* treat decompressing files as if they are compressed */
343 return 1;
344 case FILE_IS_NOT_COMPRESSED:
345 return 0;
346 /* otherwise the state is not cached yet */
347 }
348
349 /* decmpfs hasn't seen this file yet, so call decmpfs_file_is_compressed() to init the decmpfs_cnode struct */
350 struct vnode *data_vp = NULL;
351 if (0 == hfs_ref_data_vp(cp, &data_vp, skiplock)) {
352 if (data_vp) {
353 ret = decmpfs_file_is_compressed(data_vp, VTOCMP(data_vp)); // fill in decmpfs_cnode
354 vnode_rele(data_vp);
355 }
356 }
357 return ret;
358 }
359
360 /* hfs_uncompressed_size_of_compressed_file() - get the uncompressed size of the file.
361 * if the caller has passed a valid vnode (has a ref count > 0), then hfsmp and fid are not required.
362 * if the caller doesn't have a vnode, pass NULL in vp, and pass valid hfsmp and fid.
363 * the file's size is returned in *size (required)
364 * if the indicated file is a directory (or something that doesn't have a data fork), then this call
365 * will return an error and the caller should fall back to treating the item as an uncompressed file
366 */
367 int
368 hfs_uncompressed_size_of_compressed_file(struct hfsmount *hfsmp, struct vnode *vp, cnid_t fid, off_t *size, int skiplock)
369 {
370 int ret = 0;
371 int putaway = 0; /* flag to remember if we used hfs_vget() */
372
373 if (!size) {
374 return EINVAL; /* no place to put the file size */
375 }
376
377 if (NULL == vp) {
378 if (!hfsmp || !fid) { /* make sure we have the required parameters */
379 return EINVAL;
380 }
381 if (0 != hfs_vget(hfsmp, fid, &vp, skiplock, 0)) { /* vnode is null, use hfs_vget() to get it */
382 vp = NULL;
383 } else {
384 putaway = 1; /* note that hfs_vget() was used to acquire the vnode */
385 }
386 }
387 /* this double check for compression (hfs_file_is_compressed)
388 * ensures the cached size is present in case decmpfs hasn't
389 * encountered this node yet.
390 */
391 if (vp) {
392 if (hfs_file_is_compressed(VTOC(vp), skiplock) ) {
393 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
394 } else if (VTOCMP(vp)) {
395 uint32_t cmp_type = decmpfs_cnode_cmp_type(VTOCMP(vp));
396
397 if (cmp_type == DATALESS_CMPFS_TYPE) {
398 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
399 ret = 0;
400 } else if (cmp_type >= CMP_MAX && VTOC(vp)->c_datafork) {
401 // if we don't recognize this type, just use the real data fork size
402 *size = VTOC(vp)->c_datafork->ff_size;
403 ret = 0;
404 } else
405 ret = EINVAL;
406 } else
407 ret = EINVAL;
408 }
409
410 if (putaway) { /* did we use hfs_vget() to get this vnode? */
411 vnode_put(vp); /* if so, release it and set it to null */
412 vp = NULL;
413 }
414 return ret;
415 }
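/*
 * Illustrative sketch (hypothetical caller, not compiled): the function
 * above can be driven either with a vnode the caller already holds, or with
 * just an hfsmount and a file ID, per the comment preceding it.
 */
#if 0
static void example_uncompressed_size(struct hfsmount *hfsmp,
                                      struct vnode *vp, cnid_t fid)
{
	off_t size = 0;

	/* Mode 1: caller already has a vnode (with an iocount/ref). */
	if (hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &size, 0) == 0)
		printf("uncompressed size (via vnode): %lld\n", size);

	/* Mode 2: no vnode; supply hfsmp and the file ID instead. */
	if (hfs_uncompressed_size_of_compressed_file(hfsmp, NULL, fid, &size, 0) == 0)
		printf("uncompressed size (via fid): %lld\n", size);
}
#endif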
416
417 int
418 hfs_hides_rsrc(vfs_context_t ctx, struct cnode *cp, int skiplock)
419 {
420 if (ctx == decmpfs_ctx)
421 return 0;
422 if (!hfs_file_is_compressed(cp, skiplock))
423 return 0;
424 return decmpfs_hides_rsrc(ctx, cp->c_decmp);
425 }
426
427 int
428 hfs_hides_xattr(vfs_context_t ctx, struct cnode *cp, const char *name, int skiplock)
429 {
430 if (ctx == decmpfs_ctx)
431 return 0;
432 if (!hfs_file_is_compressed(cp, skiplock))
433 return 0;
434 return decmpfs_hides_xattr(ctx, cp->c_decmp, name);
435 }
436 #endif /* HFS_COMPRESSION */
437
438 /*
439 * Open a file/directory.
440 */
441 int
442 hfs_vnop_open(struct vnop_open_args *ap)
443 {
444 struct vnode *vp = ap->a_vp;
445 struct filefork *fp;
446 struct timeval tv;
447 int error;
448 static int past_bootup = 0;
449 struct cnode *cp = VTOC(vp);
450 struct hfsmount *hfsmp = VTOHFS(vp);
451
452 #if CONFIG_PROTECT
453 error = cp_handle_open(vp, ap->a_mode);
454 if (error)
455 return error;
456 #endif
457
458 #if HFS_COMPRESSION
459 if (ap->a_mode & FWRITE) {
460 /* open for write */
461 if ( hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
462 /* opening a compressed file for write, so convert it to decompressed */
463 struct vnode *data_vp = NULL;
464 error = hfs_ref_data_vp(cp, &data_vp, 1); /* 1 == don't take the cnode lock */
465 if (0 == error) {
466 if (data_vp) {
467 error = decmpfs_decompress_file(data_vp, VTOCMP(data_vp), -1, 1, 0);
468 vnode_rele(data_vp);
469 } else {
470 error = EINVAL;
471 }
472 }
473 if (error != 0)
474 return error;
475 }
476 } else {
477 /* open for read */
478 if (hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
479 if (VNODE_IS_RSRC(vp)) {
480 /* opening the resource fork of a compressed file, so nothing to do */
481 } else {
482 /* opening a compressed file for read, make sure it validates */
483 error = decmpfs_validate_compressed_file(vp, VTOCMP(vp));
484 if (error != 0)
485 return error;
486 }
487 }
488 }
489 #endif
490
491 /*
492 * Files marked append-only must be opened for appending.
493 */
494 if ((cp->c_bsdflags & APPEND) && !vnode_isdir(vp) &&
495 (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
496 return (EPERM);
497
498 if (vnode_issystem(vp))
499 return (EBUSY); /* file is in use by the kernel */
500
501 /* Don't allow journal to be opened externally. */
502 if (hfs_is_journal_file(hfsmp, cp))
503 return (EPERM);
504
505 bool have_lock = false;
506
507 #if CONFIG_PROTECT
508 if (ISSET(ap->a_mode, FENCRYPTED) && cp->c_cpentry && vnode_isreg(vp)) {
509 bool have_trunc_lock = false;
510
511 #if HFS_CONFIG_KEY_ROLL
512 again:
513 #endif
514
515 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
516 if (have_trunc_lock)
517 hfs_unlock_truncate(cp, 0);
518 return error;
519 }
520
521 have_lock = true;
522
523 if (cp->c_cpentry->cp_raw_open_count + 1
524 < cp->c_cpentry->cp_raw_open_count) {
525 // Overflow; too many raw opens on this file
526 hfs_unlock(cp);
527 if (have_trunc_lock)
528 hfs_unlock_truncate(cp, 0);
529 return ENFILE;
530 }
531
532 #if HFS_CONFIG_KEY_ROLL
533 if (cp_should_auto_roll(hfsmp, cp->c_cpentry)) {
534 if (!have_trunc_lock) {
535 hfs_unlock(cp);
536 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, 0);
537 have_trunc_lock = true;
538 goto again;
539 }
540
541 error = hfs_key_roll_start(cp);
542 if (error) {
543 hfs_unlock(cp);
544 hfs_unlock_truncate(cp, 0);
545 return error;
546 }
547 }
548 #endif
549
550 if (have_trunc_lock)
551 hfs_unlock_truncate(cp, 0);
552
553 ++cp->c_cpentry->cp_raw_open_count;
554 }
555 #endif
556
557 if (ISSET(hfsmp->hfs_flags, HFS_READ_ONLY)
558 || !vnode_isreg(vp)
559 #if NAMEDSTREAMS
560 || vnode_isnamedstream(vp)
561 #endif
562 || !hfsmp->jnl || vnode_isinuse(vp, 0)) {
563
564 #if CONFIG_PROTECT
565 if (have_lock)
566 hfs_unlock(cp);
567 #endif
568
569 return (0);
570 }
571
572 if (!have_lock && (error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
573 return (error);
574
575 #if QUOTA
576 /* If we're going to write to the file, initialize quotas. */
577 if ((ap->a_mode & FWRITE) && (hfsmp->hfs_flags & HFS_QUOTAS))
578 (void)hfs_getinoquota(cp);
579 #endif /* QUOTA */
580
581 /*
582 * On the first (non-busy) open of a fragmented
583 * file, attempt to de-fragment it if it's smaller than hfs_defrag_max bytes.
584 * That field is initially set to 20MB.
585 */
586 fp = VTOF(vp);
587 if (fp->ff_blocks &&
588 fp->ff_extents[7].blockCount != 0 &&
589 fp->ff_size <= hfsmp->hfs_defrag_max) {
590
591 int no_mods = 0;
592 struct timeval now;
593 /*
594 * Wait until system bootup is done (3 min).
595 * And don't relocate a file that's been modified
596 * within the past minute -- this can lead to
597 * system thrashing.
598 */
599
600 if (hfsmp->hfs_defrag_nowait) {
601 /* If this is toggled, then issue the defrag if appropriate */
602 past_bootup = 1;
603 no_mods = 1;
604 }
605
606 if (!past_bootup) {
607 microuptime(&tv);
608 if (tv.tv_sec > (60*3)) {
609 past_bootup = 1;
610 }
611 }
612
613 microtime(&now);
614 if ((now.tv_sec - cp->c_mtime) > 60) {
615 no_mods = 1;
616 }
617
618 if (past_bootup && no_mods) {
619 (void) hfs_relocate(vp, hfsmp->nextAllocation + 4096,
620 vfs_context_ucred(ap->a_context),
621 vfs_context_proc(ap->a_context));
622 }
623 }
624
625 hfs_unlock(cp);
626
627 return (0);
628 }
629
630
631 /*
632 * Close a file/directory.
633 */
634 int
635 hfs_vnop_close(struct vnop_close_args *ap)
636 {
637 register struct vnode *vp = ap->a_vp;
638 register struct cnode *cp;
639 struct proc *p = vfs_context_proc(ap->a_context);
640 struct hfsmount *hfsmp;
641 int busy;
642 int tooktrunclock = 0;
643 int knownrefs = 0;
644
645 if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0)
646 return (0);
647 cp = VTOC(vp);
648 hfsmp = VTOHFS(vp);
649
650 #if CONFIG_PROTECT
651 if (cp->c_cpentry && ISSET(ap->a_fflag, FENCRYPTED) && vnode_isreg(vp)) {
652 hfs_assert(cp->c_cpentry->cp_raw_open_count > 0);
653 --cp->c_cpentry->cp_raw_open_count;
654 }
655 #endif
656
657 /*
658 * If the rsrc fork is a named stream, it can cause the data fork to
659 * stay around, preventing de-allocation of these blocks.
660 * Do checks for truncation on close. Purge extra extents if they exist.
661 * Make sure the vp is not a directory, and that it has a resource fork,
662 * and that resource fork is also a named stream.
663 */
664
665 if ((vnode_vtype(vp) == VREG) && (cp->c_rsrc_vp)
666 && (vnode_isnamedstream(cp->c_rsrc_vp))) {
667 uint32_t blks;
668
669 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
670 /*
671 * If there are extra blocks and there are only 2 refs on
672 * this vp (ourselves + rsrc fork holding ref on us), go ahead
673 * and try to truncate.
674 */
675 if ((blks < VTOF(vp)->ff_blocks) && (!vnode_isinuse(vp, 2))) {
676 // release cnode lock; must acquire truncate lock BEFORE cnode lock
677 hfs_unlock(cp);
678
679 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
680 tooktrunclock = 1;
681
682 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
683 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
684 // bail out if we can't re-acquire cnode lock
685 return 0;
686 }
687 // now re-test to make sure it's still valid
688 if (cp->c_rsrc_vp) {
689 knownrefs = 1 + vnode_isnamedstream(cp->c_rsrc_vp);
690 if (!vnode_isinuse(vp, knownrefs)){
691 // now we can truncate the file, if necessary
692 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
693 if (blks < VTOF(vp)->ff_blocks){
694 (void) hfs_truncate(vp, VTOF(vp)->ff_size, IO_NDELAY,
695 0, ap->a_context);
696 }
697 }
698 }
699 }
700 }
701
702
703 // if we froze the fs and we're exiting, then "thaw" the fs
704 if (hfsmp->hfs_freeze_state == HFS_FROZEN
705 && hfsmp->hfs_freezing_proc == p && proc_exiting(p)) {
706 hfs_thaw(hfsmp, p);
707 }
708
709 busy = vnode_isinuse(vp, 1);
710
711 if (busy) {
712 hfs_touchtimes(VTOHFS(vp), cp);
713 }
714 if (vnode_isdir(vp)) {
715 hfs_reldirhints(cp, busy);
716 } else if (vnode_issystem(vp) && !busy) {
717 vnode_recycle(vp);
718 }
719
720 if (tooktrunclock){
721 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
722 }
723 hfs_unlock(cp);
724
725 if (ap->a_fflag & FWASWRITTEN) {
726 hfs_sync_ejectable(hfsmp);
727 }
728
729 return (0);
730 }
731
732 static bool hfs_should_generate_document_id(hfsmount_t *hfsmp, cnode_t *cp)
733 {
734 return (!ISSET(hfsmp->hfs_flags, HFS_READ_ONLY)
735 && ISSET(cp->c_bsdflags, UF_TRACKED)
736 && cp->c_desc.cd_cnid != kHFSRootFolderID
737 && (S_ISDIR(cp->c_mode) || S_ISREG(cp->c_mode) || S_ISLNK(cp->c_mode)));
738 }
739
740 /*
741 * Get basic attributes.
742 */
743 int
744 hfs_vnop_getattr(struct vnop_getattr_args *ap)
745 {
746 #define VNODE_ATTR_TIMES \
747 (VNODE_ATTR_va_access_time|VNODE_ATTR_va_change_time|VNODE_ATTR_va_modify_time)
748 #define VNODE_ATTR_AUTH \
749 (VNODE_ATTR_va_mode | VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | \
750 VNODE_ATTR_va_flags | VNODE_ATTR_va_acl)
751
752 struct vnode *vp = ap->a_vp;
753 struct vnode_attr *vap = ap->a_vap;
754 struct vnode *rvp = NULLVP;
755 struct hfsmount *hfsmp;
756 struct cnode *cp;
757 uint64_t data_size;
758 enum vtype v_type;
759 int error = 0;
760 cp = VTOC(vp);
761
762 #if HFS_COMPRESSION
763 /* we need to inspect the decmpfs state of the file before we take the hfs cnode lock */
764 int compressed = 0;
765 int hide_size = 0;
766 off_t uncompressed_size = -1;
767 if (VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_alloc) || VATTR_IS_ACTIVE(vap, va_data_alloc) || VATTR_IS_ACTIVE(vap, va_total_size)) {
768 /* we only care about whether the file is compressed if asked for the uncompressed size */
769 if (VNODE_IS_RSRC(vp)) {
770 /* if it's a resource fork, decmpfs may want us to hide the size */
771 hide_size = hfs_hides_rsrc(ap->a_context, cp, 0);
772 } else {
773 /* if it's a data fork, we need to know if it was compressed so we can report the uncompressed size */
774 compressed = hfs_file_is_compressed(cp, 0);
775 }
776 if ((VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_size))) {
777 // if it's compressed
778 if (compressed || (!VNODE_IS_RSRC(vp) && cp->c_decmp && decmpfs_cnode_cmp_type(cp->c_decmp) >= CMP_MAX)) {
779 if (0 != hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &uncompressed_size, 0)) {
780 /* failed to get the uncompressed size, we'll check for this later */
781 uncompressed_size = -1;
782 } else {
783 // fake that it's compressed
784 compressed = 1;
785 }
786 }
787 }
788 }
789 #endif
790
791 /*
792 * Shortcut for vnode_authorize path. Each of the attributes
793 * in this set is updated atomically so we don't need to take
794 * the cnode lock to access them.
795 */
796 if ((vap->va_active & ~VNODE_ATTR_AUTH) == 0) {
797 /* Make sure file still exists. */
798 if (cp->c_flag & C_NOEXISTS)
799 return (ENOENT);
800
801 vap->va_uid = cp->c_uid;
802 vap->va_gid = cp->c_gid;
803 vap->va_mode = cp->c_mode;
804 vap->va_flags = cp->c_bsdflags;
805 vap->va_supported |= VNODE_ATTR_AUTH & ~VNODE_ATTR_va_acl;
806
807 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
808 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
809 VATTR_SET_SUPPORTED(vap, va_acl);
810 }
811
812 return (0);
813 }
814
815 hfsmp = VTOHFS(vp);
816 v_type = vnode_vtype(vp);
817
818 if (VATTR_IS_ACTIVE(vap, va_document_id)) {
819 uint32_t document_id;
820
821 if (cp->c_desc.cd_cnid == kHFSRootFolderID)
822 document_id = kHFSRootFolderID;
823 else {
824 /*
825 * This is safe without a lock because we're just reading
826 * a 32 bit aligned integer which should be atomic on all
827 * platforms we support.
828 */
829 document_id = hfs_get_document_id(cp);
830
831 if (!document_id && hfs_should_generate_document_id(hfsmp, cp)) {
832 uint32_t new_document_id;
833
834 error = hfs_generate_document_id(hfsmp, &new_document_id);
835 if (error)
836 return error;
837
838 error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
839 if (error)
840 return error;
841
842 bool want_docid_fsevent = false;
843
844 // Need to check again now that we have the lock
845 document_id = hfs_get_document_id(cp);
846 if (!document_id && hfs_should_generate_document_id(hfsmp, cp)) {
847 cp->c_attr.ca_finderextendeddirinfo.document_id = document_id = new_document_id;
848 want_docid_fsevent = true;
849 SET(cp->c_flag, C_MODIFIED);
850 }
851
852 hfs_unlock(cp);
853
854 if (want_docid_fsevent) {
855 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
856 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
857 FSE_ARG_INO, (ino64_t)0, // src inode #
858 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
859 FSE_ARG_INT32, document_id,
860 FSE_ARG_DONE);
861
862 if (need_fsevent(FSE_STAT_CHANGED, vp)) {
863 add_fsevent(FSE_STAT_CHANGED, ap->a_context,
864 FSE_ARG_VNODE, vp, FSE_ARG_DONE);
865 }
866 }
867 }
868 }
869
870 vap->va_document_id = document_id;
871 VATTR_SET_SUPPORTED(vap, va_document_id);
872 }
873
874 /*
875 * If time attributes are requested and we have cnode times
876 * that require updating, then acquire an exclusive lock on
877 * the cnode before updating the times. Otherwise we can
878 * just acquire a shared lock.
879 */
880 if ((vap->va_active & VNODE_ATTR_TIMES) &&
881 (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime)) {
882 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
883 return (error);
884 hfs_touchtimes(hfsmp, cp);
885
886 // downgrade to a shared lock since that's all we need from here on out
887 cp->c_lockowner = HFS_SHARED_OWNER;
888 lck_rw_lock_exclusive_to_shared(&cp->c_rwlock);
889
890 } else if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
891 return (error);
892 }
893
894 if (v_type == VDIR) {
895 data_size = (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE;
896
897 if (VATTR_IS_ACTIVE(vap, va_nlink)) {
898 int nlink;
899
900 /*
901 * For directories, the va_nlink is essentially a count
902 * of the ".." references to a directory plus the "."
903 * reference and the directory itself. So for HFS+ this
904 * becomes the sub-directory count plus two.
905 *
906 * In the absence of a sub-directory count we use the
907 * directory's item count. This will be too high in
908 * most cases since it also includes files.
909 */
910 if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
911 (cp->c_attr.ca_recflags & kHFSHasFolderCountMask))
912 nlink = cp->c_attr.ca_dircount; /* implied ".." entries */
913 else
914 nlink = cp->c_entries;
915
916 /* Account for ourself and our "." entry */
917 nlink += 2;
918 /* Hide our private directories. */
919 if (cp->c_cnid == kHFSRootFolderID) {
920 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0) {
921 --nlink;
922 }
923 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0) {
924 --nlink;
925 }
926 }
927 VATTR_RETURN(vap, va_nlink, (u_int64_t)nlink);
928 }
929 if (VATTR_IS_ACTIVE(vap, va_nchildren)) {
930 int entries;
931
932 entries = cp->c_entries;
933 /* Hide our private files and directories. */
934 if (cp->c_cnid == kHFSRootFolderID) {
935 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0)
936 --entries;
937 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0)
938 --entries;
939 if (hfsmp->jnl || ((hfsmp->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))
940 entries -= 2; /* hide the journal files */
941 }
942 VATTR_RETURN(vap, va_nchildren, entries);
943 }
944 /*
945 * The va_dirlinkcount is the count of real directory hard links.
946 * (i.e. it's not the sum of the implied "." and ".." references)
947 */
948 if (VATTR_IS_ACTIVE(vap, va_dirlinkcount)) {
949 VATTR_RETURN(vap, va_dirlinkcount, (uint32_t)cp->c_linkcount);
950 }
951 } else /* !VDIR */ {
952 data_size = VCTOF(vp, cp)->ff_size;
953
954 VATTR_RETURN(vap, va_nlink, (u_int64_t)cp->c_linkcount);
955 if (VATTR_IS_ACTIVE(vap, va_data_alloc)) {
956 u_int64_t blocks;
957
958 #if HFS_COMPRESSION
959 if (hide_size) {
960 VATTR_RETURN(vap, va_data_alloc, 0);
961 } else if (compressed) {
962 /* for compressed files, we report all allocated blocks as belonging to the data fork */
963 blocks = cp->c_blocks;
964 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
965 }
966 else
967 #endif
968 {
969 blocks = VCTOF(vp, cp)->ff_blocks;
970 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
971 }
972 }
973 }
974
975 /* conditional because 64-bit arithmetic can be expensive */
976 if (VATTR_IS_ACTIVE(vap, va_total_size)) {
977 if (v_type == VDIR) {
978 VATTR_RETURN(vap, va_total_size, (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE);
979 } else {
980 u_int64_t total_size = ~0ULL;
981 struct cnode *rcp;
982 #if HFS_COMPRESSION
983 if (hide_size) {
984 /* we're hiding the size of this file, so just return 0 */
985 total_size = 0;
986 } else if (compressed) {
987 if (uncompressed_size == -1) {
988 /*
989 * We failed to get the uncompressed size above,
990 * so we'll fall back to the standard path below
991 * since total_size is still -1
992 */
993 } else {
994 /* use the uncompressed size we fetched above */
995 total_size = uncompressed_size;
996 }
997 }
998 #endif
999 if (total_size == ~0ULL) {
1000 if (cp->c_datafork) {
1001 total_size = cp->c_datafork->ff_size;
1002 }
1003
1004 if (cp->c_blocks - VTOF(vp)->ff_blocks) {
1005 /* We deal with rsrc fork vnode iocount at the end of the function */
1006 error = hfs_vgetrsrc(hfsmp, vp, &rvp);
1007 if (error) {
1008 /*
1009 * Note that we call hfs_vgetrsrc with error_on_unlinked
1010 * set to FALSE. This is because we may be invoked via
1011 * fstat() on an open-unlinked file descriptor and we must
1012 * continue to support access to the rsrc fork until it disappears.
1013 * The code at the end of this function will be
1014 * responsible for releasing the iocount generated by
1015 * hfs_vgetrsrc. This is because we can't drop the iocount
1016 * without unlocking the cnode first.
1017 */
1018 goto out;
1019 }
1020
1021 rcp = VTOC(rvp);
1022 if (rcp && rcp->c_rsrcfork) {
1023 total_size += rcp->c_rsrcfork->ff_size;
1024 }
1025 }
1026 }
1027
1028 VATTR_RETURN(vap, va_total_size, total_size);
1029 }
1030 }
1031 if (VATTR_IS_ACTIVE(vap, va_total_alloc)) {
1032 if (v_type == VDIR) {
1033 VATTR_RETURN(vap, va_total_alloc, 0);
1034 } else {
1035 VATTR_RETURN(vap, va_total_alloc, (u_int64_t)cp->c_blocks * (u_int64_t)hfsmp->blockSize);
1036 }
1037 }
1038
1039 /*
1040 * If the VFS wants extended security data, and we know that we
1041 * don't have any (because it never told us it was setting any)
1042 * then we can return the supported bit and no data. If we do
1043 * have extended security, we can just leave the bit alone and
1044 * the VFS will use the fallback path to fetch it.
1045 */
1046 if (VATTR_IS_ACTIVE(vap, va_acl)) {
1047 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
1048 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
1049 VATTR_SET_SUPPORTED(vap, va_acl);
1050 }
1051 }
1052
1053 vap->va_access_time.tv_sec = cp->c_atime;
1054 vap->va_access_time.tv_nsec = 0;
1055 vap->va_create_time.tv_sec = cp->c_itime;
1056 vap->va_create_time.tv_nsec = 0;
1057 vap->va_modify_time.tv_sec = cp->c_mtime;
1058 vap->va_modify_time.tv_nsec = 0;
1059 vap->va_change_time.tv_sec = cp->c_ctime;
1060 vap->va_change_time.tv_nsec = 0;
1061 vap->va_backup_time.tv_sec = cp->c_btime;
1062 vap->va_backup_time.tv_nsec = 0;
1063
1064 /* See if we need to emit the date added field to the user */
1065 if (VATTR_IS_ACTIVE(vap, va_addedtime)) {
1066 u_int32_t dateadded = hfs_get_dateadded (cp);
1067 if (dateadded) {
1068 vap->va_addedtime.tv_sec = dateadded;
1069 vap->va_addedtime.tv_nsec = 0;
1070 VATTR_SET_SUPPORTED (vap, va_addedtime);
1071 }
1072 }
1073
1074 /* XXX is this really a good 'optimal I/O size'? */
1075 vap->va_iosize = hfsmp->hfs_logBlockSize;
1076 vap->va_uid = cp->c_uid;
1077 vap->va_gid = cp->c_gid;
1078 vap->va_mode = cp->c_mode;
1079 vap->va_flags = cp->c_bsdflags;
1080
1081 /*
1082 * Exporting file IDs from HFS Plus:
1083 *
1084 * For "normal" files the c_fileid is the same value as the
1085 * c_cnid. But for hard link files, they are different - the
1086 * c_cnid belongs to the active directory entry (ie the link)
1087 * and the c_fileid is for the actual inode (ie the data file).
1088 *
1089 * The stat call (getattr) uses va_fileid and the Carbon APIs,
1090 * which are hardlink-ignorant, will ask for va_linkid.
1091 */
1092 vap->va_fileid = (u_int64_t)cp->c_fileid;
1093 /*
1094 * We need to use the origin cache for both hardlinked files
1095 * and directories. Hardlinked directories have multiple cnids
1096 * and parents (one per link). Hardlinked files also have their
1097 * own parents and link IDs separate from the indirect inode number.
1098 * If we don't use the cache, we could end up vending the wrong ID
1099 * because the cnode will only reflect the link that was looked up most recently.
1100 */
1101 if (cp->c_flag & C_HARDLINK) {
1102 vap->va_linkid = (u_int64_t)hfs_currentcnid(cp);
1103 vap->va_parentid = (u_int64_t)hfs_currentparent(cp, /* have_lock: */ true);
1104 } else {
1105 vap->va_linkid = (u_int64_t)cp->c_cnid;
1106 vap->va_parentid = (u_int64_t)cp->c_parentcnid;
1107 }
1108
1109 vap->va_fsid = hfsmp->hfs_raw_dev;
1110 if (VATTR_IS_ACTIVE(vap, va_devid)) {
1111 VATTR_RETURN(vap, va_devid, hfsmp->hfs_raw_dev);
1112 }
1113 vap->va_filerev = 0;
1114 vap->va_encoding = cp->c_encoding;
1115 vap->va_rdev = (v_type == VBLK || v_type == VCHR) ? cp->c_rdev : 0;
1116 #if HFS_COMPRESSION
1117 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1118 if (hide_size)
1119 vap->va_data_size = 0;
1120 else if (compressed) {
1121 if (uncompressed_size == -1) {
1122 /* failed to get the uncompressed size above, so just return data_size */
1123 vap->va_data_size = data_size;
1124 } else {
1125 /* use the uncompressed size we fetched above */
1126 vap->va_data_size = uncompressed_size;
1127 }
1128 } else
1129 vap->va_data_size = data_size;
1130 VATTR_SET_SUPPORTED(vap, va_data_size);
1131 }
1132 #else
1133 vap->va_data_size = data_size;
1134 vap->va_supported |= VNODE_ATTR_va_data_size;
1135 #endif
1136
1137 #if CONFIG_PROTECT
1138 if (VATTR_IS_ACTIVE(vap, va_dataprotect_class)) {
1139 vap->va_dataprotect_class = cp->c_cpentry ? CP_CLASS(cp->c_cpentry->cp_pclass) : 0;
1140 VATTR_SET_SUPPORTED(vap, va_dataprotect_class);
1141 }
1142 #endif
1143 if (VATTR_IS_ACTIVE(vap, va_write_gencount)) {
1144 if (ubc_is_mapped_writable(vp)) {
1145 /*
1146 * Return 0 to the caller to indicate the file may be
1147 * changing. There is no need for us to increment the
1148 * generation counter here because it gets done as part of
1149 * page-out and also when the file is unmapped (to account
1150 * for changes we might not have seen).
1151 */
1152 vap->va_write_gencount = 0;
1153 } else {
1154 vap->va_write_gencount = hfs_get_gencount(cp);
1155 }
1156
1157 VATTR_SET_SUPPORTED(vap, va_write_gencount);
1158 }
1159
1160 /* Mark them all at once instead of individual VATTR_SET_SUPPORTED calls. */
1161 vap->va_supported |= VNODE_ATTR_va_access_time |
1162 VNODE_ATTR_va_create_time | VNODE_ATTR_va_modify_time |
1163 VNODE_ATTR_va_change_time| VNODE_ATTR_va_backup_time |
1164 VNODE_ATTR_va_iosize | VNODE_ATTR_va_uid |
1165 VNODE_ATTR_va_gid | VNODE_ATTR_va_mode |
1166 VNODE_ATTR_va_flags |VNODE_ATTR_va_fileid |
1167 VNODE_ATTR_va_linkid | VNODE_ATTR_va_parentid |
1168 VNODE_ATTR_va_fsid | VNODE_ATTR_va_filerev |
1169 VNODE_ATTR_va_encoding | VNODE_ATTR_va_rdev;
1170
1171 /* If this is the root, let VFS find out the mount name, which
1172 * may be different from the real name. Otherwise, we need to take care
1173 * of hardlinked files, which need to be looked up, if necessary
1174 */
1175 if (VATTR_IS_ACTIVE(vap, va_name) && (cp->c_cnid != kHFSRootFolderID)) {
1176 struct cat_desc linkdesc;
1177 int lockflags;
1178 int uselinkdesc = 0;
1179 cnid_t nextlinkid = 0;
1180 cnid_t prevlinkid = 0;
1181
1182 /* Get the name for ATTR_CMN_NAME. We need to take special care for hardlinks
1183 * here because the info for the link ID requested by getattrlist may be
1184 * different from what's currently in the cnode. This is because the cnode
1185 * will be filled in with the information for the most recent link ID that went
1186 * through namei/lookup(). If there are competing lookups for hardlinks that point
1187 * to the same inode, one (or more) getattrlists could be vended incorrect name information.
1188 * Also, we need to beware of open-unlinked files which could have a namelen of 0.
1189 */
1190
1191 if ((cp->c_flag & C_HARDLINK) &&
1192 ((cp->c_desc.cd_namelen == 0) || (vap->va_linkid != cp->c_cnid))) {
1193 /*
1194 * If we have no name and our link ID is the raw inode number, then we may
1195 * have an open-unlinked file. Go to the next link in this case.
1196 */
1197 if ((cp->c_desc.cd_namelen == 0) && (vap->va_linkid == cp->c_fileid)) {
1198 if ((error = hfs_lookup_siblinglinks(hfsmp, vap->va_linkid, &prevlinkid, &nextlinkid))){
1199 goto out;
1200 }
1201 }
1202 else {
1203 /* just use link obtained from vap above */
1204 nextlinkid = vap->va_linkid;
1205 }
1206
1207 /* We need to probe the catalog for the descriptor corresponding to the link ID
1208 * stored in nextlinkid. Note that we don't know if we have the exclusive lock
1209 * for the cnode here, so we can't just update the descriptor. Instead,
1210 * we should just store the descriptor's value locally and then use it to pass
1211 * out the name value as needed below.
1212 */
1213 if (nextlinkid){
1214 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
1215 error = cat_findname(hfsmp, nextlinkid, &linkdesc);
1216 hfs_systemfile_unlock(hfsmp, lockflags);
1217 if (error == 0) {
1218 uselinkdesc = 1;
1219 }
1220 }
1221 }
1222
1223 /* By this point, we've either patched up the name above and the c_desc
1224 * points to the correct data, or it already did, in which case we just proceed
1225 * by copying the name into the vap. Note that we will never set va_name to
1226 * supported if nextlinkid is never initialized. This could happen in the degenerate
1227 * case above involving the raw inode number, where it has no nextlinkid. In this case
1228 * we will simply not mark the name bit as supported.
1229 */
1230 if (uselinkdesc) {
1231 strlcpy(vap->va_name, (const char*) linkdesc.cd_nameptr, MAXPATHLEN);
1232 VATTR_SET_SUPPORTED(vap, va_name);
1233 cat_releasedesc(&linkdesc);
1234 }
1235 else if (cp->c_desc.cd_namelen) {
1236 strlcpy(vap->va_name, (const char*) cp->c_desc.cd_nameptr, MAXPATHLEN);
1237 VATTR_SET_SUPPORTED(vap, va_name);
1238 }
1239 }
1240
1241 out:
1242 hfs_unlock(cp);
1243 /*
1244 * We need to vnode_put the rsrc fork vnode only *after* we've released
1245 * the cnode lock, since vnode_put can trigger an inactive call, which
1246 * will go back into HFS and try to acquire a cnode lock.
1247 */
1248 if (rvp) {
1249 vnode_put (rvp);
1250 }
1251
1252 return (error);
1253 }
1254
1255 int
1256 hfs_vnop_setattr(struct vnop_setattr_args *ap)
1257 {
1258 struct vnode_attr *vap = ap->a_vap;
1259 struct vnode *vp = ap->a_vp;
1260 struct cnode *cp = NULL;
1261 struct hfsmount *hfsmp;
1262 kauth_cred_t cred = vfs_context_ucred(ap->a_context);
1263 struct proc *p = vfs_context_proc(ap->a_context);
1264 int error = 0;
1265 uid_t nuid;
1266 gid_t ngid;
1267 time_t orig_ctime;
1268
1269 orig_ctime = VTOC(vp)->c_ctime;
1270
1271 #if HFS_COMPRESSION
1272 int decmpfs_reset_state = 0;
1273 /*
1274 we call decmpfs_update_attributes even if the file is not compressed
1275 because we want to update the incoming flags if the xattrs are invalid
1276 */
1277 error = decmpfs_update_attributes(vp, vap);
1278 if (error)
1279 return error;
1280 #endif
1281 //
1282 // if this is not a size-changing setattr and it is not just
1283 // an atime update, then check for a snapshot.
1284 //
1285 if (!VATTR_IS_ACTIVE(vap, va_data_size) && !(vap->va_active == VNODE_ATTR_va_access_time)) {
1286 nspace_snapshot_event(vp, orig_ctime, NAMESPACE_HANDLER_METADATA_MOD, NSPACE_REARM_NO_ARG);
1287 }
1288
1289 #if CONFIG_PROTECT
1290 /*
1291 * All metadata changes should be allowed except a size-changing setattr, which
1292 * has effects on file content and requires calling into cp_handle_vnop
1293 * to have content protection check.
1294 */
1295 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1296 if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
1297 return (error);
1298 }
1299 }
1300 #endif /* CONFIG_PROTECT */
1301
1302 hfsmp = VTOHFS(vp);
1303
1304 /* Don't allow modification of the journal. */
1305 if (hfs_is_journal_file(hfsmp, VTOC(vp))) {
1306 return (EPERM);
1307 }
1308
1309 //
1310 // Check if we'll need a document_id and if so, get it before we lock
1311 // the cnode to avoid any possible deadlock with the root vnode, which has
1312 // to get locked to get the document id
1313 //
1314 u_int32_t document_id=0;
1315 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & UF_TRACKED) && !(VTOC(vp)->c_bsdflags & UF_TRACKED)) {
1316 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&(VTOC(vp)->c_attr.ca_finderinfo) + 16);
1317 //
1318 // If the document_id is not set, get a new one. It will be set
1319 // on the file down below once we hold the cnode lock.
1320 //
1321 if (fip->document_id == 0) {
1322 if (hfs_generate_document_id(hfsmp, &document_id) != 0) {
1323 document_id = 0;
1324 }
1325 }
1326 }
1327
1328
1329 /*
1330 * File size change request.
1331 * We are guaranteed that this is not a directory, and that
1332 * the filesystem object is writeable.
1333 *
1334 * NOTE: HFS COMPRESSION depends on the data_size being set *before* the bsd flags are updated
1335 */
1336 VATTR_SET_SUPPORTED(vap, va_data_size);
1337 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1338 if (!vnode_isreg(vp)) {
1339 if (vnode_isdir(vp)) {
1340 return EISDIR;
1341 }
1342 //otherwise return EINVAL
1343 return EINVAL;
1344 }
1345
1346 #if HFS_COMPRESSION
1347 /* keep the compressed state locked until we're done truncating the file */
1348 decmpfs_cnode *dp = VTOCMP(vp);
1349 if (!dp) {
1350 /*
1351 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1352 * is filled in; we need a decmpfs_cnode to lock out decmpfs state changes
1353 * on this file while it's truncating
1354 */
1355 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1356 if (!dp) {
1357 /* failed to allocate a decmpfs_cnode */
1358 return ENOMEM; /* what should this be? */
1359 }
1360 }
1361
1362 nspace_snapshot_event(vp, orig_ctime, vap->va_data_size == 0 ? NAMESPACE_HANDLER_TRUNCATE_OP|NAMESPACE_HANDLER_DELETE_OP : NAMESPACE_HANDLER_TRUNCATE_OP, NULL);
1363
1364 decmpfs_lock_compressed_data(dp, 1);
1365 if (hfs_file_is_compressed(VTOC(vp), 1)) {
1366 error = decmpfs_decompress_file(vp, dp, -1/*vap->va_data_size*/, 0, 1);
1367 if (error != 0) {
1368 decmpfs_unlock_compressed_data(dp, 1);
1369 return error;
1370 }
1371 }
1372 #endif
1373
1374 // Take truncate lock
1375 hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
1376
1377 // hfs_truncate will deal with the cnode lock
1378 error = hfs_truncate(vp, vap->va_data_size, vap->va_vaflags & 0xffff,
1379 0, ap->a_context);
1380
1381 hfs_unlock_truncate(VTOC(vp), HFS_LOCK_DEFAULT);
1382 #if HFS_COMPRESSION
1383 decmpfs_unlock_compressed_data(dp, 1);
1384 #endif
1385 if (error)
1386 return error;
1387 }
1388 if (cp == NULL) {
1389 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
1390 return (error);
1391 cp = VTOC(vp);
1392 }
1393
1394 /*
1395 * If it is just an access time update request by itself
1396 * we know the request is from kernel level code, and we
1397 * can delay it without being as worried about consistency.
1398 * This change speeds up mmaps, in the rare case that they
1399 * get caught behind a sync.
1400 */
1401
1402 if (vap->va_active == VNODE_ATTR_va_access_time) {
1403 cp->c_touch_acctime=TRUE;
1404 goto out;
1405 }
1406
1407
1408
1409 /*
1410 * Owner/group change request.
1411 * We are guaranteed that the new owner/group is valid and legal.
1412 */
1413 VATTR_SET_SUPPORTED(vap, va_uid);
1414 VATTR_SET_SUPPORTED(vap, va_gid);
1415 nuid = VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uid_t)VNOVAL;
1416 ngid = VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (gid_t)VNOVAL;
1417 if (((nuid != (uid_t)VNOVAL) || (ngid != (gid_t)VNOVAL)) &&
1418 ((error = hfs_chown(vp, nuid, ngid, cred, p)) != 0))
1419 goto out;
1420
1421 /*
1422 * Mode change request.
1423 * We are guaranteed that the mode value is valid and that in
1424 * conjunction with the owner and group, this change is legal.
1425 */
1426 VATTR_SET_SUPPORTED(vap, va_mode);
1427 if (VATTR_IS_ACTIVE(vap, va_mode) &&
1428 ((error = hfs_chmod(vp, (int)vap->va_mode, cred, p)) != 0))
1429 goto out;
1430
1431 /*
1432 * File flags change.
1433 * We are guaranteed that only flags allowed to change given the
1434 * current securelevel are being changed.
1435 */
1436 VATTR_SET_SUPPORTED(vap, va_flags);
1437 if (VATTR_IS_ACTIVE(vap, va_flags)) {
1438 u_int16_t *fdFlags;
1439
1440 #if HFS_COMPRESSION
1441 if ((cp->c_bsdflags ^ vap->va_flags) & UF_COMPRESSED) {
1442 /*
1443 * the UF_COMPRESSED was toggled, so reset our cached compressed state
1444 * but we don't want to actually do the update until we've released the cnode lock down below
1445 * NOTE: turning the flag off doesn't actually decompress the file, so that we can
1446 * turn off the flag and look at the "raw" file for debugging purposes
1447 */
1448 decmpfs_reset_state = 1;
1449 }
1450 #endif
1451 if ((vap->va_flags & UF_TRACKED) && !(cp->c_bsdflags & UF_TRACKED)) {
1452 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
1453
1454 //
1455 // we're marking this item UF_TRACKED. if the document_id is
1456 // not set, get a new one and put it on the file.
1457 //
1458 if (fip->document_id == 0) {
1459 if (document_id != 0) {
1460 // printf("SETATTR: assigning doc-id %d to %s (ino %d)\n", document_id, vp->v_name, cp->c_desc.cd_cnid);
1461 fip->document_id = (uint32_t)document_id;
1462 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
1463 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
1464 FSE_ARG_INO, (ino64_t)0, // src inode #
1465 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
1466 FSE_ARG_INT32, document_id,
1467 FSE_ARG_DONE);
1468 } else {
1469 // printf("hfs: could not acquire a new document_id for %s (ino %d)\n", vp->v_name, cp->c_desc.cd_cnid);
1470 }
1471 }
1472
1473 } else if (!(vap->va_flags & UF_TRACKED) && (cp->c_bsdflags & UF_TRACKED)) {
1474 //
1475 // UF_TRACKED is being cleared so clear the document_id
1476 //
1477 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
1478 if (fip->document_id) {
1479 // printf("SETATTR: clearing doc-id %d from %s (ino %d)\n", fip->document_id, vp->v_name, cp->c_desc.cd_cnid);
1480 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
1481 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
1482 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
1483 FSE_ARG_INO, (ino64_t)0, // dst inode #
1484 FSE_ARG_INT32, fip->document_id, // document id
1485 FSE_ARG_DONE);
1486 fip->document_id = 0;
1487 cp->c_bsdflags &= ~UF_TRACKED;
1488 }
1489 }
1490
1491 cp->c_bsdflags = vap->va_flags;
1492 cp->c_flag |= C_MODIFIED;
1493 cp->c_touch_chgtime = TRUE;
1494
1495
1496 /*
1497 * Mirror the UF_HIDDEN flag to the invisible bit of the Finder Info.
1498 *
1499 * The fdFlags for files and frFlags for folders are both 8 bytes
1500 * into the userInfo (the first 16 bytes of the Finder Info). They
1501 * are both 16-bit fields.
1502 */
1503 fdFlags = (u_int16_t *) &cp->c_finderinfo[8];
1504 if (vap->va_flags & UF_HIDDEN)
1505 *fdFlags |= OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1506 else
1507 *fdFlags &= ~OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1508 }
1509
1510 /*
1511 * Timestamp updates.
1512 */
1513 VATTR_SET_SUPPORTED(vap, va_create_time);
1514 VATTR_SET_SUPPORTED(vap, va_access_time);
1515 VATTR_SET_SUPPORTED(vap, va_modify_time);
1516 VATTR_SET_SUPPORTED(vap, va_backup_time);
1517 VATTR_SET_SUPPORTED(vap, va_change_time);
1518 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
1519 VATTR_IS_ACTIVE(vap, va_access_time) ||
1520 VATTR_IS_ACTIVE(vap, va_modify_time) ||
1521 VATTR_IS_ACTIVE(vap, va_backup_time)) {
1522 if (VATTR_IS_ACTIVE(vap, va_create_time))
1523 cp->c_itime = vap->va_create_time.tv_sec;
1524 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
1525 cp->c_atime = vap->va_access_time.tv_sec;
1526 cp->c_touch_acctime = FALSE;
1527 }
1528 if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
1529 cp->c_mtime = vap->va_modify_time.tv_sec;
1530 cp->c_touch_modtime = FALSE;
1531 cp->c_touch_chgtime = TRUE;
1532
1533 hfs_clear_might_be_dirty_flag(cp);
1534
1535 /*
1536 * The utimes system call can reset the modification
1537 * time but it doesn't know about HFS create times.
1538 * So we need to ensure that the creation time is
1539 * always at least as old as the modification time.
1540 */
1541 if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
1542 (cp->c_cnid != kHFSRootFolderID) &&
1543 !VATTR_IS_ACTIVE(vap, va_create_time) &&
1544 (cp->c_mtime < cp->c_itime)) {
1545 cp->c_itime = cp->c_mtime;
1546 }
1547 }
1548 if (VATTR_IS_ACTIVE(vap, va_backup_time))
1549 cp->c_btime = vap->va_backup_time.tv_sec;
1550 cp->c_flag |= C_MINOR_MOD;
1551 }
1552
1553 // Set the date added time
1554 VATTR_SET_SUPPORTED(vap, va_addedtime);
1555 if (VATTR_IS_ACTIVE(vap, va_addedtime)) {
1556 hfs_write_dateadded(&cp->c_attr, vap->va_addedtime.tv_sec);
1557 cp->c_flag &= ~C_NEEDS_DATEADDED;
1558 cp->c_touch_chgtime = true;
1559 }
1560
1561 /*
1562 * Set name encoding.
1563 */
1564 VATTR_SET_SUPPORTED(vap, va_encoding);
1565 if (VATTR_IS_ACTIVE(vap, va_encoding)) {
1566 cp->c_encoding = vap->va_encoding;
1567 cp->c_flag |= C_MODIFIED;
1568 hfs_setencodingbits(hfsmp, cp->c_encoding);
1569 }
1570
1571 if ((error = hfs_update(vp, 0)) != 0)
1572 goto out;
1573
1574 out:
1575 if (cp) {
1576 /* Purge origin cache for cnode, since caller now has correct link ID for it
1577 * We purge it here since it was acquired for us during lookup, and we no longer need it.
1578 */
1579 if ((cp->c_flag & C_HARDLINK) && (vnode_vtype(vp) != VDIR)){
1580 hfs_relorigin(cp, 0);
1581 }
1582
1583 hfs_unlock(cp);
1584 #if HFS_COMPRESSION
1585 if (decmpfs_reset_state) {
1586 /*
1587 * we've changed the UF_COMPRESSED flag, so reset the decmpfs state for this cnode
1588 * but don't do it while holding the hfs cnode lock
1589 */
1590 decmpfs_cnode *dp = VTOCMP(vp);
1591 if (!dp) {
1592 /*
1593 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1594 * is filled in; we need a decmpfs_cnode to prevent decmpfs state changes
1595 * on this file if it's locked
1596 */
1597 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1598 if (!dp) {
1599 /* failed to allocate a decmpfs_cnode */
1600 return ENOMEM; /* what should this be? */
1601 }
1602 }
1603 decmpfs_cnode_set_vnode_state(dp, FILE_TYPE_UNKNOWN, 0);
1604 }
1605 #endif
1606 }
1607
1608 #if CONFIG_PROTECT
1609 VATTR_SET_SUPPORTED(vap, va_dataprotect_class);
1610 if (!error && VATTR_IS_ACTIVE(vap, va_dataprotect_class))
1611 error = cp_vnode_setclass(vp, vap->va_dataprotect_class);
1612 #endif
1613
1614 return (error);
1615 }
1616
1617
1618 /*
1619 * Change the mode on a file.
1620 * cnode must be locked before calling.
1621 */
1622 int
1623 hfs_chmod(struct vnode *vp, int mode, __unused kauth_cred_t cred, __unused struct proc *p)
1624 {
1625 register struct cnode *cp = VTOC(vp);
1626
1627 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1628 return (0);
1629
1630 // Don't allow modification of the journal or journal_info_block
1631 if (hfs_is_journal_file(VTOHFS(vp), cp)) {
1632 return EPERM;
1633 }
1634
1635 #if OVERRIDE_UNKNOWN_PERMISSIONS
1636 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS) {
1637 return (0);
1638 };
1639 #endif
1640
1641 mode_t new_mode = (cp->c_mode & ~ALLPERMS) | (mode & ALLPERMS);
1642 if (new_mode != cp->c_mode) {
1643 cp->c_mode = new_mode;
1644 cp->c_flag |= C_MINOR_MOD;
1645 }
1646 cp->c_touch_chgtime = TRUE;
1647 return (0);
1648 }
1649
1650
1651 int
1652 hfs_write_access(struct vnode *vp, kauth_cred_t cred, struct proc *p, Boolean considerFlags)
1653 {
1654 struct cnode *cp = VTOC(vp);
1655 int retval = 0;
1656 int is_member;
1657
1658 /*
1659 * Disallow write attempts on read-only file systems;
1660 * unless the file is a socket, fifo, or a block or
1661 * character device resident on the file system.
1662 */
1663 switch (vnode_vtype(vp)) {
1664 case VDIR:
1665 case VLNK:
1666 case VREG:
1667 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
1668 return (EROFS);
1669 break;
1670 default:
1671 break;
1672 }
1673
1674 /* If immutable bit set, nobody gets to write it. */
1675 if (considerFlags && (cp->c_bsdflags & IMMUTABLE))
1676 return (EPERM);
1677
1678 /* Otherwise, user id 0 always gets access. */
1679 if (!suser(cred, NULL))
1680 return (0);
1681
1682 /* Otherwise, check the owner. */
1683 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
1684 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
1685
1686 /* Otherwise, check the groups. */
1687 if (kauth_cred_ismember_gid(cred, cp->c_gid, &is_member) == 0 && is_member) {
1688 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
1689 }
1690
1691 /* Otherwise, check everyone else. */
1692 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
1693 }
1694
1695
1696 /*
1697 * Perform chown operation on cnode cp;
1698 * cnode must be locked prior to call.
1699 */
1700 int
1701 #if !QUOTA
1702 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, __unused kauth_cred_t cred,
1703 __unused struct proc *p)
1704 #else
1705 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred,
1706 __unused struct proc *p)
1707 #endif
1708 {
1709 register struct cnode *cp = VTOC(vp);
1710 uid_t ouid;
1711 gid_t ogid;
1712 #if QUOTA
1713 int error = 0;
1714 register int i;
1715 int64_t change;
1716 #endif /* QUOTA */
1717
1718 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1719 return (ENOTSUP);
1720
1721 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS)
1722 return (0);
1723
1724 if (uid == (uid_t)VNOVAL)
1725 uid = cp->c_uid;
1726 if (gid == (gid_t)VNOVAL)
1727 gid = cp->c_gid;
1728
1729 #if 0 /* we are guaranteed that this is already the case */
1730 /*
1731 * If we don't own the file, are trying to change the owner
1732 * of the file, or are not a member of the target group,
1733 * the caller must be superuser or the call fails.
1734 */
1735 if ((kauth_cred_getuid(cred) != cp->c_uid || uid != cp->c_uid ||
1736 (gid != cp->c_gid &&
1737 (kauth_cred_ismember_gid(cred, gid, &is_member) || !is_member))) &&
1738 (error = suser(cred, 0)))
1739 return (error);
1740 #endif
1741
1742 ogid = cp->c_gid;
1743 ouid = cp->c_uid;
1744
1745 if (ouid == uid && ogid == gid) {
1746 // No change, just set change time
1747 cp->c_touch_chgtime = TRUE;
1748 return 0;
1749 }
1750
1751 #if QUOTA
1752 if ((error = hfs_getinoquota(cp)))
1753 return (error);
1754 if (ouid == uid) {
1755 dqrele(cp->c_dquot[USRQUOTA]);
1756 cp->c_dquot[USRQUOTA] = NODQUOT;
1757 }
1758 if (ogid == gid) {
1759 dqrele(cp->c_dquot[GRPQUOTA]);
1760 cp->c_dquot[GRPQUOTA] = NODQUOT;
1761 }
1762
1763 /*
1764 * Eventually need to account for (fake) a block per directory
1765 * if (vnode_isdir(vp))
1766 * change = VTOHFS(vp)->blockSize;
1767 * else
1768 */
1769
1770 change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
1771 (void) hfs_chkdq(cp, -change, cred, CHOWN);
1772 (void) hfs_chkiq(cp, -1, cred, CHOWN);
1773 for (i = 0; i < MAXQUOTAS; i++) {
1774 dqrele(cp->c_dquot[i]);
1775 cp->c_dquot[i] = NODQUOT;
1776 }
1777 #endif /* QUOTA */
1778 cp->c_gid = gid;
1779 cp->c_uid = uid;
1780 #if QUOTA
1781 if ((error = hfs_getinoquota(cp)) == 0) {
1782 if (ouid == uid) {
1783 dqrele(cp->c_dquot[USRQUOTA]);
1784 cp->c_dquot[USRQUOTA] = NODQUOT;
1785 }
1786 if (ogid == gid) {
1787 dqrele(cp->c_dquot[GRPQUOTA]);
1788 cp->c_dquot[GRPQUOTA] = NODQUOT;
1789 }
1790 if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
1791 if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
1792 goto good;
1793 else
1794 (void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
1795 }
1796 for (i = 0; i < MAXQUOTAS; i++) {
1797 dqrele(cp->c_dquot[i]);
1798 cp->c_dquot[i] = NODQUOT;
1799 }
1800 }
1801 cp->c_gid = ogid;
1802 cp->c_uid = ouid;
1803 if (hfs_getinoquota(cp) == 0) {
1804 if (ouid == uid) {
1805 dqrele(cp->c_dquot[USRQUOTA]);
1806 cp->c_dquot[USRQUOTA] = NODQUOT;
1807 }
1808 if (ogid == gid) {
1809 dqrele(cp->c_dquot[GRPQUOTA]);
1810 cp->c_dquot[GRPQUOTA] = NODQUOT;
1811 }
1812 (void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
1813 (void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
1814 (void) hfs_getinoquota(cp);
1815 }
1816 return (error);
1817 good:
1818 if (hfs_getinoquota(cp))
1819 panic("hfs_chown: lost quota");
1820 #endif /* QUOTA */
1821
1822 /*
1823 * Without quotas, we could probably make this a minor
1824 * modification.
1825 */
1826 cp->c_flag |= C_MODIFIED;
1827
1828 /*
1829 * According to the SUSv3 Standard, chown() shall mark
1830 * for update the st_ctime field of the file.
1831 * (No exceptions mentioned.)
1832 */
1833 cp->c_touch_chgtime = TRUE;
1834 return (0);
1835 }
1836
1837 #if HFS_COMPRESSION
1838 /*
1839 * Flush the resource fork if it exists. vp is the data fork and has
1840 * an iocount.
1841 */
1842 static int hfs_flush_rsrc(vnode_t vp, vfs_context_t ctx)
1843 {
1844 cnode_t *cp = VTOC(vp);
1845
1846 hfs_lock(cp, HFS_SHARED_LOCK, 0);
1847
1848 vnode_t rvp = cp->c_rsrc_vp;
1849
1850 if (!rvp) {
1851 hfs_unlock(cp);
1852 return 0;
1853 }
1854
1855 int vid = vnode_vid(rvp);
1856
1857 hfs_unlock(cp);
1858
1859 int error = vnode_getwithvid(rvp, vid);
1860
1861 if (error)
1862 return error == ENOENT ? 0 : error;
1863
1864 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, 0);
1865 hfs_lock_always(cp, HFS_EXCLUSIVE_LOCK);
1866 hfs_filedone(rvp, ctx, HFS_FILE_DONE_NO_SYNC);
1867 hfs_unlock(cp);
1868 hfs_unlock_truncate(cp, 0);
1869
1870 error = ubc_msync(rvp, 0, ubc_getsize(rvp), NULL,
1871 UBC_PUSHALL | UBC_SYNC);
1872
1873 vnode_put(rvp);
1874
1875 return error;
1876 }
1877 #endif // HFS_COMPRESSION
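
/*
 * Illustrative sketch (not compiled) of the vid/iocount pattern used by
 * hfs_flush_rsrc() above (and again in hfs_vnop_remove() below): cache
 * the resource vnode's vid while holding the cnode lock, drop the lock,
 * then use vnode_getwithvid() so that a vnode recycled in the window is
 * detected instead of being operated on.  The helper name is
 * hypothetical.
 */
#if 0
static int
use_rsrc_vnode_safely(cnode_t *cp)
{
	hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);

	vnode_t rvp = cp->c_rsrc_vp;
	if (rvp == NULL) {
		hfs_unlock(cp);
		return 0;			/* nothing to do */
	}

	uint32_t vid = vnode_vid(rvp);		/* identity cookie for rvp */
	hfs_unlock(cp);

	/* Fails (e.g. ENOENT) if rvp was recycled after we dropped the lock. */
	int error = vnode_getwithvid(rvp, vid);
	if (error)
		return error == ENOENT ? 0 : error;

	/* ... safe to issue I/O against rvp here: we hold an iocount ... */

	vnode_put(rvp);				/* drop the iocount taken above */
	return 0;
}
#endif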
1878
1879 /*
1880 * hfs_vnop_exchange:
1881 *
1882 * Inputs:
1883 * 'from' vnode/cnode
1884 * 'to' vnode/cnode
1885 * options flag bits
1886 * vfs_context
1887 *
1888 * Discussion:
1889 * hfs_vnop_exchange is used to service the exchangedata(2) system call.
1890 * Per the requirements of that system call, this function "swaps" some
1891 * of the information that lives in one catalog record for some that
1892 * lives in another. Note that not everything is swapped; in particular,
1893 * the extent information stored in each cnode is kept local to that
1894 * cnode. This allows existing file descriptor references to continue
1895 * to operate on the same content, regardless of the location in the
1896 * namespace that the file may have moved to. See inline comments
1897 * in the function for more information.
1898 */
1899 int
1900 hfs_vnop_exchange(struct vnop_exchange_args *ap)
1901 {
1902 struct vnode *from_vp = ap->a_fvp;
1903 struct vnode *to_vp = ap->a_tvp;
1904 struct cnode *from_cp;
1905 struct cnode *to_cp;
1906 struct hfsmount *hfsmp;
1907 struct cat_desc tempdesc;
1908 struct cat_attr tempattr;
1909 const unsigned char *from_nameptr;
1910 const unsigned char *to_nameptr;
1911 char from_iname[32];
1912 char to_iname[32];
1913 uint32_t to_flag_special;
1914 uint32_t from_flag_special;
1915 cnid_t from_parid;
1916 cnid_t to_parid;
1917 int lockflags;
1918 int error = 0, started_tr = 0, got_cookie = 0;
1919 cat_cookie_t cookie;
1920 time_t orig_from_ctime, orig_to_ctime;
1921 bool have_cnode_locks = false, have_from_trunc_lock = false, have_to_trunc_lock = false;
1922
1923 /*
1924 * VFS does the following checks:
1925 * 1. Validate that both are files.
1926 * 2. Validate that both are on the same mount.
1927 * 3. Validate that they're not the same vnode.
1928 */
1929
1930 from_cp = VTOC(from_vp);
1931 to_cp = VTOC(to_vp);
1932 hfsmp = VTOHFS(from_vp);
1933
1934 orig_from_ctime = from_cp->c_ctime;
1935 orig_to_ctime = to_cp->c_ctime;
1936
1937 #if CONFIG_PROTECT
1938 /*
1939 * Do not allow exchangedata/F_MOVEDATAEXTENTS on data-protected filesystems
1940 * because the EAs will not be swapped. As a result, the persistent keys would not
1941 * match and the files will be garbage.
1942 */
1943 if (cp_fs_protected (vnode_mount(from_vp))) {
1944 return EINVAL;
1945 }
1946 #endif
1947
1948 #if HFS_COMPRESSION
1949 if (!ISSET(ap->a_options, FSOPT_EXCHANGE_DATA_ONLY)) {
1950 if ( hfs_file_is_compressed(from_cp, 0) ) {
1951 if ( 0 != ( error = decmpfs_decompress_file(from_vp, VTOCMP(from_vp), -1, 0, 1) ) ) {
1952 return error;
1953 }
1954 }
1955
1956 if ( hfs_file_is_compressed(to_cp, 0) ) {
1957 if ( 0 != ( error = decmpfs_decompress_file(to_vp, VTOCMP(to_vp), -1, 0, 1) ) ) {
1958 return error;
1959 }
1960 }
1961 }
1962 #endif // HFS_COMPRESSION
1963
1964 // Resource forks cannot be exchanged.
1965 if (VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp))
1966 return EINVAL;
1967
1968 /*
1969 * Normally, we want to notify the user handlers about the event,
1970 * except if it's a handler driving the event.
1971 */
1972 if ((ap->a_options & FSOPT_EXCHANGE_DATA_ONLY) == 0) {
1973 nspace_snapshot_event(from_vp, orig_from_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
1974 nspace_snapshot_event(to_vp, orig_to_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
1975 } else {
1976 /*
1977 * This is currently used by mtmd so we should tidy up the
1978 * file now because the data won't be used again in the
1979 * destination file.
1980 */
1981 hfs_lock_truncate(from_cp, HFS_EXCLUSIVE_LOCK, 0);
1982 hfs_lock_always(from_cp, HFS_EXCLUSIVE_LOCK);
1983 hfs_filedone(from_vp, ap->a_context, HFS_FILE_DONE_NO_SYNC);
1984 hfs_unlock(from_cp);
1985 hfs_unlock_truncate(from_cp, 0);
1986
1987 // Flush all the data from the source file
1988 error = ubc_msync(from_vp, 0, ubc_getsize(from_vp), NULL,
1989 UBC_PUSHALL | UBC_SYNC);
1990 if (error)
1991 goto exit;
1992
1993 #if HFS_COMPRESSION
1994 /*
1995 * If this is a compressed file, we need to do the same for
1996 * the resource fork.
1997 */
1998 if (ISSET(from_cp->c_bsdflags, UF_COMPRESSED)) {
1999 error = hfs_flush_rsrc(from_vp, ap->a_context);
2000 if (error)
2001 goto exit;
2002 }
2003 #endif
2004
2005 /*
2006 * We're doing a data-swap so we need to take the truncate
2007 * lock exclusively. We need an exclusive lock because we
2008 * will be completely truncating the source file and we must
2009 * make sure nobody else sneaks in and trys to issue I/O
2010 * whilst we don't have the cnode lock.
2011 *
2012 * After taking the truncate lock we do a quick check to
2013 * verify there are no other references (including mmap
2014 * references), but we must remember that this does not stop
2015 * anybody coming in later and taking a reference. We will
2016 * have the truncate lock exclusively so that will prevent
2017 * them from issuing any I/O.
2018 */
2019
2020 if (to_cp < from_cp) {
2021 hfs_lock_truncate(to_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2022 have_to_trunc_lock = true;
2023 }
2024
2025 hfs_lock_truncate(from_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2026 have_from_trunc_lock = true;
2027
2028 /*
2029 * Do an early check to verify the source is not in use by
2030 * anyone. We should be called from an FD opened as O_EVTONLY
2031 * so that doesn't count as a reference.
2032 */
2033 if (vnode_isinuse(from_vp, 0)) {
2034 error = EBUSY;
2035 goto exit;
2036 }
2037
2038 if (to_cp >= from_cp) {
2039 hfs_lock_truncate(to_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2040 have_to_trunc_lock = true;
2041 }
2042 }
2043
2044 if ((error = hfs_lockpair(from_cp, to_cp, HFS_EXCLUSIVE_LOCK)))
2045 goto exit;
2046 have_cnode_locks = true;
2047
2048 // Don't allow modification of the journal or journal_info_block
2049 if (hfs_is_journal_file(hfsmp, from_cp) ||
2050 hfs_is_journal_file(hfsmp, to_cp)) {
2051 error = EPERM;
2052 goto exit;
2053 }
2054
2055 /*
2056 * Ok, now that all of the pre-flighting is done, call the underlying
2057 * function if needed.
2058 */
2059 if (ISSET(ap->a_options, FSOPT_EXCHANGE_DATA_ONLY)) {
2060 #if HFS_COMPRESSION
2061 if (ISSET(from_cp->c_bsdflags, UF_COMPRESSED)) {
2062 error = hfs_move_compressed(from_cp, to_cp);
2063 goto exit;
2064 }
2065 #endif
2066
2067 error = hfs_move_data(from_cp, to_cp, 0);
2068 goto exit;
2069 }
2070
2071 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2072 goto exit;
2073 }
2074 started_tr = 1;
2075
2076 /*
2077 * Reserve some space in the Catalog file.
2078 */
2079 if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, vfs_context_proc(ap->a_context)))) {
2080 goto exit;
2081 }
2082 got_cookie = 1;
2083
2084 /* The backend code always tries to delete the virtual
2085 * extent id for exchanging files so we need to lock
2086 * the extents b-tree.
2087 */
2088 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
2089
2090 /* Account for the location of the catalog objects. */
2091 if (from_cp->c_flag & C_HARDLINK) {
2092 MAKE_INODE_NAME(from_iname, sizeof(from_iname),
2093 from_cp->c_attr.ca_linkref);
2094 from_nameptr = (unsigned char *)from_iname;
2095 from_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2096 from_cp->c_hint = 0;
2097 } else {
2098 from_nameptr = from_cp->c_desc.cd_nameptr;
2099 from_parid = from_cp->c_parentcnid;
2100 }
2101 if (to_cp->c_flag & C_HARDLINK) {
2102 MAKE_INODE_NAME(to_iname, sizeof(to_iname),
2103 to_cp->c_attr.ca_linkref);
2104 to_nameptr = (unsigned char *)to_iname;
2105 to_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2106 to_cp->c_hint = 0;
2107 } else {
2108 to_nameptr = to_cp->c_desc.cd_nameptr;
2109 to_parid = to_cp->c_parentcnid;
2110 }
2111
2112 /*
2113 * ExchangeFileIDs swaps the on-disk, or in-BTree extent information
2114 * attached to two different file IDs. It also swaps the extent
2115 * information that may live in the extents-overflow B-Tree.
2116 *
2117 * We do this in a transaction as this may require a lot of B-Tree nodes
2118 * to do completely, particularly if one of the files in question
2119 * has a lot of extents.
2120 *
2121 * For example, assume "file1" has fileID 50, and "file2" has fileID 52.
2122 * For the on-disk records, which are assumed to be synced, we will
2123 * first swap the resident inline-8 extents as part of the catalog records.
2124 * Then we will swap any extents overflow records for each file.
2125 *
2126 * When ExchangeFileIDs returns successfully, "file1" will have fileID 52,
2127 * and "file2" will have fileID 50. However, note that this is only
2128 * approximately half of the work that exchangedata(2) will need to
2129 * accomplish. In other words, we swap "too much" of the information
2130 * because if we only called ExchangeFileIDs, both the fileID and extent
2131 * information would be the invariants of this operation. We don't
2132 * actually want that; we want to conclude with "file1" having
2133 * file ID 50, and "file2" having fileID 52.
2134 *
2135 * The remainder of hfs_vnop_exchange will swap the file ID and other cnode
2136 * data back to the proper ownership, while still allowing the cnode to remain
2137 * pointing at the same set of extents that it did originally.
2138 */
2139 error = ExchangeFileIDs(hfsmp, from_nameptr, to_nameptr, from_parid,
2140 to_parid, from_cp->c_hint, to_cp->c_hint);
2141 hfs_systemfile_unlock(hfsmp, lockflags);
2142
2143 /*
2144 * Note that we don't need to exchange any extended attributes
2145 * since the attributes are keyed by file ID.
2146 */
2147
2148 if (error != E_NONE) {
2149 error = MacToVFSError(error);
2150 goto exit;
2151 }
2152
2153 /* Purge the vnodes from the name cache */
2154 if (from_vp)
2155 cache_purge(from_vp);
2156 if (to_vp)
2157 cache_purge(to_vp);
2158
2159 /* Bump both source and destination write counts before any swaps. */
2160 {
2161 hfs_incr_gencount (from_cp);
2162 hfs_incr_gencount (to_cp);
2163 }
2164
2165 /* Save a copy of "from" attributes before swapping. */
2166 bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
2167 bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));
2168
2169 /* Save whether or not each cnode is a hardlink or has EAs */
2170 from_flag_special = from_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
2171 to_flag_special = to_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
2172
2173 /* Drop the special bits from each cnode */
2174 from_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
2175 to_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
2176
2177 /*
2178 * Now complete the in-memory portion of the copy.
2179 *
2180 * ExchangeFileIDs swaps the on-disk records involved. We complete the
2181 * operation by swapping the in-memory contents of the two files here.
2182 * We swap the cnode descriptors, which contain name, BSD attributes,
2183 * timestamps, etc, about the file.
2184 *
2185 * NOTE: We do *NOT* swap the fileforks of the two cnodes. We have
2186 * already swapped the on-disk extent information. As long as we swap the
2187 * IDs, the in-line resident 8 extents that live in the filefork data
2188 * structure will point to the right data for the new file ID if we leave
2189 * them alone.
2190 *
2191 * As a result, any file descriptor that points to a particular
2192 * vnode (even though it should change names), will continue
2193 * to point to the same content.
2194 */
2195
2196 /* Copy the "to" -> "from" cnode */
2197 bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));
2198
2199 from_cp->c_hint = 0;
2200 /*
2201 * If 'to' was a hardlink, then we copied over its link ID/CNID/(namespace ID)
2202 * when we bcopy'd the descriptor above. However, the cnode attributes
2203 * are not bcopied. As a result, make sure to swap the file IDs of each item.
2204 *
2205 * Further, other hardlink attributes must be moved along in this swap:
2206 * the linkcount, the linkref, and the firstlink all need to move
2207 * along with the file IDs. See note below regarding the flags and
2208 * what moves vs. what does not.
2209 *
2210 * For Reference:
2211 * linkcount == total # of hardlinks.
2212 * linkref == the indirect inode pointer.
2213 * firstlink == the first hardlink in the chain (written to the raw inode).
2214 * These three are tied to the fileID and must move along with the rest of the data.
2215 */
2216 from_cp->c_fileid = to_cp->c_attr.ca_fileid;
2217
2218 from_cp->c_itime = to_cp->c_itime;
2219 from_cp->c_btime = to_cp->c_btime;
2220 from_cp->c_atime = to_cp->c_atime;
2221 from_cp->c_ctime = to_cp->c_ctime;
2222 from_cp->c_gid = to_cp->c_gid;
2223 from_cp->c_uid = to_cp->c_uid;
2224 from_cp->c_bsdflags = to_cp->c_bsdflags;
2225 from_cp->c_mode = to_cp->c_mode;
2226 from_cp->c_linkcount = to_cp->c_linkcount;
2227 from_cp->c_attr.ca_linkref = to_cp->c_attr.ca_linkref;
2228 from_cp->c_attr.ca_firstlink = to_cp->c_attr.ca_firstlink;
2229
2230 /*
2231 * The cnode flags need to stay with the cnode and not get transferred
2232 * over along with everything else because they describe the content; they are
2233 * not attributes that reflect changes specific to the file ID. In general,
2234 * fields that are tied to the file ID are the ones that will move.
2235 *
2236 * This reflects the fact that the file may have borrowed blocks, dirty metadata,
2237 * or other extents, which may not yet have been written to the catalog. If
2238 * they were, they would have been transferred in the ExchangeFileIDs call above...
2239 *
2240 * The flags that are special are:
2241 * C_HARDLINK, C_HASXATTRS
2242 *
2243 * These flags move with the item and file ID in the namespace since their
2244 * state is tied to that of the file ID.
2245 *
2246 * So to transfer the flags, we have to take the following steps
2247 * 1) Store in a localvar whether or not the special bits are set.
2248 * 2) Drop the special bits from the current flags
2249 * 3) swap the special flag bits to their destination
2250 */
2251 from_cp->c_flag |= to_flag_special | C_MODIFIED;
2252 from_cp->c_attr.ca_recflags = to_cp->c_attr.ca_recflags;
2253 bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);
2254
2255
2256 /* Copy the "from" -> "to" cnode */
2257 bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
2258 to_cp->c_hint = 0;
2259 /*
2260 * Pull the file ID from the tempattr we copied above. We can't assume
2261 * it is the same as the CNID.
2262 */
2263 to_cp->c_fileid = tempattr.ca_fileid;
2264 to_cp->c_itime = tempattr.ca_itime;
2265 to_cp->c_btime = tempattr.ca_btime;
2266 to_cp->c_atime = tempattr.ca_atime;
2267 to_cp->c_ctime = tempattr.ca_ctime;
2268 to_cp->c_gid = tempattr.ca_gid;
2269 to_cp->c_uid = tempattr.ca_uid;
2270 to_cp->c_bsdflags = tempattr.ca_flags;
2271 to_cp->c_mode = tempattr.ca_mode;
2272 to_cp->c_linkcount = tempattr.ca_linkcount;
2273 to_cp->c_attr.ca_linkref = tempattr.ca_linkref;
2274 to_cp->c_attr.ca_firstlink = tempattr.ca_firstlink;
2275
2276 /*
2277 * Only OR in the "from" flags into our cnode flags below.
2278 * Leave the rest of the flags alone.
2279 */
2280 to_cp->c_flag |= from_flag_special | C_MODIFIED;
2281
2282 to_cp->c_attr.ca_recflags = tempattr.ca_recflags;
2283 bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);
2284
2285
2286 /* Rehash the cnodes using their new file IDs */
2287 hfs_chash_rehash(hfsmp, from_cp, to_cp);
2288
2289 /*
2290 * When a file moves out of "Cleanup At Startup"
2291 * we can drop its NODUMP status.
2292 */
2293 if ((from_cp->c_bsdflags & UF_NODUMP) &&
2294 (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
2295 from_cp->c_bsdflags &= ~UF_NODUMP;
2296 from_cp->c_touch_chgtime = TRUE;
2297 }
2298 if ((to_cp->c_bsdflags & UF_NODUMP) &&
2299 (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
2300 to_cp->c_bsdflags &= ~UF_NODUMP;
2301 to_cp->c_touch_chgtime = TRUE;
2302 }
2303
2304 exit:
2305 if (got_cookie) {
2306 cat_postflight(hfsmp, &cookie, vfs_context_proc(ap->a_context));
2307 }
2308 if (started_tr) {
2309 hfs_end_transaction(hfsmp);
2310 }
2311
2312 if (have_cnode_locks)
2313 hfs_unlockpair(from_cp, to_cp);
2314
2315 if (have_from_trunc_lock)
2316 hfs_unlock_truncate(from_cp, 0);
2317
2318 if (have_to_trunc_lock)
2319 hfs_unlock_truncate(to_cp, 0);
2320
2321 return (error);
2322 }
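
/*
 * Illustrative userspace sketch (not part of this file, not compiled):
 * the exchangedata(2) call that hfs_vnop_exchange() services.  This
 * assumes the macOS prototype from <unistd.h>:
 *     int exchangedata(const char *path1, const char *path2,
 *                      unsigned int options);
 * Per the discussion above hfs_vnop_exchange(), after a successful call
 * the two paths name swapped catalog information while existing file
 * descriptors continue to operate on the content they had open.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

static int
swap_files_example(const char *path1, const char *path2)
{
	if (exchangedata(path1, path2, 0) == -1) {
		perror("exchangedata");
		return -1;
	}
	return 0;
}
#endif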
2323
2324 #if HFS_COMPRESSION
2325 /*
2326 * This function is used specifically for the case when a namespace
2327 * handler is trying to steal data before it's deleted. Note that we
2328 * don't bother deleting the xattr from the source because it will get
2329 * deleted a short time later anyway.
2330 *
2331 * cnodes must be locked
2332 */
2333 static int hfs_move_compressed(cnode_t *from_cp, cnode_t *to_cp)
2334 {
2335 int ret;
2336 void *data = NULL;
2337
2338 CLR(from_cp->c_bsdflags, UF_COMPRESSED);
2339 SET(from_cp->c_flag, C_MODIFIED);
2340
2341 ret = hfs_move_data(from_cp, to_cp, HFS_MOVE_DATA_INCLUDE_RSRC);
2342 if (ret)
2343 goto exit;
2344
2345 /*
2346 * Transfer the xattr that decmpfs uses. Ideally, this code
2347 * should be with the other decmpfs code but it's file system
2348 * agnostic and this path is currently, and likely to remain, HFS+
2349 * specific. It's easier and more performant if we implement it
2350 * here.
2351 */
2352
2353 size_t size;
2354 data = hfs_malloc(size = MAX_DECMPFS_XATTR_SIZE);
2355
2356 ret = hfs_xattr_read(from_cp->c_vp, DECMPFS_XATTR_NAME, data, &size);
2357 if (ret)
2358 goto exit;
2359
2360 ret = hfs_xattr_write(to_cp->c_vp, DECMPFS_XATTR_NAME, data, size);
2361 if (ret)
2362 goto exit;
2363
2364 SET(to_cp->c_bsdflags, UF_COMPRESSED);
2365 SET(to_cp->c_flag, C_MODIFIED);
2366
2367 exit:
2368 hfs_free(data, MAX_DECMPFS_XATTR_SIZE);
2369
2370 return ret;
2371 }
2372 #endif // HFS_COMPRESSION
2373
2374 int
2375 hfs_vnop_mmap(struct vnop_mmap_args *ap)
2376 {
2377 struct vnode *vp = ap->a_vp;
2378 cnode_t *cp = VTOC(vp);
2379 int error;
2380
2381 if (VNODE_IS_RSRC(vp)) {
2382 /* allow pageins of the resource fork */
2383 } else {
2384 int compressed = hfs_file_is_compressed(cp, 1); /* 1 == don't take the cnode lock */
2385 time_t orig_ctime = cp->c_ctime;
2386
2387 if (!compressed && (cp->c_bsdflags & UF_COMPRESSED)) {
2388 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
2389 if (error != 0) {
2390 return error;
2391 }
2392 }
2393
2394 if (ap->a_fflags & PROT_WRITE) {
2395 nspace_snapshot_event(vp, orig_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
2396 }
2397 }
2398
2399 #if CONFIG_PROTECT
2400 error = cp_handle_vnop(vp, (ap->a_fflags & PROT_WRITE
2401 ? CP_WRITE_ACCESS : 0) | CP_READ_ACCESS, 0);
2402 if (error)
2403 return error;
2404 #endif
2405
2406 //
2407 // NOTE: we return ENOTSUP because we want the cluster layer
2408 // to actually do all the real work.
2409 //
2410 return (ENOTSUP);
2411 }
2412
2413 static errno_t hfs_vnop_mnomap(struct vnop_mnomap_args *ap)
2414 {
2415 vnode_t vp = ap->a_vp;
2416
2417 /*
2418 * Whilst the file was mapped, there may not have been any
2419 * page-outs so we need to increment the generation counter now.
2420 * Unfortunately this may lead to a change in the generation
2421 * counter when no actual change has been made, but there is
2422 * little we can do about that with our current architecture.
2423 */
2424 if (ubc_is_mapped_writable(vp)) {
2425 cnode_t *cp = VTOC(vp);
2426 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2427 hfs_incr_gencount(cp);
2428
2429 /*
2430 * We don't want to set the modification time here since a
2431 * change to that is not acceptable if no changes were made.
2432 * Instead we set a flag so that if we get any page-outs we
2433 * know to update the modification time. It's possible that
2434 * any page-outs weren't actually the result of changes made
2435 * whilst the file was mapped, but that's not easy to fix now.
2436 */
2437 SET(cp->c_flag, C_MIGHT_BE_DIRTY_FROM_MAPPING);
2438
2439 hfs_unlock(cp);
2440 }
2441
2442 return 0;
2443 }
2444
2445 /*
2446 * Mark the resource fork as needing a ubc_setsize when we drop the
2447 * cnode lock later.
2448 */
2449 static void hfs_rsrc_setsize(cnode_t *cp)
2450 {
2451 /*
2452 * We need to take an iocount if we don't have one. vnode_get
2453 * will return ENOENT if the vnode is terminating which is what we
2454 * want as it's not safe to call ubc_setsize in that case.
2455 */
2456 if (cp->c_rsrc_vp && !vnode_get(cp->c_rsrc_vp)) {
2457 // Shouldn't happen, but better safe...
2458 if (ISSET(cp->c_flag, C_NEED_RVNODE_PUT))
2459 vnode_put(cp->c_rsrc_vp);
2460 SET(cp->c_flag, C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE);
2461 }
2462 }
2463
2464 /*
2465 * hfs_move_data
2466 *
2467 * This is a non-symmetric variant of exchangedata. In this function,
2468 * the contents of the data fork (and optionally the resource fork)
2469 * are moved from from_cp to to_cp.
2470 *
2471 * The cnodes must be locked.
2472 *
2473 * The cnode pointed to by 'to_cp' *must* be empty prior to invoking
2474 * this function. We impose this restriction because we may not be
2475 * able to fully delete the entire file's contents in a single
2476 * transaction, particularly if it has a lot of extents. In the
2477 * normal file deletion codepath, the file is screened for two
2478 * conditions: 1) bigger than 400MB, and 2) more than 8 extents. If
2479 * so, the file is relocated to the hidden directory and the deletion
2480 * is broken up into multiple truncates. We can't do that here
2481 * because both files need to exist in the namespace. The main reason
2482 * this is imposed is that we may have to touch a whole lot of bitmap
2483 * blocks if there are many extents.
2484 *
2485 * Any data written to 'from_cp' after this call completes is not
2486 * guaranteed to be moved.
2487 *
2488 * Arguments:
2489 * cnode_t *from_cp : source file
2490 * cnode_t *to_cp : destination file; must be empty
2491 *
2492 * Returns:
2493 *
2494 * EBUSY - File has been deleted or is in use
2495 * EFBIG - Destination file was not empty
2496 * EIO - An I/O error
2497 * 0 - success
2498 * other - Other errors that can be returned from called functions
2499 */
2500 int hfs_move_data(cnode_t *from_cp, cnode_t *to_cp,
2501 hfs_move_data_options_t options)
2502 {
2503 hfsmount_t *hfsmp = VTOHFS(from_cp->c_vp);
2504 int error = 0;
2505 int lockflags = 0;
2506 bool return_EIO_on_error = false;
2507 const bool include_rsrc = ISSET(options, HFS_MOVE_DATA_INCLUDE_RSRC);
2508
2509 /* Verify that neither source/dest file is open-unlinked */
2510 if (ISSET(from_cp->c_flag, C_DELETED | C_NOEXISTS)
2511 || ISSET(to_cp->c_flag, C_DELETED | C_NOEXISTS)) {
2512 return EBUSY;
2513 }
2514
2515 /*
2516 * Verify the source file is not in use by anyone besides us.
2517 *
2518 * This function is typically invoked by a namespace handler
2519 * process responding to a temporarily stalled system call.
2520 * The FD that it is working off of is opened O_EVTONLY, so
2521 * it really has no active usecounts (the kusecount from O_EVTONLY
2522 * is subtracted from the total usecounts).
2523 *
2524 * As a result, we shouldn't have any active usecounts against
2525 * this vnode when we go to check it below.
2526 */
2527 if (vnode_isinuse(from_cp->c_vp, 0))
2528 return EBUSY;
2529
2530 if (include_rsrc && from_cp->c_rsrc_vp) {
2531 if (vnode_isinuse(from_cp->c_rsrc_vp, 0))
2532 return EBUSY;
2533
2534 /*
2535 * In the code below, if the destination file doesn't have a
2536 * c_rsrcfork then we don't create it, which means we cannot
2537 * transfer the ff_invalidranges and cf_vblocks fields. These
2538 * shouldn't be set because we flush the resource fork before
2539 * calling this function but there is a tiny window when we
2540 * did not have any locks...
2541 */
2542 if (!to_cp->c_rsrcfork
2543 && (!TAILQ_EMPTY(&from_cp->c_rsrcfork->ff_invalidranges)
2544 || from_cp->c_rsrcfork->ff_unallocblocks)) {
2545 /*
2546 * The file isn't really busy now but something did slip
2547 * in and tinker with the file while we didn't have any
2548 * locks, so this is the most meaningful return code for
2549 * the caller.
2550 */
2551 return EBUSY;
2552 }
2553 }
2554
2555 // Check the destination file is empty
2556 if (to_cp->c_datafork->ff_blocks
2557 || to_cp->c_datafork->ff_size
2558 || (include_rsrc
2559 && (to_cp->c_blocks
2560 || (to_cp->c_rsrcfork && to_cp->c_rsrcfork->ff_size)))) {
2561 return EFBIG;
2562 }
2563
2564 if ((error = hfs_start_transaction (hfsmp)))
2565 return error;
2566
2567 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE,
2568 HFS_EXCLUSIVE_LOCK);
2569
2570 // filefork_t is 128 bytes which should be OK
2571 filefork_t rfork_buf, *from_rfork = NULL;
2572
2573 if (include_rsrc) {
2574 from_rfork = from_cp->c_rsrcfork;
2575
2576 /*
2577 * Creating resource fork vnodes is expensive, so just get
2578 * the fork data if we need it.
2579 */
2580 if (!from_rfork && hfs_has_rsrc(from_cp)) {
2581 from_rfork = &rfork_buf;
2582
2583 from_rfork->ff_cp = from_cp;
2584 TAILQ_INIT(&from_rfork->ff_invalidranges);
2585
2586 error = cat_idlookup(hfsmp, from_cp->c_fileid, 0, 1, NULL, NULL,
2587 &from_rfork->ff_data);
2588
2589 if (error)
2590 goto exit;
2591 }
2592 }
2593
2594 /*
2595 * From here on, any failures mean that we might be leaving things
2596 * in a weird or inconsistent state. Ideally, we should back out
2597 * all the changes, but to do that properly we need to fix
2598 * MoveData. We'll save fixing that for another time. For now,
2599 * just return EIO in all cases to the caller so that they know.
2600 */
2601 return_EIO_on_error = true;
2602
2603 bool data_overflow_extents = overflow_extents(from_cp->c_datafork);
2604
2605 // Move the data fork
2606 if ((error = hfs_move_fork (from_cp->c_datafork, from_cp,
2607 to_cp->c_datafork, to_cp))) {
2608 goto exit;
2609 }
2610
2611 SET(from_cp->c_flag, C_NEED_DATA_SETSIZE);
2612 SET(to_cp->c_flag, C_NEED_DATA_SETSIZE);
2613
2614 // We move the resource fork later
2615
2616 /*
2617 * Note that because all we're doing is moving the extents around,
2618 * we can probably do this in a single transaction: Each extent
2619 * record (group of 8) is 64 bytes. An extent overflow B-Tree node
2620 * is typically 4k. This means each node can hold roughly ~60
2621 * extent records == (480 extents).
2622 *
2623 * If a file was massively fragmented and had 20k extents, this
2624 * means we'd roughly touch 20k/480 == 41 to 42 nodes, plus the
2625 * index nodes, for half of the operation. (inserting or
2626 * deleting). So if we're manipulating 80-100 nodes, this is
2627 * basically 320k of data to write to the journal in a bad case.
2628 */
2629 if (data_overflow_extents) {
2630 if ((error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 0)))
2631 goto exit;
2632 }
2633
2634 if (from_rfork && overflow_extents(from_rfork)) {
2635 if ((error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 1)))
2636 goto exit;
2637 }
2638
2639 // Touch times
2640 from_cp->c_touch_acctime = TRUE;
2641 from_cp->c_touch_chgtime = TRUE;
2642 from_cp->c_touch_modtime = TRUE;
2643 hfs_touchtimes(hfsmp, from_cp);
2644
2645 to_cp->c_touch_acctime = TRUE;
2646 to_cp->c_touch_chgtime = TRUE;
2647 to_cp->c_touch_modtime = TRUE;
2648 hfs_touchtimes(hfsmp, to_cp);
2649
2650 struct cat_fork dfork_buf;
2651 const struct cat_fork *dfork, *rfork;
2652
2653 dfork = hfs_prepare_fork_for_update(to_cp->c_datafork, NULL,
2654 &dfork_buf, hfsmp->blockSize);
2655 rfork = hfs_prepare_fork_for_update(from_rfork, NULL,
2656 &rfork_buf.ff_data, hfsmp->blockSize);
2657
2658 // Update the catalog nodes, to_cp first
2659 if ((error = cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr,
2660 dfork, rfork))) {
2661 goto exit;
2662 }
2663
2664 CLR(to_cp->c_flag, C_MODIFIED | C_MINOR_MOD);
2665
2666 // Update in-memory resource fork data here
2667 if (from_rfork) {
2668 // Update c_blocks
2669 uint32_t moving = from_rfork->ff_blocks + from_rfork->ff_unallocblocks;
2670
2671 from_cp->c_blocks -= moving;
2672 to_cp->c_blocks += moving;
2673
2674 // Update to_cp's resource data if it has it
2675 filefork_t *to_rfork = to_cp->c_rsrcfork;
2676 if (to_rfork) {
2677 TAILQ_SWAP(&to_rfork->ff_invalidranges,
2678 &from_rfork->ff_invalidranges, rl_entry, rl_link);
2679 to_rfork->ff_data = from_rfork->ff_data;
2680
2681 // Deal with ubc_setsize
2682 hfs_rsrc_setsize(to_cp);
2683 }
2684
2685 // Wipe out the resource fork in from_cp
2686 rl_init(&from_rfork->ff_invalidranges);
2687 bzero(&from_rfork->ff_data, sizeof(from_rfork->ff_data));
2688
2689 // Deal with ubc_setsize
2690 hfs_rsrc_setsize(from_cp);
2691 }
2692
2693 // Currently unnecessary, but might be useful in future...
2694 dfork = hfs_prepare_fork_for_update(from_cp->c_datafork, NULL, &dfork_buf,
2695 hfsmp->blockSize);
2696 rfork = hfs_prepare_fork_for_update(from_rfork, NULL, &rfork_buf.ff_data,
2697 hfsmp->blockSize);
2698
2699 // Update from_cp
2700 if ((error = cat_update(hfsmp, &from_cp->c_desc, &from_cp->c_attr,
2701 dfork, rfork))) {
2702 goto exit;
2703 }
2704
2705 CLR(from_cp->c_flag, C_MODIFIED | C_MINOR_MOD);
2706
2707 exit:
2708 if (lockflags) {
2709 hfs_systemfile_unlock(hfsmp, lockflags);
2710 hfs_end_transaction(hfsmp);
2711 }
2712
2713 if (error && error != EIO && return_EIO_on_error) {
2714 printf("hfs_move_data: encountered error %d\n", error);
2715 error = EIO;
2716 }
2717
2718 return error;
2719 }
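
/*
 * Worked version (not compiled) of the journal-size estimate in the
 * block comment inside hfs_move_data() above.  The constants come from
 * that comment; index nodes are ignored, so treat the result as a rough
 * lower bound.
 */
#if 0
enum {
	EXTENT_RECORD_BYTES = 64,	/* one extent record holds 8 extents */
	EXTENTS_PER_RECORD  = 8,
	BTREE_NODE_BYTES    = 4096,	/* typical extents overflow node size */
	RECORDS_PER_NODE    = 60,	/* ~4096 / 64, less node overhead */
	EXTENTS_PER_NODE    = RECORDS_PER_NODE * EXTENTS_PER_RECORD	/* 480 */
};

static unsigned int
journal_bytes_estimate(unsigned int total_extents)
{
	/* Leaf nodes touched roughly once for the delete and once for the insert. */
	unsigned int nodes = 2 * ((total_extents + EXTENTS_PER_NODE - 1) / EXTENTS_PER_NODE);

	/* 20000 extents -> 2 * 42 = 84 nodes -> ~340 KB, the "basically 320k" ballpark above. */
	return nodes * BTREE_NODE_BYTES;
}
#endif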
2720
2721 /*
2722 * Move all of the catalog and runtime data in srcfork to dstfork.
2723 *
2724 * This allows us to maintain the invalid ranges across the move data
2725 * operation so we don't need to force all of the pending IO right
2726 * now. In addition, we move all non overflow-extent extents into the
2727 * destination here.
2728 *
2729 * The destination fork must be empty and should have been checked
2730 * prior to calling this.
2731 */
2732 static int hfs_move_fork(filefork_t *srcfork, cnode_t *src_cp,
2733 filefork_t *dstfork, cnode_t *dst_cp)
2734 {
2735 // Move the invalid ranges
2736 TAILQ_SWAP(&dstfork->ff_invalidranges, &srcfork->ff_invalidranges,
2737 rl_entry, rl_link);
2738 rl_remove_all(&srcfork->ff_invalidranges);
2739
2740 // Move the fork data (copy whole structure)
2741 dstfork->ff_data = srcfork->ff_data;
2742 bzero(&srcfork->ff_data, sizeof(srcfork->ff_data));
2743
2744 // Update c_blocks
2745 src_cp->c_blocks -= dstfork->ff_blocks + dstfork->ff_unallocblocks;
2746 dst_cp->c_blocks += dstfork->ff_blocks + dstfork->ff_unallocblocks;
2747
2748 return 0;
2749 }
2750
2751 /*
2752 * cnode must be locked
2753 */
2754 int
2755 hfs_fsync(struct vnode *vp, int waitfor, hfs_fsync_mode_t fsyncmode, struct proc *p)
2756 {
2757 struct cnode *cp = VTOC(vp);
2758 struct filefork *fp = NULL;
2759 int retval = 0;
2760 struct hfsmount *hfsmp = VTOHFS(vp);
2761 struct timeval tv;
2762 int waitdata; /* attributes necessary for data retrieval */
2763 int wait; /* all other attributes (e.g. atime, etc.) */
2764 int took_trunc_lock = 0;
2765 int fsync_default = 1;
2766
2767 /*
2768 * Applications which only care about data integrity rather than full
2769 * file integrity may opt out of (delay) expensive metadata update
2770 * operations as a performance optimization.
2771 */
2772 wait = (waitfor == MNT_WAIT);
2773 waitdata = (waitfor == MNT_DWAIT) | wait;
2774
2775 if (always_do_fullfsync)
2776 fsyncmode = HFS_FSYNC_FULL;
2777 if (fsyncmode != HFS_FSYNC)
2778 fsync_default = 0;
2779
2780 /* HFS directories don't have any data blocks. */
2781 if (vnode_isdir(vp))
2782 goto metasync;
2783 fp = VTOF(vp);
2784
2785 /*
2786 * For system files flush the B-tree header and
2787 * for regular files write out any clusters
2788 */
2789 if (vnode_issystem(vp)) {
2790 if (VTOF(vp)->fcbBTCBPtr != NULL) {
2791 // XXXdbg
2792 if (hfsmp->jnl == NULL) {
2793 BTFlushPath(VTOF(vp));
2794 }
2795 }
2796 } else {
2797 hfs_unlock(cp);
2798 hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
2799 took_trunc_lock = 1;
2800
2801 if (fp->ff_unallocblocks != 0) {
2802 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2803
2804 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2805 }
2806
2807 /* Don't hold cnode lock when calling into cluster layer. */
2808 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
2809
2810 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2811 }
2812 /*
2813 * When MNT_WAIT is requested and the zero fill timeout
2814 * has expired then we must explicitly zero out any areas
2815 * that are currently marked invalid (holes).
2816 *
2817 * Files with NODUMP can bypass zero filling here.
2818 */
2819 if (fp && (((cp->c_flag & C_ALWAYS_ZEROFILL) && !TAILQ_EMPTY(&fp->ff_invalidranges)) ||
2820 ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
2821 ((cp->c_bsdflags & UF_NODUMP) == 0) &&
2822 (vnode_issystem(vp) ==0) &&
2823 cp->c_zftimeout != 0))) {
2824
2825 microuptime(&tv);
2826 if ((cp->c_flag & C_ALWAYS_ZEROFILL) == 0 && fsync_default && tv.tv_sec < (long)cp->c_zftimeout) {
2827 /* Remember that a force sync was requested. */
2828 cp->c_flag |= C_ZFWANTSYNC;
2829 goto datasync;
2830 }
2831 if (!TAILQ_EMPTY(&fp->ff_invalidranges)) {
2832 if (!took_trunc_lock || (cp->c_truncatelockowner == HFS_SHARED_OWNER)) {
2833 hfs_unlock(cp);
2834 if (took_trunc_lock) {
2835 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2836 }
2837 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2838 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2839 took_trunc_lock = 1;
2840 }
2841 hfs_flush_invalid_ranges(vp);
2842 hfs_unlock(cp);
2843 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
2844 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2845 }
2846 }
2847 datasync:
2848 if (took_trunc_lock) {
2849 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2850 took_trunc_lock = 0;
2851 }
2852
2853 if (!hfsmp->jnl)
2854 buf_flushdirtyblks(vp, waitdata, 0, "hfs_fsync");
2855 else if (fsync_default && vnode_islnk(vp)
2856 && vnode_hasdirtyblks(vp) && vnode_isrecycled(vp)) {
2857 /*
2858 * If it's a symlink that's dirty and is about to be recycled,
2859 * we need to flush the journal.
2860 */
2861 fsync_default = 0;
2862 }
2863
2864 metasync:
2865 if (vnode_isreg(vp) && vnode_issystem(vp)) {
2866 if (VTOF(vp)->fcbBTCBPtr != NULL) {
2867 microuptime(&tv);
2868 BTSetLastSync(VTOF(vp), tv.tv_sec);
2869 }
2870 cp->c_touch_acctime = FALSE;
2871 cp->c_touch_chgtime = FALSE;
2872 cp->c_touch_modtime = FALSE;
2873 } else if (!vnode_isswap(vp)) {
2874 retval = hfs_update(vp, HFS_UPDATE_FORCE);
2875
2876 /*
2877 * When MNT_WAIT is requested push out the catalog record for
2878 * this file. If they asked for a full fsync, we can skip this
2879 * because the journal_flush or hfs_metasync_all will push out
2880 * all of the metadata changes.
2881 */
2882 if ((retval == 0) && wait && fsync_default && cp->c_hint &&
2883 !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
2884 hfs_metasync(VTOHFS(vp), (daddr64_t)cp->c_hint, p);
2885 }
2886
2887 /*
2888 * If this was a full fsync, make sure all metadata
2889 * changes get to stable storage.
2890 */
2891 if (!fsync_default) {
2892 if (hfsmp->jnl) {
2893 if (fsyncmode == HFS_FSYNC_FULL)
2894 hfs_flush(hfsmp, HFS_FLUSH_FULL);
2895 else
2896 hfs_flush(hfsmp, HFS_FLUSH_JOURNAL_BARRIER);
2897 } else {
2898 retval = hfs_metasync_all(hfsmp);
2899 /* XXX need to pass context! */
2900 hfs_flush(hfsmp, HFS_FLUSH_CACHE);
2901 }
2902 }
2903 }
2904
2905 if (!hfs_is_dirty(cp) && !ISSET(cp->c_flag, C_DELETED))
2906 vnode_cleardirty(vp);
2907
2908 return (retval);
2909 }
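
/*
 * Illustrative userspace sketch (not part of this file, not compiled):
 * a plain fsync(2) reaches the default HFS_FSYNC path above, while the
 * documented fcntl(fd, F_FULLFSYNC) from <fcntl.h> is the usual way to
 * request the HFS_FSYNC_FULL behaviour, which also pushes the journal
 * and the drive's write cache to stable storage.  Error handling is
 * minimal here.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
flush_to_stable_storage(int fd, int want_full)
{
	if (want_full) {
		/* Data, metadata, journal and the device's write cache. */
		if (fcntl(fd, F_FULLFSYNC) != -1)
			return 0;
		/* Fall back to a plain fsync if the volume/device lacks support. */
	}
	return fsync(fd);
}
#endif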
2910
2911
2912 /* Sync an hfs catalog b-tree node */
2913 int
2914 hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p)
2915 {
2916 vnode_t vp;
2917 buf_t bp;
2918 int lockflags;
2919
2920 vp = HFSTOVCB(hfsmp)->catalogRefNum;
2921
2922 // XXXdbg - don't need to do this on a journaled volume
2923 if (hfsmp->jnl) {
2924 return 0;
2925 }
2926
2927 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
2928 /*
2929 * Look for a matching node that has been delayed
2930 * but is not part of a set (B_LOCKED).
2931 *
2932 * BLK_ONLYVALID causes buf_getblk to return a
2933 * buf_t for the daddr64_t specified only if it's
2934 * currently resident in the cache... the size
2935 * parameter to buf_getblk is ignored when this flag
2936 * is set
2937 */
2938 bp = buf_getblk(vp, node, 0, 0, 0, BLK_META | BLK_ONLYVALID);
2939
2940 if (bp) {
2941 if ((buf_flags(bp) & (B_LOCKED | B_DELWRI)) == B_DELWRI)
2942 (void) VNOP_BWRITE(bp);
2943 else
2944 buf_brelse(bp);
2945 }
2946
2947 hfs_systemfile_unlock(hfsmp, lockflags);
2948
2949 return (0);
2950 }
2951
2952
2953 /*
2954 * Sync all hfs B-trees. Use this instead of journal_flush for a volume
2955 * without a journal. Note that the volume bitmap does not get written;
2956 * we rely on fsck_hfs to fix that up (which it can do without any loss
2957 * of data).
2958 */
2959 int
2960 hfs_metasync_all(struct hfsmount *hfsmp)
2961 {
2962 int lockflags;
2963
2964 /* Lock all of the B-trees so we get a mutually consistent state */
2965 lockflags = hfs_systemfile_lock(hfsmp,
2966 SFL_CATALOG|SFL_EXTENTS|SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
2967
2968 /* Sync each of the B-trees */
2969 if (hfsmp->hfs_catalog_vp)
2970 hfs_btsync(hfsmp->hfs_catalog_vp, 0);
2971 if (hfsmp->hfs_extents_vp)
2972 hfs_btsync(hfsmp->hfs_extents_vp, 0);
2973 if (hfsmp->hfs_attribute_vp)
2974 hfs_btsync(hfsmp->hfs_attribute_vp, 0);
2975
2976 /* Wait for all of the writes to complete */
2977 if (hfsmp->hfs_catalog_vp)
2978 vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_metasync_all");
2979 if (hfsmp->hfs_extents_vp)
2980 vnode_waitforwrites(hfsmp->hfs_extents_vp, 0, 0, 0, "hfs_metasync_all");
2981 if (hfsmp->hfs_attribute_vp)
2982 vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs_metasync_all");
2983
2984 hfs_systemfile_unlock(hfsmp, lockflags);
2985
2986 return 0;
2987 }
2988
2989
2990 /*ARGSUSED 1*/
2991 static int
2992 hfs_btsync_callback(struct buf *bp, __unused void *dummy)
2993 {
2994 buf_clearflags(bp, B_LOCKED);
2995 (void) buf_bawrite(bp);
2996
2997 return(BUF_CLAIMED);
2998 }
2999
3000
3001 int
3002 hfs_btsync(struct vnode *vp, int sync_transaction)
3003 {
3004 struct cnode *cp = VTOC(vp);
3005 struct timeval tv;
3006 int flags = 0;
3007
3008 if (sync_transaction)
3009 flags |= BUF_SKIP_NONLOCKED;
3010 /*
3011 * Flush all dirty buffers associated with b-tree.
3012 */
3013 buf_iterate(vp, hfs_btsync_callback, flags, 0);
3014
3015 microuptime(&tv);
3016 if (vnode_issystem(vp) && (VTOF(vp)->fcbBTCBPtr != NULL))
3017 (void) BTSetLastSync(VTOF(vp), tv.tv_sec);
3018 cp->c_touch_acctime = FALSE;
3019 cp->c_touch_chgtime = FALSE;
3020 cp->c_touch_modtime = FALSE;
3021
3022 return 0;
3023 }
3024
3025 /*
3026 * Remove a directory.
3027 */
3028 int
3029 hfs_vnop_rmdir(struct vnop_rmdir_args *ap)
3030 {
3031 struct vnode *dvp = ap->a_dvp;
3032 struct vnode *vp = ap->a_vp;
3033 struct cnode *dcp = VTOC(dvp);
3034 struct cnode *cp = VTOC(vp);
3035 int error;
3036 time_t orig_ctime;
3037
3038 orig_ctime = VTOC(vp)->c_ctime;
3039
3040 if (!S_ISDIR(cp->c_mode)) {
3041 return (ENOTDIR);
3042 }
3043 if (dvp == vp) {
3044 return (EINVAL);
3045 }
3046
3047 nspace_snapshot_event(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
3048 cp = VTOC(vp);
3049
3050 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
3051 return (error);
3052 }
3053
3054 /* Check for a race with rmdir on the parent directory */
3055 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
3056 hfs_unlockpair (dcp, cp);
3057 return ENOENT;
3058 }
3059
3060 //
3061 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
3062 //
3063 if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
3064 uint32_t newid;
3065
3066 hfs_unlockpair(dcp, cp);
3067
3068 if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
3069 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3070 ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
3071 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
3072 FSE_ARG_DEV, VTOHFS(vp)->hfs_raw_dev,
3073 FSE_ARG_INO, (ino64_t)0, // src inode #
3074 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
3075 FSE_ARG_INT32, newid,
3076 FSE_ARG_DONE);
3077 } else {
3078 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rm...
3079 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3080 }
3081 }
3082
3083 error = hfs_removedir(dvp, vp, ap->a_cnp, 0, 0);
3084
3085 hfs_unlockpair(dcp, cp);
3086
3087 return (error);
3088 }
3089
3090 /*
3091 * Remove a directory
3092 *
3093 * Both dvp and vp cnodes are locked
3094 */
3095 int
3096 hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
3097 int skip_reserve, int only_unlink)
3098 {
3099 struct cnode *cp;
3100 struct cnode *dcp;
3101 struct hfsmount * hfsmp;
3102 struct cat_desc desc;
3103 int lockflags;
3104 int error = 0, started_tr = 0;
3105
3106 cp = VTOC(vp);
3107 dcp = VTOC(dvp);
3108 hfsmp = VTOHFS(vp);
3109
3110 if (dcp == cp) {
3111 return (EINVAL); /* cannot remove "." */
3112 }
3113 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
3114 return (0);
3115 }
3116 if (cp->c_entries != 0) {
3117 return (ENOTEMPTY);
3118 }
3119
3120 /*
3121 * If the directory is open or in use (e.g. opendir() or current working
3122 * directory for some process), wait for inactive/reclaim to actually
3123 * remove the cnode from the catalog. Both inactive and reclaim codepaths are capable
3124 * of removing open-unlinked directories from the catalog, as well as getting rid
3125 * of EAs still on the element. So change only_unlink to true, so that it will get
3126 * cleaned up below.
3127 *
3128 * Otherwise, we can get into a weird old mess where the directory has C_DELETED,
3129 * but it really means C_NOEXISTS because the item was actually removed from the
3130 * catalog. Then when we try to remove the entry from the catalog later on, it won't
3131 * really be there anymore.
3132 */
3133 if (vnode_isinuse(vp, 0)) {
3134 only_unlink = 1;
3135 }
3136
3137 /* Deal with directory hardlinks */
3138 if (cp->c_flag & C_HARDLINK) {
3139 /*
3140 * Note that if we have a directory which was a hardlink at any point,
3141 * its actual directory data is stored in the directory inode in the hidden
3142 * directory rather than the leaf element(s) present in the namespace.
3143 *
3144 * If there are still other hardlinks to this directory,
3145 * then we'll just eliminate this particular link and the vnode will still exist.
3146 * If this is the last link to an empty directory, then we'll open-unlink the
3147 * directory and it will be only tagged with C_DELETED (as opposed to C_NOEXISTS).
3148 *
3149 * We could also return EBUSY here.
3150 */
3151
3152 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
3153 }
3154
3155 /*
3156 * In a few cases, we may want to allow the directory to persist in an
3157 * open-unlinked state. If the directory is being open-unlinked (still has usecount
3158 * references), or if it has EAs, or if it was being deleted as part of a rename,
3159 * then we go ahead and move it to the hidden directory.
3160 *
3161 * If the directory is being open-unlinked, then we want to keep the catalog entry
3162 * alive so that future EA calls and fchmod/fstat etc. do not cause issues later.
3163 *
3164 * If the directory had EAs, then we want to use the open-unlink trick so that the
3165 * EA removal is not done in one giant transaction. Otherwise, it could cause a panic
3166 * due to overflowing the journal.
3167 *
3168 * Finally, if it was deleted as part of a rename, we move it to the hidden directory
3169 * in order to maintain rename atomicity.
3170 *
3171 * Note that the allow_dirs argument to hfs_removefile specifies that it is
3172 * supposed to handle directories for this case.
3173 */
3174
3175 if (((hfsmp->hfs_attribute_vp != NULL) &&
3176 ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0)) ||
3177 (only_unlink != 0)) {
3178
3179 int ret = hfs_removefile(dvp, vp, cnp, 0, 0, 1, NULL, only_unlink);
3180 /*
3181 * Even though hfs_vnop_rename calls vnode_recycle for us on tvp, we call
3182 * it here just in case we were invoked by rmdir() on a directory that had
3183 * EAs. To ensure that we start reclaiming the space as soon as possible,
3184 * we call vnode_recycle on the directory.
3185 */
3186 vnode_recycle(vp);
3187
3188 return ret;
3189
3190 }
3191
3192 dcp->c_flag |= C_DIR_MODIFICATION;
3193
3194 #if QUOTA
3195 if (hfsmp->hfs_flags & HFS_QUOTAS)
3196 (void)hfs_getinoquota(cp);
3197 #endif
3198 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3199 goto out;
3200 }
3201 started_tr = 1;
3202
3203 /*
3204 * Verify the directory is empty (and valid).
3205 * (Rmdir ".." won't be valid since
3206 * ".." will contain a reference to
3207 * the current directory and thus be
3208 * non-empty.)
3209 */
3210 if ((dcp->c_bsdflags & APPEND) || (cp->c_bsdflags & (IMMUTABLE | APPEND))) {
3211 error = EPERM;
3212 goto out;
3213 }
3214
3215 /* Remove the entry from the namei cache: */
3216 cache_purge(vp);
3217
3218 /*
3219 * Protect against a race with rename by using the component
3220 * name passed in and parent id from dvp (instead of using
3221 * the cp->c_desc which may have changed).
3222 */
3223 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3224 desc.cd_namelen = cnp->cn_namelen;
3225 desc.cd_parentcnid = dcp->c_fileid;
3226 desc.cd_cnid = cp->c_cnid;
3227 desc.cd_flags = CD_ISDIR;
3228 desc.cd_encoding = cp->c_encoding;
3229 desc.cd_hint = 0;
3230
3231 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
3232 error = 0;
3233 goto out;
3234 }
3235
3236 /* Remove entry from catalog */
3237 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3238
3239 if (!skip_reserve) {
3240 /*
3241 * Reserve some space in the Catalog file.
3242 */
3243 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
3244 hfs_systemfile_unlock(hfsmp, lockflags);
3245 goto out;
3246 }
3247 }
3248
3249 error = cat_delete(hfsmp, &desc, &cp->c_attr);
3250
3251 if (!error) {
3252 //
3253 // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
3254 // we don't need to touch the document_id as it's handled by the rename code.
3255 // otherwise it's a normal remove and we need to save the document id in the
3256 // per thread struct and clear it from the cnode.
3257 //
3258 struct doc_tombstone *ut;
3259 ut = doc_tombstone_get();
3260 if (!skip_reserve && (cp->c_bsdflags & UF_TRACKED)
3261 && doc_tombstone_should_save(ut, vp, cnp)) {
3262
3263 uint32_t doc_id = hfs_get_document_id(cp);
3264
3265 // this event is more of a "pending-delete"
3266 if (ut->t_lastop_document_id) {
3267 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
3268 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
3269 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
3270 FSE_ARG_INO, (ino64_t)0, // dst inode #
3271 FSE_ARG_INT32, doc_id,
3272 FSE_ARG_DONE);
3273 }
3274
3275 doc_tombstone_save(dvp, vp, cnp, doc_id, cp->c_fileid);
3276
3277 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
3278
3279 // clear this so it's never returned again
3280 fip->document_id = 0;
3281 cp->c_bsdflags &= ~UF_TRACKED;
3282 }
3283
3284 /* The parent lost a child */
3285 if (dcp->c_entries > 0)
3286 dcp->c_entries--;
3287 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
3288 dcp->c_dirchangecnt++;
3289 hfs_incr_gencount(dcp);
3290
3291 dcp->c_touch_chgtime = TRUE;
3292 dcp->c_touch_modtime = TRUE;
3293 dcp->c_flag |= C_MODIFIED;
3294
3295 hfs_update(dcp->c_vp, 0);
3296 }
3297
3298 hfs_systemfile_unlock(hfsmp, lockflags);
3299
3300 if (error)
3301 goto out;
3302
3303 #if QUOTA
3304 if (hfsmp->hfs_flags & HFS_QUOTAS)
3305 (void)hfs_chkiq(cp, -1, NOCRED, 0);
3306 #endif /* QUOTA */
3307
3308 hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));
3309
3310 /* Mark C_NOEXISTS since the catalog entry is now gone */
3311 cp->c_flag |= C_NOEXISTS;
3312
3313 out:
3314 dcp->c_flag &= ~C_DIR_MODIFICATION;
3315 wakeup((caddr_t)&dcp->c_flag);
3316
3317 if (started_tr) {
3318 hfs_end_transaction(hfsmp);
3319 }
3320
3321 return (error);
3322 }
3323
3324
3325 /*
3326 * Remove a file or link.
3327 */
3328 int
3329 hfs_vnop_remove(struct vnop_remove_args *ap)
3330 {
3331 struct vnode *dvp = ap->a_dvp;
3332 struct vnode *vp = ap->a_vp;
3333 struct cnode *dcp = VTOC(dvp);
3334 struct cnode *cp;
3335 struct vnode *rvp = NULL;
3336 int error=0, recycle_rsrc=0;
3337 int recycle_vnode = 0;
3338 uint32_t rsrc_vid = 0;
3339 time_t orig_ctime;
3340
3341 if (dvp == vp) {
3342 return (EINVAL);
3343 }
3344
3345 orig_ctime = VTOC(vp)->c_ctime;
3346 if (!vnode_isnamedstream(vp) && ((ap->a_flags & VNODE_REMOVE_SKIP_NAMESPACE_EVENT) == 0)) {
3347 error = nspace_snapshot_event(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
3348 if (error) {
3349 // XXXdbg - decide on a policy for handling namespace handler failures!
3350 // for now we just let them proceed.
3351 }
3352 }
3353 error = 0;
3354
3355 cp = VTOC(vp);
3356
3357 relock:
3358
3359 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
3360
3361 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
3362 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3363 if (rvp) {
3364 vnode_put (rvp);
3365 }
3366 return (error);
3367 }
3368 //
3369 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
3370 //
3371 if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
3372 uint32_t newid;
3373
3374 hfs_unlockpair(dcp, cp);
3375
3376 if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
3377 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3378 ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
3379 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
3380 FSE_ARG_DEV, VTOHFS(vp)->hfs_raw_dev,
3381 FSE_ARG_INO, (ino64_t)0, // src inode #
3382 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
3383 FSE_ARG_INT32, newid,
3384 FSE_ARG_DONE);
3385 } else {
3386 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rm...
3387 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3388 }
3389 }
3390
3391 /*
3392 * Lazily determine whether there is a valid resource fork
3393 * vnode attached to 'cp' if it is a regular file or symlink.
3394 * If the vnode does not exist, then we may proceed without having to
3395 * create it.
3396 *
3397 * If, however, it does exist, then we need to acquire an iocount on the
3398 * vnode after acquiring its vid. This ensures that if we have to do I/O
3399 * against it, it can't get recycled from underneath us in the middle
3400 * of this call.
3401 *
3402 * Note: this function may be invoked for directory hardlinks, so just skip these
3403 * steps if 'vp' is a directory.
3404 */
3405
3406 enum vtype vtype = vnode_vtype(vp);
3407 if ((vtype == VLNK) || (vtype == VREG)) {
3408 if ((cp->c_rsrc_vp) && (rvp == NULL)) {
3409 /* We need to acquire the rsrc vnode */
3410 rvp = cp->c_rsrc_vp;
3411 rsrc_vid = vnode_vid (rvp);
3412
3413 /* Unlock everything to acquire iocount on the rsrc vnode */
3414 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
3415 hfs_unlockpair (dcp, cp);
3416 /* Use the vid to maintain identity on rvp */
3417 if (vnode_getwithvid(rvp, rsrc_vid)) {
3418 /*
3419 * If this fails, then it was recycled or
3420 * reclaimed in the interim. Reset fields and
3421 * start over.
3422 */
3423 rvp = NULL;
3424 rsrc_vid = 0;
3425 }
3426 goto relock;
3427 }
3428 }
3429
3430 /*
3431 * Check to see if we raced rmdir for the parent directory
3432 * hfs_removefile already checks for a race on vp/cp
3433 */
3434 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
3435 error = ENOENT;
3436 goto rm_done;
3437 }
3438
3439 error = hfs_removefile(dvp, vp, ap->a_cnp, ap->a_flags, 0, 0, NULL, 0);
3440
3441 /*
3442 * If the remove succeeded in deleting the file, then we may need to mark
3443 * the resource fork for recycle so that it is reclaimed as quickly
3444 * as possible. If it were not recycled quickly, then this resource fork
3445 * vnode could keep a v_parent reference on the data fork, which prevents it
3446 * from going through reclaim (by giving it extra usecounts), except in the force-
3447 * unmount case.
3448 *
3449 * However, a caveat: we need to continue to supply resource fork
3450 * access to open-unlinked files even if the resource fork is not open. This is
3451 * a requirement for the compressed files work. Luckily, hfs_vgetrsrc will handle
3452 * this already if the data fork has been re-parented to the hidden directory.
3453 *
3454 * As a result, all we really need to do here is mark the resource fork vnode
3455 * for recycle. If it goes out of core, it can be brought in again if needed.
3456 * If the cnode was instead marked C_NOEXISTS, then there wouldn't be any
3457 * more work.
3458 */
3459 if (error == 0) {
3460 hfs_hotfile_deleted(vp);
3461
3462 if (rvp) {
3463 recycle_rsrc = 1;
3464 }
3465 /*
3466 * If the target was actually removed from the catalog schedule it for
3467 * full reclamation/inactivation. We hold an iocount on it so it should just
3468 * get marked with MARKTERM
3469 */
3470 if (cp->c_flag & C_NOEXISTS) {
3471 recycle_vnode = 1;
3472 }
3473 }
3474
3475
3476 /*
3477 * Drop the truncate lock before unlocking the cnode
3478 * (which can potentially perform a vnode_put and
3479 * recycle the vnode which in turn might require the
3480 * truncate lock)
3481 */
3482 rm_done:
3483 hfs_unlockpair(dcp, cp);
3484 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3485
3486 if (recycle_rsrc) {
3487 /* inactive or reclaim on rvp will clean up the blocks from the rsrc fork */
3488 vnode_recycle(rvp);
3489 }
3490 if (recycle_vnode) {
3491 vnode_recycle (vp);
3492 }
3493
3494 if (rvp) {
3495 /* drop iocount on rsrc fork, was obtained at beginning of fxn */
3496 vnode_put(rvp);
3497 }
3498
3499 return (error);
3500 }
3501
3502
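/*
 * buf_iterate() callback used by hfs_removefile(): drop a dirty symlink
 * buffer from the current journal transaction so it is not written out
 * once the symlink has been deleted.
 */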
3503 int
3504 hfs_removefile_callback(struct buf *bp, void *hfsmp) {
3505
3506 if ( !(buf_flags(bp) & B_META))
3507 panic("hfs: symlink bp @ %p is not marked meta-data!\n", bp);
3508 /*
3509 * it's part of the current transaction, kill it.
3510 */
3511 journal_kill_block(((struct hfsmount *)hfsmp)->jnl, bp);
3512
3513 return (BUF_CLAIMED);
3514 }
3515
3516 /*
3517 * hfs_removefile
3518 *
3519 * Similar to hfs_vnop_remove except there are additional options.
3520 * This function may be used to remove directories if they have
3521 * lots of EA's -- note the 'allow_dirs' argument.
3522 *
3523 * This function is able to delete blocks & fork data for the resource
3524 * fork even if it does not exist in core (i.e. has no backing vnode).
3525 * It should infer the correct behavior based on the number of blocks
3526 * in the cnode and whether or not the resource fork pointer exists.
3527 * As a result, one need only pass in the 'vp' corresponding to the
3528 * data fork of this file (or main vnode in the case of a directory).
3529 * Passing in a resource fork will result in an error.
3530 *
3531 * Because we do not create any vnodes in this function, we are not at
3532 * risk of deadlocking against ourselves by double-locking.
3533 *
3534 * Requires cnode and truncate locks to be held.
3535 */
3536 int
3537 hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
3538 int flags, int skip_reserve, int allow_dirs,
3539 __unused struct vnode *rvp, int only_unlink)
3540 {
3541 struct cnode *cp;
3542 struct cnode *dcp;
3543 struct vnode *rsrc_vp = NULL;
3544 struct hfsmount *hfsmp;
3545 struct cat_desc desc;
3546 struct timeval tv;
3547 int dataforkbusy = 0;
3548 int rsrcforkbusy = 0;
3549 int lockflags;
3550 int error = 0;
3551 int started_tr = 0;
3552 int isbigfile = 0, defer_remove=0, isdir=0;
3553 int update_vh = 0;
3554
3555 cp = VTOC(vp);
3556 dcp = VTOC(dvp);
3557 hfsmp = VTOHFS(vp);
3558
3559 /* Check if we lost a race post lookup. */
3560 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
3561 return (0);
3562 }
3563
3564 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
3565 return 0;
3566 }
3567
3568 /* Make sure a remove is permitted */
3569 if (VNODE_IS_RSRC(vp)) {
3570 return (EPERM);
3571 }
3572 else {
3573 /*
3574 * We know it's a data fork.
3575 * Probe the cnode to see if we have a valid resource fork
3576 * in hand or not.
3577 */
3578 rsrc_vp = cp->c_rsrc_vp;
3579 }
3580
3581 /* Don't allow deleting the journal or journal_info_block. */
3582 if (hfs_is_journal_file(hfsmp, cp)) {
3583 return (EPERM);
3584 }
3585
3586 /*
3587 * Hard links require special handling.
3588 */
3589 if (cp->c_flag & C_HARDLINK) {
3590 if ((flags & VNODE_REMOVE_NODELETEBUSY) && vnode_isinuse(vp, 0)) {
3591 return (EBUSY);
3592 } else {
3593 /* A directory hard link with a link count of one is
3594 * treated as a regular directory. Therefore it should
3595 * only be removed using rmdir().
3596 */
3597 if ((vnode_isdir(vp) == 1) && (cp->c_linkcount == 1) &&
3598 (allow_dirs == 0)) {
3599 return (EPERM);
3600 }
3601 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
3602 }
3603 }
3604
3605 /* Directories should call hfs_rmdir! (unless they have a lot of attributes) */
3606 if (vnode_isdir(vp)) {
3607 if (allow_dirs == 0)
3608 return (EPERM); /* POSIX */
3609 isdir = 1;
3610 }
3611 /* Sanity check the parent ids. */
3612 if ((cp->c_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
3613 (cp->c_parentcnid != dcp->c_fileid)) {
3614 return (EINVAL);
3615 }
3616
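	/*
	 * Mark the parent directory as being modified; the flag is cleared
	 * (and any waiters woken) in the 'out' path at the end of this function.
	 */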
3617 dcp->c_flag |= C_DIR_MODIFICATION;
3618
3619 // this guy is going away so mark him as such
3620 cp->c_flag |= C_DELETED;
3621
3622
3623 /* Remove our entry from the namei cache. */
3624 cache_purge(vp);
3625
3626 /*
3627 * If the caller was operating on a file (as opposed to a
3628 * directory with EAs), then we need to figure out
3629 * whether or not it has a valid resource fork vnode.
3630 *
3631 * If there was a valid resource fork vnode, then we need
3632 * to use hfs_truncate to eliminate its data. If there is
3633 * no vnode, then we hold the cnode lock which would
3634 * prevent it from being created. As a result,
3635 * we can use the data deletion functions which do not
3636 * require that a cnode/vnode pair exist.
3637 */
3638
3639 /* Check if this file is being used. */
3640 if (isdir == 0) {
3641 dataforkbusy = vnode_isinuse(vp, 0);
3642 /*
3643 * At this point, we know that 'vp' points to the
3644 * data fork because we checked it up front. And if
3645 * there is no rsrc fork, rsrc_vp will be NULL.
3646 */
3647 if (rsrc_vp && (cp->c_blocks - VTOF(vp)->ff_blocks)) {
3648 rsrcforkbusy = vnode_isinuse(rsrc_vp, 0);
3649 }
3650 }
3651
3652 /* Check if we have to break the deletion into multiple pieces. */
3653 if (isdir == 0)
3654 isbigfile = cp->c_datafork->ff_size >= HFS_BIGFILE_SIZE;
3655
3656 /* Check if the file has xattrs. If it does, we'll have to delete them in
3657 individual transactions in case there are too many. */
3658 if ((hfsmp->hfs_attribute_vp != NULL) &&
3659 (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) {
3660 defer_remove = 1;
3661 }
3662
3663 /* If we are explicitly told to only unlink the item and move it to the hidden dir, then do it */
3664 if (only_unlink) {
3665 defer_remove = 1;
3666 }
3667
3668 /*
3669 * Carbon semantics prohibit deleting busy files.
3670 * (enforced when VNODE_REMOVE_NODELETEBUSY is requested)
3671 */
3672 if (dataforkbusy || rsrcforkbusy) {
3673 if ((flags & VNODE_REMOVE_NODELETEBUSY) ||
3674 (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid == 0)) {
3675 error = EBUSY;
3676 goto out;
3677 }
3678 }
3679
3680 #if QUOTA
3681 if (hfsmp->hfs_flags & HFS_QUOTAS)
3682 (void)hfs_getinoquota(cp);
3683 #endif /* QUOTA */
3684
3685 /*
3686 * Do a ubc_setsize to indicate we need to wipe contents if:
3687 * 1) item is a regular file.
3688 * 2) At least one fork is not busy AND the remove is not being deferred.
3689 *
3690 * We need to check for the defer_remove since it can be set without
3691 * having a busy data or rsrc fork
3692 */
3693 if (isdir == 0 && (!dataforkbusy || !rsrcforkbusy) && (defer_remove == 0)) {
3694 /*
3695 * A ubc_setsize can cause a pagein so defer it
3696 * until after the cnode lock is dropped. The
3697 * cnode lock cannot be dropped/reacquired here
3698 * since we might already hold the journal lock.
3699 */
3700 if (!dataforkbusy && cp->c_datafork->ff_blocks && !isbigfile) {
3701 cp->c_flag |= C_NEED_DATA_SETSIZE;
3702 }
3703 if (!rsrcforkbusy && rsrc_vp) {
3704 cp->c_flag |= C_NEED_RSRC_SETSIZE;
3705 }
3706 }
3707
3708 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3709 goto out;
3710 }
3711 started_tr = 1;
3712
3713 // XXXdbg - if we're journaled, kill any dirty symlink buffers
3714 if (hfsmp->jnl && vnode_islnk(vp) && (defer_remove == 0)) {
3715 buf_iterate(vp, hfs_removefile_callback, BUF_SKIP_NONLOCKED, (void *)hfsmp);
3716 }
3717
3718 /*
3719 * Prepare to truncate any non-busy forks. Busy forks will
3720 * get truncated when their vnode goes inactive.
3721 * Note that we will only enter this region if we
3722 * can avoid creating an open-unlinked file. If
3723 * either fork is busy, we will have to create an open-
3724 * unlinked file.
3725 *
3726 * Since we are deleting the file, we need to stagger the runtime
3727 * modifications to do things in such a way that a crash won't
3728 * result in us getting overlapped extents or any other
3729 * bad inconsistencies. As such, we call prepare_release_storage
3730 * which updates the UBC, updates quota information, and releases
3731 * any loaned blocks that belong to this file. No actual
3732 * truncation or bitmap manipulation is done until *AFTER*
3733 * the catalog record is removed.
3734 */
3735 if (isdir == 0 && (!dataforkbusy && !rsrcforkbusy) && (only_unlink == 0)) {
3736
3737 if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) {
3738
3739 error = hfs_prepare_release_storage (hfsmp, vp);
3740 if (error) {
3741 goto out;
3742 }
3743 update_vh = 1;
3744 }
3745
3746 /*
3747 * If the resource fork vnode does not exist, we can skip this step.
3748 */
3749 if (!rsrcforkbusy && rsrc_vp) {
3750 error = hfs_prepare_release_storage (hfsmp, rsrc_vp);
3751 if (error) {
3752 goto out;
3753 }
3754 update_vh = 1;
3755 }
3756 }
3757
3758 /*
3759 * Protect against a race with rename by using the component
3760 * name passed in and parent id from dvp (instead of using
3761 * the cp->c_desc which may have changed). Also, be aware that
3762 * because we allow directories to be passed in, we need to special case
3763 * this temporary descriptor in case we were handed a directory.
3764 */
3765 if (isdir) {
3766 desc.cd_flags = CD_ISDIR;
3767 }
3768 else {
3769 desc.cd_flags = 0;
3770 }
3771 desc.cd_encoding = cp->c_desc.cd_encoding;
3772 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3773 desc.cd_namelen = cnp->cn_namelen;
3774 desc.cd_parentcnid = dcp->c_fileid;
3775 desc.cd_hint = cp->c_desc.cd_hint;
3776 desc.cd_cnid = cp->c_cnid;
3777 microtime(&tv);
3778
3779 /*
3780 * There are two cases to consider:
3781 * 1. File/Dir is busy/big/defer_remove ==> move/rename the file/dir
3782 * 2. File is not in use ==> remove the file
3783 *
3784 * We can get a directory in case 1 because it may have had lots of attributes,
3785 * which need to get removed here.
3786 */
3787 if (dataforkbusy || rsrcforkbusy || isbigfile || defer_remove) {
3788 char delname[32];
3789 struct cat_desc to_desc;
3790 struct cat_desc todir_desc;
3791
3792 /*
3793 * Orphan this file or directory (move to hidden directory).
3794 * Again, we need to take care that we treat directories as directories,
3795 * and files as files. Because directories with attributes can be passed in,
3796 * check to make sure that we have a directory or a file before filling in the
3797 * temporary descriptor's flags. We keep orphaned directories AND files in
3798 * the FILE_HARDLINKS private directory since we're generalizing over all
3799 * orphaned filesystem objects.
3800 */
3801 bzero(&todir_desc, sizeof(todir_desc));
3802 todir_desc.cd_parentcnid = 2;
3803
3804 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
3805 bzero(&to_desc, sizeof(to_desc));
3806 to_desc.cd_nameptr = (const u_int8_t *)delname;
3807 to_desc.cd_namelen = strlen(delname);
3808 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
3809 if (isdir) {
3810 to_desc.cd_flags = CD_ISDIR;
3811 }
3812 else {
3813 to_desc.cd_flags = 0;
3814 }
3815 to_desc.cd_cnid = cp->c_cnid;
3816
3817 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
3818 if (!skip_reserve) {
3819 if ((error = cat_preflight(hfsmp, CAT_RENAME, NULL, 0))) {
3820 hfs_systemfile_unlock(hfsmp, lockflags);
3821 goto out;
3822 }
3823 }
3824
3825 error = cat_rename(hfsmp, &desc, &todir_desc,
3826 &to_desc, (struct cat_desc *)NULL);
3827
3828 if (error == 0) {
3829 hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries++;
3830 if (isdir == 1) {
3831 INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
3832 }
3833 (void) cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
3834 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
3835
3836 /* Update the parent directory */
3837 if (dcp->c_entries > 0)
3838 dcp->c_entries--;
3839 if (isdir == 1) {
3840 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
3841 }
3842 dcp->c_dirchangecnt++;
3843 hfs_incr_gencount(dcp);
3844
3845 dcp->c_ctime = tv.tv_sec;
3846 dcp->c_mtime = tv.tv_sec;
3847 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
3848
3849 /* Update the file or directory's state */
3850 cp->c_flag |= C_DELETED;
3851 cp->c_ctime = tv.tv_sec;
3852 --cp->c_linkcount;
3853 (void) cat_update(hfsmp, &to_desc, &cp->c_attr, NULL, NULL);
3854 }
3855 hfs_systemfile_unlock(hfsmp, lockflags);
3856 if (error)
3857 goto out;
3858
3859 }
3860 else {
3861 /*
3862 * Nobody is using this item; we can safely remove everything.
3863 */
3864 struct filefork *temp_rsrc_fork = NULL;
3865 #if QUOTA
3866 off_t savedbytes;
3867 int blksize = hfsmp->blockSize;
3868 #endif
3869 u_int32_t fileid = cp->c_fileid;
3870
3871 /*
3872 * Figure out if we need to read the resource fork data into
3873 * core before wiping out the catalog record.
3874 *
3875 * 1) Must not be a directory
3876 * 2) cnode's c_rsrcfork ptr must be NULL.
3877 * 3) rsrc fork must have actual blocks
3878 */
3879 if ((isdir == 0) && (cp->c_rsrcfork == NULL) &&
3880 (cp->c_blocks - VTOF(vp)->ff_blocks)) {
3881 /*
3882 * The resource fork vnode & filefork did not exist.
3883 * Create a temporary one for use in this function only.
3884 */
3885 temp_rsrc_fork = hfs_zalloc(HFS_FILEFORK_ZONE);
3886 bzero(temp_rsrc_fork, sizeof(struct filefork));
3887 temp_rsrc_fork->ff_cp = cp;
3888 rl_init(&temp_rsrc_fork->ff_invalidranges);
3889 }
3890
3891 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3892
3893 /* Look up the resource fork first, if necessary */
3894 if (temp_rsrc_fork) {
3895 error = cat_lookup (hfsmp, &desc, 1, 0, (struct cat_desc*) NULL,
3896 (struct cat_attr*) NULL, &temp_rsrc_fork->ff_data, NULL);
3897 if (error) {
3898 hfs_zfree(temp_rsrc_fork, HFS_FILEFORK_ZONE);
3899 hfs_systemfile_unlock (hfsmp, lockflags);
3900 goto out;
3901 }
3902 }
3903
3904 if (!skip_reserve) {
3905 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
3906 if (temp_rsrc_fork) {
3907 hfs_zfree(temp_rsrc_fork, HFS_FILEFORK_ZONE);
3908 }
3909 hfs_systemfile_unlock(hfsmp, lockflags);
3910 goto out;
3911 }
3912 }
3913
3914 error = cat_delete(hfsmp, &desc, &cp->c_attr);
3915
3916 if (error && error != ENXIO && error != ENOENT) {
3917 printf("hfs_removefile: deleting file %s (id=%d) vol=%s err=%d\n",
3918 cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, hfsmp->vcbVN, error);
3919 }
3920
3921 if (error == 0) {
3922 /* Update the parent directory */
3923 if (dcp->c_entries > 0)
3924 dcp->c_entries--;
3925 dcp->c_dirchangecnt++;
3926 hfs_incr_gencount(dcp);
3927
3928 dcp->c_ctime = tv.tv_sec;
3929 dcp->c_mtime = tv.tv_sec;
3930 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
3931 }
3932 hfs_systemfile_unlock(hfsmp, lockflags);
3933
3934 if (error) {
3935 if (temp_rsrc_fork) {
3936 hfs_zfree(temp_rsrc_fork, HFS_FILEFORK_ZONE);
3937 }
3938 goto out;
3939 }
3940
3941 /*
3942 * Now that we've wiped out the catalog record, the file effectively doesn't
3943 * exist anymore. So update the quota records to reflect the loss of the
3944 * data fork and the resource fork.
3945 */
3946 #if QUOTA
3947 if (cp->c_datafork->ff_blocks > 0) {
3948 savedbytes = ((off_t)cp->c_datafork->ff_blocks * (off_t)blksize);
3949 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3950 }
3951
3952 /*
3953 * We may have just deleted the catalog record for a resource fork even
3954 * though it did not exist in core as a vnode. However, just because there
3955 * was a resource fork pointer in the cnode does not mean that it had any blocks.
3956 */
3957 if (temp_rsrc_fork || cp->c_rsrcfork) {
3958 if (cp->c_rsrcfork) {
3959 if (cp->c_rsrcfork->ff_blocks > 0) {
3960 savedbytes = ((off_t)cp->c_rsrcfork->ff_blocks * (off_t)blksize);
3961 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3962 }
3963 }
3964 else {
3965 /* we must have used a temporary fork */
3966 savedbytes = ((off_t)temp_rsrc_fork->ff_blocks * (off_t)blksize);
3967 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3968 }
3969 }
3970
3971 if (hfsmp->hfs_flags & HFS_QUOTAS) {
3972 (void)hfs_chkiq(cp, -1, NOCRED, 0);
3973 }
3974 #endif
3975
3976 if (vnode_islnk(vp) && cp->c_datafork->ff_symlinkptr) {
3977 hfs_free(cp->c_datafork->ff_symlinkptr, cp->c_datafork->ff_size);
3978 cp->c_datafork->ff_symlinkptr = NULL;
3979 }
3980
3981 /*
3982 * If we didn't get any errors deleting the catalog entry, then go ahead
3983 * and release the backing store now. The filefork pointers are still valid.
3984 */
3985 if (temp_rsrc_fork) {
3986 error = hfs_release_storage (hfsmp, cp->c_datafork, temp_rsrc_fork, fileid);
3987 }
3988 else {
3989 /* if cp->c_rsrcfork == NULL, hfs_release_storage will skip over it. */
3990 error = hfs_release_storage (hfsmp, cp->c_datafork, cp->c_rsrcfork, fileid);
3991 }
3992 if (error) {
3993 /*
3994 * If we encountered an error updating the extents and bitmap,
3995 * mark the volume inconsistent. At this point, the catalog record has
3996 * already been deleted, so we can't recover it at this point. We need
3997 * to proceed and update the volume header and mark the cnode C_NOEXISTS.
3998 * The subsequent fsck should be able to recover the free space for us.
3999 */
4000 hfs_mark_inconsistent(hfsmp, HFS_OP_INCOMPLETE);
4001 }
4002 else {
4003 /* reset update_vh to 0, since hfs_release_storage should have done it for us */
4004 update_vh = 0;
4005 }
4006
4007 /* Get rid of the temporary rsrc fork */
4008 if (temp_rsrc_fork) {
4009 hfs_zfree(temp_rsrc_fork, HFS_FILEFORK_ZONE);
4010 }
4011
4012 cp->c_flag |= C_NOEXISTS;
4013 cp->c_flag &= ~C_DELETED;
4014
4015 cp->c_touch_chgtime = TRUE;
4016 --cp->c_linkcount;
4017
4018 /*
4019 * We must never get a directory if we're in this else block. We could
4020 * accidentally drop the number of files in the volume header if we did.
4021 */
4022 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
4023
4024 }
4025
4026 //
4027 // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
4028 // we don't need to touch the document_id as it's handled by the rename code.
4029 // otherwise it's a normal remove and we need to save the document id in the
4030 // per thread struct and clear it from the cnode.
4031 //
4032 if (!error && !skip_reserve && (cp->c_bsdflags & UF_TRACKED)
4033 && cp->c_linkcount <= 1) {
4034 struct doc_tombstone *ut;
4035 ut = doc_tombstone_get();
4036 if (doc_tombstone_should_save(ut, vp, cnp)) {
4037 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4038 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4039 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
4040 FSE_ARG_INO, (ino64_t)0, // dst inode #
4041 FSE_ARG_INT32, hfs_get_document_id(cp), // document id
4042 FSE_ARG_DONE);
4043
4044 doc_tombstone_save(dvp, vp, cnp, hfs_get_document_id(cp),
4045 cp->c_fileid);
4046
4047 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
4048
4049 fip->document_id = 0;
4050 cp->c_bsdflags &= ~UF_TRACKED;
4051 }
4052 }
4053
4054 /*
4055 * All done with this cnode's descriptor...
4056 *
4057 * Note: all future catalog calls for this cnode must be by
4058 * fileid only. This is OK for HFS (which doesn't have file
4059 * thread records) since HFS doesn't support the removal of
4060 * busy files.
4061 */
4062 cat_releasedesc(&cp->c_desc);
4063
4064 out:
4065 if (error) {
4066 cp->c_flag &= ~C_DELETED;
4067 }
4068
4069 if (update_vh) {
4070 /*
4071 * If we bailed out earlier, we may need to update the volume header
4072 * to deal with the borrowed blocks accounting.
4073 */
4074 hfs_volupdate (hfsmp, VOL_UPDATE, 0);
4075 }
4076
4077 if (started_tr) {
4078 hfs_end_transaction(hfsmp);
4079 }
4080
4081 dcp->c_flag &= ~C_DIR_MODIFICATION;
4082 wakeup((caddr_t)&dcp->c_flag);
4083
4084 return (error);
4085 }
4086
4087
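/*
 * Replace a cnode's catalog descriptor with *cdp.  Ownership of the name
 * buffer is transferred from the descriptor to the cnode, so the caller
 * must not release cdp's name afterwards.
 */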
4088 void
4089 replace_desc(struct cnode *cp, struct cat_desc *cdp)
4090 {
4091 // fixes 4348457 and 4463138
4092 if (&cp->c_desc == cdp) {
4093 return;
4094 }
4095
4096 /* First release allocated name buffer */
4097 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
4098 const u_int8_t *name = cp->c_desc.cd_nameptr;
4099
4100 cp->c_desc.cd_nameptr = 0;
4101 cp->c_desc.cd_namelen = 0;
4102 cp->c_desc.cd_flags &= ~CD_HASBUF;
4103 vfs_removename((const char *)name);
4104 }
4105 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
4106
4107 /* Cnode now owns the name buffer */
4108 cdp->cd_nameptr = 0;
4109 cdp->cd_namelen = 0;
4110 cdp->cd_flags &= ~CD_HASBUF;
4111 }
4112
4113 /*
4114 * hfs_vnop_rename
4115 *
4116 * Just forwards the arguments from VNOP_RENAME into those of
4117 * VNOP_RENAMEX but zeros out the flags word.
4118 */
4119 int hfs_vnop_rename (struct vnop_rename_args *args) {
4120 struct vnop_renamex_args vrx;
4121
4122 vrx.a_desc = args->a_desc; // we aren't using it to switch into the vnop array, so fine as is.
4123 vrx.a_fdvp = args->a_fdvp;
4124 vrx.a_fvp = args->a_fvp;
4125 vrx.a_fcnp = args->a_fcnp;
4126 vrx.a_tdvp = args->a_tdvp;
4127 vrx.a_tvp = args->a_tvp;
4128 vrx.a_tcnp = args->a_tcnp;
4129 vrx.a_vap = NULL; // not used
4130 vrx.a_flags = 0; //zero out the flags.
4131 vrx.a_context = args->a_context;
4132
4133 return hfs_vnop_renamex (&vrx);
4134 }
4135
4136
4137
4138 /*
4139 * Rename a cnode.
4140 *
4141 * The VFS layer guarantees that:
4142 * - source and destination will either both be directories, or
4143 * both not be directories.
4144 * - all the vnodes are from the same file system
4145 *
4146 * When the target is a directory, HFS must ensure that it is empty.
4147 *
4148 * Note that this function requires up to 6 vnodes in order to work properly
4149 * if it is operating on files (and not on directories). This is because only
4150 * files can have resource forks, and we now require iocounts to be held on the
4151 * vnodes corresponding to the resource forks (if applicable) as well as
4152 * the files or directories undergoing rename. The problem with not holding
4153 * iocounts on the resource fork vnodes is that it can lead to a deadlock
4154 * situation: The rsrc fork of the source file may be recycled and reclaimed
4155 * in order to provide a vnode for the destination file's rsrc fork. Since
4156 * data and rsrc forks share the same cnode, we'd eventually try to lock the
4157 * source file's cnode in order to sync its rsrc fork to disk, but it's already
4158 * been locked. By taking the rsrc fork vnodes up front we ensure that they
4159 * cannot be recycled, and that the situation mentioned above cannot happen.
4160 */
4161 int
4162 hfs_vnop_renamex(struct vnop_renamex_args *ap)
4163 {
4164 struct vnode *tvp = ap->a_tvp;
4165 struct vnode *tdvp = ap->a_tdvp;
4166 struct vnode *fvp = ap->a_fvp;
4167 struct vnode *fdvp = ap->a_fdvp;
4168 /*
4169 * Note that we only need locals for the target/destination's
4170 * resource fork vnode (and only if necessary). We don't care if the
4171 * source has a resource fork vnode or not.
4172 */
4173 struct vnode *tvp_rsrc = NULLVP;
4174 uint32_t tvp_rsrc_vid = 0;
4175 struct componentname *tcnp = ap->a_tcnp;
4176 struct componentname *fcnp = ap->a_fcnp;
4177 struct proc *p = vfs_context_proc(ap->a_context);
4178 struct cnode *fcp;
4179 struct cnode *fdcp;
4180 struct cnode *tdcp;
4181 struct cnode *tcp;
4182 struct cnode *error_cnode;
4183 struct cat_desc from_desc;
4184 struct cat_desc to_desc;
4185 struct cat_desc out_desc;
4186 struct hfsmount *hfsmp;
4187 cat_cookie_t cookie;
4188 int tvp_deleted = 0;
4189 int started_tr = 0, got_cookie = 0;
4190 int took_trunc_lock = 0;
4191 int lockflags;
4192 int error;
4193 time_t orig_from_ctime, orig_to_ctime;
4194 int emit_rename = 1;
4195 int emit_delete = 1;
4196 int is_tracked = 0;
4197 int unlocked;
4198 vnode_t old_doc_vp = NULL;
4199 int rename_exclusive = 0;
4200
4201 orig_from_ctime = VTOC(fvp)->c_ctime;
4202 if (tvp && VTOC(tvp)) {
4203 orig_to_ctime = VTOC(tvp)->c_ctime;
4204 } else {
4205 orig_to_ctime = ~0;
4206 }
4207
4208 hfsmp = VTOHFS(tdvp);
4209
4210 /* Check the flags first, so we can avoid grabbing locks if necessary */
4211 if (ap->a_flags) {
4212 /* These are the only flags we support for now */
4213 if ((ap->a_flags & (VFS_RENAME_EXCL)) == 0) {
4214 return ENOTSUP;
4215 }
4216
4217 /* The rename flags are mutually exclusive for HFS+ */
4218 switch (ap->a_flags & VFS_RENAME_FLAGS_MASK) {
4219 case VFS_RENAME_EXCL:
4220 rename_exclusive = true;
4221 break;
4222 default:
4223 return ENOTSUP;
4224 }
4225 }
4226
4227 /*
4228 * Do special case checks here. If fvp == tvp then we need to check the
4229 * cnode with locks held.
4230 */
4231 if (fvp == tvp) {
4232 int is_hardlink = 0;
4233 /*
4234 * In this case, we do *NOT* ever emit a DELETE event.
4235 * We may not necessarily emit a RENAME event
4236 */
4237 emit_delete = 0;
4238 if ((error = hfs_lock(VTOC(fvp), HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
4239 return error;
4240 }
4241 /* Check to see if the item is a hardlink or not */
4242 is_hardlink = (VTOC(fvp)->c_flag & C_HARDLINK);
4243 hfs_unlock (VTOC(fvp));
4244
4245 /*
4246 * If the item is not a hardlink, then case sensitivity must be off, since
4247 * two names should not resolve to the same cnode unless they are case variants.
4248 */
4249 if (is_hardlink) {
4250 emit_rename = 0;
4251 /*
4252 * Hardlinks are a little trickier. We only want to emit a rename event
4253 * if the item is a hardlink, the parent directories are the same, case sensitivity
4254 * is off, and the case folded names are the same. See the fvp == tvp case below for more
4255 * info.
4256 */
4257
4258 if ((fdvp == tdvp) && ((hfsmp->hfs_flags & HFS_CASE_SENSITIVE) == 0)) {
4259 if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
4260 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
4261 /* Then in this case only it is ok to emit a rename */
4262 emit_rename = 1;
4263 }
4264 }
4265 }
4266 }
4267 if (emit_rename) {
4268 /* c_bsdflags should only be assessed while holding the cnode lock.
4269 * This is not done consistently throughout the code and can result
4270 * in a race. This will be fixed via rdar://12181064
4271 */
4272 if (VTOC(fvp)->c_bsdflags & UF_TRACKED) {
4273 is_tracked = 1;
4274 }
4275 nspace_snapshot_event(fvp, orig_from_ctime, NAMESPACE_HANDLER_RENAME_OP, NULL);
4276 }
4277
4278 if (tvp && VTOC(tvp)) {
4279 if (emit_delete) {
4280 nspace_snapshot_event(tvp, orig_to_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
4281 }
4282 }
4283
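	/*
	 * 'retry' restarts lock acquisition from the top (including the
	 * truncate lock on tvp); 'relock' only re-takes the four cnode locks
	 * after they were dropped to acquire an iocount or a document id.
	 */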
4284 retry:
4285 /* When tvp exists, take the truncate lock for hfs_removefile(). */
4286 if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) {
4287 hfs_lock_truncate(VTOC(tvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4288 took_trunc_lock = 1;
4289 }
4290
4291 relock:
4292 error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL,
4293 HFS_EXCLUSIVE_LOCK, &error_cnode);
4294 if (error) {
4295 if (took_trunc_lock) {
4296 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
4297 took_trunc_lock = 0;
4298 }
4299
4300 /*
4301 * We hit an error path. If we were trying to re-acquire the locks
4302 * after coming through here once, we might have already obtained
4303 * an iocount on tvp's resource fork vnode. Drop that before dealing
4304 * with the failure. Note this is safe -- since we are in an
4305 * error handling path, we can't be holding the cnode locks.
4306 */
4307 if (tvp_rsrc) {
4308 vnode_put (tvp_rsrc);
4309 tvp_rsrc_vid = 0;
4310 tvp_rsrc = NULL;
4311 }
4312
4313 /*
4314 * tvp might no longer exist. If the cause of the lock failure
4315 * was tvp, then we can try again with tvp/tcp set to NULL.
4316 * This is ok because the vfs syscall will vnode_put the vnodes
4317 * after we return from hfs_vnop_rename.
4318 */
4319 if ((error == ENOENT) && (tvp != NULL) && (error_cnode == VTOC(tvp))) {
4320 tcp = NULL;
4321 tvp = NULL;
4322 goto retry;
4323 }
4324
4325 /* If we want to reintroduce notifications for failed renames, this
4326 is the place to do it. */
4327
4328 return (error);
4329 }
4330
4331 fdcp = VTOC(fdvp);
4332 fcp = VTOC(fvp);
4333 tdcp = VTOC(tdvp);
4334 tcp = tvp ? VTOC(tvp) : NULL;
4335
4336
4337 /*
4338 * If caller requested an exclusive rename (VFS_RENAME_EXCL) and 'tcp' exists
4339 * then we must fail the operation.
4340 */
4341 if (tcp && rename_exclusive) {
4342 error = EEXIST;
4343 goto out;
4344 }
4345
4346 //
4347 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
4348 //
4349 unlocked = 0;
4350 if ((fcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
4351 uint32_t newid;
4352
4353 hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
4354 unlocked = 1;
4355
4356 if (hfs_generate_document_id(hfsmp, &newid) == 0) {
4357 hfs_lock(fcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4358 ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id = newid;
4359 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4360 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4361 FSE_ARG_INO, (ino64_t)0, // src inode #
4362 FSE_ARG_INO, (ino64_t)fcp->c_fileid, // dst inode #
4363 FSE_ARG_INT32, newid,
4364 FSE_ARG_DONE);
4365 hfs_unlock(fcp);
4366 } else {
4367 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rename...
4368 }
4369
4370 //
4371 // check if we're going to need to fix tcp as well. if we aren't, go back and relock
4372 // everything. otherwise continue on and fix up tcp as well before relocking.
4373 //
4374 if (tcp == NULL || !(tcp->c_bsdflags & UF_TRACKED) || ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id != 0) {
4375 goto relock;
4376 }
4377 }
4378
4379 //
4380 // same thing for tcp if it's set
4381 //
4382 if (tcp && (tcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
4383 uint32_t newid;
4384
4385 if (!unlocked) {
4386 hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
4387 unlocked = 1;
4388 }
4389
4390 if (hfs_generate_document_id(hfsmp, &newid) == 0) {
4391 hfs_lock(tcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4392 ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id = newid;
4393 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4394 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4395 FSE_ARG_INO, (ino64_t)0, // src inode #
4396 FSE_ARG_INO, (ino64_t)tcp->c_fileid, // dst inode #
4397 FSE_ARG_INT32, newid,
4398 FSE_ARG_DONE);
4399 hfs_unlock(tcp);
4400 } else {
4401 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rename...
4402 }
4403
4404 // go back up and relock everything. next time through the if statement won't be true
4405 // and we'll skip over this block of code.
4406 goto relock;
4407 }
4408
4409
4410
4411 /*
4412 * Acquire iocounts on the destination's resource fork vnode
4413 * if necessary. If dst/src are files and the dst has a resource
4414 * fork vnode, then we need to try and acquire an iocount on the rsrc vnode.
4415 * If it does not exist, then we don't care and can skip it.
4416 */
4417 if ((vnode_isreg(fvp)) || (vnode_islnk(fvp))) {
4418 if ((tvp) && (tcp->c_rsrc_vp) && (tvp_rsrc == NULL)) {
4419 tvp_rsrc = tcp->c_rsrc_vp;
4420 /*
4421 * We can look at the vid here because we're holding the
4422 * cnode lock on the underlying cnode for this rsrc vnode.
4423 */
4424 tvp_rsrc_vid = vnode_vid (tvp_rsrc);
4425
4426 /* Unlock everything to acquire iocount on this rsrc vnode */
4427 if (took_trunc_lock) {
4428 hfs_unlock_truncate (VTOC(tvp), HFS_LOCK_DEFAULT);
4429 took_trunc_lock = 0;
4430 }
4431 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4432
4433 if (vnode_getwithvid (tvp_rsrc, tvp_rsrc_vid)) {
4434 /* iocount acquisition failed. Reset fields and start over. */
4435 tvp_rsrc_vid = 0;
4436 tvp_rsrc = NULL;
4437 }
4438 goto retry;
4439 }
4440 }
4441
4442
4443
4444 /* Ensure we didn't race src or dst parent directories with rmdir. */
4445 if (fdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
4446 error = ENOENT;
4447 goto out;
4448 }
4449
4450 if (tdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
4451 error = ENOENT;
4452 goto out;
4453 }
4454
4455
4456 /* Check for a race against unlink. The hfs_valid_cnode checks validate
4457 * the parent/child relationship with fdcp and tdcp, as well as the
4458 * component name of the target cnodes.
4459 */
4460 if ((fcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, fdvp, fcnp, fcp->c_fileid, NULL, &error)) {
4461 error = ENOENT;
4462 goto out;
4463 }
4464
4465 if (tcp && ((tcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, tdvp, tcnp, tcp->c_fileid, NULL, &error))) {
4466 //
4467 // hmm, the destination vnode isn't valid any more.
4468 // in this case we can just drop him and pretend he
4469 // never existed in the first place.
4470 //
4471 if (took_trunc_lock) {
4472 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
4473 took_trunc_lock = 0;
4474 }
4475 error = 0;
4476
4477 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4478
4479 tcp = NULL;
4480 tvp = NULL;
4481
4482 // retry the locking with tvp null'ed out
4483 goto retry;
4484 }
4485
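	/*
	 * Mark the source parent (and the destination parent, if different)
	 * as being modified; the flags are cleared and waiters woken in the
	 * 'out' path below.
	 */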
4486 fdcp->c_flag |= C_DIR_MODIFICATION;
4487 if (fdvp != tdvp) {
4488 tdcp->c_flag |= C_DIR_MODIFICATION;
4489 }
4490
4491 /*
4492 * Disallow renaming of a directory hard link if the source and
4493 * destination parent directories are different, or a directory whose
4494 * descendant is a directory hard link and the one of the ancestors
4495 * of the destination directory is a directory hard link.
4496 */
4497 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
4498 if (fcp->c_flag & C_HARDLINK) {
4499 error = EPERM;
4500 goto out;
4501 }
4502 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
4503 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4504 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
4505 error = EPERM;
4506 hfs_systemfile_unlock(hfsmp, lockflags);
4507 goto out;
4508 }
4509 hfs_systemfile_unlock(hfsmp, lockflags);
4510 }
4511 }
4512
4513 /*
4514 * The following edge case is caught here:
4515 * (to cannot be a descendant of from)
4516 *
4517 * o fdvp
4518 * /
4519 * /
4520 * o fvp
4521 * \
4522 * \
4523 * o tdvp
4524 * /
4525 * /
4526 * o tvp
4527 */
4528 if (tdcp->c_parentcnid == fcp->c_fileid) {
4529 error = EINVAL;
4530 goto out;
4531 }
4532
4533 /*
4534 * The following two edge cases are caught here:
4535 * (note tvp is not empty)
4536 *
4537 * o tdvp o tdvp
4538 * / /
4539 * / /
4540 * o tvp tvp o fdvp
4541 * \ \
4542 * \ \
4543 * o fdvp o fvp
4544 * /
4545 * /
4546 * o fvp
4547 */
4548 if (tvp && vnode_isdir(tvp) && (tcp->c_entries != 0) && fvp != tvp) {
4549 error = ENOTEMPTY;
4550 goto out;
4551 }
4552
4553 /*
4554 * The following edge case is caught here:
4555 * (the from child and parent are the same)
4556 *
4557 * o tdvp
4558 * /
4559 * /
4560 * fdvp o fvp
4561 */
4562 if (fdvp == fvp) {
4563 error = EINVAL;
4564 goto out;
4565 }
4566
4567 /*
4568 * Make sure "from" vnode and its parent are changeable.
4569 */
4570 if ((fcp->c_bsdflags & (IMMUTABLE | APPEND)) || (fdcp->c_bsdflags & APPEND)) {
4571 error = EPERM;
4572 goto out;
4573 }
4574
4575 /*
4576 * If the destination parent directory is "sticky", then the
4577 * user must own the parent directory, or the destination of
4578 * the rename, otherwise the destination may not be changed
4579 * (except by root). This implements append-only directories.
4580 *
4581 * Note that checks for immutable and write access are done
4582 * by the call to hfs_removefile.
4583 */
4584 if (tvp && (tdcp->c_mode & S_ISTXT) &&
4585 (suser(vfs_context_ucred(ap->a_context), NULL)) &&
4586 (kauth_cred_getuid(vfs_context_ucred(ap->a_context)) != tdcp->c_uid) &&
4587 (hfs_owner_rights(hfsmp, tcp->c_uid, vfs_context_ucred(ap->a_context), p, false)) ) {
4588 error = EPERM;
4589 goto out;
4590 }
4591
4592 /* Don't allow modification of the journal or journal_info_block */
4593 if (hfs_is_journal_file(hfsmp, fcp) ||
4594 (tcp && hfs_is_journal_file(hfsmp, tcp))) {
4595 error = EPERM;
4596 goto out;
4597 }
4598
4599 #if QUOTA
4600 if (tvp)
4601 (void)hfs_getinoquota(tcp);
4602 #endif
4603 /* Preflighting done, take fvp out of the name space. */
4604 cache_purge(fvp);
4605
4606 #if CONFIG_SECLUDED_RENAME
4607 /*
4608 * Check for "secure" rename that imposes additional restrictions on the
4609 * source vnode. We wait until here to check in order to prevent a race
4610 * with other threads that manage to look up fvp, but their open or link
4611 * is blocked by our locks. At this point, with fvp out of the name cache,
4612 * and holding the lock on fdvp, no other thread can find fvp.
4613 *
4614 * TODO: Do we need to limit these checks to regular files only?
4615 */
4616 if (fcnp->cn_flags & CN_SECLUDE_RENAME) {
4617 if (vnode_isdir(fvp)) {
4618 error = EISDIR;
4619 goto out;
4620 }
4621
4622 /*
4623 * Neither fork of source may be open or memory mapped.
4624 * We also don't want it in use by any other system call.
4625 * The file must not have hard links.
4626 *
4627 * We can't simply use vnode_isinuse() because that does not
4628 * count opens with O_EVTONLY. We don't want a malicious
4629 * process using O_EVTONLY to subvert a secluded rename.
4630 */
4631 if (fcp->c_linkcount != 1) {
4632 error = EMLINK;
4633 goto out;
4634 }
4635
4636 if (fcp->c_rsrc_vp && (vnode_usecount(fcp->c_rsrc_vp) > 0 ||
4637 vnode_iocount(fcp->c_rsrc_vp) > 0)) {
4638 /* Resource fork is in use (including O_EVTONLY) */
4639 error = EBUSY;
4640 goto out;
4641 }
4642 if (fcp->c_vp && (vnode_usecount(fcp->c_vp) > (fcp->c_rsrc_vp ? 1 : 0) ||
4643 vnode_iocount(fcp->c_vp) > 1)) {
4644 /*
4645 * Data fork is in use, including O_EVTONLY, but not
4646 * including a reference from the resource fork.
4647 */
4648 error = EBUSY;
4649 goto out;
4650 }
4651 }
4652 #endif
4653
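	/*
	 * Build temporary catalog descriptors for the source and destination
	 * names, pointing at the caller's componentname buffers (hence
	 * CD_HASBUF and CD_DECOMPOSED are masked off).
	 */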
4654 bzero(&from_desc, sizeof(from_desc));
4655 from_desc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
4656 from_desc.cd_namelen = fcnp->cn_namelen;
4657 from_desc.cd_parentcnid = fdcp->c_fileid;
4658 from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
4659 from_desc.cd_cnid = fcp->c_cnid;
4660
4661 bzero(&to_desc, sizeof(to_desc));
4662 to_desc.cd_nameptr = (const u_int8_t *)tcnp->cn_nameptr;
4663 to_desc.cd_namelen = tcnp->cn_namelen;
4664 to_desc.cd_parentcnid = tdcp->c_fileid;
4665 to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
4666 to_desc.cd_cnid = fcp->c_cnid;
4667
4668 if ((error = hfs_start_transaction(hfsmp)) != 0) {
4669 goto out;
4670 }
4671 started_tr = 1;
4672
4673 /* hfs_vnop_link() and hfs_vnop_rename() set kHFSHasChildLinkMask
4674 * inside a journal transaction and without holding a cnode lock.
4675 * As setting of this bit depends on being in journal transaction for
4676 * concurrency, check this bit again after we start journal transaction for rename
4677 * to ensure that this directory does not have any descendant that
4678 * is a directory hard link.
4679 */
4680 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
4681 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
4682 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4683 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
4684 error = EPERM;
4685 hfs_systemfile_unlock(hfsmp, lockflags);
4686 goto out;
4687 }
4688 hfs_systemfile_unlock(hfsmp, lockflags);
4689 }
4690 }
4691
4692 // if it's a hardlink then re-lookup the name so
4693 // that we get the correct cnid in from_desc (see
4694 // the comment in hfs_removefile for more details)
4695 //
4696 if (fcp->c_flag & C_HARDLINK) {
4697 struct cat_desc tmpdesc;
4698 cnid_t real_cnid;
4699
4700 tmpdesc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
4701 tmpdesc.cd_namelen = fcnp->cn_namelen;
4702 tmpdesc.cd_parentcnid = fdcp->c_fileid;
4703 tmpdesc.cd_hint = fdcp->c_childhint;
4704 tmpdesc.cd_flags = fcp->c_desc.cd_flags & CD_ISDIR;
4705 tmpdesc.cd_encoding = 0;
4706
4707 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4708
4709 if (cat_lookup(hfsmp, &tmpdesc, 0, 0, NULL, NULL, NULL, &real_cnid) != 0) {
4710 hfs_systemfile_unlock(hfsmp, lockflags);
4711 goto out;
4712 }
4713
4714 // use the real cnid instead of whatever happened to be there
4715 from_desc.cd_cnid = real_cnid;
4716 hfs_systemfile_unlock(hfsmp, lockflags);
4717 }
4718
4719 /*
4720 * Reserve some space in the Catalog file.
4721 */
4722 if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) {
4723 goto out;
4724 }
4725 got_cookie = 1;
4726
4727 /*
4728 * If the destination exists then it may need to be removed.
4729 *
4730 * Due to HFS's locking system, we should always move the
4731 * existing 'tvp' element to the hidden directory in hfs_vnop_rename.
4732 * Because the VNOP_LOOKUP call enters and exits the filesystem independently
4733 * of the actual vnop that it was trying to do (stat, link, readlink),
4734 * we must release the cnode lock of that element during the interim to
4735 * do MAC checking, vnode authorization, and other calls. In that time,
4736 * the item can be deleted (or renamed over). However, only in the rename
4737 * case is it inappropriate to return ENOENT from any of those calls. Either
4738 * the call should return information about the old element (stale), or get
4739 * information about the newer element that we are about to write in its place.
4740 *
4741 * HFS lookup has been modified to detect a rename and re-drive its
4742 * lookup internally. For other calls that have already succeeded in
4743 * their lookup call and are waiting to acquire the cnode lock in order
4744 * to proceed, that cnode lock will not fail due to the cnode being marked
4745 * C_NOEXISTS, because it won't have been marked as such. It will only
4746 * have C_DELETED. Thus, they will simply act on the stale open-unlinked
4747 * element. All future callers will get the new element.
4748 *
4749 * To implement this behavior, we pass the "only_unlink" argument to
4750 * hfs_removefile and hfs_removedir. This will result in the vnode acting
4751 * as though it is open-unlinked. Additionally, when we are done moving the
4752 * element to the hidden directory, we vnode_recycle the target so that it is
4753 * reclaimed as soon as possible. Reclaim and inactive are both
4754 * capable of clearing out unused blocks for an open-unlinked file or dir.
4755 */
4756 if (tvp) {
4757 //
4758 // if the destination has a document id, we need to preserve it
4759 //
4760 if (fvp != tvp) {
4761 uint32_t document_id;
4762 struct FndrExtendedDirInfo *ffip = (struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
4763 struct FndrExtendedDirInfo *tfip = (struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16);
4764
4765 if (ffip->document_id && tfip->document_id) {
4766 // both documents are tracked. only save a tombstone from tcp and do nothing else.
4767 doc_tombstone_save(tdvp, tvp, tcnp, hfs_get_document_id(tcp),
4768 tcp->c_fileid);
4769 } else {
4770 struct doc_tombstone *ut;
4771 ut = doc_tombstone_get();
4772
4773 document_id = tfip->document_id;
4774 tfip->document_id = 0;
4775
4776 if (document_id != 0) {
4777 // clear UF_TRACKED as well since tcp is now no longer tracked
4778 tcp->c_bsdflags &= ~UF_TRACKED;
4779 (void) cat_update(hfsmp, &tcp->c_desc, &tcp->c_attr, NULL, NULL);
4780 }
4781
4782 if (ffip->document_id == 0 && document_id != 0) {
4783 // printf("RENAME: preserving doc-id %d onto %s (from ino %d, to ino %d)\n", document_id, tcp->c_desc.cd_nameptr, tcp->c_desc.cd_cnid, fcp->c_desc.cd_cnid);
4784 fcp->c_bsdflags |= UF_TRACKED;
4785 ffip->document_id = document_id;
4786
4787 (void) cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
4788 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4789 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4790 FSE_ARG_INO, (ino64_t)tcp->c_fileid, // src inode #
4791 FSE_ARG_INO, (ino64_t)fcp->c_fileid, // dst inode #
4792 FSE_ARG_INT32, (uint32_t)ffip->document_id,
4793 FSE_ARG_DONE);
4794 }
4795 else if ((fcp->c_bsdflags & UF_TRACKED) && doc_tombstone_should_save(ut, fvp, fcnp)) {
4796
4797 if (ut->t_lastop_document_id) {
4798 doc_tombstone_clear(ut, NULL);
4799 }
4800 doc_tombstone_save(fdvp, fvp, fcnp,
4801 hfs_get_document_id(fcp), fcp->c_fileid);
4802
4803 //printf("RENAME: (dest-exists): saving tombstone doc-id %lld @ %s (ino %d)\n",
4804 // ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
4805 }
4806 }
4807 }
4808
4809 /*
4810 * When fvp matches tvp they could be case variants
4811 * or matching hard links.
4812 */
4813 if (fvp == tvp) {
4814 if (!(fcp->c_flag & C_HARDLINK)) {
4815 /*
4816 * If they're not hardlinks, then fvp == tvp must mean we
4817 * are using case-insensitive HFS because case-sensitive would
4818 * not use the same vnode for both. In this case we just update
4819 * the catalog for: a -> A
4820 */
4821 goto skip_rm; /* simple case variant */
4822
4823 }
4824 /* For all cases below, we must be using hardlinks */
4825 else if ((fdvp != tdvp) ||
4826 (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) {
4827 /*
4828 * If the parent directories are not the same, AND the two items
4829 * are hardlinks, posix says to do nothing:
4830 * dir1/fred <-> dir2/bob and the op was mv dir1/fred -> dir2/bob
4831 * We just return 0 in this case.
4832 *
4833 * If case sensitivity is on, and we are using hardlinks
4834 * then renaming is supposed to do nothing.
4835 * dir1/fred <-> dir2/FRED, and op == mv dir1/fred -> dir2/FRED
4836 */
4837 goto out; /* matching hardlinks, nothing to do */
4838
4839 } else if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
4840 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
4841 /*
4842 * If we get here, then the following must be true:
4843 * a) We are running case-insensitive HFS+.
4844 * b) Both paths 'fvp' and 'tvp' are in the same parent directory.
4845 * c) the two names are case-variants of each other.
4846 *
4847 * In this case, we are really only dealing with a single catalog record
4848 * whose name is being updated.
4849 *
4850 * op is dir1/fred -> dir1/FRED
4851 *
4852 * We need to special case the name matching, because if
4853 * dir1/fred <-> dir1/bob were the two links, and the
4854 * op was dir1/fred -> dir1/bob
4855 * That would fail/do nothing.
4856 */
4857 goto skip_rm; /* case-variant hardlink in the same dir */
4858 } else {
4859 goto out; /* matching hardlink, nothing to do */
4860 }
4861 }
4862
4863
4864 if (vnode_isdir(tvp)) {
4865 /*
4866 * hfs_removedir will eventually call hfs_removefile on the directory
4867 * we're working on, because only hfs_removefile does the renaming of the
4868 * item to the hidden directory. The directory will stay around in the
4869 * hidden directory with C_DELETED until it gets an inactive or a reclaim.
4870 * That way, we can destroy all of the EAs as needed and allow new ones to be
4871 * written.
4872 */
4873 error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE, 1);
4874 }
4875 else {
4876 error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0, NULL, 1);
4877
4878 /*
4879 * If the destination file had a resource fork vnode, then we need to get rid of
4880 * its blocks when there are no more references to it. Because the call to
4881 * hfs_removefile above always open-unlinks things, we need to force an inactive/reclaim
4882 * on the resource fork vnode, in order to prevent block leaks. Otherwise,
4883 * the resource fork vnode could prevent the data fork vnode from going out of scope
4884 * because it holds a v_parent reference on it. So we mark it for termination
4885 * with a call to vnode_recycle. hfs_vnop_reclaim has been modified so that it
4886 * can clean up the blocks of open-unlinked files and resource forks.
4887 *
4888 * We can safely call vnode_recycle on the resource fork because we took an iocount
4889 * reference on it at the beginning of the function.
4890 */
4891
4892 if ((error == 0) && (tcp->c_flag & C_DELETED) && (tvp_rsrc)) {
4893 vnode_recycle(tvp_rsrc);
4894 }
4895 }
4896
4897 if (error) {
4898 goto out;
4899 }
4900
4901 tvp_deleted = 1;
4902
4903 /* Mark 'tcp' as being deleted due to a rename */
4904 tcp->c_flag |= C_RENAMED;
4905
4906 /*
4907 * Aggressively mark tvp/tcp for termination to ensure that we recover all blocks
4908 * as quickly as possible.
4909 */
4910 vnode_recycle(tvp);
4911 } else {
4912 struct doc_tombstone *ut;
4913 ut = doc_tombstone_get();
4914
4915 //
4916 // There is nothing at the destination. If the file being renamed is
4917 // tracked, save a "tombstone" of the document_id. If the file is
4918 // not a tracked file, then see if it needs to inherit a tombstone.
4919 //
4920 // NOTE: we do not save a tombstone if the file being renamed begins
4921 // with "atmp" which is done to work-around AutoCad's bizarre
4922 // 5-step un-safe save behavior
4923 //
4924 if (fcp->c_bsdflags & UF_TRACKED) {
4925 if (doc_tombstone_should_save(ut, fvp, fcnp)) {
4926 doc_tombstone_save(fdvp, fvp, fcnp, hfs_get_document_id(fcp),
4927 fcp->c_fileid);
4928
4929 //printf("RENAME: (no dest): saving tombstone doc-id %lld @ %s (ino %d)\n",
4930 // ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
4931 } else {
4932 // intentionally do nothing
4933 }
4934 } else if ( ut->t_lastop_document_id != 0
4935 && tdvp == ut->t_lastop_parent
4936 && vnode_vid(tdvp) == ut->t_lastop_parent_vid
4937 && strcmp((char *)ut->t_lastop_filename, (char *)tcnp->cn_nameptr) == 0) {
4938
4939 //printf("RENAME: %s (ino %d) inheriting doc-id %lld\n", tcnp->cn_nameptr, fcp->c_desc.cd_cnid, ut->t_lastop_document_id);
4940 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
4941 fcp->c_bsdflags |= UF_TRACKED;
4942 fip->document_id = ut->t_lastop_document_id;
4943 cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
4944
4945 doc_tombstone_clear(ut, &old_doc_vp);
4946 } else if (ut->t_lastop_document_id && doc_tombstone_should_save(ut, fvp, fcnp) && doc_tombstone_should_save(ut, tvp, tcnp)) {
4947 // no match, clear the tombstone
4948 //printf("RENAME: clearing the tombstone %lld @ %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
4949 doc_tombstone_clear(ut, NULL);
4950 }
4951
4952 }
4953 skip_rm:
4954 /*
4955 * All done with tvp and fvp.
4956 *
4957 * We also jump to this point if there was no destination observed during lookup and namei.
4958 * However, because only iocounts are held at the VFS layer, there is nothing preventing a
4959 * competing thread from racing us and creating a file or dir at the destination of this rename
4960 * operation. If this occurs, it may cause us to get a spurious EEXIST out of the cat_rename
4961 * call below. To preserve rename's atomicity, we need to signal VFS to re-drive the
4962 * namei/lookup and restart the rename operation. EEXIST is an allowable errno to be bubbled
4963 * out of the rename syscall, but not for this reason, since it is a synonym errno for ENOTEMPTY.
4964 * To signal VFS, we return ERECYCLE (which is also used for lookup restarts). This errno
4965 * will be swallowed and it will restart the operation.
4966 */
4967
4968 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
4969 error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);
4970 hfs_systemfile_unlock(hfsmp, lockflags);
4971
4972 if (error) {
4973 if (error == EEXIST) {
4974 error = ERECYCLE;
4975 }
4976 goto out;
4977 }
4978
4979 /* Invalidate negative cache entries in the destination directory */
4980 if (tdcp->c_flag & C_NEG_ENTRIES) {
4981 cache_purge_negatives(tdvp);
4982 tdcp->c_flag &= ~C_NEG_ENTRIES;
4983 }
4984
4985 /* Update cnode's catalog descriptor */
4986 replace_desc(fcp, &out_desc);
4987 fcp->c_parentcnid = tdcp->c_fileid;
4988 fcp->c_hint = 0;
4989
4990 /*
4991 * Now indicate this cnode needs to have date-added written to the
4992 * finderinfo, but only if moving to a different directory, or if
4993 * it doesn't already have it.
4994 */
4995 if (fdvp != tdvp || !ISSET(fcp->c_attr.ca_recflags, kHFSHasDateAddedMask))
4996 fcp->c_flag |= C_NEEDS_DATEADDED;
4997
4998 (void) hfs_update (fvp, 0);
4999
5000 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_RMDIR : VOL_RMFILE,
5001 (fdcp->c_cnid == kHFSRootFolderID));
5002 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_MKDIR : VOL_MKFILE,
5003 (tdcp->c_cnid == kHFSRootFolderID));
5004
5005 /* Update both parent directories. */
5006 if (fdvp != tdvp) {
5007 if (vnode_isdir(fvp)) {
5008 /* If the source directory has directory hard link
5009 * descendants, set the kHFSHasChildLinkBit in the
5010 * destination parent hierarchy
5011 */
5012 if ((fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) &&
5013 !(tdcp->c_attr.ca_recflags & kHFSHasChildLinkMask)) {
5014
5015 tdcp->c_attr.ca_recflags |= kHFSHasChildLinkMask;
5016
5017 error = cat_set_childlinkbit(hfsmp, tdcp->c_parentcnid);
5018 if (error) {
5019 printf ("hfs_vnop_rename: error updating parent chain for %u\n", tdcp->c_cnid);
5020 error = 0;
5021 }
5022 }
5023 INC_FOLDERCOUNT(hfsmp, tdcp->c_attr);
5024 DEC_FOLDERCOUNT(hfsmp, fdcp->c_attr);
5025 }
5026 tdcp->c_entries++;
5027 tdcp->c_dirchangecnt++;
5028 tdcp->c_flag |= C_MODIFIED;
5029 hfs_incr_gencount(tdcp);
5030
5031 if (fdcp->c_entries > 0)
5032 fdcp->c_entries--;
5033 fdcp->c_dirchangecnt++;
5034 fdcp->c_flag |= C_MODIFIED;
5035 fdcp->c_touch_chgtime = TRUE;
5036 fdcp->c_touch_modtime = TRUE;
5037
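		/*
		 * For hardlinks, drop any cached link origin that still refers
		 * to the old parent directory.
		 */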
5038 if (ISSET(fcp->c_flag, C_HARDLINK)) {
5039 hfs_relorigin(fcp, fdcp->c_fileid);
5040 if (fdcp->c_fileid != fdcp->c_cnid)
5041 hfs_relorigin(fcp, fdcp->c_cnid);
5042 }
5043
5044 (void) hfs_update(fdvp, 0);
5045 }
5046 hfs_incr_gencount(fdcp);
5047
5048 tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
5049 tdcp->c_touch_chgtime = TRUE;
5050 tdcp->c_touch_modtime = TRUE;
5051
5052 (void) hfs_update(tdvp, 0);
5053
5054 /* Update the vnode's name now that the rename has completed. */
5055 vnode_update_identity(fvp, tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
5056 tcnp->cn_hash, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME));
5057
5058 /*
5059 * At this point, we may have a resource fork vnode attached to the
5060 * 'from' vnode. If it exists, we will want to update its name, because
5061 * it contains the old name + _PATH_RSRCFORKSPEC. ("/..namedfork/rsrc").
5062 *
5063 * Note that the only thing we need to update here is the name attached to
5064 * the vnode, since a resource fork vnode does not have a separate resource
5065 * cnode -- it's still 'fcp'.
5066 */
5067 if (fcp->c_rsrc_vp) {
5068 char* rsrc_path = NULL;
5069 int len;
5070
5071 /* Create a new temporary buffer that's going to hold the new name */
5072 rsrc_path = hfs_malloc(MAXPATHLEN);
5073 len = snprintf (rsrc_path, MAXPATHLEN, "%s%s", tcnp->cn_nameptr, _PATH_RSRCFORKSPEC);
5074 len = MIN(len, MAXPATHLEN);
5075
5076 /*
5077 * vnode_update_identity will do the following for us:
5078 * 1) release reference on the existing rsrc vnode's name.
5079 * 2) copy/insert new name into the name cache
5080 * 3) attach the new name to the resource vnode
5081 * 4) update the vnode's vid
5082 */
5083 vnode_update_identity (fcp->c_rsrc_vp, fvp, rsrc_path, len, 0, (VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE));
5084
5085 /* Free the memory associated with the resource fork's name */
5086 hfs_free(rsrc_path, MAXPATHLEN);
5087 }
5088 out:
5089 if (got_cookie) {
5090 cat_postflight(hfsmp, &cookie, p);
5091 }
5092 if (started_tr) {
5093 hfs_end_transaction(hfsmp);
5094 }
5095
5096 fdcp->c_flag &= ~C_DIR_MODIFICATION;
5097 wakeup((caddr_t)&fdcp->c_flag);
5098 if (fdvp != tdvp) {
5099 tdcp->c_flag &= ~C_DIR_MODIFICATION;
5100 wakeup((caddr_t)&tdcp->c_flag);
5101 }
5102
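	/*
	 * Capture the file id before dropping the cnode locks; it is needed
	 * for the FSE_DOCID_CHANGED event emitted below when old_doc_vp is set.
	 */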
5103 const ino64_t file_id = fcp->c_fileid;
5104
5105 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
5106
5107 if (took_trunc_lock) {
5108 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
5109 }
5110
5111 /* Now vnode_put the resource fork vnode if necessary */
5112 if (tvp_rsrc) {
5113 vnode_put(tvp_rsrc);
5114 tvp_rsrc = NULL;
5115 }
5116
5117 /* After tvp is removed the only acceptable error is EIO */
5118 if (error && tvp_deleted)
5119 error = EIO;
5120
5121 /* If we want to reintroduce notifications for renames, this is the
5122 place to do it. */
5123
5124 if (old_doc_vp) {
5125 cnode_t *ocp = VTOC(old_doc_vp);
5126 hfs_lock_always(ocp, HFS_EXCLUSIVE_LOCK);
5127 struct FndrExtendedFileInfo *ofip = (struct FndrExtendedFileInfo *)((char *)&ocp->c_attr.ca_finderinfo + 16);
5128
5129 const uint32_t doc_id = ofip->document_id;
5130 const ino64_t old_file_id = ocp->c_fileid;
5131
5132 // printf("clearing doc-id from ino %d\n", ocp->c_desc.cd_cnid);
5133 ofip->document_id = 0;
5134 ocp->c_bsdflags &= ~UF_TRACKED;
5135 ocp->c_flag |= C_MODIFIED;
5136
5137 hfs_unlock(ocp);
5138 vnode_put(old_doc_vp);
5139
5140 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
5141 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
5142 FSE_ARG_INO, old_file_id, // src inode #
5143 FSE_ARG_INO, file_id, // dst inode #
5144 FSE_ARG_INT32, doc_id,
5145 FSE_ARG_DONE);
5146 }
5147
5148 return (error);
5149 }
5150
5151
5152 /*
5153 * Make a directory.
5154 */
5155 int
5156 hfs_vnop_mkdir(struct vnop_mkdir_args *ap)
5157 {
5158 /***** HACK ALERT ********/
5159 ap->a_cnp->cn_flags |= MAKEENTRY;
5160 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
5161 }
5162
5163
5164 /*
5165 * Create a symbolic link.
5166 */
5167 int
5168 hfs_vnop_symlink(struct vnop_symlink_args *ap)
5169 {
5170 struct vnode **vpp = ap->a_vpp;
5171 struct vnode *dvp = ap->a_dvp;
5172 struct vnode *vp = NULL;
5173 struct cnode *cp = NULL;
5174 struct hfsmount *hfsmp;
5175 struct filefork *fp;
5176 struct buf *bp = NULL;
5177 char *datap;
5178 int started_tr = 0;
5179 u_int32_t len;
5180 int error;
5181
5182 /* HFS standard disks don't support symbolic links */
5183 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord)
5184 return (ENOTSUP);
5185
5186 /* Check for empty target name */
5187 if (ap->a_target[0] == 0)
5188 return (EINVAL);
5189
5190 hfsmp = VTOHFS(dvp);
5191
5192 len = strlen(ap->a_target);
5193 if (len > MAXPATHLEN)
5194 return (ENAMETOOLONG);
5195
5196 /* Check for free space */
5197 if (((u_int64_t)hfs_freeblks(hfsmp, 0) * (u_int64_t)hfsmp->blockSize) < len) {
5198 return (ENOSPC);
5199 }
5200
5201 /* Create the vnode */
5202 ap->a_vap->va_mode |= S_IFLNK;
5203 if ((error = hfs_makenode(dvp, vpp, ap->a_cnp, ap->a_vap, ap->a_context))) {
5204 goto out;
5205 }
5206 vp = *vpp;
5207 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
5208 goto out;
5209 }
5210 cp = VTOC(vp);
5211 fp = VTOF(vp);
5212
5213 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
5214 goto out;
5215 }
5216
5217 #if QUOTA
5218 (void)hfs_getinoquota(cp);
5219 #endif /* QUOTA */
5220
5221 if ((error = hfs_start_transaction(hfsmp)) != 0) {
5222 goto out;
5223 }
5224 started_tr = 1;
5225
5226 /*
5227 * Allocate space for the link.
5228 *
5229 * We're already inside a transaction here.
5230 *
5231 * Don't need the truncate lock since a symlink is treated as a system file.
5232 */
5233 error = hfs_truncate(vp, len, IO_NOZEROFILL, 0, ap->a_context);
5234
5235 /* On errors, remove the symlink file */
5236 if (error) {
5237 /*
5238 * End the transaction so we don't re-take the cnode lock
5239 * below while inside a transaction (lock order violation).
5240 */
5241 hfs_end_transaction(hfsmp);
5242
5243 /* hfs_removefile() requires holding the truncate lock */
5244 hfs_unlock(cp);
5245 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
5246 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
5247
5248 if (hfs_start_transaction(hfsmp) != 0) {
5249 started_tr = 0;
5250 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
5251 goto out;
5252 }
5253
5254 (void) hfs_removefile(dvp, vp, ap->a_cnp, 0, 0, 0, NULL, 0);
5255 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
5256 goto out;
5257 }
5258
5259 /* Write the link to disk */
5260 bp = buf_getblk(vp, (daddr64_t)0, roundup((int)fp->ff_size, hfsmp->hfs_physical_block_size),
5261 0, 0, BLK_META);
5262 if (hfsmp->jnl) {
5263 journal_modify_block_start(hfsmp->jnl, bp);
5264 }
5265 datap = (char *)buf_dataptr(bp);
5266 bzero(datap, buf_size(bp));
5267 bcopy(ap->a_target, datap, len);
5268
5269 if (hfsmp->jnl) {
5270 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
5271 } else {
5272 buf_bawrite(bp);
5273 }
5274 out:
5275 if (started_tr)
5276 hfs_end_transaction(hfsmp);
5277 if ((cp != NULL) && (vp != NULL)) {
5278 hfs_unlock(cp);
5279 }
5280 if (error) {
5281 if (vp) {
5282 vnode_put(vp);
5283 }
5284 *vpp = NULL;
5285 }
5286 return (error);
5287 }
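/*
 * On-disk note (informational sketch, not authoritative): the link target is
 * stored as ordinary data in the symlink's data fork, and the buf_getblk()
 * write above is rounded up to hfs_physical_block_size. For example, a
 * 12-byte target on a volume with 4096-byte physical blocks is still written
 * out as one full 4096-byte buffer, zero-filled past the target by the
 * bzero()/bcopy() pair.
 */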
5288
5289
5290 /* structures to hold a "." or ".." directory entry */
5291 struct hfs_stddotentry {
5292 u_int32_t d_fileno; /* unique file number */
5293 u_int16_t d_reclen; /* length of this structure */
5294 u_int8_t d_type; /* dirent file type */
5295 u_int8_t d_namlen; /* len of filename */
5296 char d_name[4]; /* "." or ".." */
5297 };
5298
5299 struct hfs_extdotentry {
5300 u_int64_t d_fileno; /* unique file number */
5301 u_int64_t d_seekoff; /* seek offset (optional, used by servers) */
5302 u_int16_t d_reclen; /* length of this structure */
5303 u_int16_t d_namlen; /* len of filename */
5304 u_int8_t d_type; /* dirent file type */
5305 u_char d_name[3]; /* "." or ".." */
5306 };
5307
5308 typedef union {
5309 struct hfs_stddotentry std;
5310 struct hfs_extdotentry ext;
5311 } hfs_dotentry_t;
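/*
 * Size sketch (assuming the natural member alignment these structures get
 * here): sizeof(struct hfs_stddotentry) works out to 12 bytes and
 * sizeof(struct hfs_extdotentry) to 24 bytes, so the synthesized "." and
 * ".." pair copied out by hfs_vnop_readdir below is 24 or 48 bytes of
 * uiomove'd data respectively.
 */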
5312
5313 /*
5314 * hfs_vnop_readdir reads directory entries into the buffer pointed
5315 * to by uio, in a filesystem independent format. Up to uio_resid
5316 * bytes of data can be transferred. The data in the buffer is a
5317 * series of packed dirent structures where each one contains the
5318 * following entries:
5319 *
5320 * u_int32_t d_fileno; // file number of entry
5321 * u_int16_t d_reclen; // length of this record
5322 * u_int8_t d_type; // file type
5323 * u_int8_t d_namlen; // length of string in d_name
5324 * char d_name[MAXNAMELEN+1]; // null terminated file name
5325 *
5326 * The current position (uio_offset) refers to the next block of
5327 * entries. The offset can only be set to a value previously
5328 * returned by hfs_vnop_readdir or zero. This offset does not have
5329 * to match the number of bytes returned (in uio_resid).
5330 *
5331 * In fact, the offset used by HFS is essentially an index (26 bits)
5332 * with a tag (6 bits). The tag is for associating the next request
5333 * with the current request. This enables us to have multiple threads
5334 * reading the directory while the directory is also being modified.
5335 *
5336 * Each tag/index pair is tied to a unique directory hint. The hint
5337 * contains information (filename) needed to build the catalog b-tree
5338 * key for finding the next set of entries.
5339 *
5340 * If the directory is marked as deleted-but-in-use (cp->c_flag & C_DELETED),
5341 * do NOT synthesize entries for "." and "..".
5342 */
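/*
 * Offset encoding sketch (informational only; assumes HFS_INDEX_BITS is 26
 * and HFS_INDEX_MASK covers the low 26 bits, matching the tag/index split
 * described above and used in the code below):
 *
 *     index = (offset & HFS_INDEX_MASK) - 2;   // catalog directory index
 *     tag   =  offset & ~HFS_INDEX_MASK;       // ties request to a dirhint
 *
 * e.g. an offset of 0x0400000a decodes to tag 0x04000000 and index 8; the
 * "- 2" accounts for the synthesized "." and ".." entries.
 */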
5343 int
5344 hfs_vnop_readdir(struct vnop_readdir_args *ap)
5345 {
5346 struct vnode *vp = ap->a_vp;
5347 uio_t uio = ap->a_uio;
5348 struct cnode *cp = VTOC(vp);
5349 struct hfsmount *hfsmp = VTOHFS(vp);
5350 directoryhint_t *dirhint = NULL;
5351 directoryhint_t localhint;
5352 off_t offset;
5353 off_t startoffset;
5354 int error = 0;
5355 int eofflag = 0;
5356 user_addr_t user_start = 0;
5357 user_size_t user_len = 0;
5358 int index;
5359 unsigned int tag;
5360 int items;
5361 int lockflags;
5362 int extended;
5363 int nfs_cookies;
5364 cnid_t cnid_hint = 0;
5365 int bump_valence = 0;
5366
5367 items = 0;
5368 startoffset = offset = uio_offset(uio);
5369 extended = (ap->a_flags & VNODE_READDIR_EXTENDED);
5370 nfs_cookies = extended && (ap->a_flags & VNODE_READDIR_REQSEEKOFF);
5371
5372 /* Sanity check the uio data. */
5373 if (uio_iovcnt(uio) > 1)
5374 return (EINVAL);
5375
5376 if (VTOC(vp)->c_bsdflags & UF_COMPRESSED) {
5377 int compressed = hfs_file_is_compressed(VTOC(vp), 0); /* 0 == take the cnode lock */
5378 if (VTOCMP(vp) != NULL && !compressed) {
5379 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
5380 if (error) {
5381 return error;
5382 }
5383 }
5384 }
5385
5386 //
5387 // We have to lock the user's buffer here so that we won't
5388 // fault on it after we've acquired a shared lock on the
5389 // catalog file. The issue is that you can get a 3-way
5390 // deadlock if someone else starts a transaction and then
5391 // tries to lock the catalog file but can't because we're
5392 // here and we can't service our page fault because VM is
5393 // blocked trying to start a transaction as a result of
5394 // trying to free up pages for our page fault. It's messy
5395 // but it does happen on dual-processors that are paging
5396 // heavily (see radar 3082639 for more info). By locking
5397 // the buffer up-front we prevent ourselves from faulting
5398 // while holding the shared catalog file lock.
5399 //
5400 // Fortunately this and hfs_search() are the only two places
5401 // currently (10/30/02) that can fault on user data with a
5402 // shared lock on the catalog file.
5403 //
5404 if (hfsmp->jnl && uio_isuserspace(uio)) {
5405 user_start = uio_curriovbase(uio);
5406 user_len = uio_curriovlen(uio);
5407
5408 /* Bounds check the user buffer */
5409 if (user_len > (256 * 1024)) {
5410 /* only allow the user to wire down at most 256k */
5411 user_len = (256 * 1024);
5412 uio_setresid (uio, (user_ssize_t)(256 * 1024));
5413 }
5414
5415 if ((error = vslock(user_start, user_len)) != 0) {
5416 return error;
5417 }
5418 }
5419
5420 /* Note that the dirhint calls require an exclusive lock. */
5421 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
5422 if (user_start) {
5423 vsunlock(user_start, user_len, TRUE);
5424 }
5425 return error;
5426 }
5427
5428 /* Pick up cnid hint (if any). */
5429 if (nfs_cookies) {
5430 cnid_hint = (cnid_t)(uio_offset(uio) >> 32);
5431 uio_setoffset(uio, uio_offset(uio) & 0x00000000ffffffffLL);
5432 if (cnid_hint == INT_MAX) { /* searching past the last item */
5433 eofflag = 1;
5434 goto out;
5435 }
5436 }
5437 /*
5438 * Synthesize entries for "." and "..", unless the directory has
5439 * been deleted, but not closed yet (lazy delete in progress).
5440 */
5441 if (offset == 0 && !(cp->c_flag & C_DELETED)) {
5442
5443 size_t uiosize;
5444
5445 /*
5446 * We could use a union of the two types of dot entries (HFS / HFS+)
5447 * but it makes static analysis of this code difficult. The problem is that
5448 * the HFS standard dot entry is smaller than the HFS+ one, and we also ideally
5449 * want the uiomove to operate on a two-element adjacent array. If we used the
5450 * array of unions, we would have to do two separate uiomoves because the memory
5451 * for the hfs standard dot entries would not be adjacent to one another.
5452 * So just allocate the entries on the stack in separate cases.
5453 */
5454
5455 if (extended) {
5456 hfs_dotentry_t dotentry[2];
5457
5458 /* HFS Plus */
5459 struct hfs_extdotentry *entry = &dotentry[0].ext;
5460
5461 entry->d_fileno = cp->c_cnid;
5462 entry->d_reclen = sizeof(struct hfs_extdotentry);
5463 entry->d_type = DT_DIR;
5464 entry->d_namlen = 1;
5465 entry->d_name[0] = '.';
5466 entry->d_name[1] = '\0';
5467 entry->d_name[2] = '\0';
5468 entry->d_seekoff = 1;
5469
5470 ++entry;
5471 entry->d_fileno = cp->c_parentcnid;
5472 entry->d_reclen = sizeof(struct hfs_extdotentry);
5473 entry->d_type = DT_DIR;
5474 entry->d_namlen = 2;
5475 entry->d_name[0] = '.';
5476 entry->d_name[1] = '.';
5477 entry->d_name[2] = '\0';
5478 entry->d_seekoff = 2;
5479 uiosize = 2 * sizeof(struct hfs_extdotentry);
5480
5481 if ((error = uiomove((caddr_t)dotentry, uiosize, uio))) {
5482 goto out;
5483 }
5484
5485 } else {
5486 struct hfs_stddotentry hfs_std_dotentries[2];
5487
5488 /* HFS Standard */
5489 struct hfs_stddotentry *entry = &hfs_std_dotentries[0];
5490
5491 entry->d_fileno = cp->c_cnid;
5492 entry->d_reclen = sizeof(struct hfs_stddotentry);
5493 entry->d_type = DT_DIR;
5494 entry->d_namlen = 1;
5495 *(int *)&entry->d_name[0] = 0;
5496 entry->d_name[0] = '.';
5497
5498 ++entry;
5499 entry->d_fileno = cp->c_parentcnid;
5500 entry->d_reclen = sizeof(struct hfs_stddotentry);
5501 entry->d_type = DT_DIR;
5502 entry->d_namlen = 2;
5503 *(int *)&entry->d_name[0] = 0;
5504 entry->d_name[0] = '.';
5505 entry->d_name[1] = '.';
5506 uiosize = 2 * sizeof(struct hfs_stddotentry);
5507
5508 if ((error = uiomove((caddr_t)hfs_std_dotentries, uiosize, uio))) {
5509 goto out;
5510 }
5511 }
5512
5513 offset += 2;
5514 }
5515
5516 /*
5517 * Intentionally avoid checking the valence here. If we
5518 * have FS corruption that reports the valence is 0, even though it
5519 * has contents, we might artificially skip over iterating
5520 * this directory.
5521 */
5522
5523 /* Convert offset into a catalog directory index. */
5524 index = (offset & HFS_INDEX_MASK) - 2;
5525 tag = offset & ~HFS_INDEX_MASK;
5526
5527 /* Lock catalog during cat_findname and cat_getdirentries. */
5528 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
5529
5530 /* When called from NFS, try and resolve a cnid hint. */
5531 if (nfs_cookies && cnid_hint != 0) {
5532 if (cat_findname(hfsmp, cnid_hint, &localhint.dh_desc) == 0) {
5533 if ( localhint.dh_desc.cd_parentcnid == cp->c_fileid) {
5534 localhint.dh_index = index - 1;
5535 localhint.dh_time = 0;
5536 bzero(&localhint.dh_link, sizeof(localhint.dh_link));
5537 dirhint = &localhint; /* don't forget to release the descriptor */
5538 } else {
5539 cat_releasedesc(&localhint.dh_desc);
5540 }
5541 }
5542 }
5543
5544 /* Get a directory hint (cnode must be locked exclusive) */
5545 if (dirhint == NULL) {
5546 dirhint = hfs_getdirhint(cp, ((index - 1) & HFS_INDEX_MASK) | tag, 0);
5547
5548 /* Hide tag from catalog layer. */
5549 dirhint->dh_index &= HFS_INDEX_MASK;
5550 if (dirhint->dh_index == HFS_INDEX_MASK) {
5551 dirhint->dh_index = -1;
5552 }
5553 }
5554
5555 if (index == 0) {
5556 dirhint->dh_threadhint = cp->c_dirthreadhint;
5557 }
5558 else {
5559 /*
5560 * If we have a non-zero index, there is a possibility that during the last
5561 * call to hfs_vnop_readdir we hit EOF for this directory. If that is the case
5562 * then we don't want to return any new entries for the caller. Just return 0
5563 * items, mark the eofflag, and bail out. Because we won't have done any work, the
5564 * code at the end of the function will release the dirhint for us.
5565 *
5566 * Don't forget to unlock the catalog lock on the way out, too.
5567 */
5568 if (dirhint->dh_desc.cd_flags & CD_EOF) {
5569 error = 0;
5570 eofflag = 1;
5571 uio_setoffset(uio, startoffset);
5572 hfs_systemfile_unlock (hfsmp, lockflags);
5573
5574 goto seekoffcalc;
5575 }
5576 }
5577
5578 /* Pack the buffer with dirent entries. */
5579 error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, ap->a_flags, &items, &eofflag);
5580
5581 if (index == 0 && error == 0) {
5582 cp->c_dirthreadhint = dirhint->dh_threadhint;
5583 }
5584
5585 hfs_systemfile_unlock(hfsmp, lockflags);
5586
5587 if (error != 0) {
5588 goto out;
5589 }
5590
5591 /* Get index to the next item */
5592 index += items;
5593
5594 if (items >= (int)cp->c_entries) {
5595 eofflag = 1;
5596 }
5597
5598 /*
5599 * Detect valence FS corruption.
5600 *
5601 * We are holding the cnode lock exclusive, so there should not be
5602 * anybody modifying the valence field of this cnode. If we enter
5603 * this block, that means we observed filesystem corruption, because
5604 * this directory reported a valence of 0, yet we found at least one
5605 * item. In this case, we need to minimally self-heal this
5606 * directory to prevent userland from tripping over a directory
5607 * that appears empty (getattr of valence reports 0), but actually
5608 * has contents.
5609 *
5610 * We'll force the cnode update at the end of the function after
5611 * completing all of the normal getdirentries steps.
5612 */
5613 if ((cp->c_entries == 0) && (items > 0)) {
5614 /* disk corruption */
5615 cp->c_entries++;
5616 /* Mark the cnode as dirty. */
5617 cp->c_flag |= C_MODIFIED;
5618 printf("hfs_vnop_readdir: repairing valence to non-zero! \n");
5619 bump_valence++;
5620 }
5621
5622
5623 /* Convert catalog directory index back into an offset. */
5624 while (tag == 0)
5625 tag = (++cp->c_dirhinttag) << HFS_INDEX_BITS;
5626 uio_setoffset(uio, (index + 2) | tag);
5627 dirhint->dh_index |= tag;
5628
5629 seekoffcalc:
5630 cp->c_touch_acctime = TRUE;
5631
5632 if (ap->a_numdirent) {
5633 if (startoffset == 0)
5634 items += 2;
5635 *ap->a_numdirent = items;
5636 }
5637
5638 out:
5639 if (user_start) {
5640 vsunlock(user_start, user_len, TRUE);
5641 }
5642 /* If we didn't do anything then go ahead and dump the hint. */
5643 if ((dirhint != NULL) &&
5644 (dirhint != &localhint) &&
5645 (uio_offset(uio) == startoffset)) {
5646 hfs_reldirhint(cp, dirhint);
5647 eofflag = 1;
5648 }
5649 if (ap->a_eofflag) {
5650 *ap->a_eofflag = eofflag;
5651 }
5652 if (dirhint == &localhint) {
5653 cat_releasedesc(&localhint.dh_desc);
5654 }
5655
5656 if (bump_valence) {
5657 /* force the update before dropping the cnode lock */
5658 hfs_update(vp, 0);
5659 }
5660
5661 hfs_unlock(cp);
5662
5663 return (error);
5664 }
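/*
 * Consumer-side sketch (userland, not kernel code; 'buf' and 'nbytes' are
 * assumed to come from a getdirentries-style call): the packed dirent
 * buffer produced above is walked with d_reclen, as the block comment
 * before hfs_vnop_readdir describes.
 *
 *     char *p = buf;
 *     while (p < buf + nbytes) {
 *         struct dirent *de = (struct dirent *)(void *)p;
 *         // consume de->d_fileno, de->d_type, de->d_namlen, de->d_name
 *         p += de->d_reclen;
 *     }
 */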
5665
5666
5667 /*
5668 * Read contents of a symbolic link.
5669 */
5670 int
5671 hfs_vnop_readlink(struct vnop_readlink_args *ap)
5672 {
5673 struct vnode *vp = ap->a_vp;
5674 struct cnode *cp;
5675 struct filefork *fp;
5676 int error;
5677
5678 if (!vnode_islnk(vp))
5679 return (EINVAL);
5680
5681 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
5682 return (error);
5683 cp = VTOC(vp);
5684 fp = VTOF(vp);
5685
5686 /* Zero length sym links are not allowed */
5687 if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
5688 error = EINVAL;
5689 goto exit;
5690 }
5691
5692 /* Cache the path so we don't waste buffer cache resources */
5693 if (fp->ff_symlinkptr == NULL) {
5694 struct buf *bp = NULL;
5695
5696 fp->ff_symlinkptr = hfs_malloc(fp->ff_size);
5697 error = (int)buf_meta_bread(vp, (daddr64_t)0,
5698 roundup((int)fp->ff_size, VTOHFS(vp)->hfs_physical_block_size),
5699 vfs_context_ucred(ap->a_context), &bp);
5700 if (error) {
5701 if (bp)
5702 buf_brelse(bp);
5703 if (fp->ff_symlinkptr) {
5704 hfs_free(fp->ff_symlinkptr, fp->ff_size);
5705 fp->ff_symlinkptr = NULL;
5706 }
5707 goto exit;
5708 }
5709 bcopy((char *)buf_dataptr(bp), fp->ff_symlinkptr, (size_t)fp->ff_size);
5710
5711 if (VTOHFS(vp)->jnl && (buf_flags(bp) & B_LOCKED) == 0) {
5712 buf_markinvalid(bp); /* data no longer needed */
5713 }
5714 buf_brelse(bp);
5715 }
5716 error = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
5717
5718 /*
5719 * Keep track of blocks read
5720 */
5721 if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (error == 0)) {
5722
5723 /*
5724 * If this file hasn't been seen since the start of
5725 * the current sampling period then start over.
5726 */
5727 if (cp->c_atime < VTOHFS(vp)->hfc_timebase)
5728 VTOF(vp)->ff_bytesread = fp->ff_size;
5729 else
5730 VTOF(vp)->ff_bytesread += fp->ff_size;
5731
5732 // if (VTOF(vp)->ff_bytesread > fp->ff_size)
5733 // cp->c_touch_acctime = TRUE;
5734 }
5735
5736 exit:
5737 hfs_unlock(cp);
5738 return (error);
5739 }
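/*
 * Userland view (illustrative; the path is hypothetical): the target cached
 * in ff_symlinkptr above is what readlink(2) ultimately copies out.
 *
 *     char buf[PATH_MAX];
 *     ssize_t n = readlink("/Volumes/MyVol/alias", buf, sizeof(buf) - 1);
 *     if (n >= 0)
 *         buf[n] = '\0';    // readlink(2) does not NUL-terminate
 */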
5740
5741
5742 /*
5743 * Get configurable pathname variables.
5744 */
5745 int
5746 hfs_vnop_pathconf(struct vnop_pathconf_args *ap)
5747 {
5748
5749 #if CONFIG_HFS_STD
5750 int std_hfs = (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD);
5751 #endif
5752
5753 switch (ap->a_name) {
5754 case _PC_LINK_MAX:
5755 #if CONFIG_HFS_STD
5756 if (std_hfs) {
5757 *ap->a_retval = 1;
5758 } else
5759 #endif
5760 {
5761 *ap->a_retval = HFS_LINK_MAX;
5762 }
5763 break;
5764 case _PC_NAME_MAX:
5765 #if CONFIG_HFS_STD
5766 if (std_hfs) {
5767 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
5768 } else
5769 #endif
5770 {
5771 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
5772 }
5773 break;
5774 case _PC_PATH_MAX:
5775 *ap->a_retval = PATH_MAX; /* 1024 */
5776 break;
5777 case _PC_PIPE_BUF:
5778 *ap->a_retval = PIPE_BUF;
5779 break;
5780 case _PC_CHOWN_RESTRICTED:
5781 *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */
5782 break;
5783 case _PC_NO_TRUNC:
5784 *ap->a_retval = 200112; /* _POSIX_NO_TRUNC */
5785 break;
5786 case _PC_NAME_CHARS_MAX:
5787 #if CONFIG_HFS_STD
5788 if (std_hfs) {
5789 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
5790 } else
5791 #endif
5792 {
5793 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
5794 }
5795 break;
5796 case _PC_CASE_SENSITIVE:
5797 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE)
5798 *ap->a_retval = 1;
5799 else
5800 *ap->a_retval = 0;
5801 break;
5802 case _PC_CASE_PRESERVING:
5803 *ap->a_retval = 1;
5804 break;
5805 case _PC_FILESIZEBITS:
5806 /* number of bits to store max file size */
5807 #if CONFIG_HFS_STD
5808 if (std_hfs) {
5809 *ap->a_retval = 32;
5810 } else
5811 #endif
5812 {
5813 *ap->a_retval = 64;
5814 }
5815 break;
5816 case _PC_XATTR_SIZE_BITS:
5817 /* Number of bits to store maximum extended attribute size */
5818 *ap->a_retval = HFS_XATTR_SIZE_BITS;
5819 break;
5820 default:
5821 return (EINVAL);
5822 }
5823
5824 return (0);
5825 }
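/*
 * Userland view (illustrative; the mount point is hypothetical):
 *
 *     long name_max  = pathconf("/Volumes/MyVol", _PC_NAME_MAX);       // 255 on HFS+
 *     long case_sens = pathconf("/Volumes/MyVol", _PC_CASE_SENSITIVE); // 1 only on case-sensitive HFSX
 */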
5826
5827 /*
5828 * Prepares a fork for cat_update by making sure ff_size and ff_blocks
5829 * are no bigger than the valid data on disk, thus reducing the chance
5830 * of exposing uninitialised data in the event of a non-clean unmount.
5831 * cf_buf is where to put the temporary copy if required. (It can
5832 * be the fork's own ff_data.)
5833 */
5834 const struct cat_fork *
5835 hfs_prepare_fork_for_update(filefork_t *ff,
5836 const struct cat_fork *cf,
5837 struct cat_fork *cf_buf,
5838 uint32_t block_size)
5839 {
5840 if (!ff)
5841 return NULL;
5842
5843 if (!cf)
5844 cf = &ff->ff_data;
5845 if (!cf_buf)
5846 cf_buf = &ff->ff_data;
5847
5848 off_t max_size = ff->ff_size;
5849
5850 // Check first invalid range
5851 if (!TAILQ_EMPTY(&ff->ff_invalidranges))
5852 max_size = TAILQ_FIRST(&ff->ff_invalidranges)->rl_start;
5853
5854 if (!ff->ff_unallocblocks && ff->ff_size <= max_size)
5855 return cf; // Nothing to do
5856
5857 if (ff->ff_blocks < ff->ff_unallocblocks) {
5858 panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
5859 ff->ff_blocks, ff->ff_unallocblocks);
5860 }
5861
5862 struct cat_fork *out = cf_buf;
5863
5864 if (out != cf)
5865 bcopy(cf, out, sizeof(*cf));
5866
5867 // Adjust cf_blocks for cf_vblocks
5868 out->cf_blocks -= out->cf_vblocks;
5869
5870 /*
5871 * Here we trim the size with the updated cf_blocks. This is
5872 * probably unnecessary now because the invalid ranges should
5873 * catch this (but that wasn't always the case).
5874 */
5875 off_t alloc_bytes = hfs_blk_to_bytes(out->cf_blocks, block_size);
5876 if (out->cf_size > alloc_bytes)
5877 out->cf_size = alloc_bytes;
5878
5879 // Trim cf_size to first invalid range
5880 if (out->cf_size > max_size)
5881 out->cf_size = max_size;
5882
5883 return out;
5884 }
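/*
 * Worked example (informational sketch only): suppose a fork reports
 * cf_size = 1,048,576 bytes, cf_blocks = 256, cf_vblocks = 16 on a volume
 * with a 4096-byte allocation block, and its first invalid range starts at
 * offset 524,288. The copy handed to cat_update then carries:
 *
 *     cf_blocks   = 256 - 16 = 240            // drop borrowed blocks
 *     alloc_bytes = 240 * 4096 = 983,040
 *     cf_size     = MIN(1,048,576, 983,040, 524,288) = 524,288
 *
 * so nothing beyond the valid on-disk data is advertised in the catalog.
 */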
5885
5886 /*
5887 * Update a cnode's on-disk metadata.
5888 *
5889 * The cnode must be locked exclusive. See declaration for possible
5890 * options.
5891 */
5892 int
5893 hfs_update(struct vnode *vp, int options)
5894 {
5895 struct cnode *cp = VTOC(vp);
5896 struct proc *p;
5897 const struct cat_fork *dataforkp = NULL;
5898 const struct cat_fork *rsrcforkp = NULL;
5899 struct cat_fork datafork;
5900 struct cat_fork rsrcfork;
5901 struct hfsmount *hfsmp;
5902 int lockflags;
5903 int error;
5904 uint32_t tstate = 0;
5905
5906 if (ISSET(cp->c_flag, C_NOEXISTS))
5907 return 0;
5908
5909 p = current_proc();
5910 hfsmp = VTOHFS(vp);
5911
5912 if (((vnode_issystem(vp) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))) ||
5913 hfsmp->hfs_catalog_vp == NULL){
5914 return (0);
5915 }
5916 if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (cp->c_mode == 0)) {
5917 CLR(cp->c_flag, C_MODIFIED | C_MINOR_MOD | C_NEEDS_DATEADDED);
5918 cp->c_touch_acctime = 0;
5919 cp->c_touch_chgtime = 0;
5920 cp->c_touch_modtime = 0;
5921 return (0);
5922 }
5923 if (kdebug_enable) {
5924 if (cp->c_touch_acctime || cp->c_atime != cp->c_attr.ca_atimeondisk)
5925 tstate |= DBG_HFS_UPDATE_ACCTIME;
5926 if (cp->c_touch_modtime)
5927 tstate |= DBG_HFS_UPDATE_MODTIME;
5928 if (cp->c_touch_chgtime)
5929 tstate |= DBG_HFS_UPDATE_CHGTIME;
5930
5931 if (cp->c_flag & C_MODIFIED)
5932 tstate |= DBG_HFS_UPDATE_MODIFIED;
5933 if (ISSET(options, HFS_UPDATE_FORCE))
5934 tstate |= DBG_HFS_UPDATE_FORCE;
5935 if (cp->c_flag & C_NEEDS_DATEADDED)
5936 tstate |= DBG_HFS_UPDATE_DATEADDED;
5937 if (cp->c_flag & C_MINOR_MOD)
5938 tstate |= DBG_HFS_UPDATE_MINOR;
5939 }
5940 hfs_touchtimes(hfsmp, cp);
5941
5942 if (!ISSET(cp->c_flag, C_MODIFIED | C_MINOR_MOD)
5943 && !hfs_should_save_atime(cp)) {
5944 // Nothing to update
5945 return 0;
5946 }
5947
5948 KDBG(HFSDBG_UPDATE | DBG_FUNC_START, kdebug_vnode(vp), tstate);
5949
5950 bool check_txn = false;
5951
5952 if (!ISSET(options, HFS_UPDATE_FORCE) && !ISSET(cp->c_flag, C_MODIFIED)) {
5953 /*
5954 * This must be a minor modification. If the current
5955 * transaction already has an update for this node, then we
5956 * bundle in the modification.
5957 */
5958 if (hfsmp->jnl
5959 && journal_current_txn(hfsmp->jnl) == cp->c_update_txn) {
5960 check_txn = true;
5961 } else {
5962 tstate |= DBG_HFS_UPDATE_SKIPPED;
5963 error = 0;
5964 goto exit;
5965 }
5966 }
5967
5968 if ((error = hfs_start_transaction(hfsmp)) != 0)
5969 goto exit;
5970
5971 if (check_txn
5972 && journal_current_txn(hfsmp->jnl) != cp->c_update_txn) {
5973 hfs_end_transaction(hfsmp);
5974 tstate |= DBG_HFS_UPDATE_SKIPPED;
5975 error = 0;
5976 goto exit;
5977 }
5978
5979 if (cp->c_datafork)
5980 dataforkp = &cp->c_datafork->ff_data;
5981 if (cp->c_rsrcfork)
5982 rsrcforkp = &cp->c_rsrcfork->ff_data;
5983
5984 /*
5985 * Modify the values passed to cat_update based on whether or not
5986 * the file has invalid ranges or borrowed blocks.
5987 */
5988 dataforkp = hfs_prepare_fork_for_update(cp->c_datafork, NULL, &datafork, hfsmp->blockSize);
5989 rsrcforkp = hfs_prepare_fork_for_update(cp->c_rsrcfork, NULL, &rsrcfork, hfsmp->blockSize);
5990
5991 if (__builtin_expect(kdebug_enable & KDEBUG_TRACE, 0)) {
5992 long dbg_parms[NUMPARMS];
5993 int dbg_namelen;
5994
5995 dbg_namelen = NUMPARMS * sizeof(long);
5996 vn_getpath(vp, (char *)dbg_parms, &dbg_namelen);
5997
5998 if (dbg_namelen < (int)sizeof(dbg_parms))
5999 memset((char *)dbg_parms + dbg_namelen, 0, sizeof(dbg_parms) - dbg_namelen);
6000
6001 kdebug_lookup_gen_events(dbg_parms, dbg_namelen, (void *)vp, TRUE);
6002 }
6003
6004 /*
6005 * Lock the Catalog b-tree file.
6006 */
6007 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
6008
6009 error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);
6010
6011 if (hfsmp->jnl)
6012 cp->c_update_txn = journal_current_txn(hfsmp->jnl);
6013
6014 hfs_systemfile_unlock(hfsmp, lockflags);
6015
6016 CLR(cp->c_flag, C_MODIFIED | C_MINOR_MOD);
6017
6018 hfs_end_transaction(hfsmp);
6019
6020 exit:
6021
6022 KDBG(HFSDBG_UPDATE | DBG_FUNC_END, kdebug_vnode(vp), tstate, error);
6023
6024 return error;
6025 }
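/*
 * Condensed sketch of the skip/bundle decision above (informational only):
 *
 *     if (!(options & HFS_UPDATE_FORCE) && !(cp->c_flag & C_MODIFIED)) {
 *         // minor modification: only worth flushing if it can piggy-back
 *         // on the journal transaction that already holds this cnode
 *         if (!hfsmp->jnl || journal_current_txn(hfsmp->jnl) != cp->c_update_txn)
 *             return 0;    // recorded as DBG_HFS_UPDATE_SKIPPED
 *     }
 */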
6026
6027 /*
6028 * Allocate a new node
6029 */
6030 int
6031 hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
6032 struct vnode_attr *vap, vfs_context_t ctx)
6033 {
6034 struct cnode *cp = NULL;
6035 struct cnode *dcp = NULL;
6036 struct vnode *tvp;
6037 struct hfsmount *hfsmp;
6038 struct cat_desc in_desc, out_desc;
6039 struct cat_attr attr;
6040 struct timeval tv;
6041 int lockflags;
6042 int error, started_tr = 0;
6043 enum vtype vnodetype;
6044 int mode;
6045 int newvnode_flags = 0;
6046 u_int32_t gnv_flags = 0;
6047 int protectable_target = 0;
6048 int nocache = 0;
6049 vnode_t old_doc_vp = NULL;
6050
6051 #if CONFIG_PROTECT
6052 struct cprotect *entry = NULL;
6053 int32_t cp_class = -1;
6054
6055 /*
6056 * By default, it's OK for AKS to override our target class preferences.
6057 */
6058 uint32_t keywrap_flags = CP_KEYWRAP_DIFFCLASS;
6059
6060 if (VATTR_IS_ACTIVE(vap, va_dataprotect_class)) {
6061 cp_class = (int32_t)vap->va_dataprotect_class;
6062 /*
6063 * Since the user specifically requested this target class be used,
6064 * we want to fail this creation operation if we cannot wrap to their
6065 * target class. The CP_KEYWRAP_DIFFCLASS bit says that it is OK to
6066 * use a different class than the one specified, so we turn that off
6067 * now.
6068 */
6069 keywrap_flags &= ~CP_KEYWRAP_DIFFCLASS;
6070 }
6071 int protected_mount = 0;
6072 #endif
6073
6074
6075 if ((error = hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
6076 return (error);
6077
6078 /* set the cnode pointer only after successfully acquiring lock */
6079 dcp = VTOC(dvp);
6080
6081 /* Don't allow creation of new entries in open-unlinked directories */
6082 if ((error = hfs_checkdeleted(dcp))) {
6083 hfs_unlock(dcp);
6084 return error;
6085 }
6086
6087 dcp->c_flag |= C_DIR_MODIFICATION;
6088
6089 hfsmp = VTOHFS(dvp);
6090
6091 *vpp = NULL;
6092 tvp = NULL;
6093 out_desc.cd_flags = 0;
6094 out_desc.cd_nameptr = NULL;
6095
6096 vnodetype = vap->va_type;
6097 if (vnodetype == VNON)
6098 vnodetype = VREG;
6099 mode = MAKEIMODE(vnodetype, vap->va_mode);
6100
6101 if (S_ISDIR (mode) || S_ISREG (mode)) {
6102 protectable_target = 1;
6103 }
6104
6105
6106 /* Check if we're out of usable disk space. */
6107 if ((hfs_freeblks(hfsmp, 1) == 0) && (vfs_context_suser(ctx) != 0)) {
6108 error = ENOSPC;
6109 goto exit;
6110 }
6111
6112 microtime(&tv);
6113
6114 /* Setup the default attributes */
6115 bzero(&attr, sizeof(attr));
6116 attr.ca_mode = mode;
6117 attr.ca_linkcount = 1;
6118 if (VATTR_IS_ACTIVE(vap, va_rdev)) {
6119 attr.ca_rdev = vap->va_rdev;
6120 }
6121 if (VATTR_IS_ACTIVE(vap, va_create_time)) {
6122 VATTR_SET_SUPPORTED(vap, va_create_time);
6123 attr.ca_itime = vap->va_create_time.tv_sec;
6124 } else {
6125 attr.ca_itime = tv.tv_sec;
6126 }
6127 #if CONFIG_HFS_STD
6128 if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
6129 attr.ca_itime += 3600; /* Same as what hfs_update does */
6130 }
6131 #endif
6132 attr.ca_atime = attr.ca_ctime = attr.ca_mtime = attr.ca_itime;
6133 attr.ca_atimeondisk = attr.ca_atime;
6134 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6135 VATTR_SET_SUPPORTED(vap, va_flags);
6136 attr.ca_flags = vap->va_flags;
6137 }
6138
6139 /*
6140 * HFS+ only: all files get ThreadExists
6141 * HFSX only: dirs get HasFolderCount
6142 */
6143 #if CONFIG_HFS_STD
6144 if (!(hfsmp->hfs_flags & HFS_STANDARD))
6145 #endif
6146 {
6147 if (vnodetype == VDIR) {
6148 if (hfsmp->hfs_flags & HFS_FOLDERCOUNT)
6149 attr.ca_recflags = kHFSHasFolderCountMask;
6150 } else {
6151 attr.ca_recflags = kHFSThreadExistsMask;
6152 }
6153 }
6154
6155 #if CONFIG_PROTECT
6156 if (cp_fs_protected(hfsmp->hfs_mp)) {
6157 protected_mount = 1;
6158 }
6159 /*
6160 * On a content-protected HFS+/HFSX filesystem, files and directories
6161 * cannot be created without atomically setting/creating the EA that
6162 * contains the protection class metadata and keys at the same time, in
6163 * the same transaction. As a result, pre-set the "EAs exist" flag
6164 * on the cat_attr for protectable catalog record creations. This will
6165 * cause the cnode creation routine in hfs_getnewvnode to mark the cnode
6166 * as having EAs.
6167 */
6168 if ((protected_mount) && (protectable_target)) {
6169 attr.ca_recflags |= kHFSHasAttributesMask;
6170 /* delay entering in the namecache */
6171 nocache = 1;
6172 }
6173 #endif
6174
6175
6176 /*
6177 * Add the date added to the item. See above, as
6178 * all of the dates are set to the itime.
6179 */
6180 hfs_write_dateadded (&attr, attr.ca_atime);
6181
6182 /* Initialize the gen counter to 1 */
6183 hfs_write_gencount(&attr, (uint32_t)1);
6184
6185 attr.ca_uid = vap->va_uid;
6186 attr.ca_gid = vap->va_gid;
6187 VATTR_SET_SUPPORTED(vap, va_mode);
6188 VATTR_SET_SUPPORTED(vap, va_uid);
6189 VATTR_SET_SUPPORTED(vap, va_gid);
6190
6191 #if QUOTA
6192 /* check to see if this node's creation would cause us to go over
6193 * quota. If so, abort this operation.
6194 */
6195 if (hfsmp->hfs_flags & HFS_QUOTAS) {
6196 if ((error = hfs_quotacheck(hfsmp, 1, attr.ca_uid, attr.ca_gid,
6197 vfs_context_ucred(ctx)))) {
6198 goto exit;
6199 }
6200 }
6201 #endif
6202
6203
6204 /* Tag symlinks with a type and creator. */
6205 if (vnodetype == VLNK) {
6206 struct FndrFileInfo *fip;
6207
6208 fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
6209 fip->fdType = SWAP_BE32(kSymLinkFileType);
6210 fip->fdCreator = SWAP_BE32(kSymLinkCreator);
6211 }
6212
6213 /* Setup the descriptor */
6214 in_desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
6215 in_desc.cd_namelen = cnp->cn_namelen;
6216 in_desc.cd_parentcnid = dcp->c_fileid;
6217 in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
6218 in_desc.cd_hint = dcp->c_childhint;
6219 in_desc.cd_encoding = 0;
6220
6221 #if CONFIG_PROTECT
6222 /*
6223 * To preserve file creation atomicity with regards to the content protection EA,
6224 * we must create the file in the catalog and then write out its EA in the same
6225 * transaction.
6226 *
6227 * We only denote the target class in this EA; key generation is not completed
6228 * until the file has been inserted into the catalog and will be done
6229 * in a separate transaction.
6230 */
6231 if ((protected_mount) && (protectable_target)) {
6232 error = cp_setup_newentry(hfsmp, dcp, cp_class, attr.ca_mode, &entry);
6233 if (error) {
6234 goto exit;
6235 }
6236 }
6237 #endif
6238
6239 if ((error = hfs_start_transaction(hfsmp)) != 0) {
6240 goto exit;
6241 }
6242 started_tr = 1;
6243
6244 // have to also lock the attribute file because cat_create() needs
6245 // to check that any fileID it wants to use does not have orphaned
6246 // attributes in it.
6247 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
6248 cnid_t new_id;
6249
6250 /* Reserve some space in the Catalog file. */
6251 if ((error = cat_preflight(hfsmp, CAT_CREATE, NULL, 0))) {
6252 hfs_systemfile_unlock(hfsmp, lockflags);
6253 goto exit;
6254 }
6255
6256 if ((error = cat_acquire_cnid(hfsmp, &new_id))) {
6257 hfs_systemfile_unlock (hfsmp, lockflags);
6258 goto exit;
6259 }
6260
6261 error = cat_create(hfsmp, new_id, &in_desc, &attr, &out_desc);
6262 if (error == 0) {
6263 /* Update the parent directory */
6264 dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
6265 dcp->c_entries++;
6266
6267 if (vnodetype == VDIR) {
6268 INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
6269 }
6270 dcp->c_dirchangecnt++;
6271 hfs_incr_gencount(dcp);
6272
6273 dcp->c_touch_chgtime = dcp->c_touch_modtime = true;
6274 dcp->c_flag |= C_MODIFIED;
6275
6276 hfs_update(dcp->c_vp, 0);
6277
6278 #if CONFIG_PROTECT
6279 /*
6280 * If we are creating a content protected file, now is when
6281 * we create the EA. We must create it in the same transaction
6282 * that creates the file. We can also guarantee that the file
6283 * MUST exist because we are still holding the catalog lock
6284 * at this point.
6285 */
6286 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
6287 error = cp_setxattr (NULL, entry, hfsmp, attr.ca_fileid, XATTR_CREATE);
6288
6289 if (error) {
6290 int delete_err;
6291 /*
6292 * If we fail the EA creation, then we need to delete the file.
6293 * Luckily, we are still holding all of the right locks.
6294 */
6295 delete_err = cat_delete (hfsmp, &out_desc, &attr);
6296 if (delete_err == 0) {
6297 /* Update the parent directory */
6298 if (dcp->c_entries > 0)
6299 dcp->c_entries--;
6300 dcp->c_dirchangecnt++;
6301 dcp->c_ctime = tv.tv_sec;
6302 dcp->c_mtime = tv.tv_sec;
6303 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
6304 }
6305
6306 /* Emit EINVAL if we fail to create EA*/
6307 error = EINVAL;
6308 }
6309 }
6310 #endif
6311 }
6312 hfs_systemfile_unlock(hfsmp, lockflags);
6313 if (error)
6314 goto exit;
6315
6316 uint32_t txn = hfsmp->jnl ? journal_current_txn(hfsmp->jnl) : 0;
6317
6318 /* Invalidate negative cache entries in the directory */
6319 if (dcp->c_flag & C_NEG_ENTRIES) {
6320 cache_purge_negatives(dvp);
6321 dcp->c_flag &= ~C_NEG_ENTRIES;
6322 }
6323
6324 hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
6325 (dcp->c_cnid == kHFSRootFolderID));
6326
6327 // XXXdbg
6328 // have to end the transaction here before we call hfs_getnewvnode()
6329 // because that can cause us to try and reclaim a vnode on a different
6330 // file system which could cause us to start a transaction which can
6331 // deadlock with someone on that other file system (since we could be
6332 // holding two transaction locks as well as various vnodes and we did
6333 // not obtain the locks on them in the proper order).
6334 //
6335 // NOTE: this means that if the quota check fails or we have to update
6336 // the change time on a block-special device that those changes
6337 // will happen as part of independent transactions.
6338 //
6339 if (started_tr) {
6340 hfs_end_transaction(hfsmp);
6341 started_tr = 0;
6342 }
6343
6344 #if CONFIG_PROTECT
6345 /*
6346 * At this point, we must have encountered success with writing the EA.
6347 * Destroy our temporary cprotect (which had no keys).
6348 */
6349
6350 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
6351 cp_entry_destroy (hfsmp, entry);
6352 entry = NULL;
6353 }
6354 #endif
6355 gnv_flags |= GNV_CREATE;
6356 if (nocache) {
6357 gnv_flags |= GNV_NOCACHE;
6358 }
6359
6360 /*
6361 * Create a vnode for the object just created.
6362 *
6363 * NOTE: Maintaining the cnode lock on the parent directory is important,
6364 * as it prevents race conditions where other threads want to look up entries
6365 * in the directory and/or add things as we are in the process of creating
6366 * the vnode below. However, this has the potential for causing a
6367 * double lock panic when dealing with shadow files on a HFS boot partition.
6368 * The panic could occur if we are not cleaning up after ourselves properly
6369 * when done with a shadow file or in the error cases. The error would occur if we
6370 * try to create a new vnode, and then end up reclaiming another shadow vnode to
6371 * create the new one. However, if everything is working properly, this should
6372 * be a non-issue as we would never enter that reclaim codepath.
6373 *
6374 * The cnode is locked on successful return.
6375 */
6376 error = hfs_getnewvnode(hfsmp, dvp, cnp, &out_desc, gnv_flags, &attr,
6377 NULL, &tvp, &newvnode_flags);
6378 if (error)
6379 goto exit;
6380
6381 cp = VTOC(tvp);
6382
6383 cp->c_update_txn = txn;
6384
6385 struct doc_tombstone *ut;
6386 ut = doc_tombstone_get();
6387 if ( ut->t_lastop_document_id != 0
6388 && ut->t_lastop_parent == dvp
6389 && ut->t_lastop_parent_vid == vnode_vid(dvp)
6390 && strcmp((char *)ut->t_lastop_filename, (const char *)cp->c_desc.cd_nameptr) == 0) {
6391 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
6392
6393 //printf("CREATE: preserving doc-id %lld on %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
6394 fip->document_id = (uint32_t)(ut->t_lastop_document_id & 0xffffffff);
6395
6396 cp->c_bsdflags |= UF_TRACKED;
6397 cp->c_flag |= C_MODIFIED;
6398
6399 if ((error = hfs_start_transaction(hfsmp)) == 0) {
6400 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
6401
6402 (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
6403
6404 hfs_systemfile_unlock (hfsmp, lockflags);
6405 (void) hfs_end_transaction(hfsmp);
6406 }
6407
6408 doc_tombstone_clear(ut, &old_doc_vp);
6409 } else if (ut->t_lastop_document_id != 0) {
6410 int len = cnp->cn_namelen;
6411 if (len == 0) {
6412 len = strlen(cnp->cn_nameptr);
6413 }
6414
6415 if (doc_tombstone_should_ignore_name(cnp->cn_nameptr, cnp->cn_namelen)) {
6416 // printf("CREATE: not clearing tombstone because %s is a temp name.\n", cnp->cn_nameptr);
6417 } else {
6418 // Clear the tombstone because the thread is not recreating the same path
6419 // printf("CREATE: clearing tombstone because %s is NOT a temp name.\n", cnp->cn_nameptr);
6420 doc_tombstone_clear(ut, NULL);
6421 }
6422 }
6423
6424 if ((hfsmp->hfs_flags & HFS_CS_HOTFILE_PIN) && (vnode_isfastdevicecandidate(dvp) && !vnode_isautocandidate(dvp))) {
6425
6426 //printf("hfs: flagging %s (fileid: %d) as VFASTDEVCANDIDATE (dvp name: %s)\n",
6427 // cnp->cn_nameptr ? cnp->cn_nameptr : "<NONAME>",
6428 // cp->c_fileid,
6429 // dvp->v_name ? dvp->v_name : "no-dir-name");
6430
6431 //
6432 // On new files we set the FastDevCandidate flag so that
6433 // any new blocks allocated to it will be pinned.
6434 //
6435 cp->c_attr.ca_recflags |= kHFSFastDevCandidateMask;
6436 vnode_setfastdevicecandidate(tvp);
6437
6438 //
6439 // properly inherit auto-cached flags
6440 //
6441 if (vnode_isautocandidate(dvp)) {
6442 cp->c_attr.ca_recflags |= kHFSAutoCandidateMask;
6443 vnode_setautocandidate(tvp);
6444 }
6445
6446
6447 //
6448 // We also want to add it to the hotfile adoption list so
6449 // that it will eventually land in the hotfile btree
6450 //
6451 (void) hfs_addhotfile(tvp);
6452 }
6453
6454 *vpp = tvp;
6455
6456 #if CONFIG_PROTECT
6457 /*
6458 * Now that we have a vnode-in-hand, generate keys for this namespace item.
6459 * If we fail to create the keys, then attempt to delete the item from the
6460 * namespace. If we can't delete the item, that's not desirable but also not fatal.
6461 * All of the places which deal with restoring/unwrapping keys must also be
6462 * prepared to encounter an entry that does not have keys.
6463 */
6464 if ((protectable_target) && (protected_mount)) {
6465 struct cprotect *keyed_entry = NULL;
6466
6467 if (cp->c_cpentry == NULL) {
6468 panic ("hfs_makenode: no cpentry for cnode (%p)", cp);
6469 }
6470
6471 error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), keywrap_flags, &keyed_entry);
6472 if (error == 0) {
6473 /*
6474 * Upon success, the keys were generated and written out.
6475 * Update the cp pointer in the cnode.
6476 */
6477 cp_replace_entry (hfsmp, cp, keyed_entry);
6478 if (nocache) {
6479 cache_enter (dvp, tvp, cnp);
6480 }
6481 }
6482 else {
6483 /* If key creation OR the setxattr failed, emit EPERM to userland */
6484 error = EPERM;
6485
6486 /*
6487 * Beware! This slightly violates the lock ordering for the
6488 * cnode/vnode 'tvp'. Ordinarily, you must acquire the truncate lock
6489 * which guards file size changes before acquiring the normal cnode lock
6490 * and calling hfs_removefile on an item.
6491 *
6492 * However, in this case, we are still holding the directory lock so
6493 * 'tvp' is not lookup-able and it was a newly created vnode so it
6494 * cannot have any content yet. The only reason we are initiating
6495 * the removefile is because we could not generate content protection keys
6496 * for this namespace item. Note also that we pass a '1' in the allow_dirs
6497 * argument for hfs_removefile because we may be creating a directory here.
6498 *
6499 * All this to say that while it is technically a violation it is
6500 * impossible to race with another thread for this cnode so it is safe.
6501 */
6502 int err = hfs_removefile (dvp, tvp, cnp, 0, 0, 1, NULL, 0);
6503 if (err) {
6504 printf("hfs_makenode: removefile failed (%d) for CP entry %p\n", err, tvp);
6505 }
6506
6507 /* Release the cnode lock and mark the vnode for termination */
6508 hfs_unlock (cp);
6509 err = vnode_recycle (tvp);
6510 if (err) {
6511 printf("hfs_makenode: vnode_recycle failed (%d) for CP entry %p\n", err, tvp);
6512 }
6513
6514 /* Drop the iocount on the new vnode to force reclamation/recycling */
6515 vnode_put (tvp);
6516 cp = NULL;
6517 *vpp = NULL;
6518 }
6519 }
6520 #endif
6521
6522 #if QUOTA
6523 /*
6524 * Once we create this vnode, we need to initialize its quota data
6525 * structures, if necessary. We know that it is OK to just go ahead and
6526 * initialize because we've already validated earlier (through the hfs_quotacheck
6527 * function) to see if creating this cnode/vnode would cause us to go over quota.
6528 */
6529 if (hfsmp->hfs_flags & HFS_QUOTAS) {
6530 if (cp) {
6531 /* cp could have been zeroed earlier */
6532 (void) hfs_getinoquota(cp);
6533 }
6534 }
6535 #endif
6536
6537 exit:
6538 cat_releasedesc(&out_desc);
6539
6540 #if CONFIG_PROTECT
6541 /*
6542 * We may have jumped here in error-handling various situations above.
6543 * If we haven't already dumped the temporary CP used to initialize
6544 * the file atomically, then free it now. cp_entry_destroy should null
6545 * out the pointer if it was called already.
6546 */
6547 if (entry) {
6548 cp_entry_destroy (hfsmp, entry);
6549 entry = NULL;
6550 }
6551 #endif
6552
6553 /*
6554 * Make sure we release cnode lock on dcp.
6555 */
6556 if (dcp) {
6557 dcp->c_flag &= ~C_DIR_MODIFICATION;
6558 wakeup((caddr_t)&dcp->c_flag);
6559
6560 hfs_unlock(dcp);
6561 }
6562 ino64_t file_id = 0;
6563 if (error == 0 && cp != NULL) {
6564 file_id = cp->c_fileid;
6565 hfs_unlock(cp);
6566 }
6567 if (started_tr) {
6568 hfs_end_transaction(hfsmp);
6569 started_tr = 0;
6570 }
6571
6572 if (old_doc_vp) {
6573 cnode_t *ocp = VTOC(old_doc_vp);
6574 hfs_lock_always(ocp, HFS_EXCLUSIVE_LOCK);
6575 struct FndrExtendedFileInfo *ofip = (struct FndrExtendedFileInfo *)((char *)&ocp->c_attr.ca_finderinfo + 16);
6576
6577 const uint32_t doc_id = ofip->document_id;
6578 const ino64_t old_file_id = ocp->c_fileid;
6579
6580 // printf("clearing doc-id from ino %d\n", ocp->c_desc.cd_cnid);
6581 ofip->document_id = 0;
6582 ocp->c_bsdflags &= ~UF_TRACKED;
6583 ocp->c_flag |= C_MODIFIED;
6584
6585 hfs_unlock(ocp);
6586 vnode_put(old_doc_vp);
6587
6588 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
6589 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
6590 FSE_ARG_INO, old_file_id, // src inode #
6591 FSE_ARG_INO, file_id, // dst inode #
6592 FSE_ARG_INT32, doc_id,
6593 FSE_ARG_DONE);
6594 }
6595
6596 return (error);
6597 }
6598
6599
6600 /*
6601 * hfs_vgetrsrc acquires a resource fork vnode corresponding to the
6602 * cnode that is found in 'vp'. The cnode should be locked upon entry
6603 * and will be returned locked, but it may be dropped temporarily.
6604 *
6605 * If the resource fork vnode does not exist, HFS will attempt to acquire an
6606 * empty (uninitialized) vnode from VFS so as to avoid deadlocks with
6607 * jetsam. If we let the normal getnewvnode code produce the vnode for us
6608 * we would be doing so while holding the cnode lock of our cnode.
6609 *
6610 * On success, *rvpp will hold the resource fork vnode with an
6611 * iocount. *Don't* forget the vnode_put.
6612 */
6613 int
6614 hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp)
6615 {
6616 struct vnode *rvp = NULLVP;
6617 struct vnode *empty_rvp = NULLVP;
6618 struct vnode *dvp = NULLVP;
6619 struct cnode *cp = VTOC(vp);
6620 int error;
6621 int vid;
6622
6623 if (vnode_vtype(vp) == VDIR) {
6624 return EINVAL;
6625 }
6626
6627 restart:
6628 /* Attempt to use existing vnode */
6629 if ((rvp = cp->c_rsrc_vp)) {
6630 vid = vnode_vid(rvp);
6631
6632 // vnode_getwithvid can block so we need to drop the cnode lock
6633 hfs_unlock(cp);
6634
6635 error = vnode_getwithvid(rvp, vid);
6636
6637 hfs_lock_always(cp, HFS_EXCLUSIVE_LOCK);
6638
6639 /*
6640 * When our lock was relinquished, the resource fork
6641 * could have been recycled. Check for this and try
6642 * again.
6643 */
6644 if (error == ENOENT)
6645 goto restart;
6646
6647 if (error) {
6648 const char * name = (const char *)VTOC(vp)->c_desc.cd_nameptr;
6649
6650 if (name)
6651 printf("hfs_vgetrsrc: couldn't get resource"
6652 " fork for %s, vol=%s, err=%d\n", name, hfsmp->vcbVN, error);
6653 return (error);
6654 }
6655 } else {
6656 struct cat_fork rsrcfork;
6657 struct componentname cn;
6658 struct cat_desc *descptr = NULL;
6659 struct cat_desc to_desc;
6660 char delname[32];
6661 int lockflags;
6662 int newvnode_flags = 0;
6663
6664 /*
6665 * In this case, we don't currently see a resource fork vnode attached
6666 * to this cnode. In most cases, we were called from a read-only VNOP
6667 * like getattr, so it should be safe to drop the cnode lock and then
6668 * re-acquire it.
6669 *
6670 * Here, we drop the lock so that we can acquire an empty/husk
6671 * vnode so that we don't deadlock against jetsam.
6672 *
6673 * It does not currently appear possible to hold the truncate lock via
6674 * FS re-entrancy when we get to this point. (8/2014)
6675 */
6676 hfs_unlock (cp);
6677
6678 error = vnode_create_empty (&empty_rvp);
6679
6680 hfs_lock_always (cp, HFS_EXCLUSIVE_LOCK);
6681
6682 if (error) {
6683 /* If acquiring the 'empty' vnode failed, then nothing to clean up */
6684 return error;
6685 }
6686
6687 /*
6688 * We could have raced with another thread here while we dropped our cnode
6689 * lock. See if the cnode now has a resource fork vnode and restart if appropriate.
6690 *
6691 * Note: We just released the cnode lock, so there is a possibility that the
6692 * cnode that we just acquired has been deleted or even removed from disk
6693 * completely, though this is unlikely. If the file is open-unlinked, the
6694 * check below will resolve it for us. If it has been completely
6695 * removed (even from the catalog!), then when we examine the catalog
6696 * directly, below, while holding the catalog lock, we will not find the
6697 * item and we can fail out properly.
6698 */
6699 if (cp->c_rsrc_vp) {
6700 /* Drop the empty vnode before restarting */
6701 vnode_put (empty_rvp);
6702 empty_rvp = NULL;
6703 rvp = NULL;
6704 goto restart;
6705 }
6706
6707 /*
6708 * hfs_vgetrsrc may be invoked for a cnode that has already been marked
6709 * C_DELETED. This is because we need to continue to provide rsrc
6710 * fork access to open-unlinked files. In this case, build a fake descriptor
6711 * like in hfs_removefile. If we don't do this, buildkey will fail in
6712 * cat_lookup because this cnode has no name in its descriptor.
6713 */
6714 if ((cp->c_flag & C_DELETED ) && (cp->c_desc.cd_namelen == 0)) {
6715 bzero (&to_desc, sizeof(to_desc));
6716 bzero (delname, 32);
6717 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
6718 to_desc.cd_nameptr = (const u_int8_t*) delname;
6719 to_desc.cd_namelen = strlen(delname);
6720 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
6721 to_desc.cd_flags = 0;
6722 to_desc.cd_cnid = cp->c_cnid;
6723
6724 descptr = &to_desc;
6725 }
6726 else {
6727 descptr = &cp->c_desc;
6728 }
6729
6730
6731 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
6732
6733 /*
6734 * We call cat_idlookup (instead of cat_lookup) below because we can't
6735 * trust the descriptor in the provided cnode for lookups at this point.
6736 * Between the time of the original lookup of this vnode and now, the
6737 * descriptor could have gotten swapped or replaced. If this occurred,
6738 * the parent/name combo originally desired may not necessarily be provided
6739 * if we use the descriptor. Even worse, if the vnode represents
6740 * a hardlink, we could have removed one of the links from the namespace
6741 * but left the descriptor alone, since hfs_unlink does not invalidate
6742 * the descriptor in the cnode if other links still point to the inode.
6743 *
6744 * Consider the following (slightly contrived) scenario:
6745 * /tmp/a <--> /tmp/b (hardlinks).
6746 * 1. Thread A: open rsrc fork on /tmp/b.
6747 * 1a. Thread A: does lookup, goes out to lunch right before calling getnamedstream.
6748 * 2. Thread B does 'mv /foo/b /tmp/b'
6749 * 2a. Thread B succeeds.
6750 * 3. Thread A comes back and wants rsrc fork info for /tmp/b.
6751 *
6752 * Even though the hardlink backing /tmp/b is now eliminated, the descriptor
6753 * is not removed/updated during the unlink process. So, if you were to
6754 * do a lookup on /tmp/b, you'd acquire an entirely different record's resource
6755 * fork.
6756 *
6757 * As a result, we use the fileid, which should be invariant for the lifetime
6758 * of the cnode (possibly barring calls to exchangedata).
6759 *
6760 * Addendum: We can't do the above for HFS standard since we aren't guaranteed to
6761 * have thread records for files. They were only required for directories. So
6762 * we need to do the lookup with the catalog name. This is OK since hardlinks were
6763 * never allowed on HFS standard.
6764 */
6765
6766 /* Get resource fork data */
6767 #if CONFIG_HFS_STD
6768 if (ISSET(hfsmp->hfs_flags, HFS_STANDARD)) {
6769 /*
6770 * HFS standard only:
6771 *
6772 * Get the resource fork for this item with a cat_lookup call, but do not
6773 * force a case lookup since HFS standard is case-insensitive only. We
6774 * don't want the descriptor; just the fork data here. If we tried to
6775 * do a ID lookup (via thread record -> catalog record), then we might fail
6776 * prematurely since, as noted above, thread records were not strictly required
6777 * on files in HFS.
6778 */
6779 error = cat_lookup (hfsmp, descptr, 1, 0, (struct cat_desc*)NULL,
6780 (struct cat_attr*)NULL, &rsrcfork, NULL);
6781 } else
6782 #endif
6783 {
6784 error = cat_idlookup (hfsmp, cp->c_fileid, 0, 1, NULL, NULL, &rsrcfork);
6785 }
6786
6787 hfs_systemfile_unlock(hfsmp, lockflags);
6788 if (error) {
6789 /* Drop our 'empty' vnode ! */
6790 vnode_put (empty_rvp);
6791 return (error);
6792 }
6793 /*
6794 * Supply hfs_getnewvnode with a component name.
6795 */
6796 cn.cn_pnbuf = NULL;
6797 if (descptr->cd_nameptr) {
6798 void *buf = hfs_malloc(MAXPATHLEN);
6799
6800 cn = (struct componentname){
6801 .cn_nameiop = LOOKUP,
6802 .cn_flags = ISLASTCN,
6803 .cn_pnlen = MAXPATHLEN,
6804 .cn_pnbuf = buf,
6805 .cn_nameptr = buf,
6806 .cn_namelen = snprintf(buf, MAXPATHLEN,
6807 "%s%s", descptr->cd_nameptr,
6808 _PATH_RSRCFORKSPEC)
6809 };
6810
6811 // Should never happen because cn.cn_nameptr won't ever be long...
6812 if (cn.cn_namelen >= MAXPATHLEN) {
6813 hfs_free(buf, MAXPATHLEN);
6814 /* Drop our 'empty' vnode ! */
6815 vnode_put (empty_rvp);
6816 return ENAMETOOLONG;
6817
6818 }
6819 }
6820 dvp = vnode_getparent(vp);
6821
6822 /*
6823 * We are about to call hfs_getnewvnode and pass in the vnode that we acquired
6824 * earlier when we were not holding any locks. The semantics of GNV_USE_VP require that
6825 * either hfs_getnewvnode consume the vnode and vend it back to us, properly initialized,
6826 * or it will consume/dispose of it properly if it errors out.
6827 */
6828 rvp = empty_rvp;
6829
6830 error = hfs_getnewvnode(hfsmp, dvp, cn.cn_pnbuf ? &cn : NULL,
6831 descptr, (GNV_WANTRSRC | GNV_SKIPLOCK | GNV_USE_VP),
6832 &cp->c_attr, &rsrcfork, &rvp, &newvnode_flags);
6833
6834 if (dvp)
6835 vnode_put(dvp);
6836 hfs_free(cn.cn_pnbuf, MAXPATHLEN);
6837 if (error)
6838 return (error);
6839 } /* End 'else' for rsrc fork not existing */
6840
6841 *rvpp = rvp;
6842 return (0);
6843 }
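/*
 * Typical caller pattern (minimal sketch per the contract above; error
 * handling elided, and taking the lock exclusive here is an assumption --
 * the contract only requires the cnode to be locked):
 *
 *     struct vnode *rvp = NULLVP;
 *     hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
 *     error = hfs_vgetrsrc(VTOHFS(vp), vp, &rvp);
 *     hfs_unlock(VTOC(vp));
 *     if (error == 0) {
 *         // ... use the resource fork vnode ...
 *         vnode_put(rvp);    // drop the iocount hfs_vgetrsrc vended
 *     }
 */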
6844
6845 /*
6846 * Wrapper for special device reads
6847 */
6848 int
6849 hfsspec_read(struct vnop_read_args *ap)
6850 {
6851 /*
6852 * Set access flag.
6853 */
6854 cnode_t *cp = VTOC(ap->a_vp);
6855
6856 if (cp)
6857 cp->c_touch_acctime = TRUE;
6858
6859 return spec_read(ap);
6860 }
6861
6862 /*
6863 * Wrapper for special device writes
6864 */
6865 int
6866 hfsspec_write(struct vnop_write_args *ap)
6867 {
6868 /*
6869 * Set update and change flags.
6870 */
6871 cnode_t *cp = VTOC(ap->a_vp);
6872
6873 if (cp) {
6874 cp->c_touch_chgtime = TRUE;
6875 cp->c_touch_modtime = TRUE;
6876 }
6877
6878 return spec_write(ap);
6879 }
6880
6881 /*
6882 * Wrapper for special device close
6883 *
6884 * Update the times on the cnode then do device close.
6885 */
6886 int
6887 hfsspec_close(struct vnop_close_args *ap)
6888 {
6889 struct vnode *vp = ap->a_vp;
6890 cnode_t *cp = VTOC(vp);
6891
6892 if (cp && vnode_isinuse(ap->a_vp, 0)) {
6893 if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
6894 hfs_touchtimes(VTOHFS(vp), cp);
6895 hfs_unlock(cp);
6896 }
6897 }
6898 return spec_close(ap);
6899 }
6900
6901 #if FIFO
6902 /*
6903 * Wrapper for fifo reads
6904 */
6905 static int
6906 hfsfifo_read(struct vnop_read_args *ap)
6907 {
6908 /*
6909 * Set access flag.
6910 */
6911 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
6912 return fifo_read(ap);
6913 }
6914
6915 /*
6916 * Wrapper for fifo writes
6917 */
6918 static int
6919 hfsfifo_write(struct vnop_write_args *ap)
6920 {
6921 /*
6922 * Set update and change flags.
6923 */
6924 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
6925 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
6926 return fifo_write(ap);
6927 }
6928
6929 /*
6930 * Wrapper for fifo close
6931 *
6932 * Update the times on the cnode then do device close.
6933 */
6934 static int
6935 hfsfifo_close(struct vnop_close_args *ap)
6936 {
6937 struct vnode *vp = ap->a_vp;
6938 struct cnode *cp;
6939
6940 if (vnode_isinuse(ap->a_vp, 1)) {
6941 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
6942 cp = VTOC(vp);
6943 hfs_touchtimes(VTOHFS(vp), cp);
6944 hfs_unlock(cp);
6945 }
6946 }
6947 return fifo_close(ap);
6948 }
6949
6950
6951 #endif /* FIFO */
6952
6953 /*
6954 * Getter for the document_id.
6955 * The document_id is stored in FndrExtendedFileInfo/FndrExtendedDirInfo.
6956 */
6957 static u_int32_t
6958 hfs_get_document_id_internal(const uint8_t *finderinfo, mode_t mode)
6959 {
6960 const uint8_t *finfo = NULL;
6961 u_int32_t doc_id = 0;
6962
6963 /* advance past the classic FinderInfo to the extended info */
6964 finfo = finderinfo + 16;
6965
6966 if (S_ISDIR(mode) || S_ISREG(mode)) {
6967 const struct FndrExtendedFileInfo *extinfo = (const struct FndrExtendedFileInfo *)finfo;
6968 doc_id = extinfo->document_id;
6969 }
6970
6971 return doc_id;
6972 }
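/*
 * Layout sketch implied by the "finderinfo + 16" above (assumes the usual
 * 32-byte Finder info blob carried in the catalog record):
 *
 *     bytes  0..15   FndrFileInfo / FndrDirInfo   (classic Finder info)
 *     bytes 16..31   FndrExtendedFileInfo / FndrExtendedDirInfo, whose
 *                    first 32-bit field is the document_id returned here
 */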
6973
6974
6975 /* getter(s) for document id */
6976 u_int32_t
6977 hfs_get_document_id(struct cnode *cp)
6978 {
6979 return (hfs_get_document_id_internal((u_int8_t*)cp->c_finderinfo,
6980 cp->c_attr.ca_mode));
6981 }
6982
6983 /* If you have finderinfo and mode, you can use this */
6984 u_int32_t
6985 hfs_get_document_id_from_blob(const uint8_t *finderinfo, mode_t mode)
6986 {
6987 return (hfs_get_document_id_internal(finderinfo, mode));
6988 }
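/*
 * A userspace sketch (not part of this file) of fetching the same value
 * these getters return, via getattrlist(2) with ATTR_CMN_DOCUMENT_ID
 * (assumed to be available in <sys/attr.h> on this platform). The reply
 * layout below assumes only that single attribute is requested; error
 * handling is minimal.
 */
#include <string.h>
#include <sys/attr.h>
#include <unistd.h>

struct doc_id_reply {
	u_int32_t	length;		/* total size of returned attributes */
	u_int32_t	document_id;	/* ATTR_CMN_DOCUMENT_ID value */
} __attribute__((packed));

int
get_document_id_example(const char *path, u_int32_t *doc_id)
{
	struct attrlist al;
	struct doc_id_reply reply;

	memset(&al, 0, sizeof(al));
	al.bitmapcount = ATTR_BIT_MAP_COUNT;
	al.commonattr = ATTR_CMN_DOCUMENT_ID;

	if (getattrlist(path, &al, &reply, sizeof(reply), 0) != 0)
		return -1;

	*doc_id = reply.document_id;
	return 0;
}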
6989
6990 /*
6991 * Synchronize a file's in-core state with that on disk.
6992 */
6993 int
6994 hfs_vnop_fsync(struct vnop_fsync_args *ap)
6995 {
6996 struct vnode* vp = ap->a_vp;
6997 int error;
6998
6999 /* Note: We check the hfs flags instead of the vfs mount flag because,
7000 * during a read-only to read-write update, hfs marks itself read-write
7001 * much earlier than the vfs does; checking hfs avoids skipping writes
7002 * such as zero'ing out unused nodes and creating the hotfiles btree.
7003 */
7004 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) {
7005 return 0;
7006 }
7007
7008 /*
7009 * No need to call cp_handle_vnop to resolve fsync(). Any dirty data
7010 * should have caused the keys to be unwrapped at the time the data was
7011 * put into the UBC, either at mmap/pagein/read-write time. If anything
7012 * did slip by, the strategy routine will auto-resolve it for us.
7013 *
7014 * We also need to allow ENOENT lock errors, since the unlink system
7015 * call can invoke VNOP_FSYNC during vclean.
7016 */
7017 error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
7018 if (error)
7019 return (0);
7020
7021 error = hfs_fsync(vp, ap->a_waitfor, 0, vfs_context_proc(ap->a_context));
7022
7023 hfs_unlock(VTOC(vp));
7024 return (error);
7025 }
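/*
 * A userspace sketch (not part of this file) of the two flush strengths an
 * application can request: plain fsync(2), which reaches this vnop, and
 * fcntl(F_FULLFSYNC), the documented macOS request to push data all the way
 * to stable storage. The fallback policy below is illustrative only.
 */
#include <fcntl.h>
#include <unistd.h>

int
durable_write_example(int fd, const void *buf, size_t len)
{
	if (write(fd, buf, len) < 0)
		return -1;

	/* Request a full flush; fall back to fsync() if the fs does not support it. */
	if (fcntl(fd, F_FULLFSYNC) != 0)
		return fsync(fd);

	return 0;
}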
7026
7027 int (**hfs_vnodeop_p)(void *);
7028
7029 #define VOPFUNC int (*)(void *)
7030
7031
7032 #if CONFIG_HFS_STD
7033 int (**hfs_std_vnodeop_p) (void *);
7034 static int hfs_readonly_op (__unused void* ap) { return (EROFS); }
7035
7036 /*
7037 * In 10.6 and forward, HFS Standard is read-only and deprecated. The vnop table below
7038 * is used for HFS Standard mounts to block out operations that would modify the file system.
7039 */
7040
7041 struct vnodeopv_entry_desc hfs_standard_vnodeop_entries[] = {
7042 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7043 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
7044 { &vnop_create_desc, (VOPFUNC)hfs_readonly_op }, /* create (READONLY) */
7045 { &vnop_mknod_desc, (VOPFUNC)hfs_readonly_op }, /* mknod (READONLY) */
7046 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
7047 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
7048 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7049 { &vnop_setattr_desc, (VOPFUNC)hfs_readonly_op }, /* setattr */
7050 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
7051 { &vnop_write_desc, (VOPFUNC)hfs_readonly_op }, /* write (READONLY) */
7052 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
7053 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
7054 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
7055 { &vnop_exchange_desc, (VOPFUNC)hfs_readonly_op }, /* exchange (READONLY) */
7056 { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */
7057 { &vnop_fsync_desc, (VOPFUNC)hfs_readonly_op}, /* fsync (READONLY) */
7058 { &vnop_remove_desc, (VOPFUNC)hfs_readonly_op }, /* remove (READONLY) */
7059 { &vnop_link_desc, (VOPFUNC)hfs_readonly_op }, /* link (READONLY) */
7060 { &vnop_rename_desc, (VOPFUNC)hfs_readonly_op }, /* rename (READONLY) */
7061 { &vnop_mkdir_desc, (VOPFUNC)hfs_readonly_op }, /* mkdir (READONLY) */
7062 { &vnop_rmdir_desc, (VOPFUNC)hfs_readonly_op }, /* rmdir (READONLY) */
7063 { &vnop_symlink_desc, (VOPFUNC)hfs_readonly_op }, /* symlink (READONLY) */
7064 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
7065 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
7066 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
7067 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7068 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7069 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
7070 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
7071 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7072 { &vnop_allocate_desc, (VOPFUNC)hfs_readonly_op }, /* allocate (READONLY) */
7073 #if CONFIG_SEARCHFS
7074 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
7075 #else
7076 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
7077 #endif
7078 { &vnop_bwrite_desc, (VOPFUNC)hfs_readonly_op }, /* bwrite (READONLY) */
7079 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
7080 { &vnop_pageout_desc, (VOPFUNC)hfs_readonly_op }, /* pageout (READONLY) */
7081 { &vnop_copyfile_desc, (VOPFUNC)hfs_readonly_op }, /* copyfile (READONLY) */
7082 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7083 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7084 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7085 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7086 { &vnop_setxattr_desc, (VOPFUNC)hfs_readonly_op}, /* set xattr (READONLY) */
7087 { &vnop_removexattr_desc, (VOPFUNC)hfs_readonly_op}, /* remove xattr (READONLY) */
7088 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7089 #if NAMEDSTREAMS
7090 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
7091 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_readonly_op },
7092 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_readonly_op },
7093 #endif
7094 { &vnop_getattrlistbulk_desc, (VOPFUNC)hfs_vnop_getattrlistbulk }, /* getattrlistbulk */
7095 { NULL, (VOPFUNC)NULL }
7096 };
7097
7098 struct vnodeopv_desc hfs_std_vnodeop_opv_desc =
7099 { &hfs_std_vnodeop_p, hfs_standard_vnodeop_entries };
7100 #endif
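/*
 * A small userspace sketch (not part of this file) of what the table above
 * means for callers on an HFS Standard volume: every mutating operation is
 * routed to hfs_readonly_op and surfaces as EROFS. The mount path below is
 * hypothetical.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

void
hfs_std_write_attempt_example(void)
{
	int fd = open("/Volumes/HFSStdDisk/newfile", O_CREAT | O_WRONLY, 0644);
	if (fd < 0 && errno == EROFS)
		printf("create rejected: read-only file system\n");
	else if (fd >= 0)
		close(fd);
}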
7101
7102 /* VNOP table for HFS+ */
7103 struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
7104 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7105 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
7106 { &vnop_create_desc, (VOPFUNC)hfs_vnop_create }, /* create */
7107 { &vnop_mknod_desc, (VOPFUNC)hfs_vnop_mknod }, /* mknod */
7108 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
7109 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
7110 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7111 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7112 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
7113 { &vnop_write_desc, (VOPFUNC)hfs_vnop_write }, /* write */
7114 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
7115 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
7116 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
7117 { &vnop_exchange_desc, (VOPFUNC)hfs_vnop_exchange }, /* exchange */
7118 { &vnop_mmap_desc, (VOPFUNC)hfs_vnop_mmap }, /* mmap */
7119 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7120 { &vnop_remove_desc, (VOPFUNC)hfs_vnop_remove }, /* remove */
7121 { &vnop_link_desc, (VOPFUNC)hfs_vnop_link }, /* link */
7122 { &vnop_rename_desc, (VOPFUNC)hfs_vnop_rename }, /* rename */
7123 { &vnop_renamex_desc, (VOPFUNC)hfs_vnop_renamex }, /* renamex (with flags) */
7124 { &vnop_mkdir_desc, (VOPFUNC)hfs_vnop_mkdir }, /* mkdir */
7125 { &vnop_rmdir_desc, (VOPFUNC)hfs_vnop_rmdir }, /* rmdir */
7126 { &vnop_symlink_desc, (VOPFUNC)hfs_vnop_symlink }, /* symlink */
7127 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
7128 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
7129 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
7130 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7131 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7132 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
7133 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
7134 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7135 { &vnop_allocate_desc, (VOPFUNC)hfs_vnop_allocate }, /* allocate */
7136 #if CONFIG_SEARCHFS
7137 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
7138 #else
7139 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
7140 #endif
7141 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite }, /* bwrite */
7142 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
7143 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* pageout */
7144 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7145 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7146 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7147 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7148 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7149 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7150 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7151 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7152 #if NAMEDSTREAMS
7153 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
7154 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_vnop_makenamedstream },
7155 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_vnop_removenamedstream },
7156 #endif
7157 { &vnop_getattrlistbulk_desc, (VOPFUNC)hfs_vnop_getattrlistbulk }, /* getattrlistbulk */
7158 { &vnop_mnomap_desc, (VOPFUNC)hfs_vnop_mnomap },
7159 { NULL, (VOPFUNC)NULL }
7160 };
7161
7162 struct vnodeopv_desc hfs_vnodeop_opv_desc =
7163 { &hfs_vnodeop_p, hfs_vnodeop_entries };
7164
7165
7166 /* Spec Op vnop table for HFS+ */
7167 int (**hfs_specop_p)(void *);
7168 struct vnodeopv_entry_desc hfs_specop_entries[] = {
7169 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7170 { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */
7171 { &vnop_create_desc, (VOPFUNC)spec_create }, /* create */
7172 { &vnop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */
7173 { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */
7174 { &vnop_close_desc, (VOPFUNC)hfsspec_close }, /* close */
7175 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7176 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7177 { &vnop_read_desc, (VOPFUNC)hfsspec_read }, /* read */
7178 { &vnop_write_desc, (VOPFUNC)hfsspec_write }, /* write */
7179 { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */
7180 { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */
7181 { &vnop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */
7182 { &vnop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */
7183 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7184 { &vnop_remove_desc, (VOPFUNC)spec_remove }, /* remove */
7185 { &vnop_link_desc, (VOPFUNC)spec_link }, /* link */
7186 { &vnop_rename_desc, (VOPFUNC)spec_rename }, /* rename */
7187 { &vnop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */
7188 { &vnop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */
7189 { &vnop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */
7190 { &vnop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */
7191 { &vnop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */
7192 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7193 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7194 { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */
7195 { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */
7196 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7197 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
7198 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
7199 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
7200 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7201 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7202 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7203 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7204 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7205 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7206 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7207 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
7208 };
7209 struct vnodeopv_desc hfs_specop_opv_desc =
7210 { &hfs_specop_p, hfs_specop_entries };
7211
7212 #if FIFO
7213 /* HFS+ FIFO VNOP table */
7214 int (**hfs_fifoop_p)(void *);
7215 struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
7216 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7217 { &vnop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */
7218 { &vnop_create_desc, (VOPFUNC)fifo_create }, /* create */
7219 { &vnop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */
7220 { &vnop_open_desc, (VOPFUNC)fifo_open }, /* open */
7221 { &vnop_close_desc, (VOPFUNC)hfsfifo_close }, /* close */
7222 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7223 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7224 { &vnop_read_desc, (VOPFUNC)hfsfifo_read }, /* read */
7225 { &vnop_write_desc, (VOPFUNC)hfsfifo_write }, /* write */
7226 { &vnop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */
7227 { &vnop_select_desc, (VOPFUNC)fifo_select }, /* select */
7228 { &vnop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */
7229 { &vnop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */
7230 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7231 { &vnop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */
7232 { &vnop_link_desc, (VOPFUNC)fifo_link }, /* link */
7233 { &vnop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */
7234 { &vnop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */
7235 { &vnop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */
7236 { &vnop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */
7237 { &vnop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */
7238 { &vnop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */
7239 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7240 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7241 { &vnop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */
7242 { &vnop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */
7243 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7244 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
7245 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
7246 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
7247 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7248 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7249 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7250 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7251 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7252 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7253 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7254 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7255 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
7256 };
7257 struct vnodeopv_desc hfs_fifoop_opv_desc =
7258 { &hfs_fifoop_p, hfs_fifoop_entries };
7259 #endif /* FIFO */
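/*
 * A hedged sketch (not part of this file) of how vnop tables like the ones
 * above are typically handed to the VFS layer: the vnodeopv_desc pointers
 * are collected into a struct vfs_fsentry and registered with vfs_fsadd(),
 * which fills in the operation vectors (hfs_vnodeop_p and friends) as a
 * side effect. The vfsops reference, flag choices, and helper names below
 * are assumptions for illustration, not HFS's actual registration code.
 */
#include <sys/mount.h>

extern struct vfsops hfs_vfsops;	/* assumed to be defined elsewhere */

static struct vnodeopv_desc *hfs_vnodeop_descs_example[] = {
	&hfs_vnodeop_opv_desc,
	&hfs_specop_opv_desc,
#if FIFO
	&hfs_fifoop_opv_desc,
#endif
};

static vfstable_t hfs_vfstable_example;

int
hfs_register_example(void)
{
	struct vfs_fsentry vfe = {
		.vfe_vfsops   = &hfs_vfsops,
		.vfe_vopcnt   = sizeof(hfs_vnodeop_descs_example) /
		                sizeof(hfs_vnodeop_descs_example[0]),
		.vfe_opvdescs = hfs_vnodeop_descs_example,
		.vfe_fsname   = "hfs",
		.vfe_flags    = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY |
		                VFS_TBLNATIVEXATTR | VFS_TBLNOTYPENUM,
	};

	return vfs_fsadd(&vfe, &hfs_vfstable_example);
}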