[apple/xnu.git] bsd/hfs/hfs_vnops.c (xnu-3248.20.55)
1 /*
2 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <stdbool.h>
30 #include <sys/systm.h>
31 #include <sys/param.h>
32 #include <sys/kernel.h>
33 #include <sys/file_internal.h>
34 #include <sys/dirent.h>
35 #include <sys/stat.h>
36 #include <sys/buf.h>
37 #include <sys/buf_internal.h>
38 #include <sys/mount.h>
39 #include <sys/vnode_if.h>
40 #include <sys/vnode_internal.h>
41 #include <sys/malloc.h>
42 #include <sys/ubc.h>
43 #include <sys/ubc_internal.h>
44 #include <sys/paths.h>
45 #include <sys/quota.h>
46 #include <sys/time.h>
47 #include <sys/disk.h>
48 #include <sys/kauth.h>
49 #include <sys/uio_internal.h>
50 #include <sys/fsctl.h>
51 #include <sys/xattr.h>
52 #include <string.h>
53 #include <sys/fsevents.h>
54 #include <kern/kalloc.h>
55
56 #include <miscfs/specfs/specdev.h>
57 #include <miscfs/fifofs/fifo.h>
58 #include <vfs/vfs_support.h>
59 #include <machine/spl.h>
60
61 #include <sys/kdebug.h>
62 #include <sys/sysctl.h>
63 #include <stdbool.h>
64
65 #include "hfs.h"
66 #include "hfs_catalog.h"
67 #include "hfs_cnode.h"
68 #include "hfs_dbg.h"
69 #include "hfs_mount.h"
70 #include "hfs_quota.h"
71 #include "hfs_endian.h"
72 #include "hfs_kdebug.h"
73 #include "hfs_cprotect.h"
74
75
76 #include "hfscommon/headers/BTreesInternal.h"
77 #include "hfscommon/headers/FileMgrInternal.h"
78
79 #define KNDETACH_VNLOCKED 0x00000001
80
81 /* Global vfs data structures for hfs */
82
83 /* Always F_FULLFSYNC? 1=yes, 0=no (the default is 'no', for "various" reasons) */
84 int always_do_fullfsync = 0;
85 SYSCTL_DECL(_vfs_generic);
86 SYSCTL_INT (_vfs_generic, OID_AUTO, always_do_fullfsync, CTLFLAG_RW | CTLFLAG_LOCKED, &always_do_fullfsync, 0, "always F_FULLFSYNC when fsync is called");
87
88 int hfs_makenode(struct vnode *dvp, struct vnode **vpp,
89 struct componentname *cnp, struct vnode_attr *vap,
90 vfs_context_t ctx);
91 int hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p);
92 int hfs_metasync_all(struct hfsmount *hfsmp);
93
94 int hfs_removedir(struct vnode *, struct vnode *, struct componentname *,
95 int, int);
96 int hfs_removefile(struct vnode *, struct vnode *, struct componentname *,
97 int, int, int, struct vnode *, int);
98
99 /* Used here and in cnode teardown -- for symlinks */
100 int hfs_removefile_callback(struct buf *bp, void *hfsmp);
101
102 enum {
103 HFS_MOVE_DATA_INCLUDE_RSRC = 1,
104 };
105 typedef uint32_t hfs_move_data_options_t;
106
107 static int hfs_move_data(cnode_t *from_cp, cnode_t *to_cp,
108 hfs_move_data_options_t options);
109 static int hfs_move_fork(filefork_t *srcfork, cnode_t *src,
110 filefork_t *dstfork, cnode_t *dst);
111
112 #if HFS_COMPRESSION
113 static int hfs_move_compressed(cnode_t *from_vp, cnode_t *to_vp);
114 #endif
115
116 decmpfs_cnode* hfs_lazy_init_decmpfs_cnode (struct cnode *cp);
117
118 #if FIFO
119 static int hfsfifo_read(struct vnop_read_args *);
120 static int hfsfifo_write(struct vnop_write_args *);
121 static int hfsfifo_close(struct vnop_close_args *);
122
123 extern int (**fifo_vnodeop_p)(void *);
124 #endif /* FIFO */
125
126 int hfs_vnop_close(struct vnop_close_args*);
127 int hfs_vnop_create(struct vnop_create_args*);
128 int hfs_vnop_exchange(struct vnop_exchange_args*);
129 int hfs_vnop_fsync(struct vnop_fsync_args*);
130 int hfs_vnop_mkdir(struct vnop_mkdir_args*);
131 int hfs_vnop_mknod(struct vnop_mknod_args*);
132 int hfs_vnop_getattr(struct vnop_getattr_args*);
133 int hfs_vnop_open(struct vnop_open_args*);
134 int hfs_vnop_readdir(struct vnop_readdir_args*);
135 int hfs_vnop_remove(struct vnop_remove_args*);
136 int hfs_vnop_rename(struct vnop_rename_args*);
137 int hfs_vnop_rmdir(struct vnop_rmdir_args*);
138 int hfs_vnop_symlink(struct vnop_symlink_args*);
139 int hfs_vnop_setattr(struct vnop_setattr_args*);
140 int hfs_vnop_readlink(struct vnop_readlink_args *);
141 int hfs_vnop_pathconf(struct vnop_pathconf_args *);
142 int hfs_vnop_mmap(struct vnop_mmap_args *ap);
143 int hfsspec_read(struct vnop_read_args *);
144 int hfsspec_write(struct vnop_write_args *);
145 int hfsspec_close(struct vnop_close_args *);
146
147 /* Options for hfs_removedir and hfs_removefile */
148 #define HFSRM_SKIP_RESERVE 0x01
149
150
151
152 /*****************************************************************************
153 *
154 * Common Operations on vnodes
155 *
156 *****************************************************************************/
157
158 /*
159 * Is the given cnode either the .journal or .journal_info_block file on
160 * a volume with an active journal? Many VNOPs use this to deny access
161 * to those files.
162 *
163 * Note: the .journal file on a volume with an external journal still
164 * returns true here, even though it does not actually hold the contents
165 * of the volume's journal.
166 */
167 static _Bool
168 hfs_is_journal_file(struct hfsmount *hfsmp, struct cnode *cp)
169 {
170 if (hfsmp->jnl != NULL &&
171 (cp->c_fileid == hfsmp->hfs_jnlinfoblkid ||
172 cp->c_fileid == hfsmp->hfs_jnlfileid)) {
173 return true;
174 } else {
175 return false;
176 }
177 }
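//
// Typical caller pattern (a sketch of what hfs_vnop_open, hfs_vnop_setattr
// and hfs_chmod below actually do): refuse the operation outright when the
// target is one of the journal files.
//
//	if (hfs_is_journal_file(hfsmp, cp))
//		return (EPERM);
//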
178
179 /*
180 * Create a regular file.
181 */
182 int
183 hfs_vnop_create(struct vnop_create_args *ap)
184 {
185 /*
186 * We leave handling of certain race conditions here to the caller
187 * which will have a better understanding of the semantics it
188 * requires. For example, if it turns out that the file exists,
189 * it would be wrong of us to return a reference to the existing
190 * file because the caller might not want that and it would be
191 * misleading to suggest the file had been created when it hadn't
192 * been. Note that our NFS server code does not set the
193 * VA_EXCLUSIVE flag so you cannot assume that callers don't want
194 * EEXIST errors if it's not set. The common case, where users
195 * are calling open with the O_CREAT mode, is handled in VFS; when
196 * we return EEXIST, it will loop and do the look-up again.
197 */
198 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
199 }
200
201 /*
202 * Make device special file.
203 */
204 int
205 hfs_vnop_mknod(struct vnop_mknod_args *ap)
206 {
207 struct vnode_attr *vap = ap->a_vap;
208 struct vnode *dvp = ap->a_dvp;
209 struct vnode **vpp = ap->a_vpp;
210 struct cnode *cp;
211 int error;
212
213 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord) {
214 return (ENOTSUP);
215 }
216
217 /* Create the vnode */
218 error = hfs_makenode(dvp, vpp, ap->a_cnp, vap, ap->a_context);
219 if (error)
220 return (error);
221
222 cp = VTOC(*vpp);
223 cp->c_touch_acctime = TRUE;
224 cp->c_touch_chgtime = TRUE;
225 cp->c_touch_modtime = TRUE;
226
227 if ((vap->va_rdev != VNOVAL) &&
228 (vap->va_type == VBLK || vap->va_type == VCHR))
229 cp->c_rdev = vap->va_rdev;
230
231 return (0);
232 }
233
234 #if HFS_COMPRESSION
235 /*
236 * hfs_ref_data_vp(): returns the data fork vnode for a given cnode.
237 * In the (hopefully rare) case where the data fork vnode is not
238 * present, it will use hfs_vget() to create a new vnode for the
239 * data fork.
240 *
241 * NOTE: If successful and a vnode is returned, the caller is responsible
242 * for releasing the returned vnode with vnode_rele().
243 */
244 static int
245 hfs_ref_data_vp(struct cnode *cp, struct vnode **data_vp, int skiplock)
246 {
247 int vref = 0;
248
249 if (!data_vp || !cp) /* sanity check incoming parameters */
250 return EINVAL;
251
252 /* maybe we should take the hfs cnode lock here, and if so, use the skiplock parameter to tell us not to */
253
254 if (!skiplock) hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
255 struct vnode *c_vp = cp->c_vp;
256 if (c_vp) {
257 /* we already have a data vnode */
258 *data_vp = c_vp;
259 vref = vnode_ref(*data_vp);
260 if (!skiplock) hfs_unlock(cp);
261 if (vref == 0) {
262 return 0;
263 }
264 return EINVAL;
265 }
266 /* no data fork vnode in the cnode, so ask hfs for one. */
267
268 if (!cp->c_rsrc_vp) {
269 /* if we don't have either a c_vp or c_rsrc_vp, we can't really do anything useful */
270 *data_vp = NULL;
271 if (!skiplock) hfs_unlock(cp);
272 return EINVAL;
273 }
274
275 if (0 == hfs_vget(VTOHFS(cp->c_rsrc_vp), cp->c_cnid, data_vp, 1, 0) &&
276 0 != data_vp) {
277 vref = vnode_ref(*data_vp);
278 vnode_put(*data_vp);
279 if (!skiplock) hfs_unlock(cp);
280 if (vref == 0) {
281 return 0;
282 }
283 return EINVAL;
284 }
285 /* there was an error getting the vnode */
286 *data_vp = NULL;
287 if (!skiplock) hfs_unlock(cp);
288 return EINVAL;
289 }
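//
// Caller-side sketch (this mirrors hfs_file_is_compressed() and
// hfs_vnop_open() later in this file): on success the returned vnode
// carries a usecount reference that must be dropped with vnode_rele().
//
//	struct vnode *data_vp = NULL;
//	if (hfs_ref_data_vp(cp, &data_vp, skiplock) == 0 && data_vp) {
//		/* ... use the data fork vnode ... */
//		vnode_rele(data_vp);
//	}
//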
290
291 /*
292 * hfs_lazy_init_decmpfs_cnode(): returns the decmpfs_cnode for a cnode,
293 * allocating it if necessary; returns NULL if there was an allocation error.
294 * The function is non-static so that it can be used from the FCNTL handler.
295 */
296 decmpfs_cnode *
297 hfs_lazy_init_decmpfs_cnode(struct cnode *cp)
298 {
299 if (!cp->c_decmp) {
300 decmpfs_cnode *dp = NULL;
301 MALLOC_ZONE(dp, decmpfs_cnode *, sizeof(decmpfs_cnode), M_DECMPFS_CNODE, M_WAITOK);
302 if (!dp) {
303 /* error allocating a decmpfs cnode */
304 return NULL;
305 }
306 decmpfs_cnode_init(dp);
307 if (!OSCompareAndSwapPtr(NULL, dp, (void * volatile *)&cp->c_decmp)) {
308 /* another thread got here first, so free the decmpfs_cnode we allocated */
309 decmpfs_cnode_destroy(dp);
310 FREE_ZONE(dp, sizeof(*dp), M_DECMPFS_CNODE);
311 }
312 }
313
314 return cp->c_decmp;
315 }
316
317 /*
318 * hfs_file_is_compressed(): returns 1 if the file is compressed, and 0 (zero) if not.
319 * if the file's compressed flag is set, makes sure that the decmpfs_cnode field
320 * is allocated by calling hfs_lazy_init_decmpfs_cnode(), then makes sure it is populated,
321 * or else fills it in via the decmpfs_file_is_compressed() function.
322 */
323 int
324 hfs_file_is_compressed(struct cnode *cp, int skiplock)
325 {
326 int ret = 0;
327
328 /* fast check to see if file is compressed. If flag is clear, just answer no */
329 if (!(cp->c_bsdflags & UF_COMPRESSED)) {
330 return 0;
331 }
332
333 decmpfs_cnode *dp = hfs_lazy_init_decmpfs_cnode(cp);
334 if (!dp) {
335 /* error allocating a decmpfs cnode, treat the file as uncompressed */
336 return 0;
337 }
338
339 /* flag was set, see if the decmpfs_cnode state is valid (zero == invalid) */
340 uint32_t decmpfs_state = decmpfs_cnode_get_vnode_state(dp);
341 switch(decmpfs_state) {
342 case FILE_IS_COMPRESSED:
343 case FILE_IS_CONVERTING: /* treat decompressing files as if they are compressed */
344 return 1;
345 case FILE_IS_NOT_COMPRESSED:
346 return 0;
347 /* otherwise the state is not cached yet */
348 }
349
350 /* decmpfs hasn't seen this file yet, so call decmpfs_file_is_compressed() to init the decmpfs_cnode struct */
351 struct vnode *data_vp = NULL;
352 if (0 == hfs_ref_data_vp(cp, &data_vp, skiplock)) {
353 if (data_vp) {
354 ret = decmpfs_file_is_compressed(data_vp, VTOCMP(data_vp)); // fill in decmpfs_cnode
355 vnode_rele(data_vp);
356 }
357 }
358 return ret;
359 }
360
361 /* hfs_uncompressed_size_of_compressed_file() - get the uncompressed size of the file.
362 * if the caller has passed a valid vnode (has a ref count > 0), then hfsmp and fid are not required.
363 * if the caller doesn't have a vnode, pass NULL in vp, and pass valid hfsmp and fid.
364 * the file's size is returned via the size parameter (required)
365 * if the indicated file is a directory (or something that doesn't have a data fork), then this call
366 * will return an error and the caller should fall back to treating the item as an uncompressed file
367 */
368 int
369 hfs_uncompressed_size_of_compressed_file(struct hfsmount *hfsmp, struct vnode *vp, cnid_t fid, off_t *size, int skiplock)
370 {
371 int ret = 0;
372 int putaway = 0; /* flag to remember if we used hfs_vget() */
373
374 if (!size) {
375 return EINVAL; /* no place to put the file size */
376 }
377
378 if (NULL == vp) {
379 if (!hfsmp || !fid) { /* make sure we have the required parameters */
380 return EINVAL;
381 }
382 if (0 != hfs_vget(hfsmp, fid, &vp, skiplock, 0)) { /* vnode is null, use hfs_vget() to get it */
383 vp = NULL;
384 } else {
385 putaway = 1; /* note that hfs_vget() was used to acquire the vnode */
386 }
387 }
388 /* this double check for compression (hfs_file_is_compressed)
389 * ensures the cached size is present in case decmpfs hasn't
390 * encountered this node yet.
391 */
392 if (vp) {
393 if (hfs_file_is_compressed(VTOC(vp), skiplock) ) {
394 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
395 } else {
396 if (VTOCMP(vp) && VTOCMP(vp)->cmp_type >= CMP_MAX) {
397 if (VTOCMP(vp)->cmp_type != DATALESS_CMPFS_TYPE) {
398 // if we don't recognize this type, just use the real data fork size
399 if (VTOC(vp)->c_datafork) {
400 *size = VTOC(vp)->c_datafork->ff_size;
401 ret = 0;
402 } else {
403 ret = EINVAL;
404 }
405 } else {
406 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
407 ret = 0;
408 }
409 } else {
410 ret = EINVAL;
411 }
412 }
413 }
414
415 if (putaway) { /* did we use hfs_vget() to get this vnode? */
416 vnode_put(vp); /* if so, release it and set it to null */
417 vp = NULL;
418 }
419 return ret;
420 }
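//
// Usage sketch: the first form is what hfs_vnop_getattr() does below when it
// already holds a vnode; the second form follows the description above for
// callers that only have an hfsmp and a file ID.
//
//	off_t size;
//	error = hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &size, 0);
//	error = hfs_uncompressed_size_of_compressed_file(hfsmp, NULL, fid, &size, skiplock);
//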
421
422 int
423 hfs_hides_rsrc(vfs_context_t ctx, struct cnode *cp, int skiplock)
424 {
425 if (ctx == decmpfs_ctx)
426 return 0;
427 if (!hfs_file_is_compressed(cp, skiplock))
428 return 0;
429 return decmpfs_hides_rsrc(ctx, cp->c_decmp);
430 }
431
432 int
433 hfs_hides_xattr(vfs_context_t ctx, struct cnode *cp, const char *name, int skiplock)
434 {
435 if (ctx == decmpfs_ctx)
436 return 0;
437 if (!hfs_file_is_compressed(cp, skiplock))
438 return 0;
439 return decmpfs_hides_xattr(ctx, cp->c_decmp, name);
440 }
441 #endif /* HFS_COMPRESSION */
442
443
444 //
445 // This function gets the doc_tombstone structure for the
446 // current thread. If the thread doesn't have one, the
447 // structure is allocated.
448 //
449 static struct doc_tombstone *
450 get_uthread_doc_tombstone(void)
451 {
452 struct uthread *ut;
453 ut = get_bsdthread_info(current_thread());
454
455 if (ut->t_tombstone == NULL) {
456 ut->t_tombstone = kalloc(sizeof(struct doc_tombstone));
457 if (ut->t_tombstone) {
458 memset(ut->t_tombstone, 0, sizeof(struct doc_tombstone));
459 }
460 }
461
462 return ut->t_tombstone;
463 }
464
465 //
466 // This routine clears out the current tombstone for the
467 // current thread and if necessary passes the doc-id of
468 // the tombstone on to the dst_cnode.
469 //
470 // If the doc-id transfers to dst_cnode, we also generate
471 // a doc-id changed fsevent. Unlike all the other fsevents,
472 // doc-id changed events can only be generated here in HFS
473 // where we have the necessary info.
474 //
475 static void
476 clear_tombstone_docid(struct doc_tombstone *ut, __unused struct hfsmount *hfsmp, struct cnode *dst_cnode)
477 {
478 uint32_t old_id = ut->t_lastop_document_id;
479
480 ut->t_lastop_document_id = 0;
481 ut->t_lastop_parent = NULL;
482 ut->t_lastop_parent_vid = 0;
483 ut->t_lastop_filename[0] = '\0';
484
485 //
486 // If the lastop item is still the same and needs to be cleared,
487 // clear it.
488 //
489 if (dst_cnode && old_id && ut->t_lastop_item && vnode_vid(ut->t_lastop_item) == ut->t_lastop_item_vid) {
490 //
491 // clear the document_id from the file that used to have it.
492 // XXXdbg - we need to lock the other vnode and make sure to
493 // update it on disk.
494 //
495 struct cnode *ocp = VTOC(ut->t_lastop_item);
496 struct FndrExtendedFileInfo *ofip = (struct FndrExtendedFileInfo *)((char *)&ocp->c_attr.ca_finderinfo + 16);
497
498 // printf("clearing doc-id from ino %d\n", ocp->c_desc.cd_cnid);
499 ofip->document_id = 0;
500 ocp->c_bsdflags &= ~UF_TRACKED;
501 ocp->c_flag |= C_MODIFIED;
502 /* cat_update(hfsmp, &ocp->c_desc, &ocp->c_attr, NULL, NULL); */
503
504 }
505
506 #if CONFIG_FSE
507 if (dst_cnode && old_id) {
508 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&dst_cnode->c_attr.ca_finderinfo + 16);
509
510 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
511 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
512 FSE_ARG_INO, (ino64_t)ut->t_lastop_fileid, // src inode #
513 FSE_ARG_INO, (ino64_t)dst_cnode->c_fileid, // dst inode #
514 FSE_ARG_INT32, (uint32_t)fip->document_id,
515 FSE_ARG_DONE);
516 }
517 #endif
518 // last, clear these now that we're all done
519 ut->t_lastop_item = NULL;
520 ut->t_lastop_fileid = 0;
521 ut->t_lastop_item_vid = 0;
522 }
523
524
525 //
526 // This function is used to filter out operations on temp
527 // filenames. We have to filter out operations on certain
528 // temp filenames to work around questionable application
529 // behavior from apps like Autocad that perform unusual
530 // sequences of file system operations for a "safe save".
531 static int
532 is_ignorable_temp_name(const char *nameptr, int len)
533 {
534 if (len == 0) {
535 len = strlen(nameptr);
536 }
537
538 if ( strncmp(nameptr, "atmp", 4) == 0
539 || (len > 4 && strncmp(nameptr+len-4, ".bak", 4) == 0)
540 || (len > 4 && strncmp(nameptr+len-4, ".tmp", 4) == 0)) {
541 return 1;
542 }
543
544 return 0;
545 }
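//
// For example (illustrative names, matching the checks above): "atmp0001",
// "drawing.bak" and "drawing.tmp" are treated as ignorable temp names,
// while "drawing.dwg" is not.
//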
546
547 //
548 // Decide if we need to save a tombstone or not. Normally we always
549 // save a tombstone - but if there already is one and the name we're
550 // given is an ignorable name, then we will not save a tombstone.
551 //
552 static int
553 should_save_docid_tombstone(struct doc_tombstone *ut, struct vnode *vp, struct componentname *cnp)
554 {
555 if (cnp->cn_nameptr == NULL) {
556 return 0;
557 }
558
559 if (ut->t_lastop_document_id && ut->t_lastop_item == vp && is_ignorable_temp_name(cnp->cn_nameptr, cnp->cn_namelen)) {
560 return 0;
561 }
562
563 return 1;
564 }
565
566
567 //
568 // This function saves a tombstone for the given vnode and name. The
569 // tombstone represents the parent directory and name where the document
570 // used to live and the document-id of that file. This info is recorded
571 // in the doc_tombstone structure hanging off the uthread (which assumes
572 // that all safe-save operations happen on the same thread).
573 //
574 // If later on the same parent/name combo comes back into existence then
575 // we'll preserve the doc-id from this vnode onto the new vnode.
576 //
577 static void
578 save_tombstone(struct hfsmount *hfsmp, struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int for_unlink)
579 {
580 struct cnode *cp = VTOC(vp);
581 struct doc_tombstone *ut;
582 ut = get_uthread_doc_tombstone();
583
584 if (for_unlink && vp->v_type == VREG && cp->c_linkcount > 1) {
585 //
586 // a regular file that is being unlinked and that is also
587 // hardlinked should not clear the UF_TRACKED state or
588 // mess with the tombstone because somewhere else in the
589 // file system the file is still alive.
590 //
591 return;
592 }
593
594 ut->t_lastop_parent = dvp;
595 ut->t_lastop_parent_vid = vnode_vid(dvp);
596 ut->t_lastop_fileid = cp->c_fileid;
597 if (for_unlink) {
598 ut->t_lastop_item = NULL;
599 ut->t_lastop_item_vid = 0;
600 } else {
601 ut->t_lastop_item = vp;
602 ut->t_lastop_item_vid = vnode_vid(vp);
603 }
604
605 strlcpy((char *)&ut->t_lastop_filename[0], cnp->cn_nameptr, sizeof(ut->t_lastop_filename));
606
607 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
608 ut->t_lastop_document_id = fip->document_id;
609
610 if (for_unlink) {
611 // clear this so it's never returned again
612 fip->document_id = 0;
613 cp->c_bsdflags &= ~UF_TRACKED;
614
615 if (ut->t_lastop_document_id) {
616 (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
617
618 #if CONFIG_FSE
619 // this event is more of a "pending-delete"
620 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
621 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
622 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
623 FSE_ARG_INO, (ino64_t)0, // dst inode #
624 FSE_ARG_INT32, ut->t_lastop_document_id, // document id
625 FSE_ARG_DONE);
626 #endif
627 }
628 }
629 }
630
631
632 /*
633 * Open a file/directory.
634 */
635 int
636 hfs_vnop_open(struct vnop_open_args *ap)
637 {
638 struct vnode *vp = ap->a_vp;
639 struct filefork *fp;
640 struct timeval tv;
641 int error;
642 static int past_bootup = 0;
643 struct cnode *cp = VTOC(vp);
644 struct hfsmount *hfsmp = VTOHFS(vp);
645
646 #if HFS_COMPRESSION
647 if (ap->a_mode & FWRITE) {
648 /* open for write */
649 if ( hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
650 /* opening a compressed file for write, so convert it to decompressed */
651 struct vnode *data_vp = NULL;
652 error = hfs_ref_data_vp(cp, &data_vp, 1); /* 1 == don't take the cnode lock */
653 if (0 == error) {
654 if (data_vp) {
655 error = decmpfs_decompress_file(data_vp, VTOCMP(data_vp), -1, 1, 0);
656 vnode_rele(data_vp);
657 } else {
658 error = EINVAL;
659 }
660 }
661 if (error != 0)
662 return error;
663 }
664 } else {
665 /* open for read */
666 if (hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
667 if (VNODE_IS_RSRC(vp)) {
668 /* opening the resource fork of a compressed file, so nothing to do */
669 } else {
670 /* opening a compressed file for read, make sure it validates */
671 error = decmpfs_validate_compressed_file(vp, VTOCMP(vp));
672 if (error != 0)
673 return error;
674 }
675 }
676 }
677 #endif
678
679 /*
680 * Files marked append-only must be opened for appending.
681 */
682 if ((cp->c_bsdflags & APPEND) && !vnode_isdir(vp) &&
683 (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
684 return (EPERM);
685
686 if (vnode_isreg(vp) && !UBCINFOEXISTS(vp))
687 return (EBUSY); /* file is in use by the kernel */
688
689 /* Don't allow journal to be opened externally. */
690 if (hfs_is_journal_file(hfsmp, cp))
691 return (EPERM);
692
693 bool have_lock = false;
694
695 #if CONFIG_PROTECT
696 if (ISSET(ap->a_mode, FENCRYPTED) && cp->c_cpentry && vnode_isreg(vp)) {
697 bool have_trunc_lock = false;
698
699
700 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
701 if (have_trunc_lock)
702 hfs_unlock_truncate(cp, 0);
703 return error;
704 }
705
706 have_lock = true;
707
708 if (cp->c_cpentry->cp_raw_open_count + 1
709 < cp->c_cpentry->cp_raw_open_count) {
710 // Overflow; too many raw opens on this file
711 hfs_unlock(cp);
712 if (have_trunc_lock)
713 hfs_unlock_truncate(cp, 0);
714 return ENFILE;
715 }
716
717
718 if (have_trunc_lock)
719 hfs_unlock_truncate(cp, 0);
720
721 ++cp->c_cpentry->cp_raw_open_count;
722 }
723 #endif
724
725 if ((hfsmp->hfs_flags & HFS_READ_ONLY) ||
726 (hfsmp->jnl == NULL) ||
727 #if NAMEDSTREAMS
728 !vnode_isreg(vp) || vnode_isinuse(vp, 0) || vnode_isnamedstream(vp)) {
729 #else
730 !vnode_isreg(vp) || vnode_isinuse(vp, 0)) {
731 #endif
732
733 #if CONFIG_PROTECT
734 if (have_lock)
735 hfs_unlock(cp);
736 #endif
737
738 return (0);
739 }
740
741 if (!have_lock && (error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
742 return (error);
743
744 #if QUOTA
745 /* If we're going to write to the file, initialize quotas. */
746 if ((ap->a_mode & FWRITE) && (hfsmp->hfs_flags & HFS_QUOTAS))
747 (void)hfs_getinoquota(cp);
748 #endif /* QUOTA */
749
750 /*
751 * On the first (non-busy) open of a fragmented
752 * file, attempt to de-frag it (if it's less than 20MB).
753 */
754 fp = VTOF(vp);
755 if (fp->ff_blocks &&
756 fp->ff_extents[7].blockCount != 0 &&
757 fp->ff_size <= (20 * 1024 * 1024)) {
758 int no_mods = 0;
759 struct timeval now;
760 /*
761 * Wait until system bootup is done (3 min).
762 * And don't relocate a file that's been modified
763 * within the past minute -- this can lead to
764 * system thrashing.
765 */
766
767 if (!past_bootup) {
768 microuptime(&tv);
769 if (tv.tv_sec > (60*3)) {
770 past_bootup = 1;
771 }
772 }
773
774 microtime(&now);
775 if ((now.tv_sec - cp->c_mtime) > 60) {
776 no_mods = 1;
777 }
778
779 if (past_bootup && no_mods) {
780 (void) hfs_relocate(vp, hfsmp->nextAllocation + 4096,
781 vfs_context_ucred(ap->a_context),
782 vfs_context_proc(ap->a_context));
783 }
784 }
785
786 hfs_unlock(cp);
787
788 return (0);
789 }
790
791
792 /*
793 * Close a file/directory.
794 */
795 int
796 hfs_vnop_close(ap)
797 struct vnop_close_args /* {
798 struct vnode *a_vp;
799 int a_fflag;
800 vfs_context_t a_context;
801 } */ *ap;
802 {
803 register struct vnode *vp = ap->a_vp;
804 register struct cnode *cp;
805 struct proc *p = vfs_context_proc(ap->a_context);
806 struct hfsmount *hfsmp;
807 int busy;
808 int tooktrunclock = 0;
809 int knownrefs = 0;
810
811 if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0)
812 return (0);
813 cp = VTOC(vp);
814 hfsmp = VTOHFS(vp);
815
816 #if CONFIG_PROTECT
817 if (cp->c_cpentry && ISSET(ap->a_fflag, FENCRYPTED) && vnode_isreg(vp)) {
818 assert(cp->c_cpentry->cp_raw_open_count > 0);
819 --cp->c_cpentry->cp_raw_open_count;
820 }
821 #endif
822
823 /*
824 * If the rsrc fork is a named stream, it can cause the data fork to
825 * stay around, preventing de-allocation of these blocks.
826 * Do checks for truncation on close. Purge extra extents if they exist.
827 * Make sure the vp is not a directory, and that it has a resource fork,
828 * and that resource fork is also a named stream.
829 */
830
831 if ((vp->v_type == VREG) && (cp->c_rsrc_vp)
832 && (vnode_isnamedstream(cp->c_rsrc_vp))) {
833 uint32_t blks;
834
835 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
836 /*
837 * If there are extra blocks and there are only 2 refs on
838 * this vp (ourselves + rsrc fork holding ref on us), go ahead
839 * and try to truncate.
840 */
841 if ((blks < VTOF(vp)->ff_blocks) && (!vnode_isinuse(vp, 2))) {
842 // release cnode lock; must acquire truncate lock BEFORE cnode lock
843 hfs_unlock(cp);
844
845 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
846 tooktrunclock = 1;
847
848 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
849 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
850 // bail out if we can't re-acquire cnode lock
851 return 0;
852 }
853 // now re-test to make sure it's still valid
854 if (cp->c_rsrc_vp) {
855 knownrefs = 1 + vnode_isnamedstream(cp->c_rsrc_vp);
856 if (!vnode_isinuse(vp, knownrefs)){
857 // now we can truncate the file, if necessary
858 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
859 if (blks < VTOF(vp)->ff_blocks){
860 (void) hfs_truncate(vp, VTOF(vp)->ff_size, IO_NDELAY,
861 0, ap->a_context);
862 }
863 }
864 }
865 }
866 }
867
868
869 // if we froze the fs and we're exiting, then "thaw" the fs
870 if (hfsmp->hfs_freeze_state == HFS_FROZEN
871 && hfsmp->hfs_freezing_proc == p && proc_exiting(p)) {
872 hfs_thaw(hfsmp, p);
873 }
874
875 busy = vnode_isinuse(vp, 1);
876
877 if (busy) {
878 hfs_touchtimes(VTOHFS(vp), cp);
879 }
880 if (vnode_isdir(vp)) {
881 hfs_reldirhints(cp, busy);
882 } else if (vnode_issystem(vp) && !busy) {
883 vnode_recycle(vp);
884 }
885
886 if (tooktrunclock){
887 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
888 }
889 hfs_unlock(cp);
890
891 if (ap->a_fflag & FWASWRITTEN) {
892 hfs_sync_ejectable(hfsmp);
893 }
894
895 return (0);
896 }
897
898 static bool hfs_should_generate_document_id(hfsmount_t *hfsmp, cnode_t *cp)
899 {
900 return (!ISSET(hfsmp->hfs_flags, HFS_READ_ONLY)
901 && ISSET(cp->c_bsdflags, UF_TRACKED)
902 && cp->c_desc.cd_cnid != kHFSRootFolderID
903 && (S_ISDIR(cp->c_mode) || S_ISREG(cp->c_mode) || S_ISLNK(cp->c_mode)));
904 }
905
906 /*
907 * Get basic attributes.
908 */
909 int
910 hfs_vnop_getattr(struct vnop_getattr_args *ap)
911 {
912 #define VNODE_ATTR_TIMES \
913 (VNODE_ATTR_va_access_time|VNODE_ATTR_va_change_time|VNODE_ATTR_va_modify_time)
914 #define VNODE_ATTR_AUTH \
915 (VNODE_ATTR_va_mode | VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | \
916 VNODE_ATTR_va_flags | VNODE_ATTR_va_acl)
917
918 struct vnode *vp = ap->a_vp;
919 struct vnode_attr *vap = ap->a_vap;
920 struct vnode *rvp = NULLVP;
921 struct hfsmount *hfsmp;
922 struct cnode *cp;
923 uint64_t data_size;
924 enum vtype v_type;
925 int error = 0;
926 cp = VTOC(vp);
927
928 #if HFS_COMPRESSION
929 /* we need to inspect the decmpfs state of the file before we take the hfs cnode lock */
930 int compressed = 0;
931 int hide_size = 0;
932 off_t uncompressed_size = -1;
933 if (VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_alloc) || VATTR_IS_ACTIVE(vap, va_data_alloc) || VATTR_IS_ACTIVE(vap, va_total_size)) {
934 /* we only care about whether the file is compressed if asked for the uncompressed size */
935 if (VNODE_IS_RSRC(vp)) {
936 /* if it's a resource fork, decmpfs may want us to hide the size */
937 hide_size = hfs_hides_rsrc(ap->a_context, cp, 0);
938 } else {
939 /* if it's a data fork, we need to know if it was compressed so we can report the uncompressed size */
940 compressed = hfs_file_is_compressed(cp, 0);
941 }
942 if ((VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_size))) {
943 // if it's compressed
944 if (compressed || (!VNODE_IS_RSRC(vp) && cp->c_decmp && cp->c_decmp->cmp_type >= CMP_MAX)) {
945 if (0 != hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &uncompressed_size, 0)) {
946 /* failed to get the uncompressed size, we'll check for this later */
947 uncompressed_size = -1;
948 } else {
949 // fake that it's compressed
950 compressed = 1;
951 }
952 }
953 }
954 }
955 #endif
956
957 /*
958 * Shortcut for vnode_authorize path. Each of the attributes
959 * in this set is updated atomically so we don't need to take
960 * the cnode lock to access them.
961 */
962 if ((vap->va_active & ~VNODE_ATTR_AUTH) == 0) {
963 /* Make sure file still exists. */
964 if (cp->c_flag & C_NOEXISTS)
965 return (ENOENT);
966
967 vap->va_uid = cp->c_uid;
968 vap->va_gid = cp->c_gid;
969 vap->va_mode = cp->c_mode;
970 vap->va_flags = cp->c_bsdflags;
971 vap->va_supported |= VNODE_ATTR_AUTH & ~VNODE_ATTR_va_acl;
972
973 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
974 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
975 VATTR_SET_SUPPORTED(vap, va_acl);
976 }
977
978 return (0);
979 }
980
981 hfsmp = VTOHFS(vp);
982 v_type = vnode_vtype(vp);
983
984 if (VATTR_IS_ACTIVE(vap, va_document_id)) {
985 uint32_t document_id;
986
987 if (cp->c_desc.cd_cnid == kHFSRootFolderID)
988 document_id = kHFSRootFolderID;
989 else {
990 /*
991 * This is safe without a lock because we're just reading
992 * a 32 bit aligned integer which should be atomic on all
993 * platforms we support.
994 */
995 document_id = hfs_get_document_id(cp);
996
997 if (!document_id && hfs_should_generate_document_id(hfsmp, cp)) {
998 uint32_t new_document_id;
999
1000 error = hfs_generate_document_id(hfsmp, &new_document_id);
1001 if (error)
1002 return error;
1003
1004 error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
1005 if (error)
1006 return error;
1007
1008 bool want_docid_fsevent = false;
1009
1010 // Need to check again now that we have the lock
1011 document_id = hfs_get_document_id(cp);
1012 if (!document_id && hfs_should_generate_document_id(hfsmp, cp)) {
1013 cp->c_attr.ca_finderextendeddirinfo.document_id = document_id = new_document_id;
1014 want_docid_fsevent = true;
1015 SET(cp->c_flag, C_MODIFIED);
1016 }
1017
1018 hfs_unlock(cp);
1019
1020 if (want_docid_fsevent) {
1021 #if CONFIG_FSE
1022 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
1023 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
1024 FSE_ARG_INO, (ino64_t)0, // src inode #
1025 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
1026 FSE_ARG_INT32, document_id,
1027 FSE_ARG_DONE);
1028
1029 if (need_fsevent(FSE_STAT_CHANGED, vp)) {
1030 add_fsevent(FSE_STAT_CHANGED, ap->a_context,
1031 FSE_ARG_VNODE, vp, FSE_ARG_DONE);
1032 }
1033 #endif
1034 }
1035 }
1036 }
1037
1038 vap->va_document_id = document_id;
1039 VATTR_SET_SUPPORTED(vap, va_document_id);
1040 }
1041
1042 /*
1043 * If time attributes are requested and we have cnode times
1044 * that require updating, then acquire an exclusive lock on
1045 * the cnode before updating the times. Otherwise we can
1046 * just acquire a shared lock.
1047 */
1048 if ((vap->va_active & VNODE_ATTR_TIMES) &&
1049 (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime)) {
1050 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
1051 return (error);
1052 hfs_touchtimes(hfsmp, cp);
1053
1054 // downgrade to a shared lock since that's all we need from here on out
1055 cp->c_lockowner = HFS_SHARED_OWNER;
1056 lck_rw_lock_exclusive_to_shared(&cp->c_rwlock);
1057
1058 } else if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
1059 return (error);
1060 }
1061
1062 if (v_type == VDIR) {
1063 data_size = (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE;
1064
1065 if (VATTR_IS_ACTIVE(vap, va_nlink)) {
1066 int nlink;
1067
1068 /*
1069 * For directories, the va_nlink is essentially a count
1070 * of the ".." references to a directory plus the "."
1071 * reference and the directory itself. So for HFS+ this
1072 * becomes the sub-directory count plus two.
1073 *
1074 * In the absence of a sub-directory count we use the
1075 * directory's item count. This will be too high in
1076 * most cases since it also includes files.
1077 */
1078 if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
1079 (cp->c_attr.ca_recflags & kHFSHasFolderCountMask))
1080 nlink = cp->c_attr.ca_dircount; /* implied ".." entries */
1081 else
1082 nlink = cp->c_entries;
1083
1084 /* Account for ourself and our "." entry */
1085 nlink += 2;
1086 /* Hide our private directories. */
1087 if (cp->c_cnid == kHFSRootFolderID) {
1088 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0) {
1089 --nlink;
1090 }
1091 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0) {
1092 --nlink;
1093 }
1094 }
1095 VATTR_RETURN(vap, va_nlink, (u_int64_t)nlink);
1096 }
1097 if (VATTR_IS_ACTIVE(vap, va_nchildren)) {
1098 int entries;
1099
1100 entries = cp->c_entries;
1101 /* Hide our private files and directories. */
1102 if (cp->c_cnid == kHFSRootFolderID) {
1103 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0)
1104 --entries;
1105 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0)
1106 --entries;
1107 if (hfsmp->jnl || ((hfsmp->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))
1108 entries -= 2; /* hide the journal files */
1109 }
1110 VATTR_RETURN(vap, va_nchildren, entries);
1111 }
1112 /*
1113 * The va_dirlinkcount is the count of real directory hard links.
1114 * (i.e. it's not the sum of the implied "." and ".." references)
1115 */
1116 if (VATTR_IS_ACTIVE(vap, va_dirlinkcount)) {
1117 VATTR_RETURN(vap, va_dirlinkcount, (uint32_t)cp->c_linkcount);
1118 }
1119 } else /* !VDIR */ {
1120 data_size = VCTOF(vp, cp)->ff_size;
1121
1122 VATTR_RETURN(vap, va_nlink, (u_int64_t)cp->c_linkcount);
1123 if (VATTR_IS_ACTIVE(vap, va_data_alloc)) {
1124 u_int64_t blocks;
1125
1126 #if HFS_COMPRESSION
1127 if (hide_size) {
1128 VATTR_RETURN(vap, va_data_alloc, 0);
1129 } else if (compressed) {
1130 /* for compressed files, we report all allocated blocks as belonging to the data fork */
1131 blocks = cp->c_blocks;
1132 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
1133 }
1134 else
1135 #endif
1136 {
1137 blocks = VCTOF(vp, cp)->ff_blocks;
1138 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
1139 }
1140 }
1141 }
1142
1143 /* conditional because 64-bit arithmetic can be expensive */
1144 if (VATTR_IS_ACTIVE(vap, va_total_size)) {
1145 if (v_type == VDIR) {
1146 VATTR_RETURN(vap, va_total_size, (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE);
1147 } else {
1148 u_int64_t total_size = ~0ULL;
1149 struct cnode *rcp;
1150 #if HFS_COMPRESSION
1151 if (hide_size) {
1152 /* we're hiding the size of this file, so just return 0 */
1153 total_size = 0;
1154 } else if (compressed) {
1155 if (uncompressed_size == -1) {
1156 /*
1157 * We failed to get the uncompressed size above,
1158 * so we'll fall back to the standard path below
1159 * since total_size is still -1
1160 */
1161 } else {
1162 /* use the uncompressed size we fetched above */
1163 total_size = uncompressed_size;
1164 }
1165 }
1166 #endif
1167 if (total_size == ~0ULL) {
1168 if (cp->c_datafork) {
1169 total_size = cp->c_datafork->ff_size;
1170 }
1171
1172 if (cp->c_blocks - VTOF(vp)->ff_blocks) {
1173 /* We deal with rsrc fork vnode iocount at the end of the function */
1174 error = hfs_vgetrsrc(hfsmp, vp, &rvp);
1175 if (error) {
1176 /*
1177 * Note that we call hfs_vgetrsrc with error_on_unlinked
1178 * set to FALSE. This is because we may be invoked via
1179 * fstat() on an open-unlinked file descriptor and we must
1180 * continue to support access to the rsrc fork until it disappears.
1181 * The code at the end of this function will be
1182 * responsible for releasing the iocount generated by
1183 * hfs_vgetrsrc. This is because we can't drop the iocount
1184 * without unlocking the cnode first.
1185 */
1186 goto out;
1187 }
1188
1189 rcp = VTOC(rvp);
1190 if (rcp && rcp->c_rsrcfork) {
1191 total_size += rcp->c_rsrcfork->ff_size;
1192 }
1193 }
1194 }
1195
1196 VATTR_RETURN(vap, va_total_size, total_size);
1197 }
1198 }
1199 if (VATTR_IS_ACTIVE(vap, va_total_alloc)) {
1200 if (v_type == VDIR) {
1201 VATTR_RETURN(vap, va_total_alloc, 0);
1202 } else {
1203 VATTR_RETURN(vap, va_total_alloc, (u_int64_t)cp->c_blocks * (u_int64_t)hfsmp->blockSize);
1204 }
1205 }
1206
1207 /*
1208 * If the VFS wants extended security data, and we know that we
1209 * don't have any (because it never told us it was setting any)
1210 * then we can return the supported bit and no data. If we do
1211 * have extended security, we can just leave the bit alone and
1212 * the VFS will use the fallback path to fetch it.
1213 */
1214 if (VATTR_IS_ACTIVE(vap, va_acl)) {
1215 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
1216 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
1217 VATTR_SET_SUPPORTED(vap, va_acl);
1218 }
1219 }
1220
1221 vap->va_access_time.tv_sec = cp->c_atime;
1222 vap->va_access_time.tv_nsec = 0;
1223 vap->va_create_time.tv_sec = cp->c_itime;
1224 vap->va_create_time.tv_nsec = 0;
1225 vap->va_modify_time.tv_sec = cp->c_mtime;
1226 vap->va_modify_time.tv_nsec = 0;
1227 vap->va_change_time.tv_sec = cp->c_ctime;
1228 vap->va_change_time.tv_nsec = 0;
1229 vap->va_backup_time.tv_sec = cp->c_btime;
1230 vap->va_backup_time.tv_nsec = 0;
1231
1232 /* See if we need to emit the date added field to the user */
1233 if (VATTR_IS_ACTIVE(vap, va_addedtime)) {
1234 u_int32_t dateadded = hfs_get_dateadded (cp);
1235 if (dateadded) {
1236 vap->va_addedtime.tv_sec = dateadded;
1237 vap->va_addedtime.tv_nsec = 0;
1238 VATTR_SET_SUPPORTED (vap, va_addedtime);
1239 }
1240 }
1241
1242 /* XXX is this really a good 'optimal I/O size'? */
1243 vap->va_iosize = hfsmp->hfs_logBlockSize;
1244 vap->va_uid = cp->c_uid;
1245 vap->va_gid = cp->c_gid;
1246 vap->va_mode = cp->c_mode;
1247 vap->va_flags = cp->c_bsdflags;
1248
1249 /*
1250 * Exporting file IDs from HFS Plus:
1251 *
1252 * For "normal" files the c_fileid is the same value as the
1253 * c_cnid. But for hard link files, they are different - the
1254 * c_cnid belongs to the active directory entry (ie the link)
1255 * and the c_fileid is for the actual inode (ie the data file).
1256 *
1257 * The stat call (getattr) uses va_fileid and the Carbon APIs,
1258 * which are hardlink-ignorant, will ask for va_linkid.
1259 */
1260 vap->va_fileid = (u_int64_t)cp->c_fileid;
1261 /*
1262 * We need to use the origin cache for both hardlinked files
1263 * and directories. Hardlinked directories have multiple cnids
1264 * and parents (one per link). Hardlinked files also have their
1265 * own parents and link IDs separate from the indirect inode number.
1266 * If we don't use the cache, we could end up vending the wrong ID
1267 * because the cnode will only reflect the link that was looked up most recently.
1268 */
1269 if (cp->c_flag & C_HARDLINK) {
1270 vap->va_linkid = (u_int64_t)hfs_currentcnid(cp);
1271 vap->va_parentid = (u_int64_t)hfs_currentparent(cp, /* have_lock: */ true);
1272 } else {
1273 vap->va_linkid = (u_int64_t)cp->c_cnid;
1274 vap->va_parentid = (u_int64_t)cp->c_parentcnid;
1275 }
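/*
 * Illustrative example (hypothetical paths): if /a/link1 and /b/link2 are
 * hard links to the same inode, both report the same va_fileid (the inode's
 * file ID), while each vends its own va_linkid and va_parentid from the
 * origin cache consulted above.
 */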
1276 vap->va_fsid = hfsmp->hfs_raw_dev;
1277 vap->va_filerev = 0;
1278 vap->va_encoding = cp->c_encoding;
1279 vap->va_rdev = (v_type == VBLK || v_type == VCHR) ? cp->c_rdev : 0;
1280 #if HFS_COMPRESSION
1281 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1282 if (hide_size)
1283 vap->va_data_size = 0;
1284 else if (compressed) {
1285 if (uncompressed_size == -1) {
1286 /* failed to get the uncompressed size above, so just return data_size */
1287 vap->va_data_size = data_size;
1288 } else {
1289 /* use the uncompressed size we fetched above */
1290 vap->va_data_size = uncompressed_size;
1291 }
1292 } else
1293 vap->va_data_size = data_size;
1294 VATTR_SET_SUPPORTED(vap, va_data_size);
1295 }
1296 #else
1297 vap->va_data_size = data_size;
1298 vap->va_supported |= VNODE_ATTR_va_data_size;
1299 #endif
1300
1301 #if CONFIG_PROTECT
1302 if (VATTR_IS_ACTIVE(vap, va_dataprotect_class)) {
1303 vap->va_dataprotect_class = cp->c_cpentry ? CP_CLASS(cp->c_cpentry->cp_pclass) : 0;
1304 VATTR_SET_SUPPORTED(vap, va_dataprotect_class);
1305 }
1306 #endif
1307 if (VATTR_IS_ACTIVE(vap, va_write_gencount)) {
1308 if (ubc_is_mapped_writable(vp)) {
1309 /*
1310 * Return 0 to the caller to indicate the file may be
1311 * changing. There is no need for us to increment the
1312 * generation counter here because it gets done as part of
1313 * page-out and also when the file is unmapped (to account
1314 * for changes we might not have seen).
1315 */
1316 vap->va_write_gencount = 0;
1317 } else {
1318 vap->va_write_gencount = hfs_get_gencount(cp);
1319 }
1320
1321 VATTR_SET_SUPPORTED(vap, va_write_gencount);
1322 }
1323
1324 /* Mark them all at once instead of individual VATTR_SET_SUPPORTED calls. */
1325 vap->va_supported |= VNODE_ATTR_va_access_time |
1326 VNODE_ATTR_va_create_time | VNODE_ATTR_va_modify_time |
1327 VNODE_ATTR_va_change_time| VNODE_ATTR_va_backup_time |
1328 VNODE_ATTR_va_iosize | VNODE_ATTR_va_uid |
1329 VNODE_ATTR_va_gid | VNODE_ATTR_va_mode |
1330 VNODE_ATTR_va_flags |VNODE_ATTR_va_fileid |
1331 VNODE_ATTR_va_linkid | VNODE_ATTR_va_parentid |
1332 VNODE_ATTR_va_fsid | VNODE_ATTR_va_filerev |
1333 VNODE_ATTR_va_encoding | VNODE_ATTR_va_rdev;
1334
1335 /* If this is the root, let VFS find out the mount name, which
1336 * may be different from the real name. Otherwise, we need to take care
1337 * of hardlinked files, which may need to be looked up if necessary.
1338 */
1339 if (VATTR_IS_ACTIVE(vap, va_name) && (cp->c_cnid != kHFSRootFolderID)) {
1340 struct cat_desc linkdesc;
1341 int lockflags;
1342 int uselinkdesc = 0;
1343 cnid_t nextlinkid = 0;
1344 cnid_t prevlinkid = 0;
1345
1346 /* Get the name for ATTR_CMN_NAME. We need to take special care for hardlinks
1347 * here because the information for the link ID requested by getattrlist may be
1348 * different from what's currently in the cnode. This is because the cnode
1349 * will be filled in with the information for the most recent link ID that went
1350 * through namei/lookup(). If there are competing lookups for hardlinks that point
1351 * to the same inode, one (or more) getattrlist calls could be handed incorrect name information.
1352 * Also, we need to beware of open-unlinked files, which could have a namelen of 0.
1353 */
1354
1355 if ((cp->c_flag & C_HARDLINK) &&
1356 ((cp->c_desc.cd_namelen == 0) || (vap->va_linkid != cp->c_cnid))) {
1357 /*
1358 * If we have no name and our link ID is the raw inode number, then we may
1359 * have an open-unlinked file. Go to the next link in this case.
1360 */
1361 if ((cp->c_desc.cd_namelen == 0) && (vap->va_linkid == cp->c_fileid)) {
1362 if ((error = hfs_lookup_siblinglinks(hfsmp, vap->va_linkid, &prevlinkid, &nextlinkid))){
1363 goto out;
1364 }
1365 }
1366 else {
1367 /* just use link obtained from vap above */
1368 nextlinkid = vap->va_linkid;
1369 }
1370
1371 /* We need to probe the catalog for the descriptor corresponding to the link ID
1372 * stored in nextlinkid. Note that we don't know if we have the exclusive lock
1373 * for the cnode here, so we can't just update the descriptor. Instead,
1374 * we should just store the descriptor's value locally and then use it to pass
1375 * out the name value as needed below.
1376 */
1377 if (nextlinkid){
1378 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
1379 error = cat_findname(hfsmp, nextlinkid, &linkdesc);
1380 hfs_systemfile_unlock(hfsmp, lockflags);
1381 if (error == 0) {
1382 uselinkdesc = 1;
1383 }
1384 }
1385 }
1386
1387 /* By this point, we've either patched up the name above and the c_desc
1388 * points to the correct data, or it already did, in which case we just proceed
1389 * by copying the name into the vap. Note that we will never set va_name to
1390 * supported if nextlinkid is never initialized. This could happen in the degenerate
1391 * case above involving the raw inode number, where it has no nextlinkid. In this case
1392 * we will simply not mark the name bit as supported.
1393 */
1394 if (uselinkdesc) {
1395 strlcpy(vap->va_name, (const char*) linkdesc.cd_nameptr, MAXPATHLEN);
1396 VATTR_SET_SUPPORTED(vap, va_name);
1397 cat_releasedesc(&linkdesc);
1398 }
1399 else if (cp->c_desc.cd_namelen) {
1400 strlcpy(vap->va_name, (const char*) cp->c_desc.cd_nameptr, MAXPATHLEN);
1401 VATTR_SET_SUPPORTED(vap, va_name);
1402 }
1403 }
1404
1405 out:
1406 hfs_unlock(cp);
1407 /*
1408 * We need to vnode_put the rsrc fork vnode only *after* we've released
1409 * the cnode lock, since vnode_put can trigger an inactive call, which
1410 * will go back into HFS and try to acquire a cnode lock.
1411 */
1412 if (rvp) {
1413 vnode_put (rvp);
1414 }
1415
1416 return (error);
1417 }
1418
1419 int
1420 hfs_vnop_setattr(ap)
1421 struct vnop_setattr_args /* {
1422 struct vnode *a_vp;
1423 struct vnode_attr *a_vap;
1424 vfs_context_t a_context;
1425 } */ *ap;
1426 {
1427 struct vnode_attr *vap = ap->a_vap;
1428 struct vnode *vp = ap->a_vp;
1429 struct cnode *cp = NULL;
1430 struct hfsmount *hfsmp;
1431 kauth_cred_t cred = vfs_context_ucred(ap->a_context);
1432 struct proc *p = vfs_context_proc(ap->a_context);
1433 int error = 0;
1434 uid_t nuid;
1435 gid_t ngid;
1436 time_t orig_ctime;
1437
1438 orig_ctime = VTOC(vp)->c_ctime;
1439
1440 #if HFS_COMPRESSION
1441 int decmpfs_reset_state = 0;
1442 /*
1443 we call decmpfs_update_attributes even if the file is not compressed
1444 because we want to update the incoming flags if the xattrs are invalid
1445 */
1446 error = decmpfs_update_attributes(vp, vap);
1447 if (error)
1448 return error;
1449 #endif
1450 //
1451 // if this is not a size-changing setattr and it is not just
1452 // an atime update, then check for a snapshot.
1453 //
1454 if (!VATTR_IS_ACTIVE(vap, va_data_size) && !(vap->va_active == VNODE_ATTR_va_access_time)) {
1455 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_METADATA_MOD, NSPACE_REARM_NO_ARG);
1456 }
1457
1458 #if CONFIG_PROTECT
1459 /*
1460 * All metadata changes should be allowed except a size-changing setattr, which
1461 * has effects on file content and requires calling into cp_handle_vnop
1462 * to perform the content protection check.
1463 */
1464 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1465 if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
1466 return (error);
1467 }
1468 }
1469 #endif /* CONFIG_PROTECT */
1470
1471 hfsmp = VTOHFS(vp);
1472
1473 /* Don't allow modification of the journal. */
1474 if (hfs_is_journal_file(hfsmp, VTOC(vp))) {
1475 return (EPERM);
1476 }
1477
1478 //
1479 // Check if we'll need a document_id and if so, get it before we lock
1480 // the cnode, to avoid any possible deadlock with the root vnode, which has
1481 // to be locked to get the document id.
1482 //
1483 u_int32_t document_id=0;
1484 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & UF_TRACKED) && !(VTOC(vp)->c_bsdflags & UF_TRACKED)) {
1485 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&(VTOC(vp)->c_attr.ca_finderinfo) + 16);
1486 //
1487 // If the document_id is not set, get a new one. It will be set
1488 // on the file down below once we hold the cnode lock.
1489 //
1490 if (fip->document_id == 0) {
1491 if (hfs_generate_document_id(hfsmp, &document_id) != 0) {
1492 document_id = 0;
1493 }
1494 }
1495 }
1496
1497
1498 /*
1499 * File size change request.
1500 * We are guaranteed that this is not a directory, and that
1501 * the filesystem object is writeable.
1502 *
1503 * NOTE: HFS COMPRESSION depends on the data_size being set *before* the bsd flags are updated
1504 */
1505 VATTR_SET_SUPPORTED(vap, va_data_size);
1506 if (VATTR_IS_ACTIVE(vap, va_data_size) && !vnode_islnk(vp)) {
1507 #if HFS_COMPRESSION
1508 /* keep the compressed state locked until we're done truncating the file */
1509 decmpfs_cnode *dp = VTOCMP(vp);
1510 if (!dp) {
1511 /*
1512 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1513 * is filled in; we need a decmpfs_cnode to lock out decmpfs state changes
1514 * on this file while it's truncating
1515 */
1516 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1517 if (!dp) {
1518 /* failed to allocate a decmpfs_cnode */
1519 return ENOMEM; /* what should this be? */
1520 }
1521 }
1522
1523 check_for_tracked_file(vp, orig_ctime, vap->va_data_size == 0 ? NAMESPACE_HANDLER_TRUNCATE_OP|NAMESPACE_HANDLER_DELETE_OP : NAMESPACE_HANDLER_TRUNCATE_OP, NULL);
1524
1525 decmpfs_lock_compressed_data(dp, 1);
1526 if (hfs_file_is_compressed(VTOC(vp), 1)) {
1527 error = decmpfs_decompress_file(vp, dp, -1/*vap->va_data_size*/, 0, 1);
1528 if (error != 0) {
1529 decmpfs_unlock_compressed_data(dp, 1);
1530 return error;
1531 }
1532 }
1533 #endif
1534
1535 // Take truncate lock
1536 hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
1537
1538 // hfs_truncate will deal with the cnode lock
1539 error = hfs_truncate(vp, vap->va_data_size, vap->va_vaflags & 0xffff,
1540 0, ap->a_context);
1541
1542 hfs_unlock_truncate(VTOC(vp), HFS_LOCK_DEFAULT);
1543 #if HFS_COMPRESSION
1544 decmpfs_unlock_compressed_data(dp, 1);
1545 #endif
1546 if (error)
1547 return error;
1548 }
1549 if (cp == NULL) {
1550 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
1551 return (error);
1552 cp = VTOC(vp);
1553 }
1554
1555 /*
1556 * If it is just an access time update request by itself,
1557 * we know the request is from kernel-level code, and we
1558 * can delay it without being as worried about consistency.
1559 * This change speeds up mmaps, in the rare case that they
1560 * get caught behind a sync.
1561 */
1562
1563 if (vap->va_active == VNODE_ATTR_va_access_time) {
1564 cp->c_touch_acctime=TRUE;
1565 goto out;
1566 }
1567
1568
1569
1570 /*
1571 * Owner/group change request.
1572 * We are guaranteed that the new owner/group is valid and legal.
1573 */
1574 VATTR_SET_SUPPORTED(vap, va_uid);
1575 VATTR_SET_SUPPORTED(vap, va_gid);
1576 nuid = VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uid_t)VNOVAL;
1577 ngid = VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (gid_t)VNOVAL;
1578 if (((nuid != (uid_t)VNOVAL) || (ngid != (gid_t)VNOVAL)) &&
1579 ((error = hfs_chown(vp, nuid, ngid, cred, p)) != 0))
1580 goto out;
1581
1582 /*
1583 * Mode change request.
1584 * We are guaranteed that the mode value is valid and that in
1585 * conjunction with the owner and group, this change is legal.
1586 */
1587 VATTR_SET_SUPPORTED(vap, va_mode);
1588 if (VATTR_IS_ACTIVE(vap, va_mode) &&
1589 ((error = hfs_chmod(vp, (int)vap->va_mode, cred, p)) != 0))
1590 goto out;
1591
1592 /*
1593 * File flags change.
1594 * We are guaranteed that only flags allowed to change given the
1595 * current securelevel are being changed.
1596 */
1597 VATTR_SET_SUPPORTED(vap, va_flags);
1598 if (VATTR_IS_ACTIVE(vap, va_flags)) {
1599 u_int16_t *fdFlags;
1600
1601 #if HFS_COMPRESSION
1602 if ((cp->c_bsdflags ^ vap->va_flags) & UF_COMPRESSED) {
1603 /*
1604 * the UF_COMPRESSED flag was toggled, so reset our cached compressed state,
1605 * but we don't want to actually do the update until we've released the cnode lock down below.
1606 * NOTE: turning the flag off doesn't actually decompress the file, so that we can
1607 * turn off the flag and look at the "raw" file for debugging purposes
1608 */
1609 decmpfs_reset_state = 1;
1610 }
1611 #endif
1612 if ((vap->va_flags & UF_TRACKED) && !(cp->c_bsdflags & UF_TRACKED)) {
1613 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
1614
1615 //
1616 // we're marking this item UF_TRACKED. if the document_id is
1617 // not set, get a new one and put it on the file.
1618 //
1619 if (fip->document_id == 0) {
1620 if (document_id != 0) {
1621 // printf("SETATTR: assigning doc-id %d to %s (ino %d)\n", document_id, vp->v_name, cp->c_desc.cd_cnid);
1622 fip->document_id = (uint32_t)document_id;
1623 #if CONFIG_FSE
1624 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
1625 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
1626 FSE_ARG_INO, (ino64_t)0, // src inode #
1627 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
1628 FSE_ARG_INT32, document_id,
1629 FSE_ARG_DONE);
1630 #endif
1631 } else {
1632 // printf("hfs: could not acquire a new document_id for %s (ino %d)\n", vp->v_name, cp->c_desc.cd_cnid);
1633 }
1634 }
1635
1636 } else if (!(vap->va_flags & UF_TRACKED) && (cp->c_bsdflags & UF_TRACKED)) {
1637 //
1638 // UF_TRACKED is being cleared so clear the document_id
1639 //
1640 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
1641 if (fip->document_id) {
1642 // printf("SETATTR: clearing doc-id %d from %s (ino %d)\n", fip->document_id, vp->v_name, cp->c_desc.cd_cnid);
1643 #if CONFIG_FSE
1644 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
1645 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
1646 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
1647 FSE_ARG_INO, (ino64_t)0, // dst inode #
1648 FSE_ARG_INT32, fip->document_id, // document id
1649 FSE_ARG_DONE);
1650 #endif
1651 fip->document_id = 0;
1652 cp->c_bsdflags &= ~UF_TRACKED;
1653 }
1654 }
1655
1656 cp->c_bsdflags = vap->va_flags;
1657 cp->c_flag |= C_MODIFIED;
1658 cp->c_touch_chgtime = TRUE;
1659
1660
1661 /*
1662 * Mirror the UF_HIDDEN flag to the invisible bit of the Finder Info.
1663 *
1664 * The fdFlags for files and frFlags for folders are both 8 bytes
1665 * into the userInfo (the first 16 bytes of the Finder Info). They
1666 * are both 16-bit fields.
1667 */
1668 fdFlags = (u_int16_t *) &cp->c_finderinfo[8];
1669 if (vap->va_flags & UF_HIDDEN)
1670 *fdFlags |= OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1671 else
1672 *fdFlags &= ~OSSwapHostToBigConstInt16(kFinderInvisibleMask);
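/*
 * For example (per the layout described above): kFinderInvisibleMask lives in
 * the 16-bit fdFlags/frFlags word stored big-endian at byte offset 8 of the
 * Finder Info, hence the OSSwapHostToBigConstInt16() swap before the bit is
 * set or cleared.
 */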
1673 }
1674
1675 /*
1676 * Timestamp updates.
1677 */
1678 VATTR_SET_SUPPORTED(vap, va_create_time);
1679 VATTR_SET_SUPPORTED(vap, va_access_time);
1680 VATTR_SET_SUPPORTED(vap, va_modify_time);
1681 VATTR_SET_SUPPORTED(vap, va_backup_time);
1682 VATTR_SET_SUPPORTED(vap, va_change_time);
1683 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
1684 VATTR_IS_ACTIVE(vap, va_access_time) ||
1685 VATTR_IS_ACTIVE(vap, va_modify_time) ||
1686 VATTR_IS_ACTIVE(vap, va_backup_time)) {
1687 if (VATTR_IS_ACTIVE(vap, va_create_time))
1688 cp->c_itime = vap->va_create_time.tv_sec;
1689 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
1690 cp->c_atime = vap->va_access_time.tv_sec;
1691 cp->c_touch_acctime = FALSE;
1692 }
1693 if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
1694 cp->c_mtime = vap->va_modify_time.tv_sec;
1695 cp->c_touch_modtime = FALSE;
1696 cp->c_touch_chgtime = TRUE;
1697
1698 hfs_clear_might_be_dirty_flag(cp);
1699
1700 /*
1701 * The utimes system call can reset the modification
1702 * time but it doesn't know about HFS create times.
1703 * So we need to ensure that the creation time is
1704 * always at least as old as the modification time.
1705 */
1706 if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
1707 (cp->c_cnid != kHFSRootFolderID) &&
1708 !VATTR_IS_ACTIVE(vap, va_create_time) &&
1709 (cp->c_mtime < cp->c_itime)) {
1710 cp->c_itime = cp->c_mtime;
1711 }
1712 }
1713 if (VATTR_IS_ACTIVE(vap, va_backup_time))
1714 cp->c_btime = vap->va_backup_time.tv_sec;
1715 cp->c_flag |= C_MINOR_MOD;
1716 }
1717
1718 /*
1719 * Set name encoding.
1720 */
1721 VATTR_SET_SUPPORTED(vap, va_encoding);
1722 if (VATTR_IS_ACTIVE(vap, va_encoding)) {
1723 cp->c_encoding = vap->va_encoding;
1724 cp->c_flag |= C_MODIFIED;
1725 hfs_setencodingbits(hfsmp, cp->c_encoding);
1726 }
1727
1728 if ((error = hfs_update(vp, 0)) != 0)
1729 goto out;
1730 out:
1731 if (cp) {
1732 /* Purge origin cache for cnode, since caller now has correct link ID for it
1733 * We purge it here since it was acquired for us during lookup, and we no longer need it.
1734 */
1735 if ((cp->c_flag & C_HARDLINK) && (vp->v_type != VDIR)){
1736 hfs_relorigin(cp, 0);
1737 }
1738
1739 hfs_unlock(cp);
1740 #if HFS_COMPRESSION
1741 if (decmpfs_reset_state) {
1742 /*
1743 * we've changed the UF_COMPRESSED flag, so reset the decmpfs state for this cnode
1744 * but don't do it while holding the hfs cnode lock
1745 */
1746 decmpfs_cnode *dp = VTOCMP(vp);
1747 if (!dp) {
1748 /*
1749 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1750 * is filled in; we need a decmpfs_cnode to prevent decmpfs state changes
1751 * on this file if it's locked
1752 */
1753 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1754 if (!dp) {
1755 /* failed to allocate a decmpfs_cnode */
1756 return ENOMEM; /* what should this be? */
1757 }
1758 }
1759 decmpfs_cnode_set_vnode_state(dp, FILE_TYPE_UNKNOWN, 0);
1760 }
1761 #endif
1762 }
1763 return (error);
1764 }
1765
1766
1767 /*
1768 * Change the mode on a file.
1769 * cnode must be locked before calling.
1770 */
1771 int
1772 hfs_chmod(struct vnode *vp, int mode, __unused kauth_cred_t cred, __unused struct proc *p)
1773 {
1774 register struct cnode *cp = VTOC(vp);
1775
1776 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1777 return (0);
1778
1779 // Don't allow modification of the journal or journal_info_block
1780 if (hfs_is_journal_file(VTOHFS(vp), cp)) {
1781 return EPERM;
1782 }
1783
1784 #if OVERRIDE_UNKNOWN_PERMISSIONS
1785 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS) {
1786 return (0);
1787 };
1788 #endif
1789
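/*
 * Apply only the permission bits from the new mode; the file-type bits
 * in c_mode are preserved. A permissions-only change is recorded as a
 * minor modification.
 */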
1790 mode_t new_mode = (cp->c_mode & ~ALLPERMS) | (mode & ALLPERMS);
1791 if (new_mode != cp->c_mode) {
1792 cp->c_mode = new_mode;
1793 cp->c_flag |= C_MINOR_MOD;
1794 }
1795 cp->c_touch_chgtime = TRUE;
1796 return (0);
1797 }
1798
1799
1800 int
1801 hfs_write_access(struct vnode *vp, kauth_cred_t cred, struct proc *p, Boolean considerFlags)
1802 {
1803 struct cnode *cp = VTOC(vp);
1804 int retval = 0;
1805 int is_member;
1806
1807 /*
1808 * Disallow write attempts on read-only file systems;
1809 * unless the file is a socket, fifo, or a block or
1810 * character device resident on the file system.
1811 */
1812 switch (vnode_vtype(vp)) {
1813 case VDIR:
1814 case VLNK:
1815 case VREG:
1816 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
1817 return (EROFS);
1818 break;
1819 default:
1820 break;
1821 }
1822
1823 /* If immutable bit set, nobody gets to write it. */
1824 if (considerFlags && (cp->c_bsdflags & IMMUTABLE))
1825 return (EPERM);
1826
1827 /* Otherwise, user id 0 always gets access. */
1828 if (!suser(cred, NULL))
1829 return (0);
1830
1831 /* Otherwise, check the owner. */
1832 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
1833 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
1834
1835 /* Otherwise, check the groups. */
1836 if (kauth_cred_ismember_gid(cred, cp->c_gid, &is_member) == 0 && is_member) {
1837 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
1838 }
1839
1840 /* Otherwise, check everyone else. */
1841 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
1842 }
1843
1844
1845 /*
1846 * Perform chown operation on cnode cp;
1847 * cnode must be locked prior to call.
1848 */
1849 int
1850 #if !QUOTA
1851 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, __unused kauth_cred_t cred,
1852 __unused struct proc *p)
1853 #else
1854 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred,
1855 __unused struct proc *p)
1856 #endif
1857 {
1858 register struct cnode *cp = VTOC(vp);
1859 uid_t ouid;
1860 gid_t ogid;
1861 #if QUOTA
1862 int error = 0;
1863 register int i;
1864 int64_t change;
1865 #endif /* QUOTA */
1866
1867 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1868 return (ENOTSUP);
1869
1870 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS)
1871 return (0);
1872
1873 if (uid == (uid_t)VNOVAL)
1874 uid = cp->c_uid;
1875 if (gid == (gid_t)VNOVAL)
1876 gid = cp->c_gid;
1877
1878 #if 0 /* we are guaranteed that this is already the case */
1879 /*
1880 * If we don't own the file, are trying to change the owner
1881 * of the file, or are not a member of the target group,
1882 * the caller must be superuser or the call fails.
1883 */
1884 if ((kauth_cred_getuid(cred) != cp->c_uid || uid != cp->c_uid ||
1885 (gid != cp->c_gid &&
1886 (kauth_cred_ismember_gid(cred, gid, &is_member) || !is_member))) &&
1887 (error = suser(cred, 0)))
1888 return (error);
1889 #endif
1890
1891 ogid = cp->c_gid;
1892 ouid = cp->c_uid;
1893
1894 if (ouid == uid && ogid == gid) {
1895 // No change, just set change time
1896 cp->c_touch_chgtime = TRUE;
1897 return 0;
1898 }
1899
1900 #if QUOTA
1901 if ((error = hfs_getinoquota(cp)))
1902 return (error);
1903 if (ouid == uid) {
1904 dqrele(cp->c_dquot[USRQUOTA]);
1905 cp->c_dquot[USRQUOTA] = NODQUOT;
1906 }
1907 if (ogid == gid) {
1908 dqrele(cp->c_dquot[GRPQUOTA]);
1909 cp->c_dquot[GRPQUOTA] = NODQUOT;
1910 }
1911
1912 /*
1913 * Eventually we need to account for (i.e. fake) a block per directory:
1914 * if (vnode_isdir(vp))
1915 * change = VTOHFS(vp)->blockSize;
1916 * else
1917 */
1918
1919 change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
1920 (void) hfs_chkdq(cp, -change, cred, CHOWN);
1921 (void) hfs_chkiq(cp, -1, cred, CHOWN);
1922 for (i = 0; i < MAXQUOTAS; i++) {
1923 dqrele(cp->c_dquot[i]);
1924 cp->c_dquot[i] = NODQUOT;
1925 }
1926 #endif /* QUOTA */
1927 cp->c_gid = gid;
1928 cp->c_uid = uid;
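/*
 * On quota-enabled builds, re-attach the quota structures for the new
 * owner/group and charge the blocks and inode to them. If any step
 * fails, restore the original IDs and re-charge the old owner so the
 * accounting stays consistent.
 */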
1929 #if QUOTA
1930 if ((error = hfs_getinoquota(cp)) == 0) {
1931 if (ouid == uid) {
1932 dqrele(cp->c_dquot[USRQUOTA]);
1933 cp->c_dquot[USRQUOTA] = NODQUOT;
1934 }
1935 if (ogid == gid) {
1936 dqrele(cp->c_dquot[GRPQUOTA]);
1937 cp->c_dquot[GRPQUOTA] = NODQUOT;
1938 }
1939 if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
1940 if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
1941 goto good;
1942 else
1943 (void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
1944 }
1945 for (i = 0; i < MAXQUOTAS; i++) {
1946 dqrele(cp->c_dquot[i]);
1947 cp->c_dquot[i] = NODQUOT;
1948 }
1949 }
1950 cp->c_gid = ogid;
1951 cp->c_uid = ouid;
1952 if (hfs_getinoquota(cp) == 0) {
1953 if (ouid == uid) {
1954 dqrele(cp->c_dquot[USRQUOTA]);
1955 cp->c_dquot[USRQUOTA] = NODQUOT;
1956 }
1957 if (ogid == gid) {
1958 dqrele(cp->c_dquot[GRPQUOTA]);
1959 cp->c_dquot[GRPQUOTA] = NODQUOT;
1960 }
1961 (void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
1962 (void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
1963 (void) hfs_getinoquota(cp);
1964 }
1965 return (error);
1966 good:
1967 if (hfs_getinoquota(cp))
1968 panic("hfs_chown: lost quota");
1969 #endif /* QUOTA */
1970
1971 /*
1972 * Without quotas, we could probably make this a minor
1973 * modification.
1974 */
1975 cp->c_flag |= C_MODIFIED;
1976
1977 /*
1978 * According to the SUSv3 Standard, chown() shall mark
1979 * for update the st_ctime field of the file.
1980 * (No exceptions mentioned)
1981 */
1982 cp->c_touch_chgtime = TRUE;
1983 return (0);
1984 }
1985
1986 #if HFS_COMPRESSION
1987 /*
1988 * Flush the resource fork if it exists. vp is the data fork and has
1989 * an iocount.
1990 */
1991 static int hfs_flush_rsrc(vnode_t vp, vfs_context_t ctx)
1992 {
1993 cnode_t *cp = VTOC(vp);
1994
1995 hfs_lock(cp, HFS_SHARED_LOCK, 0);
1996
1997 vnode_t rvp = cp->c_rsrc_vp;
1998
1999 if (!rvp) {
2000 hfs_unlock(cp);
2001 return 0;
2002 }
2003
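/*
 * Remember the vid before dropping the cnode lock; vnode_getwithvid
 * below will fail if the resource vnode is recycled while we are
 * unlocked, so we never operate on a stale vnode.
 */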
2004 int vid = vnode_vid(rvp);
2005
2006 hfs_unlock(cp);
2007
2008 int error = vnode_getwithvid(rvp, vid);
2009
2010 if (error)
2011 return error == ENOENT ? 0 : error;
2012
2013 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, 0);
2014 hfs_lock_always(cp, HFS_EXCLUSIVE_LOCK);
2015 hfs_filedone(rvp, ctx, HFS_FILE_DONE_NO_SYNC);
2016 hfs_unlock(cp);
2017 hfs_unlock_truncate(cp, 0);
2018
2019 error = ubc_msync(rvp, 0, ubc_getsize(rvp), NULL,
2020 UBC_PUSHALL | UBC_SYNC);
2021
2022 vnode_put(rvp);
2023
2024 return error;
2025 }
2026 #endif // HFS_COMPRESSION
2027
2028 /*
2029 * hfs_vnop_exchange:
2030 *
2031 * Inputs:
2032 * 'from' vnode/cnode
2033 * 'to' vnode/cnode
2034 * options flag bits
2035 * vfs_context
2036 *
2037 * Discussion:
2038 * hfs_vnop_exchange is used to service the exchangedata(2) system call.
2039 * Per the requirements of that system call, this function "swaps" some
2040 * of the information that lives in one catalog record for some that
2041 * lives in another. Note that not everything is swapped; in particular,
2042 * the extent information stored in each cnode is kept local to that
2043 * cnode. This allows existing file descriptor references to continue
2044 * to operate on the same content, regardless of the location in the
2045 * namespace that the file may have moved to. See inline comments
2046 * in the function for more information.
2047 */
2048 int
2049 hfs_vnop_exchange(ap)
2050 struct vnop_exchange_args /* {
2051 struct vnode *a_fvp;
2052 struct vnode *a_tvp;
2053 int a_options;
2054 vfs_context_t a_context;
2055 } */ *ap;
2056 {
2057 struct vnode *from_vp = ap->a_fvp;
2058 struct vnode *to_vp = ap->a_tvp;
2059 struct cnode *from_cp;
2060 struct cnode *to_cp;
2061 struct hfsmount *hfsmp;
2062 struct cat_desc tempdesc;
2063 struct cat_attr tempattr;
2064 const unsigned char *from_nameptr;
2065 const unsigned char *to_nameptr;
2066 char from_iname[32];
2067 char to_iname[32];
2068 uint32_t to_flag_special;
2069 uint32_t from_flag_special;
2070 cnid_t from_parid;
2071 cnid_t to_parid;
2072 int lockflags;
2073 int error = 0, started_tr = 0, got_cookie = 0;
2074 cat_cookie_t cookie;
2075 time_t orig_from_ctime, orig_to_ctime;
2076 bool have_cnode_locks = false, have_from_trunc_lock = false, have_to_trunc_lock = false;
2077
2078 /*
2079 * VFS does the following checks:
2080 * 1. Validate that both are files.
2081 * 2. Validate that both are on the same mount.
2082 * 3. Validate that they're not the same vnode.
2083 */
2084
2085 from_cp = VTOC(from_vp);
2086 to_cp = VTOC(to_vp);
2087 hfsmp = VTOHFS(from_vp);
2088
2089 orig_from_ctime = from_cp->c_ctime;
2090 orig_to_ctime = to_cp->c_ctime;
2091
2092 #if CONFIG_PROTECT
2093 /*
2094 * Do not allow exchangedata/F_MOVEDATAEXTENTS on data-protected filesystems
2095 * because the EAs will not be swapped. As a result, the persistent keys would not
2096 * match and the files will be garbage.
2097 */
2098 if (cp_fs_protected (vnode_mount(from_vp))) {
2099 return EINVAL;
2100 }
2101 #endif
2102
2103 #if HFS_COMPRESSION
2104 if (!ISSET(ap->a_options, FSOPT_EXCHANGE_DATA_ONLY)) {
2105 if ( hfs_file_is_compressed(from_cp, 0) ) {
2106 if ( 0 != ( error = decmpfs_decompress_file(from_vp, VTOCMP(from_vp), -1, 0, 1) ) ) {
2107 return error;
2108 }
2109 }
2110
2111 if ( hfs_file_is_compressed(to_cp, 0) ) {
2112 if ( 0 != ( error = decmpfs_decompress_file(to_vp, VTOCMP(to_vp), -1, 0, 1) ) ) {
2113 return error;
2114 }
2115 }
2116 }
2117 #endif // HFS_COMPRESSION
2118
2119 // Resource forks cannot be exchanged.
2120 if (VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp))
2121 return EINVAL;
2122
2123 /*
2124 * Normally, we want to notify the user handlers about the event,
2125 * except if it's a handler driving the event.
2126 */
2127 if ((ap->a_options & FSOPT_EXCHANGE_DATA_ONLY) == 0) {
2128 check_for_tracked_file(from_vp, orig_from_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
2129 check_for_tracked_file(to_vp, orig_to_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
2130 } else {
2131 /*
2132 * This is currently used by mtmd so we should tidy up the
2133 * file now because the data won't be used again in the
2134 * destination file.
2135 */
2136 hfs_lock_truncate(from_cp, HFS_EXCLUSIVE_LOCK, 0);
2137 hfs_lock_always(from_cp, HFS_EXCLUSIVE_LOCK);
2138 hfs_filedone(from_vp, ap->a_context, HFS_FILE_DONE_NO_SYNC);
2139 hfs_unlock(from_cp);
2140 hfs_unlock_truncate(from_cp, 0);
2141
2142 // Flush all the data from the source file
2143 error = ubc_msync(from_vp, 0, ubc_getsize(from_vp), NULL,
2144 UBC_PUSHALL | UBC_SYNC);
2145 if (error)
2146 goto exit;
2147
2148 #if HFS_COMPRESSION
2149 /*
2150 * If this is a compressed file, we need to do the same for
2151 * the resource fork.
2152 */
2153 if (ISSET(from_cp->c_bsdflags, UF_COMPRESSED)) {
2154 error = hfs_flush_rsrc(from_vp, ap->a_context);
2155 if (error)
2156 goto exit;
2157 }
2158 #endif
2159
2160 /*
2161 * We're doing a data-swap so we need to take the truncate
2162 * lock exclusively. We need an exclusive lock because we
2163 * will be completely truncating the source file and we must
2164 * make sure nobody else sneaks in and tries to issue I/O
2165 * whilst we don't have the cnode lock.
2166 *
2167 * After taking the truncate lock we do a quick check to
2168 * verify there are no other references (including mmap
2169 * references), but we must remember that this does not stop
2170 * anybody coming in later and taking a reference. We will
2171 * have the truncate lock exclusively so that will prevent
2172 * them from issuing any I/O.
2173 */
2174
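/*
 * Take the two truncate locks in cnode address order so we cannot
 * deadlock with another thread locking the same pair in the opposite
 * order; the remaining lock is taken after the in-use check below.
 */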
2175 if (to_cp < from_cp) {
2176 hfs_lock_truncate(to_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2177 have_to_trunc_lock = true;
2178 }
2179
2180 hfs_lock_truncate(from_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2181 have_from_trunc_lock = true;
2182
2183 /*
2184 * Do an early check to verify the source is not in use by
2185 * anyone. We should be called from an FD opened as F_EVTONLY
2186 * so that doesn't count as a reference.
2187 */
2188 if (vnode_isinuse(from_vp, 0)) {
2189 error = EBUSY;
2190 goto exit;
2191 }
2192
2193 if (to_cp >= from_cp) {
2194 hfs_lock_truncate(to_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2195 have_to_trunc_lock = true;
2196 }
2197 }
2198
2199 if ((error = hfs_lockpair(from_cp, to_cp, HFS_EXCLUSIVE_LOCK)))
2200 goto exit;
2201 have_cnode_locks = true;
2202
2203 // Don't allow modification of the journal or journal_info_block
2204 if (hfs_is_journal_file(hfsmp, from_cp) ||
2205 hfs_is_journal_file(hfsmp, to_cp)) {
2206 error = EPERM;
2207 goto exit;
2208 }
2209
2210 /*
2211 * Ok, now that all of the pre-flighting is done, call the underlying
2212 * function if needed.
2213 */
2214 if (ISSET(ap->a_options, FSOPT_EXCHANGE_DATA_ONLY)) {
2215 #if HFS_COMPRESSION
2216 if (ISSET(from_cp->c_bsdflags, UF_COMPRESSED)) {
2217 error = hfs_move_compressed(from_cp, to_cp);
2218 goto exit;
2219 }
2220 #endif
2221
2222 error = hfs_move_data(from_cp, to_cp, 0);
2223 goto exit;
2224 }
2225
2226 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2227 goto exit;
2228 }
2229 started_tr = 1;
2230
2231 /*
2232 * Reserve some space in the Catalog file.
2233 */
2234 if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, vfs_context_proc(ap->a_context)))) {
2235 goto exit;
2236 }
2237 got_cookie = 1;
2238
2239 /* The backend code always tries to delete the virtual
2240 * extent id for exchanging files so we need to lock
2241 * the extents b-tree.
2242 */
2243 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
2244
2245 /* Account for the location of the catalog objects. */
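/*
 * For hardlinks, the real catalog record is the inode entry in the
 * private metadata directory, named after the link reference, so use
 * that name and parent instead of the visible ones.
 */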
2246 if (from_cp->c_flag & C_HARDLINK) {
2247 MAKE_INODE_NAME(from_iname, sizeof(from_iname),
2248 from_cp->c_attr.ca_linkref);
2249 from_nameptr = (unsigned char *)from_iname;
2250 from_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2251 from_cp->c_hint = 0;
2252 } else {
2253 from_nameptr = from_cp->c_desc.cd_nameptr;
2254 from_parid = from_cp->c_parentcnid;
2255 }
2256 if (to_cp->c_flag & C_HARDLINK) {
2257 MAKE_INODE_NAME(to_iname, sizeof(to_iname),
2258 to_cp->c_attr.ca_linkref);
2259 to_nameptr = (unsigned char *)to_iname;
2260 to_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2261 to_cp->c_hint = 0;
2262 } else {
2263 to_nameptr = to_cp->c_desc.cd_nameptr;
2264 to_parid = to_cp->c_parentcnid;
2265 }
2266
2267 /*
2268 * ExchangeFileIDs swaps the on-disk, or in-BTree extent information
2269 * attached to two different file IDs. It also swaps the extent
2270 * information that may live in the extents-overflow B-Tree.
2271 *
2272 * We do this in a transaction as this may require a lot of B-Tree nodes
2273 * to do completely, particularly if one of the files in question
2274 * has a lot of extents.
2275 *
2276 * For example, assume "file1" has fileID 50, and "file2" has fileID 52.
2277 * For the on-disk records, which are assumed to be synced, we will
2278 * first swap the resident inline-8 extents as part of the catalog records.
2279 * Then we will swap any extents overflow records for each file.
2280 *
2281 * When ExchangeFileIDs returns successfully, "file1" will have fileID 52,
2282 * and "file2" will have fileID 50. However, note that this is only
2283 * approximately half of the work that exchangedata(2) will need to
2284 * accomplish. In other words, we swap "too much" of the information
2285 * because if we only called ExchangeFileIDs, both the fileID and the extent
2286 * information would end up swapped. We don't actually want that; we want
2287 * to conclude with "file1" still having file ID 50, and "file2" still
2288 * having fileID 52.
2289 *
2290 * The remainder of hfs_vnop_exchange will swap the file ID and other cnode
2291 * data back to the proper ownership, while still allowing the cnode to remain
2292 * pointing at the same set of extents that it did originally.
2293 */
2294 error = ExchangeFileIDs(hfsmp, from_nameptr, to_nameptr, from_parid,
2295 to_parid, from_cp->c_hint, to_cp->c_hint);
2296 hfs_systemfile_unlock(hfsmp, lockflags);
2297
2298 /*
2299 * Note that we don't need to exchange any extended attributes
2300 * since the attributes are keyed by file ID.
2301 */
2302
2303 if (error != E_NONE) {
2304 error = MacToVFSError(error);
2305 goto exit;
2306 }
2307
2308 /* Purge the vnodes from the name cache */
2309 if (from_vp)
2310 cache_purge(from_vp);
2311 if (to_vp)
2312 cache_purge(to_vp);
2313
2314 /* Bump both source and destination write counts before any swaps. */
2315 {
2316 hfs_incr_gencount (from_cp);
2317 hfs_incr_gencount (to_cp);
2318 }
2319
2320 /* Save a copy of "from" attributes before swapping. */
2321 bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
2322 bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));
2323
2324 /* Save whether or not each cnode is a hardlink or has EAs */
2325 from_flag_special = from_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
2326 to_flag_special = to_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
2327
2328 /* Drop the special bits from each cnode */
2329 from_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
2330 to_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
2331
2332 /*
2333 * Now complete the in-memory portion of the copy.
2334 *
2335 * ExchangeFileIDs swaps the on-disk records involved. We complete the
2336 * operation by swapping the in-memory contents of the two files here.
2337 * We swap the cnode descriptors, which contain name, BSD attributes,
2338 * timestamps, etc, about the file.
2339 *
2340 * NOTE: We do *NOT* swap the fileforks of the two cnodes. We have
2341 * already swapped the on-disk extent information. As long as we swap the
2342 * IDs, the in-line resident 8 extents that live in the filefork data
2343 * structure will point to the right data for the new file ID if we leave
2344 * them alone.
2345 *
2346 * As a result, any file descriptor that points to a particular
2347 * vnode (even though it should change names), will continue
2348 * to point to the same content.
2349 */
2350
2351 /* Copy the "to" -> "from" cnode */
2352 bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));
2353
2354 from_cp->c_hint = 0;
2355 /*
2356 * If 'to' was a hardlink, then we copied over its link ID/CNID/(namespace ID)
2357 * when we bcopy'd the descriptor above. However, the cnode attributes
2358 * are not bcopied. As a result, make sure to swap the file IDs of each item.
2359 *
2360 * Further, other hardlink attributes must be moved along in this swap:
2361 * the linkcount, the linkref, and the firstlink all need to move
2362 * along with the file IDs. See note below regarding the flags and
2363 * what moves vs. what does not.
2364 *
2365 * For Reference:
2366 * linkcount == total # of hardlinks.
2367 * linkref == the indirect inode pointer.
2368 * firstlink == the first hardlink in the chain (written to the raw inode).
2369 * These three are tied to the fileID and must move along with the rest of the data.
2370 */
2371 from_cp->c_fileid = to_cp->c_attr.ca_fileid;
2372
2373 from_cp->c_itime = to_cp->c_itime;
2374 from_cp->c_btime = to_cp->c_btime;
2375 from_cp->c_atime = to_cp->c_atime;
2376 from_cp->c_ctime = to_cp->c_ctime;
2377 from_cp->c_gid = to_cp->c_gid;
2378 from_cp->c_uid = to_cp->c_uid;
2379 from_cp->c_bsdflags = to_cp->c_bsdflags;
2380 from_cp->c_mode = to_cp->c_mode;
2381 from_cp->c_linkcount = to_cp->c_linkcount;
2382 from_cp->c_attr.ca_linkref = to_cp->c_attr.ca_linkref;
2383 from_cp->c_attr.ca_firstlink = to_cp->c_attr.ca_firstlink;
2384
2385 /*
2386 * The cnode flags need to stay with the cnode and not get transferred
2387 * over along with everything else because they describe the content; they are
2388 * not attributes that reflect changes specific to the file ID. In general,
2389 * fields that are tied to the file ID are the ones that will move.
2390 *
2391 * This reflects the fact that the file may have borrowed blocks, dirty metadata,
2392 * or other extents, which may not yet have been written to the catalog. If
2393 * they were, they would have been transferred in the ExchangeFileIDs call above...
2394 *
2395 * The flags that are special are:
2396 * C_HARDLINK, C_HASXATTRS
2397 *
2398 * These flags move with the item and file ID in the namespace since their
2399 * state is tied to that of the file ID.
2400 *
2401 * So to transfer the flags, we have to take the following steps
2402 * 1) Store in a localvar whether or not the special bits are set.
2403 * 2) Drop the special bits from the current flags
2404 * 3) swap the special flag bits to their destination
2405 */
2406 from_cp->c_flag |= to_flag_special | C_MODIFIED;
2407 from_cp->c_attr.ca_recflags = to_cp->c_attr.ca_recflags;
2408 bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);
2409
2410
2411 /* Copy the "from" -> "to" cnode */
2412 bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
2413 to_cp->c_hint = 0;
2414 /*
2415 * Pull the file ID from the tempattr we copied above. We can't assume
2416 * it is the same as the CNID.
2417 */
2418 to_cp->c_fileid = tempattr.ca_fileid;
2419 to_cp->c_itime = tempattr.ca_itime;
2420 to_cp->c_btime = tempattr.ca_btime;
2421 to_cp->c_atime = tempattr.ca_atime;
2422 to_cp->c_ctime = tempattr.ca_ctime;
2423 to_cp->c_gid = tempattr.ca_gid;
2424 to_cp->c_uid = tempattr.ca_uid;
2425 to_cp->c_bsdflags = tempattr.ca_flags;
2426 to_cp->c_mode = tempattr.ca_mode;
2427 to_cp->c_linkcount = tempattr.ca_linkcount;
2428 to_cp->c_attr.ca_linkref = tempattr.ca_linkref;
2429 to_cp->c_attr.ca_firstlink = tempattr.ca_firstlink;
2430
2431 /*
2432 * Only OR in the "from" flags into our cnode flags below.
2433 * Leave the rest of the flags alone.
2434 */
2435 to_cp->c_flag |= from_flag_special | C_MODIFIED;
2436
2437 to_cp->c_attr.ca_recflags = tempattr.ca_recflags;
2438 bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);
2439
2440
2441 /* Rehash the cnodes using their new file IDs */
2442 hfs_chash_rehash(hfsmp, from_cp, to_cp);
2443
2444 /*
2445 * When a file moves out of "Cleanup At Startup"
2446 * we can drop its NODUMP status.
2447 */
2448 if ((from_cp->c_bsdflags & UF_NODUMP) &&
2449 (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
2450 from_cp->c_bsdflags &= ~UF_NODUMP;
2451 from_cp->c_touch_chgtime = TRUE;
2452 }
2453 if ((to_cp->c_bsdflags & UF_NODUMP) &&
2454 (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
2455 to_cp->c_bsdflags &= ~UF_NODUMP;
2456 to_cp->c_touch_chgtime = TRUE;
2457 }
2458
2459 exit:
2460 if (got_cookie) {
2461 cat_postflight(hfsmp, &cookie, vfs_context_proc(ap->a_context));
2462 }
2463 if (started_tr) {
2464 hfs_end_transaction(hfsmp);
2465 }
2466
2467 if (have_cnode_locks)
2468 hfs_unlockpair(from_cp, to_cp);
2469
2470 if (have_from_trunc_lock)
2471 hfs_unlock_truncate(from_cp, 0);
2472
2473 if (have_to_trunc_lock)
2474 hfs_unlock_truncate(to_cp, 0);
2475
2476 return (error);
2477 }
2478
2479 #if HFS_COMPRESSION
2480 /*
2481 * This function is used specifically for the case when a namespace
2482 * handler is trying to steal data before it's deleted. Note that we
2483 * don't bother deleting the xattr from the source because it will get
2484 * deleted a short time later anyway.
2485 *
2486 * cnodes must be locked
2487 */
2488 static int hfs_move_compressed(cnode_t *from_cp, cnode_t *to_cp)
2489 {
2490 int ret;
2491 void *data = NULL;
2492
2493 CLR(from_cp->c_bsdflags, UF_COMPRESSED);
2494 SET(from_cp->c_flag, C_MODIFIED);
2495
2496 ret = hfs_move_data(from_cp, to_cp, HFS_MOVE_DATA_INCLUDE_RSRC);
2497 if (ret)
2498 goto exit;
2499
2500 /*
2501 * Transfer the xattr that decmpfs uses. Ideally, this code
2502 * should be with the other decmpfs code but it's file system
2503 * agnostic and this path is currently, and likely to remain, HFS+
2504 * specific. It's easier and more performant if we implement it
2505 * here.
2506 */
2507
2508 size_t size = MAX_DECMPFS_XATTR_SIZE;
2509 MALLOC(data, void *, size, M_TEMP, M_WAITOK);
2510
2511 ret = hfs_xattr_read(from_cp->c_vp, DECMPFS_XATTR_NAME, data, &size);
2512 if (ret)
2513 goto exit;
2514
2515 ret = hfs_xattr_write(to_cp->c_vp, DECMPFS_XATTR_NAME, data, size);
2516 if (ret)
2517 goto exit;
2518
2519 SET(to_cp->c_bsdflags, UF_COMPRESSED);
2520 SET(to_cp->c_flag, C_MODIFIED);
2521
2522 exit:
2523 if (data)
2524 FREE(data, M_TEMP);
2525
2526 return ret;
2527 }
2528 #endif // HFS_COMPRESSION
2529
2530 int
2531 hfs_vnop_mmap(struct vnop_mmap_args *ap)
2532 {
2533 struct vnode *vp = ap->a_vp;
2534 cnode_t *cp = VTOC(vp);
2535 int error;
2536
2537 if (VNODE_IS_RSRC(vp)) {
2538 /* allow pageins of the resource fork */
2539 } else {
2540 int compressed = hfs_file_is_compressed(cp, 1); /* 1 == don't take the cnode lock */
2541 time_t orig_ctime = cp->c_ctime;
2542
2543 if (!compressed && (cp->c_bsdflags & UF_COMPRESSED)) {
2544 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
2545 if (error != 0) {
2546 return error;
2547 }
2548 }
2549
2550 if (ap->a_fflags & PROT_WRITE) {
2551 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
2552 }
2553 }
2554
2555 //
2556 // NOTE: we return ENOTSUP because we want the cluster layer
2557 // to actually do all the real work.
2558 //
2559 return (ENOTSUP);
2560 }
2561
2562 static errno_t hfs_vnop_mnomap(struct vnop_mnomap_args *ap)
2563 {
2564 vnode_t vp = ap->a_vp;
2565
2566 /*
2567 * Whilst the file was mapped, there may not have been any
2568 * page-outs so we need to increment the generation counter now.
2569 * Unfortunately this may lead to a change in the generation
2570 * counter when no actual change has been made, but there is
2571 * little we can do about that with our current architecture.
2572 */
2573 if (ubc_is_mapped_writable(vp)) {
2574 cnode_t *cp = VTOC(vp);
2575 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2576 hfs_incr_gencount(cp);
2577
2578 /*
2579 * We don't want to set the modification time here since a
2580 * change to that is not acceptable if no changes were made.
2581 * Instead we set a flag so that if we get any page-outs we
2582 * know to update the modification time. It's possible that
2583 * they weren't actually because of changes made whilst the
2584 * file was mapped but that's not easy to fix now.
2585 */
2586 SET(cp->c_flag, C_MIGHT_BE_DIRTY_FROM_MAPPING);
2587
2588 hfs_unlock(cp);
2589 }
2590
2591 return 0;
2592 }
2593
2594 /*
2595 * Mark the resource fork as needing a ubc_setsize when we drop the
2596 * cnode lock later.
2597 */
2598 static void hfs_rsrc_setsize(cnode_t *cp)
2599 {
2600 /*
2601 * We need to take an iocount if we don't have one. vnode_get
2602 * will return ENOENT if the vnode is terminating which is what we
2603 * want as it's not safe to call ubc_setsize in that case.
2604 */
2605 if (cp->c_rsrc_vp && !vnode_get(cp->c_rsrc_vp)) {
2606 // Shouldn't happen, but better safe...
2607 if (ISSET(cp->c_flag, C_NEED_RVNODE_PUT))
2608 vnode_put(cp->c_rsrc_vp);
2609 SET(cp->c_flag, C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE);
2610 }
2611 }
2612
2613 /*
2614 * hfs_move_data
2615 *
2616 * This is a non-symmetric variant of exchangedata. In this function,
2617 * the contents of the data fork (and optionally the resource fork)
2618 * are moved from from_cp to to_cp.
2619 *
2620 * The cnodes must be locked.
2621 *
2622 * The cnode pointed to by 'to_cp' *must* be empty prior to invoking
2623 * this function. We impose this restriction because we may not be
2624 * able to fully delete the entire file's contents in a single
2625 * transaction, particularly if it has a lot of extents. In the
2626 * normal file deletion codepath, the file is screened for two
2627 * conditions: 1) bigger than 400MB, and 2) more than 8 extents. If
2628 * so, the file is relocated to the hidden directory and the deletion
2629 * is broken up into multiple truncates. We can't do that here
2630 * because both files need to exist in the namespace. The main reason
2631 * this is imposed is that we may have to touch a whole lot of bitmap
2632 * blocks if there are many extents.
2633 *
2634 * Any data written to 'from_cp' after this call completes is not
2635 * guaranteed to be moved.
2636 *
2637 * Arguments:
2638 * cnode_t *from_cp : source file
2639 * cnode_t *to_cp : destination file; must be empty
2640 *
2641 * Returns:
2642 *
2643 * EBUSY - File has been deleted or is in use
2644 * EFBIG - Destination file was not empty
2645 * EIO - An I/O error
2646 * 0 - success
2647 * other - Other errors that can be returned from called functions
2648 */
2649 int hfs_move_data(cnode_t *from_cp, cnode_t *to_cp,
2650 hfs_move_data_options_t options)
2651 {
2652 hfsmount_t *hfsmp = VTOHFS(from_cp->c_vp);
2653 int error = 0;
2654 int lockflags = 0;
2655 bool return_EIO_on_error = false;
2656 const bool include_rsrc = ISSET(options, HFS_MOVE_DATA_INCLUDE_RSRC);
2657
2658 /* Verify that neither source/dest file is open-unlinked */
2659 if (ISSET(from_cp->c_flag, C_DELETED | C_NOEXISTS)
2660 || ISSET(to_cp->c_flag, C_DELETED | C_NOEXISTS)) {
2661 return EBUSY;
2662 }
2663
2664 /*
2665 * Verify the source file is not in use by anyone besides us.
2666 *
2667 * This function is typically invoked by a namespace handler
2668 * process responding to a temporarily stalled system call.
2669 * The FD that it is working off of is opened O_EVTONLY, so
2670 * it really has no active usecounts (the kusecount from O_EVTONLY
2671 * is subtracted from the total usecounts).
2672 *
2673 * As a result, we shouldn't have any active usecounts against
2674 * this vnode when we go to check it below.
2675 */
2676 if (vnode_isinuse(from_cp->c_vp, 0))
2677 return EBUSY;
2678
2679 if (include_rsrc && from_cp->c_rsrc_vp) {
2680 if (vnode_isinuse(from_cp->c_rsrc_vp, 0))
2681 return EBUSY;
2682
2683 /*
2684 * In the code below, if the destination file doesn't have a
2685 * c_rsrcfork then we don't create it, which means we cannot
2686 * transfer the ff_invalidranges and cf_vblocks fields. These
2687 * shouldn't be set because we flush the resource fork before
2688 * calling this function but there is a tiny window when we
2689 * did not have any locks...
2690 */
2691 if (!to_cp->c_rsrcfork
2692 && (!TAILQ_EMPTY(&from_cp->c_rsrcfork->ff_invalidranges)
2693 || from_cp->c_rsrcfork->ff_unallocblocks)) {
2694 /*
2695 * The file isn't really busy now but something did slip
2696 * in and tinker with the file while we didn't have any
2697 * locks, so this is the most meaningful return code for
2698 * the caller.
2699 */
2700 return EBUSY;
2701 }
2702 }
2703
2704 // Check the destination file is empty
2705 if (to_cp->c_datafork->ff_blocks
2706 || to_cp->c_datafork->ff_size
2707 || (include_rsrc
2708 && (to_cp->c_blocks
2709 || (to_cp->c_rsrcfork && to_cp->c_rsrcfork->ff_size)))) {
2710 return EFBIG;
2711 }
2712
2713 if ((error = hfs_start_transaction (hfsmp)))
2714 return error;
2715
2716 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE,
2717 HFS_EXCLUSIVE_LOCK);
2718
2719 // filefork_t is 128 bytes, small enough to keep in a local stack buffer
2720 filefork_t rfork_buf, *from_rfork = NULL;
2721
2722 if (include_rsrc) {
2723 from_rfork = from_cp->c_rsrcfork;
2724
2725 /*
2726 * Creating resource fork vnodes is expensive, so just get
2727 * the fork data if we need it.
2728 */
2729 if (!from_rfork && hfs_has_rsrc(from_cp)) {
2730 from_rfork = &rfork_buf;
2731
2732 from_rfork->ff_cp = from_cp;
2733 TAILQ_INIT(&from_rfork->ff_invalidranges);
2734
2735 error = cat_idlookup(hfsmp, from_cp->c_fileid, 0, 1, NULL, NULL,
2736 &from_rfork->ff_data);
2737
2738 if (error)
2739 goto exit;
2740 }
2741 }
2742
2743 /*
2744 * From here on, any failures mean that we might be leaving things
2745 * in a weird or inconsistent state. Ideally, we should back out
2746 * all the changes, but to do that properly we need to fix
2747 * MoveData. We'll save fixing that for another time. For now,
2748 * just return EIO in all cases to the caller so that they know.
2749 */
2750 return_EIO_on_error = true;
2751
2752 bool data_overflow_extents = overflow_extents(from_cp->c_datafork);
2753
2754 // Move the data fork
2755 if ((error = hfs_move_fork (from_cp->c_datafork, from_cp,
2756 to_cp->c_datafork, to_cp))) {
2757 goto exit;
2758 }
2759
2760 SET(from_cp->c_flag, C_NEED_DATA_SETSIZE);
2761 SET(to_cp->c_flag, C_NEED_DATA_SETSIZE);
2762
2763 // We move the resource fork later
2764
2765 /*
2766 * Note that because all we're doing is moving the extents around,
2767 * we can probably do this in a single transaction: Each extent
2768 * record (group of 8) is 64 bytes. An extent overflow B-Tree node
2769 * is typically 4k. This means each node can hold roughly ~60
2770 * extent records == (480 extents).
2771 *
2772 * If a file was massively fragmented and had 20k extents, this
2773 * means we'd roughly touch 20k/480 == 41 to 42 nodes, plus the
2774 * index nodes, for half of the operation (inserting or
2775 * deleting). So if we're manipulating 80-100 nodes, this is
2776 * basically 320k of data to write to the journal in a bad case.
2777 */
2778 if (data_overflow_extents) {
2779 if ((error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 0)))
2780 goto exit;
2781 }
2782
2783 if (from_rfork && overflow_extents(from_rfork)) {
2784 if ((error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 1)))
2785 goto exit;
2786 }
2787
2788 // Touch times
2789 from_cp->c_touch_acctime = TRUE;
2790 from_cp->c_touch_chgtime = TRUE;
2791 from_cp->c_touch_modtime = TRUE;
2792 hfs_touchtimes(hfsmp, from_cp);
2793
2794 to_cp->c_touch_acctime = TRUE;
2795 to_cp->c_touch_chgtime = TRUE;
2796 to_cp->c_touch_modtime = TRUE;
2797 hfs_touchtimes(hfsmp, to_cp);
2798
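/*
 * Build the on-disk cat_fork representations of the (now moved) forks
 * so the catalog records can be rewritten below.
 */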
2799 struct cat_fork dfork_buf;
2800 const struct cat_fork *dfork, *rfork;
2801
2802 dfork = hfs_prepare_fork_for_update(to_cp->c_datafork, NULL,
2803 &dfork_buf, hfsmp->blockSize);
2804 rfork = hfs_prepare_fork_for_update(from_rfork, NULL,
2805 &rfork_buf.ff_data, hfsmp->blockSize);
2806
2807 // Update the catalog nodes, to_cp first
2808 if ((error = cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr,
2809 dfork, rfork))) {
2810 goto exit;
2811 }
2812
2813 CLR(to_cp->c_flag, C_MODIFIED | C_MINOR_MOD);
2814
2815 // Update in-memory resource fork data here
2816 if (from_rfork) {
2817 // Update c_blocks
2818 uint32_t moving = from_rfork->ff_blocks + from_rfork->ff_unallocblocks;
2819
2820 from_cp->c_blocks -= moving;
2821 to_cp->c_blocks += moving;
2822
2823 // Update to_cp's resource data if it has it
2824 filefork_t *to_rfork = to_cp->c_rsrcfork;
2825 if (to_rfork) {
2826 TAILQ_SWAP(&to_rfork->ff_invalidranges,
2827 &from_rfork->ff_invalidranges, rl_entry, rl_link);
2828 to_rfork->ff_data = from_rfork->ff_data;
2829
2830 // Deal with ubc_setsize
2831 hfs_rsrc_setsize(to_cp);
2832 }
2833
2834 // Wipe out the resource fork in from_cp
2835 rl_init(&from_rfork->ff_invalidranges);
2836 bzero(&from_rfork->ff_data, sizeof(from_rfork->ff_data));
2837
2838 // Deal with ubc_setsize
2839 hfs_rsrc_setsize(from_cp);
2840 }
2841
2842 // Currently unnecessary, but might be useful in future...
2843 dfork = hfs_prepare_fork_for_update(from_cp->c_datafork, NULL, &dfork_buf,
2844 hfsmp->blockSize);
2845 rfork = hfs_prepare_fork_for_update(from_rfork, NULL, &rfork_buf.ff_data,
2846 hfsmp->blockSize);
2847
2848 // Update from_cp
2849 if ((error = cat_update(hfsmp, &from_cp->c_desc, &from_cp->c_attr,
2850 dfork, rfork))) {
2851 goto exit;
2852 }
2853
2854 CLR(from_cp->c_flag, C_MODIFIED | C_MINOR_MOD);
2855
2856 exit:
2857 if (lockflags) {
2858 hfs_systemfile_unlock(hfsmp, lockflags);
2859 hfs_end_transaction(hfsmp);
2860 }
2861
2862 if (error && error != EIO && return_EIO_on_error) {
2863 printf("hfs_move_data: encountered error %d\n", error);
2864 error = EIO;
2865 }
2866
2867 return error;
2868 }
2869
2870 /*
2871 * Move all of the catalog and runtime data in srcfork to dstfork.
2872 *
2873 * This allows us to maintain the invalid ranges across the move data
2874 * operation so we don't need to force all of the pending IO right
2875 * now. In addition, we move all non overflow-extent extents into the
2876 * destination here.
2877 *
2878 * The destination fork must be empty and should have been checked
2879 * prior to calling this.
2880 */
2881 static int hfs_move_fork(filefork_t *srcfork, cnode_t *src_cp,
2882 filefork_t *dstfork, cnode_t *dst_cp)
2883 {
2884 // Move the invalid ranges
2885 TAILQ_SWAP(&dstfork->ff_invalidranges, &srcfork->ff_invalidranges,
2886 rl_entry, rl_link);
2887 rl_remove_all(&srcfork->ff_invalidranges);
2888
2889 // Move the fork data (copy whole structure)
2890 dstfork->ff_data = srcfork->ff_data;
2891 bzero(&srcfork->ff_data, sizeof(srcfork->ff_data));
2892
2893 // Update c_blocks
2894 src_cp->c_blocks -= dstfork->ff_blocks + dstfork->ff_unallocblocks;
2895 dst_cp->c_blocks += dstfork->ff_blocks + dstfork->ff_unallocblocks;
2896
2897 return 0;
2898 }
2899
2900
2901 #include <i386/panic_hooks.h>
2902
2903 struct hfs_fsync_panic_hook {
2904 panic_hook_t hook;
2905 struct cnode *cp;
2906 };
2907
2908 static void hfs_fsync_panic_hook(panic_hook_t *hook_)
2909 {
2910 struct hfs_fsync_panic_hook *hook = (struct hfs_fsync_panic_hook *)hook_;
2911 extern int kdb_log(const char *fmt, ...);
2912
2913 // Get the physical region just before cp
2914 panic_phys_range_t range;
2915 uint64_t phys;
2916
2917 if (panic_phys_range_before(hook->cp, &phys, &range)) {
2918 kdb_log("cp = %p, phys = %p, prev (%p: %p-%p)\n",
2919 hook->cp, phys, range.type, range.phys_start,
2920 range.phys_start + range.len);
2921 } else
2922 kdb_log("cp = %p, phys = %p, prev (!)\n", hook->cp, phys);
2923
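/* Dump the page containing the cnode plus the pages immediately before and after it (12KB total). */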
2924 panic_dump_mem((void *)(((vm_offset_t)hook->cp - 4096) & ~4095), 12288);
2925
2926 kdb_log("\n");
2927 }
2928
2929
2930 /*
2931 * cnode must be locked
2932 */
2933 int
2934 hfs_fsync(struct vnode *vp, int waitfor, hfs_fsync_mode_t fsyncmode, struct proc *p)
2935 {
2936 struct cnode *cp = VTOC(vp);
2937 struct filefork *fp = NULL;
2938 int retval = 0;
2939 struct hfsmount *hfsmp = VTOHFS(vp);
2940 struct timeval tv;
2941 int waitdata; /* attributes necessary for data retrieval */
2942 int wait; /* all other attributes (e.g. atime, etc.) */
2943 int lockflag;
2944 int took_trunc_lock = 0;
2945 int locked_buffers = 0;
2946 int fsync_default = 1;
2947
2948 /*
2949 * Applications which only care about data integrity rather than full
2950 * file integrity may opt out of (delay) expensive metadata update
2951 * operations as a performance optimization.
2952 */
2953 wait = (waitfor == MNT_WAIT);
2954 waitdata = (waitfor == MNT_DWAIT) | wait;
2955
2956 if (always_do_fullfsync)
2957 fsyncmode = HFS_FSYNC_FULL;
2958 if (fsyncmode != HFS_FSYNC)
2959 fsync_default = 0;
2960
2961 /* HFS directories don't have any data blocks. */
2962 if (vnode_isdir(vp))
2963 goto metasync;
2964 fp = VTOF(vp);
2965
2966 /*
2967 * For system files flush the B-tree header and
2968 * for regular files write out any clusters
2969 */
2970 if (vnode_issystem(vp)) {
2971 if (VTOF(vp)->fcbBTCBPtr != NULL) {
2972 // XXXdbg
2973 if (hfsmp->jnl == NULL) {
2974 BTFlushPath(VTOF(vp));
2975 }
2976 }
2977 } else if (UBCINFOEXISTS(vp)) {
2978 hfs_unlock(cp);
2979 hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
2980 took_trunc_lock = 1;
2981
2982 struct hfs_fsync_panic_hook hook;
2983 hook.cp = cp;
2984 panic_hook(&hook.hook, hfs_fsync_panic_hook);
2985
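/*
 * If the file still has unallocated (delayed-allocation) blocks, trade
 * the shared truncate lock for an exclusive one, since the cluster_push
 * below may need to allocate them.
 */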
2986 if (fp->ff_unallocblocks != 0) {
2987 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2988
2989 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2990 }
2991
2992 panic_unhook(&hook.hook);
2993
2994 /* Don't hold cnode lock when calling into cluster layer. */
2995 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
2996
2997 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2998 }
2999 /*
3000 * When MNT_WAIT is requested and the zero fill timeout
3001 * has expired then we must explicitly zero out any areas
3002 * that are currently marked invalid (holes).
3003 *
3004 * Files with NODUMP can bypass zero filling here.
3005 */
3006 if (fp && (((cp->c_flag & C_ALWAYS_ZEROFILL) && !TAILQ_EMPTY(&fp->ff_invalidranges)) ||
3007 ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
3008 ((cp->c_bsdflags & UF_NODUMP) == 0) &&
3009 UBCINFOEXISTS(vp) && (vnode_issystem(vp) ==0) &&
3010 cp->c_zftimeout != 0))) {
3011
3012 microuptime(&tv);
3013 if ((cp->c_flag & C_ALWAYS_ZEROFILL) == 0 && fsync_default && tv.tv_sec < (long)cp->c_zftimeout) {
3014 /* Remember that a force sync was requested. */
3015 cp->c_flag |= C_ZFWANTSYNC;
3016 goto datasync;
3017 }
3018 if (!TAILQ_EMPTY(&fp->ff_invalidranges)) {
3019 if (!took_trunc_lock || (cp->c_truncatelockowner == HFS_SHARED_OWNER)) {
3020 hfs_unlock(cp);
3021 if (took_trunc_lock) {
3022 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3023 }
3024 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
3025 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
3026 took_trunc_lock = 1;
3027 }
3028 hfs_flush_invalid_ranges(vp);
3029 hfs_unlock(cp);
3030 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
3031 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
3032 }
3033 }
3034 datasync:
3035 if (took_trunc_lock) {
3036 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3037 took_trunc_lock = 0;
3038 }
3039 /*
3040 * if we have a journal and if journal_active() returns != 0 then we
3041 * shouldn't do anything to a locked block (because it is part
3042 * of a transaction). otherwise we'll just go through the normal
3043 * code path and flush the buffer. note journal_active() can return
3044 * -1 if the journal is invalid -- however we still need to skip any
3045 * locked blocks as they get cleaned up when we finish the transaction
3046 * or close the journal.
3047 */
3048 // if (hfsmp->jnl && journal_active(hfsmp->jnl) >= 0)
3049 if (hfsmp->jnl)
3050 lockflag = BUF_SKIP_LOCKED;
3051 else
3052 lockflag = 0;
3053
3054 /*
3055 * Flush all dirty buffers associated with a vnode.
3056 * Record how many of them were dirty AND locked (if necessary).
3057 */
3058 locked_buffers = buf_flushdirtyblks_skipinfo(vp, waitdata, lockflag, "hfs_fsync");
3059 if ((lockflag & BUF_SKIP_LOCKED) && (locked_buffers) && (vnode_vtype(vp) == VLNK)) {
3060 /*
3061 * If there are dirty symlink buffers, then we may need to take action
3062 * to prevent issues later on if we are journaled. If we're fsyncing a
3063 * symlink vnode then we are in one of three cases:
3064 *
3065 * 1) automatic sync has fired. In this case, we don't want the behavior to change.
3066 *
3067 * 2) Someone has opened the FD for the symlink (not what it points to)
3068 * and has issued an fsync against it. This should be rare, and we don't
3069 * want the behavior to change.
3070 *
3071 * 3) We are being called by a vclean which is trying to reclaim this
3072 * symlink vnode. If this is the case, then allowing this fsync to
3073 * proceed WITHOUT flushing the journal could result in the vclean
3074 * invalidating the buffer's blocks before the journal transaction is
3075 * written to disk. To prevent this, we force a journal flush
3076 * if the vnode is in the middle of a recycle (VL_TERMINATE or VL_DEAD is set).
3077 */
3078 if (vnode_isrecycled(vp)) {
3079 fsync_default = 0;
3080 }
3081 }
3082
3083 metasync:
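/*
 * For system B-tree files we only record the last-sync time here;
 * regular (user) files update and, if needed, push their catalog
 * record and journal below.
 */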
3084 if (vnode_isreg(vp) && vnode_issystem(vp)) {
3085 if (VTOF(vp)->fcbBTCBPtr != NULL) {
3086 microuptime(&tv);
3087 BTSetLastSync(VTOF(vp), tv.tv_sec);
3088 }
3089 cp->c_touch_acctime = FALSE;
3090 cp->c_touch_chgtime = FALSE;
3091 cp->c_touch_modtime = FALSE;
3092 } else if ( !(vp->v_flag & VSWAP) ) /* User file */ {
3093 retval = hfs_update(vp, HFS_UPDATE_FORCE);
3094
3095 /*
3096 * When MNT_WAIT is requested push out the catalog record for
3097 * this file. If they asked for a full fsync, we can skip this
3098 * because the journal_flush or hfs_metasync_all will push out
3099 * all of the metadata changes.
3100 */
3101 if ((retval == 0) && wait && fsync_default && cp->c_hint &&
3102 !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
3103 hfs_metasync(VTOHFS(vp), (daddr64_t)cp->c_hint, p);
3104 }
3105
3106 /*
3107 * If this was a full fsync, make sure all metadata
3108 * changes get to stable storage.
3109 */
3110 if (!fsync_default) {
3111 if (hfsmp->jnl) {
3112 if (fsyncmode == HFS_FSYNC_FULL)
3113 hfs_flush(hfsmp, HFS_FLUSH_FULL);
3114 else
3115 hfs_flush(hfsmp,
3116 HFS_FLUSH_JOURNAL_BARRIER);
3117 } else {
3118 retval = hfs_metasync_all(hfsmp);
3119 /* XXX need to pass context! */
3120 hfs_flush(hfsmp, HFS_FLUSH_CACHE);
3121 }
3122 }
3123 }
3124
3125 if (!hfs_is_dirty(cp) && !ISSET(cp->c_flag, C_DELETED))
3126 vnode_cleardirty(vp);
3127
3128 return (retval);
3129 }
3130
3131
3132 /* Sync an hfs catalog b-tree node */
3133 int
3134 hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p)
3135 {
3136 vnode_t vp;
3137 buf_t bp;
3138 int lockflags;
3139
3140 vp = HFSTOVCB(hfsmp)->catalogRefNum;
3141
3142 // XXXdbg - don't need to do this on a journaled volume
3143 if (hfsmp->jnl) {
3144 return 0;
3145 }
3146
3147 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
3148 /*
3149 * Look for a matching node that has been delayed
3150 * but is not part of a set (B_LOCKED).
3151 *
3152 * BLK_ONLYVALID causes buf_getblk to return a
3153 * buf_t for the daddr64_t specified only if it's
3154 * currently resident in the cache... the size
3155 * parameter to buf_getblk is ignored when this flag
3156 * is set
3157 */
3158 bp = buf_getblk(vp, node, 0, 0, 0, BLK_META | BLK_ONLYVALID);
3159
3160 if (bp) {
3161 if ((buf_flags(bp) & (B_LOCKED | B_DELWRI)) == B_DELWRI)
3162 (void) VNOP_BWRITE(bp);
3163 else
3164 buf_brelse(bp);
3165 }
3166
3167 hfs_systemfile_unlock(hfsmp, lockflags);
3168
3169 return (0);
3170 }
3171
3172
3173 /*
3174 * Sync all hfs B-trees. Use this instead of journal_flush for a volume
3175 * without a journal. Note that the volume bitmap does not get written;
3176 * we rely on fsck_hfs to fix that up (which it can do without any loss
3177 * of data).
3178 */
3179 int
3180 hfs_metasync_all(struct hfsmount *hfsmp)
3181 {
3182 int lockflags;
3183
3184 /* Lock all of the B-trees so we get a mutually consistent state */
3185 lockflags = hfs_systemfile_lock(hfsmp,
3186 SFL_CATALOG|SFL_EXTENTS|SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
3187
3188 /* Sync each of the B-trees */
3189 if (hfsmp->hfs_catalog_vp)
3190 hfs_btsync(hfsmp->hfs_catalog_vp, 0);
3191 if (hfsmp->hfs_extents_vp)
3192 hfs_btsync(hfsmp->hfs_extents_vp, 0);
3193 if (hfsmp->hfs_attribute_vp)
3194 hfs_btsync(hfsmp->hfs_attribute_vp, 0);
3195
3196 /* Wait for all of the writes to complete */
3197 if (hfsmp->hfs_catalog_vp)
3198 vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_metasync_all");
3199 if (hfsmp->hfs_extents_vp)
3200 vnode_waitforwrites(hfsmp->hfs_extents_vp, 0, 0, 0, "hfs_metasync_all");
3201 if (hfsmp->hfs_attribute_vp)
3202 vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs_metasync_all");
3203
3204 hfs_systemfile_unlock(hfsmp, lockflags);
3205
3206 return 0;
3207 }
3208
3209
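/*
 * buf_iterate callback: clear B_LOCKED and start an asynchronous write
 * of the buffer. Returning BUF_CLAIMED tells buf_iterate the buffer has
 * been handled.
 */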
3210 /*ARGSUSED 1*/
3211 static int
3212 hfs_btsync_callback(struct buf *bp, __unused void *dummy)
3213 {
3214 buf_clearflags(bp, B_LOCKED);
3215 (void) buf_bawrite(bp);
3216
3217 return(BUF_CLAIMED);
3218 }
3219
3220
3221 int
3222 hfs_btsync(struct vnode *vp, int sync_transaction)
3223 {
3224 struct cnode *cp = VTOC(vp);
3225 struct timeval tv;
3226 int flags = 0;
3227
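/*
 * When called for a transaction sync, only iterate the buffers that are
 * locked into the transaction (B_LOCKED); everything else is left for
 * the normal flush path.
 */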
3228 if (sync_transaction)
3229 flags |= BUF_SKIP_NONLOCKED;
3230 /*
3231 * Flush all dirty buffers associated with b-tree.
3232 */
3233 buf_iterate(vp, hfs_btsync_callback, flags, 0);
3234
3235 microuptime(&tv);
3236 if (vnode_issystem(vp) && (VTOF(vp)->fcbBTCBPtr != NULL))
3237 (void) BTSetLastSync(VTOF(vp), tv.tv_sec);
3238 cp->c_touch_acctime = FALSE;
3239 cp->c_touch_chgtime = FALSE;
3240 cp->c_touch_modtime = FALSE;
3241
3242 return 0;
3243 }
3244
3245 /*
3246 * Remove a directory.
3247 */
3248 int
3249 hfs_vnop_rmdir(ap)
3250 struct vnop_rmdir_args /* {
3251 struct vnode *a_dvp;
3252 struct vnode *a_vp;
3253 struct componentname *a_cnp;
3254 vfs_context_t a_context;
3255 } */ *ap;
3256 {
3257 struct vnode *dvp = ap->a_dvp;
3258 struct vnode *vp = ap->a_vp;
3259 struct cnode *dcp = VTOC(dvp);
3260 struct cnode *cp = VTOC(vp);
3261 int error;
3262 time_t orig_ctime;
3263
3264 orig_ctime = VTOC(vp)->c_ctime;
3265
3266 if (!S_ISDIR(cp->c_mode)) {
3267 return (ENOTDIR);
3268 }
3269 if (dvp == vp) {
3270 return (EINVAL);
3271 }
3272
3273 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
3274 cp = VTOC(vp);
3275
3276 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
3277 return (error);
3278 }
3279
3280 /* Check for a race with rmdir on the parent directory */
3281 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
3282 hfs_unlockpair (dcp, cp);
3283 return ENOENT;
3284 }
3285
3286 //
3287 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
3288 //
3289 if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
3290 uint32_t newid;
3291
3292 hfs_unlockpair(dcp, cp);
3293
3294 if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
3295 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3296 ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
3297 #if CONFIG_FSE
3298 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
3299 FSE_ARG_DEV, VTOHFS(vp)->hfs_raw_dev,
3300 FSE_ARG_INO, (ino64_t)0, // src inode #
3301 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
3302 FSE_ARG_INT32, newid,
3303 FSE_ARG_DONE);
3304 #endif
3305 } else {
3306 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rm...
3307 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3308 }
3309 }
3310
3311 error = hfs_removedir(dvp, vp, ap->a_cnp, 0, 0);
3312
3313 hfs_unlockpair(dcp, cp);
3314
3315 return (error);
3316 }
3317
3318 /*
3319 * Remove a directory
3320 *
3321 * Both dvp and vp cnodes are locked
3322 */
3323 int
3324 hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
3325 int skip_reserve, int only_unlink)
3326 {
3327 struct cnode *cp;
3328 struct cnode *dcp;
3329 struct hfsmount * hfsmp;
3330 struct cat_desc desc;
3331 int lockflags;
3332 int error = 0, started_tr = 0;
3333
3334 cp = VTOC(vp);
3335 dcp = VTOC(dvp);
3336 hfsmp = VTOHFS(vp);
3337
3338 if (dcp == cp) {
3339 return (EINVAL); /* cannot remove "." */
3340 }
3341 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
3342 return (0);
3343 }
3344 if (cp->c_entries != 0) {
3345 return (ENOTEMPTY);
3346 }
3347
3348 /*
3349 * If the directory is open or in use (e.g. opendir() or current working
3350 * directory for some process), wait for inactive/reclaim to actually
3351 * remove cnode from the catalog. Both inactive and reclaim codepaths are capable
3352 * of removing open-unlinked directories from the catalog, as well as getting rid
3353 * of EAs still on the element. So change only_unlink to true, so that it will get
3354 * cleaned up below.
3355 *
3356 * Otherwise, we can get into a weird old mess where the directory has C_DELETED,
3357 * but it really means C_NOEXISTS because the item was actually removed from the
3358 * catalog. Then when we try to remove the entry from the catalog later on, it won't
3359 * really be there anymore.
3360 */
3361 if (vnode_isinuse(vp, 0)) {
3362 only_unlink = 1;
3363 }
3364
3365 /* Deal with directory hardlinks */
3366 if (cp->c_flag & C_HARDLINK) {
3367 /*
3368 * Note that if we have a directory which was a hardlink at any point,
3369 * its actual directory data is stored in the directory inode in the hidden
3370 * directory rather than the leaf element(s) present in the namespace.
3371 *
3372 * If there are still other hardlinks to this directory,
3373 * then we'll just eliminate this particular link and the vnode will still exist.
3374 * If this is the last link to an empty directory, then we'll open-unlink the
3375 * directory and it will be only tagged with C_DELETED (as opposed to C_NOEXISTS).
3376 *
3377 * We could also return EBUSY here.
3378 */
3379
3380 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
3381 }
3382
3383 /*
3384 * In a few cases, we may want to allow the directory to persist in an
3385 * open-unlinked state. If the directory is being open-unlinked (still has usecount
3386 * references), or if it has EAs, or if it was being deleted as part of a rename,
3387 * then we go ahead and move it to the hidden directory.
3388 *
3389 * If the directory is being open-unlinked, then we want to keep the catalog entry
3390 * alive so that future EA calls and fchmod/fstat etc. do not cause issues later.
3391 *
3392 * If the directory had EAs, then we want to use the open-unlink trick so that the
3393 * EA removal is not done in one giant transaction. Otherwise, it could cause a panic
3394 * due to overflowing the journal.
3395 *
3396 * Finally, if it was deleted as part of a rename, we move it to the hidden directory
3397 * in order to maintain rename atomicity.
3398 *
3399 * Note that the allow_dirs argument to hfs_removefile specifies that it is
3400 * supposed to handle directories for this case.
3401 */
3402
3403 if (((hfsmp->hfs_attribute_vp != NULL) &&
3404 ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0)) ||
3405 (only_unlink != 0)) {
3406
3407 int ret = hfs_removefile(dvp, vp, cnp, 0, 0, 1, NULL, only_unlink);
3408 /*
3409 * Even though hfs_vnop_rename calls vnode_recycle for us on tvp we call
3410 * it here just in case we were invoked by rmdir() on a directory that had
3411 * EAs. To ensure that we start reclaiming the space as soon as possible,
3412 * we call vnode_recycle on the directory.
3413 */
3414 vnode_recycle(vp);
3415
3416 return ret;
3417
3418 }
3419
3420 dcp->c_flag |= C_DIR_MODIFICATION;
3421
3422 #if QUOTA
3423 if (hfsmp->hfs_flags & HFS_QUOTAS)
3424 (void)hfs_getinoquota(cp);
3425 #endif
3426 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3427 goto out;
3428 }
3429 started_tr = 1;
3430
3431 /*
3432 * Verify the directory is empty (and valid).
3433 * (Rmdir ".." won't be valid since
3434 * ".." will contain a reference to
3435 * the current directory and thus be
3436 * non-empty.)
3437 */
3438 if ((dcp->c_bsdflags & APPEND) || (cp->c_bsdflags & (IMMUTABLE | APPEND))) {
3439 error = EPERM;
3440 goto out;
3441 }
3442
3443 /* Remove the entry from the namei cache: */
3444 cache_purge(vp);
3445
3446 /*
3447 * Protect against a race with rename by using the component
3448 * name passed in and parent id from dvp (instead of using
3449 * the cp->c_desc which may have changed).
3450 */
3451 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3452 desc.cd_namelen = cnp->cn_namelen;
3453 desc.cd_parentcnid = dcp->c_fileid;
3454 desc.cd_cnid = cp->c_cnid;
3455 desc.cd_flags = CD_ISDIR;
3456 desc.cd_encoding = cp->c_encoding;
3457 desc.cd_hint = 0;
3458
3459 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
3460 error = 0;
3461 goto out;
3462 }
3463
3464 /* Remove entry from catalog */
3465 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3466
3467 if (!skip_reserve) {
3468 /*
3469 * Reserve some space in the Catalog file.
3470 */
3471 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
3472 hfs_systemfile_unlock(hfsmp, lockflags);
3473 goto out;
3474 }
3475 }
3476
3477 error = cat_delete(hfsmp, &desc, &cp->c_attr);
3478
3479 if (!error) {
3480 //
3481 // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
3482 // we don't need to touch the document_id as it's handled by the rename code.
3483 // otherwise it's a normal remove and we need to save the document id in the
3484 // per thread struct and clear it from the cnode.
3485 //
3486 struct doc_tombstone *ut;
3487 ut = get_uthread_doc_tombstone();
3488 if (!skip_reserve && (cp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, vp, cnp)) {
3489
3490 if (ut->t_lastop_document_id) {
3491 clear_tombstone_docid(ut, hfsmp, NULL);
3492 }
3493 save_tombstone(hfsmp, dvp, vp, cnp, 1);
3494
3495 }
3496
3497 /* The parent lost a child */
3498 if (dcp->c_entries > 0)
3499 dcp->c_entries--;
3500 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
3501 dcp->c_dirchangecnt++;
3502 hfs_incr_gencount(dcp);
3503
3504 dcp->c_touch_chgtime = TRUE;
3505 dcp->c_touch_modtime = TRUE;
3506 dcp->c_flag |= C_MODIFIED;
3507
3508 hfs_update(dcp->c_vp, 0);
3509 }
3510
3511 hfs_systemfile_unlock(hfsmp, lockflags);
3512
3513 if (error)
3514 goto out;
3515
3516 #if QUOTA
3517 if (hfsmp->hfs_flags & HFS_QUOTAS)
3518 (void)hfs_chkiq(cp, -1, NOCRED, 0);
3519 #endif /* QUOTA */
3520
3521 hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));
3522
3523 /* Mark C_NOEXISTS since the catalog entry is now gone */
3524 cp->c_flag |= C_NOEXISTS;
3525
3526 out:
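	/* Directory modification is done: clear the flag and wake any threads sleeping on &dcp->c_flag for it. */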
3527 dcp->c_flag &= ~C_DIR_MODIFICATION;
3528 wakeup((caddr_t)&dcp->c_flag);
3529
3530 if (started_tr) {
3531 hfs_end_transaction(hfsmp);
3532 }
3533
3534 return (error);
3535 }
3536
3537
3538 /*
3539 * Remove a file or link.
3540 */
3541 int
3542 hfs_vnop_remove(ap)
3543 struct vnop_remove_args /* {
3544 struct vnode *a_dvp;
3545 struct vnode *a_vp;
3546 struct componentname *a_cnp;
3547 int a_flags;
3548 vfs_context_t a_context;
3549 } */ *ap;
3550 {
3551 struct vnode *dvp = ap->a_dvp;
3552 struct vnode *vp = ap->a_vp;
3553 struct cnode *dcp = VTOC(dvp);
3554 struct cnode *cp;
3555 struct vnode *rvp = NULL;
3556 int error=0, recycle_rsrc=0;
3557 int recycle_vnode = 0;
3558 uint32_t rsrc_vid = 0;
3559 time_t orig_ctime;
3560
3561 if (dvp == vp) {
3562 return (EINVAL);
3563 }
3564
3565 orig_ctime = VTOC(vp)->c_ctime;
3566 if (!vnode_isnamedstream(vp) && ((ap->a_flags & VNODE_REMOVE_SKIP_NAMESPACE_EVENT) == 0)) {
3567 error = check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
3568 if (error) {
3569 // XXXdbg - decide on a policy for handling namespace handler failures!
3570 // for now we just let them proceed.
3571 }
3572 }
3573 error = 0;
3574
3575 cp = VTOC(vp);
3576
3577 relock:
3578
3579 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
3580
3581 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
3582 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3583 if (rvp) {
3584 vnode_put (rvp);
3585 }
3586 return (error);
3587 }
3588 //
3589 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
3590 //
3591 if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
3592 uint32_t newid;
3593
3594 hfs_unlockpair(dcp, cp);
3595
3596 if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
3597 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3598 ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
3599 #if CONFIG_FSE
3600 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
3601 FSE_ARG_DEV, VTOHFS(vp)->hfs_raw_dev,
3602 FSE_ARG_INO, (ino64_t)0, // src inode #
3603 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
3604 FSE_ARG_INT32, newid,
3605 FSE_ARG_DONE);
3606 #endif
3607 } else {
3608 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rm...
3609 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3610 }
3611 }
3612
3613 /*
3614 	 * Lazily determine whether there is a valid resource fork
3615 	 * vnode attached to 'cp' if it is a regular file or symlink.
3616 * If the vnode does not exist, then we may proceed without having to
3617 * create it.
3618 *
3619 * If, however, it does exist, then we need to acquire an iocount on the
3620 * vnode after acquiring its vid. This ensures that if we have to do I/O
3621 * against it, it can't get recycled from underneath us in the middle
3622 * of this call.
3623 *
3624 * Note: this function may be invoked for directory hardlinks, so just skip these
3625 * steps if 'vp' is a directory.
3626 */
3627
3628 if ((vp->v_type == VLNK) || (vp->v_type == VREG)) {
3629 if ((cp->c_rsrc_vp) && (rvp == NULL)) {
3630 /* We need to acquire the rsrc vnode */
3631 rvp = cp->c_rsrc_vp;
3632 rsrc_vid = vnode_vid (rvp);
3633
3634 /* Unlock everything to acquire iocount on the rsrc vnode */
3635 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
3636 hfs_unlockpair (dcp, cp);
3637 /* Use the vid to maintain identity on rvp */
3638 if (vnode_getwithvid(rvp, rsrc_vid)) {
3639 /*
3640 * If this fails, then it was recycled or
3641 * reclaimed in the interim. Reset fields and
3642 * start over.
3643 */
3644 rvp = NULL;
3645 rsrc_vid = 0;
3646 }
3647 goto relock;
3648 }
3649 }
3650
3651 /*
3652 	 * Check to see if we raced rmdir for the parent directory.
3653 	 * hfs_removefile already checks for a race on vp/cp.
3654 */
3655 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
3656 error = ENOENT;
3657 goto rm_done;
3658 }
3659
3660 error = hfs_removefile(dvp, vp, ap->a_cnp, ap->a_flags, 0, 0, NULL, 0);
3661
3662 /*
3663 * If the remove succeeded in deleting the file, then we may need to mark
3664 * the resource fork for recycle so that it is reclaimed as quickly
3665 * as possible. If it were not recycled quickly, then this resource fork
3666 * vnode could keep a v_parent reference on the data fork, which prevents it
3667 * from going through reclaim (by giving it extra usecounts), except in the force-
3668 * unmount case.
3669 *
3670 * However, a caveat: we need to continue to supply resource fork
3671 * access to open-unlinked files even if the resource fork is not open. This is
3672 * a requirement for the compressed files work. Luckily, hfs_vgetrsrc will handle
3673 * this already if the data fork has been re-parented to the hidden directory.
3674 *
3675 * As a result, all we really need to do here is mark the resource fork vnode
3676 * for recycle. If it goes out of core, it can be brought in again if needed.
3677 * If the cnode was instead marked C_NOEXISTS, then there wouldn't be any
3678 * more work.
3679 */
3680 if (error == 0) {
3681 hfs_hotfile_deleted(vp);
3682
3683 if (rvp) {
3684 recycle_rsrc = 1;
3685 }
3686 /*
3687 		 * If the target was actually removed from the catalog, schedule it for
3688 		 * full reclamation/inactivation.  We hold an iocount on it, so it should
3689 		 * just get marked with MARKTERM.
3690 */
3691 if (cp->c_flag & C_NOEXISTS) {
3692 recycle_vnode = 1;
3693 }
3694 }
3695
3696
3697 /*
3698 * Drop the truncate lock before unlocking the cnode
3699 * (which can potentially perform a vnode_put and
3700 * recycle the vnode which in turn might require the
3701 * truncate lock)
3702 */
3703 rm_done:
3704 hfs_unlockpair(dcp, cp);
3705 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3706
3707 if (recycle_rsrc) {
3708 /* inactive or reclaim on rvp will clean up the blocks from the rsrc fork */
3709 vnode_recycle(rvp);
3710 }
3711 if (recycle_vnode) {
3712 vnode_recycle (vp);
3713 }
3714
3715 if (rvp) {
3716 /* drop iocount on rsrc fork, was obtained at beginning of fxn */
3717 vnode_put(rvp);
3718 }
3719
3720 return (error);
3721 }
3722
3723
3724 int
3725 hfs_removefile_callback(struct buf *bp, void *hfsmp) {
3726
3727 if ( !(buf_flags(bp) & B_META))
3728 panic("hfs: symlink bp @ %p is not marked meta-data!\n", bp);
3729 /*
3730 * it's part of the current transaction, kill it.
3731 */
3732 journal_kill_block(((struct hfsmount *)hfsmp)->jnl, bp);
3733
3734 return (BUF_CLAIMED);
3735 }
3736
3737 /*
3738 * hfs_removefile
3739 *
3740 * Similar to hfs_vnop_remove except there are additional options.
3741 * This function may be used to remove directories if they have
3742 * lots of EA's -- note the 'allow_dirs' argument.
3743 *
3744 * This function is able to delete blocks & fork data for the resource
3745  * fork even if it does not exist in core (i.e. it has no backing vnode).
3746  * It infers the correct behavior based on the number of blocks
3747  * in the cnode and on whether or not the resource fork pointer
3748  * exists.  As a result, one need only pass in the 'vp' corresponding to the
3749 * data fork of this file (or main vnode in the case of a directory).
3750 * Passing in a resource fork will result in an error.
3751 *
3752 * Because we do not create any vnodes in this function, we are not at
3753 * risk of deadlocking against ourselves by double-locking.
3754 *
3755 * Requires cnode and truncate locks to be held.
3756 */
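/*
 * A minimal sketch of the expected call pattern, mirroring hfs_vnop_remove
 * above (the zero flag arguments here are illustrative only): the caller
 * takes the truncate lock and the dvp/vp cnode lock pair before calling,
 * and drops them afterwards.
 *
 *	hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
 *	if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK)) == 0) {
 *		error = hfs_removefile(dvp, vp, cnp, 0, 0, 0, NULL, 0);
 *		hfs_unlockpair(dcp, cp);
 *	}
 *	hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
 */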
3757 int
3758 hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
3759 int flags, int skip_reserve, int allow_dirs,
3760 __unused struct vnode *rvp, int only_unlink)
3761 {
3762 struct cnode *cp;
3763 struct cnode *dcp;
3764 struct vnode *rsrc_vp = NULL;
3765 struct hfsmount *hfsmp;
3766 struct cat_desc desc;
3767 struct timeval tv;
3768 int dataforkbusy = 0;
3769 int rsrcforkbusy = 0;
3770 int lockflags;
3771 int error = 0;
3772 int started_tr = 0;
3773 int isbigfile = 0, defer_remove=0, isdir=0;
3774 int update_vh = 0;
3775
3776 cp = VTOC(vp);
3777 dcp = VTOC(dvp);
3778 hfsmp = VTOHFS(vp);
3779
3780 /* Check if we lost a race post lookup. */
3781 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
3782 return (0);
3783 }
3784
3785 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
3786 return 0;
3787 }
3788
3789 /* Make sure a remove is permitted */
3790 if (VNODE_IS_RSRC(vp)) {
3791 return (EPERM);
3792 }
3793 else {
3794 /*
3795 * We know it's a data fork.
3796 * Probe the cnode to see if we have a valid resource fork
3797 * in hand or not.
3798 */
3799 rsrc_vp = cp->c_rsrc_vp;
3800 }
3801
3802 /* Don't allow deleting the journal or journal_info_block. */
3803 if (hfs_is_journal_file(hfsmp, cp)) {
3804 return (EPERM);
3805 }
3806
3807 /*
3808 * Hard links require special handling.
3809 */
3810 if (cp->c_flag & C_HARDLINK) {
3811 if ((flags & VNODE_REMOVE_NODELETEBUSY) && vnode_isinuse(vp, 0)) {
3812 return (EBUSY);
3813 } else {
3814 /* A directory hard link with a link count of one is
3815 * treated as a regular directory. Therefore it should
3816 * only be removed using rmdir().
3817 */
3818 if ((vnode_isdir(vp) == 1) && (cp->c_linkcount == 1) &&
3819 (allow_dirs == 0)) {
3820 return (EPERM);
3821 }
3822 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
3823 }
3824 }
3825
3826 /* Directories should call hfs_rmdir! (unless they have a lot of attributes) */
3827 if (vnode_isdir(vp)) {
3828 if (allow_dirs == 0)
3829 return (EPERM); /* POSIX */
3830 isdir = 1;
3831 }
3832 /* Sanity check the parent ids. */
3833 if ((cp->c_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
3834 (cp->c_parentcnid != dcp->c_fileid)) {
3835 return (EINVAL);
3836 }
3837
3838 dcp->c_flag |= C_DIR_MODIFICATION;
3839
3840 // this guy is going away so mark him as such
3841 cp->c_flag |= C_DELETED;
3842
3843
3844 /* Remove our entry from the namei cache. */
3845 cache_purge(vp);
3846
3847 /*
3848 * If the caller was operating on a file (as opposed to a
3849 * directory with EAs), then we need to figure out
3850 * whether or not it has a valid resource fork vnode.
3851 *
3852 * If there was a valid resource fork vnode, then we need
3853 * to use hfs_truncate to eliminate its data. If there is
3854 * no vnode, then we hold the cnode lock which would
3855 * prevent it from being created. As a result,
3856 * we can use the data deletion functions which do not
3857 * require that a cnode/vnode pair exist.
3858 */
3859
3860 /* Check if this file is being used. */
3861 if (isdir == 0) {
3862 dataforkbusy = vnode_isinuse(vp, 0);
3863 /*
3864 * At this point, we know that 'vp' points to the
3865 		 * data fork because we checked it up front. And if
3866 * there is no rsrc fork, rsrc_vp will be NULL.
3867 */
3868 if (rsrc_vp && (cp->c_blocks - VTOF(vp)->ff_blocks)) {
3869 rsrcforkbusy = vnode_isinuse(rsrc_vp, 0);
3870 }
3871 }
3872
3873 /* Check if we have to break the deletion into multiple pieces. */
3874 if (isdir == 0)
3875 isbigfile = cp->c_datafork->ff_size >= HFS_BIGFILE_SIZE;
3876
3877 	/* Check if the file has xattrs.  If it does, we'll have to delete them in
3878 	   individual transactions in case there are too many. */
3879 if ((hfsmp->hfs_attribute_vp != NULL) &&
3880 (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) {
3881 defer_remove = 1;
3882 }
3883
3884 	/* If we are explicitly told to only unlink the item and move it to the hidden dir, then do it */
3885 if (only_unlink) {
3886 defer_remove = 1;
3887 }
3888
3889 /*
3890 * Carbon semantics prohibit deleting busy files.
3891 * (enforced when VNODE_REMOVE_NODELETEBUSY is requested)
3892 */
3893 if (dataforkbusy || rsrcforkbusy) {
3894 if ((flags & VNODE_REMOVE_NODELETEBUSY) ||
3895 (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid == 0)) {
3896 error = EBUSY;
3897 goto out;
3898 }
3899 }
3900
3901 #if QUOTA
3902 if (hfsmp->hfs_flags & HFS_QUOTAS)
3903 (void)hfs_getinoquota(cp);
3904 #endif /* QUOTA */
3905
3906 /*
3907 * Do a ubc_setsize to indicate we need to wipe contents if:
3908 * 1) item is a regular file.
3909 * 2) Neither fork is busy AND we are not told to unlink this.
3910 *
3911 * We need to check for the defer_remove since it can be set without
3912 * having a busy data or rsrc fork
3913 */
3914 if (isdir == 0 && (!dataforkbusy || !rsrcforkbusy) && (defer_remove == 0)) {
3915 /*
3916 * A ubc_setsize can cause a pagein so defer it
3917 * until after the cnode lock is dropped. The
3918 * cnode lock cannot be dropped/reacquired here
3919 * since we might already hold the journal lock.
3920 */
3921 if (!dataforkbusy && cp->c_datafork->ff_blocks && !isbigfile) {
3922 cp->c_flag |= C_NEED_DATA_SETSIZE;
3923 }
3924 if (!rsrcforkbusy && rsrc_vp) {
3925 cp->c_flag |= C_NEED_RSRC_SETSIZE;
3926 }
3927 }
3928
3929 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3930 goto out;
3931 }
3932 started_tr = 1;
3933
3934 // XXXdbg - if we're journaled, kill any dirty symlink buffers
3935 if (hfsmp->jnl && vnode_islnk(vp) && (defer_remove == 0)) {
3936 buf_iterate(vp, hfs_removefile_callback, BUF_SKIP_NONLOCKED, (void *)hfsmp);
3937 }
3938
3939 /*
3940 * Prepare to truncate any non-busy forks. Busy forks will
3941 * get truncated when their vnode goes inactive.
3942 * Note that we will only enter this region if we
3943 * can avoid creating an open-unlinked file. If
3944 	 * either fork is busy, we will have to create an
3945 	 * open-unlinked file.
3946 *
3947 * Since we are deleting the file, we need to stagger the runtime
3948 * modifications to do things in such a way that a crash won't
3949 * result in us getting overlapped extents or any other
3950 * bad inconsistencies. As such, we call prepare_release_storage
3951 * which updates the UBC, updates quota information, and releases
3952 * any loaned blocks that belong to this file. No actual
3953 * truncation or bitmap manipulation is done until *AFTER*
3954 * the catalog record is removed.
3955 */
3956 if (isdir == 0 && (!dataforkbusy && !rsrcforkbusy) && (only_unlink == 0)) {
3957
3958 if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) {
3959
3960 error = hfs_prepare_release_storage (hfsmp, vp);
3961 if (error) {
3962 goto out;
3963 }
3964 update_vh = 1;
3965 }
3966
3967 /*
3968 * If the resource fork vnode does not exist, we can skip this step.
3969 */
3970 if (!rsrcforkbusy && rsrc_vp) {
3971 error = hfs_prepare_release_storage (hfsmp, rsrc_vp);
3972 if (error) {
3973 goto out;
3974 }
3975 update_vh = 1;
3976 }
3977 }
3978
3979 /*
3980 * Protect against a race with rename by using the component
3981 * name passed in and parent id from dvp (instead of using
3982 * the cp->c_desc which may have changed). Also, be aware that
3983 * because we allow directories to be passed in, we need to special case
3984 * this temporary descriptor in case we were handed a directory.
3985 */
3986 if (isdir) {
3987 desc.cd_flags = CD_ISDIR;
3988 }
3989 else {
3990 desc.cd_flags = 0;
3991 }
3992 desc.cd_encoding = cp->c_desc.cd_encoding;
3993 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3994 desc.cd_namelen = cnp->cn_namelen;
3995 desc.cd_parentcnid = dcp->c_fileid;
3996 desc.cd_hint = cp->c_desc.cd_hint;
3997 desc.cd_cnid = cp->c_cnid;
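	/* Sample the current time once; tv is used for the catalog timestamp updates below. */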
3998 microtime(&tv);
3999
4000 /*
4001 * There are two cases to consider:
4002 * 1. File/Dir is busy/big/defer_remove ==> move/rename the file/dir
4003 * 2. File is not in use ==> remove the file
4004 *
4005 * We can get a directory in case 1 because it may have had lots of attributes,
4006 * which need to get removed here.
4007 */
4008 if (dataforkbusy || rsrcforkbusy || isbigfile || defer_remove) {
4009 char delname[32];
4010 struct cat_desc to_desc;
4011 struct cat_desc todir_desc;
4012
4013 /*
4014 * Orphan this file or directory (move to hidden directory).
4015 * Again, we need to take care that we treat directories as directories,
4016 		 * and files as files. Because directories with attributes can be passed in,
4017 * check to make sure that we have a directory or a file before filling in the
4018 * temporary descriptor's flags. We keep orphaned directories AND files in
4019 * the FILE_HARDLINKS private directory since we're generalizing over all
4020 * orphaned filesystem objects.
4021 */
4022 bzero(&todir_desc, sizeof(todir_desc));
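		/* cnid 2 is kHFSRootFolderID: the private/hidden directories live in the volume root. */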
4023 todir_desc.cd_parentcnid = 2;
4024
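		/* Build a unique temporary name for the orphan from its file ID. */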
4025 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
4026 bzero(&to_desc, sizeof(to_desc));
4027 to_desc.cd_nameptr = (const u_int8_t *)delname;
4028 to_desc.cd_namelen = strlen(delname);
4029 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
4030 if (isdir) {
4031 to_desc.cd_flags = CD_ISDIR;
4032 }
4033 else {
4034 to_desc.cd_flags = 0;
4035 }
4036 to_desc.cd_cnid = cp->c_cnid;
4037
4038 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
4039 if (!skip_reserve) {
4040 if ((error = cat_preflight(hfsmp, CAT_RENAME, NULL, 0))) {
4041 hfs_systemfile_unlock(hfsmp, lockflags);
4042 goto out;
4043 }
4044 }
4045
4046 error = cat_rename(hfsmp, &desc, &todir_desc,
4047 &to_desc, (struct cat_desc *)NULL);
4048
4049 if (error == 0) {
4050 hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries++;
4051 if (isdir == 1) {
4052 INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
4053 }
4054 (void) cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
4055 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
4056
4057 /* Update the parent directory */
4058 if (dcp->c_entries > 0)
4059 dcp->c_entries--;
4060 if (isdir == 1) {
4061 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
4062 }
4063 dcp->c_dirchangecnt++;
4064 hfs_incr_gencount(dcp);
4065
4066 dcp->c_ctime = tv.tv_sec;
4067 dcp->c_mtime = tv.tv_sec;
4068 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
4069
4070 /* Update the file or directory's state */
4071 cp->c_flag |= C_DELETED;
4072 cp->c_ctime = tv.tv_sec;
4073 --cp->c_linkcount;
4074 (void) cat_update(hfsmp, &to_desc, &cp->c_attr, NULL, NULL);
4075 }
4076 hfs_systemfile_unlock(hfsmp, lockflags);
4077 if (error)
4078 goto out;
4079
4080 }
4081 else {
4082 /*
4083 * Nobody is using this item; we can safely remove everything.
4084 */
4085 struct filefork *temp_rsrc_fork = NULL;
4086 #if QUOTA
4087 off_t savedbytes;
4088 int blksize = hfsmp->blockSize;
4089 #endif
4090 u_int32_t fileid = cp->c_fileid;
4091
4092 /*
4093 * Figure out if we need to read the resource fork data into
4094 * core before wiping out the catalog record.
4095 *
4096 * 1) Must not be a directory
4097 * 2) cnode's c_rsrcfork ptr must be NULL.
4098 * 3) rsrc fork must have actual blocks
4099 */
4100 if ((isdir == 0) && (cp->c_rsrcfork == NULL) &&
4101 (cp->c_blocks - VTOF(vp)->ff_blocks)) {
4102 /*
4103 * The resource fork vnode & filefork did not exist.
4104 * Create a temporary one for use in this function only.
4105 */
4106 MALLOC_ZONE (temp_rsrc_fork, struct filefork *, sizeof (struct filefork), M_HFSFORK, M_WAITOK);
4107 bzero(temp_rsrc_fork, sizeof(struct filefork));
4108 temp_rsrc_fork->ff_cp = cp;
4109 rl_init(&temp_rsrc_fork->ff_invalidranges);
4110 }
4111
4112 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
4113
4114 /* Look up the resource fork first, if necessary */
4115 if (temp_rsrc_fork) {
4116 error = cat_lookup (hfsmp, &desc, 1, 0, (struct cat_desc*) NULL,
4117 (struct cat_attr*) NULL, &temp_rsrc_fork->ff_data, NULL);
4118 if (error) {
4119 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
4120 hfs_systemfile_unlock (hfsmp, lockflags);
4121 goto out;
4122 }
4123 }
4124
4125 if (!skip_reserve) {
4126 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
4127 if (temp_rsrc_fork) {
4128 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
4129 }
4130 hfs_systemfile_unlock(hfsmp, lockflags);
4131 goto out;
4132 }
4133 }
4134
4135 error = cat_delete(hfsmp, &desc, &cp->c_attr);
4136
4137 if (error && error != ENXIO && error != ENOENT) {
4138 printf("hfs_removefile: deleting file %s (id=%d) vol=%s err=%d\n",
4139 cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, hfsmp->vcbVN, error);
4140 }
4141
4142 if (error == 0) {
4143 /* Update the parent directory */
4144 if (dcp->c_entries > 0)
4145 dcp->c_entries--;
4146 dcp->c_dirchangecnt++;
4147 hfs_incr_gencount(dcp);
4148
4149 dcp->c_ctime = tv.tv_sec;
4150 dcp->c_mtime = tv.tv_sec;
4151 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
4152 }
4153 hfs_systemfile_unlock(hfsmp, lockflags);
4154
4155 if (error) {
4156 if (temp_rsrc_fork) {
4157 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
4158 }
4159 goto out;
4160 }
4161
4162 /*
4163 * Now that we've wiped out the catalog record, the file effectively doesn't
4164 * exist anymore. So update the quota records to reflect the loss of the
4165 * data fork and the resource fork.
4166 */
4167 #if QUOTA
4168 if (cp->c_datafork->ff_blocks > 0) {
4169 savedbytes = ((off_t)cp->c_datafork->ff_blocks * (off_t)blksize);
4170 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
4171 }
4172
4173 /*
4174 * We may have just deleted the catalog record for a resource fork even
4175 * though it did not exist in core as a vnode. However, just because there
4176 * was a resource fork pointer in the cnode does not mean that it had any blocks.
4177 */
4178 if (temp_rsrc_fork || cp->c_rsrcfork) {
4179 if (cp->c_rsrcfork) {
4180 if (cp->c_rsrcfork->ff_blocks > 0) {
4181 savedbytes = ((off_t)cp->c_rsrcfork->ff_blocks * (off_t)blksize);
4182 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
4183 }
4184 }
4185 else {
4186 /* we must have used a temporary fork */
4187 savedbytes = ((off_t)temp_rsrc_fork->ff_blocks * (off_t)blksize);
4188 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
4189 }
4190 }
4191
4192 if (hfsmp->hfs_flags & HFS_QUOTAS) {
4193 (void)hfs_chkiq(cp, -1, NOCRED, 0);
4194 }
4195 #endif
4196
4197 /*
4198 * If we didn't get any errors deleting the catalog entry, then go ahead
4199 * and release the backing store now. The filefork pointers are still valid.
4200 */
4201 if (temp_rsrc_fork) {
4202 error = hfs_release_storage (hfsmp, cp->c_datafork, temp_rsrc_fork, fileid);
4203 }
4204 else {
4205 /* if cp->c_rsrcfork == NULL, hfs_release_storage will skip over it. */
4206 error = hfs_release_storage (hfsmp, cp->c_datafork, cp->c_rsrcfork, fileid);
4207 }
4208 if (error) {
4209 /*
4210 * If we encountered an error updating the extents and bitmap,
4211 			 * mark the volume inconsistent.  The catalog record has
4212 			 * already been deleted, so we can't recover it now.  We need
4213 * to proceed and update the volume header and mark the cnode C_NOEXISTS.
4214 * The subsequent fsck should be able to recover the free space for us.
4215 */
4216 hfs_mark_inconsistent(hfsmp, HFS_OP_INCOMPLETE);
4217 }
4218 else {
4219 /* reset update_vh to 0, since hfs_release_storage should have done it for us */
4220 update_vh = 0;
4221 }
4222
4223 /* Get rid of the temporary rsrc fork */
4224 if (temp_rsrc_fork) {
4225 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
4226 }
4227
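		/* The catalog record is gone for good, so promote the cnode from C_DELETED to C_NOEXISTS. */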
4228 cp->c_flag |= C_NOEXISTS;
4229 cp->c_flag &= ~C_DELETED;
4230
4231 cp->c_touch_chgtime = TRUE;
4232 --cp->c_linkcount;
4233
4234 /*
4235 * We must never get a directory if we're in this else block. We could
4236 * accidentally drop the number of files in the volume header if we did.
4237 */
4238 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
4239
4240 }
4241
4242 //
4243 // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
4244 // we don't need to touch the document_id as it's handled by the rename code.
4245 // otherwise it's a normal remove and we need to save the document id in the
4246 // per thread struct and clear it from the cnode.
4247 //
4248 struct doc_tombstone *ut;
4249 ut = get_uthread_doc_tombstone();
4250 if (!error && !skip_reserve && (cp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, vp, cnp)) {
4251
4252 if (ut->t_lastop_document_id) {
4253 clear_tombstone_docid(ut, hfsmp, NULL);
4254 }
4255 save_tombstone(hfsmp, dvp, vp, cnp, 1);
4256
4257 }
4258
4259
4260 /*
4261 * All done with this cnode's descriptor...
4262 *
4263 * Note: all future catalog calls for this cnode must be by
4264 * fileid only. This is OK for HFS (which doesn't have file
4265 * thread records) since HFS doesn't support the removal of
4266 * busy files.
4267 */
4268 cat_releasedesc(&cp->c_desc);
4269
4270 out:
4271 if (error) {
4272 cp->c_flag &= ~C_DELETED;
4273 }
4274
4275 if (update_vh) {
4276 /*
4277 * If we bailed out earlier, we may need to update the volume header
4278 * to deal with the borrowed blocks accounting.
4279 */
4280 hfs_volupdate (hfsmp, VOL_UPDATE, 0);
4281 }
4282
4283 if (started_tr) {
4284 hfs_end_transaction(hfsmp);
4285 }
4286
4287 dcp->c_flag &= ~C_DIR_MODIFICATION;
4288 wakeup((caddr_t)&dcp->c_flag);
4289
4290 return (error);
4291 }
4292
4293
4294 __private_extern__ void
4295 replace_desc(struct cnode *cp, struct cat_desc *cdp)
4296 {
4297 // fixes 4348457 and 4463138
4298 if (&cp->c_desc == cdp) {
4299 return;
4300 }
4301
4302 /* First release allocated name buffer */
4303 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
4304 const u_int8_t *name = cp->c_desc.cd_nameptr;
4305
4306 cp->c_desc.cd_nameptr = 0;
4307 cp->c_desc.cd_namelen = 0;
4308 cp->c_desc.cd_flags &= ~CD_HASBUF;
4309 vfs_removename((const char *)name);
4310 }
4311 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
4312
4313 /* Cnode now owns the name buffer */
4314 cdp->cd_nameptr = 0;
4315 cdp->cd_namelen = 0;
4316 cdp->cd_flags &= ~CD_HASBUF;
4317 }
4318
4319
4320 /*
4321 * Rename a cnode.
4322 *
4323 * The VFS layer guarantees that:
4324 * - source and destination will either both be directories, or
4325 * both not be directories.
4326 * - all the vnodes are from the same file system
4327 *
4328  * When the target is a directory, HFS must ensure that it is empty.
4329 *
4330 * Note that this function requires up to 6 vnodes in order to work properly
4331 * if it is operating on files (and not on directories). This is because only
4332 * files can have resource forks, and we now require iocounts to be held on the
4333 * vnodes corresponding to the resource forks (if applicable) as well as
4334 * the files or directories undergoing rename. The problem with not holding
4335 * iocounts on the resource fork vnodes is that it can lead to a deadlock
4336 * situation: The rsrc fork of the source file may be recycled and reclaimed
4337 * in order to provide a vnode for the destination file's rsrc fork. Since
4338 * data and rsrc forks share the same cnode, we'd eventually try to lock the
4339 * source file's cnode in order to sync its rsrc fork to disk, but it's already
4340 * been locked. By taking the rsrc fork vnodes up front we ensure that they
4341 * cannot be recycled, and that the situation mentioned above cannot happen.
4342 */
4343 int
4344 hfs_vnop_rename(ap)
4345 struct vnop_rename_args /* {
4346 struct vnode *a_fdvp;
4347 struct vnode *a_fvp;
4348 struct componentname *a_fcnp;
4349 struct vnode *a_tdvp;
4350 struct vnode *a_tvp;
4351 struct componentname *a_tcnp;
4352 vfs_context_t a_context;
4353 } */ *ap;
4354 {
4355 struct vnode *tvp = ap->a_tvp;
4356 struct vnode *tdvp = ap->a_tdvp;
4357 struct vnode *fvp = ap->a_fvp;
4358 struct vnode *fdvp = ap->a_fdvp;
4359 /*
4360 * Note that we only need locals for the target/destination's
4361 * resource fork vnode (and only if necessary). We don't care if the
4362 * source has a resource fork vnode or not.
4363 */
4364 struct vnode *tvp_rsrc = NULLVP;
4365 uint32_t tvp_rsrc_vid = 0;
4366 struct componentname *tcnp = ap->a_tcnp;
4367 struct componentname *fcnp = ap->a_fcnp;
4368 struct proc *p = vfs_context_proc(ap->a_context);
4369 struct cnode *fcp;
4370 struct cnode *fdcp;
4371 struct cnode *tdcp;
4372 struct cnode *tcp;
4373 struct cnode *error_cnode;
4374 struct cat_desc from_desc;
4375 struct cat_desc to_desc;
4376 struct cat_desc out_desc;
4377 struct hfsmount *hfsmp;
4378 cat_cookie_t cookie;
4379 int tvp_deleted = 0;
4380 int started_tr = 0, got_cookie = 0;
4381 int took_trunc_lock = 0;
4382 int lockflags;
4383 int error;
4384 time_t orig_from_ctime, orig_to_ctime;
4385 int emit_rename = 1;
4386 int emit_delete = 1;
4387 int is_tracked = 0;
4388 int unlocked;
4389
4390 orig_from_ctime = VTOC(fvp)->c_ctime;
4391 if (tvp && VTOC(tvp)) {
4392 orig_to_ctime = VTOC(tvp)->c_ctime;
4393 } else {
4394 orig_to_ctime = ~0;
4395 }
4396
4397 hfsmp = VTOHFS(tdvp);
4398 /*
4399 * Do special case checks here. If fvp == tvp then we need to check the
4400 * cnode with locks held.
4401 */
4402 if (fvp == tvp) {
4403 int is_hardlink = 0;
4404 /*
4405 * In this case, we do *NOT* ever emit a DELETE event.
4406 		 * We may not necessarily emit a RENAME event either.
4407 */
4408 emit_delete = 0;
4409 if ((error = hfs_lock(VTOC(fvp), HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
4410 return error;
4411 }
4412 /* Check to see if the item is a hardlink or not */
4413 is_hardlink = (VTOC(fvp)->c_flag & C_HARDLINK);
4414 hfs_unlock (VTOC(fvp));
4415
4416 /*
4417 * If the item is not a hardlink, then case sensitivity must be off, otherwise
4418 * two names should not resolve to the same cnode unless they were case variants.
4419 */
4420 if (is_hardlink) {
4421 emit_rename = 0;
4422 /*
4423 * Hardlinks are a little trickier. We only want to emit a rename event
4424 * if the item is a hardlink, the parent directories are the same, case sensitivity
4425 * is off, and the case folded names are the same. See the fvp == tvp case below for more
4426 * info.
4427 */
4428
4429 if ((fdvp == tdvp) && ((hfsmp->hfs_flags & HFS_CASE_SENSITIVE) == 0)) {
4430 if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
4431 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
4432 /* Then in this case only it is ok to emit a rename */
4433 emit_rename = 1;
4434 }
4435 }
4436 }
4437 }
4438 if (emit_rename) {
4439 /* c_bsdflags should only be assessed while holding the cnode lock.
4440 * This is not done consistently throughout the code and can result
4441 		 * in a race.  This will be fixed via rdar://12181064
4442 */
4443 if (VTOC(fvp)->c_bsdflags & UF_TRACKED) {
4444 is_tracked = 1;
4445 }
4446 check_for_tracked_file(fvp, orig_from_ctime, NAMESPACE_HANDLER_RENAME_OP, NULL);
4447 }
4448
4449 if (tvp && VTOC(tvp)) {
4450 if (emit_delete) {
4451 check_for_tracked_file(tvp, orig_to_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
4452 }
4453 }
4454
4455 retry:
4456 /* When tvp exists, take the truncate lock for hfs_removefile(). */
4457 if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) {
4458 hfs_lock_truncate(VTOC(tvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4459 took_trunc_lock = 1;
4460 }
4461
4462 relock:
4463 error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL,
4464 HFS_EXCLUSIVE_LOCK, &error_cnode);
4465 if (error) {
4466 if (took_trunc_lock) {
4467 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
4468 took_trunc_lock = 0;
4469 }
4470
4471 /*
4472 * We hit an error path. If we were trying to re-acquire the locks
4473 * after coming through here once, we might have already obtained
4474 * an iocount on tvp's resource fork vnode. Drop that before dealing
4475 * with the failure. Note this is safe -- since we are in an
4476 * error handling path, we can't be holding the cnode locks.
4477 */
4478 if (tvp_rsrc) {
4479 vnode_put (tvp_rsrc);
4480 tvp_rsrc_vid = 0;
4481 tvp_rsrc = NULL;
4482 }
4483
4484 /*
4485 * tvp might no longer exist. If the cause of the lock failure
4486 * was tvp, then we can try again with tvp/tcp set to NULL.
4487 * This is ok because the vfs syscall will vnode_put the vnodes
4488 * after we return from hfs_vnop_rename.
4489 */
4490 if ((error == ENOENT) && (tvp != NULL) && (error_cnode == VTOC(tvp))) {
4491 tcp = NULL;
4492 tvp = NULL;
4493 goto retry;
4494 }
4495
4496 /* If we want to reintroduce notifications for failed renames, this
4497 is the place to do it. */
4498
4499 return (error);
4500 }
4501
4502 fdcp = VTOC(fdvp);
4503 fcp = VTOC(fvp);
4504 tdcp = VTOC(tdvp);
4505 tcp = tvp ? VTOC(tvp) : NULL;
4506
4507 //
4508 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
4509 //
4510 unlocked = 0;
4511 if ((fcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
4512 uint32_t newid;
4513
4514 hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
4515 unlocked = 1;
4516
4517 if (hfs_generate_document_id(hfsmp, &newid) == 0) {
4518 hfs_lock(fcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4519 ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id = newid;
4520 #if CONFIG_FSE
4521 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4522 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4523 FSE_ARG_INO, (ino64_t)0, // src inode #
4524 FSE_ARG_INO, (ino64_t)fcp->c_fileid, // dst inode #
4525 FSE_ARG_INT32, newid,
4526 FSE_ARG_DONE);
4527 #endif
4528 hfs_unlock(fcp);
4529 } else {
4530 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rename...
4531 }
4532
4533 //
4534 		// check if we're going to need to fix tcp as well.  if we aren't, go back and relock
4535 // everything. otherwise continue on and fix up tcp as well before relocking.
4536 //
4537 if (tcp == NULL || !(tcp->c_bsdflags & UF_TRACKED) || ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id != 0) {
4538 goto relock;
4539 }
4540 }
4541
4542 //
4543 // same thing for tcp if it's set
4544 //
4545 if (tcp && (tcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
4546 uint32_t newid;
4547
4548 if (!unlocked) {
4549 hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
4550 unlocked = 1;
4551 }
4552
4553 if (hfs_generate_document_id(hfsmp, &newid) == 0) {
4554 hfs_lock(tcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4555 ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id = newid;
4556 #if CONFIG_FSE
4557 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4558 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4559 FSE_ARG_INO, (ino64_t)0, // src inode #
4560 FSE_ARG_INO, (ino64_t)tcp->c_fileid, // dst inode #
4561 FSE_ARG_INT32, newid,
4562 FSE_ARG_DONE);
4563 #endif
4564 hfs_unlock(tcp);
4565 } else {
4566 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rename...
4567 }
4568
4569 // go back up and relock everything. next time through the if statement won't be true
4570 // and we'll skip over this block of code.
4571 goto relock;
4572 }
4573
4574
4575
4576 /*
4577 * Acquire iocounts on the destination's resource fork vnode
4578 * if necessary. If dst/src are files and the dst has a resource
4579 * fork vnode, then we need to try and acquire an iocount on the rsrc vnode.
4580 * If it does not exist, then we don't care and can skip it.
4581 */
4582 if ((vnode_isreg(fvp)) || (vnode_islnk(fvp))) {
4583 if ((tvp) && (tcp->c_rsrc_vp) && (tvp_rsrc == NULL)) {
4584 tvp_rsrc = tcp->c_rsrc_vp;
4585 /*
4586 * We can look at the vid here because we're holding the
4587 * cnode lock on the underlying cnode for this rsrc vnode.
4588 */
4589 tvp_rsrc_vid = vnode_vid (tvp_rsrc);
4590
4591 /* Unlock everything to acquire iocount on this rsrc vnode */
4592 if (took_trunc_lock) {
4593 hfs_unlock_truncate (VTOC(tvp), HFS_LOCK_DEFAULT);
4594 took_trunc_lock = 0;
4595 }
4596 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4597
4598 if (vnode_getwithvid (tvp_rsrc, tvp_rsrc_vid)) {
4599 /* iocount acquisition failed. Reset fields and start over.. */
4600 tvp_rsrc_vid = 0;
4601 tvp_rsrc = NULL;
4602 }
4603 goto retry;
4604 }
4605 }
4606
4607
4608
4609 /* Ensure we didn't race src or dst parent directories with rmdir. */
4610 if (fdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
4611 error = ENOENT;
4612 goto out;
4613 }
4614
4615 if (tdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
4616 error = ENOENT;
4617 goto out;
4618 }
4619
4620
4621 /* Check for a race against unlink. The hfs_valid_cnode checks validate
4622 * the parent/child relationship with fdcp and tdcp, as well as the
4623 * component name of the target cnodes.
4624 */
4625 if ((fcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, fdvp, fcnp, fcp->c_fileid, NULL, &error)) {
4626 error = ENOENT;
4627 goto out;
4628 }
4629
4630 if (tcp && ((tcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, tdvp, tcnp, tcp->c_fileid, NULL, &error))) {
4631 //
4632 // hmm, the destination vnode isn't valid any more.
4633 // in this case we can just drop him and pretend he
4634 // never existed in the first place.
4635 //
4636 if (took_trunc_lock) {
4637 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
4638 took_trunc_lock = 0;
4639 }
4640 error = 0;
4641
4642 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4643
4644 tcp = NULL;
4645 tvp = NULL;
4646
4647 // retry the locking with tvp null'ed out
4648 goto retry;
4649 }
4650
4651 fdcp->c_flag |= C_DIR_MODIFICATION;
4652 if (fdvp != tdvp) {
4653 tdcp->c_flag |= C_DIR_MODIFICATION;
4654 }
4655
4656 /*
4657 * Disallow renaming of a directory hard link if the source and
4658 * destination parent directories are different, or a directory whose
4659 	 * descendant is a directory hard link and one of the ancestors
4660 * of the destination directory is a directory hard link.
4661 */
4662 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
4663 if (fcp->c_flag & C_HARDLINK) {
4664 error = EPERM;
4665 goto out;
4666 }
4667 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
4668 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4669 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
4670 error = EPERM;
4671 hfs_systemfile_unlock(hfsmp, lockflags);
4672 goto out;
4673 }
4674 hfs_systemfile_unlock(hfsmp, lockflags);
4675 }
4676 }
4677
4678 /*
4679 * The following edge case is caught here:
4680 	 * (to cannot be a descendant of from)
4681 *
4682 * o fdvp
4683 * /
4684 * /
4685 * o fvp
4686 * \
4687 * \
4688 * o tdvp
4689 * /
4690 * /
4691 * o tvp
4692 */
4693 if (tdcp->c_parentcnid == fcp->c_fileid) {
4694 error = EINVAL;
4695 goto out;
4696 }
4697
4698 /*
4699 * The following two edge cases are caught here:
4700 * (note tvp is not empty)
4701 *
4702 * o tdvp o tdvp
4703 * / /
4704 * / /
4705 * o tvp tvp o fdvp
4706 * \ \
4707 * \ \
4708 * o fdvp o fvp
4709 * /
4710 * /
4711 * o fvp
4712 */
4713 if (tvp && vnode_isdir(tvp) && (tcp->c_entries != 0) && fvp != tvp) {
4714 error = ENOTEMPTY;
4715 goto out;
4716 }
4717
4718 /*
4719 * The following edge case is caught here:
4720 * (the from child and parent are the same)
4721 *
4722 * o tdvp
4723 * /
4724 * /
4725 * fdvp o fvp
4726 */
4727 if (fdvp == fvp) {
4728 error = EINVAL;
4729 goto out;
4730 }
4731
4732 /*
4733 * Make sure "from" vnode and its parent are changeable.
4734 */
4735 if ((fcp->c_bsdflags & (IMMUTABLE | APPEND)) || (fdcp->c_bsdflags & APPEND)) {
4736 error = EPERM;
4737 goto out;
4738 }
4739
4740 /*
4741 * If the destination parent directory is "sticky", then the
4742 * user must own the parent directory, or the destination of
4743 * the rename, otherwise the destination may not be changed
4744 * (except by root). This implements append-only directories.
4745 *
4746 * Note that checks for immutable and write access are done
4747 * by the call to hfs_removefile.
4748 */
4749 if (tvp && (tdcp->c_mode & S_ISTXT) &&
4750 (suser(vfs_context_ucred(tcnp->cn_context), NULL)) &&
4751 (kauth_cred_getuid(vfs_context_ucred(tcnp->cn_context)) != tdcp->c_uid) &&
4752 (hfs_owner_rights(hfsmp, tcp->c_uid, vfs_context_ucred(tcnp->cn_context), p, false)) ) {
4753 error = EPERM;
4754 goto out;
4755 }
4756
4757 /* Don't allow modification of the journal or journal_info_block */
4758 if (hfs_is_journal_file(hfsmp, fcp) ||
4759 (tcp && hfs_is_journal_file(hfsmp, tcp))) {
4760 error = EPERM;
4761 goto out;
4762 }
4763
4764 #if QUOTA
4765 if (tvp)
4766 (void)hfs_getinoquota(tcp);
4767 #endif
4768 /* Preflighting done, take fvp out of the name space. */
4769 cache_purge(fvp);
4770
4771 #if CONFIG_SECLUDED_RENAME
4772 /*
4773 * Check for "secure" rename that imposes additional restrictions on the
4774 * source vnode. We wait until here to check in order to prevent a race
4775 * with other threads that manage to look up fvp, but their open or link
4776 * is blocked by our locks. At this point, with fvp out of the name cache,
4777 * and holding the lock on fdvp, no other thread can find fvp.
4778 *
4779 * TODO: Do we need to limit these checks to regular files only?
4780 */
4781 if (fcnp->cn_flags & CN_SECLUDE_RENAME) {
4782 if (vnode_isdir(fvp)) {
4783 error = EISDIR;
4784 goto out;
4785 }
4786
4787 /*
4788 * Neither fork of source may be open or memory mapped.
4789 * We also don't want it in use by any other system call.
4790 * The file must not have hard links.
4791 *
4792 * We can't simply use vnode_isinuse() because that does not
4793 * count opens with O_EVTONLY. We don't want a malicious
4794 * process using O_EVTONLY to subvert a secluded rename.
4795 */
4796 if (fcp->c_linkcount != 1) {
4797 error = EMLINK;
4798 goto out;
4799 }
4800
4801 if (fcp->c_rsrc_vp && (fcp->c_rsrc_vp->v_usecount > 0 ||
4802 fcp->c_rsrc_vp->v_iocount > 0)) {
4803 /* Resource fork is in use (including O_EVTONLY) */
4804 error = EBUSY;
4805 goto out;
4806 }
4807 if (fcp->c_vp && (fcp->c_vp->v_usecount > (fcp->c_rsrc_vp ? 1 : 0) ||
4808 fcp->c_vp->v_iocount > 1)) {
4809 /*
4810 * Data fork is in use, including O_EVTONLY, but not
4811 * including a reference from the resource fork.
4812 */
4813 error = EBUSY;
4814 goto out;
4815 }
4816 }
4817 #endif
4818
4819 bzero(&from_desc, sizeof(from_desc));
4820 from_desc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
4821 from_desc.cd_namelen = fcnp->cn_namelen;
4822 from_desc.cd_parentcnid = fdcp->c_fileid;
4823 from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
4824 from_desc.cd_cnid = fcp->c_cnid;
4825
4826 bzero(&to_desc, sizeof(to_desc));
4827 to_desc.cd_nameptr = (const u_int8_t *)tcnp->cn_nameptr;
4828 to_desc.cd_namelen = tcnp->cn_namelen;
4829 to_desc.cd_parentcnid = tdcp->c_fileid;
4830 to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
4831 to_desc.cd_cnid = fcp->c_cnid;
4832
4833 if ((error = hfs_start_transaction(hfsmp)) != 0) {
4834 goto out;
4835 }
4836 started_tr = 1;
4837
4838 /* hfs_vnop_link() and hfs_vnop_rename() set kHFSHasChildLinkMask
4839 * inside a journal transaction and without holding a cnode lock.
4840 * As setting of this bit depends on being in journal transaction for
4841 * concurrency, check this bit again after we start journal transaction for rename
4842 * to ensure that this directory does not have any descendant that
4843 * is a directory hard link.
4844 */
4845 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
4846 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
4847 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4848 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
4849 error = EPERM;
4850 hfs_systemfile_unlock(hfsmp, lockflags);
4851 goto out;
4852 }
4853 hfs_systemfile_unlock(hfsmp, lockflags);
4854 }
4855 }
4856
4857 // if it's a hardlink then re-lookup the name so
4858 // that we get the correct cnid in from_desc (see
4859 // the comment in hfs_removefile for more details)
4860 //
4861 if (fcp->c_flag & C_HARDLINK) {
4862 struct cat_desc tmpdesc;
4863 cnid_t real_cnid;
4864
4865 tmpdesc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
4866 tmpdesc.cd_namelen = fcnp->cn_namelen;
4867 tmpdesc.cd_parentcnid = fdcp->c_fileid;
4868 tmpdesc.cd_hint = fdcp->c_childhint;
4869 tmpdesc.cd_flags = fcp->c_desc.cd_flags & CD_ISDIR;
4870 tmpdesc.cd_encoding = 0;
4871
4872 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4873
4874 if (cat_lookup(hfsmp, &tmpdesc, 0, 0, NULL, NULL, NULL, &real_cnid) != 0) {
4875 hfs_systemfile_unlock(hfsmp, lockflags);
4876 goto out;
4877 }
4878
4879 // use the real cnid instead of whatever happened to be there
4880 from_desc.cd_cnid = real_cnid;
4881 hfs_systemfile_unlock(hfsmp, lockflags);
4882 }
4883
4884 /*
4885 * Reserve some space in the Catalog file.
4886 */
4887 if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) {
4888 goto out;
4889 }
4890 got_cookie = 1;
4891
4892 /*
4893 * If the destination exists then it may need to be removed.
4894 *
4895 * Due to HFS's locking system, we should always move the
4896 * existing 'tvp' element to the hidden directory in hfs_vnop_rename.
4897 * Because the VNOP_LOOKUP call enters and exits the filesystem independently
4898 * of the actual vnop that it was trying to do (stat, link, readlink),
4899 * we must release the cnode lock of that element during the interim to
4900 * do MAC checking, vnode authorization, and other calls. In that time,
4901 * the item can be deleted (or renamed over). However, only in the rename
4902 * case is it inappropriate to return ENOENT from any of those calls. Either
4903 * the call should return information about the old element (stale), or get
4904 * information about the newer element that we are about to write in its place.
4905 *
4906 * HFS lookup has been modified to detect a rename and re-drive its
4907 * lookup internally. For other calls that have already succeeded in
4908 * their lookup call and are waiting to acquire the cnode lock in order
4909 * to proceed, that cnode lock will not fail due to the cnode being marked
4910 * C_NOEXISTS, because it won't have been marked as such. It will only
4911 * have C_DELETED. Thus, they will simply act on the stale open-unlinked
4912 * element. All future callers will get the new element.
4913 *
4914 * To implement this behavior, we pass the "only_unlink" argument to
4915 * hfs_removefile and hfs_removedir. This will result in the vnode acting
4916 * as though it is open-unlinked. Additionally, when we are done moving the
4917 * element to the hidden directory, we vnode_recycle the target so that it is
4918 * reclaimed as soon as possible. Reclaim and inactive are both
4919 * capable of clearing out unused blocks for an open-unlinked file or dir.
4920 */
4921 if (tvp) {
4922 //
4923 // if the destination has a document id, we need to preserve it
4924 //
4925 if (fvp != tvp) {
4926 uint32_t document_id;
4927 struct FndrExtendedDirInfo *ffip = (struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
4928 struct FndrExtendedDirInfo *tfip = (struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16);
4929
4930 if (ffip->document_id && tfip->document_id) {
4931 // both documents are tracked. only save a tombstone from tcp and do nothing else.
4932 save_tombstone(hfsmp, tdvp, tvp, tcnp, 0);
4933 } else {
4934 struct doc_tombstone *ut;
4935 ut = get_uthread_doc_tombstone();
4936
4937 document_id = tfip->document_id;
4938 tfip->document_id = 0;
4939
4940 if (document_id != 0) {
4941 // clear UF_TRACKED as well since tcp is now no longer tracked
4942 tcp->c_bsdflags &= ~UF_TRACKED;
4943 (void) cat_update(hfsmp, &tcp->c_desc, &tcp->c_attr, NULL, NULL);
4944 }
4945
4946 if (ffip->document_id == 0 && document_id != 0) {
4947 // printf("RENAME: preserving doc-id %d onto %s (from ino %d, to ino %d)\n", document_id, tcp->c_desc.cd_nameptr, tcp->c_desc.cd_cnid, fcp->c_desc.cd_cnid);
4948 fcp->c_bsdflags |= UF_TRACKED;
4949 ffip->document_id = document_id;
4950
4951 (void) cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
4952 #if CONFIG_FSE
4953 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4954 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4955 FSE_ARG_INO, (ino64_t)tcp->c_fileid, // src inode #
4956 FSE_ARG_INO, (ino64_t)fcp->c_fileid, // dst inode #
4957 FSE_ARG_INT32, (uint32_t)ffip->document_id,
4958 FSE_ARG_DONE);
4959 #endif
4960 } else if ((fcp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, fvp, fcnp)) {
4961
4962 if (ut->t_lastop_document_id) {
4963 clear_tombstone_docid(ut, hfsmp, NULL);
4964 }
4965 save_tombstone(hfsmp, fdvp, fvp, fcnp, 0);
4966
4967 //printf("RENAME: (dest-exists): saving tombstone doc-id %lld @ %s (ino %d)\n",
4968 // ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
4969 }
4970 }
4971 }
4972
4973 /*
4974 * When fvp matches tvp they could be case variants
4975 * or matching hard links.
4976 */
4977 if (fvp == tvp) {
4978 if (!(fcp->c_flag & C_HARDLINK)) {
4979 /*
4980 * If they're not hardlinks, then fvp == tvp must mean we
4981 * are using case-insensitive HFS because case-sensitive would
4982 * not use the same vnode for both. In this case we just update
4983 * the catalog for: a -> A
4984 */
4985 goto skip_rm; /* simple case variant */
4986
4987 }
4988 /* For all cases below, we must be using hardlinks */
4989 else if ((fdvp != tdvp) ||
4990 (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) {
4991 /*
4992 * If the parent directories are not the same, AND the two items
4993 * are hardlinks, posix says to do nothing:
4994 * dir1/fred <-> dir2/bob and the op was mv dir1/fred -> dir2/bob
4995 * We just return 0 in this case.
4996 *
4997 * If case sensitivity is on, and we are using hardlinks
4998 * then renaming is supposed to do nothing.
4999 * dir1/fred <-> dir2/FRED, and op == mv dir1/fred -> dir2/FRED
5000 */
5001 goto out; /* matching hardlinks, nothing to do */
5002
5003 } else if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
5004 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
5005 /*
5006 * If we get here, then the following must be true:
5007 * a) We are running case-insensitive HFS+.
5008 * b) Both paths 'fvp' and 'tvp' are in the same parent directory.
5009 * c) the two names are case-variants of each other.
5010 *
5011 * In this case, we are really only dealing with a single catalog record
5012 * whose name is being updated.
5013 *
5014 * op is dir1/fred -> dir1/FRED
5015 *
5016 * We need to special case the name matching, because if
5017 * dir1/fred <-> dir1/bob were the two links, and the
5018 * op was dir1/fred -> dir1/bob
5019 * That would fail/do nothing.
5020 */
5021 goto skip_rm; /* case-variant hardlink in the same dir */
5022 } else {
5023 goto out; /* matching hardlink, nothing to do */
5024 }
5025 }
5026
5027
5028 if (vnode_isdir(tvp)) {
5029 /*
5030 * hfs_removedir will eventually call hfs_removefile on the directory
5031 * we're working on, because only hfs_removefile does the renaming of the
5032 * item to the hidden directory. The directory will stay around in the
5033 * hidden directory with C_DELETED until it gets an inactive or a reclaim.
5034 * That way, we can destroy all of the EAs as needed and allow new ones to be
5035 * written.
5036 */
5037 error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE, 1);
5038 }
5039 else {
5040 error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0, NULL, 1);
5041
5042 /*
5043 * If the destination file had a resource fork vnode, then we need to get rid of
5044 * its blocks when there are no more references to it. Because the call to
5045 * hfs_removefile above always open-unlinks things, we need to force an inactive/reclaim
5046 * on the resource fork vnode, in order to prevent block leaks. Otherwise,
5047 * the resource fork vnode could prevent the data fork vnode from going out of scope
5048 * because it holds a v_parent reference on it. So we mark it for termination
5049 * with a call to vnode_recycle. hfs_vnop_reclaim has been modified so that it
5050 * can clean up the blocks of open-unlinked files and resource forks.
5051 *
5052 * We can safely call vnode_recycle on the resource fork because we took an iocount
5053 * reference on it at the beginning of the function.
5054 */
5055
5056 if ((error == 0) && (tcp->c_flag & C_DELETED) && (tvp_rsrc)) {
5057 vnode_recycle(tvp_rsrc);
5058 }
5059 }
5060
5061 if (error) {
5062 goto out;
5063 }
5064
5065 tvp_deleted = 1;
5066
5067 /* Mark 'tcp' as being deleted due to a rename */
5068 tcp->c_flag |= C_RENAMED;
5069
5070 /*
5071 * Aggressively mark tvp/tcp for termination to ensure that we recover all blocks
5072 * as quickly as possible.
5073 */
5074 vnode_recycle(tvp);
5075 } else {
5076 struct doc_tombstone *ut;
5077 ut = get_uthread_doc_tombstone();
5078
5079 //
5080 // There is nothing at the destination. If the file being renamed is
5081 // tracked, save a "tombstone" of the document_id. If the file is
5082 // not a tracked file, then see if it needs to inherit a tombstone.
5083 //
5084 // NOTE: we do not save a tombstone if the file being renamed begins
5085 // with "atmp" which is done to work-around AutoCad's bizarre
5086 // 5-step un-safe save behavior
5087 //
5088 if (fcp->c_bsdflags & UF_TRACKED) {
5089 if (should_save_docid_tombstone(ut, fvp, fcnp)) {
5090 save_tombstone(hfsmp, fdvp, fvp, fcnp, 0);
5091
5092 //printf("RENAME: (no dest): saving tombstone doc-id %lld @ %s (ino %d)\n",
5093 // ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
5094 } else {
5095 // intentionally do nothing
5096 }
5097 } else if ( ut->t_lastop_document_id != 0
5098 && tdvp == ut->t_lastop_parent
5099 && vnode_vid(tdvp) == ut->t_lastop_parent_vid
5100 && strcmp((char *)ut->t_lastop_filename, (char *)tcnp->cn_nameptr) == 0) {
5101
5102 //printf("RENAME: %s (ino %d) inheriting doc-id %lld\n", tcnp->cn_nameptr, fcp->c_desc.cd_cnid, ut->t_lastop_document_id);
5103 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
5104 fcp->c_bsdflags |= UF_TRACKED;
5105 fip->document_id = ut->t_lastop_document_id;
5106 cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
5107
5108 clear_tombstone_docid(ut, hfsmp, fcp); // will send the docid-changed fsevent
5109
5110 } else if (ut->t_lastop_document_id && should_save_docid_tombstone(ut, fvp, fcnp) && should_save_docid_tombstone(ut, tvp, tcnp)) {
5111 // no match, clear the tombstone
5112 //printf("RENAME: clearing the tombstone %lld @ %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
5113 clear_tombstone_docid(ut, hfsmp, NULL);
5114 }
5115
5116 }
5117 skip_rm:
5118 /*
5119 * All done with tvp and fvp.
5120 *
5121 * We also jump to this point if there was no destination observed during lookup and namei.
5122 * However, because only iocounts are held at the VFS layer, there is nothing preventing a
5123 * competing thread from racing us and creating a file or dir at the destination of this rename
5124 * operation. If this occurs, it may cause us to get a spurious EEXIST out of the cat_rename
5125 * call below. To preserve rename's atomicity, we need to signal VFS to re-drive the
5126 * namei/lookup and restart the rename operation. EEXIST is an allowable errno to be bubbled
5127 * out of the rename syscall, but not for this reason, since it is a synonym errno for ENOTEMPTY.
5128 * To signal VFS, we return ERECYCLE (which is also used for lookup restarts). This errno
5129 * will be swallowed and it will restart the operation.
5130 */
5131
5132 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
5133 error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);
5134 hfs_systemfile_unlock(hfsmp, lockflags);
5135
5136 if (error) {
5137 if (error == EEXIST) {
5138 error = ERECYCLE;
5139 }
5140 goto out;
5141 }
5142
5143 /* Invalidate negative cache entries in the destination directory */
5144 if (tdcp->c_flag & C_NEG_ENTRIES) {
5145 cache_purge_negatives(tdvp);
5146 tdcp->c_flag &= ~C_NEG_ENTRIES;
5147 }
5148
5149 /* Update cnode's catalog descriptor */
5150 replace_desc(fcp, &out_desc);
5151 fcp->c_parentcnid = tdcp->c_fileid;
5152 fcp->c_hint = 0;
5153
5154 /* Now indicate this cnode needs to have date-added written to the finderinfo */
5155 fcp->c_flag |= C_NEEDS_DATEADDED;
5156 (void) hfs_update (fvp, 0);
5157
5158
5159 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_RMDIR : VOL_RMFILE,
5160 (fdcp->c_cnid == kHFSRootFolderID));
5161 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_MKDIR : VOL_MKFILE,
5162 (tdcp->c_cnid == kHFSRootFolderID));
5163
5164 /* Update both parent directories. */
5165 if (fdvp != tdvp) {
5166 if (vnode_isdir(fvp)) {
5167 /* If the source directory has directory hard link
5168 * descendants, set the kHFSHasChildLinkBit in the
5169 * destination parent hierarchy
5170 */
5171 if ((fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) &&
5172 !(tdcp->c_attr.ca_recflags & kHFSHasChildLinkMask)) {
5173
5174 tdcp->c_attr.ca_recflags |= kHFSHasChildLinkMask;
5175
5176 error = cat_set_childlinkbit(hfsmp, tdcp->c_parentcnid);
5177 if (error) {
5178 printf ("hfs_vnop_rename: error updating parent chain for %u\n", tdcp->c_cnid);
5179 error = 0;
5180 }
5181 }
5182 INC_FOLDERCOUNT(hfsmp, tdcp->c_attr);
5183 DEC_FOLDERCOUNT(hfsmp, fdcp->c_attr);
5184 }
5185 tdcp->c_entries++;
5186 tdcp->c_dirchangecnt++;
5187 tdcp->c_flag |= C_MODIFIED;
5188 hfs_incr_gencount(tdcp);
5189
5190 if (fdcp->c_entries > 0)
5191 fdcp->c_entries--;
5192 fdcp->c_dirchangecnt++;
5193 fdcp->c_flag |= C_MODIFIED;
5194 fdcp->c_touch_chgtime = TRUE;
5195 fdcp->c_touch_modtime = TRUE;
5196
5197 if (ISSET(fcp->c_flag, C_HARDLINK)) {
5198 hfs_relorigin(fcp, fdcp->c_fileid);
5199 if (fdcp->c_fileid != fdcp->c_cnid)
5200 hfs_relorigin(fcp, fdcp->c_cnid);
5201 }
5202
5203 (void) hfs_update(fdvp, 0);
5204 }
5205 hfs_incr_gencount(fdcp);
5206
5207 tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
5208 tdcp->c_touch_chgtime = TRUE;
5209 tdcp->c_touch_modtime = TRUE;
5210
5211 (void) hfs_update(tdvp, 0);
5212
5213 /* Update the vnode's name now that the rename has completed. */
5214 vnode_update_identity(fvp, tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
5215 tcnp->cn_hash, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME));
5216
5217 /*
5218 * At this point, we may have a resource fork vnode attached to the
5219 * 'from' vnode. If it exists, we will want to update its name, because
5220 * it contains the old name + _PATH_RSRCFORKSPEC ("/..namedfork/rsrc").
5221 *
5222 * Note that the only thing we need to update here is the name attached to
5223 * the vnode, since a resource fork vnode does not have a separate resource
5224 * cnode -- it's still 'fcp'.
5225 */
5226 if (fcp->c_rsrc_vp) {
5227 char* rsrc_path = NULL;
5228 int len;
5229
5230 /* Create a new temporary buffer that's going to hold the new name */
5231 MALLOC_ZONE (rsrc_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
5232 len = snprintf (rsrc_path, MAXPATHLEN, "%s%s", tcnp->cn_nameptr, _PATH_RSRCFORKSPEC);
5233 len = MIN(len, MAXPATHLEN);
5234
5235 /*
5236 * vnode_update_identity will do the following for us:
5237 * 1) release reference on the existing rsrc vnode's name.
5238 * 2) copy/insert new name into the name cache
5239 * 3) attach the new name to the resource vnode
5240 * 4) update the vnode's vid
5241 */
5242 vnode_update_identity (fcp->c_rsrc_vp, fvp, rsrc_path, len, 0, (VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE));
5243
5244 /* Free the memory associated with the resource fork's name */
5245 FREE_ZONE (rsrc_path, MAXPATHLEN, M_NAMEI);
5246 }
5247 out:
5248 if (got_cookie) {
5249 cat_postflight(hfsmp, &cookie, p);
5250 }
5251 if (started_tr) {
5252 hfs_end_transaction(hfsmp);
5253 }
5254
5255 fdcp->c_flag &= ~C_DIR_MODIFICATION;
5256 wakeup((caddr_t)&fdcp->c_flag);
5257 if (fdvp != tdvp) {
5258 tdcp->c_flag &= ~C_DIR_MODIFICATION;
5259 wakeup((caddr_t)&tdcp->c_flag);
5260 }
5261
5262 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
5263
5264 if (took_trunc_lock) {
5265 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
5266 }
5267
5268 /* Now vnode_put the resource fork vnode if necessary */
5269 if (tvp_rsrc) {
5270 vnode_put(tvp_rsrc);
5271 tvp_rsrc = NULL;
5272 }
5273
5274 /* After tvp is removed the only acceptable error is EIO */
5275 if (error && tvp_deleted)
5276 error = EIO;
5277
5278 /* If we want to reintroduce notifications for renames, this is the
5279 place to do it. */
5280
5281 return (error);
5282 }
5283
5284
5285 /*
5286 * Make a directory.
5287 */
5288 int
5289 hfs_vnop_mkdir(struct vnop_mkdir_args *ap)
5290 {
5291 /***** HACK ALERT ********/
5292 ap->a_cnp->cn_flags |= MAKEENTRY;
5293 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
5294 }
5295
5296
5297 /*
5298 * Create a symbolic link.
5299 */
5300 int
5301 hfs_vnop_symlink(struct vnop_symlink_args *ap)
5302 {
5303 struct vnode **vpp = ap->a_vpp;
5304 struct vnode *dvp = ap->a_dvp;
5305 struct vnode *vp = NULL;
5306 struct cnode *cp = NULL;
5307 struct hfsmount *hfsmp;
5308 struct filefork *fp;
5309 struct buf *bp = NULL;
5310 char *datap;
5311 int started_tr = 0;
5312 u_int32_t len;
5313 int error;
5314
5315 /* HFS standard disks don't support symbolic links */
5316 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord)
5317 return (ENOTSUP);
5318
5319 /* Check for empty target name */
5320 if (ap->a_target[0] == 0)
5321 return (EINVAL);
5322
5323 hfsmp = VTOHFS(dvp);
5324 len = strlen(ap->a_target);
5325
5326 /* Check for free space */
5327 if (((u_int64_t)hfs_freeblks(hfsmp, 0) * (u_int64_t)hfsmp->blockSize) < len) {
5328 return (ENOSPC);
5329 }
5330
5331 /* Create the vnode */
5332 ap->a_vap->va_mode |= S_IFLNK;
5333 if ((error = hfs_makenode(dvp, vpp, ap->a_cnp, ap->a_vap, ap->a_context))) {
5334 goto out;
5335 }
5336 vp = *vpp;
5337 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
5338 goto out;
5339 }
5340 cp = VTOC(vp);
5341 fp = VTOF(vp);
5342
5343 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
5344 goto out;
5345 }
5346
5347 #if QUOTA
5348 (void)hfs_getinoquota(cp);
5349 #endif /* QUOTA */
5350
5351 if ((error = hfs_start_transaction(hfsmp)) != 0) {
5352 goto out;
5353 }
5354 started_tr = 1;
5355
5356 /*
5357 * Allocate space for the link.
5358 *
5359 * We're already inside a transaction here.
5360 *
5361 * Don't need truncate lock since a symlink is treated as a system file.
5362 */
5363 error = hfs_truncate(vp, len, IO_NOZEROFILL, 0, ap->a_context);
5364
5365 /* On errors, remove the symlink file */
5366 if (error) {
5367 /*
5368 * End the transaction so we don't re-take the cnode lock
5369 * below while inside a transaction (lock order violation).
5370 */
5371 hfs_end_transaction(hfsmp);
5372
5373 /* hfs_removefile() requires holding the truncate lock */
5374 hfs_unlock(cp);
5375 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
5376 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
5377
5378 if (hfs_start_transaction(hfsmp) != 0) {
5379 started_tr = 0;
5380 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
5381 goto out;
5382 }
5383
5384 (void) hfs_removefile(dvp, vp, ap->a_cnp, 0, 0, 0, NULL, 0);
5385 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
5386 goto out;
5387 }
5388
5389 /* Write the link to disk */
5390 bp = buf_getblk(vp, (daddr64_t)0, roundup((int)fp->ff_size, hfsmp->hfs_physical_block_size),
5391 0, 0, BLK_META);
5392 if (hfsmp->jnl) {
5393 journal_modify_block_start(hfsmp->jnl, bp);
5394 }
5395 datap = (char *)buf_dataptr(bp);
5396 bzero(datap, buf_size(bp));
5397 bcopy(ap->a_target, datap, len);
5398
5399 if (hfsmp->jnl) {
5400 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
5401 } else {
5402 buf_bawrite(bp);
5403 }
5404 out:
5405 if (started_tr)
5406 hfs_end_transaction(hfsmp);
5407 if ((cp != NULL) && (vp != NULL)) {
5408 hfs_unlock(cp);
5409 }
5410 if (error) {
5411 if (vp) {
5412 vnode_put(vp);
5413 }
5414 *vpp = NULL;
5415 }
5416 return (error);
5417 }
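/*
 * Illustrative sketch (not part of the original source): the symlink target
 * written above is stored in a buffer rounded up to the volume's physical
 * block size, zero-filled, with the target copied at offset 0. The helper
 * below only demonstrates that sizing arithmetic; the 4096-byte block size
 * in the usage note is an assumed example value.
 */
#if 0
static uint32_t
example_symlink_alloc_size(const char *target, uint32_t phys_block_size)
{
	uint32_t len = (uint32_t)strlen(target);

	/* Round the target length up to a whole physical block. */
	return roundup(len, phys_block_size);
}

/* e.g. example_symlink_alloc_size("/usr/local", 4096) == 4096 */
#endif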
5418
5419
5420 /* structures to hold a "." or ".." directory entry */
5421 struct hfs_stddotentry {
5422 u_int32_t d_fileno; /* unique file number */
5423 u_int16_t d_reclen; /* length of this structure */
5424 u_int8_t d_type; /* dirent file type */
5425 u_int8_t d_namlen; /* len of filename */
5426 char d_name[4]; /* "." or ".." */
5427 };
5428
5429 struct hfs_extdotentry {
5430 u_int64_t d_fileno; /* unique file number */
5431 u_int64_t d_seekoff; /* seek offset (optional, used by servers) */
5432 u_int16_t d_reclen; /* length of this structure */
5433 u_int16_t d_namlen; /* len of filename */
5434 u_int8_t d_type; /* dirent file type */
5435 u_char d_name[3]; /* "." or ".." */
5436 };
5437
5438 typedef union {
5439 struct hfs_stddotentry std;
5440 struct hfs_extdotentry ext;
5441 } hfs_dotentry_t;
5442
5443 /*
5444 * hfs_vnop_readdir reads directory entries into the buffer pointed
5445 * to by uio, in a filesystem independent format. Up to uio_resid
5446 * bytes of data can be transferred. The data in the buffer is a
5447 * series of packed dirent structures where each one contains the
5448 * following entries:
5449 *
5450 * u_int32_t d_fileno; // file number of entry
5451 * u_int16_t d_reclen; // length of this record
5452 * u_int8_t d_type; // file type
5453 * u_int8_t d_namlen; // length of string in d_name
5454 * char d_name[MAXNAMELEN+1]; // null terminated file name
5455 *
5456 * The current position (uio_offset) refers to the next block of
5457 * entries. The offset can only be set to a value previously
5458 * returned by hfs_vnop_readdir or zero. This offset does not have
5459 * to match the number of bytes returned (in uio_resid).
5460 *
5461 * In fact, the offset used by HFS is essentially an index (26 bits)
5462 * with a tag (6 bits). The tag is for associating the next request
5463 * with the current request. This enables us to have multiple threads
5464 * reading the directory while the directory is also being modified.
5465 *
5466 * Each tag/index pair is tied to a unique directory hint. The hint
5467 * contains information (filename) needed to build the catalog b-tree
5468 * key for finding the next set of entries.
5469 *
5470 * If the directory is marked as deleted-but-in-use (cp->c_flag & C_DELETED),
5471 * do NOT synthesize entries for "." and "..".
5472 */
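/*
 * Illustrative sketch (not part of the original source) of the offset
 * encoding described above: the low 32 bits of the readdir offset split
 * into a directory index and a tag, and for NFS-style cookies the upper
 * 32 bits carry a cnid hint. The 26/6 split below assumes
 * HFS_INDEX_BITS == 26 and HFS_INDEX_MASK == ((1 << 26) - 1), mirroring
 * how the masks are used in hfs_vnop_readdir(). (The real code also
 * biases the index by 2 to account for the synthesized "." and ".."
 * entries; that detail is omitted here for clarity.)
 */
#if 0
#define EX_INDEX_BITS	26
#define EX_INDEX_MASK	((1u << EX_INDEX_BITS) - 1)	/* low 26 bits: index */

static off_t
example_pack_readdir_offset(uint32_t index, uint32_t tag_counter, uint32_t cnid_hint)
{
	/* index: low 26 bits, tag: next 6 bits, cnid hint: top 32 bits */
	uint32_t low = (index & EX_INDEX_MASK) | (tag_counter << EX_INDEX_BITS);

	return ((off_t)cnid_hint << 32) | low;
}

static void
example_unpack_readdir_offset(off_t offset, uint32_t *index, uint32_t *tag,
    uint32_t *cnid_hint)
{
	uint32_t low = (uint32_t)offset;	/* low 32 bits: index + tag */

	*cnid_hint = (uint32_t)(offset >> 32);	/* NFS cookie only */
	*index = low & EX_INDEX_MASK;
	*tag = low & ~EX_INDEX_MASK;		/* already shifted, as stored */
}
#endif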
5473 int
5474 hfs_vnop_readdir(ap)
5475 struct vnop_readdir_args /* {
5476 vnode_t a_vp;
5477 uio_t a_uio;
5478 int a_flags;
5479 int *a_eofflag;
5480 int *a_numdirent;
5481 vfs_context_t a_context;
5482 } */ *ap;
5483 {
5484 struct vnode *vp = ap->a_vp;
5485 uio_t uio = ap->a_uio;
5486 struct cnode *cp;
5487 struct hfsmount *hfsmp;
5488 directoryhint_t *dirhint = NULL;
5489 directoryhint_t localhint;
5490 off_t offset;
5491 off_t startoffset;
5492 int error = 0;
5493 int eofflag = 0;
5494 user_addr_t user_start = 0;
5495 user_size_t user_len = 0;
5496 int index;
5497 unsigned int tag;
5498 int items;
5499 int lockflags;
5500 int extended;
5501 int nfs_cookies;
5502 cnid_t cnid_hint = 0;
5503 int bump_valence = 0;
5504
5505 items = 0;
5506 startoffset = offset = uio_offset(uio);
5507 extended = (ap->a_flags & VNODE_READDIR_EXTENDED);
5508 nfs_cookies = extended && (ap->a_flags & VNODE_READDIR_REQSEEKOFF);
5509
5510 /* Sanity check the uio data. */
5511 if (uio_iovcnt(uio) > 1)
5512 return (EINVAL);
5513
5514 if (VTOC(vp)->c_bsdflags & UF_COMPRESSED) {
5515 int compressed = hfs_file_is_compressed(VTOC(vp), 0); /* 0 == take the cnode lock */
5516 if (VTOCMP(vp) != NULL && !compressed) {
5517 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
5518 if (error) {
5519 return error;
5520 }
5521 }
5522 }
5523
5524 cp = VTOC(vp);
5525 hfsmp = VTOHFS(vp);
5526
5527 /* Note that the dirhint calls require an exclusive lock. */
5528 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
5529 return (error);
5530
5531 /* Pick up cnid hint (if any). */
5532 if (nfs_cookies) {
5533 cnid_hint = (cnid_t)(uio_offset(uio) >> 32);
5534 uio_setoffset(uio, uio_offset(uio) & 0x00000000ffffffffLL);
5535 if (cnid_hint == INT_MAX) { /* searching past the last item */
5536 eofflag = 1;
5537 goto out;
5538 }
5539 }
5540 /*
5541 * Synthesize entries for "." and "..", unless the directory has
5542 * been deleted, but not closed yet (lazy delete in progress).
5543 */
5544 if (offset == 0 && !(cp->c_flag & C_DELETED)) {
5545 hfs_dotentry_t dotentry[2];
5546 size_t uiosize;
5547
5548 if (extended) {
5549 struct hfs_extdotentry *entry = &dotentry[0].ext;
5550
5551 entry->d_fileno = cp->c_cnid;
5552 entry->d_reclen = sizeof(struct hfs_extdotentry);
5553 entry->d_type = DT_DIR;
5554 entry->d_namlen = 1;
5555 entry->d_name[0] = '.';
5556 entry->d_name[1] = '\0';
5557 entry->d_name[2] = '\0';
5558 entry->d_seekoff = 1;
5559
5560 ++entry;
5561 entry->d_fileno = cp->c_parentcnid;
5562 entry->d_reclen = sizeof(struct hfs_extdotentry);
5563 entry->d_type = DT_DIR;
5564 entry->d_namlen = 2;
5565 entry->d_name[0] = '.';
5566 entry->d_name[1] = '.';
5567 entry->d_name[2] = '\0';
5568 entry->d_seekoff = 2;
5569 uiosize = 2 * sizeof(struct hfs_extdotentry);
5570 } else {
5571 struct hfs_stddotentry *entry = &dotentry[0].std;
5572
5573 entry->d_fileno = cp->c_cnid;
5574 entry->d_reclen = sizeof(struct hfs_stddotentry);
5575 entry->d_type = DT_DIR;
5576 entry->d_namlen = 1;
5577 *(int *)&entry->d_name[0] = 0;
5578 entry->d_name[0] = '.';
5579
5580 ++entry;
5581 entry->d_fileno = cp->c_parentcnid;
5582 entry->d_reclen = sizeof(struct hfs_stddotentry);
5583 entry->d_type = DT_DIR;
5584 entry->d_namlen = 2;
5585 *(int *)&entry->d_name[0] = 0;
5586 entry->d_name[0] = '.';
5587 entry->d_name[1] = '.';
5588 uiosize = 2 * sizeof(struct hfs_stddotentry);
5589 }
5590 if ((error = uiomove((caddr_t)&dotentry, uiosize, uio))) {
5591 goto out;
5592 }
5593 offset += 2;
5594 }
5595
5596 /*
5597 * Intentionally avoid checking the valence here. If we
5598 * have FS corruption that reports the valence is 0, even though it
5599 * has contents, we might artificially skip over iterating
5600 * this directory.
5601 */
5602
5603 //
5604 // We have to lock the user's buffer here so that we won't
5605 // fault on it after we've acquired a shared lock on the
5606 // catalog file. The issue is that you can get a 3-way
5607 // deadlock if someone else starts a transaction and then
5608 // tries to lock the catalog file but can't because we're
5609 // here and we can't service our page fault because VM is
5610 // blocked trying to start a transaction as a result of
5611 // trying to free up pages for our page fault. It's messy
5612 // but it does happen on dual-processors that are paging
5613 // heavily (see radar 3082639 for more info). By locking
5614 // the buffer up-front we prevent ourselves from faulting
5615 // while holding the shared catalog file lock.
5616 //
5617 // Fortunately this and hfs_search() are the only two places
5618 // currently (10/30/02) that can fault on user data with a
5619 // shared lock on the catalog file.
5620 //
5621 if (hfsmp->jnl && uio_isuserspace(uio)) {
5622 user_start = uio_curriovbase(uio);
5623 user_len = uio_curriovlen(uio);
5624
5625 if ((error = vslock(user_start, user_len)) != 0) {
5626 user_start = 0;
5627 goto out;
5628 }
5629 }
5630 /* Convert offset into a catalog directory index. */
5631 index = (offset & HFS_INDEX_MASK) - 2;
5632 tag = offset & ~HFS_INDEX_MASK;
5633
5634 /* Lock catalog during cat_findname and cat_getdirentries. */
5635 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
5636
5637 /* When called from NFS, try and resolve a cnid hint. */
5638 if (nfs_cookies && cnid_hint != 0) {
5639 if (cat_findname(hfsmp, cnid_hint, &localhint.dh_desc) == 0) {
5640 if ( localhint.dh_desc.cd_parentcnid == cp->c_fileid) {
5641 localhint.dh_index = index - 1;
5642 localhint.dh_time = 0;
5643 bzero(&localhint.dh_link, sizeof(localhint.dh_link));
5644 dirhint = &localhint; /* don't forget to release the descriptor */
5645 } else {
5646 cat_releasedesc(&localhint.dh_desc);
5647 }
5648 }
5649 }
5650
5651 /* Get a directory hint (cnode must be locked exclusive) */
5652 if (dirhint == NULL) {
5653 dirhint = hfs_getdirhint(cp, ((index - 1) & HFS_INDEX_MASK) | tag, 0);
5654
5655 /* Hide tag from catalog layer. */
5656 dirhint->dh_index &= HFS_INDEX_MASK;
5657 if (dirhint->dh_index == HFS_INDEX_MASK) {
5658 dirhint->dh_index = -1;
5659 }
5660 }
5661
5662 if (index == 0) {
5663 dirhint->dh_threadhint = cp->c_dirthreadhint;
5664 }
5665 else {
5666 /*
5667 * If we have a non-zero index, there is a possibility that during the last
5668 * call to hfs_vnop_readdir we hit EOF for this directory. If that is the case
5669 * then we don't want to return any new entries for the caller. Just return 0
5670 * items, mark the eofflag, and bail out. Because we won't have done any work, the
5671 * code at the end of the function will release the dirhint for us.
5672 *
5673 * Don't forget to unlock the catalog lock on the way out, too.
5674 */
5675 if (dirhint->dh_desc.cd_flags & CD_EOF) {
5676 error = 0;
5677 eofflag = 1;
5678 uio_setoffset(uio, startoffset);
5679 hfs_systemfile_unlock (hfsmp, lockflags);
5680
5681 goto seekoffcalc;
5682 }
5683 }
5684
5685 /* Pack the buffer with dirent entries. */
5686 error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, ap->a_flags, &items, &eofflag);
5687
5688 if (index == 0 && error == 0) {
5689 cp->c_dirthreadhint = dirhint->dh_threadhint;
5690 }
5691
5692 hfs_systemfile_unlock(hfsmp, lockflags);
5693
5694 if (error != 0) {
5695 goto out;
5696 }
5697
5698 /* Get index to the next item */
5699 index += items;
5700
5701 if (items >= (int)cp->c_entries) {
5702 eofflag = 1;
5703 }
5704
5705 /*
5706 * Detect valence FS corruption.
5707 *
5708 * We are holding the cnode lock exclusive, so there should not be
5709 * anybody modifying the valence field of this cnode. If we enter
5710 * this block, that means we observed filesystem corruption, because
5711 * this directory reported a valence of 0, yet we found at least one
5712 * item. In this case, we need to minimally self-heal this
5713 * directory to prevent userland from tripping over a directory
5714 * that appears empty (getattr of valence reports 0), but actually
5715 * has contents.
5716 *
5717 * We'll force the cnode update at the end of the function after
5718 * completing all of the normal getdirentries steps.
5719 */
5720 if ((cp->c_entries == 0) && (items > 0)) {
5721 /* disk corruption */
5722 cp->c_entries++;
5723 /* Mark the cnode as dirty. */
5724 cp->c_flag |= C_MODIFIED;
5725 printf("hfs_vnop_readdir: repairing valence to non-zero! \n");
5726 bump_valence++;
5727 }
5728
5729
5730 /* Convert catalog directory index back into an offset. */
5731 while (tag == 0)
5732 tag = (++cp->c_dirhinttag) << HFS_INDEX_BITS;
5733 uio_setoffset(uio, (index + 2) | tag);
5734 dirhint->dh_index |= tag;
5735
5736 seekoffcalc:
5737 cp->c_touch_acctime = TRUE;
5738
5739 if (ap->a_numdirent) {
5740 if (startoffset == 0)
5741 items += 2;
5742 *ap->a_numdirent = items;
5743 }
5744
5745 out:
5746 if (user_start) {
5747 vsunlock(user_start, user_len, TRUE);
5748 }
5749 /* If we didn't do anything then go ahead and dump the hint. */
5750 if ((dirhint != NULL) &&
5751 (dirhint != &localhint) &&
5752 (uio_offset(uio) == startoffset)) {
5753 hfs_reldirhint(cp, dirhint);
5754 eofflag = 1;
5755 }
5756 if (ap->a_eofflag) {
5757 *ap->a_eofflag = eofflag;
5758 }
5759 if (dirhint == &localhint) {
5760 cat_releasedesc(&localhint.dh_desc);
5761 }
5762
5763 if (bump_valence) {
5764 /* force the update before dropping the cnode lock*/
5765 hfs_update(vp, 0);
5766 }
5767
5768 hfs_unlock(cp);
5769
5770 return (error);
5771 }
5772
5773
5774 /*
5775 * Read contents of a symbolic link.
5776 */
5777 int
5778 hfs_vnop_readlink(ap)
5779 struct vnop_readlink_args /* {
5780 struct vnode *a_vp;
5781 struct uio *a_uio;
5782 vfs_context_t a_context;
5783 } */ *ap;
5784 {
5785 struct vnode *vp = ap->a_vp;
5786 struct cnode *cp;
5787 struct filefork *fp;
5788 int error;
5789
5790 if (!vnode_islnk(vp))
5791 return (EINVAL);
5792
5793 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
5794 return (error);
5795 cp = VTOC(vp);
5796 fp = VTOF(vp);
5797
5798 /* Zero length sym links are not allowed */
5799 if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
5800 error = EINVAL;
5801 goto exit;
5802 }
5803
5804 /* Cache the path so we don't waste buffer cache resources */
5805 if (fp->ff_symlinkptr == NULL) {
5806 struct buf *bp = NULL;
5807
5808 MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK);
5809 if (fp->ff_symlinkptr == NULL) {
5810 error = ENOMEM;
5811 goto exit;
5812 }
5813 error = (int)buf_meta_bread(vp, (daddr64_t)0,
5814 roundup((int)fp->ff_size, VTOHFS(vp)->hfs_physical_block_size),
5815 vfs_context_ucred(ap->a_context), &bp);
5816 if (error) {
5817 if (bp)
5818 buf_brelse(bp);
5819 if (fp->ff_symlinkptr) {
5820 FREE(fp->ff_symlinkptr, M_TEMP);
5821 fp->ff_symlinkptr = NULL;
5822 }
5823 goto exit;
5824 }
5825 bcopy((char *)buf_dataptr(bp), fp->ff_symlinkptr, (size_t)fp->ff_size);
5826
5827 if (VTOHFS(vp)->jnl && (buf_flags(bp) & B_LOCKED) == 0) {
5828 buf_markinvalid(bp); /* data no longer needed */
5829 }
5830 buf_brelse(bp);
5831 }
5832 error = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
5833
5834 /*
5835 * Keep track of blocks read
5836 */
5837 if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (error == 0)) {
5838
5839 /*
5840 * If this file hasn't been seen since the start of
5841 * the current sampling period then start over.
5842 */
5843 if (cp->c_atime < VTOHFS(vp)->hfc_timebase)
5844 VTOF(vp)->ff_bytesread = fp->ff_size;
5845 else
5846 VTOF(vp)->ff_bytesread += fp->ff_size;
5847
5848 // if (VTOF(vp)->ff_bytesread > fp->ff_size)
5849 // cp->c_touch_acctime = TRUE;
5850 }
5851
5852 exit:
5853 hfs_unlock(cp);
5854 return (error);
5855 }
5856
5857
5858 /*
5859 * Get configurable pathname variables.
5860 */
5861 int
5862 hfs_vnop_pathconf(ap)
5863 struct vnop_pathconf_args /* {
5864 struct vnode *a_vp;
5865 int a_name;
5866 int *a_retval;
5867 vfs_context_t a_context;
5868 } */ *ap;
5869 {
5870
5871 int std_hfs = (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD);
5872 switch (ap->a_name) {
5873 case _PC_LINK_MAX:
5874 if (std_hfs == 0){
5875 *ap->a_retval = HFS_LINK_MAX;
5876 }
5877 #if CONFIG_HFS_STD
5878 else {
5879 *ap->a_retval = 1;
5880 }
5881 #endif
5882 break;
5883 case _PC_NAME_MAX:
5884 if (std_hfs == 0) {
5885 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
5886 }
5887 #if CONFIG_HFS_STD
5888 else {
5889 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
5890 }
5891 #endif
5892 break;
5893 case _PC_PATH_MAX:
5894 *ap->a_retval = PATH_MAX; /* 1024 */
5895 break;
5896 case _PC_PIPE_BUF:
5897 *ap->a_retval = PIPE_BUF;
5898 break;
5899 case _PC_CHOWN_RESTRICTED:
5900 *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */
5901 break;
5902 case _PC_NO_TRUNC:
5903 *ap->a_retval = 200112; /* _POSIX_NO_TRUNC */
5904 break;
5905 case _PC_NAME_CHARS_MAX:
5906 if (std_hfs == 0) {
5907 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
5908 }
5909 #if CONFIG_HFS_STD
5910 else {
5911 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
5912 }
5913 #endif
5914 break;
5915 case _PC_CASE_SENSITIVE:
5916 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE)
5917 *ap->a_retval = 1;
5918 else
5919 *ap->a_retval = 0;
5920 break;
5921 case _PC_CASE_PRESERVING:
5922 *ap->a_retval = 1;
5923 break;
5924 case _PC_FILESIZEBITS:
5925 /* number of bits to store max file size */
5926 if (std_hfs == 0) {
5927 *ap->a_retval = 64;
5928 }
5929 #if CONFIG_HFS_STD
5930 else {
5931 *ap->a_retval = 32;
5932 }
5933 #endif
5934 break;
5935 case _PC_XATTR_SIZE_BITS:
5936 /* Number of bits to store maximum extended attribute size */
5937 *ap->a_retval = HFS_XATTR_SIZE_BITS;
5938 break;
5939 default:
5940 return (EINVAL);
5941 }
5942
5943 return (0);
5944 }
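/*
 * Illustrative sketch (not part of the original source): the limits vended
 * by the switch above surface to userland through pathconf(2). A minimal
 * userspace probe might look like the following; the mount point path is
 * an assumed example, and _PC_CASE_SENSITIVE is a Darwin-specific name.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "/Volumes/ExampleHFS";	/* assumed mount point */

	printf("NAME_MAX       = %ld\n", pathconf(path, _PC_NAME_MAX));
	printf("PATH_MAX       = %ld\n", pathconf(path, _PC_PATH_MAX));
	printf("case sensitive = %ld\n", pathconf(path, _PC_CASE_SENSITIVE));
	return 0;
}
#endif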
5945
5946 /*
5947 * Prepares a fork for cat_update by making sure ff_size and ff_blocks
5948 * are no bigger than the valid data on disk, thus reducing the chance
5949 * of exposing uninitialised data in the event of an unclean unmount.
5950 * cf_buf is where to put the temporary copy if required. (It can
5951 * be the same as cf.)
5952 */
5953 const struct cat_fork *
5954 hfs_prepare_fork_for_update(filefork_t *ff,
5955 const struct cat_fork *cf,
5956 struct cat_fork *cf_buf,
5957 uint32_t block_size)
5958 {
5959 if (!ff)
5960 return NULL;
5961
5962 if (!cf)
5963 cf = &ff->ff_data;
5964 if (!cf_buf)
5965 cf_buf = &ff->ff_data;
5966
5967 off_t max_size = ff->ff_size;
5968
5969 // Check first invalid range
5970 if (!TAILQ_EMPTY(&ff->ff_invalidranges))
5971 max_size = TAILQ_FIRST(&ff->ff_invalidranges)->rl_start;
5972
5973 if (!ff->ff_unallocblocks && ff->ff_size <= max_size)
5974 return cf; // Nothing to do
5975
5976 if (ff->ff_blocks < ff->ff_unallocblocks) {
5977 panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
5978 ff->ff_blocks, ff->ff_unallocblocks);
5979 }
5980
5981 struct cat_fork *out = cf_buf;
5982
5983 if (out != cf)
5984 bcopy(cf, out, sizeof(*cf));
5985
5986 // Adjust cf_blocks for cf_vblocks
5987 out->cf_blocks -= out->cf_vblocks;
5988
5989 /*
5990 * Here we trim the size with the updated cf_blocks. This is
5991 * probably unnecessary now because the invalid ranges should
5992 * catch this (but that wasn't always the case).
5993 */
5994 off_t alloc_bytes = hfs_blk_to_bytes(out->cf_blocks, block_size);
5995 if (out->cf_size > alloc_bytes)
5996 out->cf_size = alloc_bytes;
5997
5998 // Trim cf_size to first invalid range
5999 if (out->cf_size > max_size)
6000 out->cf_size = max_size;
6001
6002 return out;
6003 }
6004
6005 /*
6006 * Update a cnode's on-disk metadata.
6007 *
6008 * The cnode must be locked exclusive. See declaration for possible
6009 * options.
6010 */
6011 int
6012 hfs_update(struct vnode *vp, int options)
6013 {
6014 struct cnode *cp = VTOC(vp);
6015 struct proc *p;
6016 const struct cat_fork *dataforkp = NULL;
6017 const struct cat_fork *rsrcforkp = NULL;
6018 struct cat_fork datafork;
6019 struct cat_fork rsrcfork;
6020 struct hfsmount *hfsmp;
6021 int lockflags;
6022 int error;
6023 uint32_t tstate = 0;
6024
6025 if (ISSET(cp->c_flag, C_NOEXISTS))
6026 return 0;
6027
6028 p = current_proc();
6029 hfsmp = VTOHFS(vp);
6030
6031 if (((vnode_issystem(vp) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))) ||
6032 hfsmp->hfs_catalog_vp == NULL){
6033 return (0);
6034 }
6035 if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (cp->c_mode == 0)) {
6036 CLR(cp->c_flag, C_MODIFIED | C_MINOR_MOD | C_NEEDS_DATEADDED);
6037 cp->c_touch_acctime = 0;
6038 cp->c_touch_chgtime = 0;
6039 cp->c_touch_modtime = 0;
6040 return (0);
6041 }
6042 if (kdebug_enable) {
6043 if (cp->c_touch_acctime || cp->c_atime != cp->c_attr.ca_atimeondisk)
6044 tstate |= DBG_HFS_UPDATE_ACCTIME;
6045 if (cp->c_touch_modtime)
6046 tstate |= DBG_HFS_UPDATE_MODTIME;
6047 if (cp->c_touch_chgtime)
6048 tstate |= DBG_HFS_UPDATE_CHGTIME;
6049
6050 if (cp->c_flag & C_MODIFIED)
6051 tstate |= DBG_HFS_UPDATE_MODIFIED;
6052 if (ISSET(options, HFS_UPDATE_FORCE))
6053 tstate |= DBG_HFS_UPDATE_FORCE;
6054 if (cp->c_flag & C_NEEDS_DATEADDED)
6055 tstate |= DBG_HFS_UPDATE_DATEADDED;
6056 if (cp->c_flag & C_MINOR_MOD)
6057 tstate |= DBG_HFS_UPDATE_MINOR;
6058 }
6059 hfs_touchtimes(hfsmp, cp);
6060
6061 if (!ISSET(cp->c_flag, C_MODIFIED | C_MINOR_MOD)
6062 && !hfs_should_save_atime(cp)) {
6063 // Nothing to update
6064 return 0;
6065 }
6066
6067 KDBG(HFSDBG_UPDATE | DBG_FUNC_START, VM_KERNEL_ADDRPERM(vp), tstate);
6068
6069 bool check_txn = false;
6070
6071 if (!ISSET(options, HFS_UPDATE_FORCE) && !ISSET(cp->c_flag, C_MODIFIED)) {
6072 /*
6073 * This must be a minor modification. If the current
6074 * transaction already has an update for this node, then we
6075 * bundle in the modification.
6076 */
6077 if (hfsmp->jnl
6078 && journal_current_txn(hfsmp->jnl) == cp->c_update_txn) {
6079 check_txn = true;
6080 } else {
6081 tstate |= DBG_HFS_UPDATE_SKIPPED;
6082 error = 0;
6083 goto exit;
6084 }
6085 }
6086
6087 if ((error = hfs_start_transaction(hfsmp)) != 0)
6088 goto exit;
6089
6090 if (check_txn
6091 && journal_current_txn(hfsmp->jnl) != cp->c_update_txn) {
6092 hfs_end_transaction(hfsmp);
6093 tstate |= DBG_HFS_UPDATE_SKIPPED;
6094 error = 0;
6095 goto exit;
6096 }
6097
6098 if (cp->c_datafork)
6099 dataforkp = &cp->c_datafork->ff_data;
6100 if (cp->c_rsrcfork)
6101 rsrcforkp = &cp->c_rsrcfork->ff_data;
6102
6103 /*
6104 * Modify the values passed to cat_update based on whether or not
6105 * the file has invalid ranges or borrowed blocks.
6106 */
6107 dataforkp = hfs_prepare_fork_for_update(cp->c_datafork, NULL, &datafork, hfsmp->blockSize);
6108 rsrcforkp = hfs_prepare_fork_for_update(cp->c_rsrcfork, NULL, &rsrcfork, hfsmp->blockSize);
6109
6110 if (__improbable(kdebug_enable & KDEBUG_TRACE)) {
6111 long dbg_parms[NUMPARMS];
6112 int dbg_namelen;
6113
6114 dbg_namelen = NUMPARMS * sizeof(long);
6115 vn_getpath(vp, (char *)dbg_parms, &dbg_namelen);
6116
6117 if (dbg_namelen < (int)sizeof(dbg_parms))
6118 memset((char *)dbg_parms + dbg_namelen, 0, sizeof(dbg_parms) - dbg_namelen);
6119
6120 kdebug_lookup_gen_events(dbg_parms, dbg_namelen, (void *)vp, TRUE);
6121 }
6122
6123 /*
6124 * Lock the Catalog b-tree file.
6125 */
6126 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
6127
6128 error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);
6129
6130 if (hfsmp->jnl)
6131 cp->c_update_txn = journal_current_txn(hfsmp->jnl);
6132
6133 hfs_systemfile_unlock(hfsmp, lockflags);
6134
6135 CLR(cp->c_flag, C_MODIFIED | C_MINOR_MOD);
6136
6137 hfs_end_transaction(hfsmp);
6138
6139 exit:
6140
6141 KDBG(HFSDBG_UPDATE | DBG_FUNC_END, VM_KERNEL_ADDRPERM(vp), tstate, error);
6142
6143 return error;
6144 }
6145
6146 /*
6147 * Allocate a new node
6148 */
6149 int
6150 hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
6151 struct vnode_attr *vap, vfs_context_t ctx)
6152 {
6153 struct cnode *cp = NULL;
6154 struct cnode *dcp = NULL;
6155 struct vnode *tvp;
6156 struct hfsmount *hfsmp;
6157 struct cat_desc in_desc, out_desc;
6158 struct cat_attr attr;
6159 struct timeval tv;
6160 int lockflags;
6161 int error, started_tr = 0;
6162 enum vtype vnodetype;
6163 int mode;
6164 int newvnode_flags = 0;
6165 u_int32_t gnv_flags = 0;
6166 int protectable_target = 0;
6167 int nocache = 0;
6168
6169 #if CONFIG_PROTECT
6170 struct cprotect *entry = NULL;
6171 int32_t cp_class = -1;
6172
6173 /*
6174 * By default, it's OK for AKS to override our target class preferences.
6175 */
6176 uint32_t keywrap_flags = CP_KEYWRAP_DIFFCLASS;
6177
6178 if (VATTR_IS_ACTIVE(vap, va_dataprotect_class)) {
6179 cp_class = (int32_t)vap->va_dataprotect_class;
6180 /*
6181 * Since the user specifically requested this target class be used,
6182 * we want to fail this creation operation if we cannot wrap to their
6183 * target class. The CP_KEYWRAP_DIFFCLASS bit says that it is OK to
6184 * use a different class than the one specified, so we turn that off
6185 * now.
6186 */
6187 keywrap_flags &= ~CP_KEYWRAP_DIFFCLASS;
6188 }
6189 int protected_mount = 0;
6190 #endif
6191
6192
6193 if ((error = hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
6194 return (error);
6195
6196 /* set the cnode pointer only after successfully acquiring lock */
6197 dcp = VTOC(dvp);
6198
6199 /* Don't allow creation of new entries in open-unlinked directories */
6200 if ((error = hfs_checkdeleted(dcp))) {
6201 hfs_unlock(dcp);
6202 return error;
6203 }
6204
6205 dcp->c_flag |= C_DIR_MODIFICATION;
6206
6207 hfsmp = VTOHFS(dvp);
6208
6209 *vpp = NULL;
6210 tvp = NULL;
6211 out_desc.cd_flags = 0;
6212 out_desc.cd_nameptr = NULL;
6213
6214 vnodetype = vap->va_type;
6215 if (vnodetype == VNON)
6216 vnodetype = VREG;
6217 mode = MAKEIMODE(vnodetype, vap->va_mode);
6218
6219 if (S_ISDIR (mode) || S_ISREG (mode)) {
6220 protectable_target = 1;
6221 }
6222
6223
6224 /* Check if we're out of usable disk space. */
6225 if ((hfs_freeblks(hfsmp, 1) == 0) && (vfs_context_suser(ctx) != 0)) {
6226 error = ENOSPC;
6227 goto exit;
6228 }
6229
6230 microtime(&tv);
6231
6232 /* Setup the default attributes */
6233 bzero(&attr, sizeof(attr));
6234 attr.ca_mode = mode;
6235 attr.ca_linkcount = 1;
6236 if (VATTR_IS_ACTIVE(vap, va_rdev)) {
6237 attr.ca_rdev = vap->va_rdev;
6238 }
6239 if (VATTR_IS_ACTIVE(vap, va_create_time)) {
6240 VATTR_SET_SUPPORTED(vap, va_create_time);
6241 attr.ca_itime = vap->va_create_time.tv_sec;
6242 } else {
6243 attr.ca_itime = tv.tv_sec;
6244 }
6245 #if CONFIG_HFS_STD
6246 if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
6247 attr.ca_itime += 3600; /* Same as what hfs_update does */
6248 }
6249 #endif
6250 attr.ca_atime = attr.ca_ctime = attr.ca_mtime = attr.ca_itime;
6251 attr.ca_atimeondisk = attr.ca_atime;
6252 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6253 VATTR_SET_SUPPORTED(vap, va_flags);
6254 attr.ca_flags = vap->va_flags;
6255 }
6256
6257 /*
6258 * HFS+ only: all files get ThreadExists
6259 * HFSX only: dirs get HasFolderCount
6260 */
6261 if (!(hfsmp->hfs_flags & HFS_STANDARD)) {
6262 if (vnodetype == VDIR) {
6263 if (hfsmp->hfs_flags & HFS_FOLDERCOUNT)
6264 attr.ca_recflags = kHFSHasFolderCountMask;
6265 } else {
6266 attr.ca_recflags = kHFSThreadExistsMask;
6267 }
6268 }
6269
6270 #if CONFIG_PROTECT
6271 if (cp_fs_protected(hfsmp->hfs_mp)) {
6272 protected_mount = 1;
6273 }
6274 /*
6275 * On a content-protected HFS+/HFSX filesystem, files and directories
6276 * cannot be created without atomically setting/creating the EA that
6277 * contains the protection class metadata and keys at the same time, in
6278 * the same transaction. As a result, pre-set the "EAs exist" flag
6279 * on the cat_attr for protectable catalog record creations. This will
6280 * cause the cnode creation routine in hfs_getnewvnode to mark the cnode
6281 * as having EAs.
6282 */
6283 if ((protected_mount) && (protectable_target)) {
6284 attr.ca_recflags |= kHFSHasAttributesMask;
6285 /* delay entering in the namecache */
6286 nocache = 1;
6287 }
6288 #endif
6289
6290
6291 /*
6292 * Add the date added to the item. See above, as
6293 * all of the dates are set to the itime.
6294 */
6295 hfs_write_dateadded (&attr, attr.ca_atime);
6296
6297 /* Initialize the gen counter to 1 */
6298 hfs_write_gencount(&attr, (uint32_t)1);
6299
6300 attr.ca_uid = vap->va_uid;
6301 attr.ca_gid = vap->va_gid;
6302 VATTR_SET_SUPPORTED(vap, va_mode);
6303 VATTR_SET_SUPPORTED(vap, va_uid);
6304 VATTR_SET_SUPPORTED(vap, va_gid);
6305
6306 #if QUOTA
6307 /* check to see if this node's creation would cause us to go over
6308 * quota. If so, abort this operation.
6309 */
6310 if (hfsmp->hfs_flags & HFS_QUOTAS) {
6311 if ((error = hfs_quotacheck(hfsmp, 1, attr.ca_uid, attr.ca_gid,
6312 vfs_context_ucred(ctx)))) {
6313 goto exit;
6314 }
6315 }
6316 #endif
6317
6318
6319 /* Tag symlinks with a type and creator. */
6320 if (vnodetype == VLNK) {
6321 struct FndrFileInfo *fip;
6322
6323 fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
6324 fip->fdType = SWAP_BE32(kSymLinkFileType);
6325 fip->fdCreator = SWAP_BE32(kSymLinkCreator);
6326 }
6327
6328 /* Setup the descriptor */
6329 in_desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
6330 in_desc.cd_namelen = cnp->cn_namelen;
6331 in_desc.cd_parentcnid = dcp->c_fileid;
6332 in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
6333 in_desc.cd_hint = dcp->c_childhint;
6334 in_desc.cd_encoding = 0;
6335
6336 #if CONFIG_PROTECT
6337 /*
6338 * To preserve file creation atomicity with regards to the content protection EA,
6339 * we must create the file in the catalog and then write out its EA in the same
6340 * transaction.
6341 *
6342 * We only denote the target class in this EA; key generation is not completed
6343 * until the file has been inserted into the catalog and will be done
6344 * in a separate transaction.
6345 */
6346 if ((protected_mount) && (protectable_target)) {
6347 error = cp_setup_newentry(hfsmp, dcp, cp_class, attr.ca_mode, &entry);
6348 if (error) {
6349 goto exit;
6350 }
6351 }
6352 #endif
6353
6354 if ((error = hfs_start_transaction(hfsmp)) != 0) {
6355 goto exit;
6356 }
6357 started_tr = 1;
6358
6359 // have to also lock the attribute file because cat_create() needs
6360 // to check that any fileID it wants to use does not have orphaned
6361 // attributes in it.
6362 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
6363 cnid_t new_id;
6364
6365 /* Reserve some space in the Catalog file. */
6366 if ((error = cat_preflight(hfsmp, CAT_CREATE, NULL, 0))) {
6367 hfs_systemfile_unlock(hfsmp, lockflags);
6368 goto exit;
6369 }
6370
6371 if ((error = cat_acquire_cnid(hfsmp, &new_id))) {
6372 hfs_systemfile_unlock (hfsmp, lockflags);
6373 goto exit;
6374 }
6375
6376 error = cat_create(hfsmp, new_id, &in_desc, &attr, &out_desc);
6377 if (error == 0) {
6378 /* Update the parent directory */
6379 dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
6380 dcp->c_entries++;
6381
6382 if (vnodetype == VDIR) {
6383 INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
6384 }
6385 dcp->c_dirchangecnt++;
6386 hfs_incr_gencount(dcp);
6387
6388 dcp->c_touch_chgtime = dcp->c_touch_modtime = true;
6389 dcp->c_flag |= C_MODIFIED;
6390
6391 hfs_update(dcp->c_vp, 0);
6392
6393 #if CONFIG_PROTECT
6394 /*
6395 * If we are creating a content protected file, now is when
6396 * we create the EA. We must create it in the same transaction
6397 * that creates the file. We can also guarantee that the file
6398 * MUST exist because we are still holding the catalog lock
6399 * at this point.
6400 */
6401 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
6402 error = cp_setxattr (NULL, entry, hfsmp, attr.ca_fileid, XATTR_CREATE);
6403
6404 if (error) {
6405 int delete_err;
6406 /*
6407 * If we fail the EA creation, then we need to delete the file.
6408 * Luckily, we are still holding all of the right locks.
6409 */
6410 delete_err = cat_delete (hfsmp, &out_desc, &attr);
6411 if (delete_err == 0) {
6412 /* Update the parent directory */
6413 if (dcp->c_entries > 0)
6414 dcp->c_entries--;
6415 dcp->c_dirchangecnt++;
6416 dcp->c_ctime = tv.tv_sec;
6417 dcp->c_mtime = tv.tv_sec;
6418 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
6419 }
6420
6421 /* Emit EINVAL if we fail to create EA*/
6422 error = EINVAL;
6423 }
6424 }
6425 #endif
6426 }
6427 hfs_systemfile_unlock(hfsmp, lockflags);
6428 if (error)
6429 goto exit;
6430
6431 uint32_t txn = hfsmp->jnl ? journal_current_txn(hfsmp->jnl) : 0;
6432
6433 /* Invalidate negative cache entries in the directory */
6434 if (dcp->c_flag & C_NEG_ENTRIES) {
6435 cache_purge_negatives(dvp);
6436 dcp->c_flag &= ~C_NEG_ENTRIES;
6437 }
6438
6439 hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
6440 (dcp->c_cnid == kHFSRootFolderID));
6441
6442 // XXXdbg
6443 // have to end the transaction here before we call hfs_getnewvnode()
6444 // because that can cause us to try and reclaim a vnode on a different
6445 // file system which could cause us to start a transaction which can
6446 // deadlock with someone on that other file system (since we could be
6447 // holding two transaction locks as well as various vnodes and we did
6448 // not obtain the locks on them in the proper order).
6449 //
6450 // NOTE: this means that if the quota check fails or we have to update
6451 // the change time on a block-special device that those changes
6452 // will happen as part of independent transactions.
6453 //
6454 if (started_tr) {
6455 hfs_end_transaction(hfsmp);
6456 started_tr = 0;
6457 }
6458
6459 #if CONFIG_PROTECT
6460 /*
6461 * At this point, we must have encountered success with writing the EA.
6462 * Destroy our temporary cprotect (which had no keys).
6463 */
6464
6465 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
6466 cp_entry_destroy (hfsmp, entry);
6467 entry = NULL;
6468 }
6469 #endif
6470 gnv_flags |= GNV_CREATE;
6471 if (nocache) {
6472 gnv_flags |= GNV_NOCACHE;
6473 }
6474
6475 /*
6476 * Create a vnode for the object just created.
6477 *
6478 * NOTE: Maintaining the cnode lock on the parent directory is important,
6479 * as it prevents race conditions where other threads want to look up entries
6480 * in the directory and/or add things as we are in the process of creating
6481 * the vnode below. However, this has the potential for causing a
6482 * double lock panic when dealing with shadow files on an HFS boot partition.
6483 * The panic could occur if we are not cleaning up after ourselves properly
6484 * when done with a shadow file or in the error cases. The error would occur if we
6485 * try to create a new vnode, and then end up reclaiming another shadow vnode to
6486 * create the new one. However, if everything is working properly, this should
6487 * be a non-issue as we would never enter that reclaim codepath.
6488 *
6489 * The cnode is locked on successful return.
6490 */
6491 error = hfs_getnewvnode(hfsmp, dvp, cnp, &out_desc, gnv_flags, &attr,
6492 NULL, &tvp, &newvnode_flags);
6493 if (error)
6494 goto exit;
6495
6496 cp = VTOC(tvp);
6497
6498 cp->c_update_txn = txn;
6499
6500 struct doc_tombstone *ut;
6501 ut = get_uthread_doc_tombstone();
6502 if ( ut->t_lastop_document_id != 0
6503 && ut->t_lastop_parent == dvp
6504 && ut->t_lastop_parent_vid == vnode_vid(dvp)
6505 && strcmp((char *)ut->t_lastop_filename, (const char *)cp->c_desc.cd_nameptr) == 0) {
6506 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
6507
6508 //printf("CREATE: preserving doc-id %lld on %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
6509 fip->document_id = (uint32_t)(ut->t_lastop_document_id & 0xffffffff);
6510
6511 cp->c_bsdflags |= UF_TRACKED;
6512 cp->c_flag |= C_MODIFIED;
6513
6514 if ((error = hfs_start_transaction(hfsmp)) == 0) {
6515 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
6516
6517 (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
6518
6519 hfs_systemfile_unlock (hfsmp, lockflags);
6520 (void) hfs_end_transaction(hfsmp);
6521 }
6522
6523 clear_tombstone_docid(ut, hfsmp, cp); // will send the docid-changed fsevent
6524 } else if (ut->t_lastop_document_id != 0) {
6525 int len = cnp->cn_namelen;
6526 if (len == 0) {
6527 len = strlen(cnp->cn_nameptr);
6528 }
6529
6530 if (is_ignorable_temp_name(cnp->cn_nameptr, cnp->cn_namelen)) {
6531 // printf("CREATE: not clearing tombstone because %s is a temp name.\n", cnp->cn_nameptr);
6532 } else {
6533 // Clear the tombstone because the thread is not recreating the same path
6534 // printf("CREATE: clearing tombstone because %s is NOT a temp name.\n", cnp->cn_nameptr);
6535 clear_tombstone_docid(ut, hfsmp, NULL);
6536 }
6537 }
6538
6539 if ((hfsmp->hfs_flags & HFS_CS_HOTFILE_PIN) && (vnode_isfastdevicecandidate(dvp) && !vnode_isautocandidate(dvp))) {
6540
6541 //printf("hfs: flagging %s (fileid: %d) as VFASTDEVCANDIDATE (dvp name: %s)\n",
6542 // cnp->cn_nameptr ? cnp->cn_nameptr : "<NONAME>",
6543 // cp->c_fileid,
6544 // dvp->v_name ? dvp->v_name : "no-dir-name");
6545
6546 //
6547 // On new files we set the FastDevCandidate flag so that
6548 // any new blocks allocated to it will be pinned.
6549 //
6550 cp->c_attr.ca_recflags |= kHFSFastDevCandidateMask;
6551 vnode_setfastdevicecandidate(tvp);
6552
6553 //
6554 // properly inherit auto-cached flags
6555 //
6556 if (vnode_isautocandidate(dvp)) {
6557 cp->c_attr.ca_recflags |= kHFSAutoCandidateMask;
6558 vnode_setautocandidate(tvp);
6559 }
6560
6561
6562 //
6563 // We also want to add it to the hotfile adoption list so
6564 // that it will eventually land in the hotfile btree
6565 //
6566 (void) hfs_addhotfile(tvp);
6567 }
6568
6569 *vpp = tvp;
6570
6571 #if CONFIG_PROTECT
6572 /*
6573 * Now that we have a vnode-in-hand, generate keys for this namespace item.
6574 * If we fail to create the keys, then attempt to delete the item from the
6575 * namespace. If we can't delete the item, that's not desirable but also not fatal.
6576 * All of the places which deal with restoring/unwrapping keys must also be
6577 * prepared to encounter an entry that does not have keys.
6578 */
6579 if ((protectable_target) && (protected_mount)) {
6580 struct cprotect *keyed_entry = NULL;
6581
6582 if (cp->c_cpentry == NULL) {
6583 panic ("hfs_makenode: no cpentry for cnode (%p)", cp);
6584 }
6585
6586 error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), keywrap_flags, &keyed_entry);
6587 if (error == 0) {
6588 /*
6589 * Upon success, the keys were generated and written out.
6590 * Update the cp pointer in the cnode.
6591 */
6592 cp_replace_entry (hfsmp, cp, keyed_entry);
6593 if (nocache) {
6594 cache_enter (dvp, tvp, cnp);
6595 }
6596 }
6597 else {
6598 /* If key creation OR the setxattr failed, emit EPERM to userland */
6599 error = EPERM;
6600
6601 /*
6602 * Beware! This slightly violates the lock ordering for the
6603 * cnode/vnode 'tvp'. Ordinarily, you must acquire the truncate lock
6604 * which guards file size changes before acquiring the normal cnode lock
6605 * and calling hfs_removefile on an item.
6606 *
6607 * However, in this case, we are still holding the directory lock so
6608 * 'tvp' is not lookup-able and it was a newly created vnode so it
6609 * cannot have any content yet. The only reason we are initiating
6610 * the removefile is because we could not generate content protection keys
6611 * for this namespace item. Note also that we pass a '1' in the allow_dirs
6612 * argument for hfs_removefile because we may be creating a directory here.
6613 *
6614 * All this to say that while it is technically a violation it is
6615 * impossible to race with another thread for this cnode so it is safe.
6616 */
6617 int err = hfs_removefile (dvp, tvp, cnp, 0, 0, 1, NULL, 0);
6618 if (err) {
6619 printf("hfs_makenode: removefile failed (%d) for CP entry %p\n", err, tvp);
6620 }
6621
6622 /* Release the cnode lock and mark the vnode for termination */
6623 hfs_unlock (cp);
6624 err = vnode_recycle (tvp);
6625 if (err) {
6626 printf("hfs_makenode: vnode_recycle failed (%d) for CP entry %p\n", err, tvp);
6627 }
6628
6629 /* Drop the iocount on the new vnode to force reclamation/recycling */
6630 vnode_put (tvp);
6631 cp = NULL;
6632 *vpp = NULL;
6633 }
6634 }
6635 #endif
6636
6637 #if QUOTA
6638 /*
6639 * Once we create this vnode, we need to initialize its quota data
6640 * structures, if necessary. We know that it is OK to just go ahead and
6641 * initialize because we've already validated earlier (through the hfs_quotacheck
6642 * function) to see if creating this cnode/vnode would cause us to go over quota.
6643 */
6644 if (hfsmp->hfs_flags & HFS_QUOTAS) {
6645 if (cp) {
6646 /* cp could have been zeroed earlier */
6647 (void) hfs_getinoquota(cp);
6648 }
6649 }
6650 #endif
6651
6652 exit:
6653 cat_releasedesc(&out_desc);
6654
6655 #if CONFIG_PROTECT
6656 /*
6657 * We may have jumped here in error-handling various situations above.
6658 * If we haven't already dumped the temporary CP used to initialize
6659 * the file atomically, then free it now. cp_entry_destroy should null
6660 * out the pointer if it was called already.
6661 */
6662 if (entry) {
6663 cp_entry_destroy (hfsmp, entry);
6664 entry = NULL;
6665 }
6666 #endif
6667
6668 /*
6669 * Make sure we release cnode lock on dcp.
6670 */
6671 if (dcp) {
6672 dcp->c_flag &= ~C_DIR_MODIFICATION;
6673 wakeup((caddr_t)&dcp->c_flag);
6674
6675 hfs_unlock(dcp);
6676 }
6677 if (error == 0 && cp != NULL) {
6678 hfs_unlock(cp);
6679 }
6680 if (started_tr) {
6681 hfs_end_transaction(hfsmp);
6682 started_tr = 0;
6683 }
6684
6685 return (error);
6686 }
6687
6688
6689 /*
6690 * hfs_vgetrsrc acquires a resource fork vnode corresponding to the
6691 * cnode that is found in 'vp'. The cnode should be locked upon entry
6692 * and will be returned locked, but it may be dropped temporarily.
6693 *
6694 * If the resource fork vnode does not exist, HFS will attempt to acquire an
6695 * empty (uninitialized) vnode from VFS so as to avoid deadlocks with
6696 * jetsam. If we let the normal getnewvnode code produce the vnode for us
6697 * we would be doing so while holding the cnode lock of our cnode.
6698 *
6699 * On success, *rvpp will hold the resource fork vnode with an
6700 * iocount. *Don't* forget the vnode_put.
6701 */
6702 int
6703 hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp)
6704 {
6705 struct vnode *rvp = NULLVP;
6706 struct vnode *empty_rvp = NULLVP;
6707 struct vnode *dvp = NULLVP;
6708 struct cnode *cp = VTOC(vp);
6709 int error;
6710 int vid;
6711
6712 if (vnode_vtype(vp) == VDIR) {
6713 return EINVAL;
6714 }
6715
6716 restart:
6717 /* Attempt to use existing vnode */
6718 if ((rvp = cp->c_rsrc_vp)) {
6719 vid = vnode_vid(rvp);
6720
6721 // vnode_getwithvid can block so we need to drop the cnode lock
6722 hfs_unlock(cp);
6723
6724 error = vnode_getwithvid(rvp, vid);
6725
6726 hfs_lock_always(cp, HFS_EXCLUSIVE_LOCK);
6727
6728 /*
6729 * When our lock was relinquished, the resource fork
6730 * could have been recycled. Check for this and try
6731 * again.
6732 */
6733 if (error == ENOENT)
6734 goto restart;
6735
6736 if (error) {
6737 const char * name = (const char *)VTOC(vp)->c_desc.cd_nameptr;
6738
6739 if (name)
6740 printf("hfs_vgetrsrc: couldn't get resource"
6741 " fork for %s, vol=%s, err=%d\n", name, hfsmp->vcbVN, error);
6742 return (error);
6743 }
6744 } else {
6745 struct cat_fork rsrcfork;
6746 struct componentname cn;
6747 struct cat_desc *descptr = NULL;
6748 struct cat_desc to_desc;
6749 char delname[32];
6750 int lockflags;
6751 int newvnode_flags = 0;
6752
6753 /*
6754 * In this case, we don't currently see a resource fork vnode attached
6755 * to this cnode. In most cases, we were called from a read-only VNOP
6756 * like getattr, so it should be safe to drop the cnode lock and then
6757 * re-acquire it.
6758 *
6759 * Here, we drop the lock so that we can acquire an empty/husk
6760 * vnode so that we don't deadlock against jetsam.
6761 *
6762 * It does not currently appear possible to hold the truncate lock via
6763 * FS re-entrancy when we get to this point. (8/2014)
6764 */
6765 hfs_unlock (cp);
6766
6767 error = vnode_create_empty (&empty_rvp);
6768
6769 hfs_lock_always (cp, HFS_EXCLUSIVE_LOCK);
6770
6771 if (error) {
6772 /* If acquiring the 'empty' vnode failed, then nothing to clean up */
6773 return error;
6774 }
6775
6776 /*
6777 * We could have raced with another thread here while we dropped our cnode
6778 * lock. See if the cnode now has a resource fork vnode and restart if appropriate.
6779 *
6780 * Note: We just released the cnode lock, so there is a possibility that the
6781 * cnode that we just acquired has been deleted or even removed from disk
6782 * completely, though this is unlikely. If the file is open-unlinked, the
6783 * check below will resolve it for us. If it has been completely
6784 * removed (even from the catalog!), then when we examine the catalog
6785 * directly, below, while holding the catalog lock, we will not find the
6786 * item and we can fail out properly.
6787 */
6788 if (cp->c_rsrc_vp) {
6789 /* Drop the empty vnode before restarting */
6790 vnode_put (empty_rvp);
6791 empty_rvp = NULL;
6792 rvp = NULL;
6793 goto restart;
6794 }
6795
6796 /*
6797 * hfs_vgetrsrc may be invoked for a cnode that has already been marked
6798 * C_DELETED. This is because we need to continue to provide rsrc
6799 * fork access to open-unlinked files. In this case, build a fake descriptor
6800 * like in hfs_removefile. If we don't do this, buildkey will fail in
6801 * cat_lookup because this cnode has no name in its descriptor.
6802 */
6803 if ((cp->c_flag & C_DELETED ) && (cp->c_desc.cd_namelen == 0)) {
6804 bzero (&to_desc, sizeof(to_desc));
6805 bzero (delname, 32);
6806 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
6807 to_desc.cd_nameptr = (const u_int8_t*) delname;
6808 to_desc.cd_namelen = strlen(delname);
6809 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
6810 to_desc.cd_flags = 0;
6811 to_desc.cd_cnid = cp->c_cnid;
6812
6813 descptr = &to_desc;
6814 }
6815 else {
6816 descptr = &cp->c_desc;
6817 }
6818
6819
6820 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
6821
6822 /*
6823 * We call cat_idlookup (instead of cat_lookup) below because we can't
6824 * trust the descriptor in the provided cnode for lookups at this point.
6825 * Between the time of the original lookup of this vnode and now, the
6826 * descriptor could have gotten swapped or replaced. If this occurred,
6827 * the parent/name combo originally desired may not necessarily be provided
6828 * if we use the descriptor. Even worse, if the vnode represents
6829 * a hardlink, we could have removed one of the links from the namespace
6830 * but left the descriptor alone, since hfs_unlink does not invalidate
6831 * the descriptor in the cnode if other links still point to the inode.
6832 *
6833 * Consider the following (slightly contrived) scenario:
6834 * /tmp/a <--> /tmp/b (hardlinks).
6835 * 1. Thread A: open rsrc fork on /tmp/b.
6836 * 1a. Thread A: does lookup, goes out to lunch right before calling getnamedstream.
6837 * 2. Thread B does 'mv /foo/b /tmp/b'
6838 * 2a. Thread B succeeds.
6839 * 3. Thread A comes back and wants rsrc fork info for /tmp/b.
6840 *
6841 * Even though the hardlink backing /tmp/b is now eliminated, the descriptor
6842 * is not removed/updated during the unlink process. So, if you were to
6843 * do a lookup on /tmp/b, you'd acquire an entirely different record's resource
6844 * fork.
6845 *
6846 * As a result, we use the fileid, which should be invariant for the lifetime
6847 * of the cnode (possibly barring calls to exchangedata).
6848 *
6849 * Addendum: We can't do the above for HFS standard since we aren't guaranteed to
6850 * have thread records for files. They were only required for directories. So
6851 * we need to do the lookup with the catalog name. This is OK since hardlinks were
6852 * never allowed on HFS standard.
6853 */
6854
6855 /* Get resource fork data */
6856 if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
6857 error = cat_idlookup (hfsmp, cp->c_fileid, 0, 1, NULL, NULL, &rsrcfork);
6858 }
6859 #if CONFIG_HFS_STD
6860 else {
6861 /*
6862 * HFS standard only:
6863 *
6864 * Get the resource fork for this item with a cat_lookup call, but do not
6865 * force a case lookup since HFS standard is case-insensitive only. We
6866 * don't want the descriptor; just the fork data here. If we tried to
6867 * do a ID lookup (via thread record -> catalog record), then we might fail
6868 * prematurely since, as noted above, thread records were not strictly required
6869 * on files in HFS.
6870 */
6871 error = cat_lookup (hfsmp, descptr, 1, 0, (struct cat_desc*)NULL,
6872 (struct cat_attr*)NULL, &rsrcfork, NULL);
6873 }
6874 #endif
6875
6876 hfs_systemfile_unlock(hfsmp, lockflags);
6877 if (error) {
6878 /* Drop our 'empty' vnode ! */
6879 vnode_put (empty_rvp);
6880 return (error);
6881 }
6882 /*
6883 * Supply hfs_getnewvnode with a component name.
6884 */
6885 cn.cn_pnbuf = NULL;
6886 if (descptr->cd_nameptr) {
6887 MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
6888 cn.cn_nameiop = LOOKUP;
6889 cn.cn_flags = ISLASTCN | HASBUF;
6890 cn.cn_context = NULL;
6891 cn.cn_pnlen = MAXPATHLEN;
6892 cn.cn_nameptr = cn.cn_pnbuf;
6893 cn.cn_hash = 0;
6894 cn.cn_consume = 0;
6895 cn.cn_namelen = snprintf(cn.cn_nameptr, MAXPATHLEN,
6896 "%s%s", descptr->cd_nameptr,
6897 _PATH_RSRCFORKSPEC);
6898 // Should never happen because cn.cn_nameptr won't ever be long...
6899 if (cn.cn_namelen >= MAXPATHLEN) {
6900 FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
6901 /* Drop our 'empty' vnode ! */
6902 vnode_put (empty_rvp);
6903 return ENAMETOOLONG;
6904
6905 }
6906 }
6907 dvp = vnode_getparent(vp);
6908
6909 /*
6910 * We are about to call hfs_getnewvnode and pass in the vnode that we acquired
6911 * earlier when we were not holding any locks. The semantics of GNV_USE_VP require that
6912 * either hfs_getnewvnode consume the vnode and vend it back to us, properly initialized,
6913 * or it will consume/dispose of it properly if it errors out.
6914 */
6915 rvp = empty_rvp;
6916
6917 error = hfs_getnewvnode(hfsmp, dvp, cn.cn_pnbuf ? &cn : NULL,
6918 descptr, (GNV_WANTRSRC | GNV_SKIPLOCK | GNV_USE_VP),
6919 &cp->c_attr, &rsrcfork, &rvp, &newvnode_flags);
6920
6921 if (dvp)
6922 vnode_put(dvp);
6923 if (cn.cn_pnbuf)
6924 FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
6925 if (error)
6926 return (error);
6927 } /* End 'else' for rsrc fork not existing */
6928
6929 *rvpp = rvp;
6930 return (0);
6931 }
6932
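/*
 * The hfsspec_* and hfsfifo_* wrappers below only set the cnode's
 * access/change/modify time flags and then pass the call straight through
 * to the underlying specfs or fifofs operation via VOCALL(), indexing the
 * spec_vnodeop_p / fifo_vnodeop_p vectors with VOFFSET().
 */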
6933 /*
6934 * Wrapper for special device reads
6935 */
6936 int
6937 hfsspec_read(ap)
6938 struct vnop_read_args /* {
6939 struct vnode *a_vp;
6940 struct uio *a_uio;
6941 int a_ioflag;
6942 vfs_context_t a_context;
6943 } */ *ap;
6944 {
6945 /*
6946 * Set access flag.
6947 */
6948 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
6949 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_read), ap));
6950 }
6951
6952 /*
6953 * Wrapper for special device writes
6954 */
6955 int
6956 hfsspec_write(ap)
6957 struct vnop_write_args /* {
6958 struct vnode *a_vp;
6959 struct uio *a_uio;
6960 int a_ioflag;
6961 vfs_context_t a_context;
6962 } */ *ap;
6963 {
6964 /*
6965 * Set update and change flags.
6966 */
6967 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
6968 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
6969 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_write), ap));
6970 }
6971
6972 /*
6973 * Wrapper for special device close
6974 *
6975 * Update the times on the cnode then do device close.
6976 */
6977 int
6978 hfsspec_close(ap)
6979 struct vnop_close_args /* {
6980 struct vnode *a_vp;
6981 int a_fflag;
6982 vfs_context_t a_context;
6983 } */ *ap;
6984 {
6985 struct vnode *vp = ap->a_vp;
6986 struct cnode *cp;
6987
6988 if (vnode_isinuse(ap->a_vp, 0)) {
6989 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
6990 cp = VTOC(vp);
6991 hfs_touchtimes(VTOHFS(vp), cp);
6992 hfs_unlock(cp);
6993 }
6994 }
6995 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_close), ap));
6996 }
6997
6998 #if FIFO
6999 /*
7000 * Wrapper for fifo reads
7001 */
7002 static int
7003 hfsfifo_read(ap)
7004 struct vnop_read_args /* {
7005 struct vnode *a_vp;
7006 struct uio *a_uio;
7007 int a_ioflag;
7008 vfs_context_t a_context;
7009 } */ *ap;
7010 {
7011 /*
7012 * Set access flag.
7013 */
7014 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
7015 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_read), ap));
7016 }
7017
7018 /*
7019 * Wrapper for fifo writes
7020 */
7021 static int
7022 hfsfifo_write(ap)
7023 struct vnop_write_args /* {
7024 struct vnode *a_vp;
7025 struct uio *a_uio;
7026 int a_ioflag;
7027 vfs_context_t a_context;
7028 } */ *ap;
7029 {
7030 /*
7031 * Set update and change flags.
7032 */
7033 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
7034 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
7035 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_write), ap));
7036 }
7037
7038 /*
7039 * Wrapper for fifo close
7040 *
7041 * Update the times on the cnode, then do the fifo close.
7042 */
7043 static int
7044 hfsfifo_close(ap)
7045 struct vnop_close_args /* {
7046 struct vnode *a_vp;
7047 int a_fflag;
7048 vfs_context_t a_context;
7049 } */ *ap;
7050 {
7051 struct vnode *vp = ap->a_vp;
7052 struct cnode *cp;
7053
7054 if (vnode_isinuse(ap->a_vp, 1)) {
7055 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
7056 cp = VTOC(vp);
7057 hfs_touchtimes(VTOHFS(vp), cp);
7058 hfs_unlock(cp);
7059 }
7060 }
7061 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_close), ap));
7062 }
7063
7064
7065 #endif /* FIFO */
7066
7067 /*
7068 * Getter for the document_id
7069 * the document_id is stored in FndrExtendedFileInfo/FndrExtendedDirInfo
7070 */
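/*
 * Layout note (inferred from the structures used below): the 32-byte Finder
 * info blob is the classic 16-byte FndrFileInfo/FndrDirInfo followed by the
 * 16-byte FndrExtendedFileInfo/FndrExtendedDirInfo, whose first u_int32_t is
 * the document_id; hence the finderinfo + 16 below.
 */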
7071 static u_int32_t
7072 hfs_get_document_id_internal(const uint8_t *finderinfo, mode_t mode)
7073 {
7074 const uint8_t *finfo = NULL;
7075 u_int32_t doc_id = 0;
7076
7077 /* advance past the classic 16-byte Finder info to the extended info */
7078 finfo = finderinfo + 16;
7079
7080 if (S_ISREG(mode)) {
7081 const struct FndrExtendedFileInfo *extinfo = (const struct FndrExtendedFileInfo *)finfo;
7082 doc_id = extinfo->document_id;
7083 } else if (S_ISDIR(mode)) {
7084 const struct FndrExtendedDirInfo *extinfo = (const struct FndrExtendedDirInfo *)finfo;
7085 doc_id = extinfo->document_id;
7086 }
7087
7088 return doc_id;
7089 }
7090
7091
7092 /* getter(s) for document id */
7093 u_int32_t
7094 hfs_get_document_id(struct cnode *cp)
7095 {
7096 return (hfs_get_document_id_internal((u_int8_t*)cp->c_finderinfo,
7097 cp->c_attr.ca_mode));
7098 }
7099
7100 /* If you have finderinfo and mode, you can use this */
7101 u_int32_t
7102 hfs_get_document_id_from_blob(const uint8_t *finderinfo, mode_t mode)
7103 {
7104 return (hfs_get_document_id_internal(finderinfo, mode));
7105 }
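/*
 * Usage sketch (illustrative only): both getters return the same value;
 * hfs_get_document_id_from_blob() serves callers that only have the raw
 * Finder info blob and a mode in hand, e.g.:
 *
 *	u_int32_t doc_id = hfs_get_document_id(cp);
 *
 *	u_int32_t doc_id_from_blob = hfs_get_document_id_from_blob(
 *		(const uint8_t *)cp->c_finderinfo, cp->c_attr.ca_mode);
 */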
7106
7107 /*
7108 * Synchronize a file's in-core state with that on disk.
7109 */
7110 int
7111 hfs_vnop_fsync(ap)
7112 struct vnop_fsync_args /* {
7113 struct vnode *a_vp;
7114 int a_waitfor;
7115 vfs_context_t a_context;
7116 } */ *ap;
7117 {
7118 struct vnode* vp = ap->a_vp;
7119 int error;
7120
7121 /* Note: We check the hfs flag instead of the vfs mount flag because during
7122 * a read-only to read-write update, hfs marks itself read-write much earlier
7123 * than the vfs does; checking the hfs flag avoids skipping writes such as
7124 * zeroing out unused nodes, creating the hotfiles btree, etc.
7125 */
7126 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) {
7127 return 0;
7128 }
7129
7130 /*
7131 * No need to call cp_handle_vnop to resolve fsync(). Any dirty data
7132 * should have caused the keys to be unwrapped at the time the data was
7133 * put into the UBC, either at mmap/pagein/read-write time. If anything
7134 * slipped through, the strategy routine will resolve the keys for us.
7135 *
7136 * We also need to allow ENOENT lock errors since the unlink
7137 * system call can invoke VNOP_FSYNC during vclean.
7138 */
7139 error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
7140 if (error)
7141 return (0);
7142
7143 error = hfs_fsync(vp, ap->a_waitfor, 0, vfs_context_proc(ap->a_context));
7144
7145 hfs_unlock(VTOC(vp));
7146 return (error);
7147 }
7148
7149 int (**hfs_vnodeop_p)(void *);
7150
7151 #define VOPFUNC int (*)(void *)
7152
7153
7154 #if CONFIG_HFS_STD
7155 int (**hfs_std_vnodeop_p) (void *);
7156 static int hfs_readonly_op (__unused void* ap) { return (EROFS); }
7157
7158 /*
7159 * In 10.6 and forward, HFS Standard is read-only and deprecated. The vnop table below
7160 * is for use with HFS standard to block out operations that would modify the file system
7161 */
7162
7163 struct vnodeopv_entry_desc hfs_standard_vnodeop_entries[] = {
7164 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7165 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
7166 { &vnop_create_desc, (VOPFUNC)hfs_readonly_op }, /* create (READONLY) */
7167 { &vnop_mknod_desc, (VOPFUNC)hfs_readonly_op }, /* mknod (READONLY) */
7168 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
7169 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
7170 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7171 { &vnop_setattr_desc, (VOPFUNC)hfs_readonly_op }, /* setattr */
7172 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
7173 { &vnop_write_desc, (VOPFUNC)hfs_readonly_op }, /* write (READONLY) */
7174 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
7175 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
7176 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
7177 { &vnop_exchange_desc, (VOPFUNC)hfs_readonly_op }, /* exchange (READONLY)*/
7178 { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */
7179 { &vnop_fsync_desc, (VOPFUNC)hfs_readonly_op}, /* fsync (READONLY) */
7180 { &vnop_remove_desc, (VOPFUNC)hfs_readonly_op }, /* remove (READONLY) */
7181 { &vnop_link_desc, (VOPFUNC)hfs_readonly_op }, /* link (READONLY) */
7182 { &vnop_rename_desc, (VOPFUNC)hfs_readonly_op }, /* rename (READONLY)*/
7183 { &vnop_mkdir_desc, (VOPFUNC)hfs_readonly_op }, /* mkdir (READONLY) */
7184 { &vnop_rmdir_desc, (VOPFUNC)hfs_readonly_op }, /* rmdir (READONLY) */
7185 { &vnop_symlink_desc, (VOPFUNC)hfs_readonly_op }, /* symlink (READONLY) */
7186 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
7187 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
7188 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
7189 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7190 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7191 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
7192 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
7193 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7194 { &vnop_allocate_desc, (VOPFUNC)hfs_readonly_op }, /* allocate (READONLY) */
7195 #if CONFIG_SEARCHFS
7196 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
7197 #else
7198 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
7199 #endif
7200 { &vnop_bwrite_desc, (VOPFUNC)hfs_readonly_op }, /* bwrite (READONLY) */
7201 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
7202 { &vnop_pageout_desc,(VOPFUNC) hfs_readonly_op }, /* pageout (READONLY) */
7203 { &vnop_copyfile_desc, (VOPFUNC)hfs_readonly_op }, /* copyfile (READONLY)*/
7204 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7205 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7206 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7207 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7208 { &vnop_setxattr_desc, (VOPFUNC)hfs_readonly_op}, /* set xattr (READONLY) */
7209 { &vnop_removexattr_desc, (VOPFUNC)hfs_readonly_op}, /* remove xattr (READONLY) */
7210 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7211 #if NAMEDSTREAMS
7212 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
7213 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_readonly_op },
7214 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_readonly_op },
7215 #endif
7216 { &vnop_getattrlistbulk_desc, (VOPFUNC)hfs_vnop_getattrlistbulk }, /* getattrlistbulk */
7217 { NULL, (VOPFUNC)NULL }
7218 };
7219
7220 struct vnodeopv_desc hfs_std_vnodeop_opv_desc =
7221 { &hfs_std_vnodeop_p, hfs_standard_vnodeop_entries };
7222 #endif
7223
7224 /* VNOP table for HFS+ */
7225 struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
7226 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7227 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
7228 { &vnop_create_desc, (VOPFUNC)hfs_vnop_create }, /* create */
7229 { &vnop_mknod_desc, (VOPFUNC)hfs_vnop_mknod }, /* mknod */
7230 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
7231 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
7232 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7233 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7234 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
7235 { &vnop_write_desc, (VOPFUNC)hfs_vnop_write }, /* write */
7236 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
7237 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
7238 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
7239 { &vnop_exchange_desc, (VOPFUNC)hfs_vnop_exchange }, /* exchange */
7240 { &vnop_mmap_desc, (VOPFUNC)hfs_vnop_mmap }, /* mmap */
7241 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7242 { &vnop_remove_desc, (VOPFUNC)hfs_vnop_remove }, /* remove */
7243 { &vnop_link_desc, (VOPFUNC)hfs_vnop_link }, /* link */
7244 { &vnop_rename_desc, (VOPFUNC)hfs_vnop_rename }, /* rename */
7245 { &vnop_mkdir_desc, (VOPFUNC)hfs_vnop_mkdir }, /* mkdir */
7246 { &vnop_rmdir_desc, (VOPFUNC)hfs_vnop_rmdir }, /* rmdir */
7247 { &vnop_symlink_desc, (VOPFUNC)hfs_vnop_symlink }, /* symlink */
7248 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
7249 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
7250 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
7251 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7252 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7253 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
7254 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
7255 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7256 { &vnop_allocate_desc, (VOPFUNC)hfs_vnop_allocate }, /* allocate */
7257 #if CONFIG_SEARCHFS
7258 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
7259 #else
7260 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
7261 #endif
7262 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite }, /* bwrite */
7263 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
7264 { &vnop_pageout_desc,(VOPFUNC) hfs_vnop_pageout }, /* pageout */
7265 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7266 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7267 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7268 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7269 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr}, /* getxattr */
7270 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr}, /* setxattr */
7271 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr}, /* removexattr */
7272 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr}, /* listxattr */
7273 #if NAMEDSTREAMS
7274 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
7275 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_vnop_makenamedstream },
7276 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_vnop_removenamedstream },
7277 #endif
7278 { &vnop_getattrlistbulk_desc, (VOPFUNC)hfs_vnop_getattrlistbulk }, /* getattrlistbulk */
7279 { &vnop_mnomap_desc, (VOPFUNC)hfs_vnop_mnomap },
7280 { NULL, (VOPFUNC)NULL }
7281 };
7282
7283 struct vnodeopv_desc hfs_vnodeop_opv_desc =
7284 { &hfs_vnodeop_p, hfs_vnodeop_entries };
7285
7286
7287 /* Spec Op vnop table for HFS+ */
7288 int (**hfs_specop_p)(void *);
7289 struct vnodeopv_entry_desc hfs_specop_entries[] = {
7290 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7291 { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */
7292 { &vnop_create_desc, (VOPFUNC)spec_create }, /* create */
7293 { &vnop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */
7294 { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */
7295 { &vnop_close_desc, (VOPFUNC)hfsspec_close }, /* close */
7296 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7297 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7298 { &vnop_read_desc, (VOPFUNC)hfsspec_read }, /* read */
7299 { &vnop_write_desc, (VOPFUNC)hfsspec_write }, /* write */
7300 { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */
7301 { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */
7302 { &vnop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */
7303 { &vnop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */
7304 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7305 { &vnop_remove_desc, (VOPFUNC)spec_remove }, /* remove */
7306 { &vnop_link_desc, (VOPFUNC)spec_link }, /* link */
7307 { &vnop_rename_desc, (VOPFUNC)spec_rename }, /* rename */
7308 { &vnop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */
7309 { &vnop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */
7310 { &vnop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */
7311 { &vnop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */
7312 { &vnop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */
7313 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7314 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7315 { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */
7316 { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */
7317 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7318 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
7319 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
7320 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
7321 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7322 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7323 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7324 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7325 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7326 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7327 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7328 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
7329 };
7330 struct vnodeopv_desc hfs_specop_opv_desc =
7331 { &hfs_specop_p, hfs_specop_entries };
7332
7333 #if FIFO
7334 /* HFS+ FIFO VNOP table */
7335 int (**hfs_fifoop_p)(void *);
7336 struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
7337 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7338 { &vnop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */
7339 { &vnop_create_desc, (VOPFUNC)fifo_create }, /* create */
7340 { &vnop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */
7341 { &vnop_open_desc, (VOPFUNC)fifo_open }, /* open */
7342 { &vnop_close_desc, (VOPFUNC)hfsfifo_close }, /* close */
7343 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7344 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7345 { &vnop_read_desc, (VOPFUNC)hfsfifo_read }, /* read */
7346 { &vnop_write_desc, (VOPFUNC)hfsfifo_write }, /* write */
7347 { &vnop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */
7348 { &vnop_select_desc, (VOPFUNC)fifo_select }, /* select */
7349 { &vnop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */
7350 { &vnop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */
7351 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7352 { &vnop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */
7353 { &vnop_link_desc, (VOPFUNC)fifo_link }, /* link */
7354 { &vnop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */
7355 { &vnop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */
7356 { &vnop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */
7357 { &vnop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */
7358 { &vnop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */
7359 { &vnop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */
7360 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7361 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7362 { &vnop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */
7363 { &vnop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */
7364 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7365 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
7366 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
7367 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
7368 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7369 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7370 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7371 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7372 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7373 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7374 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7375 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7376 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
7377 };
7378 struct vnodeopv_desc hfs_fifoop_opv_desc =
7379 { &hfs_fifoop_p, hfs_fifoop_entries };
7380 #endif /* FIFO */
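/*
 * The vnodeopv_desc tables above are not consumed in this file; the VFS
 * layer's operation-vector setup (vfs_opv_init() and the built-in file
 * system configuration tables) walks them to fill in the hfs_vnodeop_p,
 * hfs_std_vnodeop_p, hfs_specop_p and hfs_fifoop_p dispatch vectors.
 */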
7381
7382
7383