1 /*
2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/systm.h>
30 #include <sys/param.h>
31 #include <sys/kernel.h>
32 #include <sys/file_internal.h>
33 #include <sys/dirent.h>
34 #include <sys/stat.h>
35 #include <sys/buf.h>
36 #include <sys/buf_internal.h>
37 #include <sys/mount.h>
38 #include <sys/vnode_if.h>
39 #include <sys/vnode_internal.h>
40 #include <sys/malloc.h>
41 #include <sys/ubc.h>
42 #include <sys/ubc_internal.h>
43 #include <sys/paths.h>
44 #include <sys/quota.h>
45 #include <sys/time.h>
46 #include <sys/disk.h>
47 #include <sys/kauth.h>
48 #include <sys/uio_internal.h>
49 #include <sys/fsctl.h>
50 #include <sys/cprotect.h>
51 #include <sys/xattr.h>
52 #include <string.h>
53 #include <sys/fsevents.h>
54 #include <kern/kalloc.h>
55
56 #include <miscfs/specfs/specdev.h>
57 #include <miscfs/fifofs/fifo.h>
58 #include <vfs/vfs_support.h>
59 #include <machine/spl.h>
60
61 #include <sys/kdebug.h>
62 #include <sys/sysctl.h>
63
64 #include "hfs.h"
65 #include "hfs_catalog.h"
66 #include "hfs_cnode.h"
67 #include "hfs_dbg.h"
68 #include "hfs_mount.h"
69 #include "hfs_quota.h"
70 #include "hfs_endian.h"
71
72 #include "hfscommon/headers/BTreesInternal.h"
73 #include "hfscommon/headers/FileMgrInternal.h"
74
75 #define KNDETACH_VNLOCKED 0x00000001
76
77 /* Global vfs data structures for hfs */
78
79 /* Always F_FULLFSYNC? 1=yes, 0=no (default is 'no' due to "various" reasons) */
80 int always_do_fullfsync = 0;
81 SYSCTL_DECL(_vfs_generic);
82 SYSCTL_INT (_vfs_generic, OID_AUTO, always_do_fullfsync, CTLFLAG_RW | CTLFLAG_LOCKED, &always_do_fullfsync, 0, "always F_FULLFSYNC when fsync is called");
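/* Runtime tunable, e.g. "sysctl -w vfs.generic.always_do_fullfsync=1" (illustrative invocation). */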
83
84 int hfs_makenode(struct vnode *dvp, struct vnode **vpp,
85 struct componentname *cnp, struct vnode_attr *vap,
86 vfs_context_t ctx);
87 int hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p);
88 int hfs_metasync_all(struct hfsmount *hfsmp);
89
90 int hfs_removedir(struct vnode *, struct vnode *, struct componentname *,
91 int, int);
92 int hfs_removefile(struct vnode *, struct vnode *, struct componentname *,
93 int, int, int, struct vnode *, int);
94
95 /* Used here and in cnode teardown -- for symlinks */
96 int hfs_removefile_callback(struct buf *bp, void *hfsmp);
97
98 int hfs_movedata (struct vnode *, struct vnode*);
99 static int hfs_move_fork (struct filefork *srcfork, struct cnode *src,
100 struct filefork *dstfork, struct cnode *dst);
101
102 decmpfs_cnode* hfs_lazy_init_decmpfs_cnode (struct cnode *cp);
103
104 #if FIFO
105 static int hfsfifo_read(struct vnop_read_args *);
106 static int hfsfifo_write(struct vnop_write_args *);
107 static int hfsfifo_close(struct vnop_close_args *);
108
109 extern int (**fifo_vnodeop_p)(void *);
110 #endif /* FIFO */
111
112 int hfs_vnop_close(struct vnop_close_args*);
113 int hfs_vnop_create(struct vnop_create_args*);
114 int hfs_vnop_exchange(struct vnop_exchange_args*);
115 int hfs_vnop_fsync(struct vnop_fsync_args*);
116 int hfs_vnop_mkdir(struct vnop_mkdir_args*);
117 int hfs_vnop_mknod(struct vnop_mknod_args*);
118 int hfs_vnop_getattr(struct vnop_getattr_args*);
119 int hfs_vnop_open(struct vnop_open_args*);
120 int hfs_vnop_readdir(struct vnop_readdir_args*);
121 int hfs_vnop_remove(struct vnop_remove_args*);
122 int hfs_vnop_rename(struct vnop_rename_args*);
123 int hfs_vnop_rmdir(struct vnop_rmdir_args*);
124 int hfs_vnop_symlink(struct vnop_symlink_args*);
125 int hfs_vnop_setattr(struct vnop_setattr_args*);
126 int hfs_vnop_readlink(struct vnop_readlink_args *);
127 int hfs_vnop_pathconf(struct vnop_pathconf_args *);
128 int hfs_vnop_whiteout(struct vnop_whiteout_args *);
129 int hfs_vnop_mmap(struct vnop_mmap_args *ap);
130 int hfsspec_read(struct vnop_read_args *);
131 int hfsspec_write(struct vnop_write_args *);
132 int hfsspec_close(struct vnop_close_args *);
133
134 /* Options for hfs_removedir and hfs_removefile */
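/* HFSRM_SKIP_RESERVE: the caller has already made the needed catalog reservation, so the removal path skips its own. */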
135 #define HFSRM_SKIP_RESERVE 0x01
136
137
138
139 /*****************************************************************************
140 *
141 * Common Operations on vnodes
142 *
143 *****************************************************************************/
144
145 /*
146 * Is the given cnode either the .journal or .journal_info_block file on
147 * a volume with an active journal? Many VNOPs use this to deny access
148 * to those files.
149 *
150 * Note: the .journal file on a volume with an external journal still
151 * returns true here, even though it does not actually hold the contents
152 * of the volume's journal.
153 */
154 static _Bool
155 hfs_is_journal_file(struct hfsmount *hfsmp, struct cnode *cp)
156 {
157 if (hfsmp->jnl != NULL &&
158 (cp->c_fileid == hfsmp->hfs_jnlinfoblkid ||
159 cp->c_fileid == hfsmp->hfs_jnlfileid)) {
160 return true;
161 } else {
162 return false;
163 }
164 }
165
166 /*
167 * Create a regular file.
168 */
169 int
170 hfs_vnop_create(struct vnop_create_args *ap)
171 {
172 int error;
173
174 again:
175 error = hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
176
177 /*
178 * We speculatively skipped the original lookup of the leaf
179 * for CREATE. Since it exists, go get it as long as they
180 * didn't want an exclusive create.
181 */
182 if ((error == EEXIST) && !(ap->a_vap->va_vaflags & VA_EXCLUSIVE)) {
183 struct vnop_lookup_args args;
184
185 args.a_desc = &vnop_lookup_desc;
186 args.a_dvp = ap->a_dvp;
187 args.a_vpp = ap->a_vpp;
188 args.a_cnp = ap->a_cnp;
189 args.a_context = ap->a_context;
190 args.a_cnp->cn_nameiop = LOOKUP;
191 error = hfs_vnop_lookup(&args);
192 /*
193 * We can also race with remove for this file.
194 */
195 if (error == ENOENT) {
196 goto again;
197 }
198
199 	/* Make sure it was a file. */
200 if ((error == 0) && !vnode_isreg(*args.a_vpp)) {
201 vnode_put(*args.a_vpp);
202 *args.a_vpp = NULLVP;
203 error = EEXIST;
204 }
205 args.a_cnp->cn_nameiop = CREATE;
206 }
207 return (error);
208 }
209
210 /*
211 * Make device special file.
212 */
213 int
214 hfs_vnop_mknod(struct vnop_mknod_args *ap)
215 {
216 struct vnode_attr *vap = ap->a_vap;
217 struct vnode *dvp = ap->a_dvp;
218 struct vnode **vpp = ap->a_vpp;
219 struct cnode *cp;
220 int error;
221
222 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord) {
223 return (ENOTSUP);
224 }
225
226 /* Create the vnode */
227 error = hfs_makenode(dvp, vpp, ap->a_cnp, vap, ap->a_context);
228 if (error)
229 return (error);
230
231 cp = VTOC(*vpp);
232 cp->c_touch_acctime = TRUE;
233 cp->c_touch_chgtime = TRUE;
234 cp->c_touch_modtime = TRUE;
235
236 if ((vap->va_rdev != VNOVAL) &&
237 (vap->va_type == VBLK || vap->va_type == VCHR))
238 cp->c_rdev = vap->va_rdev;
239
240 return (0);
241 }
242
243 #if HFS_COMPRESSION
244 /*
245 * hfs_ref_data_vp(): returns the data fork vnode for a given cnode.
246 * In the (hopefully rare) case where the data fork vnode is not
247 * present, it will use hfs_vget() to create a new vnode for the
248 * data fork.
249 *
250 * NOTE: If successful and a vnode is returned, the caller is responsible
251 * for releasing the returned vnode with vnode_rele().
252 */
253 static int
254 hfs_ref_data_vp(struct cnode *cp, struct vnode **data_vp, int skiplock)
255 {
256 int vref = 0;
257
258 if (!data_vp || !cp) /* sanity check incoming parameters */
259 return EINVAL;
260
261 /* maybe we should take the hfs cnode lock here, and if so, use the skiplock parameter to tell us not to */
262
263 if (!skiplock) hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
264 struct vnode *c_vp = cp->c_vp;
265 if (c_vp) {
266 /* we already have a data vnode */
267 *data_vp = c_vp;
268 vref = vnode_ref(*data_vp);
269 if (!skiplock) hfs_unlock(cp);
270 if (vref == 0) {
271 return 0;
272 }
273 return EINVAL;
274 }
275 /* no data fork vnode in the cnode, so ask hfs for one. */
276
277 if (!cp->c_rsrc_vp) {
278 /* if we don't have either a c_vp or c_rsrc_vp, we can't really do anything useful */
279 *data_vp = NULL;
280 if (!skiplock) hfs_unlock(cp);
281 return EINVAL;
282 }
283
284 if (0 == hfs_vget(VTOHFS(cp->c_rsrc_vp), cp->c_cnid, data_vp, 1, 0) &&
285 0 != data_vp) {
286 vref = vnode_ref(*data_vp);
287 vnode_put(*data_vp);
288 if (!skiplock) hfs_unlock(cp);
289 if (vref == 0) {
290 return 0;
291 }
292 return EINVAL;
293 }
294 /* there was an error getting the vnode */
295 *data_vp = NULL;
296 if (!skiplock) hfs_unlock(cp);
297 return EINVAL;
298 }
299
300 /*
301 * hfs_lazy_init_decmpfs_cnode(): returns the decmpfs_cnode for a cnode,
302 * allocating it if necessary; returns NULL if there was an allocation error.
303 * function is non-static so that it can be used from the FCNTL handler.
304 */
305 decmpfs_cnode *
306 hfs_lazy_init_decmpfs_cnode(struct cnode *cp)
307 {
308 if (!cp->c_decmp) {
309 decmpfs_cnode *dp = NULL;
310 MALLOC_ZONE(dp, decmpfs_cnode *, sizeof(decmpfs_cnode), M_DECMPFS_CNODE, M_WAITOK);
311 if (!dp) {
312 /* error allocating a decmpfs cnode */
313 return NULL;
314 }
315 decmpfs_cnode_init(dp);
316 if (!OSCompareAndSwapPtr(NULL, dp, (void * volatile *)&cp->c_decmp)) {
317 /* another thread got here first, so free the decmpfs_cnode we allocated */
318 decmpfs_cnode_destroy(dp);
319 FREE_ZONE(dp, sizeof(*dp), M_DECMPFS_CNODE);
320 }
321 }
322
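	/* Return the cnode's field rather than the local pointer: if another thread
	 * won the race above, cp->c_decmp already points at its decmpfs_cnode. */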
323 return cp->c_decmp;
324 }
325
326 /*
327 * hfs_file_is_compressed(): returns 1 if the file is compressed, and 0 (zero) if not.
328 * if the file's compressed flag is set, makes sure that the decmpfs_cnode field
329 * is allocated by calling hfs_lazy_init_decmpfs_cnode(), then makes sure it is populated,
330 * or else fills it in via the decmpfs_file_is_compressed() function.
331 */
332 int
333 hfs_file_is_compressed(struct cnode *cp, int skiplock)
334 {
335 int ret = 0;
336
337 /* fast check to see if file is compressed. If flag is clear, just answer no */
338 if (!(cp->c_bsdflags & UF_COMPRESSED)) {
339 return 0;
340 }
341
342 decmpfs_cnode *dp = hfs_lazy_init_decmpfs_cnode(cp);
343 if (!dp) {
344 /* error allocating a decmpfs cnode, treat the file as uncompressed */
345 return 0;
346 }
347
348 /* flag was set, see if the decmpfs_cnode state is valid (zero == invalid) */
349 uint32_t decmpfs_state = decmpfs_cnode_get_vnode_state(dp);
350 switch(decmpfs_state) {
351 case FILE_IS_COMPRESSED:
352 case FILE_IS_CONVERTING: /* treat decompressing files as if they are compressed */
353 return 1;
354 case FILE_IS_NOT_COMPRESSED:
355 return 0;
356 /* otherwise the state is not cached yet */
357 }
358
359 /* decmpfs hasn't seen this file yet, so call decmpfs_file_is_compressed() to init the decmpfs_cnode struct */
360 struct vnode *data_vp = NULL;
361 if (0 == hfs_ref_data_vp(cp, &data_vp, skiplock)) {
362 if (data_vp) {
363 ret = decmpfs_file_is_compressed(data_vp, VTOCMP(data_vp)); // fill in decmpfs_cnode
364 vnode_rele(data_vp);
365 }
366 }
367 return ret;
368 }
369
370 /* hfs_uncompressed_size_of_compressed_file() - get the uncompressed size of the file.
371 * if the caller has passed a valid vnode (has a ref count > 0), then hfsmp and fid are not required.
372 * if the caller doesn't have a vnode, pass NULL in vp, and pass valid hfsmp and fid.
373  * the file's size is returned in size (required)
374 * if the indicated file is a directory (or something that doesn't have a data fork), then this call
375 * will return an error and the caller should fall back to treating the item as an uncompressed file
376 */
377 int
378 hfs_uncompressed_size_of_compressed_file(struct hfsmount *hfsmp, struct vnode *vp, cnid_t fid, off_t *size, int skiplock)
379 {
380 int ret = 0;
381 int putaway = 0; /* flag to remember if we used hfs_vget() */
382
383 if (!size) {
384 return EINVAL; /* no place to put the file size */
385 }
386
387 if (NULL == vp) {
388 if (!hfsmp || !fid) { /* make sure we have the required parameters */
389 return EINVAL;
390 }
391 if (0 != hfs_vget(hfsmp, fid, &vp, skiplock, 0)) { /* vnode is null, use hfs_vget() to get it */
392 vp = NULL;
393 } else {
394 			putaway = 1; /* note that hfs_vget() was used to acquire the vnode */
395 }
396 }
397 /* this double check for compression (hfs_file_is_compressed)
398 * ensures the cached size is present in case decmpfs hasn't
399 * encountered this node yet.
400 */
401 if (vp) {
402 if (hfs_file_is_compressed(VTOC(vp), skiplock) ) {
403 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
404 } else {
405 if (VTOCMP(vp) && VTOCMP(vp)->cmp_type >= CMP_MAX) {
406 if (VTOCMP(vp)->cmp_type != DATALESS_CMPFS_TYPE) {
407 // if we don't recognize this type, just use the real data fork size
408 if (VTOC(vp)->c_datafork) {
409 *size = VTOC(vp)->c_datafork->ff_size;
410 ret = 0;
411 } else {
412 ret = EINVAL;
413 }
414 } else {
415 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
416 ret = 0;
417 }
418 } else {
419 ret = EINVAL;
420 }
421 }
422 }
423
424 if (putaway) { /* did we use hfs_vget() to get this vnode? */
425 vnode_put(vp); /* if so, release it and set it to null */
426 vp = NULL;
427 }
428 return ret;
429 }
430
431 int
432 hfs_hides_rsrc(vfs_context_t ctx, struct cnode *cp, int skiplock)
433 {
434 if (ctx == decmpfs_ctx)
435 return 0;
436 if (!hfs_file_is_compressed(cp, skiplock))
437 return 0;
438 return decmpfs_hides_rsrc(ctx, cp->c_decmp);
439 }
440
441 int
442 hfs_hides_xattr(vfs_context_t ctx, struct cnode *cp, const char *name, int skiplock)
443 {
444 if (ctx == decmpfs_ctx)
445 return 0;
446 if (!hfs_file_is_compressed(cp, skiplock))
447 return 0;
448 return decmpfs_hides_xattr(ctx, cp->c_decmp, name);
449 }
450 #endif /* HFS_COMPRESSION */
451
452
453 //
454 // This function gets the doc_tombstone structure for the
455 // current thread. If the thread doesn't have one, the
456 // structure is allocated.
457 //
458 static struct doc_tombstone *
459 get_uthread_doc_tombstone(void)
460 {
461 struct uthread *ut;
462 ut = get_bsdthread_info(current_thread());
463
464 if (ut->t_tombstone == NULL) {
465 ut->t_tombstone = kalloc(sizeof(struct doc_tombstone));
466 if (ut->t_tombstone) {
467 memset(ut->t_tombstone, 0, sizeof(struct doc_tombstone));
468 }
469 }
470
471 return ut->t_tombstone;
472 }
473
474 //
475 // This routine clears out the current tombstone for the
476 // current thread and if necessary passes the doc-id of
477 // the tombstone on to the dst_cnode.
478 //
479 // If the doc-id transfers to dst_cnode, we also generate
480 // a doc-id changed fsevent. Unlike all the other fsevents,
481 // doc-id changed events can only be generated here in HFS
482 // where we have the necessary info.
483 //
484 static void
485 clear_tombstone_docid(struct doc_tombstone *ut, struct hfsmount *hfsmp, struct cnode *dst_cnode)
486 {
487 uint32_t old_id = ut->t_lastop_document_id;
488
489 ut->t_lastop_document_id = 0;
490 ut->t_lastop_parent = NULL;
491 ut->t_lastop_parent_vid = 0;
492 ut->t_lastop_filename[0] = '\0';
493
494 //
495 // If the lastop item is still the same and needs to be cleared,
496 // clear it.
497 //
498 if (dst_cnode && old_id && ut->t_lastop_item && vnode_vid(ut->t_lastop_item) == ut->t_lastop_item_vid) {
499 //
500 // clear the document_id from the file that used to have it.
501 // XXXdbg - we need to lock the other vnode and make sure to
502 // update it on disk.
503 //
504 struct cnode *ocp = VTOC(ut->t_lastop_item);
505 struct FndrExtendedFileInfo *ofip = (struct FndrExtendedFileInfo *)((char *)&ocp->c_attr.ca_finderinfo + 16);
506
507 // printf("clearing doc-id from ino %d\n", ocp->c_desc.cd_cnid);
508 ofip->document_id = 0;
509 ocp->c_bsdflags &= ~UF_TRACKED;
510 ocp->c_flag |= C_MODIFIED | C_FORCEUPDATE; // mark it dirty
511 /* cat_update(hfsmp, &ocp->c_desc, &ocp->c_attr, NULL, NULL); */
512
513 }
514
515 #if CONFIG_FSE
516 if (dst_cnode && old_id) {
517 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&dst_cnode->c_attr.ca_finderinfo + 16);
518
519 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
520 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
521 FSE_ARG_INO, (ino64_t)ut->t_lastop_fileid, // src inode #
522 FSE_ARG_INO, (ino64_t)dst_cnode->c_fileid, // dst inode #
523 FSE_ARG_INT32, (uint32_t)fip->document_id,
524 FSE_ARG_DONE);
525 }
526 #endif
527 // last, clear these now that we're all done
528 ut->t_lastop_item = NULL;
529 ut->t_lastop_fileid = 0;
530 ut->t_lastop_item_vid = 0;
531 }
532
533
534 //
535 // This function is used to filter out operations on temp
536 // filenames. We have to filter out operations on certain
537 // temp filenames to work-around questionable application
538 // behavior from apps like Autocad that perform unusual
539 // sequences of file system operations for a "safe save".
540 static int
541 is_ignorable_temp_name(const char *nameptr, int len)
542 {
543 if (len == 0) {
544 len = strlen(nameptr);
545 }
546
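	/* Names beginning with "atmp" or ending in ".bak" / ".tmp" are treated as ignorable temporaries. */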
547 if ( strncmp(nameptr, "atmp", 4) == 0
548 || (len > 4 && strncmp(nameptr+len-4, ".bak", 4) == 0)
549 || (len > 4 && strncmp(nameptr+len-4, ".tmp", 4) == 0)) {
550 return 1;
551 }
552
553 return 0;
554 }
555
556 //
557 // Decide if we need to save a tombstone or not. Normally we always
558 // save a tombstone - but if there already is one and the name we're
559 // given is an ignorable name, then we will not save a tombstone.
560 //
561 static int
562 should_save_docid_tombstone(struct doc_tombstone *ut, struct vnode *vp, struct componentname *cnp)
563 {
564 if (cnp->cn_nameptr == NULL) {
565 return 0;
566 }
567
568 if (ut->t_lastop_document_id && ut->t_lastop_item == vp && is_ignorable_temp_name(cnp->cn_nameptr, cnp->cn_namelen)) {
569 return 0;
570 }
571
572 return 1;
573 }
574
575
576 //
577 // This function saves a tombstone for the given vnode and name. The
578 // tombstone represents the parent directory and name where the document
579 // used to live and the document-id of that file. This info is recorded
580 // in the doc_tombstone structure hanging off the uthread (which assumes
581 // that all safe-save operations happen on the same thread).
582 //
583 // If later on the same parent/name combo comes back into existence then
584 // we'll preserve the doc-id from this vnode onto the new vnode.
585 //
586 static void
587 save_tombstone(struct hfsmount *hfsmp, struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int for_unlink)
588 {
589 struct cnode *cp = VTOC(vp);
590 struct doc_tombstone *ut;
591 ut = get_uthread_doc_tombstone();
592
593 if (for_unlink && vp->v_type == VREG && cp->c_linkcount > 1) {
594 //
595 // a regular file that is being unlinked and that is also
596 // hardlinked should not clear the UF_TRACKED state or
597 // mess with the tombstone because somewhere else in the
598 // file system the file is still alive.
599 //
600 return;
601 }
602
603 ut->t_lastop_parent = dvp;
604 ut->t_lastop_parent_vid = vnode_vid(dvp);
605 ut->t_lastop_fileid = cp->c_fileid;
606 if (for_unlink) {
607 ut->t_lastop_item = NULL;
608 ut->t_lastop_item_vid = 0;
609 } else {
610 ut->t_lastop_item = vp;
611 ut->t_lastop_item_vid = vnode_vid(vp);
612 }
613
614 strlcpy((char *)&ut->t_lastop_filename[0], cnp->cn_nameptr, sizeof(ut->t_lastop_filename));
615
616 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
617 ut->t_lastop_document_id = fip->document_id;
618
619 if (for_unlink) {
620 // clear this so it's never returned again
621 fip->document_id = 0;
622 cp->c_bsdflags &= ~UF_TRACKED;
623
624 if (ut->t_lastop_document_id) {
625 (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
626
627 #if CONFIG_FSE
628 // this event is more of a "pending-delete"
629 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
630 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
631 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
632 FSE_ARG_INO, (ino64_t)0, // dst inode #
633 FSE_ARG_INT32, ut->t_lastop_document_id, // document id
634 FSE_ARG_DONE);
635 #endif
636 }
637 }
638 }
639
640
641 /*
642 * Open a file/directory.
643 */
644 int
645 hfs_vnop_open(struct vnop_open_args *ap)
646 {
647 struct vnode *vp = ap->a_vp;
648 struct filefork *fp;
649 struct timeval tv;
650 int error;
651 static int past_bootup = 0;
652 struct cnode *cp = VTOC(vp);
653 struct hfsmount *hfsmp = VTOHFS(vp);
654
655 #if HFS_COMPRESSION
656 if (ap->a_mode & FWRITE) {
657 /* open for write */
658 if ( hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
659 /* opening a compressed file for write, so convert it to decompressed */
660 struct vnode *data_vp = NULL;
661 error = hfs_ref_data_vp(cp, &data_vp, 1); /* 1 == don't take the cnode lock */
662 if (0 == error) {
663 if (data_vp) {
664 error = decmpfs_decompress_file(data_vp, VTOCMP(data_vp), -1, 1, 0);
665 vnode_rele(data_vp);
666 } else {
667 error = EINVAL;
668 }
669 }
670 if (error != 0)
671 return error;
672 }
673 } else {
674 /* open for read */
675 if (hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
676 if (VNODE_IS_RSRC(vp)) {
677 /* opening the resource fork of a compressed file, so nothing to do */
678 } else {
679 /* opening a compressed file for read, make sure it validates */
680 error = decmpfs_validate_compressed_file(vp, VTOCMP(vp));
681 if (error != 0)
682 return error;
683 }
684 }
685 }
686 #endif
687
688 /*
689 * Files marked append-only must be opened for appending.
690 */
691 if ((cp->c_bsdflags & APPEND) && !vnode_isdir(vp) &&
692 (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
693 return (EPERM);
694
695 if (vnode_isreg(vp) && !UBCINFOEXISTS(vp))
696 return (EBUSY); /* file is in use by the kernel */
697
698 /* Don't allow journal to be opened externally. */
699 if (hfs_is_journal_file(hfsmp, cp))
700 return (EPERM);
701
702 if ((hfsmp->hfs_flags & HFS_READ_ONLY) ||
703 (hfsmp->jnl == NULL) ||
704 #if NAMEDSTREAMS
705 !vnode_isreg(vp) || vnode_isinuse(vp, 0) || vnode_isnamedstream(vp)) {
706 #else
707 !vnode_isreg(vp) || vnode_isinuse(vp, 0)) {
708 #endif
709 return (0);
710 }
711
712 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
713 return (error);
714
715 #if QUOTA
716 /* If we're going to write to the file, initialize quotas. */
717 if ((ap->a_mode & FWRITE) && (hfsmp->hfs_flags & HFS_QUOTAS))
718 (void)hfs_getinoquota(cp);
719 #endif /* QUOTA */
720
721 /*
722 * On the first (non-busy) open of a fragmented
723 	 * file, attempt to de-frag it (if it's less than 20 MB).
724 */
725 fp = VTOF(vp);
726 if (fp->ff_blocks &&
727 fp->ff_extents[7].blockCount != 0 &&
728 fp->ff_size <= (20 * 1024 * 1024)) {
729 int no_mods = 0;
730 struct timeval now;
731 /*
732 * Wait until system bootup is done (3 min).
733 * And don't relocate a file that's been modified
734 * within the past minute -- this can lead to
735 * system thrashing.
736 */
737
738 if (!past_bootup) {
739 microuptime(&tv);
740 if (tv.tv_sec > (60*3)) {
741 past_bootup = 1;
742 }
743 }
744
745 microtime(&now);
746 if ((now.tv_sec - cp->c_mtime) > 60) {
747 no_mods = 1;
748 }
749
750 if (past_bootup && no_mods) {
751 (void) hfs_relocate(vp, hfsmp->nextAllocation + 4096,
752 vfs_context_ucred(ap->a_context),
753 vfs_context_proc(ap->a_context));
754 }
755 }
756
757 hfs_unlock(cp);
758
759 return (0);
760 }
761
762
763 /*
764 * Close a file/directory.
765 */
766 int
767 hfs_vnop_close(ap)
768 struct vnop_close_args /* {
769 struct vnode *a_vp;
770 int a_fflag;
771 vfs_context_t a_context;
772 } */ *ap;
773 {
774 register struct vnode *vp = ap->a_vp;
775 register struct cnode *cp;
776 struct proc *p = vfs_context_proc(ap->a_context);
777 struct hfsmount *hfsmp;
778 int busy;
779 int tooktrunclock = 0;
780 int knownrefs = 0;
781
782 if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0)
783 return (0);
784 cp = VTOC(vp);
785 hfsmp = VTOHFS(vp);
786
787 /*
788 * If the rsrc fork is a named stream, it can cause the data fork to
789 * stay around, preventing de-allocation of these blocks.
790 * Do checks for truncation on close. Purge extra extents if they exist.
791 * Make sure the vp is not a directory, and that it has a resource fork,
792 * and that resource fork is also a named stream.
793 */
794
795 if ((vp->v_type == VREG) && (cp->c_rsrc_vp)
796 && (vnode_isnamedstream(cp->c_rsrc_vp))) {
797 uint32_t blks;
798
799 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
800 /*
801 * If there are extra blocks and there are only 2 refs on
802 * this vp (ourselves + rsrc fork holding ref on us), go ahead
803 * and try to truncate.
804 */
805 if ((blks < VTOF(vp)->ff_blocks) && (!vnode_isinuse(vp, 2))) {
806 // release cnode lock; must acquire truncate lock BEFORE cnode lock
807 hfs_unlock(cp);
808
809 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
810 tooktrunclock = 1;
811
812 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
813 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
814 // bail out if we can't re-acquire cnode lock
815 return 0;
816 }
817 // now re-test to make sure it's still valid
818 if (cp->c_rsrc_vp) {
819 knownrefs = 1 + vnode_isnamedstream(cp->c_rsrc_vp);
820 if (!vnode_isinuse(vp, knownrefs)){
821 // now we can truncate the file, if necessary
822 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
823 if (blks < VTOF(vp)->ff_blocks){
824 (void) hfs_truncate(vp, VTOF(vp)->ff_size, IO_NDELAY, 0, 0, ap->a_context);
825 }
826 }
827 }
828 }
829 }
830
831
832 // if we froze the fs and we're exiting, then "thaw" the fs
833 if (hfsmp->hfs_freezing_proc == p && proc_exiting(p)) {
834 hfsmp->hfs_freezing_proc = NULL;
835 hfs_unlock_global (hfsmp);
836 lck_rw_unlock_exclusive(&hfsmp->hfs_insync);
837 }
838
839 busy = vnode_isinuse(vp, 1);
840
841 if (busy) {
842 hfs_touchtimes(VTOHFS(vp), cp);
843 }
844 if (vnode_isdir(vp)) {
845 hfs_reldirhints(cp, busy);
846 } else if (vnode_issystem(vp) && !busy) {
847 vnode_recycle(vp);
848 }
849
850 if (tooktrunclock){
851 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
852 }
853 hfs_unlock(cp);
854
855 if (ap->a_fflag & FWASWRITTEN) {
856 hfs_sync_ejectable(hfsmp);
857 }
858
859 return (0);
860 }
861
862 /*
863 * Get basic attributes.
864 */
865 int
866 hfs_vnop_getattr(struct vnop_getattr_args *ap)
867 {
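	/* Attribute sets used below: the timestamps that may need a lazy update before
	 * being returned, and the subset needed by the vnode_authorize() fast path. */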
868 #define VNODE_ATTR_TIMES \
869 (VNODE_ATTR_va_access_time|VNODE_ATTR_va_change_time|VNODE_ATTR_va_modify_time)
870 #define VNODE_ATTR_AUTH \
871 (VNODE_ATTR_va_mode | VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | \
872 VNODE_ATTR_va_flags | VNODE_ATTR_va_acl)
873
874 struct vnode *vp = ap->a_vp;
875 struct vnode_attr *vap = ap->a_vap;
876 struct vnode *rvp = NULLVP;
877 struct hfsmount *hfsmp;
878 struct cnode *cp;
879 uint64_t data_size;
880 enum vtype v_type;
881 int error = 0;
882 cp = VTOC(vp);
883
884 #if HFS_COMPRESSION
885 /* we need to inspect the decmpfs state of the file before we take the hfs cnode lock */
886 int compressed = 0;
887 int hide_size = 0;
888 off_t uncompressed_size = -1;
889 if (VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_alloc) || VATTR_IS_ACTIVE(vap, va_data_alloc) || VATTR_IS_ACTIVE(vap, va_total_size)) {
890 /* we only care about whether the file is compressed if asked for the uncompressed size */
891 if (VNODE_IS_RSRC(vp)) {
892 /* if it's a resource fork, decmpfs may want us to hide the size */
893 hide_size = hfs_hides_rsrc(ap->a_context, cp, 0);
894 } else {
895 /* if it's a data fork, we need to know if it was compressed so we can report the uncompressed size */
896 compressed = hfs_file_is_compressed(cp, 0);
897 }
898 if ((VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_size))) {
899 // if it's compressed
900 if (compressed || (!VNODE_IS_RSRC(vp) && cp->c_decmp && cp->c_decmp->cmp_type >= CMP_MAX)) {
901 if (0 != hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &uncompressed_size, 0)) {
902 /* failed to get the uncompressed size, we'll check for this later */
903 uncompressed_size = -1;
904 } else {
905 // fake that it's compressed
906 compressed = 1;
907 }
908 }
909 }
910 }
911 #endif
912
913 /*
914 * Shortcut for vnode_authorize path. Each of the attributes
915 * in this set is updated atomically so we don't need to take
916 * the cnode lock to access them.
917 */
918 if ((vap->va_active & ~VNODE_ATTR_AUTH) == 0) {
919 /* Make sure file still exists. */
920 if (cp->c_flag & C_NOEXISTS)
921 return (ENOENT);
922
923 vap->va_uid = cp->c_uid;
924 vap->va_gid = cp->c_gid;
925 vap->va_mode = cp->c_mode;
926 vap->va_flags = cp->c_bsdflags;
927 vap->va_supported |= VNODE_ATTR_AUTH & ~VNODE_ATTR_va_acl;
928
929 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
930 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
931 VATTR_SET_SUPPORTED(vap, va_acl);
932 }
933
934 return (0);
935 }
936
937 hfsmp = VTOHFS(vp);
938 v_type = vnode_vtype(vp);
939 /*
940 * If time attributes are requested and we have cnode times
941 * that require updating, then acquire an exclusive lock on
942 * the cnode before updating the times. Otherwise we can
943 * just acquire a shared lock.
944 */
945 if ((vap->va_active & VNODE_ATTR_TIMES) &&
946 (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime)) {
947 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
948 return (error);
949 hfs_touchtimes(hfsmp, cp);
950 }
951 else {
952 if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT)))
953 return (error);
954 }
955
956 if (v_type == VDIR) {
957 data_size = (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE;
958
959 if (VATTR_IS_ACTIVE(vap, va_nlink)) {
960 int nlink;
961
962 /*
963 			 * For directories, the va_nlink is essentially a count
964 * of the ".." references to a directory plus the "."
965 * reference and the directory itself. So for HFS+ this
966 * becomes the sub-directory count plus two.
967 *
968 * In the absence of a sub-directory count we use the
969 * directory's item count. This will be too high in
970 * most cases since it also includes files.
971 */
972 if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
973 (cp->c_attr.ca_recflags & kHFSHasFolderCountMask))
974 nlink = cp->c_attr.ca_dircount; /* implied ".." entries */
975 else
976 nlink = cp->c_entries;
977
978 /* Account for ourself and our "." entry */
979 nlink += 2;
980 /* Hide our private directories. */
981 if (cp->c_cnid == kHFSRootFolderID) {
982 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0) {
983 --nlink;
984 }
985 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0) {
986 --nlink;
987 }
988 }
989 VATTR_RETURN(vap, va_nlink, (u_int64_t)nlink);
990 }
991 if (VATTR_IS_ACTIVE(vap, va_nchildren)) {
992 int entries;
993
994 entries = cp->c_entries;
995 /* Hide our private files and directories. */
996 if (cp->c_cnid == kHFSRootFolderID) {
997 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0)
998 --entries;
999 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0)
1000 --entries;
1001 if (hfsmp->jnl || ((hfsmp->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))
1002 entries -= 2; /* hide the journal files */
1003 }
1004 VATTR_RETURN(vap, va_nchildren, entries);
1005 }
1006 /*
1007 * The va_dirlinkcount is the count of real directory hard links.
1008 		 * (i.e. it's not the sum of the implied "." and ".." references)
1009 */
1010 if (VATTR_IS_ACTIVE(vap, va_dirlinkcount)) {
1011 VATTR_RETURN(vap, va_dirlinkcount, (uint32_t)cp->c_linkcount);
1012 }
1013 } else /* !VDIR */ {
1014 data_size = VCTOF(vp, cp)->ff_size;
1015
1016 VATTR_RETURN(vap, va_nlink, (u_int64_t)cp->c_linkcount);
1017 if (VATTR_IS_ACTIVE(vap, va_data_alloc)) {
1018 u_int64_t blocks;
1019
1020 #if HFS_COMPRESSION
1021 if (hide_size) {
1022 VATTR_RETURN(vap, va_data_alloc, 0);
1023 } else if (compressed) {
1024 /* for compressed files, we report all allocated blocks as belonging to the data fork */
1025 blocks = cp->c_blocks;
1026 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
1027 }
1028 else
1029 #endif
1030 {
1031 blocks = VCTOF(vp, cp)->ff_blocks;
1032 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
1033 }
1034 }
1035 }
1036
1037 /* conditional because 64-bit arithmetic can be expensive */
1038 if (VATTR_IS_ACTIVE(vap, va_total_size)) {
1039 if (v_type == VDIR) {
1040 VATTR_RETURN(vap, va_total_size, (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE);
1041 } else {
1042 u_int64_t total_size = ~0ULL;
1043 struct cnode *rcp;
1044 #if HFS_COMPRESSION
1045 if (hide_size) {
1046 /* we're hiding the size of this file, so just return 0 */
1047 total_size = 0;
1048 } else if (compressed) {
1049 if (uncompressed_size == -1) {
1050 /*
1051 * We failed to get the uncompressed size above,
1052 * so we'll fall back to the standard path below
1053 * since total_size is still -1
1054 */
1055 } else {
1056 /* use the uncompressed size we fetched above */
1057 total_size = uncompressed_size;
1058 }
1059 }
1060 #endif
1061 if (total_size == ~0ULL) {
1062 if (cp->c_datafork) {
1063 total_size = cp->c_datafork->ff_size;
1064 }
1065
1066 if (cp->c_blocks - VTOF(vp)->ff_blocks) {
1067 /* We deal with rsrc fork vnode iocount at the end of the function */
1068 error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE, FALSE);
1069 if (error) {
1070 /*
1071 * Note that we call hfs_vgetrsrc with error_on_unlinked
1072 * set to FALSE. This is because we may be invoked via
1073 * fstat() on an open-unlinked file descriptor and we must
1074 * continue to support access to the rsrc fork until it disappears.
1075 * The code at the end of this function will be
1076 * responsible for releasing the iocount generated by
1077 * hfs_vgetrsrc. This is because we can't drop the iocount
1078 * without unlocking the cnode first.
1079 */
1080 goto out;
1081 }
1082
1083 rcp = VTOC(rvp);
1084 if (rcp && rcp->c_rsrcfork) {
1085 total_size += rcp->c_rsrcfork->ff_size;
1086 }
1087 }
1088 }
1089
1090 VATTR_RETURN(vap, va_total_size, total_size);
1091 }
1092 }
1093 if (VATTR_IS_ACTIVE(vap, va_total_alloc)) {
1094 if (v_type == VDIR) {
1095 VATTR_RETURN(vap, va_total_alloc, 0);
1096 } else {
1097 VATTR_RETURN(vap, va_total_alloc, (u_int64_t)cp->c_blocks * (u_int64_t)hfsmp->blockSize);
1098 }
1099 }
1100
1101 /*
1102 * If the VFS wants extended security data, and we know that we
1103 * don't have any (because it never told us it was setting any)
1104 * then we can return the supported bit and no data. If we do
1105 * have extended security, we can just leave the bit alone and
1106 * the VFS will use the fallback path to fetch it.
1107 */
1108 if (VATTR_IS_ACTIVE(vap, va_acl)) {
1109 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
1110 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
1111 VATTR_SET_SUPPORTED(vap, va_acl);
1112 }
1113 }
1114 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
1115 /* Access times are lazily updated, get current time if needed */
1116 if (cp->c_touch_acctime) {
1117 struct timeval tv;
1118
1119 microtime(&tv);
1120 vap->va_access_time.tv_sec = tv.tv_sec;
1121 } else {
1122 vap->va_access_time.tv_sec = cp->c_atime;
1123 }
1124 vap->va_access_time.tv_nsec = 0;
1125 VATTR_SET_SUPPORTED(vap, va_access_time);
1126 }
1127 vap->va_create_time.tv_sec = cp->c_itime;
1128 vap->va_create_time.tv_nsec = 0;
1129 vap->va_modify_time.tv_sec = cp->c_mtime;
1130 vap->va_modify_time.tv_nsec = 0;
1131 vap->va_change_time.tv_sec = cp->c_ctime;
1132 vap->va_change_time.tv_nsec = 0;
1133 vap->va_backup_time.tv_sec = cp->c_btime;
1134 vap->va_backup_time.tv_nsec = 0;
1135
1136 /* See if we need to emit the date added field to the user */
1137 if (VATTR_IS_ACTIVE(vap, va_addedtime)) {
1138 u_int32_t dateadded = hfs_get_dateadded (cp);
1139 if (dateadded) {
1140 vap->va_addedtime.tv_sec = dateadded;
1141 vap->va_addedtime.tv_nsec = 0;
1142 VATTR_SET_SUPPORTED (vap, va_addedtime);
1143 }
1144 }
1145
1146 /* XXX is this really a good 'optimal I/O size'? */
1147 vap->va_iosize = hfsmp->hfs_logBlockSize;
1148 vap->va_uid = cp->c_uid;
1149 vap->va_gid = cp->c_gid;
1150 vap->va_mode = cp->c_mode;
1151 vap->va_flags = cp->c_bsdflags;
1152
1153 /*
1154 * Exporting file IDs from HFS Plus:
1155 *
1156 * For "normal" files the c_fileid is the same value as the
1157 * c_cnid. But for hard link files, they are different - the
1158 * c_cnid belongs to the active directory entry (ie the link)
1159 * and the c_fileid is for the actual inode (ie the data file).
1160 *
1161 * The stat call (getattr) uses va_fileid and the Carbon APIs,
1162 * which are hardlink-ignorant, will ask for va_linkid.
1163 */
1164 vap->va_fileid = (u_int64_t)cp->c_fileid;
1165 /*
1166 * We need to use the origin cache for both hardlinked files
1167 * and directories. Hardlinked directories have multiple cnids
1168 * and parents (one per link). Hardlinked files also have their
1169 * own parents and link IDs separate from the indirect inode number.
1170 * If we don't use the cache, we could end up vending the wrong ID
1171 * because the cnode will only reflect the link that was looked up most recently.
1172 */
1173 if (cp->c_flag & C_HARDLINK) {
1174 vap->va_linkid = (u_int64_t)hfs_currentcnid(cp);
1175 vap->va_parentid = (u_int64_t)hfs_currentparent(cp);
1176 } else {
1177 vap->va_linkid = (u_int64_t)cp->c_cnid;
1178 vap->va_parentid = (u_int64_t)cp->c_parentcnid;
1179 }
1180 vap->va_fsid = hfsmp->hfs_raw_dev;
1181 vap->va_filerev = 0;
1182 vap->va_encoding = cp->c_encoding;
1183 vap->va_rdev = (v_type == VBLK || v_type == VCHR) ? cp->c_rdev : 0;
1184 #if HFS_COMPRESSION
1185 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1186 if (hide_size)
1187 vap->va_data_size = 0;
1188 else if (compressed) {
1189 if (uncompressed_size == -1) {
1190 /* failed to get the uncompressed size above, so just return data_size */
1191 vap->va_data_size = data_size;
1192 } else {
1193 /* use the uncompressed size we fetched above */
1194 vap->va_data_size = uncompressed_size;
1195 }
1196 } else
1197 vap->va_data_size = data_size;
1198 // vap->va_supported |= VNODE_ATTR_va_data_size;
1199 VATTR_SET_SUPPORTED(vap, va_data_size);
1200 }
1201 #else
1202 vap->va_data_size = data_size;
1203 vap->va_supported |= VNODE_ATTR_va_data_size;
1204 #endif
1205
1206 if (VATTR_IS_ACTIVE(vap, va_gen)) {
1207 if (UBCINFOEXISTS(vp) && (vp->v_ubcinfo->ui_flags & UI_ISMAPPED)) {
1208 			/* While the file is mmapped the generation count is invalid.
1209 			 * However, bump the value so that the write-gen counter
1210 			 * will be different once the file is unmapped (since the
1211 			 * pageouts may not yet have happened by the time it is unmapped).
1212 */
1213 if (vp->v_ubcinfo->ui_flags & UI_MAPPEDWRITE) {
1214 hfs_incr_gencount (cp);
1215 }
1216 vap->va_gen = 0;
1217 } else {
1218 vap->va_gen = hfs_get_gencount(cp);
1219 }
1220
1221 VATTR_SET_SUPPORTED(vap, va_gen);
1222 }
1223 if (VATTR_IS_ACTIVE(vap, va_document_id)) {
1224 vap->va_document_id = hfs_get_document_id(cp);
1225 VATTR_SET_SUPPORTED(vap, va_document_id);
1226 }
1227
1228 /* Mark them all at once instead of individual VATTR_SET_SUPPORTED calls. */
1229 vap->va_supported |= VNODE_ATTR_va_create_time | VNODE_ATTR_va_modify_time |
1230 VNODE_ATTR_va_change_time| VNODE_ATTR_va_backup_time |
1231 VNODE_ATTR_va_iosize | VNODE_ATTR_va_uid |
1232 VNODE_ATTR_va_gid | VNODE_ATTR_va_mode |
1233 VNODE_ATTR_va_flags |VNODE_ATTR_va_fileid |
1234 VNODE_ATTR_va_linkid | VNODE_ATTR_va_parentid |
1235 VNODE_ATTR_va_fsid | VNODE_ATTR_va_filerev |
1236 VNODE_ATTR_va_encoding | VNODE_ATTR_va_rdev;
1237
1238 	/* If this is the root, let VFS find out the mount name, which
1239 	 * may be different from the real name. Otherwise, we need to take care
1240 	 * of hardlinked files, which need to be looked up if necessary.
1241 */
1242 if (VATTR_IS_ACTIVE(vap, va_name) && (cp->c_cnid != kHFSRootFolderID)) {
1243 struct cat_desc linkdesc;
1244 int lockflags;
1245 int uselinkdesc = 0;
1246 cnid_t nextlinkid = 0;
1247 cnid_t prevlinkid = 0;
1248
1249 /* Get the name for ATTR_CMN_NAME. We need to take special care for hardlinks
1250 		 * here because the info for the link ID requested by getattrlist may be
1251 * different than what's currently in the cnode. This is because the cnode
1252 * will be filled in with the information for the most recent link ID that went
1253 * through namei/lookup(). If there are competing lookups for hardlinks that point
1254 * to the same inode, one (or more) getattrlists could be vended incorrect name information.
1255 * Also, we need to beware of open-unlinked files which could have a namelen of 0.
1256 */
1257
1258 if ((cp->c_flag & C_HARDLINK) &&
1259 ((cp->c_desc.cd_namelen == 0) || (vap->va_linkid != cp->c_cnid))) {
1260 /*
1261 * If we have no name and our link ID is the raw inode number, then we may
1262 * have an open-unlinked file. Go to the next link in this case.
1263 */
1264 if ((cp->c_desc.cd_namelen == 0) && (vap->va_linkid == cp->c_fileid)) {
1265 if ((error = hfs_lookup_siblinglinks(hfsmp, vap->va_linkid, &prevlinkid, &nextlinkid))){
1266 goto out;
1267 }
1268 }
1269 else {
1270 /* just use link obtained from vap above */
1271 nextlinkid = vap->va_linkid;
1272 }
1273
1274 /* We need to probe the catalog for the descriptor corresponding to the link ID
1275 * stored in nextlinkid. Note that we don't know if we have the exclusive lock
1276 * for the cnode here, so we can't just update the descriptor. Instead,
1277 * we should just store the descriptor's value locally and then use it to pass
1278 * out the name value as needed below.
1279 */
1280 if (nextlinkid){
1281 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
1282 error = cat_findname(hfsmp, nextlinkid, &linkdesc);
1283 hfs_systemfile_unlock(hfsmp, lockflags);
1284 if (error == 0) {
1285 uselinkdesc = 1;
1286 }
1287 }
1288 }
1289
1290 /* By this point, we've either patched up the name above and the c_desc
1291 * points to the correct data, or it already did, in which case we just proceed
1292 * by copying the name into the vap. Note that we will never set va_name to
1293 * supported if nextlinkid is never initialized. This could happen in the degenerate
1294 * case above involving the raw inode number, where it has no nextlinkid. In this case
1295 * we will simply not mark the name bit as supported.
1296 */
1297 if (uselinkdesc) {
1298 strlcpy(vap->va_name, (const char*) linkdesc.cd_nameptr, MAXPATHLEN);
1299 VATTR_SET_SUPPORTED(vap, va_name);
1300 cat_releasedesc(&linkdesc);
1301 }
1302 else if (cp->c_desc.cd_namelen) {
1303 strlcpy(vap->va_name, (const char*) cp->c_desc.cd_nameptr, MAXPATHLEN);
1304 VATTR_SET_SUPPORTED(vap, va_name);
1305 }
1306 }
1307
1308 out:
1309 hfs_unlock(cp);
1310 /*
1311 * We need to vnode_put the rsrc fork vnode only *after* we've released
1312 * the cnode lock, since vnode_put can trigger an inactive call, which
1313 * will go back into HFS and try to acquire a cnode lock.
1314 */
1315 if (rvp) {
1316 vnode_put (rvp);
1317 }
1318
1319 return (error);
1320 }
1321
1322 int
1323 hfs_vnop_setattr(ap)
1324 struct vnop_setattr_args /* {
1325 struct vnode *a_vp;
1326 struct vnode_attr *a_vap;
1327 vfs_context_t a_context;
1328 } */ *ap;
1329 {
1330 struct vnode_attr *vap = ap->a_vap;
1331 struct vnode *vp = ap->a_vp;
1332 struct cnode *cp = NULL;
1333 struct hfsmount *hfsmp;
1334 kauth_cred_t cred = vfs_context_ucred(ap->a_context);
1335 struct proc *p = vfs_context_proc(ap->a_context);
1336 int error = 0;
1337 uid_t nuid;
1338 gid_t ngid;
1339 time_t orig_ctime;
1340
1341 orig_ctime = VTOC(vp)->c_ctime;
1342
1343 #if HFS_COMPRESSION
1344 int decmpfs_reset_state = 0;
1345 /*
1346 	 * We call decmpfs_update_attributes even if the file is not compressed
1347 	 * because we want to update the incoming flags if the xattrs are invalid.
1348 */
1349 error = decmpfs_update_attributes(vp, vap);
1350 if (error)
1351 return error;
1352 #endif
1353 //
1354 // if this is not a size-changing setattr and it is not just
1355 // an atime update, then check for a snapshot.
1356 //
1357 if (!VATTR_IS_ACTIVE(vap, va_data_size) && !(vap->va_active == VNODE_ATTR_va_access_time)) {
1358 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_METADATA_MOD, NSPACE_REARM_NO_ARG);
1359 }
1360
1361 #if CONFIG_PROTECT
1362 if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
1363 return (error);
1364 }
1365 #endif /* CONFIG_PROTECT */
1366
1367 hfsmp = VTOHFS(vp);
1368
1369 /* Don't allow modification of the journal. */
1370 if (hfs_is_journal_file(hfsmp, VTOC(vp))) {
1371 return (EPERM);
1372 }
1373
1374 //
1375 // Check if we'll need a document_id and if so, get it before we lock the
1376 	// cnode to avoid any possible deadlock with the root vnode, which has
1377 // to get locked to get the document id
1378 //
1379 u_int32_t document_id=0;
1380 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & UF_TRACKED) && !(VTOC(vp)->c_bsdflags & UF_TRACKED)) {
1381 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&(VTOC(vp)->c_attr.ca_finderinfo) + 16);
1382 //
1383 // If the document_id is not set, get a new one. It will be set
1384 // on the file down below once we hold the cnode lock.
1385 //
1386 if (fip->document_id == 0) {
1387 if (hfs_generate_document_id(hfsmp, &document_id) != 0) {
1388 document_id = 0;
1389 }
1390 }
1391 }
1392
1393
1394 /*
1395 * File size change request.
1396 * We are guaranteed that this is not a directory, and that
1397 * the filesystem object is writeable.
1398 *
1399 * NOTE: HFS COMPRESSION depends on the data_size being set *before* the bsd flags are updated
1400 */
1401 VATTR_SET_SUPPORTED(vap, va_data_size);
1402 if (VATTR_IS_ACTIVE(vap, va_data_size) && !vnode_islnk(vp)) {
1403 #if HFS_COMPRESSION
1404 /* keep the compressed state locked until we're done truncating the file */
1405 decmpfs_cnode *dp = VTOCMP(vp);
1406 if (!dp) {
1407 /*
1408 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1409 * is filled in; we need a decmpfs_cnode to lock out decmpfs state changes
1410 * on this file while it's truncating
1411 */
1412 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1413 if (!dp) {
1414 /* failed to allocate a decmpfs_cnode */
1415 return ENOMEM; /* what should this be? */
1416 }
1417 }
1418
1419 check_for_tracked_file(vp, orig_ctime, vap->va_data_size == 0 ? NAMESPACE_HANDLER_TRUNCATE_OP|NAMESPACE_HANDLER_DELETE_OP : NAMESPACE_HANDLER_TRUNCATE_OP, NULL);
1420
1421 decmpfs_lock_compressed_data(dp, 1);
1422 if (hfs_file_is_compressed(VTOC(vp), 1)) {
1423 error = decmpfs_decompress_file(vp, dp, -1/*vap->va_data_size*/, 0, 1);
1424 if (error != 0) {
1425 decmpfs_unlock_compressed_data(dp, 1);
1426 return error;
1427 }
1428 }
1429 #endif
1430
1431 /* Take truncate lock before taking cnode lock. */
1432 hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
1433
1434 /* Perform the ubc_setsize before taking the cnode lock. */
1435 ubc_setsize(vp, vap->va_data_size);
1436
1437 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
1438 hfs_unlock_truncate(VTOC(vp), HFS_LOCK_DEFAULT);
1439 #if HFS_COMPRESSION
1440 decmpfs_unlock_compressed_data(dp, 1);
1441 #endif
1442 return (error);
1443 }
1444 cp = VTOC(vp);
1445
1446 error = hfs_truncate(vp, vap->va_data_size, vap->va_vaflags & 0xffff, 1, 0, ap->a_context);
1447
1448 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
1449 #if HFS_COMPRESSION
1450 decmpfs_unlock_compressed_data(dp, 1);
1451 #endif
1452 if (error)
1453 goto out;
1454 }
1455 if (cp == NULL) {
1456 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
1457 return (error);
1458 cp = VTOC(vp);
1459 }
1460
1461 /*
1462 * If it is just an access time update request by itself
1463 * we know the request is from kernel level code, and we
1464 * can delay it without being as worried about consistency.
1465 * This change speeds up mmaps, in the rare case that they
1466 * get caught behind a sync.
1467 */
1468
1469 if (vap->va_active == VNODE_ATTR_va_access_time) {
1470 cp->c_touch_acctime=TRUE;
1471 goto out;
1472 }
1473
1474
1475
1476 /*
1477 * Owner/group change request.
1478 * We are guaranteed that the new owner/group is valid and legal.
1479 */
1480 VATTR_SET_SUPPORTED(vap, va_uid);
1481 VATTR_SET_SUPPORTED(vap, va_gid);
1482 nuid = VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uid_t)VNOVAL;
1483 ngid = VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (gid_t)VNOVAL;
1484 if (((nuid != (uid_t)VNOVAL) || (ngid != (gid_t)VNOVAL)) &&
1485 ((error = hfs_chown(vp, nuid, ngid, cred, p)) != 0))
1486 goto out;
1487
1488 /*
1489 * Mode change request.
1490 * We are guaranteed that the mode value is valid and that in
1491 * conjunction with the owner and group, this change is legal.
1492 */
1493 VATTR_SET_SUPPORTED(vap, va_mode);
1494 if (VATTR_IS_ACTIVE(vap, va_mode) &&
1495 ((error = hfs_chmod(vp, (int)vap->va_mode, cred, p)) != 0))
1496 goto out;
1497
1498 /*
1499 * File flags change.
1500 * We are guaranteed that only flags allowed to change given the
1501 * current securelevel are being changed.
1502 */
1503 VATTR_SET_SUPPORTED(vap, va_flags);
1504 if (VATTR_IS_ACTIVE(vap, va_flags)) {
1505 u_int16_t *fdFlags;
1506
1507 #if HFS_COMPRESSION
1508 if ((cp->c_bsdflags ^ vap->va_flags) & UF_COMPRESSED) {
1509 /*
1510 * the UF_COMPRESSED was toggled, so reset our cached compressed state
1511 * but we don't want to actually do the update until we've released the cnode lock down below
1512 * NOTE: turning the flag off doesn't actually decompress the file, so that we can
1513 * turn off the flag and look at the "raw" file for debugging purposes
1514 */
1515 decmpfs_reset_state = 1;
1516 }
1517 #endif
1518 if ((vap->va_flags & UF_TRACKED) && !(cp->c_bsdflags & UF_TRACKED)) {
1519 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
1520
1521 //
1522 // we're marking this item UF_TRACKED. if the document_id is
1523 // not set, get a new one and put it on the file.
1524 //
1525 if (fip->document_id == 0) {
1526 if (document_id != 0) {
1527 // printf("SETATTR: assigning doc-id %d to %s (ino %d)\n", document_id, vp->v_name, cp->c_desc.cd_cnid);
1528 fip->document_id = (uint32_t)document_id;
1529 #if CONFIG_FSE
1530 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
1531 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
1532 FSE_ARG_INO, (ino64_t)0, // src inode #
1533 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
1534 FSE_ARG_INT32, document_id,
1535 FSE_ARG_DONE);
1536 #endif
1537 } else {
1538 // printf("hfs: could not acquire a new document_id for %s (ino %d)\n", vp->v_name, cp->c_desc.cd_cnid);
1539 }
1540 }
1541
1542 } else if (!(vap->va_flags & UF_TRACKED) && (cp->c_bsdflags & UF_TRACKED)) {
1543 //
1544 // UF_TRACKED is being cleared so clear the document_id
1545 //
1546 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
1547 if (fip->document_id) {
1548 // printf("SETATTR: clearing doc-id %d from %s (ino %d)\n", fip->document_id, vp->v_name, cp->c_desc.cd_cnid);
1549 #if CONFIG_FSE
1550 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
1551 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
1552 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
1553 FSE_ARG_INO, (ino64_t)0, // dst inode #
1554 FSE_ARG_INT32, fip->document_id, // document id
1555 FSE_ARG_DONE);
1556 #endif
1557 fip->document_id = 0;
1558 cp->c_bsdflags &= ~UF_TRACKED;
1559 }
1560 }
1561
1562 cp->c_bsdflags = vap->va_flags;
1563 cp->c_touch_chgtime = TRUE;
1564
1565
1566 /*
1567 * Mirror the UF_HIDDEN flag to the invisible bit of the Finder Info.
1568 *
1569 * The fdFlags for files and frFlags for folders are both 8 bytes
1570 * into the userInfo (the first 16 bytes of the Finder Info). They
1571 * are both 16-bit fields.
1572 */
1573 fdFlags = (u_int16_t *) &cp->c_finderinfo[8];
1574 if (vap->va_flags & UF_HIDDEN)
1575 *fdFlags |= OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1576 else
1577 *fdFlags &= ~OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1578 }
1579
1580 /*
1581 * Timestamp updates.
1582 */
1583 VATTR_SET_SUPPORTED(vap, va_create_time);
1584 VATTR_SET_SUPPORTED(vap, va_access_time);
1585 VATTR_SET_SUPPORTED(vap, va_modify_time);
1586 VATTR_SET_SUPPORTED(vap, va_backup_time);
1587 VATTR_SET_SUPPORTED(vap, va_change_time);
1588 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
1589 VATTR_IS_ACTIVE(vap, va_access_time) ||
1590 VATTR_IS_ACTIVE(vap, va_modify_time) ||
1591 VATTR_IS_ACTIVE(vap, va_backup_time)) {
1592 if (VATTR_IS_ACTIVE(vap, va_create_time))
1593 cp->c_itime = vap->va_create_time.tv_sec;
1594 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
1595 cp->c_atime = vap->va_access_time.tv_sec;
1596 cp->c_touch_acctime = FALSE;
1597 }
1598 if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
1599 cp->c_mtime = vap->va_modify_time.tv_sec;
1600 cp->c_touch_modtime = FALSE;
1601 cp->c_touch_chgtime = TRUE;
1602
1603 /*
1604 * The utimes system call can reset the modification
1605 * time but it doesn't know about HFS create times.
1606 * So we need to ensure that the creation time is
1607 * always at least as old as the modification time.
1608 */
1609 if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
1610 (cp->c_cnid != kHFSRootFolderID) &&
1611 (cp->c_mtime < cp->c_itime)) {
1612 cp->c_itime = cp->c_mtime;
1613 }
1614 }
1615 if (VATTR_IS_ACTIVE(vap, va_backup_time))
1616 cp->c_btime = vap->va_backup_time.tv_sec;
1617 cp->c_flag |= C_MODIFIED;
1618 }
1619
1620 /*
1621 * Set name encoding.
1622 */
1623 VATTR_SET_SUPPORTED(vap, va_encoding);
1624 if (VATTR_IS_ACTIVE(vap, va_encoding)) {
1625 cp->c_encoding = vap->va_encoding;
1626 hfs_setencodingbits(hfsmp, cp->c_encoding);
1627 }
1628
1629 if ((error = hfs_update(vp, TRUE)) != 0)
1630 goto out;
1631 out:
1632 if (cp) {
1633 /* Purge origin cache for cnode, since caller now has correct link ID for it
1634 * We purge it here since it was acquired for us during lookup, and we no longer need it.
1635 */
1636 if ((cp->c_flag & C_HARDLINK) && (vp->v_type != VDIR)){
1637 hfs_relorigin(cp, 0);
1638 }
1639
1640 hfs_unlock(cp);
1641 #if HFS_COMPRESSION
1642 if (decmpfs_reset_state) {
1643 /*
1644 * we've changed the UF_COMPRESSED flag, so reset the decmpfs state for this cnode
1645 * but don't do it while holding the hfs cnode lock
1646 */
1647 decmpfs_cnode *dp = VTOCMP(vp);
1648 if (!dp) {
1649 /*
1650 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1651 * is filled in; we need a decmpfs_cnode to prevent decmpfs state changes
1652 * on this file if it's locked
1653 */
1654 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1655 if (!dp) {
1656 /* failed to allocate a decmpfs_cnode */
1657 return ENOMEM; /* what should this be? */
1658 }
1659 }
1660 decmpfs_cnode_set_vnode_state(dp, FILE_TYPE_UNKNOWN, 0);
1661 }
1662 #endif
1663 }
1664 return (error);
1665 }
1666
1667
1668 /*
1669 * Change the mode on a file.
1670 * cnode must be locked before calling.
1671 */
1672 int
1673 hfs_chmod(struct vnode *vp, int mode, __unused kauth_cred_t cred, __unused struct proc *p)
1674 {
1675 register struct cnode *cp = VTOC(vp);
1676
1677 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1678 return (0);
1679
1680 // Don't allow modification of the journal or journal_info_block
1681 if (hfs_is_journal_file(VTOHFS(vp), cp)) {
1682 return EPERM;
1683 }
1684
1685 #if OVERRIDE_UNKNOWN_PERMISSIONS
1686 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS) {
1687 return (0);
1688 };
1689 #endif
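	/* Replace only the permission bits; preserve the file-type bits in c_mode. */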
1690 cp->c_mode &= ~ALLPERMS;
1691 cp->c_mode |= (mode & ALLPERMS);
1692 cp->c_touch_chgtime = TRUE;
1693 return (0);
1694 }
1695
1696
1697 int
1698 hfs_write_access(struct vnode *vp, kauth_cred_t cred, struct proc *p, Boolean considerFlags)
1699 {
1700 struct cnode *cp = VTOC(vp);
1701 int retval = 0;
1702 int is_member;
1703
1704 /*
1705 * Disallow write attempts on read-only file systems;
1706 * unless the file is a socket, fifo, or a block or
1707 * character device resident on the file system.
1708 */
1709 switch (vnode_vtype(vp)) {
1710 case VDIR:
1711 case VLNK:
1712 case VREG:
1713 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
1714 return (EROFS);
1715 break;
1716 default:
1717 break;
1718 }
1719
1720 /* If immutable bit set, nobody gets to write it. */
1721 if (considerFlags && (cp->c_bsdflags & IMMUTABLE))
1722 return (EPERM);
1723
1724 /* Otherwise, user id 0 always gets access. */
1725 if (!suser(cred, NULL))
1726 return (0);
1727
1728 /* Otherwise, check the owner. */
1729 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
1730 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
1731
1732 /* Otherwise, check the groups. */
1733 if (kauth_cred_ismember_gid(cred, cp->c_gid, &is_member) == 0 && is_member) {
1734 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
1735 }
1736
1737 /* Otherwise, check everyone else. */
1738 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
1739 }
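/*
 * Illustrative userland sketch (not part of this file, never compiled):
 * the same super-user -> owner -> group -> other precedence that
 * hfs_write_access() applies above, expressed against a struct stat.
 * The helper name can_write() is an assumption for the example, and the
 * immutable-flag and read-only-filesystem checks are omitted.
 */
#if 0	/* example only */
#include <sys/types.h>
#include <sys/stat.h>
#include <stdbool.h>

static bool
can_write(const struct stat *st, uid_t uid, const gid_t *groups, int ngroups)
{
	if (uid == 0)
		return true;				/* super-user always gets access */
	if (uid == st->st_uid)
		return (st->st_mode & S_IWUSR) != 0;	/* owner bit decides */
	for (int i = 0; i < ngroups; i++) {
		if (groups[i] == st->st_gid)
			return (st->st_mode & S_IWGRP) != 0;	/* group bit decides */
	}
	return (st->st_mode & S_IWOTH) != 0;		/* everyone else */
}
#endif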
1740
1741
1742 /*
1743 * Perform chown operation on cnode cp;
1744 * cnode must be locked prior to call.
1745 */
1746 int
1747 #if !QUOTA
1748 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, __unused kauth_cred_t cred,
1749 __unused struct proc *p)
1750 #else
1751 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred,
1752 __unused struct proc *p)
1753 #endif
1754 {
1755 register struct cnode *cp = VTOC(vp);
1756 uid_t ouid;
1757 gid_t ogid;
1758 #if QUOTA
1759 int error = 0;
1760 register int i;
1761 int64_t change;
1762 #endif /* QUOTA */
1763
1764 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1765 return (ENOTSUP);
1766
1767 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS)
1768 return (0);
1769
1770 if (uid == (uid_t)VNOVAL)
1771 uid = cp->c_uid;
1772 if (gid == (gid_t)VNOVAL)
1773 gid = cp->c_gid;
1774
1775 #if 0 /* we are guaranteed that this is already the case */
1776 /*
1777 * If we don't own the file, are trying to change the owner
1778 * of the file, or are not a member of the target group,
1779 * the caller must be superuser or the call fails.
1780 */
1781 if ((kauth_cred_getuid(cred) != cp->c_uid || uid != cp->c_uid ||
1782 (gid != cp->c_gid &&
1783 (kauth_cred_ismember_gid(cred, gid, &is_member) || !is_member))) &&
1784 (error = suser(cred, 0)))
1785 return (error);
1786 #endif
1787
1788 ogid = cp->c_gid;
1789 ouid = cp->c_uid;
1790 #if QUOTA
1791 if ((error = hfs_getinoquota(cp)))
1792 return (error);
1793 if (ouid == uid) {
1794 dqrele(cp->c_dquot[USRQUOTA]);
1795 cp->c_dquot[USRQUOTA] = NODQUOT;
1796 }
1797 if (ogid == gid) {
1798 dqrele(cp->c_dquot[GRPQUOTA]);
1799 cp->c_dquot[GRPQUOTA] = NODQUOT;
1800 }
1801
1802 /*
1803 * Eventually need to account for (fake) a block per directory
1804 * if (vnode_isdir(vp))
1805 * change = VTOHFS(vp)->blockSize;
1806 * else
1807 */
1808
1809 change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
1810 (void) hfs_chkdq(cp, -change, cred, CHOWN);
1811 (void) hfs_chkiq(cp, -1, cred, CHOWN);
1812 for (i = 0; i < MAXQUOTAS; i++) {
1813 dqrele(cp->c_dquot[i]);
1814 cp->c_dquot[i] = NODQUOT;
1815 }
1816 #endif /* QUOTA */
1817 cp->c_gid = gid;
1818 cp->c_uid = uid;
1819 #if QUOTA
1820 if ((error = hfs_getinoquota(cp)) == 0) {
1821 if (ouid == uid) {
1822 dqrele(cp->c_dquot[USRQUOTA]);
1823 cp->c_dquot[USRQUOTA] = NODQUOT;
1824 }
1825 if (ogid == gid) {
1826 dqrele(cp->c_dquot[GRPQUOTA]);
1827 cp->c_dquot[GRPQUOTA] = NODQUOT;
1828 }
1829 if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
1830 if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
1831 goto good;
1832 else
1833 (void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
1834 }
1835 for (i = 0; i < MAXQUOTAS; i++) {
1836 dqrele(cp->c_dquot[i]);
1837 cp->c_dquot[i] = NODQUOT;
1838 }
1839 }
1840 cp->c_gid = ogid;
1841 cp->c_uid = ouid;
1842 if (hfs_getinoquota(cp) == 0) {
1843 if (ouid == uid) {
1844 dqrele(cp->c_dquot[USRQUOTA]);
1845 cp->c_dquot[USRQUOTA] = NODQUOT;
1846 }
1847 if (ogid == gid) {
1848 dqrele(cp->c_dquot[GRPQUOTA]);
1849 cp->c_dquot[GRPQUOTA] = NODQUOT;
1850 }
1851 (void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
1852 (void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
1853 (void) hfs_getinoquota(cp);
1854 }
1855 return (error);
1856 good:
1857 if (hfs_getinoquota(cp))
1858 panic("hfs_chown: lost quota");
1859 #endif /* QUOTA */
1860
1861
1862 /*
1863 * According to the SUSv3 Standard, chown() shall mark
1864 * for update the st_ctime field of the file.
1865 * (No exceptions mentioned.)
1866 */
1867 cp->c_touch_chgtime = TRUE;
1868 return (0);
1869 }
1870
1871
1872 /*
1873 * hfs_vnop_exchange:
1874 *
1875 * Inputs:
1876 * 'from' vnode/cnode
1877 * 'to' vnode/cnode
1878 * options flag bits
1879 * vfs_context
1880 *
1881 * Discussion:
1882 * hfs_vnop_exchange is used to service the exchangedata(2) system call.
1883 * Per the requirements of that system call, this function "swaps" some
1884 * of the information that lives in one catalog record for some that
1885 * lives in another. Note that not everything is swapped; in particular,
1886 * the extent information stored in each cnode is kept local to that
1887 * cnode. This allows existing file descriptor references to continue
1888 * to operate on the same content, regardless of the location in the
1889 * namespace that the file may have moved to. See inline comments
1890 * in the function for more information.
1891 */
1892 int
1893 hfs_vnop_exchange(ap)
1894 struct vnop_exchange_args /* {
1895 struct vnode *a_fvp;
1896 struct vnode *a_tvp;
1897 int a_options;
1898 vfs_context_t a_context;
1899 } */ *ap;
1900 {
1901 struct vnode *from_vp = ap->a_fvp;
1902 struct vnode *to_vp = ap->a_tvp;
1903 struct cnode *from_cp;
1904 struct cnode *to_cp;
1905 struct hfsmount *hfsmp;
1906 struct cat_desc tempdesc;
1907 struct cat_attr tempattr;
1908 const unsigned char *from_nameptr;
1909 const unsigned char *to_nameptr;
1910 char from_iname[32];
1911 char to_iname[32];
1912 uint32_t to_flag_special;
1913 uint32_t from_flag_special;
1914 cnid_t from_parid;
1915 cnid_t to_parid;
1916 int lockflags;
1917 int error = 0, started_tr = 0, got_cookie = 0;
1918 cat_cookie_t cookie;
1919 time_t orig_from_ctime, orig_to_ctime;
1920
1921 /*
1922 * VFS does the following checks:
1923 * 1. Validate that both are files.
1924 * 2. Validate that both are on the same mount.
1925 * 3. Validate that they're not the same vnode.
1926 */
1927
1928 orig_from_ctime = VTOC(from_vp)->c_ctime;
1929 orig_to_ctime = VTOC(to_vp)->c_ctime;
1930
1931
1932 #if CONFIG_PROTECT
1933 /*
1934 * Do not allow exchangedata/F_MOVEDATAEXTENTS on data-protected filesystems
1935 * because the EAs will not be swapped. As a result, the persistent keys would not
1936 * match and the files will be garbage.
1937 */
1938 if (cp_fs_protected (vnode_mount(from_vp))) {
1939 return EINVAL;
1940 }
1941 #endif
1942
1943 #if HFS_COMPRESSION
1944 if ( hfs_file_is_compressed(VTOC(from_vp), 0) ) {
1945 if ( 0 != ( error = decmpfs_decompress_file(from_vp, VTOCMP(from_vp), -1, 0, 1) ) ) {
1946 return error;
1947 }
1948 }
1949
1950 if ( hfs_file_is_compressed(VTOC(to_vp), 0) ) {
1951 if ( 0 != ( error = decmpfs_decompress_file(to_vp, VTOCMP(to_vp), -1, 0, 1) ) ) {
1952 return error;
1953 }
1954 }
1955 #endif // HFS_COMPRESSION
1956
1957 /*
1958 * Normally, we want to notify the user handlers about the event,
1959 * except if it's a handler driving the event.
1960 */
1961 if ((ap->a_options & FSOPT_EXCHANGE_DATA_ONLY) == 0) {
1962 check_for_tracked_file(from_vp, orig_from_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
1963 check_for_tracked_file(to_vp, orig_to_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
1964 } else {
1965 /*
1966 * We're doing a data-swap.
1967 * Take the truncate lock/cnode lock, then verify there are no mmap references.
1968 * Issue a hfs_filedone to flush out all of the remaining state for this file.
1969 * Allow the rest of the codeflow to re-acquire the cnode locks in order.
1970 */
1971
1972 hfs_lock_truncate (VTOC(from_vp), HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
1973
1974 if ((error = hfs_lock(VTOC(from_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
1975 hfs_unlock_truncate (VTOC(from_vp), HFS_LOCK_DEFAULT);
1976 return error;
1977 }
1978
1979 /* Verify the source file is not in use by anyone besides us (including mmap refs) */
1980 if (vnode_isinuse(from_vp, 1)) {
1981 error = EBUSY;
1982 hfs_unlock(VTOC(from_vp));
1983 hfs_unlock_truncate (VTOC(from_vp), HFS_LOCK_DEFAULT);
1984 return error;
1985 }
1986
1987 /* Flush out the data in the source file */
1988 VTOC(from_vp)->c_flag |= C_SWAPINPROGRESS;
1989 error = hfs_filedone (from_vp, ap->a_context);
1990 VTOC(from_vp)->c_flag &= ~C_SWAPINPROGRESS;
1991 hfs_unlock(VTOC(from_vp));
1992 hfs_unlock_truncate(VTOC(from_vp), HFS_LOCK_DEFAULT);
1993
1994 if (error) {
1995 return error;
1996 }
1997 }
1998
1999 if ((error = hfs_lockpair(VTOC(from_vp), VTOC(to_vp), HFS_EXCLUSIVE_LOCK)))
2000 return (error);
2001
2002 from_cp = VTOC(from_vp);
2003 to_cp = VTOC(to_vp);
2004 hfsmp = VTOHFS(from_vp);
2005
2006 /* Resource forks cannot be exchanged. */
2007 if ( VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp)) {
2008 error = EINVAL;
2009 goto exit;
2010 }
2011
2012 // Don't allow modification of the journal or journal_info_block
2013 if (hfs_is_journal_file(hfsmp, from_cp) ||
2014 hfs_is_journal_file(hfsmp, to_cp)) {
2015 error = EPERM;
2016 goto exit;
2017 }
2018
2019 /*
2020 * Ok, now that all of the pre-flighting is done, call the underlying
2021 * function if needed.
2022 */
2023 if (ap->a_options & FSOPT_EXCHANGE_DATA_ONLY) {
2024 error = hfs_movedata(from_vp, to_vp);
2025 goto exit;
2026 }
2027
2028
2029 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2030 goto exit;
2031 }
2032 started_tr = 1;
2033
2034 /*
2035 * Reserve some space in the Catalog file.
2036 */
2037 if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, vfs_context_proc(ap->a_context)))) {
2038 goto exit;
2039 }
2040 got_cookie = 1;
2041
2042 /* The backend code always tries to delete the virtual
2043 * extent id for exchanging files so we need to lock
2044 * the extents b-tree.
2045 */
2046 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
2047
2048 /* Account for the location of the catalog objects. */
2049 if (from_cp->c_flag & C_HARDLINK) {
2050 MAKE_INODE_NAME(from_iname, sizeof(from_iname),
2051 from_cp->c_attr.ca_linkref);
2052 from_nameptr = (unsigned char *)from_iname;
2053 from_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2054 from_cp->c_hint = 0;
2055 } else {
2056 from_nameptr = from_cp->c_desc.cd_nameptr;
2057 from_parid = from_cp->c_parentcnid;
2058 }
2059 if (to_cp->c_flag & C_HARDLINK) {
2060 MAKE_INODE_NAME(to_iname, sizeof(to_iname),
2061 to_cp->c_attr.ca_linkref);
2062 to_nameptr = (unsigned char *)to_iname;
2063 to_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2064 to_cp->c_hint = 0;
2065 } else {
2066 to_nameptr = to_cp->c_desc.cd_nameptr;
2067 to_parid = to_cp->c_parentcnid;
2068 }
2069
2070 /*
2071 * ExchangeFileIDs swaps the extent information attached to two
2072 * different file IDs. It also swaps the extent information that
2073 * may live in the extents-overflow B-Tree.
2074 *
2075 * We do this in a transaction as this may require a lot of B-Tree nodes
2076 * to do completely, particularly if one of the files in question
2077 * has a lot of extents.
2078 *
2079 * For example, assume "file1" has fileID 50, and "file2" has fileID 52.
2080 * For the on-disk records, which are assumed to be synced, we will
2081 * first swap the resident inline-8 extents as part of the catalog records.
2082 * Then we will swap any extents overflow records for each file.
2083 *
2084 * When this function is done, "file1" will have fileID 52, and "file2" will
2085 * have fileID 50.
2086 */
2087 error = ExchangeFileIDs(hfsmp, from_nameptr, to_nameptr, from_parid,
2088 to_parid, from_cp->c_hint, to_cp->c_hint);
2089 hfs_systemfile_unlock(hfsmp, lockflags);
2090
2091 /*
2092 * Note that we don't need to exchange any extended attributes
2093 * since the attributes are keyed by file ID.
2094 */
2095
2096 if (error != E_NONE) {
2097 error = MacToVFSError(error);
2098 goto exit;
2099 }
2100
2101 /* Purge the vnodes from the name cache */
2102 if (from_vp)
2103 cache_purge(from_vp);
2104 if (to_vp)
2105 cache_purge(to_vp);
2106
2107 /* Bump both source and destination write counts before any swaps. */
2108 {
2109 hfs_incr_gencount (from_cp);
2110 hfs_incr_gencount (to_cp);
2111 }
2112
2113
2114 /* Save a copy of "from" attributes before swapping. */
2115 bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
2116 bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));
2117
2118 /* Save whether or not each cnode is a hardlink or has EAs */
2119 from_flag_special = from_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
2120 to_flag_special = to_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
2121
2122 /* Drop the special bits from each cnode */
2123 from_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
2124 to_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
2125
2126 /*
2127 * Complete the in-memory portion of the copy.
2128 *
2129 * ExchangeFileIDs swaps the on-disk records involved. We complete the
2130 * operation by swapping the in-memory contents of the two files here.
2131 * We swap the cnode descriptors, which contain name, BSD attributes,
2132 * timestamps, etc, about the file.
2133 *
2134 * NOTE: We do *NOT* swap the fileforks of the two cnodes. We have
2135 * already swapped the on-disk extent information. As long as we swap the
2136 * IDs, the in-line resident 8 extents that live in the filefork data
2137 * structure will point to the right data for the new file ID if we leave
2138 * them alone.
2139 *
2140 * As a result, any file descriptor that points to a particular
2141 * vnode (even though it should change names), will continue
2142 * to point to the same content.
2143 */
2144
2145 /* Copy the "to" -> "from" cnode */
2146 bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));
2147
2148 from_cp->c_hint = 0;
2149 /*
2150 * If 'to' was a hardlink, then we copied over its link ID/CNID/(namespace ID)
2151 * when we bcopy'd the descriptor above. However, the cnode attributes
2152 * are not bcopied. As a result, make sure to swap the file IDs of each item.
2153 *
2154 * Further, other hardlink attributes must be moved along in this swap:
2155 * the linkcount, the linkref, and the firstlink all need to move
2156 * along with the file IDs. See note below regarding the flags and
2157 * what moves vs. what does not.
2158 *
2159 * For Reference:
2160 * linkcount == total # of hardlinks.
2161 * linkref == the indirect inode pointer.
2162 * firstlink == the first hardlink in the chain (written to the raw inode).
2163 * These three are tied to the fileID and must move along with the rest of the data.
2164 */
2165 from_cp->c_fileid = to_cp->c_attr.ca_fileid;
2166
2167 from_cp->c_itime = to_cp->c_itime;
2168 from_cp->c_btime = to_cp->c_btime;
2169 from_cp->c_atime = to_cp->c_atime;
2170 from_cp->c_ctime = to_cp->c_ctime;
2171 from_cp->c_gid = to_cp->c_gid;
2172 from_cp->c_uid = to_cp->c_uid;
2173 from_cp->c_bsdflags = to_cp->c_bsdflags;
2174 from_cp->c_mode = to_cp->c_mode;
2175 from_cp->c_linkcount = to_cp->c_linkcount;
2176 from_cp->c_attr.ca_linkref = to_cp->c_attr.ca_linkref;
2177 from_cp->c_attr.ca_firstlink = to_cp->c_attr.ca_firstlink;
2178
2179 /*
2180 * The cnode flags need to stay with the cnode and not get transferred
2181 * over along with everything else because they describe the content; they are
2182 * not attributes that reflect changes specific to the file ID. In general,
2183 * fields that are tied to the file ID are the ones that will move.
2184 *
2185 * This reflects the fact that the file may have borrowed blocks, dirty metadata,
2186 * or other extents, which may not yet have been written to the catalog. If
2187 * they were, they would have been transferred in the ExchangeFileIDs call above...
2188 *
2189 * The flags that are special are:
2190 * C_HARDLINK, C_HASXATTRS
2191 *
2192 * These flags move with the item and file ID in the namespace since their
2193 * state is tied to that of the file ID.
2194 *
2195 * So to transfer the flags, we have to take the following steps:
2196 * 1) Store in a local variable whether or not the special bits are set.
2197 * 2) Drop the special bits from the current flags.
2198 * 3) Swap the special flag bits to their destination.
2199 */
2200 from_cp->c_flag |= to_flag_special;
2201 from_cp->c_attr.ca_recflags = to_cp->c_attr.ca_recflags;
2202 bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);
2203
2204
2205 /* Copy the "from" -> "to" cnode */
2206 bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
2207 to_cp->c_hint = 0;
2208 /*
2209 * Pull the file ID from the tempattr we copied above. We can't assume
2210 * it is the same as the CNID.
2211 */
2212 to_cp->c_fileid = tempattr.ca_fileid;
2213 to_cp->c_itime = tempattr.ca_itime;
2214 to_cp->c_btime = tempattr.ca_btime;
2215 to_cp->c_atime = tempattr.ca_atime;
2216 to_cp->c_ctime = tempattr.ca_ctime;
2217 to_cp->c_gid = tempattr.ca_gid;
2218 to_cp->c_uid = tempattr.ca_uid;
2219 to_cp->c_bsdflags = tempattr.ca_flags;
2220 to_cp->c_mode = tempattr.ca_mode;
2221 to_cp->c_linkcount = tempattr.ca_linkcount;
2222 to_cp->c_attr.ca_linkref = tempattr.ca_linkref;
2223 to_cp->c_attr.ca_firstlink = tempattr.ca_firstlink;
2224
2225 /*
2226 * Only OR in the "from" flags into our cnode flags below.
2227 * Leave the rest of the flags alone.
2228 */
2229 to_cp->c_flag |= from_flag_special;
2230
2231 to_cp->c_attr.ca_recflags = tempattr.ca_recflags;
2232 bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);
2233
2234
2235 /* Rehash the cnodes using their new file IDs */
2236 hfs_chash_rehash(hfsmp, from_cp, to_cp);
2237
2238 /*
2239 * When a file moves out of "Cleanup At Startup"
2240 * we can drop its NODUMP status.
2241 */
2242 if ((from_cp->c_bsdflags & UF_NODUMP) &&
2243 (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
2244 from_cp->c_bsdflags &= ~UF_NODUMP;
2245 from_cp->c_touch_chgtime = TRUE;
2246 }
2247 if ((to_cp->c_bsdflags & UF_NODUMP) &&
2248 (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
2249 to_cp->c_bsdflags &= ~UF_NODUMP;
2250 to_cp->c_touch_chgtime = TRUE;
2251 }
2252
2253 exit:
2254 if (got_cookie) {
2255 cat_postflight(hfsmp, &cookie, vfs_context_proc(ap->a_context));
2256 }
2257 if (started_tr) {
2258 hfs_end_transaction(hfsmp);
2259 }
2260
2261 hfs_unlockpair(from_cp, to_cp);
2262 return (error);
2263 }
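/*
 * Illustrative userland sketch (not part of this file, never compiled):
 * a minimal exchangedata(2) call, the system call this vnop services per
 * the block comment above.  With options set to 0 the two files swap
 * their catalog information while open file descriptors keep referring
 * to the same content.  The paths are assumptions for the example; see
 * the exchangedata(2) man page for the exact prototype and option flags.
 */
#if 0	/* example only */
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	if (exchangedata("/tmp/file_a", "/tmp/file_b", 0) == -1) {
		fprintf(stderr, "exchangedata: %s\n", strerror(errno));
		return 1;
	}
	return 0;
}
#endif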
2264
2265 int
2266 hfs_vnop_mmap(struct vnop_mmap_args *ap)
2267 {
2268 struct vnode *vp = ap->a_vp;
2269 int error;
2270
2271 if (VNODE_IS_RSRC(vp)) {
2272 /* allow pageins of the resource fork */
2273 } else {
2274 int compressed = hfs_file_is_compressed(VTOC(vp), 1); /* 1 == don't take the cnode lock */
2275 time_t orig_ctime = VTOC(vp)->c_ctime;
2276
2277 if (!compressed && (VTOC(vp)->c_bsdflags & UF_COMPRESSED)) {
2278 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
2279 if (error != 0) {
2280 return error;
2281 }
2282 }
2283
2284 if (ap->a_fflags & PROT_WRITE) {
2285 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
2286
2287 /* even though we're manipulating a cnode field here, we're only monotonically increasing
2288 * the generation counter. The vnode can't be recycled (because we hold a FD in order to cause the
2289 * map to happen). So it's safe to do this without holding the cnode lock. The caller's only
2290 * requirement is that the number has been changed.
2291 */
2292 struct cnode *cp = VTOC(vp);
2293 if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
2294 hfs_incr_gencount(cp);
2295 }
2296 }
2297 }
2298
2299 //
2300 // NOTE: we return ENOTSUP because we want the cluster layer
2301 // to actually do all the real work.
2302 //
2303 return (ENOTSUP);
2304 }
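/*
 * Illustrative userland sketch (not part of this file, never compiled):
 * a writable shared mapping is the case that makes the vnop above bump
 * the cnode's generation count before returning ENOTSUP so the cluster
 * layer performs the actual mapping work.  The path is an assumption for
 * the example, and the file is assumed to be at least one byte long so
 * the store through the mapping does not fault.
 */
#if 0	/* example only */
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

static int
dirty_first_byte(const char *path)
{
	int fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;
	char *p = mmap(NULL, 1, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return -1;
	}
	p[0] = 'x';		/* dirty the page through the mapping */
	munmap(p, 1);
	close(fd);
	return 0;
}
#endif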
2305
2306 /*
2307 * hfs_movedata
2308 *
2309 * This is a non-symmetric variant of exchangedata. In this function,
2310 * the contents of the fork in from_vp are moved to the fork
2311 * specified by to_vp.
2312 *
2313 * The cnodes pointed to by 'from_vp' and 'to_vp' must be locked.
2314 *
2315 * The vnode pointed to by 'to_vp' *must* be empty prior to invoking this function.
2316 * We impose this restriction because we may not be able to fully delete the entire
2317 * file's contents in a single transaction, particularly if it has a lot of extents.
2318 * In the normal file deletion codepath, the file is screened for two conditions:
2319 * 1) bigger than 400MB, and 2) more than 8 extents. If so, the file is relocated to
2320 * the hidden directory and the deletion is broken up into multiple truncates. We can't
2321 * do that here because both files need to exist in the namespace. The main reason this
2322 * is imposed is that we may have to touch a whole lot of bitmap blocks if there are
2323 * many extents.
2324 *
2325 * Any data written to 'from_vp' after this call completes is not guaranteed
2326 * to be moved.
2327 *
2328 * Arguments:
2329 * vnode from_vp: source file
2330 * vnode to_vp: destination file; must be empty
2331 *
2332 * Returns:
2333 * EFBIG - Destination file was not empty
2334 * 0 - success
2335 *
2336 *
2337 */
2338 int hfs_movedata (struct vnode *from_vp, struct vnode *to_vp) {
2339
2340 struct cnode *from_cp;
2341 struct cnode *to_cp;
2342 struct hfsmount *hfsmp = NULL;
2343 int error = 0;
2344 int started_tr = 0;
2345 int lockflags = 0;
2346 int overflow_blocks;
2347 int rsrc = 0;
2348
2349
2350 /* Get the HFS pointers */
2351 from_cp = VTOC(from_vp);
2352 to_cp = VTOC(to_vp);
2353 hfsmp = VTOHFS(from_vp);
2354
2355 /* Verify that neither source/dest file is open-unlinked */
2356 if (from_cp->c_flag & (C_DELETED | C_NOEXISTS)) {
2357 error = EBUSY;
2358 goto movedata_exit;
2359 }
2360
2361 if (to_cp->c_flag & (C_DELETED | C_NOEXISTS)) {
2362 error = EBUSY;
2363 goto movedata_exit;
2364 }
2365
2366 /*
2367 * Verify the source file is not in use by anyone besides us.
2368 *
2369 * This function is typically invoked by a namespace handler
2370 * process responding to a temporarily stalled system call.
2371 * The FD that it is working off of is opened O_EVTONLY, so
2372 * it really has no active usecounts (the kusecount from O_EVTONLY
2373 * is subtracted from the total usecounts).
2374 *
2375 * As a result, we shouldn't have any active usecounts against
2376 * this vnode when we go to check it below.
2377 */
2378 if (vnode_isinuse(from_vp, 0)) {
2379 error = EBUSY;
2380 goto movedata_exit;
2381 }
2382
2383 if (from_cp->c_rsrc_vp == from_vp) {
2384 rsrc = 1;
2385 }
2386
2387 /*
2388 * We assume that the destination file is already empty.
2389 * Verify that it is.
2390 */
2391 if (rsrc) {
2392 if (to_cp->c_rsrcfork->ff_size > 0) {
2393 error = EFBIG;
2394 goto movedata_exit;
2395 }
2396 }
2397 else {
2398 if (to_cp->c_datafork->ff_size > 0) {
2399 error = EFBIG;
2400 goto movedata_exit;
2401 }
2402 }
2403
2404 /* If the source has the rsrc open, make sure the destination is also the rsrc */
2405 if (rsrc) {
2406 if (to_vp != to_cp->c_rsrc_vp) {
2407 error = EINVAL;
2408 goto movedata_exit;
2409 }
2410 }
2411 else {
2412 /* Verify that both forks are data forks */
2413 if (to_vp != to_cp->c_vp) {
2414 error = EINVAL;
2415 goto movedata_exit;
2416 }
2417 }
2418
2419 /*
2420 * See if the source file has overflow extents. If it doesn't, we don't
2421 * need to call into MoveData, and the catalog will be enough.
2422 */
2423 if (rsrc) {
2424 overflow_blocks = overflow_extents(from_cp->c_rsrcfork);
2425 }
2426 else {
2427 overflow_blocks = overflow_extents(from_cp->c_datafork);
2428 }
2429
2430 if ((error = hfs_start_transaction (hfsmp)) != 0) {
2431 goto movedata_exit;
2432 }
2433 started_tr = 1;
2434
2435 /* Lock the system files: catalog, extents, attributes */
2436 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
2437
2438 /* Copy over any catalog allocation data into the new spot. */
2439 if (rsrc) {
2440 if ((error = hfs_move_fork (from_cp->c_rsrcfork, from_cp, to_cp->c_rsrcfork, to_cp))){
2441 hfs_systemfile_unlock(hfsmp, lockflags);
2442 goto movedata_exit;
2443 }
2444 }
2445 else {
2446 if ((error = hfs_move_fork (from_cp->c_datafork, from_cp, to_cp->c_datafork, to_cp))) {
2447 hfs_systemfile_unlock(hfsmp, lockflags);
2448 goto movedata_exit;
2449 }
2450 }
2451
2452 /*
2453 * Note that because all we're doing is moving the extents around, we can
2454 * probably do this in a single transaction: Each extent record (group of 8)
2455 * is 64 bytes. An extent overflow B-Tree node is typically 4k. This means
2456 * each node can hold roughly ~60 extent records == (480 extents).
2457 *
2458 * If a file was massively fragmented and had 20k extents, this means we'd
2459 * roughly touch 20k/480 == 41 to 42 nodes, plus the index nodes, for half
2460 * of the operation (inserting or deleting). So if we're manipulating 80-100
2461 * nodes, this is basically 320k of data to write to the journal in
2462 * a bad case.
2463 */
2464 if (overflow_blocks != 0) {
2465 if (rsrc) {
2466 error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 1);
2467 }
2468 else {
2469 error = MoveData (hfsmp, from_cp->c_cnid, to_cp->c_cnid, 0);
2470 }
2471 }
2472
2473 if (error) {
2474 /* Reverse the operation. Copy the fork data back into the source */
2475 if (rsrc) {
2476 hfs_move_fork (to_cp->c_rsrcfork, to_cp, from_cp->c_rsrcfork, from_cp);
2477 }
2478 else {
2479 hfs_move_fork (to_cp->c_datafork, to_cp, from_cp->c_datafork, from_cp);
2480 }
2481 }
2482 else {
2483 struct cat_fork *src_data = NULL;
2484 struct cat_fork *src_rsrc = NULL;
2485 struct cat_fork *dst_data = NULL;
2486 struct cat_fork *dst_rsrc = NULL;
2487
2488 /* Touch the times */
2489 to_cp->c_touch_acctime = TRUE;
2490 to_cp->c_touch_chgtime = TRUE;
2491 to_cp->c_touch_modtime = TRUE;
2492
2493 from_cp->c_touch_acctime = TRUE;
2494 from_cp->c_touch_chgtime = TRUE;
2495 from_cp->c_touch_modtime = TRUE;
2496
2497 hfs_touchtimes(hfsmp, to_cp);
2498 hfs_touchtimes(hfsmp, from_cp);
2499
2500 if (from_cp->c_datafork) {
2501 src_data = &from_cp->c_datafork->ff_data;
2502 }
2503 if (from_cp->c_rsrcfork) {
2504 src_rsrc = &from_cp->c_rsrcfork->ff_data;
2505 }
2506
2507 if (to_cp->c_datafork) {
2508 dst_data = &to_cp->c_datafork->ff_data;
2509 }
2510 if (to_cp->c_rsrcfork) {
2511 dst_rsrc = &to_cp->c_rsrcfork->ff_data;
2512 }
2513
2514 /* Update the catalog nodes */
2515 (void) cat_update(hfsmp, &from_cp->c_desc, &from_cp->c_attr,
2516 src_data, src_rsrc);
2517
2518 (void) cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr,
2519 dst_data, dst_rsrc);
2520
2521 }
2522 /* unlock the system files */
2523 hfs_systemfile_unlock(hfsmp, lockflags);
2524
2525
2526 movedata_exit:
2527 if (started_tr) {
2528 hfs_end_transaction(hfsmp);
2529 }
2530
2531 return error;
2532
2533 }
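/*
 * Illustrative sketch (not part of this file, never compiled): the
 * transaction-size estimate from the comment inside hfs_movedata() above,
 * worked out explicitly.  The 60-records-per-node figure and the 20k
 * extent count are the round numbers used in that comment, not exact
 * on-disk constants.
 */
#if 0	/* example only */
#include <stdio.h>

int
main(void)
{
	int record_bytes     = 64;	/* one extent record (a group of 8 extents) */
	int node_bytes       = 4096;	/* typical extents overflow B-tree node */
	int records_per_node = 60;	/* ~node_bytes / record_bytes, less node overhead */
	int extents_per_node = records_per_node * 8;		/* == 480 extents */

	int extents          = 20000;	/* a massively fragmented file */
	int leaf_nodes       = (extents + extents_per_node - 1) / extents_per_node;
	int nodes_both_sides = leaf_nodes * 2;	/* delete from source + insert at dest */

	printf("~%d leaf nodes per side, ~%d KiB of node data journaled in a bad case\n",
	    leaf_nodes, (nodes_both_sides * node_bytes) / 1024);
	return 0;
}
#endif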
2534
2535 /*
2536 * Copy all of the catalog and runtime data in srcfork to dstfork.
2537 *
2538 * This allows us to maintain the invalid ranges across the movedata operation so
2539 * we don't need to force all of the pending IO right now. In addition, we move all
2540 * extents resident in the catalog record (i.e. not in the extents overflow B-tree) into the destination here.
2541 */
2542 static int hfs_move_fork (struct filefork *srcfork, struct cnode *src_cp,
2543 struct filefork *dstfork, struct cnode *dst_cp) {
2544 struct rl_entry *invalid_range;
2545 int size = sizeof(struct HFSPlusExtentDescriptor);
2546 size = size * kHFSPlusExtentDensity;
2547
2548 /* If the dstfork has any invalid ranges, bail out */
2549 invalid_range = TAILQ_FIRST(&dstfork->ff_invalidranges);
2550 if (invalid_range != NULL) {
2551 return EFBIG;
2552 }
2553
2554 if (dstfork->ff_data.cf_size != 0 || dstfork->ff_data.cf_new_size != 0) {
2555 return EFBIG;
2556 }
2557
2558 /* First copy the invalid ranges */
2559 while ((invalid_range = TAILQ_FIRST(&srcfork->ff_invalidranges))) {
2560 off_t start = invalid_range->rl_start;
2561 off_t end = invalid_range->rl_end;
2562
2563 /* Remove it from the srcfork and add it to dstfork */
2564 rl_remove(start, end, &srcfork->ff_invalidranges);
2565 rl_add(start, end, &dstfork->ff_invalidranges);
2566 }
2567
2568 /*
2569 * Ignore the ff_union. We don't move symlinks or system files.
2570 * Now copy the in-catalog extent information
2571 */
2572 dstfork->ff_data.cf_size = srcfork->ff_data.cf_size;
2573 dstfork->ff_data.cf_new_size = srcfork->ff_data.cf_new_size;
2574 dstfork->ff_data.cf_vblocks = srcfork->ff_data.cf_vblocks;
2575 dstfork->ff_data.cf_blocks = srcfork->ff_data.cf_blocks;
2576
2577 /* just memcpy the whole array of extents to the new location. */
2578 memcpy (dstfork->ff_data.cf_extents, srcfork->ff_data.cf_extents, size);
2579
2580 /*
2581 * Copy the cnode attribute data.
2582 *
2583 */
2584 src_cp->c_blocks -= srcfork->ff_data.cf_vblocks;
2585 src_cp->c_blocks -= srcfork->ff_data.cf_blocks;
2586
2587 dst_cp->c_blocks += srcfork->ff_data.cf_vblocks;
2588 dst_cp->c_blocks += srcfork->ff_data.cf_blocks;
2589
2590 /* Now delete the entries in the source fork */
2591 srcfork->ff_data.cf_size = 0;
2592 srcfork->ff_data.cf_new_size = 0;
2593 srcfork->ff_data.cf_union.cfu_bytesread = 0;
2594 srcfork->ff_data.cf_vblocks = 0;
2595 srcfork->ff_data.cf_blocks = 0;
2596
2597 /* Zero out the old extents */
2598 bzero (srcfork->ff_data.cf_extents, size);
2599 return 0;
2600 }
2601
2602
2603 /*
2604 * cnode must be locked
2605 */
2606 int
2607 hfs_fsync(struct vnode *vp, int waitfor, int fullsync, struct proc *p)
2608 {
2609 struct cnode *cp = VTOC(vp);
2610 struct filefork *fp = NULL;
2611 int retval = 0;
2612 struct hfsmount *hfsmp = VTOHFS(vp);
2613 struct rl_entry *invalid_range;
2614 struct timeval tv;
2615 int waitdata; /* attributes necessary for data retrieval */
2616 int wait; /* all other attributes (e.g. atime, etc.) */
2617 int lockflag;
2618 int took_trunc_lock = 0;
2619 int locked_buffers = 0;
2620
2621 /*
2622 * Applications which only care about data integrity rather than full
2623 * file integrity may opt out of (delay) expensive metadata update
2624 * operations as a performance optimization.
2625 */
2626 wait = (waitfor == MNT_WAIT);
2627 waitdata = (waitfor == MNT_DWAIT) | wait;
2628 if (always_do_fullfsync)
2629 fullsync = 1;
2630
2631 /* HFS directories don't have any data blocks. */
2632 if (vnode_isdir(vp))
2633 goto metasync;
2634 fp = VTOF(vp);
2635
2636 /*
2637 * For system files, flush the B-tree header, and
2638 * for regular files, write out any clusters
2639 */
2640 if (vnode_issystem(vp)) {
2641 if (VTOF(vp)->fcbBTCBPtr != NULL) {
2642 // XXXdbg
2643 if (hfsmp->jnl == NULL) {
2644 BTFlushPath(VTOF(vp));
2645 }
2646 }
2647 } else if (UBCINFOEXISTS(vp)) {
2648 hfs_unlock(cp);
2649 hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
2650 took_trunc_lock = 1;
2651
2652 if (fp->ff_unallocblocks != 0) {
2653 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2654
2655 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2656 }
2657 /* Don't hold cnode lock when calling into cluster layer. */
2658 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
2659
2660 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2661 }
2662 /*
2663 * When MNT_WAIT is requested and the zero fill timeout
2664 * has expired then we must explicitly zero out any areas
2665 * that are currently marked invalid (holes).
2666 *
2667 * Files with NODUMP can bypass zero filling here.
2668 */
2669 if (fp && (((cp->c_flag & C_ALWAYS_ZEROFILL) && !TAILQ_EMPTY(&fp->ff_invalidranges)) ||
2670 ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
2671 ((cp->c_bsdflags & UF_NODUMP) == 0) &&
2672 UBCINFOEXISTS(vp) && (vnode_issystem(vp) ==0) &&
2673 cp->c_zftimeout != 0))) {
2674
2675 microuptime(&tv);
2676 if ((cp->c_flag & C_ALWAYS_ZEROFILL) == 0 && !fullsync && tv.tv_sec < (long)cp->c_zftimeout) {
2677 /* Remember that a force sync was requested. */
2678 cp->c_flag |= C_ZFWANTSYNC;
2679 goto datasync;
2680 }
2681 if (!TAILQ_EMPTY(&fp->ff_invalidranges)) {
2682 if (!took_trunc_lock || (cp->c_truncatelockowner == HFS_SHARED_OWNER)) {
2683 hfs_unlock(cp);
2684 if (took_trunc_lock) {
2685 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2686 }
2687 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2688 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2689 took_trunc_lock = 1;
2690 }
2691 while ((invalid_range = TAILQ_FIRST(&fp->ff_invalidranges))) {
2692 off_t start = invalid_range->rl_start;
2693 off_t end = invalid_range->rl_end;
2694
2695 /* The range about to be written must be validated
2696 * first, so that VNOP_BLOCKMAP() will return the
2697 * appropriate mapping for the cluster code:
2698 */
2699 rl_remove(start, end, &fp->ff_invalidranges);
2700
2701 /* Don't hold cnode lock when calling into cluster layer. */
2702 hfs_unlock(cp);
2703 (void) cluster_write(vp, (struct uio *) 0,
2704 fp->ff_size, end + 1, start, (off_t)0,
2705 IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
2706 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2707 cp->c_flag |= C_MODIFIED;
2708 }
2709 hfs_unlock(cp);
2710 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
2711 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2712 }
2713 cp->c_flag &= ~C_ZFWANTSYNC;
2714 cp->c_zftimeout = 0;
2715 }
2716 datasync:
2717 if (took_trunc_lock) {
2718 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2719 took_trunc_lock = 0;
2720 }
2721 /*
2722 * if we have a journal and if journal_active() returns != 0, then
2723 * we shouldn't do anything to a locked block (because it is part
2724 * of a transaction). otherwise we'll just go through the normal
2725 * code path and flush the buffer. note journal_active() can return
2726 * -1 if the journal is invalid -- however we still need to skip any
2727 * locked blocks as they get cleaned up when we finish the transaction
2728 * or close the journal.
2729 */
2730 // if (hfsmp->jnl && journal_active(hfsmp->jnl) >= 0)
2731 if (hfsmp->jnl)
2732 lockflag = BUF_SKIP_LOCKED;
2733 else
2734 lockflag = 0;
2735
2736 /*
2737 * Flush all dirty buffers associated with a vnode.
2738 * Record how many of them were dirty AND locked (if necessary).
2739 */
2740 locked_buffers = buf_flushdirtyblks_skipinfo(vp, waitdata, lockflag, "hfs_fsync");
2741 if ((lockflag & BUF_SKIP_LOCKED) && (locked_buffers) && (vnode_vtype(vp) == VLNK)) {
2742 /*
2743 * If there are dirty symlink buffers, then we may need to take action
2744 * to prevent issues later on if we are journaled. If we're fsyncing a
2745 * symlink vnode then we are in one of three cases:
2746 *
2747 * 1) automatic sync has fired. In this case, we don't want the behavior to change.
2748 *
2749 * 2) Someone has opened the FD for the symlink (not what it points to)
2750 * and has issued an fsync against it. This should be rare, and we don't
2751 * want the behavior to change.
2752 *
2753 * 3) We are being called by a vclean which is trying to reclaim this
2754 * symlink vnode. If this is the case, then allowing this fsync to
2755 * proceed WITHOUT flushing the journal could result in the vclean
2756 * invalidating the buffer's blocks before the journal transaction is
2757 * written to disk. To prevent this, we force a journal flush
2758 * if the vnode is in the middle of a recycle (VL_TERMINATE or VL_DEAD is set).
2759 */
2760 if (vnode_isrecycled(vp)) {
2761 fullsync = 1;
2762 }
2763 }
2764
2765 metasync:
2766 if (vnode_isreg(vp) && vnode_issystem(vp)) {
2767 if (VTOF(vp)->fcbBTCBPtr != NULL) {
2768 microuptime(&tv);
2769 BTSetLastSync(VTOF(vp), tv.tv_sec);
2770 }
2771 cp->c_touch_acctime = FALSE;
2772 cp->c_touch_chgtime = FALSE;
2773 cp->c_touch_modtime = FALSE;
2774 } else if ( !(vp->v_flag & VSWAP) ) /* User file */ {
2775 retval = hfs_update(vp, wait);
2776
2777 /*
2778 * When MNT_WAIT is requested push out the catalog record for
2779 * this file. If they asked for a full fsync, we can skip this
2780 * because the journal_flush or hfs_metasync_all will push out
2781 * all of the metadata changes.
2782 */
2783 if ((retval == 0) && wait && !fullsync && cp->c_hint &&
2784 !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
2785 hfs_metasync(VTOHFS(vp), (daddr64_t)cp->c_hint, p);
2786 }
2787
2788 /*
2789 * If this was a full fsync, make sure all metadata
2790 * changes get to stable storage.
2791 */
2792 if (fullsync) {
2793 if (hfsmp->jnl) {
2794 hfs_journal_flush(hfsmp, FALSE);
2795
2796 if (journal_uses_fua(hfsmp->jnl)) {
2797 /*
2798 * the journal_flush did NOT issue a sync track cache command,
2799 * and the fullsync indicates we are supposed to flush all cached
2800 * data to the media, so issue the sync track cache command
2801 * explicitly
2802 */
2803 VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL);
2804 }
2805 } else {
2806 retval = hfs_metasync_all(hfsmp);
2807 /* XXX need to pass context! */
2808 VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL);
2809 }
2810 }
2811 }
2812
2813 return (retval);
2814 }
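/*
 * Illustrative userland sketch (not part of this file, never compiled):
 * the two flush strengths that the wait/fullsync logic above distinguishes.
 * fsync(2) asks for an ordinary flush, while fcntl(F_FULLFSYNC) is the
 * documented way for an application to ask that all buffered data reach
 * stable storage; the exact plumbing from fcntl down to the fullsync
 * argument above is outside this file.  The path is an assumption for
 * the example only.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <unistd.h>

static int
flush_file(const char *path, int want_full_flush)
{
	int fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	int ret = want_full_flush ? fcntl(fd, F_FULLFSYNC) : fsync(fd);
	close(fd);
	return ret;
}
#endif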
2815
2816
2817 /* Sync an hfs catalog b-tree node */
2818 int
2819 hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p)
2820 {
2821 vnode_t vp;
2822 buf_t bp;
2823 int lockflags;
2824
2825 vp = HFSTOVCB(hfsmp)->catalogRefNum;
2826
2827 // XXXdbg - don't need to do this on a journaled volume
2828 if (hfsmp->jnl) {
2829 return 0;
2830 }
2831
2832 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
2833 /*
2834 * Look for a matching node that has been delayed
2835 * but is not part of a set (B_LOCKED).
2836 *
2837 * BLK_ONLYVALID causes buf_getblk to return a
2838 * buf_t for the daddr64_t specified only if it's
2839 * currently resident in the cache... the size
2840 * parameter to buf_getblk is ignored when this flag
2841 * is set
2842 */
2843 bp = buf_getblk(vp, node, 0, 0, 0, BLK_META | BLK_ONLYVALID);
2844
2845 if (bp) {
2846 if ((buf_flags(bp) & (B_LOCKED | B_DELWRI)) == B_DELWRI)
2847 (void) VNOP_BWRITE(bp);
2848 else
2849 buf_brelse(bp);
2850 }
2851
2852 hfs_systemfile_unlock(hfsmp, lockflags);
2853
2854 return (0);
2855 }
2856
2857
2858 /*
2859 * Sync all hfs B-trees. Use this instead of journal_flush for a volume
2860 * without a journal. Note that the volume bitmap does not get written;
2861 * we rely on fsck_hfs to fix that up (which it can do without any loss
2862 * of data).
2863 */
2864 int
2865 hfs_metasync_all(struct hfsmount *hfsmp)
2866 {
2867 int lockflags;
2868
2869 /* Lock all of the B-trees so we get a mutually consistent state */
2870 lockflags = hfs_systemfile_lock(hfsmp,
2871 SFL_CATALOG|SFL_EXTENTS|SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
2872
2873 /* Sync each of the B-trees */
2874 if (hfsmp->hfs_catalog_vp)
2875 hfs_btsync(hfsmp->hfs_catalog_vp, 0);
2876 if (hfsmp->hfs_extents_vp)
2877 hfs_btsync(hfsmp->hfs_extents_vp, 0);
2878 if (hfsmp->hfs_attribute_vp)
2879 hfs_btsync(hfsmp->hfs_attribute_vp, 0);
2880
2881 /* Wait for all of the writes to complete */
2882 if (hfsmp->hfs_catalog_vp)
2883 vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_metasync_all");
2884 if (hfsmp->hfs_extents_vp)
2885 vnode_waitforwrites(hfsmp->hfs_extents_vp, 0, 0, 0, "hfs_metasync_all");
2886 if (hfsmp->hfs_attribute_vp)
2887 vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs_metasync_all");
2888
2889 hfs_systemfile_unlock(hfsmp, lockflags);
2890
2891 return 0;
2892 }
2893
2894
2895 /*ARGSUSED 1*/
2896 static int
2897 hfs_btsync_callback(struct buf *bp, __unused void *dummy)
2898 {
2899 buf_clearflags(bp, B_LOCKED);
2900 (void) buf_bawrite(bp);
2901
2902 return(BUF_CLAIMED);
2903 }
2904
2905
2906 int
2907 hfs_btsync(struct vnode *vp, int sync_transaction)
2908 {
2909 struct cnode *cp = VTOC(vp);
2910 struct timeval tv;
2911 int flags = 0;
2912
2913 if (sync_transaction)
2914 flags |= BUF_SKIP_NONLOCKED;
2915 /*
2916 * Flush all dirty buffers associated with b-tree.
2917 */
2918 buf_iterate(vp, hfs_btsync_callback, flags, 0);
2919
2920 microuptime(&tv);
2921 if (vnode_issystem(vp) && (VTOF(vp)->fcbBTCBPtr != NULL))
2922 (void) BTSetLastSync(VTOF(vp), tv.tv_sec);
2923 cp->c_touch_acctime = FALSE;
2924 cp->c_touch_chgtime = FALSE;
2925 cp->c_touch_modtime = FALSE;
2926
2927 return 0;
2928 }
2929
2930 /*
2931 * Remove a directory.
2932 */
2933 int
2934 hfs_vnop_rmdir(ap)
2935 struct vnop_rmdir_args /* {
2936 struct vnode *a_dvp;
2937 struct vnode *a_vp;
2938 struct componentname *a_cnp;
2939 vfs_context_t a_context;
2940 } */ *ap;
2941 {
2942 struct vnode *dvp = ap->a_dvp;
2943 struct vnode *vp = ap->a_vp;
2944 struct cnode *dcp = VTOC(dvp);
2945 struct cnode *cp = VTOC(vp);
2946 int error;
2947 time_t orig_ctime;
2948
2949 orig_ctime = VTOC(vp)->c_ctime;
2950
2951 if (!S_ISDIR(cp->c_mode)) {
2952 return (ENOTDIR);
2953 }
2954 if (dvp == vp) {
2955 return (EINVAL);
2956 }
2957
2958 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
2959 cp = VTOC(vp);
2960
2961 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
2962 return (error);
2963 }
2964
2965 /* Check for a race with rmdir on the parent directory */
2966 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
2967 hfs_unlockpair (dcp, cp);
2968 return ENOENT;
2969 }
2970
2971 //
2972 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
2973 //
2974 if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
2975 uint32_t newid;
2976
2977 hfs_unlockpair(dcp, cp);
2978
2979 if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
2980 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
2981 ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
2982 #if CONFIG_FSE
2983 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
2984 FSE_ARG_DEV, VTOHFS(vp)->hfs_raw_dev,
2985 FSE_ARG_INO, (ino64_t)0, // src inode #
2986 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
2987 FSE_ARG_INT32, newid,
2988 FSE_ARG_DONE);
2989 #endif
2990 } else {
2991 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rm...
2992 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
2993 }
2994 }
2995
2996 error = hfs_removedir(dvp, vp, ap->a_cnp, 0, 0);
2997
2998 hfs_unlockpair(dcp, cp);
2999
3000 return (error);
3001 }
3002
3003 /*
3004 * Remove a directory
3005 *
3006 * Both dvp and vp cnodes are locked
3007 */
3008 int
3009 hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
3010 int skip_reserve, int only_unlink)
3011 {
3012 struct cnode *cp;
3013 struct cnode *dcp;
3014 struct hfsmount * hfsmp;
3015 struct cat_desc desc;
3016 int lockflags;
3017 int error = 0, started_tr = 0;
3018
3019 cp = VTOC(vp);
3020 dcp = VTOC(dvp);
3021 hfsmp = VTOHFS(vp);
3022
3023 if (dcp == cp) {
3024 return (EINVAL); /* cannot remove "." */
3025 }
3026 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
3027 return (0);
3028 }
3029 if (cp->c_entries != 0) {
3030 return (ENOTEMPTY);
3031 }
3032
3033 /*
3034 * If the directory is open or in use (e.g. opendir() or current working
3035 * directory for some process), wait for inactive/reclaim to actually
3036 * remove cnode from the catalog. Both inactive and reclaim codepaths are capable
3037 * of removing open-unlinked directories from the catalog, as well as getting rid
3038 * of EAs still on the element. So change only_unlink to true, so that it will get
3039 * cleaned up below.
3040 *
3041 * Otherwise, we can get into a weird old mess where the directory has C_DELETED,
3042 * but it really means C_NOEXISTS because the item was actually removed from the
3043 * catalog. Then when we try to remove the entry from the catalog later on, it won't
3044 * really be there anymore.
3045 */
3046 if (vnode_isinuse(vp, 0)) {
3047 only_unlink = 1;
3048 }
3049
3050 /* Deal with directory hardlinks */
3051 if (cp->c_flag & C_HARDLINK) {
3052 /*
3053 * Note that if we have a directory which was a hardlink at any point,
3054 * its actual directory data is stored in the directory inode in the hidden
3055 * directory rather than the leaf element(s) present in the namespace.
3056 *
3057 * If there are still other hardlinks to this directory,
3058 * then we'll just eliminate this particular link and the vnode will still exist.
3059 * If this is the last link to an empty directory, then we'll open-unlink the
3060 * directory and it will be only tagged with C_DELETED (as opposed to C_NOEXISTS).
3061 *
3062 * We could also return EBUSY here.
3063 */
3064
3065 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
3066 }
3067
3068 /*
3069 * In a few cases, we may want to allow the directory to persist in an
3070 * open-unlinked state. If the directory is being open-unlinked (still has usecount
3071 * references), or if it has EAs, or if it was being deleted as part of a rename,
3072 * then we go ahead and move it to the hidden directory.
3073 *
3074 * If the directory is being open-unlinked, then we want to keep the catalog entry
3075 * alive so that future EA calls and fchmod/fstat etc. do not cause issues later.
3076 *
3077 * If the directory had EAs, then we want to use the open-unlink trick so that the
3078 * EA removal is not done in one giant transaction. Otherwise, it could cause a panic
3079 * due to overflowing the journal.
3080 *
3081 * Finally, if it was deleted as part of a rename, we move it to the hidden directory
3082 * in order to maintain rename atomicity.
3083 *
3084 * Note that the allow_dirs argument to hfs_removefile specifies that it is
3085 * supposed to handle directories for this case.
3086 */
3087
3088 if (((hfsmp->hfs_attribute_vp != NULL) &&
3089 ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0)) ||
3090 (only_unlink != 0)) {
3091
3092 int ret = hfs_removefile(dvp, vp, cnp, 0, 0, 1, NULL, only_unlink);
3093 /*
3094 * Even though hfs_vnop_rename calls vnode_recycle for us on tvp, we call
3095 * it here just in case we were invoked by rmdir() on a directory that had
3096 * EAs. To ensure that we start reclaiming the space as soon as possible,
3097 * we call vnode_recycle on the directory.
3098 */
3099 vnode_recycle(vp);
3100
3101 return ret;
3102
3103 }
3104
3105 dcp->c_flag |= C_DIR_MODIFICATION;
3106
3107 #if QUOTA
3108 if (hfsmp->hfs_flags & HFS_QUOTAS)
3109 (void)hfs_getinoquota(cp);
3110 #endif
3111 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3112 goto out;
3113 }
3114 started_tr = 1;
3115
3116 /*
3117 * Verify the directory is empty (and valid).
3118 * (Rmdir ".." won't be valid since
3119 * ".." will contain a reference to
3120 * the current directory and thus be
3121 * non-empty.)
3122 */
3123 if ((dcp->c_bsdflags & APPEND) || (cp->c_bsdflags & (IMMUTABLE | APPEND))) {
3124 error = EPERM;
3125 goto out;
3126 }
3127
3128 /* Remove the entry from the namei cache: */
3129 cache_purge(vp);
3130
3131 /*
3132 * Protect against a race with rename by using the component
3133 * name passed in and parent id from dvp (instead of using
3134 * the cp->c_desc which may have changed).
3135 */
3136 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3137 desc.cd_namelen = cnp->cn_namelen;
3138 desc.cd_parentcnid = dcp->c_fileid;
3139 desc.cd_cnid = cp->c_cnid;
3140 desc.cd_flags = CD_ISDIR;
3141 desc.cd_encoding = cp->c_encoding;
3142 desc.cd_hint = 0;
3143
3144 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
3145 error = 0;
3146 goto out;
3147 }
3148
3149 /* Remove entry from catalog */
3150 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3151
3152 if (!skip_reserve) {
3153 /*
3154 * Reserve some space in the Catalog file.
3155 */
3156 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
3157 hfs_systemfile_unlock(hfsmp, lockflags);
3158 goto out;
3159 }
3160 }
3161
3162 error = cat_delete(hfsmp, &desc, &cp->c_attr);
3163
3164 if (!error) {
3165 //
3166 // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
3167 // we don't need to touch the document_id as it's handled by the rename code.
3168 // otherwise it's a normal remove and we need to save the document id in the
3169 // per thread struct and clear it from the cnode.
3170 //
3171 struct doc_tombstone *ut;
3172 ut = get_uthread_doc_tombstone();
3173 if (!skip_reserve && (cp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, vp, cnp)) {
3174
3175 if (ut->t_lastop_document_id) {
3176 clear_tombstone_docid(ut, hfsmp, NULL);
3177 }
3178 save_tombstone(hfsmp, dvp, vp, cnp, 1);
3179
3180 }
3181
3182 /* The parent lost a child */
3183 if (dcp->c_entries > 0)
3184 dcp->c_entries--;
3185 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
3186 dcp->c_dirchangecnt++;
3187 {
3188 struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
3189 extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
3190 }
3191 dcp->c_touch_chgtime = TRUE;
3192 dcp->c_touch_modtime = TRUE;
3193 hfs_touchtimes(hfsmp, cp);
3194 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
3195 cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE);
3196 }
3197
3198 hfs_systemfile_unlock(hfsmp, lockflags);
3199
3200 if (error)
3201 goto out;
3202
3203 #if QUOTA
3204 if (hfsmp->hfs_flags & HFS_QUOTAS)
3205 (void)hfs_chkiq(cp, -1, NOCRED, 0);
3206 #endif /* QUOTA */
3207
3208 hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));
3209
3210 /* Mark C_NOEXISTS since the catalog entry is now gone */
3211 cp->c_flag |= C_NOEXISTS;
3212 out:
3213 dcp->c_flag &= ~C_DIR_MODIFICATION;
3214 wakeup((caddr_t)&dcp->c_flag);
3215
3216 if (started_tr) {
3217 hfs_end_transaction(hfsmp);
3218 }
3219
3220 return (error);
3221 }
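/*
 * Illustrative userland sketch (not part of this file, never compiled):
 * the user-visible errors produced by the checks above -- rmdir(2) of a
 * non-empty directory fails with ENOTEMPTY, and rmdir of "." fails with
 * EINVAL (the errno may also be produced earlier in VFS).  The path is
 * an assumption for the example only.
 */
#if 0	/* example only */
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	if (rmdir("/tmp/nonempty_dir") == -1)
		printf("rmdir(non-empty): %s\n", strerror(errno));	/* expect ENOTEMPTY */
	if (rmdir(".") == -1)
		printf("rmdir(\".\"): %s\n", strerror(errno));		/* expect EINVAL */
	return 0;
}
#endif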
3222
3223
3224 /*
3225 * Remove a file or link.
3226 */
3227 int
3228 hfs_vnop_remove(ap)
3229 struct vnop_remove_args /* {
3230 struct vnode *a_dvp;
3231 struct vnode *a_vp;
3232 struct componentname *a_cnp;
3233 int a_flags;
3234 vfs_context_t a_context;
3235 } */ *ap;
3236 {
3237 struct vnode *dvp = ap->a_dvp;
3238 struct vnode *vp = ap->a_vp;
3239 struct cnode *dcp = VTOC(dvp);
3240 struct cnode *cp;
3241 struct vnode *rvp = NULL;
3242 int error=0, recycle_rsrc=0;
3243 int recycle_vnode = 0;
3244 uint32_t rsrc_vid = 0;
3245 time_t orig_ctime;
3246
3247 if (dvp == vp) {
3248 return (EINVAL);
3249 }
3250
3251 orig_ctime = VTOC(vp)->c_ctime;
3252 if (!vnode_isnamedstream(vp) && ((ap->a_flags & VNODE_REMOVE_SKIP_NAMESPACE_EVENT) == 0)) {
3253 error = check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
3254 if (error) {
3255 // XXXdbg - decide on a policy for handling namespace handler failures!
3256 // for now we just let them proceed.
3257 }
3258 }
3259 error = 0;
3260
3261 cp = VTOC(vp);
3262
3263 relock:
3264
3265 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
3266
3267 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
3268 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3269 if (rvp) {
3270 vnode_put (rvp);
3271 }
3272 return (error);
3273 }
3274 //
3275 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
3276 //
3277 if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
3278 uint32_t newid;
3279
3280 hfs_unlockpair(dcp, cp);
3281
3282 if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
3283 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3284 ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
3285 #if CONFIG_FSE
3286 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
3287 FSE_ARG_DEV, VTOHFS(vp)->hfs_raw_dev,
3288 FSE_ARG_INO, (ino64_t)0, // src inode #
3289 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
3290 FSE_ARG_INT32, newid,
3291 FSE_ARG_DONE);
3292 #endif
3293 } else {
3294 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rm...
3295 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3296 }
3297 }
3298
3299 /*
3300 * Lazily determine whether there is a valid resource fork
3301 * vnode attached to 'cp' if it is a regular file or symlink.
3302 * If the vnode does not exist, then we may proceed without having to
3303 * create it.
3304 *
3305 * If, however, it does exist, then we need to acquire an iocount on the
3306 * vnode after acquiring its vid. This ensures that if we have to do I/O
3307 * against it, it can't get recycled from underneath us in the middle
3308 * of this call.
3309 *
3310 * Note: this function may be invoked for directory hardlinks, so just skip these
3311 * steps if 'vp' is a directory.
3312 */
3313
3314 if ((vp->v_type == VLNK) || (vp->v_type == VREG)) {
3315 if ((cp->c_rsrc_vp) && (rvp == NULL)) {
3316 /* We need to acquire the rsrc vnode */
3317 rvp = cp->c_rsrc_vp;
3318 rsrc_vid = vnode_vid (rvp);
3319
3320 /* Unlock everything to acquire iocount on the rsrc vnode */
3321 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
3322 hfs_unlockpair (dcp, cp);
3323 /* Use the vid to maintain identity on rvp */
3324 if (vnode_getwithvid(rvp, rsrc_vid)) {
3325 /*
3326 * If this fails, then it was recycled or
3327 * reclaimed in the interim. Reset fields and
3328 * start over.
3329 */
3330 rvp = NULL;
3331 rsrc_vid = 0;
3332 }
3333 goto relock;
3334 }
3335 }
3336
3337 /*
3338 * Check to see if we raced rmdir for the parent directory
3339 * hfs_removefile already checks for a race on vp/cp
3340 */
3341 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
3342 error = ENOENT;
3343 goto rm_done;
3344 }
3345
3346 error = hfs_removefile(dvp, vp, ap->a_cnp, ap->a_flags, 0, 0, NULL, 0);
3347
3348 /*
3349 * If the remove succeeded in deleting the file, then we may need to mark
3350 * the resource fork for recycle so that it is reclaimed as quickly
3351 * as possible. If it were not recycled quickly, then this resource fork
3352 * vnode could keep a v_parent reference on the data fork, which prevents it
3353 * from going through reclaim (by giving it extra usecounts), except in the force-
3354 * unmount case.
3355 *
3356 * However, a caveat: we need to continue to supply resource fork
3357 * access to open-unlinked files even if the resource fork is not open. This is
3358 * a requirement for the compressed files work. Luckily, hfs_vgetrsrc will handle
3359 * this already if the data fork has been re-parented to the hidden directory.
3360 *
3361 * As a result, all we really need to do here is mark the resource fork vnode
3362 * for recycle. If it goes out of core, it can be brought in again if needed.
3363 * If the cnode was instead marked C_NOEXISTS, then there wouldn't be any
3364 * more work.
3365 */
3366 if (error == 0) {
3367 if (rvp) {
3368 recycle_rsrc = 1;
3369 }
3370 /*
3371 * If the target was actually removed from the catalog, schedule it for
3372 * full reclamation/inactivation. We hold an iocount on it, so it should just
3373 * get marked with MARKTERM.
3374 */
3375 if (cp->c_flag & C_NOEXISTS) {
3376 recycle_vnode = 1;
3377 }
3378 }
3379
3380
3381 /*
3382 * Drop the truncate lock before unlocking the cnode
3383 * (which can potentially perform a vnode_put and
3384 * recycle the vnode which in turn might require the
3385 * truncate lock)
3386 */
3387 rm_done:
3388 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3389 hfs_unlockpair(dcp, cp);
3390
3391 if (recycle_rsrc) {
3392 /* inactive or reclaim on rvp will clean up the blocks from the rsrc fork */
3393 vnode_recycle(rvp);
3394 }
3395 if (recycle_vnode) {
3396 vnode_recycle (vp);
3397 }
3398
3399 if (rvp) {
3400 /* drop iocount on rsrc fork, was obtained at beginning of fxn */
3401 vnode_put(rvp);
3402 }
3403
3404 return (error);
3405 }
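/*
 * Illustrative userland sketch (not part of this file, never compiled):
 * one way a resource fork vnode (the rvp handled above) comes into
 * existence -- opening the fork through the "..namedfork/rsrc" suffix.
 * It is that vnode which hfs_vnop_remove() marks for recycle once the
 * remove succeeds.  The path is an assumption for the example only.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <unistd.h>

static int
open_resource_fork(void)
{
	int rfd = open("/tmp/somefile/..namedfork/rsrc", O_RDONLY);
	if (rfd >= 0)
		close(rfd);
	return rfd;
}
#endif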
3406
3407
3408 int
3409 hfs_removefile_callback(struct buf *bp, void *hfsmp) {
3410
3411 if ( !(buf_flags(bp) & B_META))
3412 panic("hfs: symlink bp @ %p is not marked meta-data!\n", bp);
3413 /*
3414 * it's part of the current transaction, kill it.
3415 */
3416 journal_kill_block(((struct hfsmount *)hfsmp)->jnl, bp);
3417
3418 return (BUF_CLAIMED);
3419 }
3420
3421 /*
3422 * hfs_removefile
3423 *
3424 * Similar to hfs_vnop_remove except there are additional options.
3425 * This function may be used to remove directories if they have
3426 * lots of EA's -- note the 'allow_dirs' argument.
3427 *
3428 * This function is able to delete blocks & fork data for the resource
3429 * fork even if it does not exist in core (and does not have a backing vnode).
3430 * It should infer the correct behavior based on the number of blocks
3431 * in the cnode and on whether or not the resource fork pointer exists.
3432 * As a result, one need only pass in the 'vp' corresponding to the
3433 * data fork of this file (or main vnode in the case of a directory).
3434 * Passing in a resource fork will result in an error.
3435 *
3436 * Because we do not create any vnodes in this function, we are not at
3437 * risk of deadlocking against ourselves by double-locking.
3438 *
3439 * Requires cnode and truncate locks to be held.
3440 */
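/*
 * Illustrative call pattern (a sketch, not part of the original source):
 * a caller such as hfs_vnop_remove() above takes the truncate lock and the
 * cnode pair lock first, then hands in the data fork vnode.
 */
#if 0	/* example only -- mirrors the hfs_removefile() call in hfs_vnop_remove() */
	hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
	error = hfs_removefile(dvp, vp, cnp, flags,
	                       0 /* skip_reserve */, 0 /* allow_dirs */,
	                       NULL /* rvp (unused) */, 0 /* only_unlink */);
	hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	hfs_unlockpair(dcp, cp);
#endif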
3441 int
3442 hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
3443 int flags, int skip_reserve, int allow_dirs,
3444 __unused struct vnode *rvp, int only_unlink)
3445 {
3446 struct cnode *cp;
3447 struct cnode *dcp;
3448 struct vnode *rsrc_vp = NULL;
3449 struct hfsmount *hfsmp;
3450 struct cat_desc desc;
3451 struct timeval tv;
3452 int dataforkbusy = 0;
3453 int rsrcforkbusy = 0;
3454 int lockflags;
3455 int error = 0;
3456 int started_tr = 0;
3457 int isbigfile = 0, defer_remove=0, isdir=0;
3458 int update_vh = 0;
3459
3460 cp = VTOC(vp);
3461 dcp = VTOC(dvp);
3462 hfsmp = VTOHFS(vp);
3463
3464 /* Check if we lost a race post lookup. */
3465 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
3466 return (0);
3467 }
3468
3469 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
3470 return 0;
3471 }
3472
3473 /* Make sure a remove is permitted */
3474 if (VNODE_IS_RSRC(vp)) {
3475 return (EPERM);
3476 }
3477 else {
3478 /*
3479 * We know it's a data fork.
3480 * Probe the cnode to see if we have a valid resource fork
3481 * in hand or not.
3482 */
3483 rsrc_vp = cp->c_rsrc_vp;
3484 }
3485
3486 /* Don't allow deleting the journal or journal_info_block. */
3487 if (hfs_is_journal_file(hfsmp, cp)) {
3488 return (EPERM);
3489 }
3490
3491 /*
3492 * If removing a symlink, then we need to ensure that the
3493 * data blocks for the symlink are not still in-flight or pending.
3494 * If so, we will unlink the symlink here, making its blocks
3495 * available for re-allocation by a subsequent transaction. That is OK, but
3496 * then the I/O for the data blocks could go out before the journal
3497 * transaction that created it was flushed, leading to I/O ordering issues.
3498 */
3499 if (vp->v_type == VLNK) {
3500 /*
3501 * This will block if the asynchronous journal flush is in progress.
3502 * If this symlink is not being renamed over and doesn't have any open FDs,
3503 * then we'll remove it from the journal's bufs below in kill_block.
3504 */
3505 buf_wait_for_shadow_io (vp, 0);
3506 }
3507
3508 /*
3509 * Hard links require special handling.
3510 */
3511 if (cp->c_flag & C_HARDLINK) {
3512 if ((flags & VNODE_REMOVE_NODELETEBUSY) && vnode_isinuse(vp, 0)) {
3513 return (EBUSY);
3514 } else {
3515 /* A directory hard link with a link count of one is
3516 * treated as a regular directory. Therefore it should
3517 * only be removed using rmdir().
3518 */
3519 if ((vnode_isdir(vp) == 1) && (cp->c_linkcount == 1) &&
3520 (allow_dirs == 0)) {
3521 return (EPERM);
3522 }
3523 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
3524 }
3525 }
3526
3527 /* Directories should call hfs_rmdir! (unless they have a lot of attributes) */
3528 if (vnode_isdir(vp)) {
3529 if (allow_dirs == 0)
3530 return (EPERM); /* POSIX */
3531 isdir = 1;
3532 }
3533 /* Sanity check the parent ids. */
3534 if ((cp->c_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
3535 (cp->c_parentcnid != dcp->c_fileid)) {
3536 return (EINVAL);
3537 }
3538
3539 dcp->c_flag |= C_DIR_MODIFICATION;
3540
3541 // this guy is going away so mark him as such
3542 cp->c_flag |= C_DELETED;
3543
3544
3545 /* Remove our entry from the namei cache. */
3546 cache_purge(vp);
3547
3548 /*
3549 * If the caller was operating on a file (as opposed to a
3550 * directory with EAs), then we need to figure out
3551 * whether or not it has a valid resource fork vnode.
3552 *
3553 * If there was a valid resource fork vnode, then we need
3554 * to use hfs_truncate to eliminate its data. If there is
3555 * no vnode, then we hold the cnode lock which would
3556 * prevent it from being created. As a result,
3557 * we can use the data deletion functions which do not
3558 * require that a cnode/vnode pair exist.
3559 */
3560
3561 /* Check if this file is being used. */
3562 if (isdir == 0) {
3563 dataforkbusy = vnode_isinuse(vp, 0);
3564 /*
3565 * At this point, we know that 'vp' points to the
3566 * a data fork because we checked it up front. And if
3567 * there is no rsrc fork, rsrc_vp will be NULL.
3568 */
3569 if (rsrc_vp && (cp->c_blocks - VTOF(vp)->ff_blocks)) {
3570 rsrcforkbusy = vnode_isinuse(rsrc_vp, 0);
3571 }
3572 }
3573
3574 /* Check if we have to break the deletion into multiple pieces. */
3575 if (isdir == 0) {
3576 isbigfile = ((cp->c_datafork->ff_size >= HFS_BIGFILE_SIZE) && overflow_extents(VTOF(vp)));
3577 }
3578
3579 /* Check if the file has xattrs. If it does, we'll have to delete them in
3580 individual transactions in case there are too many. */
3581 if ((hfsmp->hfs_attribute_vp != NULL) &&
3582 (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) {
3583 defer_remove = 1;
3584 }
3585
3586 /* If we are explicitly told to only unlink item and move to hidden dir, then do it */
3587 if (only_unlink) {
3588 defer_remove = 1;
3589 }
3590
3591 /*
3592 * Carbon semantics prohibit deleting busy files.
3593 * (enforced when VNODE_REMOVE_NODELETEBUSY is requested)
3594 */
3595 if (dataforkbusy || rsrcforkbusy) {
3596 if ((flags & VNODE_REMOVE_NODELETEBUSY) ||
3597 (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid == 0)) {
3598 error = EBUSY;
3599 goto out;
3600 }
3601 }
3602
3603 #if QUOTA
3604 if (hfsmp->hfs_flags & HFS_QUOTAS)
3605 (void)hfs_getinoquota(cp);
3606 #endif /* QUOTA */
3607
3608 /*
3609 * Do a ubc_setsize to indicate we need to wipe contents if:
3610 * 1) item is a regular file.
3611 * 2) Neither fork is busy AND we are not told to unlink this.
3612 *
3613 * We need to check for the defer_remove since it can be set without
3614 * having a busy data or rsrc fork
3615 */
3616 if (isdir == 0 && (!dataforkbusy || !rsrcforkbusy) && (defer_remove == 0)) {
3617 /*
3618 * A ubc_setsize can cause a pagein so defer it
3619 * until after the cnode lock is dropped. The
3620 * cnode lock cannot be dropped/reacquired here
3621 * since we might already hold the journal lock.
3622 */
3623 if (!dataforkbusy && cp->c_datafork->ff_blocks && !isbigfile) {
3624 cp->c_flag |= C_NEED_DATA_SETSIZE;
3625 }
3626 if (!rsrcforkbusy && rsrc_vp) {
3627 cp->c_flag |= C_NEED_RSRC_SETSIZE;
3628 }
3629 }
3630
3631 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3632 goto out;
3633 }
3634 started_tr = 1;
3635
3636 // XXXdbg - if we're journaled, kill any dirty symlink buffers
3637 if (hfsmp->jnl && vnode_islnk(vp) && (defer_remove == 0)) {
3638 buf_iterate(vp, hfs_removefile_callback, BUF_SKIP_NONLOCKED, (void *)hfsmp);
3639 }
3640
3641 /*
3642 * Prepare to truncate any non-busy forks. Busy forks will
3643 * get truncated when their vnode goes inactive.
3644 * Note that we will only enter this region if we
3645 * can avoid creating an open-unlinked file. If
3646 * either region is busy, we will have to create an open
3647 * unlinked file.
3648 *
3649 * Since we are deleting the file, we need to stagger the runtime
3650 * modifications to do things in such a way that a crash won't
3651 * result in us getting overlapped extents or any other
3652 * bad inconsistencies. As such, we call prepare_release_storage
3653 * which updates the UBC, updates quota information, and releases
3654 * any loaned blocks that belong to this file. No actual
3655 * truncation or bitmap manipulation is done until *AFTER*
3656 * the catalog record is removed.
3657 */
3658 if (isdir == 0 && (!dataforkbusy && !rsrcforkbusy) && (only_unlink == 0)) {
3659
3660 if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) {
3661
3662 error = hfs_prepare_release_storage (hfsmp, vp);
3663 if (error) {
3664 goto out;
3665 }
3666 update_vh = 1;
3667 }
3668
3669 /*
3670 * If the resource fork vnode does not exist, we can skip this step.
3671 */
3672 if (!rsrcforkbusy && rsrc_vp) {
3673 error = hfs_prepare_release_storage (hfsmp, rsrc_vp);
3674 if (error) {
3675 goto out;
3676 }
3677 update_vh = 1;
3678 }
3679 }
3680
3681 /*
3682 * Protect against a race with rename by using the component
3683 * name passed in and parent id from dvp (instead of using
3684 * the cp->c_desc which may have changed). Also, be aware that
3685 * because we allow directories to be passed in, we need to special case
3686 * this temporary descriptor in case we were handed a directory.
3687 */
3688 if (isdir) {
3689 desc.cd_flags = CD_ISDIR;
3690 }
3691 else {
3692 desc.cd_flags = 0;
3693 }
3694 desc.cd_encoding = cp->c_desc.cd_encoding;
3695 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3696 desc.cd_namelen = cnp->cn_namelen;
3697 desc.cd_parentcnid = dcp->c_fileid;
3698 desc.cd_hint = cp->c_desc.cd_hint;
3699 desc.cd_cnid = cp->c_cnid;
3700 microtime(&tv);
3701
3702 /*
3703 * There are two cases to consider:
3704 * 1. File/Dir is busy/big/defer_remove ==> move/rename the file/dir
3705 * 2. File is not in use ==> remove the file
3706 *
3707 * We can get a directory in case 1 because it may have had lots of attributes,
3708 * which need to get removed here.
3709 */
3710 if (dataforkbusy || rsrcforkbusy || isbigfile || defer_remove) {
3711 char delname[32];
3712 struct cat_desc to_desc;
3713 struct cat_desc todir_desc;
3714
3715 /*
3716 * Orphan this file or directory (move to hidden directory).
3717 * Again, we need to take care that we treat directories as directories,
3718 * and files as files. Because directories with attributes can be passed in,
3719 * check to make sure that we have a directory or a file before filling in the
3720 * temporary descriptor's flags. We keep orphaned directories AND files in
3721 * the FILE_HARDLINKS private directory since we're generalizing over all
3722 * orphaned filesystem objects.
3723 */
3724 bzero(&todir_desc, sizeof(todir_desc));
3725 todir_desc.cd_parentcnid = 2;
3726
3727 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
3728 bzero(&to_desc, sizeof(to_desc));
3729 to_desc.cd_nameptr = (const u_int8_t *)delname;
3730 to_desc.cd_namelen = strlen(delname);
3731 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
3732 if (isdir) {
3733 to_desc.cd_flags = CD_ISDIR;
3734 }
3735 else {
3736 to_desc.cd_flags = 0;
3737 }
3738 to_desc.cd_cnid = cp->c_cnid;
3739
3740 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
3741 if (!skip_reserve) {
3742 if ((error = cat_preflight(hfsmp, CAT_RENAME, NULL, 0))) {
3743 hfs_systemfile_unlock(hfsmp, lockflags);
3744 goto out;
3745 }
3746 }
3747
3748 error = cat_rename(hfsmp, &desc, &todir_desc,
3749 &to_desc, (struct cat_desc *)NULL);
3750
3751 if (error == 0) {
3752 hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries++;
3753 if (isdir == 1) {
3754 INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
3755 }
3756 (void) cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
3757 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
3758
3759 /* Update the parent directory */
3760 if (dcp->c_entries > 0)
3761 dcp->c_entries--;
3762 if (isdir == 1) {
3763 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
3764 }
3765 dcp->c_dirchangecnt++;
3766 {
3767 struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
3768 extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
3769 }
3770 dcp->c_ctime = tv.tv_sec;
3771 dcp->c_mtime = tv.tv_sec;
3772 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
3773
3774 /* Update the file or directory's state */
3775 cp->c_flag |= C_DELETED;
3776 cp->c_ctime = tv.tv_sec;
3777 --cp->c_linkcount;
3778 (void) cat_update(hfsmp, &to_desc, &cp->c_attr, NULL, NULL);
3779 }
3780 hfs_systemfile_unlock(hfsmp, lockflags);
3781 if (error)
3782 goto out;
3783
3784 }
3785 else {
3786 /*
3787 * Nobody is using this item; we can safely remove everything.
3788 */
3789 struct filefork *temp_rsrc_fork = NULL;
3790 #if QUOTA
3791 off_t savedbytes;
3792 int blksize = hfsmp->blockSize;
3793 #endif
3794 u_int32_t fileid = cp->c_fileid;
3795
3796 /*
3797 * Figure out if we need to read the resource fork data into
3798 * core before wiping out the catalog record.
3799 *
3800 * 1) Must not be a directory
3801 * 2) cnode's c_rsrcfork ptr must be NULL.
3802 * 3) rsrc fork must have actual blocks
3803 */
3804 if ((isdir == 0) && (cp->c_rsrcfork == NULL) &&
3805 (cp->c_blocks - VTOF(vp)->ff_blocks)) {
3806 /*
3807 * The resource fork vnode & filefork did not exist.
3808 * Create a temporary one for use in this function only.
3809 */
3810 MALLOC_ZONE (temp_rsrc_fork, struct filefork *, sizeof (struct filefork), M_HFSFORK, M_WAITOK);
3811 bzero(temp_rsrc_fork, sizeof(struct filefork));
3812 temp_rsrc_fork->ff_cp = cp;
3813 rl_init(&temp_rsrc_fork->ff_invalidranges);
3814 }
3815
3816 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3817
3818 /* Look up the resource fork first, if necessary */
3819 if (temp_rsrc_fork) {
3820 error = cat_lookup (hfsmp, &desc, 1, 0, (struct cat_desc*) NULL,
3821 (struct cat_attr*) NULL, &temp_rsrc_fork->ff_data, NULL);
3822 if (error) {
3823 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
3824 hfs_systemfile_unlock (hfsmp, lockflags);
3825 goto out;
3826 }
3827 }
3828
3829 if (!skip_reserve) {
3830 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
3831 if (temp_rsrc_fork) {
3832 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
3833 }
3834 hfs_systemfile_unlock(hfsmp, lockflags);
3835 goto out;
3836 }
3837 }
3838
3839 error = cat_delete(hfsmp, &desc, &cp->c_attr);
3840
3841 if (error && error != ENXIO && error != ENOENT) {
3842 printf("hfs_removefile: deleting file %s (id=%d) vol=%s err=%d\n",
3843 cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, hfsmp->vcbVN, error);
3844 }
3845
3846 if (error == 0) {
3847 /* Update the parent directory */
3848 if (dcp->c_entries > 0)
3849 dcp->c_entries--;
3850 dcp->c_dirchangecnt++;
3851 {
3852 struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
3853 extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
3854 }
3855 dcp->c_ctime = tv.tv_sec;
3856 dcp->c_mtime = tv.tv_sec;
3857 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
3858 }
3859 hfs_systemfile_unlock(hfsmp, lockflags);
3860
3861 if (error) {
3862 if (temp_rsrc_fork) {
3863 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
3864 }
3865 goto out;
3866 }
3867
3868 /*
3869 * Now that we've wiped out the catalog record, the file effectively doesn't
3870 * exist anymore. So update the quota records to reflect the loss of the
3871 * data fork and the resource fork.
3872 */
3873 #if QUOTA
3874 if (cp->c_datafork->ff_blocks > 0) {
3875 savedbytes = ((off_t)cp->c_datafork->ff_blocks * (off_t)blksize);
3876 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3877 }
3878
3879 /*
3880 * We may have just deleted the catalog record for a resource fork even
3881 * though it did not exist in core as a vnode. However, just because there
3882 * was a resource fork pointer in the cnode does not mean that it had any blocks.
3883 */
3884 if (temp_rsrc_fork || cp->c_rsrcfork) {
3885 if (cp->c_rsrcfork) {
3886 if (cp->c_rsrcfork->ff_blocks > 0) {
3887 savedbytes = ((off_t)cp->c_rsrcfork->ff_blocks * (off_t)blksize);
3888 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3889 }
3890 }
3891 else {
3892 /* we must have used a temporary fork */
3893 savedbytes = ((off_t)temp_rsrc_fork->ff_blocks * (off_t)blksize);
3894 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3895 }
3896 }
3897
3898 if (hfsmp->hfs_flags & HFS_QUOTAS) {
3899 (void)hfs_chkiq(cp, -1, NOCRED, 0);
3900 }
3901 #endif
3902
3903 /*
3904 * If we didn't get any errors deleting the catalog entry, then go ahead
3905 * and release the backing store now. The filefork pointers are still valid.
3906 */
3907 if (temp_rsrc_fork) {
3908 error = hfs_release_storage (hfsmp, cp->c_datafork, temp_rsrc_fork, fileid);
3909 }
3910 else {
3911 /* if cp->c_rsrcfork == NULL, hfs_release_storage will skip over it. */
3912 error = hfs_release_storage (hfsmp, cp->c_datafork, cp->c_rsrcfork, fileid);
3913 }
3914 if (error) {
3915 /*
3916 * If we encountered an error updating the extents and bitmap,
3917 * mark the volume inconsistent. At this point, the catalog record has
3918 * already been deleted, so we can't recover it at this point. We need
3919 * to proceed and update the volume header and mark the cnode C_NOEXISTS.
3920 * The subsequent fsck should be able to recover the free space for us.
3921 */
3922 hfs_mark_volume_inconsistent(hfsmp);
3923 }
3924 else {
3925 /* reset update_vh to 0, since hfs_release_storage should have done it for us */
3926 update_vh = 0;
3927 }
3928
3929 /* Get rid of the temporary rsrc fork */
3930 if (temp_rsrc_fork) {
3931 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
3932 }
3933
3934 cp->c_flag |= C_NOEXISTS;
3935 cp->c_flag &= ~C_DELETED;
3936
3937 cp->c_touch_chgtime = TRUE; /* XXX needed ? */
3938 --cp->c_linkcount;
3939
3940 /*
3941 * We must never get a directory if we're in this else block. We could
3942 * accidentally drop the number of files in the volume header if we did.
3943 */
3944 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
3945
3946 }
3947
3948 //
3949 // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
3950 // we don't need to touch the document_id as it's handled by the rename code.
3951 // otherwise it's a normal remove and we need to save the document id in the
3952 // per thread struct and clear it from the cnode.
3953 //
3954 struct doc_tombstone *ut;
3955 ut = get_uthread_doc_tombstone();
3956 if (!error && !skip_reserve && (cp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, vp, cnp)) {
3957
3958 if (ut->t_lastop_document_id) {
3959 clear_tombstone_docid(ut, hfsmp, NULL);
3960 }
3961 save_tombstone(hfsmp, dvp, vp, cnp, 1);
3962
3963 }
3964
3965
3966 /*
3967 * All done with this cnode's descriptor...
3968 *
3969 * Note: all future catalog calls for this cnode must be by
3970 * fileid only. This is OK for HFS (which doesn't have file
3971 * thread records) since HFS doesn't support the removal of
3972 * busy files.
3973 */
3974 cat_releasedesc(&cp->c_desc);
3975
3976 out:
3977 if (error) {
3978 cp->c_flag &= ~C_DELETED;
3979 }
3980
3981 if (update_vh) {
3982 /*
3983 * If we bailed out earlier, we may need to update the volume header
3984 * to deal with the borrowed blocks accounting.
3985 */
3986 hfs_volupdate (hfsmp, VOL_UPDATE, 0);
3987 }
3988
3989 if (started_tr) {
3990 hfs_end_transaction(hfsmp);
3991 }
3992
3993 dcp->c_flag &= ~C_DIR_MODIFICATION;
3994 wakeup((caddr_t)&dcp->c_flag);
3995
3996 return (error);
3997 }
3998
3999
4000 __private_extern__ void
4001 replace_desc(struct cnode *cp, struct cat_desc *cdp)
4002 {
4003 // fixes 4348457 and 4463138
4004 if (&cp->c_desc == cdp) {
4005 return;
4006 }
4007
4008 /* First release allocated name buffer */
4009 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
4010 const u_int8_t *name = cp->c_desc.cd_nameptr;
4011
4012 cp->c_desc.cd_nameptr = 0;
4013 cp->c_desc.cd_namelen = 0;
4014 cp->c_desc.cd_flags &= ~CD_HASBUF;
4015 vfs_removename((const char *)name);
4016 }
4017 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
4018
4019 /* Cnode now owns the name buffer */
4020 cdp->cd_nameptr = 0;
4021 cdp->cd_namelen = 0;
4022 cdp->cd_flags &= ~CD_HASBUF;
4023 }
4024
4025
4026 /*
4027 * Rename a cnode.
4028 *
4029 * The VFS layer guarantees that:
4030 * - source and destination will either both be directories, or
4031 * both not be directories.
4032 * - all the vnodes are from the same file system
4033 *
4034 * When the target is a directory, HFS must ensure that it is empty.
4035 *
4036 * Note that this function requires up to 6 vnodes in order to work properly
4037 * if it is operating on files (and not on directories). This is because only
4038 * files can have resource forks, and we now require iocounts to be held on the
4039 * vnodes corresponding to the resource forks (if applicable) as well as
4040 * the files or directories undergoing rename. The problem with not holding
4041 * iocounts on the resource fork vnodes is that it can lead to a deadlock
4042 * situation: The rsrc fork of the source file may be recycled and reclaimed
4043 * in order to provide a vnode for the destination file's rsrc fork. Since
4044 * data and rsrc forks share the same cnode, we'd eventually try to lock the
4045 * source file's cnode in order to sync its rsrc fork to disk, but it's already
4046 * been locked. By taking the rsrc fork vnodes up front we ensure that they
4047 * cannot be recycled, and that the situation mentioned above cannot happen.
4048 */
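/*
 * Sketch (not part of the original source) of the iocount pattern the
 * comment above describes and the code below implements: capture the
 * rsrc vnode's vid while the cnode lock is still held, drop the locks,
 * then validate the vid when taking the iocount.
 */
#if 0	/* example only */
	tvp_rsrc = tcp->c_rsrc_vp;
	tvp_rsrc_vid = vnode_vid(tvp_rsrc);	/* safe: cnode lock still held */
	hfs_unlockfour(fdcp, fcp, tdcp, tcp);	/* drop cnode locks first */
	if (vnode_getwithvid(tvp_rsrc, tvp_rsrc_vid) != 0) {
		tvp_rsrc = NULL;		/* recycled underneath us; retry */
	}
#endif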
4049 int
4050 hfs_vnop_rename(ap)
4051 struct vnop_rename_args /* {
4052 struct vnode *a_fdvp;
4053 struct vnode *a_fvp;
4054 struct componentname *a_fcnp;
4055 struct vnode *a_tdvp;
4056 struct vnode *a_tvp;
4057 struct componentname *a_tcnp;
4058 vfs_context_t a_context;
4059 } */ *ap;
4060 {
4061 struct vnode *tvp = ap->a_tvp;
4062 struct vnode *tdvp = ap->a_tdvp;
4063 struct vnode *fvp = ap->a_fvp;
4064 struct vnode *fdvp = ap->a_fdvp;
4065 /*
4066 * Note that we only need locals for the target/destination's
4067 * resource fork vnode (and only if necessary). We don't care if the
4068 * source has a resource fork vnode or not.
4069 */
4070 struct vnode *tvp_rsrc = NULLVP;
4071 uint32_t tvp_rsrc_vid = 0;
4072 struct componentname *tcnp = ap->a_tcnp;
4073 struct componentname *fcnp = ap->a_fcnp;
4074 struct proc *p = vfs_context_proc(ap->a_context);
4075 struct cnode *fcp;
4076 struct cnode *fdcp;
4077 struct cnode *tdcp;
4078 struct cnode *tcp;
4079 struct cnode *error_cnode;
4080 struct cat_desc from_desc;
4081 struct cat_desc to_desc;
4082 struct cat_desc out_desc;
4083 struct hfsmount *hfsmp;
4084 cat_cookie_t cookie;
4085 int tvp_deleted = 0;
4086 int started_tr = 0, got_cookie = 0;
4087 int took_trunc_lock = 0;
4088 int lockflags;
4089 int error;
4090 time_t orig_from_ctime, orig_to_ctime;
4091 int emit_rename = 1;
4092 int emit_delete = 1;
4093 int is_tracked = 0;
4094 int unlocked;
4095
4096 orig_from_ctime = VTOC(fvp)->c_ctime;
4097 if (tvp && VTOC(tvp)) {
4098 orig_to_ctime = VTOC(tvp)->c_ctime;
4099 } else {
4100 orig_to_ctime = ~0;
4101 }
4102
4103 hfsmp = VTOHFS(tdvp);
4104 /*
4105 * Do special case checks here. If fvp == tvp then we need to check the
4106 * cnode with locks held.
4107 */
4108 if (fvp == tvp) {
4109 int is_hardlink = 0;
4110 /*
4111 * In this case, we do *NOT* ever emit a DELETE event.
4112 * We may not necessarily emit a RENAME event
4113 */
4114 emit_delete = 0;
4115 if ((error = hfs_lock(VTOC(fvp), HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
4116 return error;
4117 }
4118 /* Check to see if the item is a hardlink or not */
4119 is_hardlink = (VTOC(fvp)->c_flag & C_HARDLINK);
4120 hfs_unlock (VTOC(fvp));
4121
4122 /*
4123 * If the item is not a hardlink, then case sensitivity must be off; otherwise
4124 * two names would not resolve to the same cnode unless they were case variants.
4125 */
4126 if (is_hardlink) {
4127 emit_rename = 0;
4128 /*
4129 * Hardlinks are a little trickier. We only want to emit a rename event
4130 * if the item is a hardlink, the parent directories are the same, case sensitivity
4131 * is off, and the case folded names are the same. See the fvp == tvp case below for more
4132 * info.
4133 */
4134
4135 if ((fdvp == tdvp) && ((hfsmp->hfs_flags & HFS_CASE_SENSITIVE) == 0)) {
4136 if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
4137 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
4138 /* Then in this case only it is ok to emit a rename */
4139 emit_rename = 1;
4140 }
4141 }
4142 }
4143 }
4144 if (emit_rename) {
4145 /* c_bsdflags should only be assessed while holding the cnode lock.
4146 * This is not done consistently throughout the code and can result
4147 * in a race. This will be fixed via rdar://12181064
4148 */
4149 if (VTOC(fvp)->c_bsdflags & UF_TRACKED) {
4150 is_tracked = 1;
4151 }
4152 check_for_tracked_file(fvp, orig_from_ctime, NAMESPACE_HANDLER_RENAME_OP, NULL);
4153 }
4154
4155 if (tvp && VTOC(tvp)) {
4156 if (emit_delete) {
4157 check_for_tracked_file(tvp, orig_to_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
4158 }
4159 }
4160
4161 retry:
4162 /* When tvp exists, take the truncate lock for hfs_removefile(). */
4163 if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) {
4164 hfs_lock_truncate(VTOC(tvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4165 took_trunc_lock = 1;
4166 }
4167
4168 relock:
4169 error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL,
4170 HFS_EXCLUSIVE_LOCK, &error_cnode);
4171 if (error) {
4172 if (took_trunc_lock) {
4173 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
4174 took_trunc_lock = 0;
4175 }
4176
4177 /*
4178 * We hit an error path. If we were trying to re-acquire the locks
4179 * after coming through here once, we might have already obtained
4180 * an iocount on tvp's resource fork vnode. Drop that before dealing
4181 * with the failure. Note this is safe -- since we are in an
4182 * error handling path, we can't be holding the cnode locks.
4183 */
4184 if (tvp_rsrc) {
4185 vnode_put (tvp_rsrc);
4186 tvp_rsrc_vid = 0;
4187 tvp_rsrc = NULL;
4188 }
4189
4190 /*
4191 * tvp might no longer exist. If the cause of the lock failure
4192 * was tvp, then we can try again with tvp/tcp set to NULL.
4193 * This is ok because the vfs syscall will vnode_put the vnodes
4194 * after we return from hfs_vnop_rename.
4195 */
4196 if ((error == ENOENT) && (tvp != NULL) && (error_cnode == VTOC(tvp))) {
4197 tcp = NULL;
4198 tvp = NULL;
4199 goto retry;
4200 }
4201
4202 if (emit_rename && is_tracked) {
4203 resolve_nspace_item(fvp, NAMESPACE_HANDLER_RENAME_FAILED_OP | NAMESPACE_HANDLER_TRACK_EVENT);
4204 }
4205
4206 return (error);
4207 }
4208
4209 fdcp = VTOC(fdvp);
4210 fcp = VTOC(fvp);
4211 tdcp = VTOC(tdvp);
4212 tcp = tvp ? VTOC(tvp) : NULL;
4213
4214 //
4215 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
4216 //
4217 unlocked = 0;
4218 if ((fcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
4219 uint32_t newid;
4220
4221 hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
4222 unlocked = 1;
4223
4224 if (hfs_generate_document_id(hfsmp, &newid) == 0) {
4225 hfs_lock(fcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4226 ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id = newid;
4227 #if CONFIG_FSE
4228 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4229 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4230 FSE_ARG_INO, (ino64_t)0, // src inode #
4231 FSE_ARG_INO, (ino64_t)fcp->c_fileid, // dst inode #
4232 FSE_ARG_INT32, newid,
4233 FSE_ARG_DONE);
4234 #endif
4235 hfs_unlock(fcp);
4236 } else {
4237 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rename...
4238 }
4239
4240 //
4241 // check if we're going to need to fix tcp as well. if we aren't, go back and relock
4242 // everything. otherwise continue on and fix up tcp as well before relocking.
4243 //
4244 if (tcp == NULL || !(tcp->c_bsdflags & UF_TRACKED) || ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id != 0) {
4245 goto relock;
4246 }
4247 }
4248
4249 //
4250 // same thing for tcp if it's set
4251 //
4252 if (tcp && (tcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
4253 uint32_t newid;
4254
4255 if (!unlocked) {
4256 hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
4257 unlocked = 1;
4258 }
4259
4260 if (hfs_generate_document_id(hfsmp, &newid) == 0) {
4261 hfs_lock(tcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4262 ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id = newid;
4263 #if CONFIG_FSE
4264 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4265 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4266 FSE_ARG_INO, (ino64_t)0, // src inode #
4267 FSE_ARG_INO, (ino64_t)tcp->c_fileid, // dst inode #
4268 FSE_ARG_INT32, newid,
4269 FSE_ARG_DONE);
4270 #endif
4271 hfs_unlock(tcp);
4272 } else {
4273 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rename...
4274 }
4275
4276 // go back up and relock everything. next time through the if statement won't be true
4277 // and we'll skip over this block of code.
4278 goto relock;
4279 }
4280
4281
4282
4283 /*
4284 * Acquire iocounts on the destination's resource fork vnode
4285 * if necessary. If dst/src are files and the dst has a resource
4286 * fork vnode, then we need to try and acquire an iocount on the rsrc vnode.
4287 * If it does not exist, then we don't care and can skip it.
4288 */
4289 if ((vnode_isreg(fvp)) || (vnode_islnk(fvp))) {
4290 if ((tvp) && (tcp->c_rsrc_vp) && (tvp_rsrc == NULL)) {
4291 tvp_rsrc = tcp->c_rsrc_vp;
4292 /*
4293 * We can look at the vid here because we're holding the
4294 * cnode lock on the underlying cnode for this rsrc vnode.
4295 */
4296 tvp_rsrc_vid = vnode_vid (tvp_rsrc);
4297
4298 /* Unlock everything to acquire iocount on this rsrc vnode */
4299 if (took_trunc_lock) {
4300 hfs_unlock_truncate (VTOC(tvp), HFS_LOCK_DEFAULT);
4301 took_trunc_lock = 0;
4302 }
4303 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4304
4305 if (vnode_getwithvid (tvp_rsrc, tvp_rsrc_vid)) {
4306 /* iocount acquisition failed. Reset fields and start over.. */
4307 tvp_rsrc_vid = 0;
4308 tvp_rsrc = NULL;
4309 }
4310 goto retry;
4311 }
4312 }
4313
4314
4315
4316 /* Ensure we didn't race src or dst parent directories with rmdir. */
4317 if (fdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
4318 error = ENOENT;
4319 goto out;
4320 }
4321
4322 if (tdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
4323 error = ENOENT;
4324 goto out;
4325 }
4326
4327
4328 /* Check for a race against unlink. The hfs_valid_cnode checks validate
4329 * the parent/child relationship with fdcp and tdcp, as well as the
4330 * component name of the target cnodes.
4331 */
4332 if ((fcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, fdvp, fcnp, fcp->c_fileid, NULL, &error)) {
4333 error = ENOENT;
4334 goto out;
4335 }
4336
4337 if (tcp && ((tcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, tdvp, tcnp, tcp->c_fileid, NULL, &error))) {
4338 //
4339 // hmm, the destination vnode isn't valid any more.
4340 // in this case we can just drop him and pretend he
4341 // never existed in the first place.
4342 //
4343 if (took_trunc_lock) {
4344 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
4345 took_trunc_lock = 0;
4346 }
4347 error = 0;
4348
4349 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4350
4351 tcp = NULL;
4352 tvp = NULL;
4353
4354 // retry the locking with tvp null'ed out
4355 goto retry;
4356 }
4357
4358 fdcp->c_flag |= C_DIR_MODIFICATION;
4359 if (fdvp != tdvp) {
4360 tdcp->c_flag |= C_DIR_MODIFICATION;
4361 }
4362
4363 /*
4364 * Disallow renaming of a directory hard link if the source and
4365 * destination parent directories are different, or a directory whose
4366 * descendant is a directory hard link and one of the ancestors
4367 * of the destination directory is a directory hard link.
4368 */
4369 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
4370 if (fcp->c_flag & C_HARDLINK) {
4371 error = EPERM;
4372 goto out;
4373 }
4374 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
4375 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4376 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
4377 error = EPERM;
4378 hfs_systemfile_unlock(hfsmp, lockflags);
4379 goto out;
4380 }
4381 hfs_systemfile_unlock(hfsmp, lockflags);
4382 }
4383 }
4384
4385 /*
4386 * The following edge case is caught here:
4387 * (to cannot be a descendant of from)
4388 *
4389 * o fdvp
4390 * /
4391 * /
4392 * o fvp
4393 * \
4394 * \
4395 * o tdvp
4396 * /
4397 * /
4398 * o tvp
4399 */
4400 if (tdcp->c_parentcnid == fcp->c_fileid) {
4401 error = EINVAL;
4402 goto out;
4403 }
4404
4405 /*
4406 * The following two edge cases are caught here:
4407 * (note tvp is not empty)
4408 *
4409 * o tdvp o tdvp
4410 * / /
4411 * / /
4412 * o tvp tvp o fdvp
4413 * \ \
4414 * \ \
4415 * o fdvp o fvp
4416 * /
4417 * /
4418 * o fvp
4419 */
4420 if (tvp && vnode_isdir(tvp) && (tcp->c_entries != 0) && fvp != tvp) {
4421 error = ENOTEMPTY;
4422 goto out;
4423 }
4424
4425 /*
4426 * The following edge case is caught here:
4427 * (the from child and parent are the same)
4428 *
4429 * o tdvp
4430 * /
4431 * /
4432 * fdvp o fvp
4433 */
4434 if (fdvp == fvp) {
4435 error = EINVAL;
4436 goto out;
4437 }
4438
4439 /*
4440 * Make sure "from" vnode and its parent are changeable.
4441 */
4442 if ((fcp->c_bsdflags & (IMMUTABLE | APPEND)) || (fdcp->c_bsdflags & APPEND)) {
4443 error = EPERM;
4444 goto out;
4445 }
4446
4447 /*
4448 * If the destination parent directory is "sticky", then the
4449 * user must own the parent directory, or the destination of
4450 * the rename, otherwise the destination may not be changed
4451 * (except by root). This implements append-only directories.
4452 *
4453 * Note that checks for immutable and write access are done
4454 * by the call to hfs_removefile.
4455 */
4456 if (tvp && (tdcp->c_mode & S_ISTXT) &&
4457 (suser(vfs_context_ucred(tcnp->cn_context), NULL)) &&
4458 (kauth_cred_getuid(vfs_context_ucred(tcnp->cn_context)) != tdcp->c_uid) &&
4459 (hfs_owner_rights(hfsmp, tcp->c_uid, vfs_context_ucred(tcnp->cn_context), p, false)) ) {
4460 error = EPERM;
4461 goto out;
4462 }
4463
4464 /* Don't allow modification of the journal or journal_info_block */
4465 if (hfs_is_journal_file(hfsmp, fcp) ||
4466 (tcp && hfs_is_journal_file(hfsmp, tcp))) {
4467 error = EPERM;
4468 goto out;
4469 }
4470
4471 #if QUOTA
4472 if (tvp)
4473 (void)hfs_getinoquota(tcp);
4474 #endif
4475 /* Preflighting done, take fvp out of the name space. */
4476 cache_purge(fvp);
4477
4478 bzero(&from_desc, sizeof(from_desc));
4479 from_desc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
4480 from_desc.cd_namelen = fcnp->cn_namelen;
4481 from_desc.cd_parentcnid = fdcp->c_fileid;
4482 from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
4483 from_desc.cd_cnid = fcp->c_cnid;
4484
4485 bzero(&to_desc, sizeof(to_desc));
4486 to_desc.cd_nameptr = (const u_int8_t *)tcnp->cn_nameptr;
4487 to_desc.cd_namelen = tcnp->cn_namelen;
4488 to_desc.cd_parentcnid = tdcp->c_fileid;
4489 to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
4490 to_desc.cd_cnid = fcp->c_cnid;
4491
4492 if ((error = hfs_start_transaction(hfsmp)) != 0) {
4493 goto out;
4494 }
4495 started_tr = 1;
4496
4497 /* hfs_vnop_link() and hfs_vnop_rename() set kHFSHasChildLinkMask
4498 * inside a journal transaction and without holding a cnode lock.
4499 * As setting of this bit depends on being in journal transaction for
4500 * concurrency, check this bit again after we start journal transaction for rename
4501 * to ensure that this directory does not have any descendant that
4502 * is a directory hard link.
4503 */
4504 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
4505 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
4506 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4507 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
4508 error = EPERM;
4509 hfs_systemfile_unlock(hfsmp, lockflags);
4510 goto out;
4511 }
4512 hfs_systemfile_unlock(hfsmp, lockflags);
4513 }
4514 }
4515
4516 // if it's a hardlink then re-lookup the name so
4517 // that we get the correct cnid in from_desc (see
4518 // the comment in hfs_removefile for more details)
4519 //
4520 if (fcp->c_flag & C_HARDLINK) {
4521 struct cat_desc tmpdesc;
4522 cnid_t real_cnid;
4523
4524 tmpdesc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
4525 tmpdesc.cd_namelen = fcnp->cn_namelen;
4526 tmpdesc.cd_parentcnid = fdcp->c_fileid;
4527 tmpdesc.cd_hint = fdcp->c_childhint;
4528 tmpdesc.cd_flags = fcp->c_desc.cd_flags & CD_ISDIR;
4529 tmpdesc.cd_encoding = 0;
4530
4531 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4532
4533 if (cat_lookup(hfsmp, &tmpdesc, 0, 0, NULL, NULL, NULL, &real_cnid) != 0) {
4534 hfs_systemfile_unlock(hfsmp, lockflags);
4535 goto out;
4536 }
4537
4538 // use the real cnid instead of whatever happened to be there
4539 from_desc.cd_cnid = real_cnid;
4540 hfs_systemfile_unlock(hfsmp, lockflags);
4541 }
4542
4543 /*
4544 * Reserve some space in the Catalog file.
4545 */
4546 if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) {
4547 goto out;
4548 }
4549 got_cookie = 1;
4550
4551 /*
4552 * If the destination exists then it may need to be removed.
4553 *
4554 * Due to HFS's locking system, we should always move the
4555 * existing 'tvp' element to the hidden directory in hfs_vnop_rename.
4556 * Because the VNOP_LOOKUP call enters and exits the filesystem independently
4557 * of the actual vnop that it was trying to do (stat, link, readlink),
4558 * we must release the cnode lock of that element during the interim to
4559 * do MAC checking, vnode authorization, and other calls. In that time,
4560 * the item can be deleted (or renamed over). However, only in the rename
4561 * case is it inappropriate to return ENOENT from any of those calls. Either
4562 * the call should return information about the old element (stale), or get
4563 * information about the newer element that we are about to write in its place.
4564 *
4565 * HFS lookup has been modified to detect a rename and re-drive its
4566 * lookup internally. For other calls that have already succeeded in
4567 * their lookup call and are waiting to acquire the cnode lock in order
4568 * to proceed, that cnode lock will not fail due to the cnode being marked
4569 * C_NOEXISTS, because it won't have been marked as such. It will only
4570 * have C_DELETED. Thus, they will simply act on the stale open-unlinked
4571 * element. All future callers will get the new element.
4572 *
4573 * To implement this behavior, we pass the "only_unlink" argument to
4574 * hfs_removefile and hfs_removedir. This will result in the vnode acting
4575 * as though it is open-unlinked. Additionally, when we are done moving the
4576 * element to the hidden directory, we vnode_recycle the target so that it is
4577 * reclaimed as soon as possible. Reclaim and inactive are both
4578 * capable of clearing out unused blocks for an open-unlinked file or dir.
4579 */
4580 if (tvp) {
4581 //
4582 // if the destination has a document id, we need to preserve it
4583 //
4584 if (fvp != tvp) {
4585 uint32_t document_id;
4586 struct FndrExtendedDirInfo *ffip = (struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
4587 struct FndrExtendedDirInfo *tfip = (struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16);
4588
4589 if (ffip->document_id && tfip->document_id) {
4590 // both documents are tracked. only save a tombstone from tcp and do nothing else.
4591 save_tombstone(hfsmp, tdvp, tvp, tcnp, 0);
4592 } else {
4593 struct doc_tombstone *ut;
4594 ut = get_uthread_doc_tombstone();
4595
4596 document_id = tfip->document_id;
4597 tfip->document_id = 0;
4598
4599 if (document_id != 0) {
4600 // clear UF_TRACKED as well since tcp is now no longer tracked
4601 tcp->c_bsdflags &= ~UF_TRACKED;
4602 (void) cat_update(hfsmp, &tcp->c_desc, &tcp->c_attr, NULL, NULL);
4603 }
4604
4605 if (ffip->document_id == 0 && document_id != 0) {
4606 // printf("RENAME: preserving doc-id %d onto %s (from ino %d, to ino %d)\n", document_id, tcp->c_desc.cd_nameptr, tcp->c_desc.cd_cnid, fcp->c_desc.cd_cnid);
4607 fcp->c_bsdflags |= UF_TRACKED;
4608 ffip->document_id = document_id;
4609
4610 (void) cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
4611 #if CONFIG_FSE
4612 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4613 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4614 FSE_ARG_INO, (ino64_t)tcp->c_fileid, // src inode #
4615 FSE_ARG_INO, (ino64_t)fcp->c_fileid, // dst inode #
4616 FSE_ARG_INT32, (uint32_t)ffip->document_id,
4617 FSE_ARG_DONE);
4618 #endif
4619 } else if ((fcp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, fvp, fcnp)) {
4620
4621 if (ut->t_lastop_document_id) {
4622 clear_tombstone_docid(ut, hfsmp, NULL);
4623 }
4624 save_tombstone(hfsmp, fdvp, fvp, fcnp, 0);
4625
4626 //printf("RENAME: (dest-exists): saving tombstone doc-id %lld @ %s (ino %d)\n",
4627 // ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
4628 }
4629 }
4630 }
4631
4632 /*
4633 * When fvp matches tvp they could be case variants
4634 * or matching hard links.
4635 */
4636 if (fvp == tvp) {
4637 if (!(fcp->c_flag & C_HARDLINK)) {
4638 /*
4639 * If they're not hardlinks, then fvp == tvp must mean we
4640 * are using case-insensitive HFS because case-sensitive would
4641 * not use the same vnode for both. In this case we just update
4642 * the catalog for: a -> A
4643 */
4644 goto skip_rm; /* simple case variant */
4645
4646 }
4647 /* For all cases below, we must be using hardlinks */
4648 else if ((fdvp != tdvp) ||
4649 (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) {
4650 /*
4651 * If the parent directories are not the same, AND the two items
4652 * are hardlinks, posix says to do nothing:
4653 * dir1/fred <-> dir2/bob and the op was mv dir1/fred -> dir2/bob
4654 * We just return 0 in this case.
4655 *
4656 * If case sensitivity is on, and we are using hardlinks
4657 * then renaming is supposed to do nothing.
4658 * dir1/fred <-> dir2/FRED, and op == mv dir1/fred -> dir2/FRED
4659 */
4660 goto out; /* matching hardlinks, nothing to do */
4661
4662 } else if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
4663 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
4664 /*
4665 * If we get here, then the following must be true:
4666 * a) We are running case-insensitive HFS+.
4667 * b) Both paths 'fvp' and 'tvp' are in the same parent directory.
4668 * c) the two names are case-variants of each other.
4669 *
4670 * In this case, we are really only dealing with a single catalog record
4671 * whose name is being updated.
4672 *
4673 * op is dir1/fred -> dir1/FRED
4674 *
4675 * We need to special case the name matching, because if
4676 * dir1/fred <-> dir1/bob were the two links, and the
4677 * op was dir1/fred -> dir1/bob
4678 * That would fail/do nothing.
4679 */
4680 goto skip_rm; /* case-variant hardlink in the same dir */
4681 } else {
4682 goto out; /* matching hardlink, nothing to do */
4683 }
4684 }
4685
4686
4687 if (vnode_isdir(tvp)) {
4688 /*
4689 * hfs_removedir will eventually call hfs_removefile on the directory
4690 * we're working on, because only hfs_removefile does the renaming of the
4691 * item to the hidden directory. The directory will stay around in the
4692 * hidden directory with C_DELETED until it gets an inactive or a reclaim.
4693 * That way, we can destroy all of the EAs as needed and allow new ones to be
4694 * written.
4695 */
4696 error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE, 1);
4697 }
4698 else {
4699 error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0, NULL, 1);
4700
4701 /*
4702 * If the destination file had a resource fork vnode, then we need to get rid of
4703 * its blocks when there are no more references to it. Because the call to
4704 * hfs_removefile above always open-unlinks things, we need to force an inactive/reclaim
4705 * on the resource fork vnode, in order to prevent block leaks. Otherwise,
4706 * the resource fork vnode could prevent the data fork vnode from going out of scope
4707 * because it holds a v_parent reference on it. So we mark it for termination
4708 * with a call to vnode_recycle. hfs_vnop_reclaim has been modified so that it
4709 * can clean up the blocks of open-unlinked files and resource forks.
4710 *
4711 * We can safely call vnode_recycle on the resource fork because we took an iocount
4712 * reference on it at the beginning of the function.
4713 */
4714
4715 if ((error == 0) && (tcp->c_flag & C_DELETED) && (tvp_rsrc)) {
4716 vnode_recycle(tvp_rsrc);
4717 }
4718 }
4719
4720 if (error) {
4721 goto out;
4722 }
4723
4724 tvp_deleted = 1;
4725
4726 /* Mark 'tcp' as being deleted due to a rename */
4727 tcp->c_flag |= C_RENAMED;
4728
4729 /*
4730 * Aggressively mark tvp/tcp for termination to ensure that we recover all blocks
4731 * as quickly as possible.
4732 */
4733 vnode_recycle(tvp);
4734 } else {
4735 struct doc_tombstone *ut;
4736 ut = get_uthread_doc_tombstone();
4737
4738 //
4739 // There is nothing at the destination. If the file being renamed is
4740 // tracked, save a "tombstone" of the document_id. If the file is
4741 // not a tracked file, then see if it needs to inherit a tombstone.
4742 //
4743 // NOTE: we do not save a tombstone if the file being renamed begins
4744 // with "atmp" which is done to work-around AutoCad's bizarre
4745 // 5-step un-safe save behavior
4746 //
4747 if (fcp->c_bsdflags & UF_TRACKED) {
4748 if (should_save_docid_tombstone(ut, fvp, fcnp)) {
4749 save_tombstone(hfsmp, fdvp, fvp, fcnp, 0);
4750
4751 //printf("RENAME: (no dest): saving tombstone doc-id %lld @ %s (ino %d)\n",
4752 // ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
4753 } else {
4754 // intentionally do nothing
4755 }
4756 } else if ( ut->t_lastop_document_id != 0
4757 && tdvp == ut->t_lastop_parent
4758 && vnode_vid(tdvp) == ut->t_lastop_parent_vid
4759 && strcmp((char *)ut->t_lastop_filename, (char *)tcnp->cn_nameptr) == 0) {
4760
4761 //printf("RENAME: %s (ino %d) inheriting doc-id %lld\n", tcnp->cn_nameptr, fcp->c_desc.cd_cnid, ut->t_lastop_document_id);
4762 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
4763 fcp->c_bsdflags |= UF_TRACKED;
4764 fip->document_id = ut->t_lastop_document_id;
4765 cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
4766
4767 clear_tombstone_docid(ut, hfsmp, fcp); // will send the docid-changed fsevent
4768
4769 } else if (ut->t_lastop_document_id && should_save_docid_tombstone(ut, fvp, fcnp) && should_save_docid_tombstone(ut, tvp, tcnp)) {
4770 // no match, clear the tombstone
4771 //printf("RENAME: clearing the tombstone %lld @ %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
4772 clear_tombstone_docid(ut, hfsmp, NULL);
4773 }
4774
4775 }
4776 skip_rm:
4777 /*
4778 * All done with tvp and fvp.
4779 *
4780 * We also jump to this point if there was no destination observed during lookup and namei.
4781 * However, because only iocounts are held at the VFS layer, there is nothing preventing a
4782 * competing thread from racing us and creating a file or dir at the destination of this rename
4783 * operation. If this occurs, it may cause us to get a spurious EEXIST out of the cat_rename
4784 * call below. To preserve rename's atomicity, we need to signal VFS to re-drive the
4785 * namei/lookup and restart the rename operation. EEXIST is an allowable errno to be bubbled
4786 * out of the rename syscall, but not for this reason, since it is a synonym errno for ENOTEMPTY.
4787 * To signal VFS, we return ERECYCLE (which is also used for lookup restarts). This errno
4788 * will be swallowed and it will restart the operation.
4789 */
4790
4791 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
4792 error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);
4793 hfs_systemfile_unlock(hfsmp, lockflags);
4794
4795 if (error) {
4796 if (error == EEXIST) {
4797 error = ERECYCLE;
4798 }
4799 goto out;
4800 }
4801
4802 /* Invalidate negative cache entries in the destination directory */
4803 if (tdcp->c_flag & C_NEG_ENTRIES) {
4804 cache_purge_negatives(tdvp);
4805 tdcp->c_flag &= ~C_NEG_ENTRIES;
4806 }
4807
4808 /* Update cnode's catalog descriptor */
4809 replace_desc(fcp, &out_desc);
4810 fcp->c_parentcnid = tdcp->c_fileid;
4811 fcp->c_hint = 0;
4812
4813 /* Now indicate this cnode needs to have date-added written to the finderinfo */
4814 fcp->c_flag |= C_NEEDS_DATEADDED;
4815 (void) hfs_update (fvp, 0);
4816
4817
4818 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_RMDIR : VOL_RMFILE,
4819 (fdcp->c_cnid == kHFSRootFolderID));
4820 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_MKDIR : VOL_MKFILE,
4821 (tdcp->c_cnid == kHFSRootFolderID));
4822
4823 /* Update both parent directories. */
4824 if (fdvp != tdvp) {
4825 if (vnode_isdir(fvp)) {
4826 /* If the source directory has directory hard link
4827 * descendants, set the kHFSHasChildLinkBit in the
4828 * destination parent hierarchy
4829 */
4830 if ((fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) &&
4831 !(tdcp->c_attr.ca_recflags & kHFSHasChildLinkMask)) {
4832
4833 tdcp->c_attr.ca_recflags |= kHFSHasChildLinkMask;
4834
4835 error = cat_set_childlinkbit(hfsmp, tdcp->c_parentcnid);
4836 if (error) {
4837 printf ("hfs_vnop_rename: error updating parent chain for %u\n", tdcp->c_cnid);
4838 error = 0;
4839 }
4840 }
4841 INC_FOLDERCOUNT(hfsmp, tdcp->c_attr);
4842 DEC_FOLDERCOUNT(hfsmp, fdcp->c_attr);
4843 }
4844 tdcp->c_entries++;
4845 tdcp->c_dirchangecnt++;
4846 {
4847 struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)tdcp->c_finderinfo + 16);
4848 extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
4849 }
4850 if (fdcp->c_entries > 0)
4851 fdcp->c_entries--;
4852 fdcp->c_dirchangecnt++;
4853 fdcp->c_touch_chgtime = TRUE;
4854 fdcp->c_touch_modtime = TRUE;
4855
4856 fdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
4857 (void) hfs_update(fdvp, 0);
4858 }
4859 {
4860 struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)fdcp->c_finderinfo + 16);
4861 extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
4862 }
4863
4864 tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
4865 tdcp->c_touch_chgtime = TRUE;
4866 tdcp->c_touch_modtime = TRUE;
4867
4868 tdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
4869 (void) hfs_update(tdvp, 0);
4870
4871 /* Update the vnode's name now that the rename has completed. */
4872 vnode_update_identity(fvp, tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
4873 tcnp->cn_hash, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME));
4874
4875 /*
4876 * At this point, we may have a resource fork vnode attached to the
4877 * 'from' vnode. If it exists, we will want to update its name, because
4878 * it contains the old name + _PATH_RSRCFORKSPEC. ("/..namedfork/rsrc").
4879 *
4880 * Note that the only thing we need to update here is the name attached to
4881 * the vnode, since a resource fork vnode does not have a separate resource
4882 * cnode -- it's still 'fcp'.
4883 */
4884 if (fcp->c_rsrc_vp) {
4885 char* rsrc_path = NULL;
4886 int len;
4887
4888 /* Create a new temporary buffer that's going to hold the new name */
4889 MALLOC_ZONE (rsrc_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
4890 len = snprintf (rsrc_path, MAXPATHLEN, "%s%s", tcnp->cn_nameptr, _PATH_RSRCFORKSPEC);
4891 len = MIN(len, MAXPATHLEN);
4892
4893 /*
4894 * vnode_update_identity will do the following for us:
4895 * 1) release reference on the existing rsrc vnode's name.
4896 * 2) copy/insert new name into the name cache
4897 * 3) attach the new name to the resource vnode
4898 * 4) update the vnode's vid
4899 */
4900 vnode_update_identity (fcp->c_rsrc_vp, fvp, rsrc_path, len, 0, (VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE));
4901
4902 /* Free the memory associated with the resource fork's name */
4903 FREE_ZONE (rsrc_path, MAXPATHLEN, M_NAMEI);
4904 }
4905 out:
4906 if (got_cookie) {
4907 cat_postflight(hfsmp, &cookie, p);
4908 }
4909 if (started_tr) {
4910 hfs_end_transaction(hfsmp);
4911 }
4912
4913 fdcp->c_flag &= ~C_DIR_MODIFICATION;
4914 wakeup((caddr_t)&fdcp->c_flag);
4915 if (fdvp != tdvp) {
4916 tdcp->c_flag &= ~C_DIR_MODIFICATION;
4917 wakeup((caddr_t)&tdcp->c_flag);
4918 }
4919
4920 if (took_trunc_lock) {
4921 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
4922 }
4923
4924 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4925
4926 /* Now vnode_put the resource fork vnodes if necessary */
4927 if (tvp_rsrc) {
4928 vnode_put(tvp_rsrc);
4929 tvp_rsrc = NULL;
4930 }
4931
4932 /* After tvp is removed the only acceptable error is EIO */
4933 if (error && tvp_deleted)
4934 error = EIO;
4935
4936 if (emit_rename && is_tracked) {
4937 if (error) {
4938 resolve_nspace_item(fvp, NAMESPACE_HANDLER_RENAME_FAILED_OP | NAMESPACE_HANDLER_TRACK_EVENT);
4939 } else {
4940 resolve_nspace_item(fvp, NAMESPACE_HANDLER_RENAME_SUCCESS_OP | NAMESPACE_HANDLER_TRACK_EVENT);
4941 }
4942 }
4943
4944 return (error);
4945 }
4946
4947
4948 /*
4949 * Make a directory.
4950 */
4951 int
4952 hfs_vnop_mkdir(struct vnop_mkdir_args *ap)
4953 {
4954 /***** HACK ALERT ********/
4955 ap->a_cnp->cn_flags |= MAKEENTRY;
4956 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
4957 }
4958
4959
4960 /*
4961 * Create a symbolic link.
4962 */
4963 int
4964 hfs_vnop_symlink(struct vnop_symlink_args *ap)
4965 {
4966 struct vnode **vpp = ap->a_vpp;
4967 struct vnode *dvp = ap->a_dvp;
4968 struct vnode *vp = NULL;
4969 struct cnode *cp = NULL;
4970 struct hfsmount *hfsmp;
4971 struct filefork *fp;
4972 struct buf *bp = NULL;
4973 char *datap;
4974 int started_tr = 0;
4975 u_int32_t len;
4976 int error;
4977
4978 /* HFS standard disks don't support symbolic links */
4979 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord)
4980 return (ENOTSUP);
4981
4982 /* Check for empty target name */
4983 if (ap->a_target[0] == 0)
4984 return (EINVAL);
4985
4986 hfsmp = VTOHFS(dvp);
4987 len = strlen(ap->a_target);
4988
4989 /* Check for free space */
4990 if (((u_int64_t)hfs_freeblks(hfsmp, 0) * (u_int64_t)hfsmp->blockSize) < len) {
4991 return (ENOSPC);
4992 }
4993
4994 /* Create the vnode */
4995 ap->a_vap->va_mode |= S_IFLNK;
4996 if ((error = hfs_makenode(dvp, vpp, ap->a_cnp, ap->a_vap, ap->a_context))) {
4997 goto out;
4998 }
4999 vp = *vpp;
5000 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
5001 goto out;
5002 }
5003 cp = VTOC(vp);
5004 fp = VTOF(vp);
5005
5006 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
5007 goto out;
5008 }
5009
5010 #if QUOTA
5011 (void)hfs_getinoquota(cp);
5012 #endif /* QUOTA */
5013
5014 if ((error = hfs_start_transaction(hfsmp)) != 0) {
5015 goto out;
5016 }
5017 started_tr = 1;
5018
5019 /*
5020 * Allocate space for the link.
5021 *
5022 * Since we're already inside a transaction,
5023 * tell hfs_truncate to skip the ubc_setsize.
5024 *
5025 * Don't need truncate lock since a symlink is treated as a system file.
5026 */
5027 error = hfs_truncate(vp, len, IO_NOZEROFILL, 1, 0, ap->a_context);
5028
5029 /* On errors, remove the symlink file */
5030 if (error) {
5031 /*
5032 * End the transaction so we don't re-take the cnode lock
5033 * below while inside a transaction (lock order violation).
5034 */
5035 hfs_end_transaction(hfsmp);
5036
5037 /* hfs_removefile() requires holding the truncate lock */
5038 hfs_unlock(cp);
5039 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
5040 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
5041
5042 if (hfs_start_transaction(hfsmp) != 0) {
5043 started_tr = 0;
5044 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
5045 goto out;
5046 }
5047
5048 (void) hfs_removefile(dvp, vp, ap->a_cnp, 0, 0, 0, NULL, 0);
5049 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
5050 goto out;
5051 }
5052
5053 /* Write the link to disk */
5054 bp = buf_getblk(vp, (daddr64_t)0, roundup((int)fp->ff_size, hfsmp->hfs_physical_block_size),
5055 0, 0, BLK_META);
5056 if (hfsmp->jnl) {
5057 journal_modify_block_start(hfsmp->jnl, bp);
5058 }
5059 datap = (char *)buf_dataptr(bp);
5060 bzero(datap, buf_size(bp));
5061 bcopy(ap->a_target, datap, len);
5062
5063 if (hfsmp->jnl) {
5064 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
5065 } else {
5066 buf_bawrite(bp);
5067 }
5068 /*
5069 * We deferred the ubc_setsize for hfs_truncate
5070 * since we were inside a transaction.
5071 *
5072 * We don't need to drop the cnode lock here
5073 * since this is a symlink.
5074 */
5075 ubc_setsize(vp, len);
5076 out:
5077 if (started_tr)
5078 hfs_end_transaction(hfsmp);
5079 if ((cp != NULL) && (vp != NULL)) {
5080 hfs_unlock(cp);
5081 }
5082 if (error) {
5083 if (vp) {
5084 vnode_put(vp);
5085 }
5086 *vpp = NULL;
5087 }
5088 return (error);
5089 }
5090
5091
5092 /* structures to hold a "." or ".." directory entry */
5093 struct hfs_stddotentry {
5094 u_int32_t d_fileno; /* unique file number */
5095 u_int16_t d_reclen; /* length of this structure */
5096 u_int8_t d_type; /* dirent file type */
5097 u_int8_t d_namlen; /* len of filename */
5098 char d_name[4]; /* "." or ".." */
5099 };
5100
5101 struct hfs_extdotentry {
5102 u_int64_t d_fileno; /* unique file number */
5103 u_int64_t d_seekoff; /* seek offset (optional, used by servers) */
5104 u_int16_t d_reclen; /* length of this structure */
5105 u_int16_t d_namlen; /* len of filename */
5106 u_int8_t d_type; /* dirent file type */
5107 u_char d_name[3]; /* "." or ".." */
5108 };
5109
5110 typedef union {
5111 struct hfs_stddotentry std;
5112 struct hfs_extdotentry ext;
5113 } hfs_dotentry_t;
5114
5115 /*
5116 * hfs_vnop_readdir reads directory entries into the buffer pointed
5117 * to by uio, in a filesystem independent format. Up to uio_resid
5118 * bytes of data can be transferred. The data in the buffer is a
5119 * series of packed dirent structures where each one contains the
5120 * following entries:
5121 *
5122 * u_int32_t d_fileno; // file number of entry
5123 * u_int16_t d_reclen; // length of this record
5124 * u_int8_t d_type; // file type
5125 * u_int8_t d_namlen; // length of string in d_name
5126 * char d_name[MAXNAMELEN+1]; // null terminated file name
5127 *
5128 * The current position (uio_offset) refers to the next block of
5129 * entries. The offset can only be set to a value previously
5130 * returned by hfs_vnop_readdir or zero. This offset does not have
5131 * to match the number of bytes returned (in uio_resid).
5132 *
5133 * In fact, the offset used by HFS is essentially an index (26 bits)
5134 * with a tag (6 bits). The tag is for associating the next request
5135 * with the current request. This enables us to have multiple threads
5136 * reading the directory while the directory is also being modified.
5137 *
5138 * Each tag/index pair is tied to a unique directory hint. The hint
5139 * contains information (filename) needed to build the catalog b-tree
5140 * key for finding the next set of entries.
5141 *
5142 * If the directory is marked as deleted-but-in-use (cp->c_flag & C_DELETED),
5143 * do NOT synthesize entries for "." and "..".
5144 */
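/*
 * Illustrative sketch (not part of the original source): how the index/tag
 * cookie described above can be packed and unpacked. It assumes the
 * HFS_INDEX_MASK / HFS_INDEX_BITS macros used in the function body below
 * (26-bit index, tag in the upper bits of the 32-bit offset); the helper
 * names are hypothetical.
 */
#if 0	/* example only -- not part of the build */
static inline void
hfs_dir_cookie_unpack(off_t offset, int *index, unsigned int *tag)
{
	*index = (int)(offset & HFS_INDEX_MASK) - 2;	/* slots 0 and 1 are "." and ".." */
	*tag = (unsigned int)(offset & ~HFS_INDEX_MASK);	/* tag stays in the high bits */
}

static inline off_t
hfs_dir_cookie_pack(int index, unsigned int tag)
{
	/* 'tag' is already shifted into the high bits, mirroring the code below */
	return (off_t)((index + 2) | tag);
}
#endif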
5145 int
5146 hfs_vnop_readdir(ap)
5147 struct vnop_readdir_args /* {
5148 vnode_t a_vp;
5149 uio_t a_uio;
5150 int a_flags;
5151 int *a_eofflag;
5152 int *a_numdirent;
5153 vfs_context_t a_context;
5154 } */ *ap;
5155 {
5156 struct vnode *vp = ap->a_vp;
5157 uio_t uio = ap->a_uio;
5158 struct cnode *cp;
5159 struct hfsmount *hfsmp;
5160 directoryhint_t *dirhint = NULL;
5161 directoryhint_t localhint;
5162 off_t offset;
5163 off_t startoffset;
5164 int error = 0;
5165 int eofflag = 0;
5166 user_addr_t user_start = 0;
5167 user_size_t user_len = 0;
5168 int index;
5169 unsigned int tag;
5170 int items;
5171 int lockflags;
5172 int extended;
5173 int nfs_cookies;
5174 cnid_t cnid_hint = 0;
5175
5176 items = 0;
5177 startoffset = offset = uio_offset(uio);
5178 extended = (ap->a_flags & VNODE_READDIR_EXTENDED);
5179 nfs_cookies = extended && (ap->a_flags & VNODE_READDIR_REQSEEKOFF);
5180
5181 /* Sanity check the uio data. */
5182 if (uio_iovcnt(uio) > 1)
5183 return (EINVAL);
5184
5185 if (VTOC(vp)->c_bsdflags & UF_COMPRESSED) {
5186 int compressed = hfs_file_is_compressed(VTOC(vp), 0); /* 0 == take the cnode lock */
5187 if (VTOCMP(vp) != NULL && !compressed) {
5188 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
5189 if (error) {
5190 return error;
5191 }
5192 }
5193 }
5194
5195 cp = VTOC(vp);
5196 hfsmp = VTOHFS(vp);
5197
5198 /* Note that the dirhint calls require an exclusive lock. */
5199 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
5200 return (error);
5201
5202 /* Pick up cnid hint (if any). */
5203 if (nfs_cookies) {
5204 cnid_hint = (cnid_t)(uio_offset(uio) >> 32);
5205 uio_setoffset(uio, uio_offset(uio) & 0x00000000ffffffffLL);
5206 if (cnid_hint == INT_MAX) { /* searching past the last item */
5207 eofflag = 1;
5208 goto out;
5209 }
5210 }
5211 /*
5212 * Synthesize entries for "." and "..", unless the directory has
5213 * been deleted, but not closed yet (lazy delete in progress).
5214 */
5215 if (offset == 0 && !(cp->c_flag & C_DELETED)) {
5216 hfs_dotentry_t dotentry[2];
5217 size_t uiosize;
5218
5219 if (extended) {
5220 struct hfs_extdotentry *entry = &dotentry[0].ext;
5221
5222 entry->d_fileno = cp->c_cnid;
5223 entry->d_reclen = sizeof(struct hfs_extdotentry);
5224 entry->d_type = DT_DIR;
5225 entry->d_namlen = 1;
5226 entry->d_name[0] = '.';
5227 entry->d_name[1] = '\0';
5228 entry->d_name[2] = '\0';
5229 entry->d_seekoff = 1;
5230
5231 ++entry;
5232 entry->d_fileno = cp->c_parentcnid;
5233 entry->d_reclen = sizeof(struct hfs_extdotentry);
5234 entry->d_type = DT_DIR;
5235 entry->d_namlen = 2;
5236 entry->d_name[0] = '.';
5237 entry->d_name[1] = '.';
5238 entry->d_name[2] = '\0';
5239 entry->d_seekoff = 2;
5240 uiosize = 2 * sizeof(struct hfs_extdotentry);
5241 } else {
5242 struct hfs_stddotentry *entry = &dotentry[0].std;
5243
5244 entry->d_fileno = cp->c_cnid;
5245 entry->d_reclen = sizeof(struct hfs_stddotentry);
5246 entry->d_type = DT_DIR;
5247 entry->d_namlen = 1;
5248 *(int *)&entry->d_name[0] = 0;
5249 entry->d_name[0] = '.';
5250
5251 ++entry;
5252 entry->d_fileno = cp->c_parentcnid;
5253 entry->d_reclen = sizeof(struct hfs_stddotentry);
5254 entry->d_type = DT_DIR;
5255 entry->d_namlen = 2;
5256 *(int *)&entry->d_name[0] = 0;
5257 entry->d_name[0] = '.';
5258 entry->d_name[1] = '.';
5259 uiosize = 2 * sizeof(struct hfs_stddotentry);
5260 }
5261 if ((error = uiomove((caddr_t)&dotentry, uiosize, uio))) {
5262 goto out;
5263 }
5264 offset += 2;
5265 }
5266
5267 /* If there are no real entries then we're done. */
5268 if (cp->c_entries == 0) {
5269 error = 0;
5270 eofflag = 1;
5271 uio_setoffset(uio, offset);
5272 goto seekoffcalc;
5273 }
5274
5275 //
5276 // We have to lock the user's buffer here so that we won't
5277 // fault on it after we've acquired a shared lock on the
5278 // catalog file. The issue is that you can get a 3-way
5279 // deadlock if someone else starts a transaction and then
5280 // tries to lock the catalog file but can't because we're
5281 // here and we can't service our page fault because VM is
5282 // blocked trying to start a transaction as a result of
5283 // trying to free up pages for our page fault. It's messy
5284 // but it does happen on dual-processors that are paging
5285 // heavily (see radar 3082639 for more info). By locking
5286 // the buffer up-front we prevent ourselves from faulting
5287 // while holding the shared catalog file lock.
5288 //
5289 // Fortunately this and hfs_search() are the only two places
5290 // currently (10/30/02) that can fault on user data with a
5291 // shared lock on the catalog file.
5292 //
5293 if (hfsmp->jnl && uio_isuserspace(uio)) {
5294 user_start = uio_curriovbase(uio);
5295 user_len = uio_curriovlen(uio);
5296
5297 if ((error = vslock(user_start, user_len)) != 0) {
5298 user_start = 0;
5299 goto out;
5300 }
5301 }
5302 /* Convert offset into a catalog directory index. */
5303 index = (offset & HFS_INDEX_MASK) - 2;
5304 tag = offset & ~HFS_INDEX_MASK;
5305
5306 /* Lock catalog during cat_findname and cat_getdirentries. */
5307 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
5308
5309 /* When called from NFS, try and resolve a cnid hint. */
5310 if (nfs_cookies && cnid_hint != 0) {
5311 if (cat_findname(hfsmp, cnid_hint, &localhint.dh_desc) == 0) {
5312 if ( localhint.dh_desc.cd_parentcnid == cp->c_fileid) {
5313 localhint.dh_index = index - 1;
5314 localhint.dh_time = 0;
5315 bzero(&localhint.dh_link, sizeof(localhint.dh_link));
5316 dirhint = &localhint; /* don't forget to release the descriptor */
5317 } else {
5318 cat_releasedesc(&localhint.dh_desc);
5319 }
5320 }
5321 }
5322
5323 /* Get a directory hint (cnode must be locked exclusive) */
5324 if (dirhint == NULL) {
5325 dirhint = hfs_getdirhint(cp, ((index - 1) & HFS_INDEX_MASK) | tag, 0);
5326
5327 /* Hide tag from catalog layer. */
5328 dirhint->dh_index &= HFS_INDEX_MASK;
5329 if (dirhint->dh_index == HFS_INDEX_MASK) {
5330 dirhint->dh_index = -1;
5331 }
5332 }
5333
5334 if (index == 0) {
5335 dirhint->dh_threadhint = cp->c_dirthreadhint;
5336 }
5337 else {
5338 /*
5339 * If we have a non-zero index, there is a possibility that during the last
5340 * call to hfs_vnop_readdir we hit EOF for this directory. If that is the case
5341 * then we don't want to return any new entries for the caller. Just return 0
5342 * items, mark the eofflag, and bail out. Because we won't have done any work, the
5343 * code at the end of the function will release the dirhint for us.
5344 *
5345 * Don't forget to unlock the catalog lock on the way out, too.
5346 */
5347 if (dirhint->dh_desc.cd_flags & CD_EOF) {
5348 error = 0;
5349 eofflag = 1;
5350 uio_setoffset(uio, startoffset);
5351 hfs_systemfile_unlock (hfsmp, lockflags);
5352
5353 goto seekoffcalc;
5354 }
5355 }
5356
5357 /* Pack the buffer with dirent entries. */
5358 error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, ap->a_flags, &items, &eofflag);
5359
5360 if (index == 0 && error == 0) {
5361 cp->c_dirthreadhint = dirhint->dh_threadhint;
5362 }
5363
5364 hfs_systemfile_unlock(hfsmp, lockflags);
5365
5366 if (error != 0) {
5367 goto out;
5368 }
5369
5370 /* Get index to the next item */
5371 index += items;
5372
5373 if (items >= (int)cp->c_entries) {
5374 eofflag = 1;
5375 }
5376
5377 /* Convert catalog directory index back into an offset. */
5378 while (tag == 0)
5379 tag = (++cp->c_dirhinttag) << HFS_INDEX_BITS;
5380 uio_setoffset(uio, (index + 2) | tag);
5381 dirhint->dh_index |= tag;
5382
5383 seekoffcalc:
5384 cp->c_touch_acctime = TRUE;
5385
5386 if (ap->a_numdirent) {
5387 if (startoffset == 0)
5388 items += 2;
5389 *ap->a_numdirent = items;
5390 }
5391
5392 out:
5393 if (user_start) {
5394 vsunlock(user_start, user_len, TRUE);
5395 }
5396 /* If we didn't do anything then go ahead and dump the hint. */
5397 if ((dirhint != NULL) &&
5398 (dirhint != &localhint) &&
5399 (uio_offset(uio) == startoffset)) {
5400 hfs_reldirhint(cp, dirhint);
5401 eofflag = 1;
5402 }
5403 if (ap->a_eofflag) {
5404 *ap->a_eofflag = eofflag;
5405 }
5406 if (dirhint == &localhint) {
5407 cat_releasedesc(&localhint.dh_desc);
5408 }
5409 hfs_unlock(cp);
5410 return (error);
5411 }
5412
5413
5414 /*
5415 * Read contents of a symbolic link.
5416 */
5417 int
5418 hfs_vnop_readlink(ap)
5419 struct vnop_readlink_args /* {
5420 struct vnode *a_vp;
5421 struct uio *a_uio;
5422 vfs_context_t a_context;
5423 } */ *ap;
5424 {
5425 struct vnode *vp = ap->a_vp;
5426 struct cnode *cp;
5427 struct filefork *fp;
5428 int error;
5429
5430 if (!vnode_islnk(vp))
5431 return (EINVAL);
5432
5433 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
5434 return (error);
5435 cp = VTOC(vp);
5436 fp = VTOF(vp);
5437
5438 /* Zero length sym links are not allowed */
5439 if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
5440 error = EINVAL;
5441 goto exit;
5442 }
5443
5444 /* Cache the path so we don't waste buffer cache resources */
5445 if (fp->ff_symlinkptr == NULL) {
5446 struct buf *bp = NULL;
5447
5448 MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK);
5449 if (fp->ff_symlinkptr == NULL) {
5450 error = ENOMEM;
5451 goto exit;
5452 }
5453 error = (int)buf_meta_bread(vp, (daddr64_t)0,
5454 roundup((int)fp->ff_size, VTOHFS(vp)->hfs_physical_block_size),
5455 vfs_context_ucred(ap->a_context), &bp);
5456 if (error) {
5457 if (bp)
5458 buf_brelse(bp);
5459 if (fp->ff_symlinkptr) {
5460 FREE(fp->ff_symlinkptr, M_TEMP);
5461 fp->ff_symlinkptr = NULL;
5462 }
5463 goto exit;
5464 }
5465 bcopy((char *)buf_dataptr(bp), fp->ff_symlinkptr, (size_t)fp->ff_size);
5466
5467 if (VTOHFS(vp)->jnl && (buf_flags(bp) & B_LOCKED) == 0) {
5468 buf_markinvalid(bp); /* data no longer needed */
5469 }
5470 buf_brelse(bp);
5471 }
5472 error = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
5473
5474 /*
5475 * Keep track of blocks read
5476 */
5477 if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (error == 0)) {
5478
5479 /*
5480 * If this file hasn't been seen since the start of
5481 * the current sampling period then start over.
5482 */
5483 if (cp->c_atime < VTOHFS(vp)->hfc_timebase)
5484 VTOF(vp)->ff_bytesread = fp->ff_size;
5485 else
5486 VTOF(vp)->ff_bytesread += fp->ff_size;
5487
5488 // if (VTOF(vp)->ff_bytesread > fp->ff_size)
5489 // cp->c_touch_acctime = TRUE;
5490 }
5491
5492 exit:
5493 hfs_unlock(cp);
5494 return (error);
5495 }
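/*
 * Illustrative sketch (not part of the original source): hfs_vnop_readlink is
 * what ultimately services readlink(2) for an HFS+ symlink. A minimal
 * userspace caller, using a hypothetical path:
 */
#if 0	/* userspace example only -- not part of the kernel build */
#include <stdio.h>
#include <unistd.h>
#include <limits.h>

int
main(void)
{
	char target[PATH_MAX];
	ssize_t n = readlink("/Volumes/HFS/mylink", target, sizeof(target) - 1);

	if (n < 0) {
		perror("readlink");
		return 1;
	}
	target[n] = '\0';	/* readlink(2) does not NUL-terminate */
	printf("link target: %s\n", target);
	return 0;
}
#endif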
5496
5497
5498 /*
5499 * Get configurable pathname variables.
5500 */
5501 int
5502 hfs_vnop_pathconf(ap)
5503 struct vnop_pathconf_args /* {
5504 struct vnode *a_vp;
5505 int a_name;
5506 int *a_retval;
5507 vfs_context_t a_context;
5508 } */ *ap;
5509 {
5510
5511 int std_hfs = (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD);
5512 switch (ap->a_name) {
5513 case _PC_LINK_MAX:
5514 if (std_hfs == 0){
5515 *ap->a_retval = HFS_LINK_MAX;
5516 }
5517 #if CONFIG_HFS_STD
5518 else {
5519 *ap->a_retval = 1;
5520 }
5521 #endif
5522 break;
5523 case _PC_NAME_MAX:
5524 if (std_hfs == 0) {
5525 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
5526 }
5527 #if CONFIG_HFS_STD
5528 else {
5529 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
5530 }
5531 #endif
5532 break;
5533 case _PC_PATH_MAX:
5534 *ap->a_retval = PATH_MAX; /* 1024 */
5535 break;
5536 case _PC_PIPE_BUF:
5537 *ap->a_retval = PIPE_BUF;
5538 break;
5539 case _PC_CHOWN_RESTRICTED:
5540 *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */
5541 break;
5542 case _PC_NO_TRUNC:
5543 *ap->a_retval = 200112; /* _POSIX_NO_TRUNC */
5544 break;
5545 case _PC_NAME_CHARS_MAX:
5546 if (std_hfs == 0) {
5547 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
5548 }
5549 #if CONFIG_HFS_STD
5550 else {
5551 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
5552 }
5553 #endif
5554 break;
5555 case _PC_CASE_SENSITIVE:
5556 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE)
5557 *ap->a_retval = 1;
5558 else
5559 *ap->a_retval = 0;
5560 break;
5561 case _PC_CASE_PRESERVING:
5562 *ap->a_retval = 1;
5563 break;
5564 case _PC_FILESIZEBITS:
5565 /* number of bits to store max file size */
5566 if (std_hfs == 0) {
5567 *ap->a_retval = 64;
5568 }
5569 #if CONFIG_HFS_STD
5570 else {
5571 *ap->a_retval = 32;
5572 }
5573 #endif
5574 break;
5575 case _PC_XATTR_SIZE_BITS:
5576 /* Number of bits to store maximum extended attribute size */
5577 *ap->a_retval = HFS_XATTR_SIZE_BITS;
5578 break;
5579 default:
5580 return (EINVAL);
5581 }
5582
5583 return (0);
5584 }
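/*
 * Illustrative sketch (not part of the original source): the values returned
 * above are what userspace sees through pathconf(2). A minimal userspace
 * query, assuming a hypothetical HFS+ volume mounted at "/Volumes/HFS":
 */
#if 0	/* userspace example only -- not part of the kernel build */
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	long name_max = pathconf("/Volumes/HFS", _PC_NAME_MAX);
	long case_sensitive = pathconf("/Volumes/HFS", _PC_CASE_SENSITIVE);

	if (name_max == -1 || case_sensitive == -1) {
		perror("pathconf");
		return 1;
	}
	/* On HFS+ NAME_MAX is 255; case-sensitive is 1 only on HFSX volumes. */
	printf("NAME_MAX=%ld case-sensitive=%ld\n", name_max, case_sensitive);
	return 0;
}
#endif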
5585
5586
5587 /*
5588 * Update a cnode's on-disk metadata.
5589 *
5590 * If waitfor is set, then wait for the disk write of
5591 * the node to complete.
5592 *
5593 * The cnode must be locked exclusive
5594 */
5595 int
5596 hfs_update(struct vnode *vp, __unused int waitfor)
5597 {
5598 struct cnode *cp = VTOC(vp);
5599 struct proc *p;
5600 struct cat_fork *dataforkp = NULL;
5601 struct cat_fork *rsrcforkp = NULL;
5602 struct cat_fork datafork;
5603 struct cat_fork rsrcfork;
5604 struct hfsmount *hfsmp;
5605 int lockflags;
5606 int error;
5607 uint32_t tstate = 0;
5608
5609 p = current_proc();
5610 hfsmp = VTOHFS(vp);
5611
5612 if (((vnode_issystem(vp) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))) ||
5613 hfsmp->hfs_catalog_vp == NULL){
5614 return (0);
5615 }
5616 if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (cp->c_mode == 0)) {
5617 cp->c_flag &= ~C_MODIFIED;
5618 cp->c_touch_acctime = 0;
5619 cp->c_touch_chgtime = 0;
5620 cp->c_touch_modtime = 0;
5621 return (0);
5622 }
5623 if (kdebug_enable) {
5624 if (cp->c_touch_acctime)
5625 tstate |= DBG_HFS_UPDATE_ACCTIME;
5626 if (cp->c_touch_modtime)
5627 tstate |= DBG_HFS_UPDATE_MODTIME;
5628 if (cp->c_touch_chgtime)
5629 tstate |= DBG_HFS_UPDATE_CHGTIME;
5630
5631 if (cp->c_flag & C_MODIFIED)
5632 tstate |= DBG_HFS_UPDATE_MODIFIED;
5633 if (cp->c_flag & C_FORCEUPDATE)
5634 tstate |= DBG_HFS_UPDATE_FORCE;
5635 if (cp->c_flag & C_NEEDS_DATEADDED)
5636 tstate |= DBG_HFS_UPDATE_DATEADDED;
5637 }
5638 hfs_touchtimes(hfsmp, cp);
5639
5640 /* Nothing to update. */
5641 if ((cp->c_flag & (C_MODIFIED | C_FORCEUPDATE)) == 0) {
5642 return (0);
5643 }
5644
5645 if (cp->c_datafork)
5646 dataforkp = &cp->c_datafork->ff_data;
5647 if (cp->c_rsrcfork)
5648 rsrcforkp = &cp->c_rsrcfork->ff_data;
5649
5650 /*
5651 * For delayed allocations updates are
5652 * postponed until an fsync or the file
5653 * gets written to disk.
5654 *
5655 * Deleted files can defer meta data updates until inactive.
5656 *
5657 * If we're ever called with the C_FORCEUPDATE flag though
5658 * we have to do the update.
5659 */
5660 if (ISSET(cp->c_flag, C_FORCEUPDATE) == 0 &&
5661 (ISSET(cp->c_flag, C_DELETED) ||
5662 (dataforkp && cp->c_datafork->ff_unallocblocks) ||
5663 (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks))) {
5664 // cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE);
5665 cp->c_flag |= C_MODIFIED;
5666
5667 return (0);
5668 }
5669
5670 KERNEL_DEBUG_CONSTANT(0x3018000 | DBG_FUNC_START, vp, tstate, 0, 0, 0);
5671
5672 if ((error = hfs_start_transaction(hfsmp)) != 0) {
5673
5674 KERNEL_DEBUG_CONSTANT(0x3018000 | DBG_FUNC_END, vp, tstate, error, -1, 0);
5675 return error;
5676 }
5677
5678 /*
5679 * Modify the values passed to cat_update based on whether or not
5680 * the file has invalid ranges or borrowed blocks.
5681 */
5682 if (dataforkp) {
5683 off_t numbytes = 0;
5684
5685 /* copy the datafork into a temporary copy so we don't pollute the cnode's */
5686 bcopy(dataforkp, &datafork, sizeof(datafork));
5687 dataforkp = &datafork;
5688
5689 /*
5690 * If there are borrowed blocks, ensure that they are subtracted
5691 * from the total block count before writing the cnode entry to disk.
5692 * Only extents that have actually been marked allocated in the bitmap
5693 * should be reflected in the total block count for this fork.
5694 */
5695 if (cp->c_datafork->ff_unallocblocks != 0) {
5696 // make sure that we don't assign a negative block count
5697 if (cp->c_datafork->ff_blocks < cp->c_datafork->ff_unallocblocks) {
5698 panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
5699 cp->c_datafork->ff_blocks, cp->c_datafork->ff_unallocblocks);
5700 }
5701
5702 /* Also cap the LEOF to the total number of bytes that are allocated. */
5703 datafork.cf_blocks = (cp->c_datafork->ff_blocks - cp->c_datafork->ff_unallocblocks);
5704 datafork.cf_size = datafork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
5705 }
5706
5707 /*
5708 * For files with invalid ranges (holes) the on-disk
5709 * field representing the size of the file (cf_size)
5710 * must be no larger than the start of the first hole.
5711 * However, note that if the first invalid range exists
5712 * solely within borrowed blocks, then our LEOF and block
5713 * count should both be zero. As a result, set it to the
5714 * min of the current cf_size and the start of the first
5715 * invalid range, because it may have already been reduced
5716 * to zero by the borrowed blocks check above.
5717 */
5718 if (!TAILQ_EMPTY(&cp->c_datafork->ff_invalidranges)) {
5719 numbytes = TAILQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start;
5720 datafork.cf_size = MIN((numbytes), (datafork.cf_size));
5721 }
5722 }
5723
5724 /*
5725 * For resource forks with delayed allocations, make sure
5726 * the block count and file size match the number of blocks
5727 * actually allocated to the file on disk.
5728 */
5729 if (rsrcforkp && (cp->c_rsrcfork->ff_unallocblocks != 0)) {
5730 bcopy(rsrcforkp, &rsrcfork, sizeof(rsrcfork));
5731 rsrcfork.cf_blocks = (cp->c_rsrcfork->ff_blocks - cp->c_rsrcfork->ff_unallocblocks);
5732 rsrcfork.cf_size = rsrcfork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
5733 rsrcforkp = &rsrcfork;
5734 }
5735 if (kdebug_enable) {
5736 long dbg_parms[NUMPARMS];
5737 int dbg_namelen;
5738
5739 dbg_namelen = NUMPARMS * sizeof(long);
5740 vn_getpath(vp, (char *)dbg_parms, &dbg_namelen);
5741
5742 if (dbg_namelen < (int)sizeof(dbg_parms))
5743 memset((char *)dbg_parms + dbg_namelen, 0, sizeof(dbg_parms) - dbg_namelen);
5744
5745 kdebug_lookup_gen_events(dbg_parms, dbg_namelen, (void *)vp, TRUE);
5746 }
5747
5748 /*
5749 * Lock the Catalog b-tree file.
5750 */
5751 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
5752
5753 /* XXX - waitfor is not enforced */
5754 error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);
5755
5756 hfs_systemfile_unlock(hfsmp, lockflags);
5757
5758 /* After the updates are finished, clear the flags */
5759 cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE);
5760
5761 hfs_end_transaction(hfsmp);
5762
5763 KERNEL_DEBUG_CONSTANT(0x3018000 | DBG_FUNC_END, vp, tstate, error, 0, 0);
5764
5765 return (error);
5766 }
5767
5768 /*
5769 * Allocate a new node
5770 * Note - Function does not create and return a vnode for whiteout creation.
5771 */
5772 int
5773 hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
5774 struct vnode_attr *vap, vfs_context_t ctx)
5775 {
5776 struct cnode *cp = NULL;
5777 struct cnode *dcp = NULL;
5778 struct vnode *tvp;
5779 struct hfsmount *hfsmp;
5780 struct cat_desc in_desc, out_desc;
5781 struct cat_attr attr;
5782 struct timeval tv;
5783 int lockflags;
5784 int error, started_tr = 0;
5785 enum vtype vnodetype;
5786 int mode;
5787 int newvnode_flags = 0;
5788 u_int32_t gnv_flags = 0;
5789 int protectable_target = 0;
5790 int nocache = 0;
5791
5792 #if CONFIG_PROTECT
5793 struct cprotect *entry = NULL;
5794 int32_t cp_class = -1;
5795 if (VATTR_IS_ACTIVE(vap, va_dataprotect_class)) {
5796 cp_class = (int32_t)vap->va_dataprotect_class;
5797 }
5798 int protected_mount = 0;
5799 #endif
5800
5801
5802 if ((error = hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
5803 return (error);
5804
5805 /* set the cnode pointer only after successfully acquiring lock */
5806 dcp = VTOC(dvp);
5807
5808 /* Don't allow creation of new entries in open-unlinked directories */
5809 if ((error = hfs_checkdeleted(dcp))) {
5810 hfs_unlock(dcp);
5811 return error;
5812 }
5813
5814 dcp->c_flag |= C_DIR_MODIFICATION;
5815
5816 hfsmp = VTOHFS(dvp);
5817
5818 *vpp = NULL;
5819 tvp = NULL;
5820 out_desc.cd_flags = 0;
5821 out_desc.cd_nameptr = NULL;
5822
5823 vnodetype = vap->va_type;
5824 if (vnodetype == VNON)
5825 vnodetype = VREG;
5826 mode = MAKEIMODE(vnodetype, vap->va_mode);
5827
5828 if (S_ISDIR (mode) || S_ISREG (mode)) {
5829 protectable_target = 1;
5830 }
5831
5832
5833 /* Check if we're out of usable disk space. */
5834 if ((hfs_freeblks(hfsmp, 1) == 0) && (vfs_context_suser(ctx) != 0)) {
5835 error = ENOSPC;
5836 goto exit;
5837 }
5838
5839 microtime(&tv);
5840
5841 /* Setup the default attributes */
5842 bzero(&attr, sizeof(attr));
5843 attr.ca_mode = mode;
5844 attr.ca_linkcount = 1;
5845 if (VATTR_IS_ACTIVE(vap, va_rdev)) {
5846 attr.ca_rdev = vap->va_rdev;
5847 }
5848 if (VATTR_IS_ACTIVE(vap, va_create_time)) {
5849 VATTR_SET_SUPPORTED(vap, va_create_time);
5850 attr.ca_itime = vap->va_create_time.tv_sec;
5851 } else {
5852 attr.ca_itime = tv.tv_sec;
5853 }
5854 #if CONFIG_HFS_STD
5855 if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
5856 attr.ca_itime += 3600; /* Same as what hfs_update does */
5857 }
5858 #endif
5859 attr.ca_atime = attr.ca_ctime = attr.ca_mtime = attr.ca_itime;
5860 attr.ca_atimeondisk = attr.ca_atime;
5861 if (VATTR_IS_ACTIVE(vap, va_flags)) {
5862 VATTR_SET_SUPPORTED(vap, va_flags);
5863 attr.ca_flags = vap->va_flags;
5864 }
5865
5866 /*
5867 * HFS+ only: all files get ThreadExists
5868 * HFSX only: dirs get HasFolderCount
5869 */
5870 if (!(hfsmp->hfs_flags & HFS_STANDARD)) {
5871 if (vnodetype == VDIR) {
5872 if (hfsmp->hfs_flags & HFS_FOLDERCOUNT)
5873 attr.ca_recflags = kHFSHasFolderCountMask;
5874 } else {
5875 attr.ca_recflags = kHFSThreadExistsMask;
5876 }
5877 }
5878
5879 #if CONFIG_PROTECT
5880 if (cp_fs_protected(hfsmp->hfs_mp)) {
5881 protected_mount = 1;
5882 }
5883 /*
5884 * On a content-protected HFS+/HFSX filesystem, files and directories
5885 * cannot be created without atomically setting/creating the EA that
5886 * contains the protection class metadata and keys at the same time, in
5887 * the same transaction. As a result, pre-set the "EAs exist" flag
5888 * on the cat_attr for protectable catalog record creations. This will
5889 * cause the cnode creation routine in hfs_getnewvnode to mark the cnode
5890 * as having EAs.
5891 */
5892 if ((protected_mount) && (protectable_target)) {
5893 attr.ca_recflags |= kHFSHasAttributesMask;
5894 /* delay entering in the namecache */
5895 nocache = 1;
5896 }
5897 #endif
5898
5899
5900 /*
5901 * Add the date added to the item. See above, as
5902 * all of the dates are set to the itime.
5903 */
5904 hfs_write_dateadded (&attr, attr.ca_atime);
5905
5906 /* Initialize the gen counter to 1 */
5907 hfs_write_gencount(&attr, (uint32_t)1);
5908
5909 attr.ca_uid = vap->va_uid;
5910 attr.ca_gid = vap->va_gid;
5911 VATTR_SET_SUPPORTED(vap, va_mode);
5912 VATTR_SET_SUPPORTED(vap, va_uid);
5913 VATTR_SET_SUPPORTED(vap, va_gid);
5914
5915 #if QUOTA
5916 /* check to see if this node's creation would cause us to go over
5917 * quota. If so, abort this operation.
5918 */
5919 if (hfsmp->hfs_flags & HFS_QUOTAS) {
5920 if ((error = hfs_quotacheck(hfsmp, 1, attr.ca_uid, attr.ca_gid,
5921 vfs_context_ucred(ctx)))) {
5922 goto exit;
5923 }
5924 }
5925 #endif
5926
5927
5928 /* Tag symlinks with a type and creator. */
5929 if (vnodetype == VLNK) {
5930 struct FndrFileInfo *fip;
5931
5932 fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
5933 fip->fdType = SWAP_BE32(kSymLinkFileType);
5934 fip->fdCreator = SWAP_BE32(kSymLinkCreator);
5935 }
5936 if (cnp->cn_flags & ISWHITEOUT)
5937 attr.ca_flags |= UF_OPAQUE;
5938
5939 /* Setup the descriptor */
5940 in_desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
5941 in_desc.cd_namelen = cnp->cn_namelen;
5942 in_desc.cd_parentcnid = dcp->c_fileid;
5943 in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
5944 in_desc.cd_hint = dcp->c_childhint;
5945 in_desc.cd_encoding = 0;
5946
5947 #if CONFIG_PROTECT
5948 /*
5949 * To preserve file creation atomicity with regards to the content protection EA,
5950 * we must create the file in the catalog and then write out its EA in the same
5951 * transaction.
5952 *
5953 * We only denote the target class in this EA; key generation is not completed
5954 * until the file has been inserted into the catalog and will be done
5955 * in a separate transaction.
5956 */
5957 if ((protected_mount) && (protectable_target)) {
5958 error = cp_setup_newentry(hfsmp, dcp, cp_class, attr.ca_mode, &entry);
5959 if (error) {
5960 goto exit;
5961 }
5962 }
5963 #endif
5964
5965 if ((error = hfs_start_transaction(hfsmp)) != 0) {
5966 goto exit;
5967 }
5968 started_tr = 1;
5969
5970 // have to also lock the attribute file because cat_create() needs
5971 // to check that any fileID it wants to use does not have orphaned
5972 // attributes in it.
5973 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
5974 cnid_t new_id;
5975
5976 /* Reserve some space in the Catalog file. */
5977 if ((error = cat_preflight(hfsmp, CAT_CREATE, NULL, 0))) {
5978 hfs_systemfile_unlock(hfsmp, lockflags);
5979 goto exit;
5980 }
5981
5982 if ((error = cat_acquire_cnid(hfsmp, &new_id))) {
5983 hfs_systemfile_unlock (hfsmp, lockflags);
5984 goto exit;
5985 }
5986
5987 error = cat_create(hfsmp, new_id, &in_desc, &attr, &out_desc);
5988 if (error == 0) {
5989 /* Update the parent directory */
5990 dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
5991 dcp->c_entries++;
5992 {
5993 struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
5994 extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
5995 }
5996 if (vnodetype == VDIR) {
5997 INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
5998 }
5999 dcp->c_dirchangecnt++;
6000 {
6001 struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)((u_int8_t*)dcp->c_finderinfo + 16);
6002 extinfo->write_gen_counter = OSSwapHostToBigInt32(OSSwapBigToHostInt32(extinfo->write_gen_counter) + 1);
6003 }
6004 dcp->c_ctime = tv.tv_sec;
6005 dcp->c_mtime = tv.tv_sec;
6006 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
6007
6008 #if CONFIG_PROTECT
6009 /*
6010 * If we are creating a content protected file, now is when
6011 * we create the EA. We must create it in the same transaction
6012 * that creates the file. We can also guarantee that the file
6013 * MUST exist because we are still holding the catalog lock
6014 * at this point.
6015 */
6016 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
6017 error = cp_setxattr (NULL, entry, hfsmp, attr.ca_fileid, XATTR_CREATE);
6018
6019 if (error) {
6020 int delete_err;
6021 /*
6022 * If we fail the EA creation, then we need to delete the file.
6023 * Luckily, we are still holding all of the right locks.
6024 */
6025 delete_err = cat_delete (hfsmp, &out_desc, &attr);
6026 if (delete_err == 0) {
6027 /* Update the parent directory */
6028 if (dcp->c_entries > 0)
6029 dcp->c_entries--;
6030 dcp->c_dirchangecnt++;
6031 dcp->c_ctime = tv.tv_sec;
6032 dcp->c_mtime = tv.tv_sec;
6033 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
6034 }
6035
6036 /* Emit EINVAL if we fail to create the EA */
6037 error = EINVAL;
6038 }
6039 }
6040 #endif
6041 }
6042 hfs_systemfile_unlock(hfsmp, lockflags);
6043 if (error)
6044 goto exit;
6045
6046 /* Invalidate negative cache entries in the directory */
6047 if (dcp->c_flag & C_NEG_ENTRIES) {
6048 cache_purge_negatives(dvp);
6049 dcp->c_flag &= ~C_NEG_ENTRIES;
6050 }
6051
6052 hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
6053 (dcp->c_cnid == kHFSRootFolderID));
6054
6055 // XXXdbg
6056 // have to end the transaction here before we call hfs_getnewvnode()
6057 // because that can cause us to try and reclaim a vnode on a different
6058 // file system which could cause us to start a transaction which can
6059 // deadlock with someone on that other file system (since we could be
6060 // holding two transaction locks as well as various vnodes and we did
6061 // not obtain the locks on them in the proper order).
6062 //
6063 // NOTE: this means that if the quota check fails or we have to update
6064 // the change time on a block-special device that those changes
6065 // will happen as part of independent transactions.
6066 //
6067 if (started_tr) {
6068 hfs_end_transaction(hfsmp);
6069 started_tr = 0;
6070 }
6071
6072 #if CONFIG_PROTECT
6073 /*
6074 * At this point, we must have encountered success with writing the EA.
6075 * Destroy our temporary cprotect (which had no keys).
6076 */
6077
6078 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
6079 cp_entry_destroy (entry);
6080 entry = NULL;
6081 }
6082 #endif
6083
6084 /* Do not create vnode for whiteouts */
6085 if (S_ISWHT(mode)) {
6086 goto exit;
6087 }
6088
6089 gnv_flags |= GNV_CREATE;
6090 if (nocache) {
6091 gnv_flags |= GNV_NOCACHE;
6092 }
6093
6094 /*
6095 * Create a vnode for the object just created.
6096 *
6097 * NOTE: Maintaining the cnode lock on the parent directory is important,
6098 * as it prevents race conditions where other threads want to look up entries
6099 * in the directory and/or add things as we are in the process of creating
6100 * the vnode below. However, this has the potential for causing a
6101 * double lock panic when dealing with shadow files on an HFS boot partition.
6102 * The panic could occur if we are not cleaning up after ourselves properly
6103 * when done with a shadow file or in the error cases. The error would occur if we
6104 * try to create a new vnode, and then end up reclaiming another shadow vnode to
6105 * create the new one. However, if everything is working properly, this should
6106 * be a non-issue as we would never enter that reclaim codepath.
6107 *
6108 * The cnode is locked on successful return.
6109 */
6110 error = hfs_getnewvnode(hfsmp, dvp, cnp, &out_desc, gnv_flags, &attr,
6111 NULL, &tvp, &newvnode_flags);
6112 if (error)
6113 goto exit;
6114
6115 cp = VTOC(tvp);
6116
6117 struct doc_tombstone *ut;
6118 ut = get_uthread_doc_tombstone();
6119 if ( ut->t_lastop_document_id != 0
6120 && ut->t_lastop_parent == dvp
6121 && ut->t_lastop_parent_vid == vnode_vid(dvp)
6122 && strcmp((char *)ut->t_lastop_filename, (char *)cp->c_desc.cd_nameptr) == 0) {
6123 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
6124
6125 //printf("CREATE: preserving doc-id %lld on %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
6126 fip->document_id = (uint32_t)(ut->t_lastop_document_id & 0xffffffff);
6127
6128 cp->c_bsdflags |= UF_TRACKED;
6129 // mark the cnode dirty
6130 cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
6131
6132 if ((error = hfs_start_transaction(hfsmp)) == 0) {
6133 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
6134
6135 (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
6136
6137 hfs_systemfile_unlock (hfsmp, lockflags);
6138 (void) hfs_end_transaction(hfsmp);
6139 }
6140
6141 clear_tombstone_docid(ut, hfsmp, cp); // will send the docid-changed fsevent
6142 } else if (ut->t_lastop_document_id != 0) {
6143 int len = cnp->cn_namelen;
6144 if (len == 0) {
6145 len = strlen(cnp->cn_nameptr);
6146 }
6147
6148 if (is_ignorable_temp_name(cnp->cn_nameptr, cnp->cn_namelen)) {
6149 // printf("CREATE: not clearing tombstone because %s is a temp name.\n", cnp->cn_nameptr);
6150 } else {
6151 // Clear the tombstone because the thread is not recreating the same path
6152 // printf("CREATE: clearing tombstone because %s is NOT a temp name.\n", cnp->cn_nameptr);
6153 clear_tombstone_docid(ut, hfsmp, NULL);
6154 }
6155 }
6156
6157 *vpp = tvp;
6158
6159 #if CONFIG_PROTECT
6160 /*
6161 * Now that we have a vnode-in-hand, generate keys for this namespace item.
6162 * If we fail to create the keys, then attempt to delete the item from the
6163 * namespace. If we can't delete the item, that's not desirable but also not fatal.
6164 * All of the places which deal with restoring/unwrapping keys must also be
6165 * prepared to encounter an entry that does not have keys.
6166 */
6167 if ((protectable_target) && (protected_mount)) {
6168 struct cprotect *keyed_entry = NULL;
6169
6170 if (cp->c_cpentry == NULL) {
6171 panic ("hfs_makenode: no cpentry for cnode (%p)", cp);
6172 }
6173
6174 error = cp_generate_keys (hfsmp, cp, cp->c_cpentry->cp_pclass, &keyed_entry);
6175 if (error == 0) {
6176 /*
6177 * Upon success, the keys were generated and written out.
6178 * Update the cp pointer in the cnode.
6179 */
6180 cp_replace_entry (cp, keyed_entry);
6181 if (nocache) {
6182 cache_enter (dvp, tvp, cnp);
6183 }
6184 }
6185 else {
6186 /* If key creation OR the setxattr failed, emit EPERM to userland */
6187 error = EPERM;
6188
6189 /*
6190 * Beware! This slightly violates the lock ordering for the
6191 * cnode/vnode 'tvp'. Ordinarily, you must acquire the truncate lock
6192 * which guards file size changes before acquiring the normal cnode lock
6193 * and calling hfs_removefile on an item.
6194 *
6195 * However, in this case, we are still holding the directory lock so
6196 * 'tvp' is not lookup-able and it was a newly created vnode so it
6197 * cannot have any content yet. The only reason we are initiating
6198 * the removefile is because we could not generate content protection keys
6199 * for this namespace item. Note also that we pass a '1' in the allow_dirs
6200 * argument for hfs_removefile because we may be creating a directory here.
6201 *
6202 * All this to say that while it is technically a violation it is
6203 * impossible to race with another thread for this cnode so it is safe.
6204 */
6205 int err = hfs_removefile (dvp, tvp, cnp, 0, 0, 1, NULL, 0);
6206 if (err) {
6207 printf("hfs_makenode: removefile failed (%d) for CP entry %p\n", err, tvp);
6208 }
6209
6210 /* Release the cnode lock and mark the vnode for termination */
6211 hfs_unlock (cp);
6212 err = vnode_recycle (tvp);
6213 if (err) {
6214 printf("hfs_makenode: vnode_recycle failed (%d) for CP entry %p\n", err, tvp);
6215 }
6216
6217 /* Drop the iocount on the new vnode to force reclamation/recycling */
6218 vnode_put (tvp);
6219 cp = NULL;
6220 *vpp = NULL;
6221 }
6222 }
6223 #endif
6224
6225 #if QUOTA
6226 /*
6227 * Once we create this vnode, we need to initialize its quota data
6228 * structures, if necessary. We know that it is OK to just go ahead and
6229 * initialize because we've already validated earlier (through the hfs_quotacheck
6230 * function) to see if creating this cnode/vnode would cause us to go over quota.
6231 */
6232 if (hfsmp->hfs_flags & HFS_QUOTAS) {
6233 if (cp) {
6234 /* cp could have been zeroed earlier */
6235 (void) hfs_getinoquota(cp);
6236 }
6237 }
6238 #endif
6239
6240 exit:
6241 cat_releasedesc(&out_desc);
6242
6243 #if CONFIG_PROTECT
6244 /*
6245 * We may have jumped here in error-handling various situations above.
6246 * If we haven't already dumped the temporary CP used to initialize
6247 * the file atomically, then free it now. cp_entry_destroy should null
6248 * out the pointer if it was called already.
6249 */
6250 if (entry) {
6251 cp_entry_destroy (entry);
6252 entry = NULL;
6253 }
6254 #endif
6255
6256 /*
6257 * Make sure we release cnode lock on dcp.
6258 */
6259 if (dcp) {
6260 dcp->c_flag &= ~C_DIR_MODIFICATION;
6261 wakeup((caddr_t)&dcp->c_flag);
6262
6263 hfs_unlock(dcp);
6264 }
6265 if (error == 0 && cp != NULL) {
6266 hfs_unlock(cp);
6267 }
6268 if (started_tr) {
6269 hfs_end_transaction(hfsmp);
6270 started_tr = 0;
6271 }
6272
6273 return (error);
6274 }
6275
6276
6277 /*
6278 * hfs_vgetrsrc acquires a resource fork vnode corresponding to the cnode that is
6279 * found in 'vp'. The rsrc fork vnode is returned with the cnode locked and iocount
6280 * on the rsrc vnode.
6281 *
6282 * *rvpp is an output argument for returning the pointer to the resource fork vnode.
6283 * In most cases, the resource fork vnode will not be set if we return an error.
6284 * However, if error_on_unlinked is set, we may have already acquired the resource fork vnode
6285 * before we discover the error (the file has gone open-unlinked). In this case only,
6286 * we may return a vnode in the output argument despite an error.
6287 *
6288 * If can_drop_lock is set, then it is safe for this function to temporarily drop
6289 * and then re-acquire the cnode lock. We may need to do this, for example, in order to
6290 * acquire an iocount or promote our lock.
6291 *
6292 * error_on_unlinked is an argument which indicates that we are to return an error if we
6293 * discover that the cnode has gone into an open-unlinked state ( C_DELETED or C_NOEXISTS)
6294 * is set in the cnode flags. This is only necessary if can_drop_lock is true, otherwise
6295 * there's really no reason to double-check for errors on the cnode.
6296 */
6297
6298 int
6299 hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp,
6300 int can_drop_lock, int error_on_unlinked)
6301 {
6302 struct vnode *rvp;
6303 struct vnode *dvp = NULLVP;
6304 struct cnode *cp = VTOC(vp);
6305 int error;
6306 int vid;
6307 int delete_status = 0;
6308
6309 if (vnode_vtype(vp) == VDIR) {
6310 return EINVAL;
6311 }
6312
6313 /*
6314 * Need to check the status of the cnode to validate it hasn't gone
6315 * open-unlinked on us before we can actually do work with it.
6316 */
6317 delete_status = hfs_checkdeleted(cp);
6318 if ((delete_status) && (error_on_unlinked)) {
6319 return delete_status;
6320 }
6321
6322 restart:
6323 /* Attempt to use existing vnode */
6324 if ((rvp = cp->c_rsrc_vp)) {
6325 vid = vnode_vid(rvp);
6326
6327 /*
6328 * It is not safe to hold the cnode lock when calling vnode_getwithvid()
6329 * for the alternate fork -- vnode_getwithvid() could deadlock waiting
6330 * for a VL_WANTTERM while another thread has an iocount on the alternate
6331 * fork vnode and is attempting to acquire the common cnode lock.
6332 *
6333 * But it's also not safe to drop the cnode lock when we're holding
6334 * multiple cnode locks, like during a hfs_removefile() operation
6335 * since we could lock out of order when re-acquiring the cnode lock.
6336 *
6337 * So we can only drop the lock here if it's safe to drop it -- which is
6338 * most of the time with the exception being hfs_removefile().
6339 */
6340 if (can_drop_lock)
6341 hfs_unlock(cp);
6342
6343 error = vnode_getwithvid(rvp, vid);
6344
6345 if (can_drop_lock) {
6346 (void) hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
6347
6348 /*
6349 * When we relinquished our cnode lock, the cnode could have raced
6350 * with a delete and gotten deleted. If the caller did not want
6351 * us to ignore open-unlinked files, then re-check the C_DELETED
6352 * state and see if we need to return an ENOENT here because the item
6353 * got deleted in the intervening time.
6354 */
6355 if (error_on_unlinked) {
6356 if ((delete_status = hfs_checkdeleted(cp))) {
6357 /*
6358 * If error == 0, this means that we succeeded in acquiring an iocount on the
6359 * rsrc fork vnode. However, if we're in this block of code, that means that we noticed
6360 * that the cnode has gone open-unlinked. In this case, the caller requested that we
6361 * not do any other work and return an errno. The caller will be responsible for
6362 * dropping the iocount we just acquired because we can't do it until we've released
6363 * the cnode lock.
6364 */
6365 if (error == 0) {
6366 *rvpp = rvp;
6367 }
6368 return delete_status;
6369 }
6370 }
6371
6372 /*
6373 * When our lock was relinquished, the resource fork
6374 * could have been recycled. Check for this and try
6375 * again.
6376 */
6377 if (error == ENOENT)
6378 goto restart;
6379 }
6380 if (error) {
6381 const char * name = (const char *)VTOC(vp)->c_desc.cd_nameptr;
6382
6383 if (name)
6384 printf("hfs_vgetrsrc: couldn't get resource"
6385 " fork for %s, vol=%s, err=%d\n", name, hfsmp->vcbVN, error);
6386 return (error);
6387 }
6388 } else {
6389 struct cat_fork rsrcfork;
6390 struct componentname cn;
6391 struct cat_desc *descptr = NULL;
6392 struct cat_desc to_desc;
6393 char delname[32];
6394 int lockflags;
6395 int newvnode_flags = 0;
6396
6397 /*
6398 * Make sure cnode lock is exclusive, if not upgrade it.
6399 *
6400 * We assume that we were called from a read-only VNOP (getattr)
6401 * and that it's safe to have the cnode lock dropped and reacquired.
6402 */
6403 if (cp->c_lockowner != current_thread()) {
6404 if (!can_drop_lock) {
6405 return (EINVAL);
6406 }
6407 /*
6408 * If the upgrade fails we lose the lock and
6409 * have to take the exclusive lock on our own.
6410 */
6411 if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE)
6412 lck_rw_lock_exclusive(&cp->c_rwlock);
6413 cp->c_lockowner = current_thread();
6414 }
6415
6416 /*
6417 * hfs_vgetrsrc may be invoked for a cnode that has already been marked
6418 * C_DELETED. This is because we need to continue to provide rsrc
6419 * fork access to open-unlinked files. In this case, build a fake descriptor
6420 * like in hfs_removefile. If we don't do this, buildkey will fail in
6421 * cat_lookup because this cnode has no name in its descriptor. However,
6422 * only do this if the caller did not specify that they wanted us to
6423 * error out upon encountering open-unlinked files.
6424 */
6425
6426 if ((error_on_unlinked) && (can_drop_lock)) {
6427 if ((error = hfs_checkdeleted(cp))) {
6428 return error;
6429 }
6430 }
6431
6432 if ((cp->c_flag & C_DELETED ) && (cp->c_desc.cd_namelen == 0)) {
6433 bzero (&to_desc, sizeof(to_desc));
6434 bzero (delname, 32);
6435 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
6436 to_desc.cd_nameptr = (const u_int8_t*) delname;
6437 to_desc.cd_namelen = strlen(delname);
6438 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
6439 to_desc.cd_flags = 0;
6440 to_desc.cd_cnid = cp->c_cnid;
6441
6442 descptr = &to_desc;
6443 }
6444 else {
6445 descptr = &cp->c_desc;
6446 }
6447
6448
6449 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
6450
6451 /*
6452 * We call cat_idlookup (instead of cat_lookup) below because we can't
6453 * trust the descriptor in the provided cnode for lookups at this point.
6454 * Between the time of the original lookup of this vnode and now, the
6455 * descriptor could have gotten swapped or replaced. If this occurred,
6456 * the parent/name combo originally desired may not necessarily be provided
6457 * if we use the descriptor. Even worse, if the vnode represents
6458 * a hardlink, we could have removed one of the links from the namespace
6459 * but left the descriptor alone, since hfs_unlink does not invalidate
6460 * the descriptor in the cnode if other links still point to the inode.
6461 *
6462 * Consider the following (slightly contrived) scenario:
6463 * /tmp/a <--> /tmp/b (hardlinks).
6464 * 1. Thread A: open rsrc fork on /tmp/b.
6465 * 1a. Thread A: does lookup, goes out to lunch right before calling getnamedstream.
6466 * 2. Thread B does 'mv /foo/b /tmp/b'
6467 * 2a. Thread B succeeds.
6468 * 3. Thread A comes back and wants rsrc fork info for /tmp/b.
6469 *
6470 * Even though the hardlink backing /tmp/b is now eliminated, the descriptor
6471 * is not removed/updated during the unlink process. So, if you were to
6472 * do a lookup on /tmp/b, you'd acquire an entirely different record's resource
6473 * fork.
6474 *
6475 * As a result, we use the fileid, which should be invariant for the lifetime
6476 * of the cnode (possibly barring calls to exchangedata).
6477 *
6478 * Addendum: We can't do the above for HFS standard since we aren't guaranteed to
6479 * have thread records for files. They were only required for directories. So
6480 * we need to do the lookup with the catalog name. This is OK since hardlinks were
6481 * never allowed on HFS standard.
6482 */
6483
6484 /* Get resource fork data */
6485 if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
6486 error = cat_idlookup (hfsmp, cp->c_fileid, 0, 1, NULL, NULL, &rsrcfork);
6487 }
6488 #if CONFIG_HFS_STD
6489 else {
6490 /*
6491 * HFS standard only:
6492 *
6493 * Get the resource fork for this item with a cat_lookup call, but do not
6494 * force a case lookup since HFS standard is case-insensitive only. We
6495 * don't want the descriptor; just the fork data here. If we tried to
6496 * do a ID lookup (via thread record -> catalog record), then we might fail
6497 * prematurely since, as noted above, thread records were not strictly required
6498 * on files in HFS.
6499 */
6500 error = cat_lookup (hfsmp, descptr, 1, 0, (struct cat_desc*)NULL,
6501 (struct cat_attr*)NULL, &rsrcfork, NULL);
6502 }
6503 #endif
6504
6505 hfs_systemfile_unlock(hfsmp, lockflags);
6506 if (error) {
6507 return (error);
6508 }
6509 /*
6510 * Supply hfs_getnewvnode with a component name.
6511 */
6512 cn.cn_pnbuf = NULL;
6513 if (descptr->cd_nameptr) {
6514 MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
6515 cn.cn_nameiop = LOOKUP;
6516 cn.cn_flags = ISLASTCN | HASBUF;
6517 cn.cn_context = NULL;
6518 cn.cn_pnlen = MAXPATHLEN;
6519 cn.cn_nameptr = cn.cn_pnbuf;
6520 cn.cn_hash = 0;
6521 cn.cn_consume = 0;
6522 cn.cn_namelen = snprintf(cn.cn_nameptr, MAXPATHLEN,
6523 "%s%s", descptr->cd_nameptr,
6524 _PATH_RSRCFORKSPEC);
6525 }
6526 dvp = vnode_getparent(vp);
6527 error = hfs_getnewvnode(hfsmp, dvp, cn.cn_pnbuf ? &cn : NULL,
6528 descptr, GNV_WANTRSRC | GNV_SKIPLOCK, &cp->c_attr,
6529 &rsrcfork, &rvp, &newvnode_flags);
6530 if (dvp)
6531 vnode_put(dvp);
6532 if (cn.cn_pnbuf)
6533 FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
6534 if (error)
6535 return (error);
6536 }
6537
6538 *rvpp = rvp;
6539 return (0);
6540 }
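/*
 * Illustrative sketch (not part of the original source): a typical caller
 * pattern for hfs_vgetrsrc based on the contract documented above. The caller
 * already holds the cnode lock for 'vp'; on success it must drop the iocount
 * on the returned rsrc vnode and, when finished, its own cnode lock. The
 * local variable names are hypothetical.
 */
#if 0	/* example only -- not part of the build */
	struct vnode *rvp = NULLVP;
	int error;

	error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE /* can_drop_lock */,
	                     FALSE /* error_on_unlinked */);
	if (error == 0) {
		/* ... inspect the resource fork via VTOF(rvp) ... */
		vnode_put(rvp);	/* release the iocount taken by hfs_vgetrsrc */
	}
	hfs_unlock(VTOC(vp));
#endif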
6541
6542 /*
6543 * Wrapper for special device reads
6544 */
6545 int
6546 hfsspec_read(ap)
6547 struct vnop_read_args /* {
6548 struct vnode *a_vp;
6549 struct uio *a_uio;
6550 int a_ioflag;
6551 vfs_context_t a_context;
6552 } */ *ap;
6553 {
6554 /*
6555 * Set access flag.
6556 */
6557 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
6558 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_read), ap));
6559 }
6560
6561 /*
6562 * Wrapper for special device writes
6563 */
6564 int
6565 hfsspec_write(ap)
6566 struct vnop_write_args /* {
6567 struct vnode *a_vp;
6568 struct uio *a_uio;
6569 int a_ioflag;
6570 vfs_context_t a_context;
6571 } */ *ap;
6572 {
6573 /*
6574 * Set update and change flags.
6575 */
6576 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
6577 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
6578 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_write), ap));
6579 }
6580
6581 /*
6582 * Wrapper for special device close
6583 *
6584 * Update the times on the cnode then do device close.
6585 */
6586 int
6587 hfsspec_close(ap)
6588 struct vnop_close_args /* {
6589 struct vnode *a_vp;
6590 int a_fflag;
6591 vfs_context_t a_context;
6592 } */ *ap;
6593 {
6594 struct vnode *vp = ap->a_vp;
6595 struct cnode *cp;
6596
6597 if (vnode_isinuse(ap->a_vp, 0)) {
6598 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
6599 cp = VTOC(vp);
6600 hfs_touchtimes(VTOHFS(vp), cp);
6601 hfs_unlock(cp);
6602 }
6603 }
6604 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_close), ap));
6605 }
6606
6607 #if FIFO
6608 /*
6609 * Wrapper for fifo reads
6610 */
6611 static int
6612 hfsfifo_read(ap)
6613 struct vnop_read_args /* {
6614 struct vnode *a_vp;
6615 struct uio *a_uio;
6616 int a_ioflag;
6617 vfs_context_t a_context;
6618 } */ *ap;
6619 {
6620 /*
6621 * Set access flag.
6622 */
6623 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
6624 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_read), ap));
6625 }
6626
6627 /*
6628 * Wrapper for fifo writes
6629 */
6630 static int
6631 hfsfifo_write(ap)
6632 struct vnop_write_args /* {
6633 struct vnode *a_vp;
6634 struct uio *a_uio;
6635 int a_ioflag;
6636 vfs_context_t a_context;
6637 } */ *ap;
6638 {
6639 /*
6640 * Set update and change flags.
6641 */
6642 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
6643 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
6644 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_write), ap));
6645 }
6646
6647 /*
6648 * Wrapper for fifo close
6649 *
6650 * Update the times on the cnode then do device close.
6651 */
6652 static int
6653 hfsfifo_close(ap)
6654 struct vnop_close_args /* {
6655 struct vnode *a_vp;
6656 int a_fflag;
6657 vfs_context_t a_context;
6658 } */ *ap;
6659 {
6660 struct vnode *vp = ap->a_vp;
6661 struct cnode *cp;
6662
6663 if (vnode_isinuse(ap->a_vp, 1)) {
6664 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
6665 cp = VTOC(vp);
6666 hfs_touchtimes(VTOHFS(vp), cp);
6667 hfs_unlock(cp);
6668 }
6669 }
6670 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_close), ap));
6671 }
6672
6673
6674 #endif /* FIFO */
6675
6676 /*
6677 * Getter for the document_id
6678 * the document_id is stored in FndrExtendedFileInfo/FndrExtendedDirInfo
6679 */
6680 static u_int32_t
6681 hfs_get_document_id_internal(const uint8_t *finderinfo, mode_t mode)
6682 {
6683 u_int8_t *finfo = NULL;
6684 u_int32_t doc_id = 0;
6685
6686 /* overlay the FinderInfo to the correct pointer, and advance */
6687 finfo = ((uint8_t *)finderinfo) + 16;
6688
6689 if (S_ISREG(mode)) {
6690 struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo;
6691 doc_id = extinfo->document_id;
6692 } else if (S_ISDIR(mode)) {
6693 struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)finfo;
6694 doc_id = extinfo->document_id;
6695 }
6696
6697 return doc_id;
6698 }
6699
6700
6701 /* getter(s) for document id */
6702 u_int32_t
6703 hfs_get_document_id(struct cnode *cp)
6704 {
6705 return (hfs_get_document_id_internal((u_int8_t*)cp->c_finderinfo,
6706 cp->c_attr.ca_mode));
6707 }
6708
6709 /* If you have finderinfo and mode, you can use this */
6710 u_int32_t
6711 hfs_get_document_id_from_blob(const uint8_t *finderinfo, mode_t mode)
6712 {
6713 return (hfs_get_document_id_internal(finderinfo, mode));
6714 }
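/*
 * Illustrative sketch (not part of the original source): the document_id
 * returned above is the first 4 bytes of the extended Finder info, i.e. it
 * starts at byte 16 of the 32-byte finderinfo blob. A minimal (hypothetical)
 * helper for callers that already hold a raw finderinfo copy:
 */
#if 0	/* example only -- not part of the build */
static u_int32_t
example_doc_id_from_finderinfo(const u_int8_t finderinfo[32], mode_t mode)
{
	/* mode selects the file vs. folder layout; both keep document_id first */
	return hfs_get_document_id_from_blob(finderinfo, mode);
}
#endif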
6715
6716 /*
6717 * Synchronize a file's in-core state with that on disk.
6718 */
6719 int
6720 hfs_vnop_fsync(ap)
6721 struct vnop_fsync_args /* {
6722 struct vnode *a_vp;
6723 int a_waitfor;
6724 vfs_context_t a_context;
6725 } */ *ap;
6726 {
6727 struct vnode* vp = ap->a_vp;
6728 int error;
6729
6730 /* Note: We check the hfs flags instead of the vfs mount flag because during
6731 * a read-write update, hfs marks itself read-write much earlier than the vfs
6732 * does; checking the hfs flag thus avoids skipping writes such as zeroing
6733 * out unused nodes, creating the hotfiles btree, etc.
6734 */
6735 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) {
6736 return 0;
6737 }
6738
6739 #if CONFIG_PROTECT
6740 if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
6741 return (error);
6742 }
6743 #endif /* CONFIG_PROTECT */
6744
6745 /*
6746 * We need to allow ENOENT lock errors since the unlink
6747 * system call can call VNOP_FSYNC during vclean.
6748 */
6749 error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
6750 if (error)
6751 return (0);
6752
6753 error = hfs_fsync(vp, ap->a_waitfor, 0, vfs_context_proc(ap->a_context));
6754
6755 hfs_unlock(VTOC(vp));
6756 return (error);
6757 }
6758
6759
6760 int
6761 hfs_vnop_whiteout(ap)
6762 struct vnop_whiteout_args /* {
6763 struct vnode *a_dvp;
6764 struct componentname *a_cnp;
6765 int a_flags;
6766 vfs_context_t a_context;
6767 } */ *ap;
6768 {
6769 int error = 0;
6770 struct vnode *vp = NULL;
6771 struct vnode_attr va;
6772 struct vnop_lookup_args lookup_args;
6773 struct vnop_remove_args remove_args;
6774 struct hfsmount *hfsmp;
6775
6776 hfsmp = VTOHFS(ap->a_dvp);
6777 if (hfsmp->hfs_flags & HFS_STANDARD) {
6778 error = ENOTSUP;
6779 goto exit;
6780 }
6781
6782 switch (ap->a_flags) {
6783 case LOOKUP:
6784 error = 0;
6785 break;
6786
6787 case CREATE:
6788 VATTR_INIT(&va);
6789 VATTR_SET(&va, va_type, VREG);
6790 VATTR_SET(&va, va_mode, S_IFWHT);
6791 VATTR_SET(&va, va_uid, 0);
6792 VATTR_SET(&va, va_gid, 0);
6793
6794 error = hfs_makenode(ap->a_dvp, &vp, ap->a_cnp, &va, ap->a_context);
6795 /* No need to release the vnode as no vnode is created for whiteouts */
6796 break;
6797
6798 case DELETE:
6799 lookup_args.a_dvp = ap->a_dvp;
6800 lookup_args.a_vpp = &vp;
6801 lookup_args.a_cnp = ap->a_cnp;
6802 lookup_args.a_context = ap->a_context;
6803
6804 error = hfs_vnop_lookup(&lookup_args);
6805 if (error) {
6806 break;
6807 }
6808
6809 remove_args.a_dvp = ap->a_dvp;
6810 remove_args.a_vp = vp;
6811 remove_args.a_cnp = ap->a_cnp;
6812 remove_args.a_flags = 0;
6813 remove_args.a_context = ap->a_context;
6814
6815 error = hfs_vnop_remove(&remove_args);
6816 vnode_put(vp);
6817 break;
6818
6819 default:
6820 panic("hfs_vnop_whiteout: unknown operation (flag = %x)\n", ap->a_flags);
6821 };
6822
6823 exit:
6824 return (error);
6825 }
6826
6827 int (**hfs_vnodeop_p)(void *);
6828
6829 #define VOPFUNC int (*)(void *)
6830
6831
6832 #if CONFIG_HFS_STD
6833 int (**hfs_std_vnodeop_p) (void *);
6834 static int hfs_readonly_op (__unused void* ap) { return (EROFS); }
6835
6836 /*
6837 * In 10.6 and forward, HFS Standard is read-only and deprecated. The vnop table below
6838 * is for use with HFS standard to block out operations that would modify the file system
6839 */
6840
6841 struct vnodeopv_entry_desc hfs_standard_vnodeop_entries[] = {
6842 { &vnop_default_desc, (VOPFUNC)vn_default_error },
6843 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
6844 { &vnop_create_desc, (VOPFUNC)hfs_readonly_op }, /* create (READONLY) */
6845 { &vnop_mknod_desc, (VOPFUNC)hfs_readonly_op }, /* mknod (READONLY) */
6846 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
6847 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
6848 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
6849 { &vnop_setattr_desc, (VOPFUNC)hfs_readonly_op }, /* setattr */
6850 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
6851 { &vnop_write_desc, (VOPFUNC)hfs_readonly_op }, /* write (READONLY) */
6852 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
6853 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
6854 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
6855 { &vnop_exchange_desc, (VOPFUNC)hfs_readonly_op }, /* exchange (READONLY) */
6856 { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */
6857 { &vnop_fsync_desc, (VOPFUNC)hfs_readonly_op}, /* fsync (READONLY) */
6858 { &vnop_remove_desc, (VOPFUNC)hfs_readonly_op }, /* remove (READONLY) */
6859 { &vnop_link_desc, (VOPFUNC)hfs_readonly_op }, /* link (READONLY) */
6860 { &vnop_rename_desc, (VOPFUNC)hfs_readonly_op }, /* rename (READONLY) */
6861 { &vnop_mkdir_desc, (VOPFUNC)hfs_readonly_op }, /* mkdir (READONLY) */
6862 { &vnop_rmdir_desc, (VOPFUNC)hfs_readonly_op }, /* rmdir (READONLY) */
6863 { &vnop_symlink_desc, (VOPFUNC)hfs_readonly_op }, /* symlink (READONLY) */
6864 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
6865 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
6866 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
6867 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
6868 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
6869 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
6870 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
6871 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
6872 { &vnop_allocate_desc, (VOPFUNC)hfs_readonly_op }, /* allocate (READONLY) */
6873 #if CONFIG_SEARCHFS
6874 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
6875 #else
6876 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
6877 #endif
6878 { &vnop_bwrite_desc, (VOPFUNC)hfs_readonly_op }, /* bwrite (READONLY) */
6879 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
6880 { &vnop_pageout_desc, (VOPFUNC)hfs_readonly_op }, /* pageout (READONLY) */
6881 { &vnop_copyfile_desc, (VOPFUNC)hfs_readonly_op }, /* copyfile (READONLY) */
6882 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
6883 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
6884 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
6885 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
6886 { &vnop_setxattr_desc, (VOPFUNC)hfs_readonly_op}, /* set xattr (READONLY) */
6887 { &vnop_removexattr_desc, (VOPFUNC)hfs_readonly_op}, /* remove xattr (READONLY) */
6888 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
6889 { &vnop_whiteout_desc, (VOPFUNC)hfs_readonly_op}, /* whiteout (READONLY) */
6890 #if NAMEDSTREAMS
6891 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
6892 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_readonly_op },
6893 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_readonly_op },
6894 #endif
6895 { NULL, (VOPFUNC)NULL }
6896 };
6897
6898 struct vnodeopv_desc hfs_std_vnodeop_opv_desc =
6899 { &hfs_std_vnodeop_p, hfs_standard_vnodeop_entries };
6900 #endif
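/*
 * Illustrative sketch (not compiled): once the read-only table above is
 * installed on an HFS Standard vnode, every mutating VNOP resolves to
 * hfs_readonly_op(), so callers simply see EROFS.  A hypothetical example:
 */
#if 0
static int
hfs_std_create_example(vnode_t dvp, struct componentname *cnp,
    struct vnode_attr *vap, vfs_context_t ctx)
{
	vnode_t vp = NULLVP;

	/* Dispatches through hfs_std_vnodeop_p -> hfs_readonly_op -> EROFS. */
	return (VNOP_CREATE(dvp, &vp, cnp, vap, ctx));
}
#endif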
6901
6902 /* VNOP table for HFS+ */
6903 struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
6904 { &vnop_default_desc, (VOPFUNC)vn_default_error },
6905 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
6906 { &vnop_create_desc, (VOPFUNC)hfs_vnop_create }, /* create */
6907 { &vnop_mknod_desc, (VOPFUNC)hfs_vnop_mknod }, /* mknod */
6908 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
6909 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
6910 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
6911 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
6912 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
6913 { &vnop_write_desc, (VOPFUNC)hfs_vnop_write }, /* write */
6914 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
6915 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
6916 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
6917 { &vnop_exchange_desc, (VOPFUNC)hfs_vnop_exchange }, /* exchange */
6918 { &vnop_mmap_desc, (VOPFUNC)hfs_vnop_mmap }, /* mmap */
6919 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
6920 { &vnop_remove_desc, (VOPFUNC)hfs_vnop_remove }, /* remove */
6921 { &vnop_link_desc, (VOPFUNC)hfs_vnop_link }, /* link */
6922 { &vnop_rename_desc, (VOPFUNC)hfs_vnop_rename }, /* rename */
6923 { &vnop_mkdir_desc, (VOPFUNC)hfs_vnop_mkdir }, /* mkdir */
6924 { &vnop_rmdir_desc, (VOPFUNC)hfs_vnop_rmdir }, /* rmdir */
6925 { &vnop_symlink_desc, (VOPFUNC)hfs_vnop_symlink }, /* symlink */
6926 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
6927 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
6928 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
6929 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
6930 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
6931 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
6932 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
6933 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
6934 { &vnop_allocate_desc, (VOPFUNC)hfs_vnop_allocate }, /* allocate */
6935 #if CONFIG_SEARCHFS
6936 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
6937 #else
6938 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
6939 #endif
6940 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite }, /* bwrite */
6941 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
6942 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* pageout */
6943 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
6944 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
6945 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
6946 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
6947 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
6948 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
6949 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
6950 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
6951 { &vnop_whiteout_desc, (VOPFUNC)hfs_vnop_whiteout},
6952 #if NAMEDSTREAMS
6953 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
6954 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_vnop_makenamedstream },
6955 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_vnop_removenamedstream },
6956 #endif
6957 { NULL, (VOPFUNC)NULL }
6958 };
6959
6960 struct vnodeopv_desc hfs_vnodeop_opv_desc =
6961 { &hfs_vnodeop_p, hfs_vnodeop_entries };
6962
6963
6964 /* Spec Op vnop table for HFS+ */
6965 int (**hfs_specop_p)(void *);
6966 struct vnodeopv_entry_desc hfs_specop_entries[] = {
6967 { &vnop_default_desc, (VOPFUNC)vn_default_error },
6968 { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */
6969 { &vnop_create_desc, (VOPFUNC)spec_create }, /* create */
6970 { &vnop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */
6971 { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */
6972 { &vnop_close_desc, (VOPFUNC)hfsspec_close }, /* close */
6973 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
6974 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
6975 { &vnop_read_desc, (VOPFUNC)hfsspec_read }, /* read */
6976 { &vnop_write_desc, (VOPFUNC)hfsspec_write }, /* write */
6977 { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */
6978 { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */
6979 { &vnop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */
6980 { &vnop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */
6981 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
6982 { &vnop_remove_desc, (VOPFUNC)spec_remove }, /* remove */
6983 { &vnop_link_desc, (VOPFUNC)spec_link }, /* link */
6984 { &vnop_rename_desc, (VOPFUNC)spec_rename }, /* rename */
6985 { &vnop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */
6986 { &vnop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */
6987 { &vnop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */
6988 { &vnop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */
6989 { &vnop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */
6990 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
6991 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
6992 { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */
6993 { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */
6994 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
6995 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
6996 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
6997 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
6998 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
6999 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7000 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7001 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7002 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7003 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7004 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7005 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
7006 };
7007 struct vnodeopv_desc hfs_specop_opv_desc =
7008 { &hfs_specop_p, hfs_specop_entries };
7009
7010 #if FIFO
7011 /* HFS+ FIFO VNOP table */
7012 int (**hfs_fifoop_p)(void *);
7013 struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
7014 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7015 { &vnop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */
7016 { &vnop_create_desc, (VOPFUNC)fifo_create }, /* create */
7017 { &vnop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */
7018 { &vnop_open_desc, (VOPFUNC)fifo_open }, /* open */
7019 { &vnop_close_desc, (VOPFUNC)hfsfifo_close }, /* close */
7020 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7021 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7022 { &vnop_read_desc, (VOPFUNC)hfsfifo_read }, /* read */
7023 { &vnop_write_desc, (VOPFUNC)hfsfifo_write }, /* write */
7024 { &vnop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */
7025 { &vnop_select_desc, (VOPFUNC)fifo_select }, /* select */
7026 { &vnop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */
7027 { &vnop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */
7028 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7029 { &vnop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */
7030 { &vnop_link_desc, (VOPFUNC)fifo_link }, /* link */
7031 { &vnop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */
7032 { &vnop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */
7033 { &vnop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */
7034 { &vnop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */
7035 { &vnop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */
7036 { &vnop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */
7037 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7038 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7039 { &vnop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */
7040 { &vnop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */
7041 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7042 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
7043 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
7044 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
7045 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7046 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7047 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7048 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7049 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7050 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7051 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7052 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7053 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
7054 };
7055 struct vnodeopv_desc hfs_fifoop_opv_desc =
7056 { &hfs_fifoop_p, hfs_fifoop_entries };
7057 #endif /* FIFO */
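/*
 * Illustrative sketch (not compiled): the vectors filled in from the tables
 * above (hfs_vnodeop_p, hfs_specop_p, hfs_fifoop_p and, under CONFIG_HFS_STD,
 * hfs_std_vnodeop_p) are what hfs_getnewvnode() in hfs_cnode.c hands to
 * vnode_create() via the vnfs_vops field of struct vnode_fsparam.  Greatly
 * simplified, the selection looks roughly like this:
 */
#if 0
static int (**hfs_pick_vnodeops(struct hfsmount *hfsmp, enum vtype vtype))(void *)
{
	/* Device nodes and fifos always get the special-purpose vectors. */
	if (vtype == VBLK || vtype == VCHR)
		return (hfs_specop_p);
#if FIFO
	if (vtype == VFIFO)
		return (hfs_fifoop_p);
#endif
#if CONFIG_HFS_STD
	/* HFS Standard volumes get the read-only vector for everything else. */
	if (hfsmp->hfs_flags & HFS_STANDARD)
		return (hfs_std_vnodeop_p);
#endif
	/* HFS+ regular files, directories and symlinks. */
	return (hfs_vnodeop_p);
}
#endif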
7058
7059
7060