bsd/hfs/hfs_vnops.c (apple/xnu)
1 /*
2 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <stdbool.h>
30 #include <sys/systm.h>
31 #include <sys/param.h>
32 #include <sys/kernel.h>
33 #include <sys/file_internal.h>
34 #include <sys/dirent.h>
35 #include <sys/stat.h>
36 #include <sys/buf.h>
37 #include <sys/buf_internal.h>
38 #include <sys/mount.h>
39 #include <sys/vnode_if.h>
40 #include <sys/vnode_internal.h>
41 #include <sys/malloc.h>
42 #include <sys/ubc.h>
43 #include <sys/ubc_internal.h>
44 #include <sys/paths.h>
45 #include <sys/quota.h>
46 #include <sys/time.h>
47 #include <sys/disk.h>
48 #include <sys/kauth.h>
49 #include <sys/uio_internal.h>
50 #include <sys/fsctl.h>
51 #include <sys/cprotect.h>
52 #include <sys/xattr.h>
53 #include <string.h>
54 #include <sys/fsevents.h>
55 #include <kern/kalloc.h>
56
57 #include <miscfs/specfs/specdev.h>
58 #include <miscfs/fifofs/fifo.h>
59 #include <vfs/vfs_support.h>
60 #include <machine/spl.h>
61
62 #include <sys/kdebug.h>
63 #include <sys/sysctl.h>
64 #include <stdbool.h>
65
66 #include "hfs.h"
67 #include "hfs_catalog.h"
68 #include "hfs_cnode.h"
69 #include "hfs_dbg.h"
70 #include "hfs_mount.h"
71 #include "hfs_quota.h"
72 #include "hfs_endian.h"
73 #include "hfs_kdebug.h"
74
75 #include "hfscommon/headers/BTreesInternal.h"
76 #include "hfscommon/headers/FileMgrInternal.h"
77
78 #define KNDETACH_VNLOCKED 0x00000001
79
80 /* Global vfs data structures for hfs */
81
82 /* Always do F_FULLFSYNC? 1=yes, 0=no (the default is 'no' for "various" reasons) */
83 int always_do_fullfsync = 0;
84 SYSCTL_DECL(_vfs_generic);
85 SYSCTL_INT (_vfs_generic, OID_AUTO, always_do_fullfsync, CTLFLAG_RW | CTLFLAG_LOCKED, &always_do_fullfsync, 0, "always F_FULLFSYNC when fsync is called");
86
87 int hfs_makenode(struct vnode *dvp, struct vnode **vpp,
88 struct componentname *cnp, struct vnode_attr *vap,
89 vfs_context_t ctx);
90 int hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p);
91 int hfs_metasync_all(struct hfsmount *hfsmp);
92
93 int hfs_removedir(struct vnode *, struct vnode *, struct componentname *,
94 int, int);
95 int hfs_removefile(struct vnode *, struct vnode *, struct componentname *,
96 int, int, int, struct vnode *, int);
97
98 /* Used here and in cnode teardown -- for symlinks */
99 int hfs_removefile_callback(struct buf *bp, void *hfsmp);
100
101 enum {
102 HFS_MOVE_DATA_INCLUDE_RSRC = 1,
103 };
104 typedef uint32_t hfs_move_data_options_t;
105
106 static int hfs_move_data(cnode_t *from_cp, cnode_t *to_cp,
107 hfs_move_data_options_t options);
108 static int hfs_move_fork(filefork_t *srcfork, cnode_t *src,
109 filefork_t *dstfork, cnode_t *dst);
110 static const struct cat_fork *
111 hfs_prepare_fork_for_update(const filefork_t *pfork,
112 struct cat_fork *fork_buf,
113 uint32_t block_size);
114
115 #if HFS_COMPRESSION
116 static int hfs_move_compressed(cnode_t *from_vp, cnode_t *to_vp);
117 #endif
118
119 decmpfs_cnode* hfs_lazy_init_decmpfs_cnode (struct cnode *cp);
120
121 #if FIFO
122 static int hfsfifo_read(struct vnop_read_args *);
123 static int hfsfifo_write(struct vnop_write_args *);
124 static int hfsfifo_close(struct vnop_close_args *);
125
126 extern int (**fifo_vnodeop_p)(void *);
127 #endif /* FIFO */
128
129 int hfs_vnop_close(struct vnop_close_args*);
130 int hfs_vnop_create(struct vnop_create_args*);
131 int hfs_vnop_exchange(struct vnop_exchange_args*);
132 int hfs_vnop_fsync(struct vnop_fsync_args*);
133 int hfs_vnop_mkdir(struct vnop_mkdir_args*);
134 int hfs_vnop_mknod(struct vnop_mknod_args*);
135 int hfs_vnop_getattr(struct vnop_getattr_args*);
136 int hfs_vnop_open(struct vnop_open_args*);
137 int hfs_vnop_readdir(struct vnop_readdir_args*);
138 int hfs_vnop_remove(struct vnop_remove_args*);
139 int hfs_vnop_rename(struct vnop_rename_args*);
140 int hfs_vnop_rmdir(struct vnop_rmdir_args*);
141 int hfs_vnop_symlink(struct vnop_symlink_args*);
142 int hfs_vnop_setattr(struct vnop_setattr_args*);
143 int hfs_vnop_readlink(struct vnop_readlink_args *);
144 int hfs_vnop_pathconf(struct vnop_pathconf_args *);
145 int hfs_vnop_mmap(struct vnop_mmap_args *ap);
146 int hfsspec_read(struct vnop_read_args *);
147 int hfsspec_write(struct vnop_write_args *);
148 int hfsspec_close(struct vnop_close_args *);
149
150 /* Options for hfs_removedir and hfs_removefile */
151 #define HFSRM_SKIP_RESERVE 0x01
152
153
154
155 /*****************************************************************************
156 *
157 * Common Operations on vnodes
158 *
159 *****************************************************************************/
160
161 /*
162 * Is the given cnode either the .journal or .journal_info_block file on
163 * a volume with an active journal? Many VNOPs use this to deny access
164 * to those files.
165 *
166 * Note: the .journal file on a volume with an external journal still
167 * returns true here, even though it does not actually hold the contents
168 * of the volume's journal.
169 */
170 static _Bool
171 hfs_is_journal_file(struct hfsmount *hfsmp, struct cnode *cp)
172 {
173 if (hfsmp->jnl != NULL &&
174 (cp->c_fileid == hfsmp->hfs_jnlinfoblkid ||
175 cp->c_fileid == hfsmp->hfs_jnlfileid)) {
176 return true;
177 } else {
178 return false;
179 }
180 }
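/*
 * Example (sketch): VNOPs that need to refuse access to the journal files
 * typically guard like this, as hfs_vnop_open() and hfs_chmod() below do:
 *
 *	if (hfs_is_journal_file(hfsmp, cp))
 *		return (EPERM);
 */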
181
182 /*
183 * Create a regular file.
184 */
185 int
186 hfs_vnop_create(struct vnop_create_args *ap)
187 {
188 /*
189 * We leave handling of certain race conditions here to the caller
190 * which will have a better understanding of the semantics it
191 * requires. For example, if it turns out that the file exists,
192 * it would be wrong of us to return a reference to the existing
193 * file because the caller might not want that and it would be
194 * misleading to suggest the file had been created when it hadn't
195 * been. Note that our NFS server code does not set the
196 * VA_EXCLUSIVE flag so you cannot assume that callers don't want
197 * EEXIST errors if it's not set. The common case, where users
198 * are calling open with the O_CREAT mode, is handled in VFS; when
199 * we return EEXIST, it will loop and do the look-up again.
200 */
201 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
202 }
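/*
 * Example (sketch, from the caller's side): because the common O_CREAT case
 * is resolved in VFS as described above, an EEXIST returned by this VNOP is
 * normally invisible to user space unless O_EXCL was requested:
 *
 *	int fd  = open(path, O_CREAT | O_WRONLY, 0644);			// opens an existing file
 *	int fd2 = open(path, O_CREAT | O_EXCL | O_WRONLY, 0644);	// fails with EEXIST if it exists
 */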
203
204 /*
205 * Make device special file.
206 */
207 int
208 hfs_vnop_mknod(struct vnop_mknod_args *ap)
209 {
210 struct vnode_attr *vap = ap->a_vap;
211 struct vnode *dvp = ap->a_dvp;
212 struct vnode **vpp = ap->a_vpp;
213 struct cnode *cp;
214 int error;
215
216 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord) {
217 return (ENOTSUP);
218 }
219
220 /* Create the vnode */
221 error = hfs_makenode(dvp, vpp, ap->a_cnp, vap, ap->a_context);
222 if (error)
223 return (error);
224
225 cp = VTOC(*vpp);
226 cp->c_touch_acctime = TRUE;
227 cp->c_touch_chgtime = TRUE;
228 cp->c_touch_modtime = TRUE;
229
230 if ((vap->va_rdev != VNOVAL) &&
231 (vap->va_type == VBLK || vap->va_type == VCHR))
232 cp->c_rdev = vap->va_rdev;
233
234 return (0);
235 }
236
237 #if HFS_COMPRESSION
238 /*
239 * hfs_ref_data_vp(): returns the data fork vnode for a given cnode.
240 * In the (hopefully rare) case where the data fork vnode is not
241 * present, it will use hfs_vget() to create a new vnode for the
242 * data fork.
243 *
244 * NOTE: If successful and a vnode is returned, the caller is responsible
245 * for releasing the returned vnode with vnode_rele().
246 */
247 static int
248 hfs_ref_data_vp(struct cnode *cp, struct vnode **data_vp, int skiplock)
249 {
250 int vref = 0;
251
252 if (!data_vp || !cp) /* sanity check incoming parameters */
253 return EINVAL;
254
255 /* maybe we should take the hfs cnode lock here, and if so, use the skiplock parameter to tell us not to */
256
257 if (!skiplock) hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
258 struct vnode *c_vp = cp->c_vp;
259 if (c_vp) {
260 /* we already have a data vnode */
261 *data_vp = c_vp;
262 vref = vnode_ref(*data_vp);
263 if (!skiplock) hfs_unlock(cp);
264 if (vref == 0) {
265 return 0;
266 }
267 return EINVAL;
268 }
269 /* no data fork vnode in the cnode, so ask hfs for one. */
270
271 if (!cp->c_rsrc_vp) {
272 /* if we don't have either a c_vp or c_rsrc_vp, we can't really do anything useful */
273 *data_vp = NULL;
274 if (!skiplock) hfs_unlock(cp);
275 return EINVAL;
276 }
277
278 if (0 == hfs_vget(VTOHFS(cp->c_rsrc_vp), cp->c_cnid, data_vp, 1, 0) &&
279 0 != data_vp) {
280 vref = vnode_ref(*data_vp);
281 vnode_put(*data_vp);
282 if (!skiplock) hfs_unlock(cp);
283 if (vref == 0) {
284 return 0;
285 }
286 return EINVAL;
287 }
288 /* there was an error getting the vnode */
289 *data_vp = NULL;
290 if (!skiplock) hfs_unlock(cp);
291 return EINVAL;
292 }
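/*
 * Example (sketch): a typical caller of hfs_ref_data_vp(), mirroring its use
 * in hfs_file_is_compressed() below.  On success the caller owns a
 * vnode_ref() reference and must drop it with vnode_rele():
 *
 *	struct vnode *data_vp = NULL;
 *	if (hfs_ref_data_vp(cp, &data_vp, skiplock) == 0 && data_vp) {
 *		// ... use the data fork vnode ...
 *		vnode_rele(data_vp);
 *	}
 */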
293
294 /*
295 * hfs_lazy_init_decmpfs_cnode(): returns the decmpfs_cnode for a cnode,
296 * allocating it if necessary; returns NULL if there was an allocation error.
297 * The function is non-static so that it can be used from the FCNTL handler.
298 */
299 decmpfs_cnode *
300 hfs_lazy_init_decmpfs_cnode(struct cnode *cp)
301 {
302 if (!cp->c_decmp) {
303 decmpfs_cnode *dp = NULL;
304 MALLOC_ZONE(dp, decmpfs_cnode *, sizeof(decmpfs_cnode), M_DECMPFS_CNODE, M_WAITOK);
305 if (!dp) {
306 /* error allocating a decmpfs cnode */
307 return NULL;
308 }
309 decmpfs_cnode_init(dp);
310 if (!OSCompareAndSwapPtr(NULL, dp, (void * volatile *)&cp->c_decmp)) {
311 /* another thread got here first, so free the decmpfs_cnode we allocated */
312 decmpfs_cnode_destroy(dp);
313 FREE_ZONE(dp, sizeof(*dp), M_DECMPFS_CNODE);
314 }
315 }
316
317 return cp->c_decmp;
318 }
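/*
 * Note (sketch): the function above publishes the decmpfs_cnode with a
 * lock-free "allocate, compare-and-swap, free on losing the race" pattern:
 *
 *	dp = allocate-and-init();
 *	if (!OSCompareAndSwapPtr(NULL, dp, &cp->c_decmp))
 *		destroy-and-free(dp);	// another thread published first
 *	return cp->c_decmp;		// always the published pointer
 */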
319
320 /*
321 * hfs_file_is_compressed(): returns 1 if the file is compressed, and 0 if not.
322 * If the file's compressed flag is set, this makes sure the decmpfs_cnode field
323 * is allocated by calling hfs_lazy_init_decmpfs_cnode(), and that it is populated,
324 * filling it in via the decmpfs_file_is_compressed() function if necessary.
325 */
326 int
327 hfs_file_is_compressed(struct cnode *cp, int skiplock)
328 {
329 int ret = 0;
330
331 /* fast check to see if file is compressed. If flag is clear, just answer no */
332 if (!(cp->c_bsdflags & UF_COMPRESSED)) {
333 return 0;
334 }
335
336 decmpfs_cnode *dp = hfs_lazy_init_decmpfs_cnode(cp);
337 if (!dp) {
338 /* error allocating a decmpfs cnode, treat the file as uncompressed */
339 return 0;
340 }
341
342 /* flag was set, see if the decmpfs_cnode state is valid (zero == invalid) */
343 uint32_t decmpfs_state = decmpfs_cnode_get_vnode_state(dp);
344 switch(decmpfs_state) {
345 case FILE_IS_COMPRESSED:
346 case FILE_IS_CONVERTING: /* treat decompressing files as if they are compressed */
347 return 1;
348 case FILE_IS_NOT_COMPRESSED:
349 return 0;
350 /* otherwise the state is not cached yet */
351 }
352
353 /* decmpfs hasn't seen this file yet, so call decmpfs_file_is_compressed() to init the decmpfs_cnode struct */
354 struct vnode *data_vp = NULL;
355 if (0 == hfs_ref_data_vp(cp, &data_vp, skiplock)) {
356 if (data_vp) {
357 ret = decmpfs_file_is_compressed(data_vp, VTOCMP(data_vp)); // fill in decmpfs_cnode
358 vnode_rele(data_vp);
359 }
360 }
361 return ret;
362 }
363
364 /* hfs_uncompressed_size_of_compressed_file() - get the uncompressed size of the file.
365 * If the caller has passed a valid vnode (with a ref count > 0), then hfsmp and fid are not required.
366 * If the caller doesn't have a vnode, pass NULL in vp, and pass a valid hfsmp and fid.
367 * The file's size is returned in size (required).
368 * If the indicated file is a directory (or something that doesn't have a data fork), then this call
369 * will return an error and the caller should fall back to treating the item as an uncompressed file.
370 */
371 int
372 hfs_uncompressed_size_of_compressed_file(struct hfsmount *hfsmp, struct vnode *vp, cnid_t fid, off_t *size, int skiplock)
373 {
374 int ret = 0;
375 int putaway = 0; /* flag to remember if we used hfs_vget() */
376
377 if (!size) {
378 return EINVAL; /* no place to put the file size */
379 }
380
381 if (NULL == vp) {
382 if (!hfsmp || !fid) { /* make sure we have the required parameters */
383 return EINVAL;
384 }
385 if (0 != hfs_vget(hfsmp, fid, &vp, skiplock, 0)) { /* vnode is null, use hfs_vget() to get it */
386 vp = NULL;
387 } else {
388 putaway = 1; /* note that hfs_vget() was used to acquire the vnode */
389 }
390 }
391 /* this double check for compression (hfs_file_is_compressed)
392 * ensures the cached size is present in case decmpfs hasn't
393 * encountered this node yet.
394 */
395 if (vp) {
396 if (hfs_file_is_compressed(VTOC(vp), skiplock) ) {
397 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
398 } else {
399 if (VTOCMP(vp) && VTOCMP(vp)->cmp_type >= CMP_MAX) {
400 if (VTOCMP(vp)->cmp_type != DATALESS_CMPFS_TYPE) {
401 // if we don't recognize this type, just use the real data fork size
402 if (VTOC(vp)->c_datafork) {
403 *size = VTOC(vp)->c_datafork->ff_size;
404 ret = 0;
405 } else {
406 ret = EINVAL;
407 }
408 } else {
409 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
410 ret = 0;
411 }
412 } else {
413 ret = EINVAL;
414 }
415 }
416 }
417
418 if (putaway) { /* did we use hfs_vget() to get this vnode? */
419 vnode_put(vp); /* if so, release it and set it to null */
420 vp = NULL;
421 }
422 return ret;
423 }
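/*
 * Example (sketch): the two calling modes described in the header comment
 * above; err, vp, hfsmp and fid stand in for the caller's own values.
 *
 *	off_t size;
 *	// 1) caller already holds a vnode (ref count > 0); hfsmp and fid unused
 *	err = hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &size, 0);
 *
 *	// 2) caller has no vnode; pass NULL vp plus a valid hfsmp and file ID
 *	err = hfs_uncompressed_size_of_compressed_file(hfsmp, NULL, fid, &size, 0);
 */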
424
425 int
426 hfs_hides_rsrc(vfs_context_t ctx, struct cnode *cp, int skiplock)
427 {
428 if (ctx == decmpfs_ctx)
429 return 0;
430 if (!hfs_file_is_compressed(cp, skiplock))
431 return 0;
432 return decmpfs_hides_rsrc(ctx, cp->c_decmp);
433 }
434
435 int
436 hfs_hides_xattr(vfs_context_t ctx, struct cnode *cp, const char *name, int skiplock)
437 {
438 if (ctx == decmpfs_ctx)
439 return 0;
440 if (!hfs_file_is_compressed(cp, skiplock))
441 return 0;
442 return decmpfs_hides_xattr(ctx, cp->c_decmp, name);
443 }
444 #endif /* HFS_COMPRESSION */
445
446
447 //
448 // This function gets the doc_tombstone structure for the
449 // current thread. If the thread doesn't have one, the
450 // structure is allocated.
451 //
452 static struct doc_tombstone *
453 get_uthread_doc_tombstone(void)
454 {
455 struct uthread *ut;
456 ut = get_bsdthread_info(current_thread());
457
458 if (ut->t_tombstone == NULL) {
459 ut->t_tombstone = kalloc(sizeof(struct doc_tombstone));
460 if (ut->t_tombstone) {
461 memset(ut->t_tombstone, 0, sizeof(struct doc_tombstone));
462 }
463 }
464
465 return ut->t_tombstone;
466 }
467
468 //
469 // This routine clears out the current tombstone for the
470 // current thread and if necessary passes the doc-id of
471 // the tombstone on to the dst_cnode.
472 //
473 // If the doc-id transfers to dst_cnode, we also generate
474 // a doc-id changed fsevent. Unlike all the other fsevents,
475 // doc-id changed events can only be generated here in HFS
476 // where we have the necessary info.
477 //
478 static void
479 clear_tombstone_docid(struct doc_tombstone *ut, __unused struct hfsmount *hfsmp, struct cnode *dst_cnode)
480 {
481 uint32_t old_id = ut->t_lastop_document_id;
482
483 ut->t_lastop_document_id = 0;
484 ut->t_lastop_parent = NULL;
485 ut->t_lastop_parent_vid = 0;
486 ut->t_lastop_filename[0] = '\0';
487
488 //
489 // If the lastop item is still the same and needs to be cleared,
490 // clear it.
491 //
492 if (dst_cnode && old_id && ut->t_lastop_item && vnode_vid(ut->t_lastop_item) == ut->t_lastop_item_vid) {
493 //
494 // clear the document_id from the file that used to have it.
495 // XXXdbg - we need to lock the other vnode and make sure to
496 // update it on disk.
497 //
498 struct cnode *ocp = VTOC(ut->t_lastop_item);
499 struct FndrExtendedFileInfo *ofip = (struct FndrExtendedFileInfo *)((char *)&ocp->c_attr.ca_finderinfo + 16);
500
501 // printf("clearing doc-id from ino %d\n", ocp->c_desc.cd_cnid);
502 ofip->document_id = 0;
503 ocp->c_bsdflags &= ~UF_TRACKED;
504 ocp->c_flag |= C_MODIFIED | C_FORCEUPDATE; // mark it dirty
505 /* cat_update(hfsmp, &ocp->c_desc, &ocp->c_attr, NULL, NULL); */
506
507 }
508
509 #if CONFIG_FSE
510 if (dst_cnode && old_id) {
511 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&dst_cnode->c_attr.ca_finderinfo + 16);
512
513 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
514 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
515 FSE_ARG_INO, (ino64_t)ut->t_lastop_fileid, // src inode #
516 FSE_ARG_INO, (ino64_t)dst_cnode->c_fileid, // dst inode #
517 FSE_ARG_INT32, (uint32_t)fip->document_id,
518 FSE_ARG_DONE);
519 }
520 #endif
521 // last, clear these now that we're all done
522 ut->t_lastop_item = NULL;
523 ut->t_lastop_fileid = 0;
524 ut->t_lastop_item_vid = 0;
525 }
526
527
528 //
529 // This function is used to filter out operations on certain temp
530 // filenames. We have to do this to work around questionable
531 // application behavior from apps like Autocad that perform unusual
532 // sequences of file system operations for a "safe save".
533 //
534 static int
535 is_ignorable_temp_name(const char *nameptr, int len)
536 {
537 if (len == 0) {
538 len = strlen(nameptr);
539 }
540
541 if ( strncmp(nameptr, "atmp", 4) == 0
542 || (len > 4 && strncmp(nameptr+len-4, ".bak", 4) == 0)
543 || (len > 4 && strncmp(nameptr+len-4, ".tmp", 4) == 0)) {
544 return 1;
545 }
546
547 return 0;
548 }
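/*
 * Examples: names the filter above treats as ignorable temp files.
 *
 *	is_ignorable_temp_name("atmp1234", 0)		returns 1  ("atmp" prefix)
 *	is_ignorable_temp_name("drawing.bak", 0)	returns 1  (".bak" suffix)
 *	is_ignorable_temp_name("drawing.tmp", 0)	returns 1  (".tmp" suffix)
 *	is_ignorable_temp_name("drawing.dwg", 0)	returns 0
 */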
549
550 //
551 // Decide if we need to save a tombstone or not. Normally we always
552 // save a tombstone - but if there already is one and the name we're
553 // given is an ignorable name, then we will not save a tombstone.
554 //
555 static int
556 should_save_docid_tombstone(struct doc_tombstone *ut, struct vnode *vp, struct componentname *cnp)
557 {
558 if (cnp->cn_nameptr == NULL) {
559 return 0;
560 }
561
562 if (ut->t_lastop_document_id && ut->t_lastop_item == vp && is_ignorable_temp_name(cnp->cn_nameptr, cnp->cn_namelen)) {
563 return 0;
564 }
565
566 return 1;
567 }
568
569
570 //
571 // This function saves a tombstone for the given vnode and name. The
572 // tombstone represents the parent directory and name where the document
573 // used to live and the document-id of that file. This info is recorded
574 // in the doc_tombstone structure hanging off the uthread (which assumes
575 // that all safe-save operations happen on the same thread).
576 //
577 // If later on the same parent/name combo comes back into existence then
578 // we'll preserve the doc-id from this vnode onto the new vnode.
579 //
580 static void
581 save_tombstone(struct hfsmount *hfsmp, struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int for_unlink)
582 {
583 struct cnode *cp = VTOC(vp);
584 struct doc_tombstone *ut;
585 ut = get_uthread_doc_tombstone();
586
587 if (for_unlink && vp->v_type == VREG && cp->c_linkcount > 1) {
588 //
589 // a regular file that is being unlinked and that is also
590 // hardlinked should not clear the UF_TRACKED state or
591 // mess with the tombstone because somewhere else in the
592 // file system the file is still alive.
593 //
594 return;
595 }
596
597 ut->t_lastop_parent = dvp;
598 ut->t_lastop_parent_vid = vnode_vid(dvp);
599 ut->t_lastop_fileid = cp->c_fileid;
600 if (for_unlink) {
601 ut->t_lastop_item = NULL;
602 ut->t_lastop_item_vid = 0;
603 } else {
604 ut->t_lastop_item = vp;
605 ut->t_lastop_item_vid = vnode_vid(vp);
606 }
607
608 strlcpy((char *)&ut->t_lastop_filename[0], cnp->cn_nameptr, sizeof(ut->t_lastop_filename));
609
610 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
611 ut->t_lastop_document_id = fip->document_id;
612
613 if (for_unlink) {
614 // clear this so it's never returned again
615 fip->document_id = 0;
616 cp->c_bsdflags &= ~UF_TRACKED;
617
618 if (ut->t_lastop_document_id) {
619 (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
620
621 #if CONFIG_FSE
622 // this event is more of a "pending-delete"
623 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
624 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
625 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
626 FSE_ARG_INO, (ino64_t)0, // dst inode #
627 FSE_ARG_INT32, ut->t_lastop_document_id, // document id
628 FSE_ARG_DONE);
629 #endif
630 }
631 }
632 }
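/*
 * Overview (sketch): the document-id tombstone flow implemented above, for a
 * "safe save" performed on a single thread:
 *
 *	1. The original file is unlinked or renamed away -> save_tombstone()
 *	   records (parent, name, document_id) on the uthread and, for unlink,
 *	   clears the id from the on-disk file.
 *	2. A replacement appears at the same parent/name -> the saved
 *	   document_id is carried over to the new cnode and the tombstone is
 *	   cleared via clear_tombstone_docid(), which emits FSE_DOCID_CHANGED.
 */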
633
634
635 /*
636 * Open a file/directory.
637 */
638 int
639 hfs_vnop_open(struct vnop_open_args *ap)
640 {
641 struct vnode *vp = ap->a_vp;
642 struct filefork *fp;
643 struct timeval tv;
644 int error;
645 static int past_bootup = 0;
646 struct cnode *cp = VTOC(vp);
647 struct hfsmount *hfsmp = VTOHFS(vp);
648
649 #if HFS_COMPRESSION
650 if (ap->a_mode & FWRITE) {
651 /* open for write */
652 if ( hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
653 /* opening a compressed file for write, so convert it to decompressed */
654 struct vnode *data_vp = NULL;
655 error = hfs_ref_data_vp(cp, &data_vp, 1); /* 1 == don't take the cnode lock */
656 if (0 == error) {
657 if (data_vp) {
658 error = decmpfs_decompress_file(data_vp, VTOCMP(data_vp), -1, 1, 0);
659 vnode_rele(data_vp);
660 } else {
661 error = EINVAL;
662 }
663 }
664 if (error != 0)
665 return error;
666 }
667 } else {
668 /* open for read */
669 if (hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
670 if (VNODE_IS_RSRC(vp)) {
671 /* opening the resource fork of a compressed file, so nothing to do */
672 } else {
673 /* opening a compressed file for read, make sure it validates */
674 error = decmpfs_validate_compressed_file(vp, VTOCMP(vp));
675 if (error != 0)
676 return error;
677 }
678 }
679 }
680 #endif
681
682 /*
683 * Files marked append-only must be opened for appending.
684 */
685 if ((cp->c_bsdflags & APPEND) && !vnode_isdir(vp) &&
686 (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
687 return (EPERM);
688
689 if (vnode_isreg(vp) && !UBCINFOEXISTS(vp))
690 return (EBUSY); /* file is in use by the kernel */
691
692 /* Don't allow journal to be opened externally. */
693 if (hfs_is_journal_file(hfsmp, cp))
694 return (EPERM);
695
696 if ((hfsmp->hfs_flags & HFS_READ_ONLY) ||
697 (hfsmp->jnl == NULL) ||
698 #if NAMEDSTREAMS
699 !vnode_isreg(vp) || vnode_isinuse(vp, 0) || vnode_isnamedstream(vp)) {
700 #else
701 !vnode_isreg(vp) || vnode_isinuse(vp, 0)) {
702 #endif
703 return (0);
704 }
705
706 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
707 return (error);
708
709 #if QUOTA
710 /* If we're going to write to the file, initialize quotas. */
711 if ((ap->a_mode & FWRITE) && (hfsmp->hfs_flags & HFS_QUOTAS))
712 (void)hfs_getinoquota(cp);
713 #endif /* QUOTA */
714
715 /*
716 * On the first (non-busy) open of a fragmented
717 * file, attempt to de-frag it (if it's less than 20MB).
718 */
719 fp = VTOF(vp);
720 if (fp->ff_blocks &&
721 fp->ff_extents[7].blockCount != 0 &&
722 fp->ff_size <= (20 * 1024 * 1024)) {
723 int no_mods = 0;
724 struct timeval now;
725 /*
726 * Wait until system bootup is done (3 min).
727 * And don't relocate a file that's been modified
728 * within the past minute -- this can lead to
729 * system thrashing.
730 */
731
732 if (!past_bootup) {
733 microuptime(&tv);
734 if (tv.tv_sec > (60*3)) {
735 past_bootup = 1;
736 }
737 }
738
739 microtime(&now);
740 if ((now.tv_sec - cp->c_mtime) > 60) {
741 no_mods = 1;
742 }
743
744 if (past_bootup && no_mods) {
745 (void) hfs_relocate(vp, hfsmp->nextAllocation + 4096,
746 vfs_context_ucred(ap->a_context),
747 vfs_context_proc(ap->a_context));
748 }
749 }
750
751 hfs_unlock(cp);
752
753 return (0);
754 }
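/*
 * Sketch (hypothetical helper, shown only to summarize the heuristic above):
 * a file qualifies for the relocation attempt when it has blocks, its eighth
 * (last in-cnode) extent descriptor is in use, and it is at most 20MB:
 *
 *	static bool hfs_defrag_candidate(struct filefork *fp)
 *	{
 *		return fp->ff_blocks != 0 &&
 *		       fp->ff_extents[7].blockCount != 0 &&
 *		       fp->ff_size <= (20 * 1024 * 1024);
 *	}
 */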
755
756
757 /*
758 * Close a file/directory.
759 */
760 int
761 hfs_vnop_close(ap)
762 struct vnop_close_args /* {
763 struct vnode *a_vp;
764 int a_fflag;
765 vfs_context_t a_context;
766 } */ *ap;
767 {
768 register struct vnode *vp = ap->a_vp;
769 register struct cnode *cp;
770 struct proc *p = vfs_context_proc(ap->a_context);
771 struct hfsmount *hfsmp;
772 int busy;
773 int tooktrunclock = 0;
774 int knownrefs = 0;
775
776 if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0)
777 return (0);
778 cp = VTOC(vp);
779 hfsmp = VTOHFS(vp);
780
781 /*
782 * If the rsrc fork is a named stream, it can cause the data fork to
783 * stay around, preventing de-allocation of these blocks.
784 * Do checks for truncation on close. Purge extra extents if they exist.
785 * Make sure the vp is not a directory, and that it has a resource fork,
786 * and that resource fork is also a named stream.
787 */
788
789 if ((vp->v_type == VREG) && (cp->c_rsrc_vp)
790 && (vnode_isnamedstream(cp->c_rsrc_vp))) {
791 uint32_t blks;
792
793 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
794 /*
795 * If there are extra blocks and there are only 2 refs on
796 * this vp (ourselves + rsrc fork holding ref on us), go ahead
797 * and try to truncate.
798 */
799 if ((blks < VTOF(vp)->ff_blocks) && (!vnode_isinuse(vp, 2))) {
800 // release cnode lock; must acquire truncate lock BEFORE cnode lock
801 hfs_unlock(cp);
802
803 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
804 tooktrunclock = 1;
805
806 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
807 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
808 // bail out if we can't re-acquire cnode lock
809 return 0;
810 }
811 // now re-test to make sure it's still valid
812 if (cp->c_rsrc_vp) {
813 knownrefs = 1 + vnode_isnamedstream(cp->c_rsrc_vp);
814 if (!vnode_isinuse(vp, knownrefs)){
815 // now we can truncate the file, if necessary
816 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
817 if (blks < VTOF(vp)->ff_blocks){
818 (void) hfs_truncate(vp, VTOF(vp)->ff_size, IO_NDELAY,
819 0, ap->a_context);
820 }
821 }
822 }
823 }
824 }
825
826
827 // if we froze the fs and we're exiting, then "thaw" the fs
828 if (hfsmp->hfs_freeze_state == HFS_FROZEN
829 && hfsmp->hfs_freezing_proc == p && proc_exiting(p)) {
830 hfs_thaw(hfsmp, p);
831 }
832
833 busy = vnode_isinuse(vp, 1);
834
835 if (busy) {
836 hfs_touchtimes(VTOHFS(vp), cp);
837 }
838 if (vnode_isdir(vp)) {
839 hfs_reldirhints(cp, busy);
840 } else if (vnode_issystem(vp) && !busy) {
841 vnode_recycle(vp);
842 }
843
844 if (tooktrunclock){
845 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
846 }
847 hfs_unlock(cp);
848
849 if (ap->a_fflag & FWASWRITTEN) {
850 hfs_sync_ejectable(hfsmp);
851 }
852
853 return (0);
854 }
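/*
 * Note (sketch): the truncate-on-close path above follows the lock ordering
 * stated inline (truncate lock BEFORE cnode lock):
 *
 *	hfs_unlock(cp);					// drop cnode lock
 *	hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, ...);	// take truncate lock
 *	hfs_lock(cp, HFS_EXCLUSIVE_LOCK, ...);		// re-take cnode lock
 *	// re-check c_rsrc_vp, ref counts and block counts before truncating,
 *	// since they may have changed while the cnode was unlocked
 */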
855
856 static bool hfs_should_generate_document_id(hfsmount_t *hfsmp, cnode_t *cp)
857 {
858 return (!ISSET(hfsmp->hfs_flags, HFS_READ_ONLY)
859 && ISSET(cp->c_bsdflags, UF_TRACKED)
860 && cp->c_desc.cd_cnid != kHFSRootFolderID
861 && (S_ISDIR(cp->c_mode) || S_ISREG(cp->c_mode) || S_ISLNK(cp->c_mode)));
862 }
863
864 /*
865 * Get basic attributes.
866 */
867 int
868 hfs_vnop_getattr(struct vnop_getattr_args *ap)
869 {
870 #define VNODE_ATTR_TIMES \
871 (VNODE_ATTR_va_access_time|VNODE_ATTR_va_change_time|VNODE_ATTR_va_modify_time)
872 #define VNODE_ATTR_AUTH \
873 (VNODE_ATTR_va_mode | VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | \
874 VNODE_ATTR_va_flags | VNODE_ATTR_va_acl)
875
876 struct vnode *vp = ap->a_vp;
877 struct vnode_attr *vap = ap->a_vap;
878 struct vnode *rvp = NULLVP;
879 struct hfsmount *hfsmp;
880 struct cnode *cp;
881 uint64_t data_size;
882 enum vtype v_type;
883 int error = 0;
884 cp = VTOC(vp);
885
886 #if HFS_COMPRESSION
887 /* we need to inspect the decmpfs state of the file before we take the hfs cnode lock */
888 int compressed = 0;
889 int hide_size = 0;
890 off_t uncompressed_size = -1;
891 if (VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_alloc) || VATTR_IS_ACTIVE(vap, va_data_alloc) || VATTR_IS_ACTIVE(vap, va_total_size)) {
892 /* we only care about whether the file is compressed if asked for the uncompressed size */
893 if (VNODE_IS_RSRC(vp)) {
894 /* if it's a resource fork, decmpfs may want us to hide the size */
895 hide_size = hfs_hides_rsrc(ap->a_context, cp, 0);
896 } else {
897 /* if it's a data fork, we need to know if it was compressed so we can report the uncompressed size */
898 compressed = hfs_file_is_compressed(cp, 0);
899 }
900 if ((VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_size))) {
901 // if it's compressed
902 if (compressed || (!VNODE_IS_RSRC(vp) && cp->c_decmp && cp->c_decmp->cmp_type >= CMP_MAX)) {
903 if (0 != hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &uncompressed_size, 0)) {
904 /* failed to get the uncompressed size, we'll check for this later */
905 uncompressed_size = -1;
906 } else {
907 // fake that it's compressed
908 compressed = 1;
909 }
910 }
911 }
912 }
913 #endif
914
915 /*
916 * Shortcut for vnode_authorize path. Each of the attributes
917 * in this set is updated atomically so we don't need to take
918 * the cnode lock to access them.
919 */
920 if ((vap->va_active & ~VNODE_ATTR_AUTH) == 0) {
921 /* Make sure file still exists. */
922 if (cp->c_flag & C_NOEXISTS)
923 return (ENOENT);
924
925 vap->va_uid = cp->c_uid;
926 vap->va_gid = cp->c_gid;
927 vap->va_mode = cp->c_mode;
928 vap->va_flags = cp->c_bsdflags;
929 vap->va_supported |= VNODE_ATTR_AUTH & ~VNODE_ATTR_va_acl;
930
931 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
932 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
933 VATTR_SET_SUPPORTED(vap, va_acl);
934 }
935
936 return (0);
937 }
938
939 hfsmp = VTOHFS(vp);
940 v_type = vnode_vtype(vp);
941
942 if (VATTR_IS_ACTIVE(vap, va_document_id)) {
943 uint32_t document_id;
944
945 if (cp->c_desc.cd_cnid == kHFSRootFolderID)
946 document_id = kHFSRootFolderID;
947 else {
948 /*
949 * This is safe without a lock because we're just reading
950 * a 32 bit aligned integer which should be atomic on all
951 * platforms we support.
952 */
953 document_id = hfs_get_document_id(cp);
954
955 if (!document_id && hfs_should_generate_document_id(hfsmp, cp)) {
956 uint32_t new_document_id;
957
958 error = hfs_generate_document_id(hfsmp, &new_document_id);
959 if (error)
960 return error;
961
962 error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
963 if (error)
964 return error;
965
966 bool want_docid_fsevent = false;
967
968 // Need to check again now that we have the lock
969 document_id = hfs_get_document_id(cp);
970 if (!document_id && hfs_should_generate_document_id(hfsmp, cp)) {
971 cp->c_attr.ca_finderextendeddirinfo.document_id = document_id = new_document_id;
972 want_docid_fsevent = true;
973 SET(cp->c_flag, C_MODIFIED);
974 }
975
976 hfs_unlock(cp);
977
978 if (want_docid_fsevent) {
979 #if CONFIG_FSE
980 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
981 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
982 FSE_ARG_INO, (ino64_t)0, // src inode #
983 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
984 FSE_ARG_INT32, document_id,
985 FSE_ARG_DONE);
986
987 if (need_fsevent(FSE_STAT_CHANGED, vp)) {
988 add_fsevent(FSE_STAT_CHANGED, ap->a_context,
989 FSE_ARG_VNODE, vp, FSE_ARG_DONE);
990 }
991 #endif
992 }
993 }
994 }
995
996 vap->va_document_id = document_id;
997 VATTR_SET_SUPPORTED(vap, va_document_id);
998 }
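/*
 * Note (sketch): the document-id block above uses a double-checked pattern --
 * generate the id without holding the cnode lock, then re-validate under the
 * lock before publishing it:
 *
 *	id = hfs_get_document_id(cp);			// unlocked fast path
 *	if (!id && should_generate) {
 *		hfs_generate_document_id(hfsmp, &new_id);	// may block
 *		hfs_lock(cp, HFS_EXCLUSIVE_LOCK, ...);
 *		if (!hfs_get_document_id(cp))		// check again under lock
 *			publish new_id and SET(cp->c_flag, C_MODIFIED);
 *		hfs_unlock(cp);
 *	}
 */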
999
1000 /*
1001 * If time attributes are requested and we have cnode times
1002 * that require updating, then acquire an exclusive lock on
1003 * the cnode before updating the times. Otherwise we can
1004 * just acquire a shared lock.
1005 */
1006 if ((vap->va_active & VNODE_ATTR_TIMES) &&
1007 (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime)) {
1008 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
1009 return (error);
1010 hfs_touchtimes(hfsmp, cp);
1011 }
1012 else {
1013 if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT)))
1014 return (error);
1015 }
1016
1017 if (v_type == VDIR) {
1018 data_size = (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE;
1019
1020 if (VATTR_IS_ACTIVE(vap, va_nlink)) {
1021 int nlink;
1022
1023 /*
1024 * For directories, the va_nlink is essentially a count
1025 * of the ".." references to a directory plus the "."
1026 * reference and the directory itself. So for HFS+ this
1027 * becomes the sub-directory count plus two.
1028 *
1029 * In the absence of a sub-directory count we use the
1030 * directory's item count. This will be too high in
1031 * most cases since it also includes files.
1032 */
1033 if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
1034 (cp->c_attr.ca_recflags & kHFSHasFolderCountMask))
1035 nlink = cp->c_attr.ca_dircount; /* implied ".." entries */
1036 else
1037 nlink = cp->c_entries;
1038
1039 /* Account for ourself and our "." entry */
1040 nlink += 2;
1041 /* Hide our private directories. */
1042 if (cp->c_cnid == kHFSRootFolderID) {
1043 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0) {
1044 --nlink;
1045 }
1046 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0) {
1047 --nlink;
1048 }
1049 }
1050 VATTR_RETURN(vap, va_nlink, (u_int64_t)nlink);
1051 }
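/*
 * Example: a folder with 3 sub-directories (and any number of files) reports
 * va_nlink = 3 + 2 = 5 when the folder count is available.  For the root
 * folder, one is then subtracted for each private metadata directory present.
 */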
1052 if (VATTR_IS_ACTIVE(vap, va_nchildren)) {
1053 int entries;
1054
1055 entries = cp->c_entries;
1056 /* Hide our private files and directories. */
1057 if (cp->c_cnid == kHFSRootFolderID) {
1058 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0)
1059 --entries;
1060 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0)
1061 --entries;
1062 if (hfsmp->jnl || ((hfsmp->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))
1063 entries -= 2; /* hide the journal files */
1064 }
1065 VATTR_RETURN(vap, va_nchildren, entries);
1066 }
1067 /*
1068 * The va_dirlinkcount is the count of real directory hard links.
1069 * (i.e. it's not the sum of the implied "." and ".." references)
1070 */
1071 if (VATTR_IS_ACTIVE(vap, va_dirlinkcount)) {
1072 VATTR_RETURN(vap, va_dirlinkcount, (uint32_t)cp->c_linkcount);
1073 }
1074 } else /* !VDIR */ {
1075 data_size = VCTOF(vp, cp)->ff_size;
1076
1077 VATTR_RETURN(vap, va_nlink, (u_int64_t)cp->c_linkcount);
1078 if (VATTR_IS_ACTIVE(vap, va_data_alloc)) {
1079 u_int64_t blocks;
1080
1081 #if HFS_COMPRESSION
1082 if (hide_size) {
1083 VATTR_RETURN(vap, va_data_alloc, 0);
1084 } else if (compressed) {
1085 /* for compressed files, we report all allocated blocks as belonging to the data fork */
1086 blocks = cp->c_blocks;
1087 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
1088 }
1089 else
1090 #endif
1091 {
1092 blocks = VCTOF(vp, cp)->ff_blocks;
1093 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
1094 }
1095 }
1096 }
1097
1098 /* conditional because 64-bit arithmetic can be expensive */
1099 if (VATTR_IS_ACTIVE(vap, va_total_size)) {
1100 if (v_type == VDIR) {
1101 VATTR_RETURN(vap, va_total_size, (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE);
1102 } else {
1103 u_int64_t total_size = ~0ULL;
1104 struct cnode *rcp;
1105 #if HFS_COMPRESSION
1106 if (hide_size) {
1107 /* we're hiding the size of this file, so just return 0 */
1108 total_size = 0;
1109 } else if (compressed) {
1110 if (uncompressed_size == -1) {
1111 /*
1112 * We failed to get the uncompressed size above,
1113 * so we'll fall back to the standard path below
1114 * since total_size is still -1
1115 */
1116 } else {
1117 /* use the uncompressed size we fetched above */
1118 total_size = uncompressed_size;
1119 }
1120 }
1121 #endif
1122 if (total_size == ~0ULL) {
1123 if (cp->c_datafork) {
1124 total_size = cp->c_datafork->ff_size;
1125 }
1126
1127 if (cp->c_blocks - VTOF(vp)->ff_blocks) {
1128 /* We deal with rsrc fork vnode iocount at the end of the function */
1129 error = hfs_vgetrsrc(hfsmp, vp, &rvp);
1130 if (error) {
1131 /*
1132 * Note that we call hfs_vgetrsrc with error_on_unlinked
1133 * set to FALSE. This is because we may be invoked via
1134 * fstat() on an open-unlinked file descriptor and we must
1135 * continue to support access to the rsrc fork until it disappears.
1136 * The code at the end of this function will be
1137 * responsible for releasing the iocount generated by
1138 * hfs_vgetrsrc. This is because we can't drop the iocount
1139 * without unlocking the cnode first.
1140 */
1141 goto out;
1142 }
1143
1144 rcp = VTOC(rvp);
1145 if (rcp && rcp->c_rsrcfork) {
1146 total_size += rcp->c_rsrcfork->ff_size;
1147 }
1148 }
1149 }
1150
1151 VATTR_RETURN(vap, va_total_size, total_size);
1152 }
1153 }
1154 if (VATTR_IS_ACTIVE(vap, va_total_alloc)) {
1155 if (v_type == VDIR) {
1156 VATTR_RETURN(vap, va_total_alloc, 0);
1157 } else {
1158 VATTR_RETURN(vap, va_total_alloc, (u_int64_t)cp->c_blocks * (u_int64_t)hfsmp->blockSize);
1159 }
1160 }
1161
1162 /*
1163 * If the VFS wants extended security data, and we know that we
1164 * don't have any (because it never told us it was setting any)
1165 * then we can return the supported bit and no data. If we do
1166 * have extended security, we can just leave the bit alone and
1167 * the VFS will use the fallback path to fetch it.
1168 */
1169 if (VATTR_IS_ACTIVE(vap, va_acl)) {
1170 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
1171 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
1172 VATTR_SET_SUPPORTED(vap, va_acl);
1173 }
1174 }
1175 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
1176 /* Access times are lazily updated, get current time if needed */
1177 if (cp->c_touch_acctime) {
1178 struct timeval tv;
1179
1180 microtime(&tv);
1181 vap->va_access_time.tv_sec = tv.tv_sec;
1182 } else {
1183 vap->va_access_time.tv_sec = cp->c_atime;
1184 }
1185 vap->va_access_time.tv_nsec = 0;
1186 VATTR_SET_SUPPORTED(vap, va_access_time);
1187 }
1188 vap->va_create_time.tv_sec = cp->c_itime;
1189 vap->va_create_time.tv_nsec = 0;
1190 vap->va_modify_time.tv_sec = cp->c_mtime;
1191 vap->va_modify_time.tv_nsec = 0;
1192 vap->va_change_time.tv_sec = cp->c_ctime;
1193 vap->va_change_time.tv_nsec = 0;
1194 vap->va_backup_time.tv_sec = cp->c_btime;
1195 vap->va_backup_time.tv_nsec = 0;
1196
1197 /* See if we need to emit the date added field to the user */
1198 if (VATTR_IS_ACTIVE(vap, va_addedtime)) {
1199 u_int32_t dateadded = hfs_get_dateadded (cp);
1200 if (dateadded) {
1201 vap->va_addedtime.tv_sec = dateadded;
1202 vap->va_addedtime.tv_nsec = 0;
1203 VATTR_SET_SUPPORTED (vap, va_addedtime);
1204 }
1205 }
1206
1207 /* XXX is this really a good 'optimal I/O size'? */
1208 vap->va_iosize = hfsmp->hfs_logBlockSize;
1209 vap->va_uid = cp->c_uid;
1210 vap->va_gid = cp->c_gid;
1211 vap->va_mode = cp->c_mode;
1212 vap->va_flags = cp->c_bsdflags;
1213
1214 /*
1215 * Exporting file IDs from HFS Plus:
1216 *
1217 * For "normal" files the c_fileid is the same value as the
1218 * c_cnid. But for hard link files, they are different - the
1219 * c_cnid belongs to the active directory entry (ie the link)
1220 * and the c_fileid is for the actual inode (ie the data file).
1221 *
1222 * The stat call (getattr) uses va_fileid and the Carbon APIs,
1223 * which are hardlink-ignorant, will ask for va_linkid.
1224 */
1225 vap->va_fileid = (u_int64_t)cp->c_fileid;
1226 /*
1227 * We need to use the origin cache for both hardlinked files
1228 * and directories. Hardlinked directories have multiple cnids
1229 * and parents (one per link). Hardlinked files also have their
1230 * own parents and link IDs separate from the indirect inode number.
1231 * If we don't use the cache, we could end up vending the wrong ID
1232 * because the cnode will only reflect the link that was looked up most recently.
1233 */
1234 if (cp->c_flag & C_HARDLINK) {
1235 vap->va_linkid = (u_int64_t)hfs_currentcnid(cp);
1236 vap->va_parentid = (u_int64_t)hfs_currentparent(cp);
1237 } else {
1238 vap->va_linkid = (u_int64_t)cp->c_cnid;
1239 vap->va_parentid = (u_int64_t)cp->c_parentcnid;
1240 }
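/*
 * Example: for a file with two hard links A and B sharing one inode,
 *
 *	link A:	va_linkid = cnid of A,	va_parentid = A's parent
 *	link B:	va_linkid = cnid of B,	va_parentid = B's parent
 *	both:	va_fileid = the shared inode's c_fileid
 *
 * which is why the origin cache (hfs_currentcnid/hfs_currentparent) is used
 * above rather than whichever link the cnode was most recently looked up by.
 */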
1241 vap->va_fsid = hfsmp->hfs_raw_dev;
1242 vap->va_filerev = 0;
1243 vap->va_encoding = cp->c_encoding;
1244 vap->va_rdev = (v_type == VBLK || v_type == VCHR) ? cp->c_rdev : 0;
1245 #if HFS_COMPRESSION
1246 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1247 if (hide_size)
1248 vap->va_data_size = 0;
1249 else if (compressed) {
1250 if (uncompressed_size == -1) {
1251 /* failed to get the uncompressed size above, so just return data_size */
1252 vap->va_data_size = data_size;
1253 } else {
1254 /* use the uncompressed size we fetched above */
1255 vap->va_data_size = uncompressed_size;
1256 }
1257 } else
1258 vap->va_data_size = data_size;
1259 // vap->va_supported |= VNODE_ATTR_va_data_size;
1260 VATTR_SET_SUPPORTED(vap, va_data_size);
1261 }
1262 #else
1263 vap->va_data_size = data_size;
1264 vap->va_supported |= VNODE_ATTR_va_data_size;
1265 #endif
1266
1267 #if CONFIG_PROTECT
1268 if (VATTR_IS_ACTIVE(vap, va_dataprotect_class)) {
1269 vap->va_dataprotect_class = cp->c_cpentry ? cp->c_cpentry->cp_pclass : 0;
1270 VATTR_SET_SUPPORTED(vap, va_dataprotect_class);
1271 }
1272 #endif
1273 if (VATTR_IS_ACTIVE(vap, va_write_gencount)) {
1274 if (ubc_is_mapped_writable(vp)) {
1275 /*
1276 * Return 0 to the caller to indicate the file may be
1277 * changing. There is no need for us to increment the
1278 * generation counter here because it gets done as part of
1279 * page-out and also when the file is unmapped (to account
1280 * for changes we might not have seen).
1281 */
1282 vap->va_write_gencount = 0;
1283 } else {
1284 vap->va_write_gencount = hfs_get_gencount(cp);
1285 }
1286
1287 VATTR_SET_SUPPORTED(vap, va_write_gencount);
1288 }
1289
1290 /* Mark them all at once instead of individual VATTR_SET_SUPPORTED calls. */
1291 vap->va_supported |= VNODE_ATTR_va_create_time | VNODE_ATTR_va_modify_time |
1292 VNODE_ATTR_va_change_time| VNODE_ATTR_va_backup_time |
1293 VNODE_ATTR_va_iosize | VNODE_ATTR_va_uid |
1294 VNODE_ATTR_va_gid | VNODE_ATTR_va_mode |
1295 VNODE_ATTR_va_flags |VNODE_ATTR_va_fileid |
1296 VNODE_ATTR_va_linkid | VNODE_ATTR_va_parentid |
1297 VNODE_ATTR_va_fsid | VNODE_ATTR_va_filerev |
1298 VNODE_ATTR_va_encoding | VNODE_ATTR_va_rdev;
1299
1300 /* If this is the root, let VFS find out the mount name, which
1301 * may be different from the real name. Otherwise, we need to take care
1302 * of hardlinked files, which need to be looked up, if necessary.
1303 */
1304 if (VATTR_IS_ACTIVE(vap, va_name) && (cp->c_cnid != kHFSRootFolderID)) {
1305 struct cat_desc linkdesc;
1306 int lockflags;
1307 int uselinkdesc = 0;
1308 cnid_t nextlinkid = 0;
1309 cnid_t prevlinkid = 0;
1310
1311 /* Get the name for ATTR_CMN_NAME. We need to take special care for hardlinks
1312 * here because the information for the link ID requested by getattrlist may be
1313 * different than what's currently in the cnode. This is because the cnode
1314 * will be filled in with the information for the most recent link ID that went
1315 * through namei/lookup(). If there are competing lookups for hardlinks that point
1316 * to the same inode, one (or more) getattrlists could be vended incorrect name information.
1317 * Also, we need to beware of open-unlinked files which could have a namelen of 0.
1318 */
1319
1320 if ((cp->c_flag & C_HARDLINK) &&
1321 ((cp->c_desc.cd_namelen == 0) || (vap->va_linkid != cp->c_cnid))) {
1322 /*
1323 * If we have no name and our link ID is the raw inode number, then we may
1324 * have an open-unlinked file. Go to the next link in this case.
1325 */
1326 if ((cp->c_desc.cd_namelen == 0) && (vap->va_linkid == cp->c_fileid)) {
1327 if ((error = hfs_lookup_siblinglinks(hfsmp, vap->va_linkid, &prevlinkid, &nextlinkid))){
1328 goto out;
1329 }
1330 }
1331 else {
1332 /* just use link obtained from vap above */
1333 nextlinkid = vap->va_linkid;
1334 }
1335
1336 /* We need to probe the catalog for the descriptor corresponding to the link ID
1337 * stored in nextlinkid. Note that we don't know if we have the exclusive lock
1338 * for the cnode here, so we can't just update the descriptor. Instead,
1339 * we should just store the descriptor's value locally and then use it to pass
1340 * out the name value as needed below.
1341 */
1342 if (nextlinkid){
1343 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
1344 error = cat_findname(hfsmp, nextlinkid, &linkdesc);
1345 hfs_systemfile_unlock(hfsmp, lockflags);
1346 if (error == 0) {
1347 uselinkdesc = 1;
1348 }
1349 }
1350 }
1351
1352 /* By this point, we've either patched up the name above and the c_desc
1353 * points to the correct data, or it already did, in which case we just proceed
1354 * by copying the name into the vap. Note that we will never set va_name to
1355 * supported if nextlinkid is never initialized. This could happen in the degenerate
1356 * case above involving the raw inode number, where it has no nextlinkid. In this case
1357 * we will simply not mark the name bit as supported.
1358 */
1359 if (uselinkdesc) {
1360 strlcpy(vap->va_name, (const char*) linkdesc.cd_nameptr, MAXPATHLEN);
1361 VATTR_SET_SUPPORTED(vap, va_name);
1362 cat_releasedesc(&linkdesc);
1363 }
1364 else if (cp->c_desc.cd_namelen) {
1365 strlcpy(vap->va_name, (const char*) cp->c_desc.cd_nameptr, MAXPATHLEN);
1366 VATTR_SET_SUPPORTED(vap, va_name);
1367 }
1368 }
1369
1370 out:
1371 hfs_unlock(cp);
1372 /*
1373 * We need to vnode_put the rsrc fork vnode only *after* we've released
1374 * the cnode lock, since vnode_put can trigger an inactive call, which
1375 * will go back into HFS and try to acquire a cnode lock.
1376 */
1377 if (rvp) {
1378 vnode_put (rvp);
1379 }
1380
1381 return (error);
1382 }
1383
1384 int
1385 hfs_vnop_setattr(ap)
1386 struct vnop_setattr_args /* {
1387 struct vnode *a_vp;
1388 struct vnode_attr *a_vap;
1389 vfs_context_t a_context;
1390 } */ *ap;
1391 {
1392 struct vnode_attr *vap = ap->a_vap;
1393 struct vnode *vp = ap->a_vp;
1394 struct cnode *cp = NULL;
1395 struct hfsmount *hfsmp;
1396 kauth_cred_t cred = vfs_context_ucred(ap->a_context);
1397 struct proc *p = vfs_context_proc(ap->a_context);
1398 int error = 0;
1399 uid_t nuid;
1400 gid_t ngid;
1401 time_t orig_ctime;
1402
1403 orig_ctime = VTOC(vp)->c_ctime;
1404
1405 #if HFS_COMPRESSION
1406 int decmpfs_reset_state = 0;
1407 /*
1408 We call decmpfs_update_attributes() even if the file is not compressed
1409 because we want to update the incoming flags if the xattrs are invalid.
1410 */
1411 error = decmpfs_update_attributes(vp, vap);
1412 if (error)
1413 return error;
1414 #endif
1415 //
1416 // if this is not a size-changing setattr and it is not just
1417 // an atime update, then check for a snapshot.
1418 //
1419 if (!VATTR_IS_ACTIVE(vap, va_data_size) && !(vap->va_active == VNODE_ATTR_va_access_time)) {
1420 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_METADATA_MOD, NSPACE_REARM_NO_ARG);
1421 }
1422
1423 #if CONFIG_PROTECT
1424 if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
1425 return (error);
1426 }
1427 #endif /* CONFIG_PROTECT */
1428
1429 hfsmp = VTOHFS(vp);
1430
1431 /* Don't allow modification of the journal. */
1432 if (hfs_is_journal_file(hfsmp, VTOC(vp))) {
1433 return (EPERM);
1434 }
1435
1436 //
1437 // Check if we'll need a document_id and, if so, get it before we lock
1438 // the cnode to avoid any possible deadlock with the root vnode, which has
1439 // to get locked to get the document id.
1440 //
1441 u_int32_t document_id=0;
1442 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & UF_TRACKED) && !(VTOC(vp)->c_bsdflags & UF_TRACKED)) {
1443 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&(VTOC(vp)->c_attr.ca_finderinfo) + 16);
1444 //
1445 // If the document_id is not set, get a new one. It will be set
1446 // on the file down below once we hold the cnode lock.
1447 //
1448 if (fip->document_id == 0) {
1449 if (hfs_generate_document_id(hfsmp, &document_id) != 0) {
1450 document_id = 0;
1451 }
1452 }
1453 }
1454
1455
1456 /*
1457 * File size change request.
1458 * We are guaranteed that this is not a directory, and that
1459 * the filesystem object is writeable.
1460 *
1461 * NOTE: HFS COMPRESSION depends on the data_size being set *before* the bsd flags are updated
1462 */
1463 VATTR_SET_SUPPORTED(vap, va_data_size);
1464 if (VATTR_IS_ACTIVE(vap, va_data_size) && !vnode_islnk(vp)) {
1465 #if HFS_COMPRESSION
1466 /* keep the compressed state locked until we're done truncating the file */
1467 decmpfs_cnode *dp = VTOCMP(vp);
1468 if (!dp) {
1469 /*
1470 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1471 * is filled in; we need a decmpfs_cnode to lock out decmpfs state changes
1472 * on this file while it's truncating
1473 */
1474 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1475 if (!dp) {
1476 /* failed to allocate a decmpfs_cnode */
1477 return ENOMEM; /* what should this be? */
1478 }
1479 }
1480
1481 check_for_tracked_file(vp, orig_ctime, vap->va_data_size == 0 ? NAMESPACE_HANDLER_TRUNCATE_OP|NAMESPACE_HANDLER_DELETE_OP : NAMESPACE_HANDLER_TRUNCATE_OP, NULL);
1482
1483 decmpfs_lock_compressed_data(dp, 1);
1484 if (hfs_file_is_compressed(VTOC(vp), 1)) {
1485 error = decmpfs_decompress_file(vp, dp, -1/*vap->va_data_size*/, 0, 1);
1486 if (error != 0) {
1487 decmpfs_unlock_compressed_data(dp, 1);
1488 return error;
1489 }
1490 }
1491 #endif
1492
1493 // Take truncate lock
1494 hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
1495
1496 // hfs_truncate will deal with the cnode lock
1497 error = hfs_truncate(vp, vap->va_data_size, vap->va_vaflags & 0xffff,
1498 0, ap->a_context);
1499
1500 hfs_unlock_truncate(VTOC(vp), HFS_LOCK_DEFAULT);
1501 #if HFS_COMPRESSION
1502 decmpfs_unlock_compressed_data(dp, 1);
1503 #endif
1504 if (error)
1505 return error;
1506 }
1507 if (cp == NULL) {
1508 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
1509 return (error);
1510 cp = VTOC(vp);
1511 }
1512
1513 /*
1514 * If it is just an access time update request by itself
1515 * we know the request is from kernel level code, and we
1516 * can delay it without being as worried about consistency.
1517 * This change speeds up mmaps, in the rare case that they
1518 * get caught behind a sync.
1519 */
1520
1521 if (vap->va_active == VNODE_ATTR_va_access_time) {
1522 cp->c_touch_acctime=TRUE;
1523 goto out;
1524 }
1525
1526
1527
1528 /*
1529 * Owner/group change request.
1530 * We are guaranteed that the new owner/group is valid and legal.
1531 */
1532 VATTR_SET_SUPPORTED(vap, va_uid);
1533 VATTR_SET_SUPPORTED(vap, va_gid);
1534 nuid = VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uid_t)VNOVAL;
1535 ngid = VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (gid_t)VNOVAL;
1536 if (((nuid != (uid_t)VNOVAL) || (ngid != (gid_t)VNOVAL)) &&
1537 ((error = hfs_chown(vp, nuid, ngid, cred, p)) != 0))
1538 goto out;
1539
1540 /*
1541 * Mode change request.
1542 * We are guaranteed that the mode value is valid and that in
1543 * conjunction with the owner and group, this change is legal.
1544 */
1545 VATTR_SET_SUPPORTED(vap, va_mode);
1546 if (VATTR_IS_ACTIVE(vap, va_mode) &&
1547 ((error = hfs_chmod(vp, (int)vap->va_mode, cred, p)) != 0))
1548 goto out;
1549
1550 /*
1551 * File flags change.
1552 * We are guaranteed that only flags allowed to change given the
1553 * current securelevel are being changed.
1554 */
1555 VATTR_SET_SUPPORTED(vap, va_flags);
1556 if (VATTR_IS_ACTIVE(vap, va_flags)) {
1557 u_int16_t *fdFlags;
1558
1559 #if HFS_COMPRESSION
1560 if ((cp->c_bsdflags ^ vap->va_flags) & UF_COMPRESSED) {
1561 /*
1562 * The UF_COMPRESSED flag was toggled, so reset our cached compressed state,
1563 * but don't actually do the update until we've released the cnode lock below.
1564 * NOTE: turning the flag off doesn't actually decompress the file; this lets us
1565 * turn off the flag and look at the "raw" file for debugging purposes.
1566 */
1567 decmpfs_reset_state = 1;
1568 }
1569 #endif
1570 if ((vap->va_flags & UF_TRACKED) && !(cp->c_bsdflags & UF_TRACKED)) {
1571 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
1572
1573 //
1574 // We're marking this item UF_TRACKED. If the document_id is
1575 // not set, get a new one and put it on the file.
1576 //
1577 if (fip->document_id == 0) {
1578 if (document_id != 0) {
1579 // printf("SETATTR: assigning doc-id %d to %s (ino %d)\n", document_id, vp->v_name, cp->c_desc.cd_cnid);
1580 fip->document_id = (uint32_t)document_id;
1581 #if CONFIG_FSE
1582 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
1583 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
1584 FSE_ARG_INO, (ino64_t)0, // src inode #
1585 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
1586 FSE_ARG_INT32, document_id,
1587 FSE_ARG_DONE);
1588 #endif
1589 } else {
1590 // printf("hfs: could not acquire a new document_id for %s (ino %d)\n", vp->v_name, cp->c_desc.cd_cnid);
1591 }
1592 }
1593
1594 } else if (!(vap->va_flags & UF_TRACKED) && (cp->c_bsdflags & UF_TRACKED)) {
1595 //
1596 // UF_TRACKED is being cleared so clear the document_id
1597 //
1598 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
1599 if (fip->document_id) {
1600 // printf("SETATTR: clearing doc-id %d from %s (ino %d)\n", fip->document_id, vp->v_name, cp->c_desc.cd_cnid);
1601 #if CONFIG_FSE
1602 add_fsevent(FSE_DOCID_CHANGED, ap->a_context,
1603 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
1604 FSE_ARG_INO, (ino64_t)cp->c_fileid, // src inode #
1605 FSE_ARG_INO, (ino64_t)0, // dst inode #
1606 FSE_ARG_INT32, fip->document_id, // document id
1607 FSE_ARG_DONE);
1608 #endif
1609 fip->document_id = 0;
1610 cp->c_bsdflags &= ~UF_TRACKED;
1611 }
1612 }
1613
1614 cp->c_bsdflags = vap->va_flags;
1615 cp->c_touch_chgtime = TRUE;
1616
1617
1618 /*
1619 * Mirror the UF_HIDDEN flag to the invisible bit of the Finder Info.
1620 *
1621 * The fdFlags for files and frFlags for folders are both 8 bytes
1622 * into the userInfo (the first 16 bytes of the Finder Info). They
1623 * are both 16-bit fields.
1624 */
1625 fdFlags = (u_int16_t *) &cp->c_finderinfo[8];
1626 if (vap->va_flags & UF_HIDDEN)
1627 *fdFlags |= OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1628 else
1629 *fdFlags &= ~OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1630 }
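/*
 * Sketch: the Finder flags are stored big-endian, which is why the bit is
 * byte-swapped above.  Testing the invisible bit works the same way:
 *
 *	u_int16_t *flags = (u_int16_t *)&cp->c_finderinfo[8];
 *	int invisible = (*flags & OSSwapHostToBigConstInt16(kFinderInvisibleMask)) != 0;
 */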
1631
1632 /*
1633 * Timestamp updates.
1634 */
1635 VATTR_SET_SUPPORTED(vap, va_create_time);
1636 VATTR_SET_SUPPORTED(vap, va_access_time);
1637 VATTR_SET_SUPPORTED(vap, va_modify_time);
1638 VATTR_SET_SUPPORTED(vap, va_backup_time);
1639 VATTR_SET_SUPPORTED(vap, va_change_time);
1640 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
1641 VATTR_IS_ACTIVE(vap, va_access_time) ||
1642 VATTR_IS_ACTIVE(vap, va_modify_time) ||
1643 VATTR_IS_ACTIVE(vap, va_backup_time)) {
1644 if (VATTR_IS_ACTIVE(vap, va_create_time))
1645 cp->c_itime = vap->va_create_time.tv_sec;
1646 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
1647 cp->c_atime = vap->va_access_time.tv_sec;
1648 cp->c_touch_acctime = FALSE;
1649 }
1650 if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
1651 cp->c_mtime = vap->va_modify_time.tv_sec;
1652 cp->c_touch_modtime = FALSE;
1653 cp->c_touch_chgtime = TRUE;
1654
1655 hfs_clear_might_be_dirty_flag(cp);
1656
1657 /*
1658 * The utimes system call can reset the modification
1659 * time but it doesn't know about HFS create times.
1660 * So we need to ensure that the creation time is
1661 * always at least as old as the modification time.
1662 */
1663 if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
1664 (cp->c_cnid != kHFSRootFolderID) &&
1665 (cp->c_mtime < cp->c_itime)) {
1666 cp->c_itime = cp->c_mtime;
1667 }
1668 }
1669 if (VATTR_IS_ACTIVE(vap, va_backup_time))
1670 cp->c_btime = vap->va_backup_time.tv_sec;
1671 cp->c_flag |= C_MODIFIED;
1672 }
1673
1674 /*
1675 * Set name encoding.
1676 */
1677 VATTR_SET_SUPPORTED(vap, va_encoding);
1678 if (VATTR_IS_ACTIVE(vap, va_encoding)) {
1679 cp->c_encoding = vap->va_encoding;
1680 hfs_setencodingbits(hfsmp, cp->c_encoding);
1681 }
1682
1683 if ((error = hfs_update(vp, TRUE)) != 0)
1684 goto out;
1685 out:
1686 if (cp) {
1687 /* Purge the origin cache for the cnode, since the caller now has the correct link ID for it.
1688 * We purge it here since it was acquired for us during lookup, and we no longer need it.
1689 */
1690 if ((cp->c_flag & C_HARDLINK) && (vp->v_type != VDIR)){
1691 hfs_relorigin(cp, 0);
1692 }
1693
1694 hfs_unlock(cp);
1695 #if HFS_COMPRESSION
1696 if (decmpfs_reset_state) {
1697 /*
1698 * we've changed the UF_COMPRESSED flag, so reset the decmpfs state for this cnode
1699 * but don't do it while holding the hfs cnode lock
1700 */
1701 decmpfs_cnode *dp = VTOCMP(vp);
1702 if (!dp) {
1703 /*
1704 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1705 * is filled in; we need a decmpfs_cnode to prevent decmpfs state changes
1706 * on this file if it's locked
1707 */
1708 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1709 if (!dp) {
1710 /* failed to allocate a decmpfs_cnode */
1711 return ENOMEM; /* what should this be? */
1712 }
1713 }
1714 decmpfs_cnode_set_vnode_state(dp, FILE_TYPE_UNKNOWN, 0);
1715 }
1716 #endif
1717 }
1718 return (error);
1719 }
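/*
 * Illustrative sketch (not part of this vnop): the UF_HIDDEN handling above
 * is typically reached from user space via chflags(2)/fchflags(2), and
 * hfs_vnop_setattr then mirrors the flag into the Finder "invisible" bit.
 * The helper name and path are hypothetical; error handling is minimal.
 *
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	static int
 *	hide_file(const char *path)
 *	{
 *		struct stat st;
 *
 *		if (stat(path, &st) != 0)
 *			return (-1);
 *		// Preserve existing flags and add UF_HIDDEN.
 *		if (chflags(path, st.st_flags | UF_HIDDEN) != 0) {
 *			perror("chflags");
 *			return (-1);
 *		}
 *		return (0);
 *	}
 */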
1720
1721
1722 /*
1723 * Change the mode on a file.
1724 * cnode must be locked before calling.
1725 */
1726 int
1727 hfs_chmod(struct vnode *vp, int mode, __unused kauth_cred_t cred, __unused struct proc *p)
1728 {
1729 register struct cnode *cp = VTOC(vp);
1730
1731 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1732 return (0);
1733
1734 // Don't allow modification of the journal or journal_info_block
1735 if (hfs_is_journal_file(VTOHFS(vp), cp)) {
1736 return EPERM;
1737 }
1738
1739 #if OVERRIDE_UNKNOWN_PERMISSIONS
1740 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS) {
1741 return (0);
1742 };
1743 #endif
1744 cp->c_mode &= ~ALLPERMS;
1745 cp->c_mode |= (mode & ALLPERMS);
1746 cp->c_touch_chgtime = TRUE;
1747 return (0);
1748 }
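/*
 * A minimal sketch of the masking performed above (assumptions: user-space
 * context, helper name hypothetical): only the ALLPERMS bits (07777 --
 * rwx for user/group/other plus setuid, setgid and sticky) of the requested
 * mode are honored; the file-type bits in c_mode are left untouched.
 *
 *	#include <sys/stat.h>
 *
 *	static mode_t
 *	apply_chmod_bits(mode_t current, mode_t requested)
 *	{
 *		return ((current & ~ALLPERMS) | (requested & ALLPERMS));
 *	}
 */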
1749
1750
1751 int
1752 hfs_write_access(struct vnode *vp, kauth_cred_t cred, struct proc *p, Boolean considerFlags)
1753 {
1754 struct cnode *cp = VTOC(vp);
1755 int retval = 0;
1756 int is_member;
1757
1758 /*
1759 * Disallow write attempts on read-only file systems;
1760 * unless the file is a socket, fifo, or a block or
1761 * character device resident on the file system.
1762 */
1763 switch (vnode_vtype(vp)) {
1764 case VDIR:
1765 case VLNK:
1766 case VREG:
1767 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
1768 return (EROFS);
1769 break;
1770 default:
1771 break;
1772 }
1773
1774 /* If immutable bit set, nobody gets to write it. */
1775 if (considerFlags && (cp->c_bsdflags & IMMUTABLE))
1776 return (EPERM);
1777
1778 /* Otherwise, user id 0 always gets access. */
1779 if (!suser(cred, NULL))
1780 return (0);
1781
1782 /* Otherwise, check the owner. */
1783 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
1784 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
1785
1786 /* Otherwise, check the groups. */
1787 if (kauth_cred_ismember_gid(cred, cp->c_gid, &is_member) == 0 && is_member) {
1788 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
1789 }
1790
1791 /* Otherwise, check everyone else. */
1792 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
1793 }
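/*
 * Simplified user-space analogue of the check order above (illustrative
 * only; helper name hypothetical): root always gets write access, then the
 * owner bit is consulted, then the group bit, then "other". It deliberately
 * ignores read-only mounts, immutability and supplementary groups, which
 * the real function handles via kauth.
 *
 *	#include <sys/stat.h>
 *	#include <stdbool.h>
 *
 *	static bool
 *	may_write(const struct stat *st, uid_t uid, gid_t gid)
 *	{
 *		if (uid == 0)
 *			return (true);
 *		if (uid == st->st_uid)
 *			return ((st->st_mode & S_IWUSR) != 0);
 *		if (gid == st->st_gid)
 *			return ((st->st_mode & S_IWGRP) != 0);
 *		return ((st->st_mode & S_IWOTH) != 0);
 *	}
 */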
1794
1795
1796 /*
1797 * Perform chown operation on cnode cp;
1798 * cnode must be locked prior to call.
1799 */
1800 int
1801 #if !QUOTA
1802 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, __unused kauth_cred_t cred,
1803 __unused struct proc *p)
1804 #else
1805 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred,
1806 __unused struct proc *p)
1807 #endif
1808 {
1809 register struct cnode *cp = VTOC(vp);
1810 uid_t ouid;
1811 gid_t ogid;
1812 #if QUOTA
1813 int error = 0;
1814 register int i;
1815 int64_t change;
1816 #endif /* QUOTA */
1817
1818 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1819 return (ENOTSUP);
1820
1821 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS)
1822 return (0);
1823
1824 if (uid == (uid_t)VNOVAL)
1825 uid = cp->c_uid;
1826 if (gid == (gid_t)VNOVAL)
1827 gid = cp->c_gid;
1828
1829 #if 0 /* we are guaranteed that this is already the case */
1830 /*
1831 * If we don't own the file, are trying to change the owner
1832 * of the file, or are not a member of the target group,
1833 * the caller must be superuser or the call fails.
1834 */
1835 if ((kauth_cred_getuid(cred) != cp->c_uid || uid != cp->c_uid ||
1836 (gid != cp->c_gid &&
1837 (kauth_cred_ismember_gid(cred, gid, &is_member) || !is_member))) &&
1838 (error = suser(cred, 0)))
1839 return (error);
1840 #endif
1841
1842 ogid = cp->c_gid;
1843 ouid = cp->c_uid;
1844 #if QUOTA
1845 if ((error = hfs_getinoquota(cp)))
1846 return (error);
1847 if (ouid == uid) {
1848 dqrele(cp->c_dquot[USRQUOTA]);
1849 cp->c_dquot[USRQUOTA] = NODQUOT;
1850 }
1851 if (ogid == gid) {
1852 dqrele(cp->c_dquot[GRPQUOTA]);
1853 cp->c_dquot[GRPQUOTA] = NODQUOT;
1854 }
1855
1856 /*
1857 * Eventually need to account for (fake) a block per directory
1858 * if (vnode_isdir(vp))
1859 * change = VTOHFS(vp)->blockSize;
1860 * else
1861 */
1862
1863 change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
1864 (void) hfs_chkdq(cp, -change, cred, CHOWN);
1865 (void) hfs_chkiq(cp, -1, cred, CHOWN);
1866 for (i = 0; i < MAXQUOTAS; i++) {
1867 dqrele(cp->c_dquot[i]);
1868 cp->c_dquot[i] = NODQUOT;
1869 }
1870 #endif /* QUOTA */
1871 cp->c_gid = gid;
1872 cp->c_uid = uid;
1873 #if QUOTA
1874 if ((error = hfs_getinoquota(cp)) == 0) {
1875 if (ouid == uid) {
1876 dqrele(cp->c_dquot[USRQUOTA]);
1877 cp->c_dquot[USRQUOTA] = NODQUOT;
1878 }
1879 if (ogid == gid) {
1880 dqrele(cp->c_dquot[GRPQUOTA]);
1881 cp->c_dquot[GRPQUOTA] = NODQUOT;
1882 }
1883 if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
1884 if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
1885 goto good;
1886 else
1887 (void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
1888 }
1889 for (i = 0; i < MAXQUOTAS; i++) {
1890 dqrele(cp->c_dquot[i]);
1891 cp->c_dquot[i] = NODQUOT;
1892 }
1893 }
1894 cp->c_gid = ogid;
1895 cp->c_uid = ouid;
1896 if (hfs_getinoquota(cp) == 0) {
1897 if (ouid == uid) {
1898 dqrele(cp->c_dquot[USRQUOTA]);
1899 cp->c_dquot[USRQUOTA] = NODQUOT;
1900 }
1901 if (ogid == gid) {
1902 dqrele(cp->c_dquot[GRPQUOTA]);
1903 cp->c_dquot[GRPQUOTA] = NODQUOT;
1904 }
1905 (void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
1906 (void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
1907 (void) hfs_getinoquota(cp);
1908 }
1909 return (error);
1910 good:
1911 if (hfs_getinoquota(cp))
1912 panic("hfs_chown: lost quota");
1913 #endif /* QUOTA */
1914
1915
1916 /*
1917 * According to the SUSv3 Standard, chown() shall mark
1918 * for update the st_ctime field of the file.
1919 * (No exceptions mentioned.)
1920 */
1921 cp->c_touch_chgtime = TRUE;
1922 return (0);
1923 }
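/*
 * For reference (illustrative, helper name hypothetical): the
 * "uid == (uid_t)VNOVAL" / "gid == (gid_t)VNOVAL" checks above are how a
 * caller-supplied -1 in chown(2) translates into "leave this field alone".
 *
 *	#include <unistd.h>
 *
 *	static int
 *	change_group_only(const char *path, gid_t gid)
 *	{
 *		// Owner is left unchanged by passing (uid_t)-1.
 *		return (chown(path, (uid_t)-1, gid));
 *	}
 */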
1924
1925 #if HFS_COMPRESSION
1926 /*
1927 * Flush the resource fork if it exists. vp is the data fork and has
1928 * an iocount.
1929 */
1930 static int hfs_flush_rsrc(vnode_t vp, vfs_context_t ctx)
1931 {
1932 cnode_t *cp = VTOC(vp);
1933
1934 hfs_lock(cp, HFS_SHARED_LOCK, 0);
1935
1936 vnode_t rvp = cp->c_rsrc_vp;
1937
1938 if (!rvp) {
1939 hfs_unlock(cp);
1940 return 0;
1941 }
1942
1943 int vid = vnode_vid(rvp);
1944
1945 hfs_unlock(cp);
1946
1947 int error = vnode_getwithvid(rvp, vid);
1948
1949 if (error)
1950 return error == ENOENT ? 0 : error;
1951
1952 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, 0);
1953 hfs_lock_always(cp, HFS_EXCLUSIVE_LOCK);
1954 hfs_filedone(rvp, ctx, HFS_FILE_DONE_NO_SYNC);
1955 hfs_unlock(cp);
1956 hfs_unlock_truncate(cp, 0);
1957
1958 error = ubc_msync(rvp, 0, ubc_getsize(rvp), NULL,
1959 UBC_PUSHALL | UBC_SYNC);
1960
1961 vnode_put(rvp);
1962
1963 return error;
1964 }
1965 #endif // HFS_COMPRESSION
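/*
 * For context (illustrative): the resource fork flushed by hfs_flush_rsrc
 * above is also reachable from user space through the named-fork path
 * suffix defined in <sys/paths.h> (_PATH_RSRCFORKSPEC). The file name
 * below is hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	// int rfd = open("somefile/..namedfork/rsrc", O_RDONLY);
 *	// ... reads on rfd see only the resource fork ...
 *	// close(rfd);
 */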
1966
1967 /*
1968 * hfs_vnop_exchange:
1969 *
1970 * Inputs:
1971 * 'from' vnode/cnode
1972 * 'to' vnode/cnode
1973 * options flag bits
1974 * vfs_context
1975 *
1976 * Discussion:
1977 * hfs_vnop_exchange is used to service the exchangedata(2) system call.
1978 * Per the requirements of that system call, this function "swaps" some
1979 * of the information that lives in one catalog record for some that
1980 * lives in another. Note that not everything is swapped; in particular,
1981 * the extent information stored in each cnode is kept local to that
1982 * cnode. This allows existing file descriptor references to continue
1983 * to operate on the same content, regardless of the location in the
1984 * namespace that the file may have moved to. See inline comments
1985 * in the function for more information.
1986 */
1987 int
1988 hfs_vnop_exchange(ap)
1989 struct vnop_exchange_args /* {
1990 struct vnode *a_fvp;
1991 struct vnode *a_tvp;
1992 int a_options;
1993 vfs_context_t a_context;
1994 } */ *ap;
1995 {
1996 struct vnode *from_vp = ap->a_fvp;
1997 struct vnode *to_vp = ap->a_tvp;
1998 struct cnode *from_cp;
1999 struct cnode *to_cp;
2000 struct hfsmount *hfsmp;
2001 struct cat_desc tempdesc;
2002 struct cat_attr tempattr;
2003 const unsigned char *from_nameptr;
2004 const unsigned char *to_nameptr;
2005 char from_iname[32];
2006 char to_iname[32];
2007 uint32_t to_flag_special;
2008 uint32_t from_flag_special;
2009 cnid_t from_parid;
2010 cnid_t to_parid;
2011 int lockflags;
2012 int error = 0, started_tr = 0, got_cookie = 0;
2013 cat_cookie_t cookie;
2014 time_t orig_from_ctime, orig_to_ctime;
2015 bool have_cnode_locks = false, have_from_trunc_lock = false, have_to_trunc_lock = false;
2016
2017 /*
2018 * VFS does the following checks:
2019 * 1. Validate that both are files.
2020 * 2. Validate that both are on the same mount.
2021 * 3. Validate that they're not the same vnode.
2022 */
2023
2024 from_cp = VTOC(from_vp);
2025 to_cp = VTOC(to_vp);
2026 hfsmp = VTOHFS(from_vp);
2027
2028 orig_from_ctime = from_cp->c_ctime;
2029 orig_to_ctime = to_cp->c_ctime;
2030
2031 #if CONFIG_PROTECT
2032 /*
2033 * Do not allow exchangedata/F_MOVEDATAEXTENTS on data-protected filesystems
2034 * because the EAs will not be swapped. As a result, the persistent keys would not
2035 * match and the files would be garbage.
2036 */
2037 if (cp_fs_protected (vnode_mount(from_vp))) {
2038 return EINVAL;
2039 }
2040 #endif
2041
2042 #if HFS_COMPRESSION
2043 if (!ISSET(ap->a_options, FSOPT_EXCHANGE_DATA_ONLY)) {
2044 if ( hfs_file_is_compressed(from_cp, 0) ) {
2045 if ( 0 != ( error = decmpfs_decompress_file(from_vp, VTOCMP(from_vp), -1, 0, 1) ) ) {
2046 return error;
2047 }
2048 }
2049
2050 if ( hfs_file_is_compressed(to_cp, 0) ) {
2051 if ( 0 != ( error = decmpfs_decompress_file(to_vp, VTOCMP(to_vp), -1, 0, 1) ) ) {
2052 return error;
2053 }
2054 }
2055 }
2056 #endif // HFS_COMPRESSION
2057
2058 // Resource forks cannot be exchanged.
2059 if (VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp))
2060 return EINVAL;
2061
2062 /*
2063 * Normally, we want to notify the user handlers about the event,
2064 * except if it's a handler driving the event.
2065 */
2066 if ((ap->a_options & FSOPT_EXCHANGE_DATA_ONLY) == 0) {
2067 check_for_tracked_file(from_vp, orig_from_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
2068 check_for_tracked_file(to_vp, orig_to_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
2069 } else {
2070 /*
2071 * This is currently used by mtmd so we should tidy up the
2072 * file now because the data won't be used again in the
2073 * destination file.
2074 */
2075 hfs_lock_truncate(from_cp, HFS_EXCLUSIVE_LOCK, 0);
2076 hfs_lock_always(from_cp, HFS_EXCLUSIVE_LOCK);
2077 hfs_filedone(from_vp, ap->a_context, HFS_FILE_DONE_NO_SYNC);
2078 hfs_unlock(from_cp);
2079 hfs_unlock_truncate(from_cp, 0);
2080
2081 // Flush all the data from the source file
2082 error = ubc_msync(from_vp, 0, ubc_getsize(from_vp), NULL,
2083 UBC_PUSHALL | UBC_SYNC);
2084 if (error)
2085 goto exit;
2086
2087 #if HFS_COMPRESSION
2088 /*
2089 * If this is a compressed file, we need to do the same for
2090 * the resource fork.
2091 */
2092 if (ISSET(from_cp->c_bsdflags, UF_COMPRESSED)) {
2093 error = hfs_flush_rsrc(from_vp, ap->a_context);
2094 if (error)
2095 goto exit;
2096 }
2097 #endif
2098
2099 /*
2100 * We're doing a data-swap so we need to take the truncate
2101 * lock exclusively. We need an exclusive lock because we
2102 * will be completely truncating the source file and we must
2103 * make sure nobody else sneaks in and tries to issue I/O
2104 * whilst we don't have the cnode lock.
2105 *
2106 * After taking the truncate lock we do a quick check to
2107 * verify there are no other references (including mmap
2108 * references), but we must remember that this does not stop
2109 * anybody coming in later and taking a reference. We will
2110 * have the truncate lock exclusively so that will prevent
2111 * them from issuing any I/O.
2112 */
2113
2114 if (to_cp < from_cp) {
2115 hfs_lock_truncate(to_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2116 have_to_trunc_lock = true;
2117 }
2118
2119 hfs_lock_truncate(from_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2120 have_from_trunc_lock = true;
2121
2122 /*
2123 * Do an early check to verify the source is not in use by
2124 * anyone. We should be called from an FD opened with O_EVTONLY
2125 * so that doesn't count as a reference.
2126 */
2127 if (vnode_isinuse(from_vp, 0)) {
2128 error = EBUSY;
2129 goto exit;
2130 }
2131
2132 if (to_cp >= from_cp) {
2133 hfs_lock_truncate(to_cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2134 have_to_trunc_lock = true;
2135 }
2136 }
2137
2138 if ((error = hfs_lockpair(from_cp, to_cp, HFS_EXCLUSIVE_LOCK)))
2139 goto exit;
2140 have_cnode_locks = true;
2141
2142 // Don't allow modification of the journal or journal_info_block
2143 if (hfs_is_journal_file(hfsmp, from_cp) ||
2144 hfs_is_journal_file(hfsmp, to_cp)) {
2145 error = EPERM;
2146 goto exit;
2147 }
2148
2149 /*
2150 * Ok, now that all of the pre-flighting is done, call the underlying
2151 * function if needed.
2152 */
2153 if (ISSET(ap->a_options, FSOPT_EXCHANGE_DATA_ONLY)) {
2154 #if HFS_COMPRESSION
2155 if (ISSET(from_cp->c_bsdflags, UF_COMPRESSED)) {
2156 error = hfs_move_compressed(from_cp, to_cp);
2157 goto exit;
2158 }
2159 #endif
2160
2161 error = hfs_move_data(from_cp, to_cp, 0);
2162 goto exit;
2163 }
2164
2165 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2166 goto exit;
2167 }
2168 started_tr = 1;
2169
2170 /*
2171 * Reserve some space in the Catalog file.
2172 */
2173 if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, vfs_context_proc(ap->a_context)))) {
2174 goto exit;
2175 }
2176 got_cookie = 1;
2177
2178 /* The backend code always tries to delete the virtual
2179 * extent id for exchanging files so we need to lock
2180 * the extents b-tree.
2181 */
2182 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
2183
2184 /* Account for the location of the catalog objects. */
2185 if (from_cp->c_flag & C_HARDLINK) {
2186 MAKE_INODE_NAME(from_iname, sizeof(from_iname),
2187 from_cp->c_attr.ca_linkref);
2188 from_nameptr = (unsigned char *)from_iname;
2189 from_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2190 from_cp->c_hint = 0;
2191 } else {
2192 from_nameptr = from_cp->c_desc.cd_nameptr;
2193 from_parid = from_cp->c_parentcnid;
2194 }
2195 if (to_cp->c_flag & C_HARDLINK) {
2196 MAKE_INODE_NAME(to_iname, sizeof(to_iname),
2197 to_cp->c_attr.ca_linkref);
2198 to_nameptr = (unsigned char *)to_iname;
2199 to_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2200 to_cp->c_hint = 0;
2201 } else {
2202 to_nameptr = to_cp->c_desc.cd_nameptr;
2203 to_parid = to_cp->c_parentcnid;
2204 }
2205
2206 /*
2207 * ExchangeFileIDs swaps the on-disk, or in-BTree extent information
2208 * attached to two different file IDs. It also swaps the extent
2209 * information that may live in the extents-overflow B-Tree.
2210 *
2211 * We do this in a transaction as this may require a lot of B-Tree nodes
2212 * to do completely, particularly if one of the files in question
2213 * has a lot of extents.
2214 *
2215 * For example, assume "file1" has fileID 50, and "file2" has fileID 52.
2216 * For the on-disk records, which are assumed to be synced, we will
2217 * first swap the resident inline-8 extents as part of the catalog records.
2218 * Then we will swap any extents overflow records for each file.
2219 *
2220 * When ExchangeFileIDs returns successfully, "file1" will have fileID 52,
2221 * and "file2" will have fileID 50. However, note that this is only
2222 * approximately half of the work that exchangedata(2) will need to
2223 * accomplish. In other words, we swap "too much" of the information
2224 * because if we only called ExchangeFileIDs, both the fileID and extent
2225 * information would be the invariants of this operation. We don't
2226 * actually want that; we want to conclude with "file1" having
2227 * file ID 50, and "file2" having fileID 52.
2228 *
2229 * The remainder of hfs_vnop_exchange will swap the file ID and other cnode
2230 * data back to the proper ownership, while still allowing the cnode to remain
2231 * pointing at the same set of extents that it did originally.
2232 */
2233 error = ExchangeFileIDs(hfsmp, from_nameptr, to_nameptr, from_parid,
2234 to_parid, from_cp->c_hint, to_cp->c_hint);
2235 hfs_systemfile_unlock(hfsmp, lockflags);
2236
2237 /*
2238 * Note that we don't need to exchange any extended attributes
2239 * since the attributes are keyed by file ID.
2240 */
2241
2242 if (error != E_NONE) {
2243 error = MacToVFSError(error);
2244 goto exit;
2245 }
2246
2247 /* Purge the vnodes from the name cache */
2248 if (from_vp)
2249 cache_purge(from_vp);
2250 if (to_vp)
2251 cache_purge(to_vp);
2252
2253 /* Bump both source and destination write counts before any swaps. */
2254 {
2255 hfs_incr_gencount (from_cp);
2256 hfs_incr_gencount (to_cp);
2257 }
2258
2259 /* Save a copy of "from" attributes before swapping. */
2260 bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
2261 bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));
2262
2263 /* Save whether or not each cnode is a hardlink or has EAs */
2264 from_flag_special = from_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
2265 to_flag_special = to_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
2266
2267 /* Drop the special bits from each cnode */
2268 from_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
2269 to_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
2270
2271 /*
2272 * Now complete the in-memory portion of the copy.
2273 *
2274 * ExchangeFileIDs swaps the on-disk records involved. We complete the
2275 * operation by swapping the in-memory contents of the two files here.
2276 * We swap the cnode descriptors, which contain name, BSD attributes,
2277 * timestamps, etc, about the file.
2278 *
2279 * NOTE: We do *NOT* swap the fileforks of the two cnodes. We have
2280 * already swapped the on-disk extent information. As long as we swap the
2281 * IDs, the in-line resident 8 extents that live in the filefork data
2282 * structure will point to the right data for the new file ID if we leave
2283 * them alone.
2284 *
2285 * As a result, any file descriptor that points to a particular
2286 * vnode (even though it should change names), will continue
2287 * to point to the same content.
2288 */
2289
2290 /* Copy the "to" -> "from" cnode */
2291 bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));
2292
2293 from_cp->c_hint = 0;
2294 /*
2295 * If 'to' was a hardlink, then we copied over its link ID/CNID/(namespace ID)
2296 * when we bcopy'd the descriptor above. However, the cnode attributes
2297 * are not bcopied. As a result, make sure to swap the file IDs of each item.
2298 *
2299 * Further, other hardlink attributes must be moved along in this swap:
2300 * the linkcount, the linkref, and the firstlink all need to move
2301 * along with the file IDs. See note below regarding the flags and
2302 * what moves vs. what does not.
2303 *
2304 * For Reference:
2305 * linkcount == total # of hardlinks.
2306 * linkref == the indirect inode pointer.
2307 * firstlink == the first hardlink in the chain (written to the raw inode).
2308 * These three are tied to the fileID and must move along with the rest of the data.
2309 */
2310 from_cp->c_fileid = to_cp->c_attr.ca_fileid;
2311
2312 from_cp->c_itime = to_cp->c_itime;
2313 from_cp->c_btime = to_cp->c_btime;
2314 from_cp->c_atime = to_cp->c_atime;
2315 from_cp->c_ctime = to_cp->c_ctime;
2316 from_cp->c_gid = to_cp->c_gid;
2317 from_cp->c_uid = to_cp->c_uid;
2318 from_cp->c_bsdflags = to_cp->c_bsdflags;
2319 from_cp->c_mode = to_cp->c_mode;
2320 from_cp->c_linkcount = to_cp->c_linkcount;
2321 from_cp->c_attr.ca_linkref = to_cp->c_attr.ca_linkref;
2322 from_cp->c_attr.ca_firstlink = to_cp->c_attr.ca_firstlink;
2323
2324 /*
2325 * The cnode flags need to stay with the cnode and not get transferred
2326 * over along with everything else because they describe the content; they are
2327 * not attributes that reflect changes specific to the file ID. In general,
2328 * fields that are tied to the file ID are the ones that will move.
2329 *
2330 * This reflects the fact that the file may have borrowed blocks, dirty metadata,
2331 * or other extents, which may not yet have been written to the catalog. If
2332 * they were, they would have been transferred in the ExchangeFileIDs call above...
2333 *
2334 * The flags that are special are:
2335 * C_HARDLINK, C_HASXATTRS
2336 *
2337 * These flags move with the item and file ID in the namespace since their
2338 * state is tied to that of the file ID.
2339 *
2340 * So to transfer the flags, we have to take the following steps:
2341 * 1) Store in a local variable whether or not the special bits are set.
2342 * 2) Drop the special bits from the current flags.
2343 * 3) Swap the special flag bits to their destination.
2344 */
2345 from_cp->c_flag |= to_flag_special;
2346 from_cp->c_attr.ca_recflags = to_cp->c_attr.ca_recflags;
2347 bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);
2348
2349
2350 /* Copy the "from" -> "to" cnode */
2351 bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
2352 to_cp->c_hint = 0;
2353 /*
2354 * Pull the file ID from the tempattr we copied above. We can't assume
2355 * it is the same as the CNID.
2356 */
2357 to_cp->c_fileid = tempattr.ca_fileid;
2358 to_cp->c_itime = tempattr.ca_itime;
2359 to_cp->c_btime = tempattr.ca_btime;
2360 to_cp->c_atime = tempattr.ca_atime;
2361 to_cp->c_ctime = tempattr.ca_ctime;
2362 to_cp->c_gid = tempattr.ca_gid;
2363 to_cp->c_uid = tempattr.ca_uid;
2364 to_cp->c_bsdflags = tempattr.ca_flags;
2365 to_cp->c_mode = tempattr.ca_mode;
2366 to_cp->c_linkcount = tempattr.ca_linkcount;
2367 to_cp->c_attr.ca_linkref = tempattr.ca_linkref;
2368 to_cp->c_attr.ca_firstlink = tempattr.ca_firstlink;
2369
2370 /*
2371 * Only OR in the "from" flags into our cnode flags below.
2372 * Leave the rest of the flags alone.
2373 */
2374 to_cp->c_flag |= from_flag_special;
2375
2376 to_cp->c_attr.ca_recflags = tempattr.ca_recflags;
2377 bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);
2378
2379
2380 /* Rehash the cnodes using their new file IDs */
2381 hfs_chash_rehash(hfsmp, from_cp, to_cp);
2382
2383 /*
2384 * When a file moves out of "Cleanup At Startup"
2385 * we can drop its NODUMP status.
2386 */
2387 if ((from_cp->c_bsdflags & UF_NODUMP) &&
2388 (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
2389 from_cp->c_bsdflags &= ~UF_NODUMP;
2390 from_cp->c_touch_chgtime = TRUE;
2391 }
2392 if ((to_cp->c_bsdflags & UF_NODUMP) &&
2393 (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
2394 to_cp->c_bsdflags &= ~UF_NODUMP;
2395 to_cp->c_touch_chgtime = TRUE;
2396 }
2397
2398 exit:
2399 if (got_cookie) {
2400 cat_postflight(hfsmp, &cookie, vfs_context_proc(ap->a_context));
2401 }
2402 if (started_tr) {
2403 hfs_end_transaction(hfsmp);
2404 }
2405
2406 if (have_from_trunc_lock)
2407 hfs_unlock_truncate(from_cp, 0);
2408
2409 if (have_to_trunc_lock)
2410 hfs_unlock_truncate(to_cp, 0);
2411
2412 if (have_cnode_locks)
2413 hfs_unlockpair(from_cp, to_cp);
2414
2415 return (error);
2416 }
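/*
 * Illustrative user-space sketch of the call that lands in
 * hfs_vnop_exchange above (paths hypothetical, error handling minimal):
 * exchangedata(2) swaps the data of two files on the same volume while
 * existing file descriptors continue to operate on the content they had
 * open, per the discussion at the top of the function.
 *
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	static int
 *	swap_files(const char *a, const char *b)
 *	{
 *		if (exchangedata(a, b, 0) != 0) {
 *			perror("exchangedata");
 *			return (-1);
 *		}
 *		return (0);
 *	}
 */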
2417
2418 #if HFS_COMPRESSION
2419 /*
2420 * This function is used specifically for the case when a namespace
2421 * handler is trying to steal data before it's deleted. Note that we
2422 * don't bother deleting the xattr from the source because it will get
2423 * deleted a short time later anyway.
2424 *
2425 * cnodes must be locked
2426 */
2427 static int hfs_move_compressed(cnode_t *from_cp, cnode_t *to_cp)
2428 {
2429 int ret;
2430 void *data = NULL;
2431
2432 CLR(from_cp->c_bsdflags, UF_COMPRESSED);
2433 SET(from_cp->c_flag, C_MODIFIED);
2434
2435 ret = hfs_move_data(from_cp, to_cp, HFS_MOVE_DATA_INCLUDE_RSRC);
2436 if (ret)
2437 goto exit;
2438
2439 /*
2440 * Transfer the xattr that decmpfs uses. Ideally, this code
2441 * should be with the other decmpfs code but it's file system
2442 * agnostic and this path is currently, and likely to remain, HFS+
2443 * specific. It's easier and more performant if we implement it
2444 * here.
2445 */
2446
2447 size_t size = MAX_DECMPFS_XATTR_SIZE;
2448 MALLOC(data, void *, size, M_TEMP, M_WAITOK);
2449
2450 ret = hfs_xattr_read(from_cp->c_vp, DECMPFS_XATTR_NAME, data, &size);
2451 if (ret)
2452 goto exit;
2453
2454 ret = hfs_xattr_write(to_cp->c_vp, DECMPFS_XATTR_NAME, data, size);
2455 if (ret)
2456 goto exit;
2457
2458 SET(to_cp->c_bsdflags, UF_COMPRESSED);
2459 SET(to_cp->c_flag, C_MODIFIED);
2460
2461 exit:
2462 if (data)
2463 FREE(data, M_TEMP);
2464
2465 return ret;
2466 }
2467 #endif // HFS_COMPRESSION
2468
2469 int
2470 hfs_vnop_mmap(struct vnop_mmap_args *ap)
2471 {
2472 struct vnode *vp = ap->a_vp;
2473 cnode_t *cp = VTOC(vp);
2474 int error;
2475
2476 if (VNODE_IS_RSRC(vp)) {
2477 /* allow pageins of the resource fork */
2478 } else {
2479 int compressed = hfs_file_is_compressed(cp, 1); /* 1 == don't take the cnode lock */
2480 time_t orig_ctime = cp->c_ctime;
2481
2482 if (!compressed && (cp->c_bsdflags & UF_COMPRESSED)) {
2483 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
2484 if (error != 0) {
2485 return error;
2486 }
2487 }
2488
2489 if (ap->a_fflags & PROT_WRITE) {
2490 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
2491 }
2492 }
2493
2494 //
2495 // NOTE: we return ENOTSUP because we want the cluster layer
2496 // to actually do all the real work.
2497 //
2498 return (ENOTSUP);
2499 }
2500
2501 static errno_t hfs_vnop_mnomap(struct vnop_mnomap_args *ap)
2502 {
2503 vnode_t vp = ap->a_vp;
2504
2505 /*
2506 * Whilst the file was mapped, there may not have been any
2507 * page-outs so we need to increment the generation counter now.
2508 * Unfortunately this may lead to a change in the generation
2509 * counter when no actual change has been made, but there is
2510 * little we can do about that with our current architecture.
2511 */
2512 if (ubc_is_mapped_writable(vp)) {
2513 cnode_t *cp = VTOC(vp);
2514 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2515 hfs_incr_gencount(cp);
2516
2517 /*
2518 * We don't want to set the modification time here since a
2519 * change to that is not acceptable if no changes were made.
2520 * Instead we set a flag so that if we get any page-outs we
2521 * know to update the modification time. It's possible that
2522 * those page-outs weren't actually caused by changes made whilst
2523 * the file was mapped, but that's not easy to fix now.
2524 */
2525 SET(cp->c_flag, C_MIGHT_BE_DIRTY_FROM_MAPPING);
2526
2527 hfs_unlock(cp);
2528 }
2529
2530 return 0;
2531 }
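/*
 * Sketch of the user-space pattern the mmap/mnomap handlers above reason
 * about (illustrative, minimal error handling): a writable MAP_SHARED
 * mapping can dirty pages without any write(2), so the generation count is
 * bumped when the last mapping goes away.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int
 *	touch_first_byte(const char *path)
 *	{
 *		int fd = open(path, O_RDWR);
 *		if (fd < 0)
 *			return (-1);
 *		char *p = mmap(NULL, 1, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		if (p == MAP_FAILED) {
 *			close(fd);
 *			return (-1);
 *		}
 *		p[0] ^= 1;		// dirty the page through the mapping
 *		munmap(p, 1);		// unmapping eventually reaches VNOP_MNOMAP
 *		close(fd);
 *		return (0);
 *	}
 */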
2532
2533 /*
2534 * Mark the resource fork as needing a ubc_setsize when we drop the
2535 * cnode lock later.
2536 */
2537 static void hfs_rsrc_setsize(cnode_t *cp)
2538 {
2539 /*
2540 * We need to take an iocount if we don't have one. vnode_get
2541 * will return ENOENT if the vnode is terminating which is what we
2542 * want as it's not safe to call ubc_setsize in that case.
2543 */
2544 if (cp->c_rsrc_vp && !vnode_get(cp->c_rsrc_vp)) {
2545 // Shouldn't happen, but better safe...
2546 if (ISSET(cp->c_flag, C_NEED_RVNODE_PUT))
2547 vnode_put(cp->c_rsrc_vp);
2548 SET(cp->c_flag, C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE);
2549 }
2550 }
2551
2552 /*
2553 * hfs_move_data
2554 *
2555 * This is a non-symmetric variant of exchangedata. In this function,
2556 * the contents of the data fork (and optionally the resource fork)
2557 * are moved from from_cp to to_cp.
2558 *
2559 * The cnodes must be locked.
2560 *
2561 * The cnode pointed to by 'to_cp' *must* be empty prior to invoking
2562 * this function. We impose this restriction because we may not be
2563 * able to fully delete the entire file's contents in a single
2564 * transaction, particularly if it has a lot of extents. In the
2565 * normal file deletion codepath, the file is screened for two
2566 * conditions: 1) bigger than 400MB, and 2) more than 8 extents. If
2567 * so, the file is relocated to the hidden directory and the deletion
2568 * is broken up into multiple truncates. We can't do that here
2569 * because both files need to exist in the namespace. The main reason
2570 * this is imposed is that we may have to touch a whole lot of bitmap
2571 * blocks if there are many extents.
2572 *
2573 * Any data written to 'from_cp' after this call completes is not
2574 * guaranteed to be moved.
2575 *
2576 * Arguments:
2577 * cnode_t *from_cp : source file
2578 * cnode_t *to_cp : destination file; must be empty
2579 *
2580 * Returns:
2581 *
2582 * EBUSY - File has been deleted or is in use
2583 * EFBIG - Destination file was not empty
2584 * EIO - An I/O error
2585 * 0 - success
2586 * other - Other errors that can be returned from called functions
2587 */
2588 int hfs_move_data(cnode_t *from_cp, cnode_t *to_cp,
2589 hfs_move_data_options_t options)
2590 {
2591 hfsmount_t *hfsmp = VTOHFS(from_cp->c_vp);
2592 int error = 0;
2593 int lockflags = 0;
2594 bool return_EIO_on_error = false;
2595 const bool include_rsrc = ISSET(options, HFS_MOVE_DATA_INCLUDE_RSRC);
2596
2597 /* Verify that neither source/dest file is open-unlinked */
2598 if (ISSET(from_cp->c_flag, C_DELETED | C_NOEXISTS)
2599 || ISSET(to_cp->c_flag, C_DELETED | C_NOEXISTS)) {
2600 return EBUSY;
2601 }
2602
2603 /*
2604 * Verify the source file is not in use by anyone besides us.
2605 *
2606 * This function is typically invoked by a namespace handler
2607 * process responding to a temporarily stalled system call.
2608 * The FD that it is working off of is opened O_EVTONLY, so
2609 * it really has no active usecounts (the kusecount from O_EVTONLY
2610 * is subtracted from the total usecounts).
2611 *
2612 * As a result, we shouldn't have any active usecounts against
2613 * this vnode when we go to check it below.
2614 */
2615 if (vnode_isinuse(from_cp->c_vp, 0))
2616 return EBUSY;
2617
2618 if (include_rsrc && from_cp->c_rsrc_vp) {
2619 if (vnode_isinuse(from_cp->c_rsrc_vp, 0))
2620 return EBUSY;
2621
2622 /*
2623 * In the code below, if the destination file doesn't have a
2624 * c_rsrcfork then we don't create it, which means we cannot
2625 * transfer the ff_invalidranges and cf_vblocks fields. These
2626 * shouldn't be set because we flush the resource fork before
2627 * calling this function but there is a tiny window when we
2628 * did not have any locks...
2629 */
2630 if (!to_cp->c_rsrcfork
2631 && (!TAILQ_EMPTY(&from_cp->c_rsrcfork->ff_invalidranges)
2632 || from_cp->c_rsrcfork->ff_unallocblocks)) {
2633 /*
2634 * The file isn't really busy now but something did slip
2635 * in and tinker with the file while we didn't have any
2636 * locks, so this is the most meaningful return code for
2637 * the caller.
2638 */
2639 return EBUSY;
2640 }
2641 }
2642
2643 // Check the destination file is empty
2644 if (to_cp->c_datafork->ff_blocks
2645 || to_cp->c_datafork->ff_size
2646 || (include_rsrc
2647 && (to_cp->c_blocks
2648 || (to_cp->c_rsrcfork && to_cp->c_rsrcfork->ff_size)))) {
2649 return EFBIG;
2650 }
2651
2652 if ((error = hfs_start_transaction (hfsmp)))
2653 return error;
2654
2655 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE,
2656 HFS_EXCLUSIVE_LOCK);
2657
2658 // filefork_t is 128 bytes which should be OK
2659 filefork_t rfork_buf, *from_rfork = NULL;
2660
2661 if (include_rsrc) {
2662 from_rfork = from_cp->c_rsrcfork;
2663
2664 /*
2665 * Creating resource fork vnodes is expensive, so just get
2666 * the fork data if we need it.
2667 */
2668 if (!from_rfork && hfs_has_rsrc(from_cp)) {
2669 from_rfork = &rfork_buf;
2670
2671 from_rfork->ff_cp = from_cp;
2672 TAILQ_INIT(&from_rfork->ff_invalidranges);
2673
2674 error = cat_idlookup(hfsmp, from_cp->c_fileid, 0, 1, NULL, NULL,
2675 &from_rfork->ff_data);
2676
2677 if (error)
2678 goto exit;
2679 }
2680 }
2681
2682 /*
2683 * From here on, any failures mean that we might be leaving things
2684 * in a weird or inconsistent state. Ideally, we should back out
2685 * all the changes, but to do that properly we need to fix
2686 * MoveData. We'll save fixing that for another time. For now,
2687 * just return EIO in all cases to the caller so that they know.
2688 */
2689 return_EIO_on_error = true;
2690
2691 bool data_overflow_extents = overflow_extents(from_cp->c_datafork);
2692
2693 // Move the data fork
2694 if ((error = hfs_move_fork (from_cp->c_datafork, from_cp,
2695 to_cp->c_datafork, to_cp))) {
2696 goto exit;
2697 }
2698
2699 SET(from_cp->c_flag, C_NEED_DATA_SETSIZE);
2700 SET(to_cp->c_flag, C_NEED_DATA_SETSIZE);
2701
2702 // We move the resource fork later
2703
2704 /*
2705 * Note that because all we're doing is moving the extents around,
2706 * we can probably do this in a single transaction: Each extent
2707 * record (group of 8) is 64 bytes. An extent overflow B-Tree node
2708 * is typically 4k. This means each node can hold roughly 60
2709 * extent records == (480 extents).
2710 *
2711 * If a file was massively fragmented and had 20k extents, this
2712 * means we'd roughly touch 20k/480 == 41 to 42 nodes, plus the
2713 * index nodes, for half of the operation. (inserting or
2714 * deleting). So if we're manipulating 80-100 nodes, this is
2715 * basically 320k of data to write to the journal in a bad case.
2716 */
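/*
 * Worked through with the same assumptions as the comment above (4k nodes,
 * 64-byte records, ~60 usable records per node):
 *	20,000 extents / (60 records * 8 extents) ~= 42 leaf nodes per pass
 *	two passes (delete + insert) ~= 84 leaf nodes, plus index nodes
 *	~80-100 nodes * 4k ~= 320-400k of journal traffic in a bad case
 */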
2717 if (data_overflow_extents) {
2718 if ((error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 0)))
2719 goto exit;
2720 }
2721
2722 if (from_rfork && overflow_extents(from_rfork)) {
2723 if ((error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 1)))
2724 goto exit;
2725 }
2726
2727 // Touch times
2728 from_cp->c_touch_acctime = TRUE;
2729 from_cp->c_touch_chgtime = TRUE;
2730 from_cp->c_touch_modtime = TRUE;
2731 hfs_touchtimes(hfsmp, from_cp);
2732
2733 to_cp->c_touch_acctime = TRUE;
2734 to_cp->c_touch_chgtime = TRUE;
2735 to_cp->c_touch_modtime = TRUE;
2736 hfs_touchtimes(hfsmp, to_cp);
2737
2738 struct cat_fork dfork_buf;
2739 const struct cat_fork *dfork, *rfork;
2740
2741 dfork = hfs_prepare_fork_for_update(to_cp->c_datafork, &dfork_buf,
2742 hfsmp->blockSize);
2743 rfork = hfs_prepare_fork_for_update(from_rfork, &rfork_buf.ff_data,
2744 hfsmp->blockSize);
2745
2746 // Update the catalog nodes, to_cp first
2747 if ((error = cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr,
2748 dfork, rfork))) {
2749 goto exit;
2750 }
2751
2752 CLR(to_cp->c_flag, C_MODIFIED);
2753
2754 // Update in-memory resource fork data here
2755 if (from_rfork) {
2756 // Update c_blocks
2757 uint32_t moving = from_rfork->ff_blocks + from_rfork->ff_unallocblocks;
2758
2759 from_cp->c_blocks -= moving;
2760 to_cp->c_blocks += moving;
2761
2762 // Update to_cp's resource data if it has it
2763 filefork_t *to_rfork = to_cp->c_rsrcfork;
2764 if (to_rfork) {
2765 to_rfork->ff_invalidranges = from_rfork->ff_invalidranges;
2766 to_rfork->ff_data = from_rfork->ff_data;
2767
2768 // Deal with ubc_setsize
2769 hfs_rsrc_setsize(to_cp);
2770 }
2771
2772 // Wipe out the resource fork in from_cp
2773 rl_init(&from_rfork->ff_invalidranges);
2774 bzero(&from_rfork->ff_data, sizeof(from_rfork->ff_data));
2775
2776 // Deal with ubc_setsize
2777 hfs_rsrc_setsize(from_cp);
2778 }
2779
2780 // Currently unnecessary, but might be useful in future...
2781 dfork = hfs_prepare_fork_for_update(from_cp->c_datafork, &dfork_buf,
2782 hfsmp->blockSize);
2783 rfork = hfs_prepare_fork_for_update(from_rfork, &rfork_buf.ff_data,
2784 hfsmp->blockSize);
2785
2786 // Update from_cp
2787 if ((error = cat_update(hfsmp, &from_cp->c_desc, &from_cp->c_attr,
2788 dfork, rfork))) {
2789 goto exit;
2790 }
2791
2792 CLR(from_cp->c_flag, C_MODIFIED);
2793
2794 exit:
2795 if (lockflags) {
2796 hfs_systemfile_unlock(hfsmp, lockflags);
2797 hfs_end_transaction(hfsmp);
2798 }
2799
2800 if (error && error != EIO && return_EIO_on_error) {
2801 printf("hfs_move_data: encountered error %d\n", error);
2802 error = EIO;
2803 }
2804
2805 return error;
2806 }
2807
2808 /*
2809 * Move all of the catalog and runtime data in srcfork to dstfork.
2810 *
2811 * This allows us to maintain the invalid ranges across the move data
2812 * operation so we don't need to force all of the pending IO right
2813 * now. In addition, we move all non overflow-extent extents into the
2814 * destination here.
2815 *
2816 * The destination fork must be empty and should have been checked
2817 * prior to calling this.
2818 */
2819 static int hfs_move_fork(filefork_t *srcfork, cnode_t *src_cp,
2820 filefork_t *dstfork, cnode_t *dst_cp)
2821 {
2822 // Move the invalid ranges
2823 dstfork->ff_invalidranges = srcfork->ff_invalidranges;
2824 rl_init(&srcfork->ff_invalidranges);
2825
2826 // Move the fork data (copy whole structure)
2827 dstfork->ff_data = srcfork->ff_data;
2828 bzero(&srcfork->ff_data, sizeof(srcfork->ff_data));
2829
2830 // Update c_blocks
2831 src_cp->c_blocks -= dstfork->ff_blocks + dstfork->ff_unallocblocks;
2832 dst_cp->c_blocks += dstfork->ff_blocks + dstfork->ff_unallocblocks;
2833
2834 return 0;
2835 }
2836
2837
2838 #include <i386/panic_hooks.h>
2839
2840 struct hfs_fsync_panic_hook {
2841 panic_hook_t hook;
2842 struct cnode *cp;
2843 };
2844
2845 static void hfs_fsync_panic_hook(panic_hook_t *hook_)
2846 {
2847 struct hfs_fsync_panic_hook *hook = (struct hfs_fsync_panic_hook *)hook_;
2848 extern int kdb_log(const char *fmt, ...);
2849
2850 // Get the physical region just before cp
2851 panic_phys_range_t range;
2852 uint64_t phys;
2853
2854 if (panic_phys_range_before(hook->cp, &phys, &range)) {
2855 kdb_log("cp = %p, phys = %p, prev (%p: %p-%p)\n",
2856 hook->cp, phys, range.type, range.phys_start,
2857 range.phys_start + range.len);
2858 } else
2859 kdb_log("cp = %p, phys = %p, prev (!)\n", hook->cp, phys);
2860
2861 panic_dump_mem((void *)(((vm_offset_t)hook->cp - 4096) & ~4095), 12288);
2862
2863 kdb_log("\n");
2864 }
2865
2866
2867 /*
2868 * cnode must be locked
2869 */
2870 int
2871 hfs_fsync(struct vnode *vp, int waitfor, int fullsync, struct proc *p)
2872 {
2873 struct cnode *cp = VTOC(vp);
2874 struct filefork *fp = NULL;
2875 int retval = 0;
2876 struct hfsmount *hfsmp = VTOHFS(vp);
2877 struct rl_entry *invalid_range;
2878 struct timeval tv;
2879 int waitdata; /* attributes necessary for data retrieval */
2880 int wait; /* all other attributes (e.g. atime, etc.) */
2881 int lockflag;
2882 int took_trunc_lock = 0;
2883 int locked_buffers = 0;
2884
2885 /*
2886 * Applications which only care about data integrity rather than full
2887 * file integrity may opt out of (delay) expensive metadata update
2888 * operations as a performance optimization.
2889 */
2890 wait = (waitfor == MNT_WAIT);
2891 waitdata = (waitfor == MNT_DWAIT) | wait;
2892 if (always_do_fullfsync)
2893 fullsync = 1;
2894
2895 /* HFS directories don't have any data blocks. */
2896 if (vnode_isdir(vp))
2897 goto metasync;
2898 fp = VTOF(vp);
2899
2900 /*
2901 * For system files flush the B-tree header and
2902 * for regular files write out any clusters
2903 */
2904 if (vnode_issystem(vp)) {
2905 if (VTOF(vp)->fcbBTCBPtr != NULL) {
2906 // XXXdbg
2907 if (hfsmp->jnl == NULL) {
2908 BTFlushPath(VTOF(vp));
2909 }
2910 }
2911 } else if (UBCINFOEXISTS(vp)) {
2912 hfs_unlock(cp);
2913 hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
2914 took_trunc_lock = 1;
2915
2916 struct hfs_fsync_panic_hook hook;
2917 hook.cp = cp;
2918 panic_hook(&hook.hook, hfs_fsync_panic_hook);
2919
2920 if (fp->ff_unallocblocks != 0) {
2921 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2922
2923 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2924 }
2925
2926 panic_unhook(&hook.hook);
2927
2928 /* Don't hold cnode lock when calling into cluster layer. */
2929 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
2930
2931 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2932 }
2933 /*
2934 * When MNT_WAIT is requested and the zero fill timeout
2935 * has expired then we must explicitly zero out any areas
2936 * that are currently marked invalid (holes).
2937 *
2938 * Files with NODUMP can bypass zero filling here.
2939 */
2940 if (fp && (((cp->c_flag & C_ALWAYS_ZEROFILL) && !TAILQ_EMPTY(&fp->ff_invalidranges)) ||
2941 ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
2942 ((cp->c_bsdflags & UF_NODUMP) == 0) &&
2943 UBCINFOEXISTS(vp) && (vnode_issystem(vp) ==0) &&
2944 cp->c_zftimeout != 0))) {
2945
2946 microuptime(&tv);
2947 if ((cp->c_flag & C_ALWAYS_ZEROFILL) == 0 && !fullsync && tv.tv_sec < (long)cp->c_zftimeout) {
2948 /* Remember that a force sync was requested. */
2949 cp->c_flag |= C_ZFWANTSYNC;
2950 goto datasync;
2951 }
2952 if (!TAILQ_EMPTY(&fp->ff_invalidranges)) {
2953 if (!took_trunc_lock || (cp->c_truncatelockowner == HFS_SHARED_OWNER)) {
2954 hfs_unlock(cp);
2955 if (took_trunc_lock) {
2956 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2957 }
2958 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2959 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2960 took_trunc_lock = 1;
2961 }
2962 while ((invalid_range = TAILQ_FIRST(&fp->ff_invalidranges))) {
2963 off_t start = invalid_range->rl_start;
2964 off_t end = invalid_range->rl_end;
2965
2966 /* The range about to be written must be validated
2967 * first, so that VNOP_BLOCKMAP() will return the
2968 * appropriate mapping for the cluster code:
2969 */
2970 rl_remove(start, end, &fp->ff_invalidranges);
2971
2972 /* Don't hold cnode lock when calling into cluster layer. */
2973 hfs_unlock(cp);
2974 (void) cluster_write(vp, (struct uio *) 0,
2975 fp->ff_size, end + 1, start, (off_t)0,
2976 IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
2977 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2978 cp->c_flag |= C_MODIFIED;
2979 }
2980 hfs_unlock(cp);
2981 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
2982 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2983 }
2984 cp->c_flag &= ~C_ZFWANTSYNC;
2985 cp->c_zftimeout = 0;
2986 }
2987 datasync:
2988 if (took_trunc_lock) {
2989 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2990 took_trunc_lock = 0;
2991 }
2992 /*
2993 * If we have a journal and journal_active() returns != 0, then
2994 * we shouldn't do anything to a locked block (because it is part
2995 * of a transaction). Otherwise we'll just go through the normal
2996 * code path and flush the buffer. Note that journal_active() can return
2997 * -1 if the journal is invalid -- however, we still need to skip any
2998 * locked blocks as they get cleaned up when we finish the transaction
2999 * or close the journal.
3000 */
3001 // if (hfsmp->jnl && journal_active(hfsmp->jnl) >= 0)
3002 if (hfsmp->jnl)
3003 lockflag = BUF_SKIP_LOCKED;
3004 else
3005 lockflag = 0;
3006
3007 /*
3008 * Flush all dirty buffers associated with a vnode.
3009 * Record how many of them were dirty AND locked (if necessary).
3010 */
3011 locked_buffers = buf_flushdirtyblks_skipinfo(vp, waitdata, lockflag, "hfs_fsync");
3012 if ((lockflag & BUF_SKIP_LOCKED) && (locked_buffers) && (vnode_vtype(vp) == VLNK)) {
3013 /*
3014 * If there are dirty symlink buffers, then we may need to take action
3015 * to prevent issues later on if we are journaled. If we're fsyncing a
3016 * symlink vnode then we are in one of three cases:
3017 *
3018 * 1) automatic sync has fired. In this case, we don't want the behavior to change.
3019 *
3020 * 2) Someone has opened the FD for the symlink (not what it points to)
3021 * and has issued an fsync against it. This should be rare, and we don't
3022 * want the behavior to change.
3023 *
3024 * 3) We are being called by a vclean which is trying to reclaim this
3025 * symlink vnode. If this is the case, then allowing this fsync to
3026 * proceed WITHOUT flushing the journal could result in the vclean
3027 * invalidating the buffer's blocks before the journal transaction is
3028 * written to disk. To prevent this, we force a journal flush
3029 * if the vnode is in the middle of a recycle (VL_TERMINATE or VL_DEAD is set).
3030 */
3031 if (vnode_isrecycled(vp)) {
3032 fullsync = 1;
3033 }
3034 }
3035
3036 metasync:
3037 if (vnode_isreg(vp) && vnode_issystem(vp)) {
3038 if (VTOF(vp)->fcbBTCBPtr != NULL) {
3039 microuptime(&tv);
3040 BTSetLastSync(VTOF(vp), tv.tv_sec);
3041 }
3042 cp->c_touch_acctime = FALSE;
3043 cp->c_touch_chgtime = FALSE;
3044 cp->c_touch_modtime = FALSE;
3045 } else if ( !(vp->v_flag & VSWAP) ) /* User file */ {
3046 retval = hfs_update(vp, wait);
3047
3048 /*
3049 * When MNT_WAIT is requested, push out the catalog record for
3050 * this file. If they asked for a full fsync, we can skip this
3051 * because the journal_flush or hfs_metasync_all will push out
3052 * all of the metadata changes.
3053 */
3054 if ((retval == 0) && wait && !fullsync && cp->c_hint &&
3055 !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
3056 hfs_metasync(VTOHFS(vp), (daddr64_t)cp->c_hint, p);
3057 }
3058
3059 /*
3060 * If this was a full fsync, make sure all metadata
3061 * changes get to stable storage.
3062 */
3063 if (fullsync) {
3064 if (hfsmp->jnl) {
3065 hfs_journal_flush(hfsmp, FALSE);
3066
3067 if (journal_uses_fua(hfsmp->jnl)) {
3068 /*
3069 * the journal_flush did NOT issue a sync track cache command,
3070 * and the fullsync indicates we are supposed to flush all cached
3071 * data to the media, so issue the sync track cache command
3072 * explicitly
3073 */
3074 VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL);
3075 }
3076 } else {
3077 retval = hfs_metasync_all(hfsmp);
3078 /* XXX need to pass context! */
3079 VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL);
3080 }
3081 }
3082 }
3083
3084 return (retval);
3085 }
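/*
 * Illustrative user-space sketch of the two flavors of sync that reach
 * hfs_fsync above (helper name hypothetical): a plain fsync(2) versus
 * fcntl(F_FULLFSYNC), which requests that all cached data be forced out to
 * the media -- the behavior fullsync selects above, and which the
 * always_do_fullfsync override forces for every fsync.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int
 *	durable_flush(int fd, int want_full)
 *	{
 *		if (want_full)
 *			return (fcntl(fd, F_FULLFSYNC));	// flush drive cache too
 *		return (fsync(fd));
 *	}
 */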
3086
3087
3088 /* Sync an hfs catalog b-tree node */
3089 int
3090 hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p)
3091 {
3092 vnode_t vp;
3093 buf_t bp;
3094 int lockflags;
3095
3096 vp = HFSTOVCB(hfsmp)->catalogRefNum;
3097
3098 // XXXdbg - don't need to do this on a journaled volume
3099 if (hfsmp->jnl) {
3100 return 0;
3101 }
3102
3103 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
3104 /*
3105 * Look for a matching node that has been delayed
3106 * but is not part of a set (B_LOCKED).
3107 *
3108 * BLK_ONLYVALID causes buf_getblk to return a
3109 * buf_t for the daddr64_t specified only if it's
3110 * currently resident in the cache... the size
3111 * parameter to buf_getblk is ignored when this flag
3112 * is set
3113 */
3114 bp = buf_getblk(vp, node, 0, 0, 0, BLK_META | BLK_ONLYVALID);
3115
3116 if (bp) {
3117 if ((buf_flags(bp) & (B_LOCKED | B_DELWRI)) == B_DELWRI)
3118 (void) VNOP_BWRITE(bp);
3119 else
3120 buf_brelse(bp);
3121 }
3122
3123 hfs_systemfile_unlock(hfsmp, lockflags);
3124
3125 return (0);
3126 }
3127
3128
3129 /*
3130 * Sync all hfs B-trees. Use this instead of journal_flush for a volume
3131 * without a journal. Note that the volume bitmap does not get written;
3132 * we rely on fsck_hfs to fix that up (which it can do without any loss
3133 * of data).
3134 */
3135 int
3136 hfs_metasync_all(struct hfsmount *hfsmp)
3137 {
3138 int lockflags;
3139
3140 /* Lock all of the B-trees so we get a mutually consistent state */
3141 lockflags = hfs_systemfile_lock(hfsmp,
3142 SFL_CATALOG|SFL_EXTENTS|SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
3143
3144 /* Sync each of the B-trees */
3145 if (hfsmp->hfs_catalog_vp)
3146 hfs_btsync(hfsmp->hfs_catalog_vp, 0);
3147 if (hfsmp->hfs_extents_vp)
3148 hfs_btsync(hfsmp->hfs_extents_vp, 0);
3149 if (hfsmp->hfs_attribute_vp)
3150 hfs_btsync(hfsmp->hfs_attribute_vp, 0);
3151
3152 /* Wait for all of the writes to complete */
3153 if (hfsmp->hfs_catalog_vp)
3154 vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_metasync_all");
3155 if (hfsmp->hfs_extents_vp)
3156 vnode_waitforwrites(hfsmp->hfs_extents_vp, 0, 0, 0, "hfs_metasync_all");
3157 if (hfsmp->hfs_attribute_vp)
3158 vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs_metasync_all");
3159
3160 hfs_systemfile_unlock(hfsmp, lockflags);
3161
3162 return 0;
3163 }
3164
3165
3166 /*ARGSUSED 1*/
3167 static int
3168 hfs_btsync_callback(struct buf *bp, __unused void *dummy)
3169 {
3170 buf_clearflags(bp, B_LOCKED);
3171 (void) buf_bawrite(bp);
3172
3173 return(BUF_CLAIMED);
3174 }
3175
3176
3177 int
3178 hfs_btsync(struct vnode *vp, int sync_transaction)
3179 {
3180 struct cnode *cp = VTOC(vp);
3181 struct timeval tv;
3182 int flags = 0;
3183
3184 if (sync_transaction)
3185 flags |= BUF_SKIP_NONLOCKED;
3186 /*
3187 * Flush all dirty buffers associated with b-tree.
3188 */
3189 buf_iterate(vp, hfs_btsync_callback, flags, 0);
3190
3191 microuptime(&tv);
3192 if (vnode_issystem(vp) && (VTOF(vp)->fcbBTCBPtr != NULL))
3193 (void) BTSetLastSync(VTOF(vp), tv.tv_sec);
3194 cp->c_touch_acctime = FALSE;
3195 cp->c_touch_chgtime = FALSE;
3196 cp->c_touch_modtime = FALSE;
3197
3198 return 0;
3199 }
3200
3201 /*
3202 * Remove a directory.
3203 */
3204 int
3205 hfs_vnop_rmdir(ap)
3206 struct vnop_rmdir_args /* {
3207 struct vnode *a_dvp;
3208 struct vnode *a_vp;
3209 struct componentname *a_cnp;
3210 vfs_context_t a_context;
3211 } */ *ap;
3212 {
3213 struct vnode *dvp = ap->a_dvp;
3214 struct vnode *vp = ap->a_vp;
3215 struct cnode *dcp = VTOC(dvp);
3216 struct cnode *cp = VTOC(vp);
3217 int error;
3218 time_t orig_ctime;
3219
3220 orig_ctime = VTOC(vp)->c_ctime;
3221
3222 if (!S_ISDIR(cp->c_mode)) {
3223 return (ENOTDIR);
3224 }
3225 if (dvp == vp) {
3226 return (EINVAL);
3227 }
3228
3229 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
3230 cp = VTOC(vp);
3231
3232 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
3233 return (error);
3234 }
3235
3236 /* Check for a race with rmdir on the parent directory */
3237 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
3238 hfs_unlockpair (dcp, cp);
3239 return ENOENT;
3240 }
3241
3242 //
3243 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
3244 //
3245 if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
3246 uint32_t newid;
3247
3248 hfs_unlockpair(dcp, cp);
3249
3250 if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
3251 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3252 ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
3253 #if CONFIG_FSE
3254 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
3255 FSE_ARG_DEV, VTOHFS(vp)->hfs_raw_dev,
3256 FSE_ARG_INO, (ino64_t)0, // src inode #
3257 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
3258 FSE_ARG_INT32, newid,
3259 FSE_ARG_DONE);
3260 #endif
3261 } else {
3262 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rm...
3263 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3264 }
3265 }
3266
3267 error = hfs_removedir(dvp, vp, ap->a_cnp, 0, 0);
3268
3269 hfs_unlockpair(dcp, cp);
3270
3271 return (error);
3272 }
3273
3274 /*
3275 * Remove a directory
3276 *
3277 * Both dvp and vp cnodes are locked
3278 */
3279 int
3280 hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
3281 int skip_reserve, int only_unlink)
3282 {
3283 struct cnode *cp;
3284 struct cnode *dcp;
3285 struct hfsmount * hfsmp;
3286 struct cat_desc desc;
3287 int lockflags;
3288 int error = 0, started_tr = 0;
3289
3290 cp = VTOC(vp);
3291 dcp = VTOC(dvp);
3292 hfsmp = VTOHFS(vp);
3293
3294 if (dcp == cp) {
3295 return (EINVAL); /* cannot remove "." */
3296 }
3297 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
3298 return (0);
3299 }
3300 if (cp->c_entries != 0) {
3301 return (ENOTEMPTY);
3302 }
3303
3304 /*
3305 * If the directory is open or in use (e.g. opendir() or the current working
3306 * directory of some process), wait for inactive/reclaim to actually
3307 * remove the cnode from the catalog. Both inactive and reclaim codepaths are capable
3308 * of removing open-unlinked directories from the catalog, as well as getting rid
3309 * of EAs still on the element. So change only_unlink to true, so that it will get
3310 * cleaned up below.
3311 *
3312 * Otherwise, we can get into a weird old mess where the directory has C_DELETED,
3313 * but it really means C_NOEXISTS because the item was actually removed from the
3314 * catalog. Then when we try to remove the entry from the catalog later on, it won't
3315 * really be there anymore.
3316 */
3317 if (vnode_isinuse(vp, 0)) {
3318 only_unlink = 1;
3319 }
3320
3321 /* Deal with directory hardlinks */
3322 if (cp->c_flag & C_HARDLINK) {
3323 /*
3324 * Note that if we have a directory which was a hardlink at any point,
3325 * its actual directory data is stored in the directory inode in the hidden
3326 * directory rather than the leaf element(s) present in the namespace.
3327 *
3328 * If there are still other hardlinks to this directory,
3329 * then we'll just eliminate this particular link and the vnode will still exist.
3330 * If this is the last link to an empty directory, then we'll open-unlink the
3331 * directory and it will be only tagged with C_DELETED (as opposed to C_NOEXISTS).
3332 *
3333 * We could also return EBUSY here.
3334 */
3335
3336 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
3337 }
3338
3339 /*
3340 * In a few cases, we may want to allow the directory to persist in an
3341 * open-unlinked state. If the directory is being open-unlinked (still has usecount
3342 * references), or if it has EAs, or if it was being deleted as part of a rename,
3343 * then we go ahead and move it to the hidden directory.
3344 *
3345 * If the directory is being open-unlinked, then we want to keep the catalog entry
3346 * alive so that future EA calls and fchmod/fstat etc. do not cause issues later.
3347 *
3348 * If the directory had EAs, then we want to use the open-unlink trick so that the
3349 * EA removal is not done in one giant transaction. Otherwise, it could cause a panic
3350 * due to overflowing the journal.
3351 *
3352 * Finally, if it was deleted as part of a rename, we move it to the hidden directory
3353 * in order to maintain rename atomicity.
3354 *
3355 * Note that the allow_dirs argument to hfs_removefile specifies that it is
3356 * supposed to handle directories for this case.
3357 */
3358
3359 if (((hfsmp->hfs_attribute_vp != NULL) &&
3360 ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0)) ||
3361 (only_unlink != 0)) {
3362
3363 int ret = hfs_removefile(dvp, vp, cnp, 0, 0, 1, NULL, only_unlink);
3364 /*
3365 * Even though hfs_vnop_rename calls vnode_recycle for us on tvp, we call
3366 * it here just in case we were invoked by rmdir() on a directory that had
3367 * EAs. To ensure that we start reclaiming the space as soon as possible,
3368 * we call vnode_recycle on the directory.
3369 */
3370 vnode_recycle(vp);
3371
3372 return ret;
3373
3374 }
3375
3376 dcp->c_flag |= C_DIR_MODIFICATION;
3377
3378 #if QUOTA
3379 if (hfsmp->hfs_flags & HFS_QUOTAS)
3380 (void)hfs_getinoquota(cp);
3381 #endif
3382 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3383 goto out;
3384 }
3385 started_tr = 1;
3386
3387 /*
3388 * Verify the directory is empty (and valid).
3389 * (Rmdir ".." won't be valid since
3390 * ".." will contain a reference to
3391 * the current directory and thus be
3392 * non-empty.)
3393 */
3394 if ((dcp->c_bsdflags & APPEND) || (cp->c_bsdflags & (IMMUTABLE | APPEND))) {
3395 error = EPERM;
3396 goto out;
3397 }
3398
3399 /* Remove the entry from the namei cache: */
3400 cache_purge(vp);
3401
3402 /*
3403 * Protect against a race with rename by using the component
3404 * name passed in and parent id from dvp (instead of using
3405 * the cp->c_desc which may have changed).
3406 */
3407 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3408 desc.cd_namelen = cnp->cn_namelen;
3409 desc.cd_parentcnid = dcp->c_fileid;
3410 desc.cd_cnid = cp->c_cnid;
3411 desc.cd_flags = CD_ISDIR;
3412 desc.cd_encoding = cp->c_encoding;
3413 desc.cd_hint = 0;
3414
3415 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
3416 error = 0;
3417 goto out;
3418 }
3419
3420 /* Remove entry from catalog */
3421 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3422
3423 if (!skip_reserve) {
3424 /*
3425 * Reserve some space in the Catalog file.
3426 */
3427 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
3428 hfs_systemfile_unlock(hfsmp, lockflags);
3429 goto out;
3430 }
3431 }
3432
3433 error = cat_delete(hfsmp, &desc, &cp->c_attr);
3434
3435 if (!error) {
3436 //
3437 // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
3438 // we don't need to touch the document_id as it's handled by the rename code.
3439 // otherwise it's a normal remove and we need to save the document id in the
3440 // per thread struct and clear it from the cnode.
3441 //
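// (a "tombstone" captures the parent, name and document_id of the item
//  going away so that an item later created or renamed into the same
//  location can inherit the id -- see the no-destination case in
//  hfs_vnop_rename below)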
3442 struct doc_tombstone *ut;
3443 ut = get_uthread_doc_tombstone();
3444 if (!skip_reserve && (cp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, vp, cnp)) {
3445
3446 if (ut->t_lastop_document_id) {
3447 clear_tombstone_docid(ut, hfsmp, NULL);
3448 }
3449 save_tombstone(hfsmp, dvp, vp, cnp, 1);
3450
3451 }
3452
3453 /* The parent lost a child */
3454 if (dcp->c_entries > 0)
3455 dcp->c_entries--;
3456 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
3457 dcp->c_dirchangecnt++;
3458 hfs_incr_gencount(dcp);
3459
3460 dcp->c_touch_chgtime = TRUE;
3461 dcp->c_touch_modtime = TRUE;
3462 hfs_touchtimes(hfsmp, cp);
3463 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
3464 cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE);
3465 }
3466
3467 hfs_systemfile_unlock(hfsmp, lockflags);
3468
3469 if (error)
3470 goto out;
3471
3472 #if QUOTA
3473 if (hfsmp->hfs_flags & HFS_QUOTAS)
3474 (void)hfs_chkiq(cp, -1, NOCRED, 0);
3475 #endif /* QUOTA */
3476
3477 hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));
3478
3479 /* Mark C_NOEXISTS since the catalog entry is now gone */
3480 cp->c_flag |= C_NOEXISTS;
3481
3482 out:
3483 dcp->c_flag &= ~C_DIR_MODIFICATION;
3484 wakeup((caddr_t)&dcp->c_flag);
3485
3486 if (started_tr) {
3487 hfs_end_transaction(hfsmp);
3488 }
3489
3490 return (error);
3491 }
3492
3493
3494 /*
3495 * Remove a file or link.
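 *
 * Takes the truncate lock and the dvp/vp cnode pair lock and, for regular
 * files and symlinks, an iocount on any existing resource fork vnode
 * (see the relock dance below).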
3496 */
3497 int
3498 hfs_vnop_remove(ap)
3499 struct vnop_remove_args /* {
3500 struct vnode *a_dvp;
3501 struct vnode *a_vp;
3502 struct componentname *a_cnp;
3503 int a_flags;
3504 vfs_context_t a_context;
3505 } */ *ap;
3506 {
3507 struct vnode *dvp = ap->a_dvp;
3508 struct vnode *vp = ap->a_vp;
3509 struct cnode *dcp = VTOC(dvp);
3510 struct cnode *cp;
3511 struct vnode *rvp = NULL;
3512 int error=0, recycle_rsrc=0;
3513 int recycle_vnode = 0;
3514 uint32_t rsrc_vid = 0;
3515 time_t orig_ctime;
3516
3517 if (dvp == vp) {
3518 return (EINVAL);
3519 }
3520
3521 orig_ctime = VTOC(vp)->c_ctime;
3522 if (!vnode_isnamedstream(vp) && ((ap->a_flags & VNODE_REMOVE_SKIP_NAMESPACE_EVENT) == 0)) {
3523 error = check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
3524 if (error) {
3525 // XXXdbg - decide on a policy for handling namespace handler failures!
3526 // for now we just let them proceed.
3527 }
3528 }
3529 error = 0;
3530
3531 cp = VTOC(vp);
3532
3533 relock:
3534
3535 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
3536
3537 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
3538 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3539 if (rvp) {
3540 vnode_put (rvp);
3541 }
3542 return (error);
3543 }
3544 //
3545 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
3546 //
3547 if ((cp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id == 0) {
3548 uint32_t newid;
3549
3550 hfs_unlockpair(dcp, cp);
3551
3552 if (hfs_generate_document_id(VTOHFS(vp), &newid) == 0) {
3553 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3554 ((struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16))->document_id = newid;
3555 #if CONFIG_FSE
3556 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
3557 FSE_ARG_DEV, VTOHFS(vp)->hfs_raw_dev,
3558 FSE_ARG_INO, (ino64_t)0, // src inode #
3559 FSE_ARG_INO, (ino64_t)cp->c_fileid, // dst inode #
3560 FSE_ARG_INT32, newid,
3561 FSE_ARG_DONE);
3562 #endif
3563 } else {
3564 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rm...
3565 hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK);
3566 }
3567 }
3568
3569 /*
3570 * Lazily determine whether there is a valid resource fork
3571 * vnode attached to 'cp' when it is a regular file or symlink.
3572 * If the vnode does not exist, then we may proceed without having to
3573 * create it.
3574 *
3575 * If, however, it does exist, then we need to acquire an iocount on the
3576 * vnode after acquiring its vid. This ensures that if we have to do I/O
3577 * against it, it can't get recycled from underneath us in the middle
3578 * of this call.
3579 *
3580 * Note: this function may be invoked for directory hardlinks, so just skip these
3581 * steps if 'vp' is a directory.
3582 */
3583
3584 if ((vp->v_type == VLNK) || (vp->v_type == VREG)) {
3585 if ((cp->c_rsrc_vp) && (rvp == NULL)) {
3586 /* We need to acquire the rsrc vnode */
3587 rvp = cp->c_rsrc_vp;
3588 rsrc_vid = vnode_vid (rvp);
3589
3590 /* Unlock everything to acquire iocount on the rsrc vnode */
3591 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
3592 hfs_unlockpair (dcp, cp);
3593 /* Use the vid to maintain identity on rvp */
3594 if (vnode_getwithvid(rvp, rsrc_vid)) {
3595 /*
3596 * If this fails, then it was recycled or
3597 * reclaimed in the interim. Reset fields and
3598 * start over.
3599 */
3600 rvp = NULL;
3601 rsrc_vid = 0;
3602 }
3603 goto relock;
3604 }
3605 }
3606
3607 /*
3608 * Check to see if we raced rmdir for the parent directory.
3609 * (hfs_removefile already checks for a race on vp/cp.)
3610 */
3611 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
3612 error = ENOENT;
3613 goto rm_done;
3614 }
3615
3616 error = hfs_removefile(dvp, vp, ap->a_cnp, ap->a_flags, 0, 0, NULL, 0);
3617
3618 /*
3619 * If the remove succeeded in deleting the file, then we may need to mark
3620 * the resource fork for recycle so that it is reclaimed as quickly
3621 * as possible. If it were not recycled quickly, then this resource fork
3622 * vnode could keep a v_parent reference on the data fork, which prevents it
3623 * from going through reclaim (by giving it extra usecounts), except in the force-
3624 * unmount case.
3625 *
3626 * However, a caveat: we need to continue to supply resource fork
3627 * access to open-unlinked files even if the resource fork is not open. This is
3628 * a requirement for the compressed files work. Luckily, hfs_vgetrsrc will handle
3629 * this already if the data fork has been re-parented to the hidden directory.
3630 *
3631 * As a result, all we really need to do here is mark the resource fork vnode
3632 * for recycle. If it goes out of core, it can be brought in again if needed.
3633 * If the cnode was instead marked C_NOEXISTS, then there wouldn't be any
3634 * more work.
3635 */
3636 if (error == 0) {
3637 if (rvp) {
3638 recycle_rsrc = 1;
3639 }
3640 /*
3641 * If the target was actually removed from the catalog, schedule it for
3642 * full reclamation/inactivation. We hold an iocount on it, so it should
3643 * just get marked with MARKTERM.
3644 */
3645 if (cp->c_flag & C_NOEXISTS) {
3646 recycle_vnode = 1;
3647 }
3648 }
3649
3650
3651 /*
3652 * Drop the truncate lock before unlocking the cnode
3653 * (which can potentially perform a vnode_put and
3654 * recycle the vnode which in turn might require the
3655 * truncate lock)
3656 */
3657 rm_done:
3658 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3659 hfs_unlockpair(dcp, cp);
3660
3661 if (recycle_rsrc) {
3662 /* inactive or reclaim on rvp will clean up the blocks from the rsrc fork */
3663 vnode_recycle(rvp);
3664 }
3665 if (recycle_vnode) {
3666 vnode_recycle (vp);
3667 }
3668
3669 if (rvp) {
3670 /* drop iocount on rsrc fork, was obtained at beginning of fxn */
3671 vnode_put(rvp);
3672 }
3673
3674 return (error);
3675 }
3676
3677
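/*
 * buf_iterate() callback used by hfs_removefile() when deleting a journaled
 * symlink: any dirty symlink buffer that is part of the current transaction
 * is killed so its blocks are not written out after the unlink.
 */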
3678 int
3679 hfs_removefile_callback(struct buf *bp, void *hfsmp) {
3680
3681 if ( !(buf_flags(bp) & B_META))
3682 panic("hfs: symlink bp @ %p is not marked meta-data!\n", bp);
3683 /*
3684 * it's part of the current transaction, kill it.
3685 */
3686 journal_kill_block(((struct hfsmount *)hfsmp)->jnl, bp);
3687
3688 return (BUF_CLAIMED);
3689 }
3690
3691 /*
3692 * hfs_removefile
3693 *
3694 * Similar to hfs_vnop_remove except there are additional options.
3695 * This function may be used to remove directories if they have
3696 * lots of EA's -- note the 'allow_dirs' argument.
3697 *
3698 * This function is able to delete blocks & fork data for the resource
3699 * fork even if it does not exist in core (and has no backing vnode).
3700 * It infers the correct behavior from the number of blocks
3701 * in the cnode and from whether or not the resource fork pointer
3702 * exists. As a result, callers need only pass in the 'vp' corresponding to
3703 * the data fork of this file (or the main vnode in the case of a directory).
3704 * Passing in a resource fork will result in an error.
3705 *
3706 * Because we do not create any vnodes in this function, we are not at
3707 * risk of deadlocking against ourselves by double-locking.
3708 *
3709 * Requires cnode and truncate locks to be held.
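 *
 * 'flags' carries the VNODE_REMOVE_* flags from the caller; 'skip_reserve'
 * (HFSRM_SKIP_RESERVE) means the caller has already reserved catalog space;
 * 'only_unlink' forces the open-unlink path, i.e. the item is moved to the
 * private directory instead of being deleted outright.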
3710 */
3711 int
3712 hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
3713 int flags, int skip_reserve, int allow_dirs,
3714 __unused struct vnode *rvp, int only_unlink)
3715 {
3716 struct cnode *cp;
3717 struct cnode *dcp;
3718 struct vnode *rsrc_vp = NULL;
3719 struct hfsmount *hfsmp;
3720 struct cat_desc desc;
3721 struct timeval tv;
3722 int dataforkbusy = 0;
3723 int rsrcforkbusy = 0;
3724 int lockflags;
3725 int error = 0;
3726 int started_tr = 0;
3727 int isbigfile = 0, defer_remove=0, isdir=0;
3728 int update_vh = 0;
3729
3730 cp = VTOC(vp);
3731 dcp = VTOC(dvp);
3732 hfsmp = VTOHFS(vp);
3733
3734 /* Check if we lost a race post lookup. */
3735 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
3736 return (0);
3737 }
3738
3739 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
3740 return 0;
3741 }
3742
3743 /* Make sure a remove is permitted */
3744 if (VNODE_IS_RSRC(vp)) {
3745 return (EPERM);
3746 }
3747 else {
3748 /*
3749 * We know it's a data fork.
3750 * Probe the cnode to see if we have a valid resource fork
3751 * in hand or not.
3752 */
3753 rsrc_vp = cp->c_rsrc_vp;
3754 }
3755
3756 /* Don't allow deleting the journal or journal_info_block. */
3757 if (hfs_is_journal_file(hfsmp, cp)) {
3758 return (EPERM);
3759 }
3760
3761 /*
3762 * If removing a symlink, then we need to ensure that the
3763 * data blocks for the symlink are not still in-flight or pending.
3764 * If they are, unlinking the symlink here would make its blocks
3765 * available for re-allocation by a subsequent transaction. That is OK, but
3766 * the I/O for the data blocks could then go out before the journal
3767 * transaction that created them was flushed, leading to I/O ordering issues.
3768 */
3769 if (vp->v_type == VLNK) {
3770 /*
3771 * This will block if the asynchronous journal flush is in progress.
3772 * If this symlink is not being renamed over and doesn't have any open FDs,
3773 * then we'll remove it from the journal's bufs below in kill_block.
3774 */
3775 buf_wait_for_shadow_io (vp, 0);
3776 }
3777
3778 /*
3779 * Hard links require special handling.
3780 */
3781 if (cp->c_flag & C_HARDLINK) {
3782 if ((flags & VNODE_REMOVE_NODELETEBUSY) && vnode_isinuse(vp, 0)) {
3783 return (EBUSY);
3784 } else {
3785 /* A directory hard link with a link count of one is
3786 * treated as a regular directory. Therefore it should
3787 * only be removed using rmdir().
3788 */
3789 if ((vnode_isdir(vp) == 1) && (cp->c_linkcount == 1) &&
3790 (allow_dirs == 0)) {
3791 return (EPERM);
3792 }
3793 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
3794 }
3795 }
3796
3797 /* Directories should call hfs_rmdir! (unless they have a lot of attributes) */
3798 if (vnode_isdir(vp)) {
3799 if (allow_dirs == 0)
3800 return (EPERM); /* POSIX */
3801 isdir = 1;
3802 }
3803 /* Sanity check the parent ids. */
3804 if ((cp->c_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
3805 (cp->c_parentcnid != dcp->c_fileid)) {
3806 return (EINVAL);
3807 }
3808
3809 dcp->c_flag |= C_DIR_MODIFICATION;
3810
3811 // this guy is going away so mark him as such
3812 cp->c_flag |= C_DELETED;
3813
3814
3815 /* Remove our entry from the namei cache. */
3816 cache_purge(vp);
3817
3818 /*
3819 * If the caller was operating on a file (as opposed to a
3820 * directory with EAs), then we need to figure out
3821 * whether or not it has a valid resource fork vnode.
3822 *
3823 * If there was a valid resource fork vnode, then we need
3824 * to use hfs_truncate to eliminate its data. If there is
3825 * no vnode, then we hold the cnode lock which would
3826 * prevent it from being created. As a result,
3827 * we can use the data deletion functions which do not
3828 * require that a cnode/vnode pair exist.
3829 */
3830
3831 /* Check if this file is being used. */
3832 if (isdir == 0) {
3833 dataforkbusy = vnode_isinuse(vp, 0);
3834 /*
3835 * At this point, we know that 'vp' points to a
3836 * data fork because we checked it up front. And if
3837 * there is no rsrc fork, rsrc_vp will be NULL.
3838 */
3839 if (rsrc_vp && (cp->c_blocks - VTOF(vp)->ff_blocks)) {
3840 rsrcforkbusy = vnode_isinuse(rsrc_vp, 0);
3841 }
3842 }
3843
3844 /* Check if we have to break the deletion into multiple pieces. */
3845 if (isdir == 0)
3846 isbigfile = cp->c_datafork->ff_size >= HFS_BIGFILE_SIZE;
3847
3848 /* Check if the file has xattrs. If it does, we'll have to delete them in
3849 individual transactions in case there are too many. */
3850 if ((hfsmp->hfs_attribute_vp != NULL) &&
3851 (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) {
3852 defer_remove = 1;
3853 }
3854
3855 /* If we are explicitly told to only unlink the item and move it to the hidden dir, then do it */
3856 if (only_unlink) {
3857 defer_remove = 1;
3858 }
3859
3860 /*
3861 * Carbon semantics prohibit deleting busy files.
3862 * (enforced when VNODE_REMOVE_NODELETEBUSY is requested)
3863 */
3864 if (dataforkbusy || rsrcforkbusy) {
3865 if ((flags & VNODE_REMOVE_NODELETEBUSY) ||
3866 (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid == 0)) {
3867 error = EBUSY;
3868 goto out;
3869 }
3870 }
3871
3872 #if QUOTA
3873 if (hfsmp->hfs_flags & HFS_QUOTAS)
3874 (void)hfs_getinoquota(cp);
3875 #endif /* QUOTA */
3876
3877 /*
3878 * Do a ubc_setsize to indicate we need to wipe contents if:
3879 * 1) the item is a regular file.
3880 * 2) Neither fork is busy AND we are not told to only unlink this.
3881 *
3882 * We need to check for defer_remove since it can be set without
3883 * having a busy data or rsrc fork.
3884 */
3885 if (isdir == 0 && (!dataforkbusy || !rsrcforkbusy) && (defer_remove == 0)) {
3886 /*
3887 * A ubc_setsize can cause a pagein so defer it
3888 * until after the cnode lock is dropped. The
3889 * cnode lock cannot be dropped/reacquired here
3890 * since we might already hold the journal lock.
3891 */
3892 if (!dataforkbusy && cp->c_datafork->ff_blocks && !isbigfile) {
3893 cp->c_flag |= C_NEED_DATA_SETSIZE;
3894 }
3895 if (!rsrcforkbusy && rsrc_vp) {
3896 cp->c_flag |= C_NEED_RSRC_SETSIZE;
3897 }
3898 }
3899
3900 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3901 goto out;
3902 }
3903 started_tr = 1;
3904
3905 // XXXdbg - if we're journaled, kill any dirty symlink buffers
3906 if (hfsmp->jnl && vnode_islnk(vp) && (defer_remove == 0)) {
3907 buf_iterate(vp, hfs_removefile_callback, BUF_SKIP_NONLOCKED, (void *)hfsmp);
3908 }
3909
3910 /*
3911 * Prepare to truncate any non-busy forks. Busy forks will
3912 * get truncated when their vnode goes inactive.
3913 * Note that we will only enter this region if we
3914 * can avoid creating an open-unlinked file. If
3915 * either fork is busy, we will have to create an
3916 * open-unlinked file.
3917 *
3918 * Since we are deleting the file, we need to stagger the runtime
3919 * modifications to do things in such a way that a crash won't
3920 * result in us getting overlapped extents or any other
3921 * bad inconsistencies. As such, we call prepare_release_storage
3922 * which updates the UBC, updates quota information, and releases
3923 * any loaned blocks that belong to this file. No actual
3924 * truncation or bitmap manipulation is done until *AFTER*
3925 * the catalog record is removed.
3926 */
3927 if (isdir == 0 && (!dataforkbusy && !rsrcforkbusy) && (only_unlink == 0)) {
3928
3929 if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) {
3930
3931 error = hfs_prepare_release_storage (hfsmp, vp);
3932 if (error) {
3933 goto out;
3934 }
3935 update_vh = 1;
3936 }
3937
3938 /*
3939 * If the resource fork vnode does not exist, we can skip this step.
3940 */
3941 if (!rsrcforkbusy && rsrc_vp) {
3942 error = hfs_prepare_release_storage (hfsmp, rsrc_vp);
3943 if (error) {
3944 goto out;
3945 }
3946 update_vh = 1;
3947 }
3948 }
3949
3950 /*
3951 * Protect against a race with rename by using the component
3952 * name passed in and parent id from dvp (instead of using
3953 * the cp->c_desc which may have changed). Also, be aware that
3954 * because we allow directories to be passed in, we need to special case
3955 * this temporary descriptor in case we were handed a directory.
3956 */
3957 if (isdir) {
3958 desc.cd_flags = CD_ISDIR;
3959 }
3960 else {
3961 desc.cd_flags = 0;
3962 }
3963 desc.cd_encoding = cp->c_desc.cd_encoding;
3964 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3965 desc.cd_namelen = cnp->cn_namelen;
3966 desc.cd_parentcnid = dcp->c_fileid;
3967 desc.cd_hint = cp->c_desc.cd_hint;
3968 desc.cd_cnid = cp->c_cnid;
3969 microtime(&tv);
3970
3971 /*
3972 * There are two cases to consider:
3973 * 1. File/Dir is busy/big/defer_remove ==> move/rename the file/dir
3974 * 2. File is not in use ==> remove the file
3975 *
3976 * We can get a directory in case 1 because it may have had lots of attributes,
3977 * which need to get removed here.
3978 */
3979 if (dataforkbusy || rsrcforkbusy || isbigfile || defer_remove) {
3980 char delname[32];
3981 struct cat_desc to_desc;
3982 struct cat_desc todir_desc;
3983
3984 /*
3985 * Orphan this file or directory (move to hidden directory).
3986 * Again, we need to take care that we treat directories as directories,
3987 * and files as files. Because directories with attributes can be passed in,
3988 * check to make sure that we have a directory or a file before filling in the
3989 * temporary descriptor's flags. We keep orphaned directories AND files in
3990 * the FILE_HARDLINKS private directory since we're generalizing over all
3991 * orphaned filesystem objects.
3992 */
3993 bzero(&todir_desc, sizeof(todir_desc));
3994 todir_desc.cd_parentcnid = 2;
3995
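/*
 * The temporary name is derived from the cnode's file ID, so entries
 * parked in the private FILE_HARDLINKS directory cannot collide.
 */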
3996 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
3997 bzero(&to_desc, sizeof(to_desc));
3998 to_desc.cd_nameptr = (const u_int8_t *)delname;
3999 to_desc.cd_namelen = strlen(delname);
4000 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
4001 if (isdir) {
4002 to_desc.cd_flags = CD_ISDIR;
4003 }
4004 else {
4005 to_desc.cd_flags = 0;
4006 }
4007 to_desc.cd_cnid = cp->c_cnid;
4008
4009 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
4010 if (!skip_reserve) {
4011 if ((error = cat_preflight(hfsmp, CAT_RENAME, NULL, 0))) {
4012 hfs_systemfile_unlock(hfsmp, lockflags);
4013 goto out;
4014 }
4015 }
4016
4017 error = cat_rename(hfsmp, &desc, &todir_desc,
4018 &to_desc, (struct cat_desc *)NULL);
4019
4020 if (error == 0) {
4021 hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries++;
4022 if (isdir == 1) {
4023 INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
4024 }
4025 (void) cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
4026 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
4027
4028 /* Update the parent directory */
4029 if (dcp->c_entries > 0)
4030 dcp->c_entries--;
4031 if (isdir == 1) {
4032 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
4033 }
4034 dcp->c_dirchangecnt++;
4035 hfs_incr_gencount(dcp);
4036
4037 dcp->c_ctime = tv.tv_sec;
4038 dcp->c_mtime = tv.tv_sec;
4039 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
4040
4041 /* Update the file or directory's state */
4042 cp->c_flag |= C_DELETED;
4043 cp->c_ctime = tv.tv_sec;
4044 --cp->c_linkcount;
4045 (void) cat_update(hfsmp, &to_desc, &cp->c_attr, NULL, NULL);
4046 }
4047 hfs_systemfile_unlock(hfsmp, lockflags);
4048 if (error)
4049 goto out;
4050
4051 }
4052 else {
4053 /*
4054 * Nobody is using this item; we can safely remove everything.
4055 */
4056 struct filefork *temp_rsrc_fork = NULL;
4057 #if QUOTA
4058 off_t savedbytes;
4059 int blksize = hfsmp->blockSize;
4060 #endif
4061 u_int32_t fileid = cp->c_fileid;
4062
4063 /*
4064 * Figure out if we need to read the resource fork data into
4065 * core before wiping out the catalog record.
4066 *
4067 * 1) Must not be a directory
4068 * 2) cnode's c_rsrcfork ptr must be NULL.
4069 * 3) rsrc fork must have actual blocks
4070 */
4071 if ((isdir == 0) && (cp->c_rsrcfork == NULL) &&
4072 (cp->c_blocks - VTOF(vp)->ff_blocks)) {
4073 /*
4074 * The resource fork vnode & filefork did not exist.
4075 * Create a temporary one for use in this function only.
4076 */
4077 MALLOC_ZONE (temp_rsrc_fork, struct filefork *, sizeof (struct filefork), M_HFSFORK, M_WAITOK);
4078 bzero(temp_rsrc_fork, sizeof(struct filefork));
4079 temp_rsrc_fork->ff_cp = cp;
4080 rl_init(&temp_rsrc_fork->ff_invalidranges);
4081 }
4082
4083 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
4084
4085 /* Look up the resource fork first, if necessary */
4086 if (temp_rsrc_fork) {
4087 error = cat_lookup (hfsmp, &desc, 1, 0, (struct cat_desc*) NULL,
4088 (struct cat_attr*) NULL, &temp_rsrc_fork->ff_data, NULL);
4089 if (error) {
4090 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
4091 hfs_systemfile_unlock (hfsmp, lockflags);
4092 goto out;
4093 }
4094 }
4095
4096 if (!skip_reserve) {
4097 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
4098 if (temp_rsrc_fork) {
4099 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
4100 }
4101 hfs_systemfile_unlock(hfsmp, lockflags);
4102 goto out;
4103 }
4104 }
4105
4106 error = cat_delete(hfsmp, &desc, &cp->c_attr);
4107
4108 if (error && error != ENXIO && error != ENOENT) {
4109 printf("hfs_removefile: deleting file %s (id=%d) vol=%s err=%d\n",
4110 cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, hfsmp->vcbVN, error);
4111 }
4112
4113 if (error == 0) {
4114 /* Update the parent directory */
4115 if (dcp->c_entries > 0)
4116 dcp->c_entries--;
4117 dcp->c_dirchangecnt++;
4118 hfs_incr_gencount(dcp);
4119
4120 dcp->c_ctime = tv.tv_sec;
4121 dcp->c_mtime = tv.tv_sec;
4122 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
4123 }
4124 hfs_systemfile_unlock(hfsmp, lockflags);
4125
4126 if (error) {
4127 if (temp_rsrc_fork) {
4128 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
4129 }
4130 goto out;
4131 }
4132
4133 /*
4134 * Now that we've wiped out the catalog record, the file effectively doesn't
4135 * exist anymore. So update the quota records to reflect the loss of the
4136 * data fork and the resource fork.
4137 */
4138 #if QUOTA
4139 if (cp->c_datafork->ff_blocks > 0) {
4140 savedbytes = ((off_t)cp->c_datafork->ff_blocks * (off_t)blksize);
4141 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
4142 }
4143
4144 /*
4145 * We may have just deleted the catalog record for a resource fork even
4146 * though it did not exist in core as a vnode. However, just because there
4147 * was a resource fork pointer in the cnode does not mean that it had any blocks.
4148 */
4149 if (temp_rsrc_fork || cp->c_rsrcfork) {
4150 if (cp->c_rsrcfork) {
4151 if (cp->c_rsrcfork->ff_blocks > 0) {
4152 savedbytes = ((off_t)cp->c_rsrcfork->ff_blocks * (off_t)blksize);
4153 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
4154 }
4155 }
4156 else {
4157 /* we must have used a temporary fork */
4158 savedbytes = ((off_t)temp_rsrc_fork->ff_blocks * (off_t)blksize);
4159 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
4160 }
4161 }
4162
4163 if (hfsmp->hfs_flags & HFS_QUOTAS) {
4164 (void)hfs_chkiq(cp, -1, NOCRED, 0);
4165 }
4166 #endif
4167
4168 /*
4169 * If we didn't get any errors deleting the catalog entry, then go ahead
4170 * and release the backing store now. The filefork pointers are still valid.
4171 */
4172 if (temp_rsrc_fork) {
4173 error = hfs_release_storage (hfsmp, cp->c_datafork, temp_rsrc_fork, fileid);
4174 }
4175 else {
4176 /* if cp->c_rsrcfork == NULL, hfs_release_storage will skip over it. */
4177 error = hfs_release_storage (hfsmp, cp->c_datafork, cp->c_rsrcfork, fileid);
4178 }
4179 if (error) {
4180 /*
4181 * If we encountered an error updating the extents and bitmap,
4182 * mark the volume inconsistent. The catalog record has
4183 * already been deleted, so we can't recover it at this point. We need
4184 * to proceed and update the volume header and mark the cnode C_NOEXISTS.
4185 * The subsequent fsck should be able to recover the free space for us.
4186 */
4187 hfs_mark_inconsistent(hfsmp, HFS_OP_INCOMPLETE);
4188 }
4189 else {
4190 /* reset update_vh to 0, since hfs_release_storage should have done it for us */
4191 update_vh = 0;
4192 }
4193
4194 /* Get rid of the temporary rsrc fork */
4195 if (temp_rsrc_fork) {
4196 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
4197 }
4198
4199 cp->c_flag |= C_NOEXISTS;
4200 cp->c_flag &= ~C_DELETED;
4201
4202 cp->c_touch_chgtime = TRUE; /* XXX needed ? */
4203 --cp->c_linkcount;
4204
4205 /*
4206 * We must never get a directory if we're in this else block. We could
4207 * accidentally drop the number of files in the volume header if we did.
4208 */
4209 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
4210
4211 }
4212
4213 //
4214 // if skip_reserve == 1 then we're being called from hfs_vnop_rename() and thus
4215 // we don't need to touch the document_id as it's handled by the rename code.
4216 // otherwise it's a normal remove and we need to save the document id in the
4217 // per thread struct and clear it from the cnode.
4218 //
4219 struct doc_tombstone *ut;
4220 ut = get_uthread_doc_tombstone();
4221 if (!error && !skip_reserve && (cp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, vp, cnp)) {
4222
4223 if (ut->t_lastop_document_id) {
4224 clear_tombstone_docid(ut, hfsmp, NULL);
4225 }
4226 save_tombstone(hfsmp, dvp, vp, cnp, 1);
4227
4228 }
4229
4230
4231 /*
4232 * All done with this cnode's descriptor...
4233 *
4234 * Note: all future catalog calls for this cnode must be by
4235 * fileid only. This is OK for HFS (which doesn't have file
4236 * thread records) since HFS doesn't support the removal of
4237 * busy files.
4238 */
4239 cat_releasedesc(&cp->c_desc);
4240
4241 out:
4242 if (error) {
4243 cp->c_flag &= ~C_DELETED;
4244 }
4245
4246 if (update_vh) {
4247 /*
4248 * If we bailed out earlier, we may need to update the volume header
4249 * to deal with the borrowed blocks accounting.
4250 */
4251 hfs_volupdate (hfsmp, VOL_UPDATE, 0);
4252 }
4253
4254 if (started_tr) {
4255 hfs_end_transaction(hfsmp);
4256 }
4257
4258 dcp->c_flag &= ~C_DIR_MODIFICATION;
4259 wakeup((caddr_t)&dcp->c_flag);
4260
4261 return (error);
4262 }
4263
4264
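/*
 * Install a new catalog descriptor on a cnode: any existing name buffer is
 * released and ownership of the new one passes to the cnode (the caller's
 * descriptor is cleared). hfs_vnop_rename uses this below to adopt the
 * out_desc returned by cat_rename.
 */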
4265 __private_extern__ void
4266 replace_desc(struct cnode *cp, struct cat_desc *cdp)
4267 {
4268 // fixes 4348457 and 4463138
4269 if (&cp->c_desc == cdp) {
4270 return;
4271 }
4272
4273 /* First release allocated name buffer */
4274 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
4275 const u_int8_t *name = cp->c_desc.cd_nameptr;
4276
4277 cp->c_desc.cd_nameptr = 0;
4278 cp->c_desc.cd_namelen = 0;
4279 cp->c_desc.cd_flags &= ~CD_HASBUF;
4280 vfs_removename((const char *)name);
4281 }
4282 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
4283
4284 /* Cnode now owns the name buffer */
4285 cdp->cd_nameptr = 0;
4286 cdp->cd_namelen = 0;
4287 cdp->cd_flags &= ~CD_HASBUF;
4288 }
4289
4290
4291 /*
4292 * Rename a cnode.
4293 *
4294 * The VFS layer guarantees that:
4295 * - source and destination will either both be directories, or
4296 * both not be directories.
4297 * - all the vnodes are from the same file system
4298 *
4299 * When the target is a directory, HFS must ensure that it is empty.
4300 *
4301 * Note that this function requires up to 6 vnodes in order to work properly
4302 * if it is operating on files (and not on directories). This is because only
4303 * files can have resource forks, and we now require iocounts to be held on the
4304 * vnodes corresponding to the resource forks (if applicable) as well as
4305 * the files or directories undergoing rename. The problem with not holding
4306 * iocounts on the resource fork vnodes is that it can lead to a deadlock
4307 * situation: The rsrc fork of the source file may be recycled and reclaimed
4308 * in order to provide a vnode for the destination file's rsrc fork. Since
4309 * data and rsrc forks share the same cnode, we'd eventually try to lock the
4310 * source file's cnode in order to sync its rsrc fork to disk, but it's already
4311 * been locked. By taking the rsrc fork vnodes up front we ensure that they
4312 * cannot be recycled, and that the situation mentioned above cannot happen.
4313 */
4314 int
4315 hfs_vnop_rename(ap)
4316 struct vnop_rename_args /* {
4317 struct vnode *a_fdvp;
4318 struct vnode *a_fvp;
4319 struct componentname *a_fcnp;
4320 struct vnode *a_tdvp;
4321 struct vnode *a_tvp;
4322 struct componentname *a_tcnp;
4323 vfs_context_t a_context;
4324 } */ *ap;
4325 {
4326 struct vnode *tvp = ap->a_tvp;
4327 struct vnode *tdvp = ap->a_tdvp;
4328 struct vnode *fvp = ap->a_fvp;
4329 struct vnode *fdvp = ap->a_fdvp;
4330 /*
4331 * Note that we only need locals for the target/destination's
4332 * resource fork vnode (and only if necessary). We don't care if the
4333 * source has a resource fork vnode or not.
4334 */
4335 struct vnode *tvp_rsrc = NULLVP;
4336 uint32_t tvp_rsrc_vid = 0;
4337 struct componentname *tcnp = ap->a_tcnp;
4338 struct componentname *fcnp = ap->a_fcnp;
4339 struct proc *p = vfs_context_proc(ap->a_context);
4340 struct cnode *fcp;
4341 struct cnode *fdcp;
4342 struct cnode *tdcp;
4343 struct cnode *tcp;
4344 struct cnode *error_cnode;
4345 struct cat_desc from_desc;
4346 struct cat_desc to_desc;
4347 struct cat_desc out_desc;
4348 struct hfsmount *hfsmp;
4349 cat_cookie_t cookie;
4350 int tvp_deleted = 0;
4351 int started_tr = 0, got_cookie = 0;
4352 int took_trunc_lock = 0;
4353 int lockflags;
4354 int error;
4355 time_t orig_from_ctime, orig_to_ctime;
4356 int emit_rename = 1;
4357 int emit_delete = 1;
4358 int is_tracked = 0;
4359 int unlocked;
4360
4361 orig_from_ctime = VTOC(fvp)->c_ctime;
4362 if (tvp && VTOC(tvp)) {
4363 orig_to_ctime = VTOC(tvp)->c_ctime;
4364 } else {
4365 orig_to_ctime = ~0;
4366 }
4367
4368 hfsmp = VTOHFS(tdvp);
4369 /*
4370 * Do special case checks here. If fvp == tvp then we need to check the
4371 * cnode with locks held.
4372 */
4373 if (fvp == tvp) {
4374 int is_hardlink = 0;
4375 /*
4376 * In this case, we do *NOT* ever emit a DELETE event.
4377 * We may not necessarily emit a RENAME event, either.
4378 */
4379 emit_delete = 0;
4380 if ((error = hfs_lock(VTOC(fvp), HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
4381 return error;
4382 }
4383 /* Check to see if the item is a hardlink or not */
4384 is_hardlink = (VTOC(fvp)->c_flag & C_HARDLINK);
4385 hfs_unlock (VTOC(fvp));
4386
4387 /*
4388 * If the item is not a hardlink, then case sensitivity must be off; otherwise
4389 * two names could not resolve to the same cnode unless they were case variants.
4390 */
4391 if (is_hardlink) {
4392 emit_rename = 0;
4393 /*
4394 * Hardlinks are a little trickier. We only want to emit a rename event
4395 * if the item is a hardlink, the parent directories are the same, case sensitivity
4396 * is off, and the case folded names are the same. See the fvp == tvp case below for more
4397 * info.
4398 */
4399
4400 if ((fdvp == tdvp) && ((hfsmp->hfs_flags & HFS_CASE_SENSITIVE) == 0)) {
4401 if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
4402 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
4403 /* Then in this case only it is ok to emit a rename */
4404 emit_rename = 1;
4405 }
4406 }
4407 }
4408 }
4409 if (emit_rename) {
4410 /* c_bsdflags should only be assessed while holding the cnode lock.
4411 * This is not done consistently throughout the code and can result
4412 * in a race. This will be fixed via rdar://12181064.
4413 */
4414 if (VTOC(fvp)->c_bsdflags & UF_TRACKED) {
4415 is_tracked = 1;
4416 }
4417 check_for_tracked_file(fvp, orig_from_ctime, NAMESPACE_HANDLER_RENAME_OP, NULL);
4418 }
4419
4420 if (tvp && VTOC(tvp)) {
4421 if (emit_delete) {
4422 check_for_tracked_file(tvp, orig_to_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
4423 }
4424 }
4425
4426 retry:
4427 /* When tvp exists, take the truncate lock for hfs_removefile(). */
4428 if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) {
4429 hfs_lock_truncate(VTOC(tvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4430 took_trunc_lock = 1;
4431 }
4432
4433 relock:
4434 error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL,
4435 HFS_EXCLUSIVE_LOCK, &error_cnode);
4436 if (error) {
4437 if (took_trunc_lock) {
4438 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
4439 took_trunc_lock = 0;
4440 }
4441
4442 /*
4443 * We hit an error path. If we were trying to re-acquire the locks
4444 * after coming through here once, we might have already obtained
4445 * an iocount on tvp's resource fork vnode. Drop that before dealing
4446 * with the failure. Note this is safe -- since we are in an
4447 * error handling path, we can't be holding the cnode locks.
4448 */
4449 if (tvp_rsrc) {
4450 vnode_put (tvp_rsrc);
4451 tvp_rsrc_vid = 0;
4452 tvp_rsrc = NULL;
4453 }
4454
4455 /*
4456 * tvp might no longer exist. If the cause of the lock failure
4457 * was tvp, then we can try again with tvp/tcp set to NULL.
4458 * This is ok because the vfs syscall will vnode_put the vnodes
4459 * after we return from hfs_vnop_rename.
4460 */
4461 if ((error == ENOENT) && (tvp != NULL) && (error_cnode == VTOC(tvp))) {
4462 tcp = NULL;
4463 tvp = NULL;
4464 goto retry;
4465 }
4466
4467 /* If we want to reintroduce notifications for failed renames, this
4468 is the place to do it. */
4469
4470 return (error);
4471 }
4472
4473 fdcp = VTOC(fdvp);
4474 fcp = VTOC(fvp);
4475 tdcp = VTOC(tdvp);
4476 tcp = tvp ? VTOC(tvp) : NULL;
4477
4478 //
4479 // if the item is tracked but doesn't have a document_id, assign one and generate an fsevent for it
4480 //
4481 unlocked = 0;
4482 if ((fcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
4483 uint32_t newid;
4484
4485 hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
4486 unlocked = 1;
4487
4488 if (hfs_generate_document_id(hfsmp, &newid) == 0) {
4489 hfs_lock(fcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4490 ((struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16))->document_id = newid;
4491 #if CONFIG_FSE
4492 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4493 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4494 FSE_ARG_INO, (ino64_t)0, // src inode #
4495 FSE_ARG_INO, (ino64_t)fcp->c_fileid, // dst inode #
4496 FSE_ARG_INT32, newid,
4497 FSE_ARG_DONE);
4498 #endif
4499 hfs_unlock(fcp);
4500 } else {
4501 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rename...
4502 }
4503
4504 //
4505 // check if we're going to need to fix tcp as well. if we aren't, go back and relock
4506 // everything. otherwise continue on and fix up tcp as well before relocking.
4507 //
4508 if (tcp == NULL || !(tcp->c_bsdflags & UF_TRACKED) || ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id != 0) {
4509 goto relock;
4510 }
4511 }
4512
4513 //
4514 // same thing for tcp if it's set
4515 //
4516 if (tcp && (tcp->c_bsdflags & UF_TRACKED) && ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id == 0) {
4517 uint32_t newid;
4518
4519 if (!unlocked) {
4520 hfs_unlockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL);
4521 unlocked = 1;
4522 }
4523
4524 if (hfs_generate_document_id(hfsmp, &newid) == 0) {
4525 hfs_lock(tcp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4526 ((struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16))->document_id = newid;
4527 #if CONFIG_FSE
4528 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4529 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4530 FSE_ARG_INO, (ino64_t)0, // src inode #
4531 FSE_ARG_INO, (ino64_t)tcp->c_fileid, // dst inode #
4532 FSE_ARG_INT32, newid,
4533 FSE_ARG_DONE);
4534 #endif
4535 hfs_unlock(tcp);
4536 } else {
4537 // XXXdbg - couldn't get a new docid... what to do? can't really fail the rename...
4538 }
4539
4540 // go back up and relock everything. next time through the if statement won't be true
4541 // and we'll skip over this block of code.
4542 goto relock;
4543 }
4544
4545
4546
4547 /*
4548 * Acquire iocounts on the destination's resource fork vnode
4549 * if necessary. If dst/src are files and the dst has a resource
4550 * fork vnode, then we need to try and acquire an iocount on the rsrc vnode.
4551 * If it does not exist, then we don't care and can skip it.
4552 */
4553 if ((vnode_isreg(fvp)) || (vnode_islnk(fvp))) {
4554 if ((tvp) && (tcp->c_rsrc_vp) && (tvp_rsrc == NULL)) {
4555 tvp_rsrc = tcp->c_rsrc_vp;
4556 /*
4557 * We can look at the vid here because we're holding the
4558 * cnode lock on the underlying cnode for this rsrc vnode.
4559 */
4560 tvp_rsrc_vid = vnode_vid (tvp_rsrc);
4561
4562 /* Unlock everything to acquire iocount on this rsrc vnode */
4563 if (took_trunc_lock) {
4564 hfs_unlock_truncate (VTOC(tvp), HFS_LOCK_DEFAULT);
4565 took_trunc_lock = 0;
4566 }
4567 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4568
4569 if (vnode_getwithvid (tvp_rsrc, tvp_rsrc_vid)) {
4570 /* iocount acquisition failed. Reset fields and start over. */
4571 tvp_rsrc_vid = 0;
4572 tvp_rsrc = NULL;
4573 }
4574 goto retry;
4575 }
4576 }
4577
4578
4579
4580 /* Ensure we didn't race src or dst parent directories with rmdir. */
4581 if (fdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
4582 error = ENOENT;
4583 goto out;
4584 }
4585
4586 if (tdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
4587 error = ENOENT;
4588 goto out;
4589 }
4590
4591
4592 /* Check for a race against unlink. The hfs_valid_cnode checks validate
4593 * the parent/child relationship with fdcp and tdcp, as well as the
4594 * component name of the target cnodes.
4595 */
4596 if ((fcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, fdvp, fcnp, fcp->c_fileid, NULL, &error)) {
4597 error = ENOENT;
4598 goto out;
4599 }
4600
4601 if (tcp && ((tcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, tdvp, tcnp, tcp->c_fileid, NULL, &error))) {
4602 //
4603 // hmm, the destination vnode isn't valid any more.
4604 // in this case we can just drop him and pretend he
4605 // never existed in the first place.
4606 //
4607 if (took_trunc_lock) {
4608 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
4609 took_trunc_lock = 0;
4610 }
4611 error = 0;
4612
4613 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4614
4615 tcp = NULL;
4616 tvp = NULL;
4617
4618 // retry the locking with tvp null'ed out
4619 goto retry;
4620 }
4621
4622 fdcp->c_flag |= C_DIR_MODIFICATION;
4623 if (fdvp != tdvp) {
4624 tdcp->c_flag |= C_DIR_MODIFICATION;
4625 }
4626
4627 /*
4628 * Disallow renaming of a directory hard link if the source and
4629 * destination parent directories are different, or a directory whose
4630 * descendant is a directory hard link and one of the ancestors
4631 * of the destination directory is a directory hard link.
4632 */
4633 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
4634 if (fcp->c_flag & C_HARDLINK) {
4635 error = EPERM;
4636 goto out;
4637 }
4638 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
4639 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4640 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
4641 error = EPERM;
4642 hfs_systemfile_unlock(hfsmp, lockflags);
4643 goto out;
4644 }
4645 hfs_systemfile_unlock(hfsmp, lockflags);
4646 }
4647 }
4648
4649 /*
4650 * The following edge case is caught here:
4651 * (to cannot be a descendant of from)
4652 *
4653 * o fdvp
4654 * /
4655 * /
4656 * o fvp
4657 * \
4658 * \
4659 * o tdvp
4660 * /
4661 * /
4662 * o tvp
4663 */
4664 if (tdcp->c_parentcnid == fcp->c_fileid) {
4665 error = EINVAL;
4666 goto out;
4667 }
4668
4669 /*
4670 * The following two edge cases are caught here:
4671 * (note tvp is not empty)
4672 *
4673 * o tdvp o tdvp
4674 * / /
4675 * / /
4676 * o tvp tvp o fdvp
4677 * \ \
4678 * \ \
4679 * o fdvp o fvp
4680 * /
4681 * /
4682 * o fvp
4683 */
4684 if (tvp && vnode_isdir(tvp) && (tcp->c_entries != 0) && fvp != tvp) {
4685 error = ENOTEMPTY;
4686 goto out;
4687 }
4688
4689 /*
4690 * The following edge case is caught here:
4691 * (the from child and parent are the same)
4692 *
4693 * o tdvp
4694 * /
4695 * /
4696 * fdvp o fvp
4697 */
4698 if (fdvp == fvp) {
4699 error = EINVAL;
4700 goto out;
4701 }
4702
4703 /*
4704 * Make sure "from" vnode and its parent are changeable.
4705 */
4706 if ((fcp->c_bsdflags & (IMMUTABLE | APPEND)) || (fdcp->c_bsdflags & APPEND)) {
4707 error = EPERM;
4708 goto out;
4709 }
4710
4711 /*
4712 * If the destination parent directory is "sticky", then the
4713 * user must own the parent directory, or the destination of
4714 * the rename, otherwise the destination may not be changed
4715 * (except by root). This implements append-only directories.
4716 *
4717 * Note that checks for immutable and write access are done
4718 * by the call to hfs_removefile.
4719 */
4720 if (tvp && (tdcp->c_mode & S_ISTXT) &&
4721 (suser(vfs_context_ucred(tcnp->cn_context), NULL)) &&
4722 (kauth_cred_getuid(vfs_context_ucred(tcnp->cn_context)) != tdcp->c_uid) &&
4723 (hfs_owner_rights(hfsmp, tcp->c_uid, vfs_context_ucred(tcnp->cn_context), p, false)) ) {
4724 error = EPERM;
4725 goto out;
4726 }
4727
4728 /* Don't allow modification of the journal or journal_info_block */
4729 if (hfs_is_journal_file(hfsmp, fcp) ||
4730 (tcp && hfs_is_journal_file(hfsmp, tcp))) {
4731 error = EPERM;
4732 goto out;
4733 }
4734
4735 #if QUOTA
4736 if (tvp)
4737 (void)hfs_getinoquota(tcp);
4738 #endif
4739 /* Preflighting done, take fvp out of the name space. */
4740 cache_purge(fvp);
4741
4742 #if CONFIG_SECLUDED_RENAME
4743 /*
4744 * Check for "secure" rename that imposes additional restrictions on the
4745 * source vnode. We wait until here to check in order to prevent a race
4746 * with other threads that manage to look up fvp, but their open or link
4747 * is blocked by our locks. At this point, with fvp out of the name cache,
4748 * and holding the lock on fdvp, no other thread can find fvp.
4749 *
4750 * TODO: Do we need to limit these checks to regular files only?
4751 */
4752 if (fcnp->cn_flags & CN_SECLUDE_RENAME) {
4753 if (vnode_isdir(fvp)) {
4754 error = EISDIR;
4755 goto out;
4756 }
4757
4758 /*
4759 * Neither fork of source may be open or memory mapped.
4760 * We also don't want it in use by any other system call.
4761 * The file must not have hard links.
4762 *
4763 * We can't simply use vnode_isinuse() because that does not
4764 * count opens with O_EVTONLY. We don't want a malicious
4765 * process using O_EVTONLY to subvert a secluded rename.
4766 */
4767 if (fcp->c_linkcount != 1) {
4768 error = EMLINK;
4769 goto out;
4770 }
4771
4772 if (fcp->c_rsrc_vp && (fcp->c_rsrc_vp->v_usecount > 0 ||
4773 fcp->c_rsrc_vp->v_iocount > 0)) {
4774 /* Resource fork is in use (including O_EVTONLY) */
4775 error = EBUSY;
4776 goto out;
4777 }
4778 if (fcp->c_vp && (fcp->c_vp->v_usecount > (fcp->c_rsrc_vp ? 1 : 0) ||
4779 fcp->c_vp->v_iocount > 1)) {
4780 /*
4781 * Data fork is in use, including O_EVTONLY, but not
4782 * including a reference from the resource fork.
4783 */
4784 error = EBUSY;
4785 goto out;
4786 }
4787 }
4788 #endif
4789
4790 bzero(&from_desc, sizeof(from_desc));
4791 from_desc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
4792 from_desc.cd_namelen = fcnp->cn_namelen;
4793 from_desc.cd_parentcnid = fdcp->c_fileid;
4794 from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
4795 from_desc.cd_cnid = fcp->c_cnid;
4796
4797 bzero(&to_desc, sizeof(to_desc));
4798 to_desc.cd_nameptr = (const u_int8_t *)tcnp->cn_nameptr;
4799 to_desc.cd_namelen = tcnp->cn_namelen;
4800 to_desc.cd_parentcnid = tdcp->c_fileid;
4801 to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
4802 to_desc.cd_cnid = fcp->c_cnid;
4803
4804 if ((error = hfs_start_transaction(hfsmp)) != 0) {
4805 goto out;
4806 }
4807 started_tr = 1;
4808
4809 /* hfs_vnop_link() and hfs_vnop_rename() set kHFSHasChildLinkMask
4810 * inside a journal transaction and without holding a cnode lock.
4811 * Since setting this bit relies on being inside a journal transaction for
4812 * concurrency, check it again after we start the journal transaction for the rename
4813 * to ensure that this directory does not have any descendant that
4814 * is a directory hard link.
4815 */
4816 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
4817 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
4818 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4819 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
4820 error = EPERM;
4821 hfs_systemfile_unlock(hfsmp, lockflags);
4822 goto out;
4823 }
4824 hfs_systemfile_unlock(hfsmp, lockflags);
4825 }
4826 }
4827
4828 // if it's a hardlink then re-lookup the name so
4829 // that we get the correct cnid in from_desc (see
4830 // the comment in hfs_removefile for more details)
4831 //
4832 if (fcp->c_flag & C_HARDLINK) {
4833 struct cat_desc tmpdesc;
4834 cnid_t real_cnid;
4835
4836 tmpdesc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
4837 tmpdesc.cd_namelen = fcnp->cn_namelen;
4838 tmpdesc.cd_parentcnid = fdcp->c_fileid;
4839 tmpdesc.cd_hint = fdcp->c_childhint;
4840 tmpdesc.cd_flags = fcp->c_desc.cd_flags & CD_ISDIR;
4841 tmpdesc.cd_encoding = 0;
4842
4843 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4844
4845 if (cat_lookup(hfsmp, &tmpdesc, 0, 0, NULL, NULL, NULL, &real_cnid) != 0) {
4846 hfs_systemfile_unlock(hfsmp, lockflags);
4847 goto out;
4848 }
4849
4850 // use the real cnid instead of whatever happened to be there
4851 from_desc.cd_cnid = real_cnid;
4852 hfs_systemfile_unlock(hfsmp, lockflags);
4853 }
4854
4855 /*
4856 * Reserve some space in the Catalog file.
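 * Reserving CAT_RENAME + CAT_DELETE covers both the rename of the source
 * and the possible removal of an existing target, since hfs_removefile /
 * hfs_removedir are called below with HFSRM_SKIP_RESERVE (skipping their
 * own preflight).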
4857 */
4858 if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) {
4859 goto out;
4860 }
4861 got_cookie = 1;
4862
4863 /*
4864 * If the destination exists then it may need to be removed.
4865 *
4866 * Due to HFS's locking system, we should always move the
4867 * existing 'tvp' element to the hidden directory in hfs_vnop_rename.
4868 * Because the VNOP_LOOKUP call enters and exits the filesystem independently
4869 * of the actual vnop that it was trying to do (stat, link, readlink),
4870 * we must release the cnode lock of that element during the interim to
4871 * do MAC checking, vnode authorization, and other calls. In that time,
4872 * the item can be deleted (or renamed over). However, only in the rename
4873 * case is it inappropriate to return ENOENT from any of those calls. Either
4874 * the call should return information about the old element (stale), or get
4875 * information about the newer element that we are about to write in its place.
4876 *
4877 * HFS lookup has been modified to detect a rename and re-drive its
4878 * lookup internally. For other calls that have already succeeded in
4879 * their lookup call and are waiting to acquire the cnode lock in order
4880 * to proceed, that cnode lock will not fail due to the cnode being marked
4881 * C_NOEXISTS, because it won't have been marked as such. It will only
4882 * have C_DELETED. Thus, they will simply act on the stale open-unlinked
4883 * element. All future callers will get the new element.
4884 *
4885 * To implement this behavior, we pass the "only_unlink" argument to
4886 * hfs_removefile and hfs_removedir. This will result in the vnode acting
4887 * as though it is open-unlinked. Additionally, when we are done moving the
4888 * element to the hidden directory, we vnode_recycle the target so that it is
4889 * reclaimed as soon as possible. Reclaim and inactive are both
4890 * capable of clearing out unused blocks for an open-unlinked file or dir.
4891 */
4892 if (tvp) {
4893 //
4894 // if the destination has a document id, we need to preserve it
4895 //
4896 if (fvp != tvp) {
4897 uint32_t document_id;
4898 struct FndrExtendedDirInfo *ffip = (struct FndrExtendedDirInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
4899 struct FndrExtendedDirInfo *tfip = (struct FndrExtendedDirInfo *)((char *)&tcp->c_attr.ca_finderinfo + 16);
4900
4901 if (ffip->document_id && tfip->document_id) {
4902 // both documents are tracked. only save a tombstone from tcp and do nothing else.
4903 save_tombstone(hfsmp, tdvp, tvp, tcnp, 0);
4904 } else {
4905 struct doc_tombstone *ut;
4906 ut = get_uthread_doc_tombstone();
4907
4908 document_id = tfip->document_id;
4909 tfip->document_id = 0;
4910
4911 if (document_id != 0) {
4912 // clear UF_TRACKED as well since tcp is no longer tracked
4913 tcp->c_bsdflags &= ~UF_TRACKED;
4914 (void) cat_update(hfsmp, &tcp->c_desc, &tcp->c_attr, NULL, NULL);
4915 }
4916
4917 if (ffip->document_id == 0 && document_id != 0) {
4918 // printf("RENAME: preserving doc-id %d onto %s (from ino %d, to ino %d)\n", document_id, tcp->c_desc.cd_nameptr, tcp->c_desc.cd_cnid, fcp->c_desc.cd_cnid);
4919 fcp->c_bsdflags |= UF_TRACKED;
4920 ffip->document_id = document_id;
4921
4922 (void) cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
4923 #if CONFIG_FSE
4924 add_fsevent(FSE_DOCID_CHANGED, vfs_context_current(),
4925 FSE_ARG_DEV, hfsmp->hfs_raw_dev,
4926 FSE_ARG_INO, (ino64_t)tcp->c_fileid, // src inode #
4927 FSE_ARG_INO, (ino64_t)fcp->c_fileid, // dst inode #
4928 FSE_ARG_INT32, (uint32_t)ffip->document_id,
4929 FSE_ARG_DONE);
4930 #endif
4931 } else if ((fcp->c_bsdflags & UF_TRACKED) && should_save_docid_tombstone(ut, fvp, fcnp)) {
4932
4933 if (ut->t_lastop_document_id) {
4934 clear_tombstone_docid(ut, hfsmp, NULL);
4935 }
4936 save_tombstone(hfsmp, fdvp, fvp, fcnp, 0);
4937
4938 //printf("RENAME: (dest-exists): saving tombstone doc-id %lld @ %s (ino %d)\n",
4939 // ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
4940 }
4941 }
4942 }
4943
4944 /*
4945 * When fvp matches tvp they could be case variants
4946 * or matching hard links.
4947 */
4948 if (fvp == tvp) {
4949 if (!(fcp->c_flag & C_HARDLINK)) {
4950 /*
4951 * If they're not hardlinks, then fvp == tvp must mean we
4952 * are using case-insensitive HFS because case-sensitive would
4953 * not use the same vnode for both. In this case we just update
4954 * the catalog for: a -> A
4955 */
4956 goto skip_rm; /* simple case variant */
4957
4958 }
4959 /* For all cases below, we must be using hardlinks */
4960 else if ((fdvp != tdvp) ||
4961 (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) {
4962 /*
4963 * If the parent directories are not the same, AND the two items
4964 * are hardlinks, posix says to do nothing:
4965 * dir1/fred <-> dir2/bob and the op was mv dir1/fred -> dir2/bob
4966 * We just return 0 in this case.
4967 *
4968 * If case sensitivity is on, and we are using hardlinks
4969 * then renaming is supposed to do nothing.
4970 * dir1/fred <-> dir2/FRED, and op == mv dir1/fred -> dir2/FRED
4971 */
4972 goto out; /* matching hardlinks, nothing to do */
4973
4974 } else if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
4975 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
4976 /*
4977 * If we get here, then the following must be true:
4978 * a) We are running case-insensitive HFS+.
4979 * b) Both paths 'fvp' and 'tvp' are in the same parent directory.
4980 * c) the two names are case-variants of each other.
4981 *
4982 * In this case, we are really only dealing with a single catalog record
4983 * whose name is being updated.
4984 *
4985 * op is dir1/fred -> dir1/FRED
4986 *
4987 * We need to special case the name matching, because if
4988 * dir1/fred <-> dir1/bob were the two links, and the
4989 * op was dir1/fred -> dir1/bob
4990 * That would fail/do nothing.
4991 */
4992 goto skip_rm; /* case-variant hardlink in the same dir */
4993 } else {
4994 goto out; /* matching hardlink, nothing to do */
4995 }
4996 }
4997
4998
4999 if (vnode_isdir(tvp)) {
5000 /*
5001 * hfs_removedir will eventually call hfs_removefile on the directory
5002 * we're working on, because only hfs_removefile does the renaming of the
5003 * item to the hidden directory. The directory will stay around in the
5004 * hidden directory with C_DELETED until it gets an inactive or a reclaim.
5005 * That way, we can destroy all of the EAs as needed and allow new ones to be
5006 * written.
5007 */
5008 error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE, 1);
5009 }
5010 else {
5011 error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0, NULL, 1);
5012
5013 /*
5014 * If the destination file had a resource fork vnode, then we need to get rid of
5015 * its blocks when there are no more references to it. Because the call to
5016 * hfs_removefile above always open-unlinks things, we need to force an inactive/reclaim
5017 * on the resource fork vnode, in order to prevent block leaks. Otherwise,
5018 * the resource fork vnode could prevent the data fork vnode from going out of scope
5019 * because it holds a v_parent reference on it. So we mark it for termination
5020 * with a call to vnode_recycle. hfs_vnop_reclaim has been modified so that it
5021 * can clean up the blocks of open-unlinked files and resource forks.
5022 *
5023 * We can safely call vnode_recycle on the resource fork because we took an iocount
5024 * reference on it at the beginning of the function.
5025 */
5026
5027 if ((error == 0) && (tcp->c_flag & C_DELETED) && (tvp_rsrc)) {
5028 vnode_recycle(tvp_rsrc);
5029 }
5030 }
5031
5032 if (error) {
5033 goto out;
5034 }
5035
5036 tvp_deleted = 1;
5037
5038 /* Mark 'tcp' as being deleted due to a rename */
5039 tcp->c_flag |= C_RENAMED;
5040
5041 /*
5042 * Aggressively mark tvp/tcp for termination to ensure that we recover all blocks
5043 * as quickly as possible.
5044 */
5045 vnode_recycle(tvp);
5046 } else {
5047 struct doc_tombstone *ut;
5048 ut = get_uthread_doc_tombstone();
5049
5050 //
5051 // There is nothing at the destination. If the file being renamed is
5052 // tracked, save a "tombstone" of the document_id. If the file is
5053 // not a tracked file, then see if it needs to inherit a tombstone.
5054 //
5055 // NOTE: we do not save a tombstone if the file being renamed begins
5056 // with "atmp", which is done to work around AutoCad's bizarre
5057 // 5-step un-safe save behavior
5058 //
5059 if (fcp->c_bsdflags & UF_TRACKED) {
5060 if (should_save_docid_tombstone(ut, fvp, fcnp)) {
5061 save_tombstone(hfsmp, fdvp, fvp, fcnp, 0);
5062
5063 //printf("RENAME: (no dest): saving tombstone doc-id %lld @ %s (ino %d)\n",
5064 // ut->t_lastop_document_id, ut->t_lastop_filename, fcp->c_desc.cd_cnid);
5065 } else {
5066 // intentionally do nothing
5067 }
5068 } else if ( ut->t_lastop_document_id != 0
5069 && tdvp == ut->t_lastop_parent
5070 && vnode_vid(tdvp) == ut->t_lastop_parent_vid
5071 && strcmp((char *)ut->t_lastop_filename, (char *)tcnp->cn_nameptr) == 0) {
5072
5073 //printf("RENAME: %s (ino %d) inheriting doc-id %lld\n", tcnp->cn_nameptr, fcp->c_desc.cd_cnid, ut->t_lastop_document_id);
5074 struct FndrExtendedFileInfo *fip = (struct FndrExtendedFileInfo *)((char *)&fcp->c_attr.ca_finderinfo + 16);
5075 fcp->c_bsdflags |= UF_TRACKED;
5076 fip->document_id = ut->t_lastop_document_id;
5077 cat_update(hfsmp, &fcp->c_desc, &fcp->c_attr, NULL, NULL);
5078
5079 clear_tombstone_docid(ut, hfsmp, fcp); // will send the docid-changed fsevent
5080
5081 } else if (ut->t_lastop_document_id && should_save_docid_tombstone(ut, fvp, fcnp) && should_save_docid_tombstone(ut, tvp, tcnp)) {
5082 // no match, clear the tombstone
5083 //printf("RENAME: clearing the tombstone %lld @ %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
5084 clear_tombstone_docid(ut, hfsmp, NULL);
5085 }
5086
5087 }
5088 skip_rm:
5089 /*
5090 * All done with tvp and fvp.
5091 *
5092 * We also jump to this point if there was no destination observed during lookup and namei.
5093 * However, because only iocounts are held at the VFS layer, there is nothing preventing a
5094 * competing thread from racing us and creating a file or dir at the destination of this rename
5095 * operation. If this occurs, it may cause us to get a spurious EEXIST out of the cat_rename
5096 * call below. To preserve rename's atomicity, we need to signal VFS to re-drive the
5097 * namei/lookup and restart the rename operation. EEXIST is an allowable errno for the rename
5098 * syscall to return, but not for this reason: in that context it is a synonym for ENOTEMPTY.
5099 * To signal VFS, we return ERECYCLE (which is also used for lookup restarts). This errno
5100 * will be swallowed and it will restart the operation.
5101 */
5102
5103 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
5104 error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);
5105 hfs_systemfile_unlock(hfsmp, lockflags);
5106
5107 if (error) {
5108 if (error == EEXIST) {
5109 error = ERECYCLE;
5110 }
5111 goto out;
5112 }
5113
5114 /* Invalidate negative cache entries in the destination directory */
5115 if (tdcp->c_flag & C_NEG_ENTRIES) {
5116 cache_purge_negatives(tdvp);
5117 tdcp->c_flag &= ~C_NEG_ENTRIES;
5118 }
5119
5120 /* Update cnode's catalog descriptor */
5121 replace_desc(fcp, &out_desc);
5122 fcp->c_parentcnid = tdcp->c_fileid;
5123 fcp->c_hint = 0;
5124
5125 /* Now indicate this cnode needs to have date-added written to the finderinfo */
5126 fcp->c_flag |= C_NEEDS_DATEADDED;
5127 (void) hfs_update (fvp, 0);
5128
5129
5130 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_RMDIR : VOL_RMFILE,
5131 (fdcp->c_cnid == kHFSRootFolderID));
5132 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_MKDIR : VOL_MKFILE,
5133 (tdcp->c_cnid == kHFSRootFolderID));
5134
5135 /* Update both parent directories. */
5136 if (fdvp != tdvp) {
5137 if (vnode_isdir(fvp)) {
5138 /* If the source directory has directory hard link
5139 * descendants, set the kHFSHasChildLinkBit in the
5140 * destination parent hierarchy
5141 */
5142 if ((fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) &&
5143 !(tdcp->c_attr.ca_recflags & kHFSHasChildLinkMask)) {
5144
5145 tdcp->c_attr.ca_recflags |= kHFSHasChildLinkMask;
5146
5147 error = cat_set_childlinkbit(hfsmp, tdcp->c_parentcnid);
5148 if (error) {
5149 printf ("hfs_vnop_rename: error updating parent chain for %u\n", tdcp->c_cnid);
5150 error = 0;
5151 }
5152 }
5153 INC_FOLDERCOUNT(hfsmp, tdcp->c_attr);
5154 DEC_FOLDERCOUNT(hfsmp, fdcp->c_attr);
5155 }
5156 tdcp->c_entries++;
5157 tdcp->c_dirchangecnt++;
5158 hfs_incr_gencount(tdcp);
5159
5160 if (fdcp->c_entries > 0)
5161 fdcp->c_entries--;
5162 fdcp->c_dirchangecnt++;
5163 fdcp->c_touch_chgtime = TRUE;
5164 fdcp->c_touch_modtime = TRUE;
5165
5166 fdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
5167 (void) hfs_update(fdvp, 0);
5168 }
5169 hfs_incr_gencount(fdcp);
5170
5171 tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
5172 tdcp->c_touch_chgtime = TRUE;
5173 tdcp->c_touch_modtime = TRUE;
5174
5175 tdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
5176 (void) hfs_update(tdvp, 0);
5177
5178 /* Update the vnode's name now that the rename has completed. */
5179 vnode_update_identity(fvp, tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
5180 tcnp->cn_hash, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME));
5181
5182 /*
5183 * At this point, we may have a resource fork vnode attached to the
5184 * 'from' vnode. If it exists, we will want to update its name, because
5185 * it contains the old name + _PATH_RSRCFORKSPEC. ("/..namedfork/rsrc").
5186 *
5187 * Note that the only thing we need to update here is the name attached to
5188 * the vnode, since a resource fork vnode does not have a separate resource
5189 * cnode -- it's still 'fcp'.
5190 */
5191 if (fcp->c_rsrc_vp) {
5192 char* rsrc_path = NULL;
5193 int len;
5194
5195 /* Create a new temporary buffer that's going to hold the new name */
5196 MALLOC_ZONE (rsrc_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
5197 len = snprintf (rsrc_path, MAXPATHLEN, "%s%s", tcnp->cn_nameptr, _PATH_RSRCFORKSPEC);
5198 len = MIN(len, MAXPATHLEN);
5199
5200 /*
5201 * vnode_update_identity will do the following for us:
5202 * 1) release reference on the existing rsrc vnode's name.
5203 * 2) copy/insert new name into the name cache
5204 * 3) attach the new name to the resource vnode
5205 * 4) update the vnode's vid
5206 */
5207 vnode_update_identity (fcp->c_rsrc_vp, fvp, rsrc_path, len, 0, (VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE));
5208
5209 /* Free the memory associated with the resource fork's name */
5210 FREE_ZONE (rsrc_path, MAXPATHLEN, M_NAMEI);
5211 }
5212 out:
5213 if (got_cookie) {
5214 cat_postflight(hfsmp, &cookie, p);
5215 }
5216 if (started_tr) {
5217 hfs_end_transaction(hfsmp);
5218 }
5219
5220 fdcp->c_flag &= ~C_DIR_MODIFICATION;
5221 wakeup((caddr_t)&fdcp->c_flag);
5222 if (fdvp != tdvp) {
5223 tdcp->c_flag &= ~C_DIR_MODIFICATION;
5224 wakeup((caddr_t)&tdcp->c_flag);
5225 }
5226
5227 if (took_trunc_lock) {
5228 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
5229 }
5230
5231 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
5232
5233 /* Now vnode_put the resource fork's vnode if necessary */
5234 if (tvp_rsrc) {
5235 vnode_put(tvp_rsrc);
5236 tvp_rsrc = NULL;
5237 }
5238
5239 /* After tvp is removed the only acceptable error is EIO */
5240 if (error && tvp_deleted)
5241 error = EIO;
5242
5243 /* If we want to reintroduce notifications for renames, this is the
5244 place to do it. */
5245
5246 return (error);
5247 }
5248
5249
5250 /*
5251 * Make a directory.
5252 */
5253 int
5254 hfs_vnop_mkdir(struct vnop_mkdir_args *ap)
5255 {
5256 /***** HACK ALERT ********/
5257 ap->a_cnp->cn_flags |= MAKEENTRY;
5258 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
5259 }
5260
5261
5262 /*
5263 * Create a symbolic link.
5264 */
5265 int
5266 hfs_vnop_symlink(struct vnop_symlink_args *ap)
5267 {
5268 struct vnode **vpp = ap->a_vpp;
5269 struct vnode *dvp = ap->a_dvp;
5270 struct vnode *vp = NULL;
5271 struct cnode *cp = NULL;
5272 struct hfsmount *hfsmp;
5273 struct filefork *fp;
5274 struct buf *bp = NULL;
5275 char *datap;
5276 int started_tr = 0;
5277 u_int32_t len;
5278 int error;
5279
5280 /* HFS standard disks don't support symbolic links */
5281 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord)
5282 return (ENOTSUP);
5283
5284 /* Check for empty target name */
5285 if (ap->a_target[0] == 0)
5286 return (EINVAL);
5287
5288 hfsmp = VTOHFS(dvp);
5289 len = strlen(ap->a_target);
5290
5291 /* Check for free space */
5292 if (((u_int64_t)hfs_freeblks(hfsmp, 0) * (u_int64_t)hfsmp->blockSize) < len) {
5293 return (ENOSPC);
5294 }
5295
5296 /* Create the vnode */
5297 ap->a_vap->va_mode |= S_IFLNK;
5298 if ((error = hfs_makenode(dvp, vpp, ap->a_cnp, ap->a_vap, ap->a_context))) {
5299 goto out;
5300 }
5301 vp = *vpp;
5302 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
5303 goto out;
5304 }
5305 cp = VTOC(vp);
5306 fp = VTOF(vp);
5307
5308 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
5309 goto out;
5310 }
5311
5312 #if QUOTA
5313 (void)hfs_getinoquota(cp);
5314 #endif /* QUOTA */
5315
5316 if ((error = hfs_start_transaction(hfsmp)) != 0) {
5317 goto out;
5318 }
5319 started_tr = 1;
5320
5321 /*
5322 * Allocate space for the link.
5323 *
5324 * Since we're already inside a transaction, this allocation is journaled with the create.
5325 *
5326 * Don't need truncate lock since a symlink is treated as a system file.
5327 */
5328 error = hfs_truncate(vp, len, IO_NOZEROFILL, 0, ap->a_context);
5329
5330 /* On errors, remove the symlink file */
5331 if (error) {
5332 /*
5333 * End the transaction so we don't re-take the cnode lock
5334 * below while inside a transaction (lock order violation).
5335 */
5336 hfs_end_transaction(hfsmp);
5337
5338 /* hfs_removefile() requires holding the truncate lock */
5339 hfs_unlock(cp);
5340 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
5341 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
5342
5343 if (hfs_start_transaction(hfsmp) != 0) {
5344 started_tr = 0;
5345 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
5346 goto out;
5347 }
5348
5349 (void) hfs_removefile(dvp, vp, ap->a_cnp, 0, 0, 0, NULL, 0);
5350 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
5351 goto out;
5352 }
5353
5354 /* Write the link to disk */
5355 bp = buf_getblk(vp, (daddr64_t)0, roundup((int)fp->ff_size, hfsmp->hfs_physical_block_size),
5356 0, 0, BLK_META);
5357 if (hfsmp->jnl) {
5358 journal_modify_block_start(hfsmp->jnl, bp);
5359 }
5360 datap = (char *)buf_dataptr(bp);
5361 bzero(datap, buf_size(bp));
5362 bcopy(ap->a_target, datap, len);
5363
5364 if (hfsmp->jnl) {
5365 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
5366 } else {
5367 buf_bawrite(bp);
5368 }
5369 out:
5370 if (started_tr)
5371 hfs_end_transaction(hfsmp);
5372 if ((cp != NULL) && (vp != NULL)) {
5373 hfs_unlock(cp);
5374 }
5375 if (error) {
5376 if (vp) {
5377 vnode_put(vp);
5378 }
5379 *vpp = NULL;
5380 }
5381 return (error);
5382 }
5383
5384
5385 /* structures to hold a "." or ".." directory entry */
5386 struct hfs_stddotentry {
5387 u_int32_t d_fileno; /* unique file number */
5388 u_int16_t d_reclen; /* length of this structure */
5389 u_int8_t d_type; /* dirent file type */
5390 u_int8_t d_namlen; /* len of filename */
5391 char d_name[4]; /* "." or ".." */
5392 };
5393
5394 struct hfs_extdotentry {
5395 u_int64_t d_fileno; /* unique file number */
5396 u_int64_t d_seekoff; /* seek offset (optional, used by servers) */
5397 u_int16_t d_reclen; /* length of this structure */
5398 u_int16_t d_namlen; /* len of filename */
5399 u_int8_t d_type; /* dirent file type */
5400 u_char d_name[3]; /* "." or ".." */
5401 };
5402
5403 typedef union {
5404 struct hfs_stddotentry std;
5405 struct hfs_extdotentry ext;
5406 } hfs_dotentry_t;
5407
5408 /*
5409 * hfs_vnop_readdir reads directory entries into the buffer pointed
5410 * to by uio, in a filesystem independent format. Up to uio_resid
5411 * bytes of data can be transferred. The data in the buffer is a
5412 * series of packed dirent structures where each one contains the
5413 * following entries:
5414 *
5415 * u_int32_t d_fileno; // file number of entry
5416 * u_int16_t d_reclen; // length of this record
5417 * u_int8_t d_type; // file type
5418 * u_int8_t d_namlen; // length of string in d_name
5419 * char d_name[MAXNAMELEN+1]; // null terminated file name
5420 *
5421 * The current position (uio_offset) refers to the next block of
5422 * entries. The offset can only be set to a value previously
5423 * returned by hfs_vnop_readdir or zero. This offset does not have
5424 * to match the number of bytes returned (in uio_resid).
5425 *
5426 * In fact, the offset used by HFS is essentially an index (26 bits)
5427 * with a tag (6 bits). The tag is for associating the next request
5428 * with the current request. This enables us to have multiple threads
5429 * reading the directory while the directory is also being modified.
5430 *
5431 * Each tag/index pair is tied to a unique directory hint. The hint
5432 * contains information (filename) needed to build the catalog b-tree
5433 * key for finding the next set of entries.
5434 *
5435 * If the directory is marked as deleted-but-in-use (cp->c_flag & C_DELETED),
5436 * do NOT synthesize entries for "." and "..".
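 *
 * Editor's sketch of the offset encoding (assuming, per the description
 * above, that HFS_INDEX_MASK covers the low 26 index bits and the tag
 * occupies the bits above them):
 *
 *     index = (offset & HFS_INDEX_MASK) - 2;  // "." and ".." use slots 0 and 1
 *     tag   =  offset & ~HFS_INDEX_MASK;      // ties the request to a dirhint
 *     ...advance index by the number of entries returned...
 *     offset = (index + 2) | tag;             // value handed back in uio_offset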
5437 */
5438 int
5439 hfs_vnop_readdir(ap)
5440 struct vnop_readdir_args /* {
5441 vnode_t a_vp;
5442 uio_t a_uio;
5443 int a_flags;
5444 int *a_eofflag;
5445 int *a_numdirent;
5446 vfs_context_t a_context;
5447 } */ *ap;
5448 {
5449 struct vnode *vp = ap->a_vp;
5450 uio_t uio = ap->a_uio;
5451 struct cnode *cp;
5452 struct hfsmount *hfsmp;
5453 directoryhint_t *dirhint = NULL;
5454 directoryhint_t localhint;
5455 off_t offset;
5456 off_t startoffset;
5457 int error = 0;
5458 int eofflag = 0;
5459 user_addr_t user_start = 0;
5460 user_size_t user_len = 0;
5461 int index;
5462 unsigned int tag;
5463 int items;
5464 int lockflags;
5465 int extended;
5466 int nfs_cookies;
5467 cnid_t cnid_hint = 0;
5468 int bump_valence = 0;
5469
5470 items = 0;
5471 startoffset = offset = uio_offset(uio);
5472 extended = (ap->a_flags & VNODE_READDIR_EXTENDED);
5473 nfs_cookies = extended && (ap->a_flags & VNODE_READDIR_REQSEEKOFF);
5474
5475 /* Sanity check the uio data. */
5476 if (uio_iovcnt(uio) > 1)
5477 return (EINVAL);
5478
5479 if (VTOC(vp)->c_bsdflags & UF_COMPRESSED) {
5480 int compressed = hfs_file_is_compressed(VTOC(vp), 0); /* 0 == take the cnode lock */
5481 if (VTOCMP(vp) != NULL && !compressed) {
5482 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
5483 if (error) {
5484 return error;
5485 }
5486 }
5487 }
5488
5489 cp = VTOC(vp);
5490 hfsmp = VTOHFS(vp);
5491
5492 /* Note that the dirhint calls require an exclusive lock. */
5493 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
5494 return (error);
5495
5496 /* Pick up cnid hint (if any). */
5497 if (nfs_cookies) {
5498 cnid_hint = (cnid_t)(uio_offset(uio) >> 32);
5499 uio_setoffset(uio, uio_offset(uio) & 0x00000000ffffffffLL);
5500 if (cnid_hint == INT_MAX) { /* searching past the last item */
5501 eofflag = 1;
5502 goto out;
5503 }
5504 }
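/*
 * Editor's sketch of the NFS cookie layout implied above: the upper
 * 32 bits of the 64-bit offset carry a cnid hint and the lower 32
 * bits carry the regular readdir offset, i.e. roughly
 *
 *     cookie = ((u_int64_t)cnid_hint << 32) | (dir_offset & 0xffffffffULL);
 */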
5505 /*
5506 * Synthesize entries for "." and "..", unless the directory has
5507 * been deleted, but not closed yet (lazy delete in progress).
5508 */
5509 if (offset == 0 && !(cp->c_flag & C_DELETED)) {
5510 hfs_dotentry_t dotentry[2];
5511 size_t uiosize;
5512
5513 if (extended) {
5514 struct hfs_extdotentry *entry = &dotentry[0].ext;
5515
5516 entry->d_fileno = cp->c_cnid;
5517 entry->d_reclen = sizeof(struct hfs_extdotentry);
5518 entry->d_type = DT_DIR;
5519 entry->d_namlen = 1;
5520 entry->d_name[0] = '.';
5521 entry->d_name[1] = '\0';
5522 entry->d_name[2] = '\0';
5523 entry->d_seekoff = 1;
5524
5525 ++entry;
5526 entry->d_fileno = cp->c_parentcnid;
5527 entry->d_reclen = sizeof(struct hfs_extdotentry);
5528 entry->d_type = DT_DIR;
5529 entry->d_namlen = 2;
5530 entry->d_name[0] = '.';
5531 entry->d_name[1] = '.';
5532 entry->d_name[2] = '\0';
5533 entry->d_seekoff = 2;
5534 uiosize = 2 * sizeof(struct hfs_extdotentry);
5535 } else {
5536 struct hfs_stddotentry *entry = &dotentry[0].std;
5537
5538 entry->d_fileno = cp->c_cnid;
5539 entry->d_reclen = sizeof(struct hfs_stddotentry);
5540 entry->d_type = DT_DIR;
5541 entry->d_namlen = 1;
5542 *(int *)&entry->d_name[0] = 0;
5543 entry->d_name[0] = '.';
5544
5545 ++entry;
5546 entry->d_fileno = cp->c_parentcnid;
5547 entry->d_reclen = sizeof(struct hfs_stddotentry);
5548 entry->d_type = DT_DIR;
5549 entry->d_namlen = 2;
5550 *(int *)&entry->d_name[0] = 0;
5551 entry->d_name[0] = '.';
5552 entry->d_name[1] = '.';
5553 uiosize = 2 * sizeof(struct hfs_stddotentry);
5554 }
5555 if ((error = uiomove((caddr_t)&dotentry, uiosize, uio))) {
5556 goto out;
5557 }
5558 offset += 2;
5559 }
5560
5561 /*
5562 * Intentionally avoid checking the valence here. If we
5563 * have FS corruption that reports the valence is 0, even though it
5564 * has contents, we might artificially skip over iterating
5565 * this directory.
5566 */
5567
5568 //
5569 // We have to lock the user's buffer here so that we won't
5570 // fault on it after we've acquired a shared lock on the
5571 // catalog file. The issue is that you can get a 3-way
5572 // deadlock if someone else starts a transaction and then
5573 // tries to lock the catalog file but can't because we're
5574 // here and we can't service our page fault because VM is
5575 // blocked trying to start a transaction as a result of
5576 // trying to free up pages for our page fault. It's messy
5577 // but it does happen on dual-processors that are paging
5578 // heavily (see radar 3082639 for more info). By locking
5579 // the buffer up-front we prevent ourselves from faulting
5580 // while holding the shared catalog file lock.
5581 //
5582 // Fortunately this and hfs_search() are the only two places
5583 // currently (10/30/02) that can fault on user data with a
5584 // shared lock on the catalog file.
5585 //
5586 if (hfsmp->jnl && uio_isuserspace(uio)) {
5587 user_start = uio_curriovbase(uio);
5588 user_len = uio_curriovlen(uio);
5589
5590 if ((error = vslock(user_start, user_len)) != 0) {
5591 user_start = 0;
5592 goto out;
5593 }
5594 }
5595 /* Convert offset into a catalog directory index. */
5596 index = (offset & HFS_INDEX_MASK) - 2;
5597 tag = offset & ~HFS_INDEX_MASK;
5598
5599 /* Lock catalog during cat_findname and cat_getdirentries. */
5600 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
5601
5602 /* When called from NFS, try and resolve a cnid hint. */
5603 if (nfs_cookies && cnid_hint != 0) {
5604 if (cat_findname(hfsmp, cnid_hint, &localhint.dh_desc) == 0) {
5605 if ( localhint.dh_desc.cd_parentcnid == cp->c_fileid) {
5606 localhint.dh_index = index - 1;
5607 localhint.dh_time = 0;
5608 bzero(&localhint.dh_link, sizeof(localhint.dh_link));
5609 dirhint = &localhint; /* don't forget to release the descriptor */
5610 } else {
5611 cat_releasedesc(&localhint.dh_desc);
5612 }
5613 }
5614 }
5615
5616 /* Get a directory hint (cnode must be locked exclusive) */
5617 if (dirhint == NULL) {
5618 dirhint = hfs_getdirhint(cp, ((index - 1) & HFS_INDEX_MASK) | tag, 0);
5619
5620 /* Hide tag from catalog layer. */
5621 dirhint->dh_index &= HFS_INDEX_MASK;
5622 if (dirhint->dh_index == HFS_INDEX_MASK) {
5623 dirhint->dh_index = -1;
5624 }
5625 }
5626
5627 if (index == 0) {
5628 dirhint->dh_threadhint = cp->c_dirthreadhint;
5629 }
5630 else {
5631 /*
5632 * If we have a non-zero index, there is a possibility that during the last
5633 * call to hfs_vnop_readdir we hit EOF for this directory. If that is the case
5634 * then we don't want to return any new entries for the caller. Just return 0
5635 * items, mark the eofflag, and bail out. Because we won't have done any work, the
5636 * code at the end of the function will release the dirhint for us.
5637 *
5638 * Don't forget to unlock the catalog lock on the way out, too.
5639 */
5640 if (dirhint->dh_desc.cd_flags & CD_EOF) {
5641 error = 0;
5642 eofflag = 1;
5643 uio_setoffset(uio, startoffset);
5644 hfs_systemfile_unlock (hfsmp, lockflags);
5645
5646 goto seekoffcalc;
5647 }
5648 }
5649
5650 /* Pack the buffer with dirent entries. */
5651 error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, ap->a_flags, &items, &eofflag);
5652
5653 if (index == 0 && error == 0) {
5654 cp->c_dirthreadhint = dirhint->dh_threadhint;
5655 }
5656
5657 hfs_systemfile_unlock(hfsmp, lockflags);
5658
5659 if (error != 0) {
5660 goto out;
5661 }
5662
5663 /* Get index to the next item */
5664 index += items;
5665
5666 if (items >= (int)cp->c_entries) {
5667 eofflag = 1;
5668 }
5669
5670 /*
5671 * Detect valence FS corruption.
5672 *
5673 * We are holding the cnode lock exclusive, so there should not be
5674 * anybody modifying the valence field of this cnode. If we enter
5675 * this block, that means we observed filesystem corruption, because
5676 * this directory reported a valence of 0, yet we found at least one
5677 * item. In this case, we need to minimally self-heal this
5678 * directory to prevent userland from tripping over a directory
5679 * that appears empty (getattr of valence reports 0), but actually
5680 * has contents.
5681 *
5682 * We'll force the cnode update at the end of the function after
5683 * completing all of the normal getdirentries steps.
5684 */
5685 if ((cp->c_entries == 0) && (items > 0)) {
5686 /* disk corruption */
5687 cp->c_entries++;
5688 /* Mark the cnode as dirty. */
5689 cp->c_flag |= (C_MODIFIED | C_FORCEUPDATE);
5690 printf("hfs_vnop_readdir: repairing valence to non-zero! \n");
5691 bump_valence++;
5692 }
5693
5694
5695 /* Convert catalog directory index back into an offset. */
5696 while (tag == 0)
5697 tag = (++cp->c_dirhinttag) << HFS_INDEX_BITS;
5698 uio_setoffset(uio, (index + 2) | tag);
5699 dirhint->dh_index |= tag;
5700
5701 seekoffcalc:
5702 cp->c_touch_acctime = TRUE;
5703
5704 if (ap->a_numdirent) {
5705 if (startoffset == 0)
5706 items += 2;
5707 *ap->a_numdirent = items;
5708 }
5709
5710 out:
5711 if (user_start) {
5712 vsunlock(user_start, user_len, TRUE);
5713 }
5714 /* If we didn't do anything then go ahead and dump the hint. */
5715 if ((dirhint != NULL) &&
5716 (dirhint != &localhint) &&
5717 (uio_offset(uio) == startoffset)) {
5718 hfs_reldirhint(cp, dirhint);
5719 eofflag = 1;
5720 }
5721 if (ap->a_eofflag) {
5722 *ap->a_eofflag = eofflag;
5723 }
5724 if (dirhint == &localhint) {
5725 cat_releasedesc(&localhint.dh_desc);
5726 }
5727
5728 if (bump_valence) {
5729 /* force the update before dropping the cnode lock*/
5730 hfs_update(vp, 0);
5731 }
5732
5733 hfs_unlock(cp);
5734
5735 return (error);
5736 }
5737
5738
5739 /*
5740 * Read contents of a symbolic link.
5741 */
5742 int
5743 hfs_vnop_readlink(ap)
5744 struct vnop_readlink_args /* {
5745 struct vnode *a_vp;
5746 struct uio *a_uio;
5747 vfs_context_t a_context;
5748 } */ *ap;
5749 {
5750 struct vnode *vp = ap->a_vp;
5751 struct cnode *cp;
5752 struct filefork *fp;
5753 int error;
5754
5755 if (!vnode_islnk(vp))
5756 return (EINVAL);
5757
5758 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
5759 return (error);
5760 cp = VTOC(vp);
5761 fp = VTOF(vp);
5762
5763 /* Zero-length symlinks are not allowed */
5764 if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
5765 error = EINVAL;
5766 goto exit;
5767 }
5768
5769 /* Cache the path so we don't waste buffer cache resources */
5770 if (fp->ff_symlinkptr == NULL) {
5771 struct buf *bp = NULL;
5772
5773 MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK);
5774 if (fp->ff_symlinkptr == NULL) {
5775 error = ENOMEM;
5776 goto exit;
5777 }
5778 error = (int)buf_meta_bread(vp, (daddr64_t)0,
5779 roundup((int)fp->ff_size, VTOHFS(vp)->hfs_physical_block_size),
5780 vfs_context_ucred(ap->a_context), &bp);
5781 if (error) {
5782 if (bp)
5783 buf_brelse(bp);
5784 if (fp->ff_symlinkptr) {
5785 FREE(fp->ff_symlinkptr, M_TEMP);
5786 fp->ff_symlinkptr = NULL;
5787 }
5788 goto exit;
5789 }
5790 bcopy((char *)buf_dataptr(bp), fp->ff_symlinkptr, (size_t)fp->ff_size);
5791
5792 if (VTOHFS(vp)->jnl && (buf_flags(bp) & B_LOCKED) == 0) {
5793 buf_markinvalid(bp); /* data no longer needed */
5794 }
5795 buf_brelse(bp);
5796 }
5797 error = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
5798
5799 /*
5800 * Keep track of blocks read
5801 */
5802 if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (error == 0)) {
5803
5804 /*
5805 * If this file hasn't been seen since the start of
5806 * the current sampling period then start over.
5807 */
5808 if (cp->c_atime < VTOHFS(vp)->hfc_timebase)
5809 VTOF(vp)->ff_bytesread = fp->ff_size;
5810 else
5811 VTOF(vp)->ff_bytesread += fp->ff_size;
5812
5813 // if (VTOF(vp)->ff_bytesread > fp->ff_size)
5814 // cp->c_touch_acctime = TRUE;
5815 }
5816
5817 exit:
5818 hfs_unlock(cp);
5819 return (error);
5820 }
5821
5822
5823 /*
5824 * Get configurable pathname variables.
5825 */
5826 int
5827 hfs_vnop_pathconf(ap)
5828 struct vnop_pathconf_args /* {
5829 struct vnode *a_vp;
5830 int a_name;
5831 int *a_retval;
5832 vfs_context_t a_context;
5833 } */ *ap;
5834 {
5835
5836 int std_hfs = (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD);
5837 switch (ap->a_name) {
5838 case _PC_LINK_MAX:
5839 if (std_hfs == 0){
5840 *ap->a_retval = HFS_LINK_MAX;
5841 }
5842 #if CONFIG_HFS_STD
5843 else {
5844 *ap->a_retval = 1;
5845 }
5846 #endif
5847 break;
5848 case _PC_NAME_MAX:
5849 if (std_hfs == 0) {
5850 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
5851 }
5852 #if CONFIG_HFS_STD
5853 else {
5854 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
5855 }
5856 #endif
5857 break;
5858 case _PC_PATH_MAX:
5859 *ap->a_retval = PATH_MAX; /* 1024 */
5860 break;
5861 case _PC_PIPE_BUF:
5862 *ap->a_retval = PIPE_BUF;
5863 break;
5864 case _PC_CHOWN_RESTRICTED:
5865 *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */
5866 break;
5867 case _PC_NO_TRUNC:
5868 *ap->a_retval = 200112; /* _POSIX_NO_TRUNC */
5869 break;
5870 case _PC_NAME_CHARS_MAX:
5871 if (std_hfs == 0) {
5872 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
5873 }
5874 #if CONFIG_HFS_STD
5875 else {
5876 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
5877 }
5878 #endif
5879 break;
5880 case _PC_CASE_SENSITIVE:
5881 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE)
5882 *ap->a_retval = 1;
5883 else
5884 *ap->a_retval = 0;
5885 break;
5886 case _PC_CASE_PRESERVING:
5887 *ap->a_retval = 1;
5888 break;
5889 case _PC_FILESIZEBITS:
5890 /* number of bits to store max file size */
5891 if (std_hfs == 0) {
5892 *ap->a_retval = 64;
5893 }
5894 #if CONFIG_HFS_STD
5895 else {
5896 *ap->a_retval = 32;
5897 }
5898 #endif
5899 break;
5900 case _PC_XATTR_SIZE_BITS:
5901 /* Number of bits to store maximum extended attribute size */
5902 *ap->a_retval = HFS_XATTR_SIZE_BITS;
5903 break;
5904 default:
5905 return (EINVAL);
5906 }
5907
5908 return (0);
5909 }
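
/*
 * Userland view of the values above (editor's sketch; the volume path is
 * hypothetical): they surface through pathconf(2), e.g.
 *
 *     long cs = pathconf("/Volumes/Example", _PC_CASE_SENSITIVE);
 *     // 1 on case-sensitive (HFSX) volumes, 0 otherwise, -1 with errno on error
 */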
5910
5911 /*
5912 * Prepares a fork for cat_update by making sure ff_size and ff_blocks
5913 * are no bigger than the valid data on disk, thus reducing the chance
5914 * of exposing uninitialised data in the event of an unclean unmount.
5915 * fork_buf is where to put the temporary copy if required. (It can
5916 * be inside pfork.)
5917 */
5918 static const struct cat_fork *
5919 hfs_prepare_fork_for_update(const filefork_t *pfork,
5920 struct cat_fork *fork_buf,
5921 uint32_t block_size)
5922 {
5923 if (!pfork)
5924 return NULL;
5925
5926 off_t max_size = pfork->ff_size;
5927
5928 // Check first invalid range
5929 if (!TAILQ_EMPTY(&pfork->ff_invalidranges))
5930 max_size = TAILQ_FIRST(&pfork->ff_invalidranges)->rl_start;
5931
5932 if (!pfork->ff_unallocblocks && pfork->ff_size <= max_size)
5933 return &pfork->ff_data; // Nothing to do
5934
5935 if (pfork->ff_blocks < pfork->ff_unallocblocks) {
5936 panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
5937 pfork->ff_blocks, pfork->ff_unallocblocks);
5938 }
5939
5940 struct cat_fork *out = fork_buf;
5941
5942 if (out != &pfork->ff_data)
5943 bcopy(&pfork->ff_data, out, sizeof(*out));
5944
5945 // Adjust cf_blocks for cf_vblocks
5946 out->cf_blocks -= out->cf_vblocks;
5947
5948 /*
5949 * We have to trim the size with the updated cf_blocks. You might
5950 * think that this is unnecessary because the invalid ranges
5951 * should catch this, but we update invalid ranges *before* we do
5952 * I/O whereas cf_vblocks is updated later in hfs_vnop_blockmap.
5953 * There might still be a chance that we will be exposing
5954 * uninitialised data because the metadata takes a different path to the
5955 * data, but the window should be tiny (if it exists at all).
5956 */
5957 off_t alloc_bytes = hfs_blk_to_bytes(out->cf_blocks, block_size);
5958 if (out->cf_size > alloc_bytes)
5959 out->cf_size = alloc_bytes;
5960
5961 // Trim cf_size to first invalid range
5962 if (out->cf_size > max_size)
5963 out->cf_size = max_size;
5964
5965 return out;
5966 }
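
/*
 * Worked example for hfs_prepare_fork_for_update (editor's note; the
 * numbers are made up): with block_size = 4096, cf_blocks = 10,
 * cf_vblocks = 3 (borrowed/unallocated) and cf_size = 40000, with no
 * invalid ranges, the copy handed to cat_update gets
 *
 *     cf_blocks   = 10 - 3 = 7
 *     alloc_bytes = 7 * 4096 = 28672
 *     cf_size     = MIN(40000, 28672) = 28672
 *
 * so the on-disk record never claims more data than has actually been
 * allocated. If an invalid range started earlier, cf_size would be
 * trimmed further, down to that range's start.
 */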
5967
5968 /*
5969 * Update a cnode's on-disk metadata.
5970 *
5971 * If waitfor is set, then wait for the disk write of the node to
5972 * complete (note: waitfor is currently not enforced; see the XXX below).
5973 *
5974 * The cnode must be locked exclusive
5975 */
5976 int
5977 hfs_update(struct vnode *vp, __unused int waitfor)
5978 {
5979 struct cnode *cp = VTOC(vp);
5980 struct proc *p;
5981 const struct cat_fork *dataforkp = NULL;
5982 const struct cat_fork *rsrcforkp = NULL;
5983 struct cat_fork datafork;
5984 struct cat_fork rsrcfork;
5985 struct hfsmount *hfsmp;
5986 int lockflags;
5987 int error;
5988 uint32_t tstate = 0;
5989
5990 p = current_proc();
5991 hfsmp = VTOHFS(vp);
5992
5993 if (((vnode_issystem(vp) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))) ||
5994 hfsmp->hfs_catalog_vp == NULL){
5995 return (0);
5996 }
5997 if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (cp->c_mode == 0)) {
5998 cp->c_flag &= ~C_MODIFIED;
5999 cp->c_touch_acctime = 0;
6000 cp->c_touch_chgtime = 0;
6001 cp->c_touch_modtime = 0;
6002 return (0);
6003 }
6004 if (kdebug_enable) {
6005 if (cp->c_touch_acctime)
6006 tstate |= DBG_HFS_UPDATE_ACCTIME;
6007 if (cp->c_touch_modtime)
6008 tstate |= DBG_HFS_UPDATE_MODTIME;
6009 if (cp->c_touch_chgtime)
6010 tstate |= DBG_HFS_UPDATE_CHGTIME;
6011
6012 if (cp->c_flag & C_MODIFIED)
6013 tstate |= DBG_HFS_UPDATE_MODIFIED;
6014 if (cp->c_flag & C_FORCEUPDATE)
6015 tstate |= DBG_HFS_UPDATE_FORCE;
6016 if (cp->c_flag & C_NEEDS_DATEADDED)
6017 tstate |= DBG_HFS_UPDATE_DATEADDED;
6018 }
6019 hfs_touchtimes(hfsmp, cp);
6020
6021 /* Nothing to update. */
6022 if ((cp->c_flag & (C_MODIFIED | C_FORCEUPDATE)) == 0) {
6023 return (0);
6024 }
6025
6026 if (cp->c_datafork)
6027 dataforkp = &cp->c_datafork->ff_data;
6028 if (cp->c_rsrcfork)
6029 rsrcforkp = &cp->c_rsrcfork->ff_data;
6030
6031 /*
6032 * For delayed allocations, updates are
6033 * postponed until an fsync or until the file
6034 * gets written to disk.
6035 *
6036 * Deleted files can defer meta data updates until inactive.
6037 *
6038 * If we're ever called with the C_FORCEUPDATE flag though
6039 * we have to do the update.
6040 */
6041 if (ISSET(cp->c_flag, C_FORCEUPDATE) == 0 &&
6042 (ISSET(cp->c_flag, C_DELETED) ||
6043 (dataforkp && cp->c_datafork->ff_unallocblocks) ||
6044 (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks))) {
6045 // cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE);
6046 cp->c_flag |= C_MODIFIED;
6047
6048 return (0);
6049 }
6050
6051 KERNEL_DEBUG_CONSTANT(HFSDBG_UPDATE | DBG_FUNC_START, VM_KERNEL_ADDRPERM(vp), tstate, 0, 0, 0);
6052
6053 if ((error = hfs_start_transaction(hfsmp)) != 0) {
6054 KERNEL_DEBUG_CONSTANT(HFSDBG_UPDATE | DBG_FUNC_END, VM_KERNEL_ADDRPERM(vp), tstate, error, -1, 0);
6055 return error;
6056 }
6057
6058 /*
6059 * Modify the values passed to cat_update based on whether or not
6060 * the file has invalid ranges or borrowed blocks.
6061 */
6062 dataforkp = hfs_prepare_fork_for_update(cp->c_datafork, &datafork, hfsmp->blockSize);
6063 rsrcforkp = hfs_prepare_fork_for_update(cp->c_rsrcfork, &rsrcfork, hfsmp->blockSize);
6064
6065 if (kdebug_enable) {
6066 long dbg_parms[NUMPARMS];
6067 int dbg_namelen;
6068
6069 dbg_namelen = NUMPARMS * sizeof(long);
6070 vn_getpath(vp, (char *)dbg_parms, &dbg_namelen);
6071
6072 if (dbg_namelen < (int)sizeof(dbg_parms))
6073 memset((char *)dbg_parms + dbg_namelen, 0, sizeof(dbg_parms) - dbg_namelen);
6074
6075 kdebug_lookup_gen_events(dbg_parms, dbg_namelen, (void *)vp, TRUE);
6076 }
6077
6078 /*
6079 * Lock the Catalog b-tree file.
6080 */
6081 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
6082
6083 /* XXX - waitfor is not enforced */
6084 error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);
6085
6086 hfs_systemfile_unlock(hfsmp, lockflags);
6087
6088 /* After the updates are finished, clear the flags */
6089 cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE);
6090
6091 hfs_end_transaction(hfsmp);
6092
6093 KERNEL_DEBUG_CONSTANT(HFSDBG_UPDATE | DBG_FUNC_END, VM_KERNEL_ADDRPERM(vp), tstate, error, 0, 0);
6094
6095 return (error);
6096 }
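
/*
 * Typical caller pattern (editor's sketch, mirroring usage elsewhere in
 * this file): mark the cnode first, then flush the catalog record:
 *
 *     cp->c_touch_chgtime = TRUE;
 *     cp->c_flag |= C_MODIFIED;
 *     (void) hfs_update(vp, 0);
 */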
6097
6098 /*
6099 * Allocate a new node
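 *
 * (Editor's note) hfs_vnop_mkdir and hfs_vnop_symlink, among others,
 * funnel through here; a caller passes the parent vnode, the component
 * name and the requested attributes, e.g.
 *
 *     error = hfs_makenode(dvp, &vp, cnp, vap, ctx);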
6100 */
6101 int
6102 hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
6103 struct vnode_attr *vap, vfs_context_t ctx)
6104 {
6105 struct cnode *cp = NULL;
6106 struct cnode *dcp = NULL;
6107 struct vnode *tvp;
6108 struct hfsmount *hfsmp;
6109 struct cat_desc in_desc, out_desc;
6110 struct cat_attr attr;
6111 struct timeval tv;
6112 int lockflags;
6113 int error, started_tr = 0;
6114 enum vtype vnodetype;
6115 int mode;
6116 int newvnode_flags = 0;
6117 u_int32_t gnv_flags = 0;
6118 int protectable_target = 0;
6119 int nocache = 0;
6120
6121 #if CONFIG_PROTECT
6122 struct cprotect *entry = NULL;
6123 int32_t cp_class = -1;
6124
6125 /*
6126 * By default, it's OK for AKS to override our target class preferences.
6127 */
6128 uint32_t keywrap_flags = CP_KEYWRAP_DIFFCLASS;
6129
6130 if (VATTR_IS_ACTIVE(vap, va_dataprotect_class)) {
6131 cp_class = (int32_t)vap->va_dataprotect_class;
6132 /*
6133 * Since the user specifically requested this target class be used,
6134 * we want to fail this creation operation if we cannot wrap to their
6135 * target class. The CP_KEYWRAP_DIFFCLASS bit says that it is OK to
6136 * use a different class than the one specified, so we turn that off
6137 * now.
6138 */
6139 keywrap_flags &= ~CP_KEYWRAP_DIFFCLASS;
6140 }
6141 int protected_mount = 0;
6142 #endif
6143
6144
6145 if ((error = hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
6146 return (error);
6147
6148 /* set the cnode pointer only after successfully acquiring lock */
6149 dcp = VTOC(dvp);
6150
6151 /* Don't allow creation of new entries in open-unlinked directories */
6152 if ((error = hfs_checkdeleted(dcp))) {
6153 hfs_unlock(dcp);
6154 return error;
6155 }
6156
6157 dcp->c_flag |= C_DIR_MODIFICATION;
6158
6159 hfsmp = VTOHFS(dvp);
6160
6161 *vpp = NULL;
6162 tvp = NULL;
6163 out_desc.cd_flags = 0;
6164 out_desc.cd_nameptr = NULL;
6165
6166 vnodetype = vap->va_type;
6167 if (vnodetype == VNON)
6168 vnodetype = VREG;
6169 mode = MAKEIMODE(vnodetype, vap->va_mode);
6170
6171 if (S_ISDIR (mode) || S_ISREG (mode)) {
6172 protectable_target = 1;
6173 }
6174
6175
6176 /* Check if we're out of usable disk space. */
6177 if ((hfs_freeblks(hfsmp, 1) == 0) && (vfs_context_suser(ctx) != 0)) {
6178 error = ENOSPC;
6179 goto exit;
6180 }
6181
6182 microtime(&tv);
6183
6184 /* Setup the default attributes */
6185 bzero(&attr, sizeof(attr));
6186 attr.ca_mode = mode;
6187 attr.ca_linkcount = 1;
6188 if (VATTR_IS_ACTIVE(vap, va_rdev)) {
6189 attr.ca_rdev = vap->va_rdev;
6190 }
6191 if (VATTR_IS_ACTIVE(vap, va_create_time)) {
6192 VATTR_SET_SUPPORTED(vap, va_create_time);
6193 attr.ca_itime = vap->va_create_time.tv_sec;
6194 } else {
6195 attr.ca_itime = tv.tv_sec;
6196 }
6197 #if CONFIG_HFS_STD
6198 if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
6199 attr.ca_itime += 3600; /* Same as what hfs_update does */
6200 }
6201 #endif
6202 attr.ca_atime = attr.ca_ctime = attr.ca_mtime = attr.ca_itime;
6203 attr.ca_atimeondisk = attr.ca_atime;
6204 if (VATTR_IS_ACTIVE(vap, va_flags)) {
6205 VATTR_SET_SUPPORTED(vap, va_flags);
6206 attr.ca_flags = vap->va_flags;
6207 }
6208
6209 /*
6210 * HFS+ only: all files get ThreadExists
6211 * HFSX only: dirs get HasFolderCount
6212 */
6213 if (!(hfsmp->hfs_flags & HFS_STANDARD)) {
6214 if (vnodetype == VDIR) {
6215 if (hfsmp->hfs_flags & HFS_FOLDERCOUNT)
6216 attr.ca_recflags = kHFSHasFolderCountMask;
6217 } else {
6218 attr.ca_recflags = kHFSThreadExistsMask;
6219 }
6220 }
6221
6222 #if CONFIG_PROTECT
6223 if (cp_fs_protected(hfsmp->hfs_mp)) {
6224 protected_mount = 1;
6225 }
6226 /*
6227 * On a content-protected HFS+/HFSX filesystem, files and directories
6228 * cannot be created without atomically setting/creating the EA that
6229 * contains the protection class metadata and keys at the same time, in
6230 * the same transaction. As a result, pre-set the "EAs exist" flag
6231 * on the cat_attr for protectable catalog record creations. This will
6232 * cause the cnode creation routine in hfs_getnewvnode to mark the cnode
6233 * as having EAs.
6234 */
6235 if ((protected_mount) && (protectable_target)) {
6236 attr.ca_recflags |= kHFSHasAttributesMask;
6237 /* delay entering in the namecache */
6238 nocache = 1;
6239 }
6240 #endif
6241
6242
6243 /*
6244 * Add the date added to the item. See above, as
6245 * all of the dates are set to the itime.
6246 */
6247 hfs_write_dateadded (&attr, attr.ca_atime);
6248
6249 /* Initialize the gen counter to 1 */
6250 hfs_write_gencount(&attr, (uint32_t)1);
6251
6252 attr.ca_uid = vap->va_uid;
6253 attr.ca_gid = vap->va_gid;
6254 VATTR_SET_SUPPORTED(vap, va_mode);
6255 VATTR_SET_SUPPORTED(vap, va_uid);
6256 VATTR_SET_SUPPORTED(vap, va_gid);
6257
6258 #if QUOTA
6259 /* check to see if this node's creation would cause us to go over
6260 * quota. If so, abort this operation.
6261 */
6262 if (hfsmp->hfs_flags & HFS_QUOTAS) {
6263 if ((error = hfs_quotacheck(hfsmp, 1, attr.ca_uid, attr.ca_gid,
6264 vfs_context_ucred(ctx)))) {
6265 goto exit;
6266 }
6267 }
6268 #endif
6269
6270
6271 /* Tag symlinks with a type and creator. */
6272 if (vnodetype == VLNK) {
6273 struct FndrFileInfo *fip;
6274
6275 fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
6276 fip->fdType = SWAP_BE32(kSymLinkFileType);
6277 fip->fdCreator = SWAP_BE32(kSymLinkCreator);
6278 }
6279
6280 /* Setup the descriptor */
6281 in_desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
6282 in_desc.cd_namelen = cnp->cn_namelen;
6283 in_desc.cd_parentcnid = dcp->c_fileid;
6284 in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
6285 in_desc.cd_hint = dcp->c_childhint;
6286 in_desc.cd_encoding = 0;
6287
6288 #if CONFIG_PROTECT
6289 /*
6290 * To preserve file creation atomicity with regards to the content protection EA,
6291 * we must create the file in the catalog and then write out its EA in the same
6292 * transaction.
6293 *
6294 * We only denote the target class in this EA; key generation is not completed
6295 * until the file has been inserted into the catalog and will be done
6296 * in a separate transaction.
6297 */
6298 if ((protected_mount) && (protectable_target)) {
6299 error = cp_setup_newentry(hfsmp, dcp, cp_class, attr.ca_mode, &entry);
6300 if (error) {
6301 goto exit;
6302 }
6303 }
6304 #endif
6305
6306 if ((error = hfs_start_transaction(hfsmp)) != 0) {
6307 goto exit;
6308 }
6309 started_tr = 1;
6310
6311 // have to also lock the attribute file because cat_create() needs
6312 // to check that any fileID it wants to use does not have orphaned
6313 // attributes in it.
6314 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
6315 cnid_t new_id;
6316
6317 /* Reserve some space in the Catalog file. */
6318 if ((error = cat_preflight(hfsmp, CAT_CREATE, NULL, 0))) {
6319 hfs_systemfile_unlock(hfsmp, lockflags);
6320 goto exit;
6321 }
6322
6323 if ((error = cat_acquire_cnid(hfsmp, &new_id))) {
6324 hfs_systemfile_unlock (hfsmp, lockflags);
6325 goto exit;
6326 }
6327
6328 error = cat_create(hfsmp, new_id, &in_desc, &attr, &out_desc);
6329 if (error == 0) {
6330 /* Update the parent directory */
6331 dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
6332 dcp->c_entries++;
6333
6334 if (vnodetype == VDIR) {
6335 INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
6336 }
6337 dcp->c_dirchangecnt++;
6338 hfs_incr_gencount(dcp);
6339
6340 dcp->c_ctime = tv.tv_sec;
6341 dcp->c_mtime = tv.tv_sec;
6342 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
6343
6344 #if CONFIG_PROTECT
6345 /*
6346 * If we are creating a content protected file, now is when
6347 * we create the EA. We must create it in the same transaction
6348 * that creates the file. We can also guarantee that the file
6349 * MUST exist because we are still holding the catalog lock
6350 * at this point.
6351 */
6352 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
6353 error = cp_setxattr (NULL, entry, hfsmp, attr.ca_fileid, XATTR_CREATE);
6354
6355 if (error) {
6356 int delete_err;
6357 /*
6358 * If we fail the EA creation, then we need to delete the file.
6359 * Luckily, we are still holding all of the right locks.
6360 */
6361 delete_err = cat_delete (hfsmp, &out_desc, &attr);
6362 if (delete_err == 0) {
6363 /* Update the parent directory */
6364 if (dcp->c_entries > 0)
6365 dcp->c_entries--;
6366 dcp->c_dirchangecnt++;
6367 dcp->c_ctime = tv.tv_sec;
6368 dcp->c_mtime = tv.tv_sec;
6369 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
6370 }
6371
6372 /* Emit EINVAL if we fail to create EA*/
6373 error = EINVAL;
6374 }
6375 }
6376 #endif
6377 }
6378 hfs_systemfile_unlock(hfsmp, lockflags);
6379 if (error)
6380 goto exit;
6381
6382 /* Invalidate negative cache entries in the directory */
6383 if (dcp->c_flag & C_NEG_ENTRIES) {
6384 cache_purge_negatives(dvp);
6385 dcp->c_flag &= ~C_NEG_ENTRIES;
6386 }
6387
6388 hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
6389 (dcp->c_cnid == kHFSRootFolderID));
6390
6391 // XXXdbg
6392 // have to end the transaction here before we call hfs_getnewvnode()
6393 // because that can cause us to try and reclaim a vnode on a different
6394 // file system which could cause us to start a transaction which can
6395 // deadlock with someone on that other file system (since we could be
6396 // holding two transaction locks as well as various vnodes and we did
6397 // not obtain the locks on them in the proper order).
6398 //
6399 // NOTE: this means that if the quota check fails or we have to update
6400 // the change time on a block-special device that those changes
6401 // will happen as part of independent transactions.
6402 //
6403 if (started_tr) {
6404 hfs_end_transaction(hfsmp);
6405 started_tr = 0;
6406 }
6407
6408 #if CONFIG_PROTECT
6409 /*
6410 * At this point, we must have encountered success with writing the EA.
6411 * Destroy our temporary cprotect (which had no keys).
6412 */
6413
6414 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
6415 cp_entry_destroy (entry);
6416 entry = NULL;
6417 }
6418 #endif
6419 gnv_flags |= GNV_CREATE;
6420 if (nocache) {
6421 gnv_flags |= GNV_NOCACHE;
6422 }
6423
6424 /*
6425 * Create a vnode for the object just created.
6426 *
6427 * NOTE: Maintaining the cnode lock on the parent directory is important,
6428 * as it prevents race conditions where other threads want to look up entries
6429 * in the directory and/or add things as we are in the process of creating
6430 * the vnode below. However, this has the potential for causing a
6431 * double lock panic when dealing with shadow files on an HFS boot partition.
6432 * The panic could occur if we are not cleaning up after ourselves properly
6433 * when done with a shadow file or in the error cases. The error would occur if we
6434 * try to create a new vnode, and then end up reclaiming another shadow vnode to
6435 * create the new one. However, if everything is working properly, this should
6436 * be a non-issue as we would never enter that reclaim codepath.
6437 *
6438 * The cnode is locked on successful return.
6439 */
6440 error = hfs_getnewvnode(hfsmp, dvp, cnp, &out_desc, gnv_flags, &attr,
6441 NULL, &tvp, &newvnode_flags);
6442 if (error)
6443 goto exit;
6444
6445 cp = VTOC(tvp);
6446
6447 struct doc_tombstone *ut;
6448 ut = get_uthread_doc_tombstone();
6449 if ( ut->t_lastop_document_id != 0
6450 && ut->t_lastop_parent == dvp
6451 && ut->t_lastop_parent_vid == vnode_vid(dvp)
6452 && strcmp((char *)ut->t_lastop_filename, (char *)cp->c_desc.cd_nameptr) == 0) {
6453 struct FndrExtendedDirInfo *fip = (struct FndrExtendedDirInfo *)((char *)&cp->c_attr.ca_finderinfo + 16);
6454
6455 //printf("CREATE: preserving doc-id %lld on %s\n", ut->t_lastop_document_id, ut->t_lastop_filename);
6456 fip->document_id = (uint32_t)(ut->t_lastop_document_id & 0xffffffff);
6457
6458 cp->c_bsdflags |= UF_TRACKED;
6459 // mark the cnode dirty
6460 cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
6461
6462 if ((error = hfs_start_transaction(hfsmp)) == 0) {
6463 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
6464
6465 (void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
6466
6467 hfs_systemfile_unlock (hfsmp, lockflags);
6468 (void) hfs_end_transaction(hfsmp);
6469 }
6470
6471 clear_tombstone_docid(ut, hfsmp, cp); // will send the docid-changed fsevent
6472 } else if (ut->t_lastop_document_id != 0) {
6473 int len = cnp->cn_namelen;
6474 if (len == 0) {
6475 len = strlen(cnp->cn_nameptr);
6476 }
6477
6478 if (is_ignorable_temp_name(cnp->cn_nameptr, cnp->cn_namelen)) {
6479 // printf("CREATE: not clearing tombstone because %s is a temp name.\n", cnp->cn_nameptr);
6480 } else {
6481 // Clear the tombstone because the thread is not recreating the same path
6482 // printf("CREATE: clearing tombstone because %s is NOT a temp name.\n", cnp->cn_nameptr);
6483 clear_tombstone_docid(ut, hfsmp, NULL);
6484 }
6485 }
6486
6487 *vpp = tvp;
6488
6489 #if CONFIG_PROTECT
6490 /*
6491 * Now that we have a vnode-in-hand, generate keys for this namespace item.
6492 * If we fail to create the keys, then attempt to delete the item from the
6493 * namespace. If we can't delete the item, that's not desirable but also not fatal.
6494 * All of the places which deal with restoring/unwrapping keys must also be
6495 * prepared to encounter an entry that does not have keys.
6496 */
6497 if ((protectable_target) && (protected_mount)) {
6498 struct cprotect *keyed_entry = NULL;
6499
6500 if (cp->c_cpentry == NULL) {
6501 panic ("hfs_makenode: no cpentry for cnode (%p)", cp);
6502 }
6503
6504 error = cp_generate_keys (hfsmp, cp, CP_CLASS(cp->c_cpentry->cp_pclass), keywrap_flags, &keyed_entry);
6505 if (error == 0) {
6506 /*
6507 * Upon success, the keys were generated and written out.
6508 * Update the cp pointer in the cnode.
6509 */
6510 cp_replace_entry (cp, keyed_entry);
6511 if (nocache) {
6512 cache_enter (dvp, tvp, cnp);
6513 }
6514 }
6515 else {
6516 /* If key creation OR the setxattr failed, emit EPERM to userland */
6517 error = EPERM;
6518
6519 /*
6520 * Beware! This slightly violates the lock ordering for the
6521 * cnode/vnode 'tvp'. Ordinarily, you must acquire the truncate lock
6522 * which guards file size changes before acquiring the normal cnode lock
6523 * and calling hfs_removefile on an item.
6524 *
6525 * However, in this case, we are still holding the directory lock so
6526 * 'tvp' is not lookup-able and it was a newly created vnode so it
6527 * cannot have any content yet. The only reason we are initiating
6528 * the removefile is because we could not generate content protection keys
6529 * for this namespace item. Note also that we pass a '1' in the allow_dirs
6530 * argument for hfs_removefile because we may be creating a directory here.
6531 *
6532 * All this to say that while it is technically a violation it is
6533 * impossible to race with another thread for this cnode so it is safe.
6534 */
6535 int err = hfs_removefile (dvp, tvp, cnp, 0, 0, 1, NULL, 0);
6536 if (err) {
6537 printf("hfs_makenode: removefile failed (%d) for CP entry %p\n", err, tvp);
6538 }
6539
6540 /* Release the cnode lock and mark the vnode for termination */
6541 hfs_unlock (cp);
6542 err = vnode_recycle (tvp);
6543 if (err) {
6544 printf("hfs_makenode: vnode_recycle failed (%d) for CP entry %p\n", err, tvp);
6545 }
6546
6547 /* Drop the iocount on the new vnode to force reclamation/recycling */
6548 vnode_put (tvp);
6549 cp = NULL;
6550 *vpp = NULL;
6551 }
6552 }
6553 #endif
6554
6555 #if QUOTA
6556 /*
6557 * Once we create this vnode, we need to initialize its quota data
6558 * structures, if necessary. We know that it is OK to just go ahead and
6559 * initialize because we've already validated earlier (through the hfs_quotacheck
6560 * function) to see if creating this cnode/vnode would cause us to go over quota.
6561 */
6562 if (hfsmp->hfs_flags & HFS_QUOTAS) {
6563 if (cp) {
6564 /* cp could have been zeroed earlier */
6565 (void) hfs_getinoquota(cp);
6566 }
6567 }
6568 #endif
6569
6570 exit:
6571 cat_releasedesc(&out_desc);
6572
6573 #if CONFIG_PROTECT
6574 /*
6575 * We may have jumped here in error-handling various situations above.
6576 * If we haven't already dumped the temporary CP used to initialize
6577 * the file atomically, then free it now. cp_entry_destroy should null
6578 * out the pointer if it was called already.
6579 */
6580 if (entry) {
6581 cp_entry_destroy (entry);
6582 entry = NULL;
6583 }
6584 #endif
6585
6586 /*
6587 * Make sure we release cnode lock on dcp.
6588 */
6589 if (dcp) {
6590 dcp->c_flag &= ~C_DIR_MODIFICATION;
6591 wakeup((caddr_t)&dcp->c_flag);
6592
6593 hfs_unlock(dcp);
6594 }
6595 if (error == 0 && cp != NULL) {
6596 hfs_unlock(cp);
6597 }
6598 if (started_tr) {
6599 hfs_end_transaction(hfsmp);
6600 started_tr = 0;
6601 }
6602
6603 return (error);
6604 }
6605
6606
6607 /*
6608 * hfs_vgetrsrc acquires a resource fork vnode corresponding to the
6609 * cnode that is found in 'vp'. The cnode should be locked upon entry
6610 * and will be returned locked, but it may be dropped temporarily.
6611 *
6612 * On success, *rvpp will hold the resource fork vnode with an
6613 * iocount. *Don't* forget the vnode_put.
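 *
 * A minimal usage sketch (editor's note; error handling elided):
 *
 *     struct vnode *rvp;
 *     if (hfs_vgetrsrc(hfsmp, vp, &rvp) == 0) {
 *         ... use rvp while the cnode is still locked ...
 *         vnode_put(rvp);
 *     }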
6614 */
6615 int
6616 hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp)
6617 {
6618 struct vnode *rvp;
6619 struct vnode *dvp = NULLVP;
6620 struct cnode *cp = VTOC(vp);
6621 int error;
6622 int vid;
6623
6624 if (vnode_vtype(vp) == VDIR) {
6625 return EINVAL;
6626 }
6627
6628 restart:
6629 /* Attempt to use existing vnode */
6630 if ((rvp = cp->c_rsrc_vp)) {
6631 vid = vnode_vid(rvp);
6632
6633 // vnode_getwithvid can block so we need to drop the cnode lock
6634 hfs_unlock(cp);
6635
6636 error = vnode_getwithvid(rvp, vid);
6637
6638 hfs_lock_always(cp, HFS_EXCLUSIVE_LOCK);
6639
6640 /*
6641 * When our lock was relinquished, the resource fork
6642 * could have been recycled. Check for this and try
6643 * again.
6644 */
6645 if (error == ENOENT)
6646 goto restart;
6647
6648 if (error) {
6649 const char * name = (const char *)VTOC(vp)->c_desc.cd_nameptr;
6650
6651 if (name)
6652 printf("hfs_vgetrsrc: couldn't get resource"
6653 " fork for %s, vol=%s, err=%d\n", name, hfsmp->vcbVN, error);
6654 return (error);
6655 }
6656 } else {
6657 struct cat_fork rsrcfork;
6658 struct componentname cn;
6659 struct cat_desc *descptr = NULL;
6660 struct cat_desc to_desc;
6661 char delname[32];
6662 int lockflags;
6663 int newvnode_flags = 0;
6664
6665 /*
6666 * Make sure cnode lock is exclusive, if not upgrade it.
6667 *
6668 * We assume that we were called from a read-only VNOP (getattr)
6669 * and that it's safe to have the cnode lock dropped and reacquired.
6670 */
6671 if (cp->c_lockowner != current_thread()) {
6672 /*
6673 * If the upgrade fails we lose the lock and
6674 * have to take the exclusive lock on our own.
6675 */
6676 if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE)
6677 lck_rw_lock_exclusive(&cp->c_rwlock);
6678 cp->c_lockowner = current_thread();
6679 }
6680
6681 /*
6682 * hfs_vgetrsrc may be invoked for a cnode that has already been marked
6683 * C_DELETED. This is because we need to continue to provide rsrc
6684 * fork access to open-unlinked files. In this case, build a fake descriptor
6685 * like in hfs_removefile. If we don't do this, buildkey will fail in
6686 * cat_lookup because this cnode has no name in its descriptor.
6687 */
6688 if ((cp->c_flag & C_DELETED ) && (cp->c_desc.cd_namelen == 0)) {
6689 bzero (&to_desc, sizeof(to_desc));
6690 bzero (delname, 32);
6691 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
6692 to_desc.cd_nameptr = (const u_int8_t*) delname;
6693 to_desc.cd_namelen = strlen(delname);
6694 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
6695 to_desc.cd_flags = 0;
6696 to_desc.cd_cnid = cp->c_cnid;
6697
6698 descptr = &to_desc;
6699 }
6700 else {
6701 descptr = &cp->c_desc;
6702 }
6703
6704
6705 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
6706
6707 /*
6708 * We call cat_idlookup (instead of cat_lookup) below because we can't
6709 * trust the descriptor in the provided cnode for lookups at this point.
6710 * Between the time of the original lookup of this vnode and now, the
6711 * descriptor could have gotten swapped or replaced. If this occurred,
6712 * the parent/name combo originally desired may not necessarily be provided
6713 * if we use the descriptor. Even worse, if the vnode represents
6714 * a hardlink, we could have removed one of the links from the namespace
6715 * but left the descriptor alone, since hfs_unlink does not invalidate
6716 * the descriptor in the cnode if other links still point to the inode.
6717 *
6718 * Consider the following (slightly contrived) scenario:
6719 * /tmp/a <--> /tmp/b (hardlinks).
6720 * 1. Thread A: open rsrc fork on /tmp/b.
6721 * 1a. Thread A: does lookup, goes out to lunch right before calling getnamedstream.
6722 * 2. Thread B does 'mv /foo/b /tmp/b'.
6723 * 3. Thread B's rename succeeds.
6724 * 4. Thread A comes back and wants rsrc fork info for /tmp/b.
6725 *
6726 * Even though the hardlink backing /tmp/b is now eliminated, the descriptor
6727 * is not removed/updated during the unlink process. So, if you were to
6728 * do a lookup on /tmp/b, you'd acquire an entirely different record's resource
6729 * fork.
6730 *
6731 * As a result, we use the fileid, which should be invariant for the lifetime
6732 * of the cnode (possibly barring calls to exchangedata).
6733 *
6734 * Addendum: We can't do the above for HFS standard since we aren't guaranteed to
6735 * have thread records for files. They were only required for directories. So
6736 * we need to do the lookup with the catalog name. This is OK since hardlinks were
6737 * never allowed on HFS standard.
6738 */
6739
6740 /* Get resource fork data */
6741 if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
6742 error = cat_idlookup (hfsmp, cp->c_fileid, 0, 1, NULL, NULL, &rsrcfork);
6743 }
6744 #if CONFIG_HFS_STD
6745 else {
6746 /*
6747 * HFS standard only:
6748 *
6749 * Get the resource fork for this item with a cat_lookup call, but do not
6750 * force a case lookup since HFS standard is case-insensitive only. We
6751 * don't want the descriptor; just the fork data here. If we tried to
6752 * do a ID lookup (via thread record -> catalog record), then we might fail
6753 * prematurely since, as noted above, thread records were not strictly required
6754 * on files in HFS.
6755 */
6756 error = cat_lookup (hfsmp, descptr, 1, 0, (struct cat_desc*)NULL,
6757 (struct cat_attr*)NULL, &rsrcfork, NULL);
6758 }
6759 #endif
6760
6761 hfs_systemfile_unlock(hfsmp, lockflags);
6762 if (error) {
6763 return (error);
6764 }
6765 /*
6766 * Supply hfs_getnewvnode with a component name.
6767 */
6768 cn.cn_pnbuf = NULL;
6769 if (descptr->cd_nameptr) {
6770 MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
6771 cn.cn_nameiop = LOOKUP;
6772 cn.cn_flags = ISLASTCN | HASBUF;
6773 cn.cn_context = NULL;
6774 cn.cn_pnlen = MAXPATHLEN;
6775 cn.cn_nameptr = cn.cn_pnbuf;
6776 cn.cn_hash = 0;
6777 cn.cn_consume = 0;
6778 cn.cn_namelen = snprintf(cn.cn_nameptr, MAXPATHLEN,
6779 "%s%s", descptr->cd_nameptr,
6780 _PATH_RSRCFORKSPEC);
6781 // Should never happen because cn.cn_nameptr won't ever be long...
6782 if (cn.cn_namelen >= MAXPATHLEN) {
6783 FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
6784 return ENAMETOOLONG;
6785 }
6786 }
6787 dvp = vnode_getparent(vp);
6788 error = hfs_getnewvnode(hfsmp, dvp, cn.cn_pnbuf ? &cn : NULL,
6789 descptr, GNV_WANTRSRC | GNV_SKIPLOCK, &cp->c_attr,
6790 &rsrcfork, &rvp, &newvnode_flags);
6791 if (dvp)
6792 vnode_put(dvp);
6793 if (cn.cn_pnbuf)
6794 FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
6795 if (error)
6796 return (error);
6797 }
6798
6799 *rvpp = rvp;
6800 return (0);
6801 }
6802
6803 /*
6804 * Wrapper for special device reads
6805 */
6806 int
6807 hfsspec_read(ap)
6808 struct vnop_read_args /* {
6809 struct vnode *a_vp;
6810 struct uio *a_uio;
6811 int a_ioflag;
6812 vfs_context_t a_context;
6813 } */ *ap;
6814 {
6815 /*
6816 * Set access flag.
6817 */
6818 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
6819 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_read), ap));
6820 }
6821
6822 /*
6823 * Wrapper for special device writes
6824 */
6825 int
6826 hfsspec_write(ap)
6827 struct vnop_write_args /* {
6828 struct vnode *a_vp;
6829 struct uio *a_uio;
6830 int a_ioflag;
6831 vfs_context_t a_context;
6832 } */ *ap;
6833 {
6834 /*
6835 * Set update and change flags.
6836 */
6837 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
6838 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
6839 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_write), ap));
6840 }
6841
6842 /*
6843 * Wrapper for special device close
6844 *
6845 * Update the times on the cnode then do device close.
6846 */
6847 int
6848 hfsspec_close(ap)
6849 struct vnop_close_args /* {
6850 struct vnode *a_vp;
6851 int a_fflag;
6852 vfs_context_t a_context;
6853 } */ *ap;
6854 {
6855 struct vnode *vp = ap->a_vp;
6856 struct cnode *cp;
6857
6858 if (vnode_isinuse(ap->a_vp, 0)) {
6859 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
6860 cp = VTOC(vp);
6861 hfs_touchtimes(VTOHFS(vp), cp);
6862 hfs_unlock(cp);
6863 }
6864 }
6865 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_close), ap));
6866 }
6867
6868 #if FIFO
6869 /*
6870 * Wrapper for fifo reads
6871 */
6872 static int
6873 hfsfifo_read(ap)
6874 struct vnop_read_args /* {
6875 struct vnode *a_vp;
6876 struct uio *a_uio;
6877 int a_ioflag;
6878 vfs_context_t a_context;
6879 } */ *ap;
6880 {
6881 /*
6882 * Set access flag.
6883 */
6884 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
6885 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_read), ap));
6886 }
6887
6888 /*
6889 * Wrapper for fifo writes
6890 */
6891 static int
6892 hfsfifo_write(ap)
6893 struct vnop_write_args /* {
6894 struct vnode *a_vp;
6895 struct uio *a_uio;
6896 int a_ioflag;
6897 vfs_context_t a_context;
6898 } */ *ap;
6899 {
6900 /*
6901 * Set update and change flags.
6902 */
6903 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
6904 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
6905 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_write), ap));
6906 }
6907
6908 /*
6909 * Wrapper for fifo close
6910 *
6911 * Update the times on the cnode, then do the fifo close.
6912 */
6913 static int
6914 hfsfifo_close(ap)
6915 struct vnop_close_args /* {
6916 struct vnode *a_vp;
6917 int a_fflag;
6918 vfs_context_t a_context;
6919 } */ *ap;
6920 {
6921 struct vnode *vp = ap->a_vp;
6922 struct cnode *cp;
6923
6924 if (vnode_isinuse(ap->a_vp, 1)) {
6925 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
6926 cp = VTOC(vp);
6927 hfs_touchtimes(VTOHFS(vp), cp);
6928 hfs_unlock(cp);
6929 }
6930 }
6931 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_close), ap));
6932 }
6933
6934
6935 #endif /* FIFO */
6936
6937 /*
6938 * Getter for the document_id.
6939 * The document_id is stored in the FndrExtendedFileInfo (files) or FndrExtendedDirInfo (directories) portion of the Finder info.
6940 */
6941 static u_int32_t
6942 hfs_get_document_id_internal(const uint8_t *finderinfo, mode_t mode)
6943 {
6944 u_int8_t *finfo = NULL;
6945 u_int32_t doc_id = 0;
6946
6947 /* overlay the FinderInfo onto the correct pointer; the extended info lives in the second 16 bytes */
6948 finfo = ((uint8_t *)finderinfo) + 16;
6949
6950 if (S_ISREG(mode)) {
6951 struct FndrExtendedFileInfo *extinfo = (struct FndrExtendedFileInfo *)finfo;
6952 doc_id = extinfo->document_id;
6953 } else if (S_ISDIR(mode)) {
6954 struct FndrExtendedDirInfo *extinfo = (struct FndrExtendedDirInfo *)finfo;
6955 doc_id = extinfo->document_id;
6956 }
6957
6958 return doc_id;
6959 }
6960
6961
6962 /* getter(s) for document id */
6963 u_int32_t
6964 hfs_get_document_id(struct cnode *cp)
6965 {
6966 return (hfs_get_document_id_internal((u_int8_t*)cp->c_finderinfo,
6967 cp->c_attr.ca_mode));
6968 }
6969
6970 /* If you have finderinfo and mode, you can use this */
6971 u_int32_t
6972 hfs_get_document_id_from_blob(const uint8_t *finderinfo, mode_t mode)
6973 {
6974 return (hfs_get_document_id_internal(finderinfo, mode));
6975 }
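/*
 * Illustrative sketch (not part of this file): the getters above read the
 * document_id out of the on-disk Finder info.  A userspace caller can fetch
 * the same value through getattrlist() with ATTR_CMN_DOCUMENT_ID (available
 * on OS X 10.10 and later); the function name and reply struct below are
 * assumptions for the sketch.
 */
#include <sys/attr.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Reply layout for a single ATTR_CMN_DOCUMENT_ID request. */
struct doc_id_reply {
	uint32_t length;      /* total length returned by getattrlist() */
	uint32_t document_id; /* ATTR_CMN_DOCUMENT_ID value */
} __attribute__((packed));

int
print_document_id_example(const char *path)
{
	struct attrlist al;
	struct doc_id_reply reply;

	memset(&al, 0, sizeof(al));
	al.bitmapcount = ATTR_BIT_MAP_COUNT;
	al.commonattr = ATTR_CMN_DOCUMENT_ID;

	if (getattrlist(path, &al, &reply, sizeof(reply), 0) != 0) {
		perror("getattrlist");
		return -1;
	}
	printf("%s: document_id = %u\n", path, reply.document_id);
	return 0;
}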
6976
6977 /*
6978 * Synchronize a file's in-core state with that on disk.
6979 */
6980 int
6981 hfs_vnop_fsync(ap)
6982 struct vnop_fsync_args /* {
6983 struct vnode *a_vp;
6984 int a_waitfor;
6985 vfs_context_t a_context;
6986 } */ *ap;
6987 {
6988 struct vnode* vp = ap->a_vp;
6989 int error;
6990
6991 /* Note: We check the hfs flags instead of the vfs mount flag because during
6992 * a read-only to read-write update, hfs marks itself read-write much earlier
6993 * than the vfs does; checking the hfs flag therefore avoids skipping writes
6994 * such as zeroing out unused nodes, creating the hotfiles B-tree, etc.
6995 */
6996 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) {
6997 return 0;
6998 }
6999
7000 /*
7001 * No need to call cp_handle_vnop to resolve fsync(). Any dirty data
7002 * should have caused the keys to be unwrapped at the time the data was
7003 * put into the UBC, either at mmap/pagein/read-write time. If anything
7004 * slipped through, the strategy routine will resolve the keys for us.
7005 *
7006 * We also need to tolerate ENOENT lock errors since the unlink
7007 * system call can invoke VNOP_FSYNC during vclean.
7008 */
7009 error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
7010 if (error)
7011 return (0);
7012
7013 error = hfs_fsync(vp, ap->a_waitfor, 0, vfs_context_proc(ap->a_context));
7014
7015 hfs_unlock(VTOC(vp));
7016 return (error);
7017 }
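/*
 * Illustrative sketch (not part of this file): hfs_vnop_fsync() flushes a
 * file's in-core state, but on macOS an application that needs the data on
 * the platter typically follows fsync(2) with fcntl(F_FULLFSYNC), which also
 * asks the drive to drain its write cache.  The descriptor is assumed to be
 * open for writing.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
durable_flush_example(int fd)
{
	/* Ordinary fsync: data handed to the device; device cache flush not guaranteed. */
	if (fsync(fd) < 0) {
		perror("fsync");
		return -1;
	}
	/* F_FULLFSYNC: additionally request that the device empty its write cache. */
	if (fcntl(fd, F_FULLFSYNC) == -1) {
		perror("fcntl(F_FULLFSYNC)");
		return -1;
	}
	return 0;
}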
7018
7019 int (**hfs_vnodeop_p)(void *);
7020
7021 #define VOPFUNC int (*)(void *)
7022
7023
7024 #if CONFIG_HFS_STD
7025 int (**hfs_std_vnodeop_p) (void *);
7026 static int hfs_readonly_op (__unused void* ap) { return (EROFS); }
7027
7028 /*
7029 * In 10.6 and later, HFS Standard is read-only and deprecated. The vnop table below
7030 * is used for HFS Standard volumes to block operations that would modify the file system.
7031 */
7032
7033 struct vnodeopv_entry_desc hfs_standard_vnodeop_entries[] = {
7034 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7035 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
7036 { &vnop_create_desc, (VOPFUNC)hfs_readonly_op }, /* create (READONLY) */
7037 { &vnop_mknod_desc, (VOPFUNC)hfs_readonly_op }, /* mknod (READONLY) */
7038 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
7039 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
7040 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7041 { &vnop_setattr_desc, (VOPFUNC)hfs_readonly_op }, /* setattr */
7042 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
7043 { &vnop_write_desc, (VOPFUNC)hfs_readonly_op }, /* write (READONLY) */
7044 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
7045 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
7046 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
7047 { &vnop_exchange_desc, (VOPFUNC)hfs_readonly_op }, /* exchange (READONLY)*/
7048 { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */
7049 { &vnop_fsync_desc, (VOPFUNC)hfs_readonly_op}, /* fsync (READONLY) */
7050 { &vnop_remove_desc, (VOPFUNC)hfs_readonly_op }, /* remove (READONLY) */
7051 { &vnop_link_desc, (VOPFUNC)hfs_readonly_op }, /* link (READONLY) */
7052 { &vnop_rename_desc, (VOPFUNC)hfs_readonly_op }, /* rename (READONLY)*/
7053 { &vnop_mkdir_desc, (VOPFUNC)hfs_readonly_op }, /* mkdir (READONLY) */
7054 { &vnop_rmdir_desc, (VOPFUNC)hfs_readonly_op }, /* rmdir (READONLY) */
7055 { &vnop_symlink_desc, (VOPFUNC)hfs_readonly_op }, /* symlink (READONLY) */
7056 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
7057 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
7058 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
7059 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7060 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7061 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
7062 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
7063 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7064 { &vnop_allocate_desc, (VOPFUNC)hfs_readonly_op }, /* allocate (READONLY) */
7065 #if CONFIG_SEARCHFS
7066 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
7067 #else
7068 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
7069 #endif
7070 { &vnop_bwrite_desc, (VOPFUNC)hfs_readonly_op }, /* bwrite (READONLY) */
7071 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
7072 { &vnop_pageout_desc, (VOPFUNC)hfs_readonly_op }, /* pageout (READONLY) */
7073 { &vnop_copyfile_desc, (VOPFUNC)hfs_readonly_op }, /* copyfile (READONLY)*/
7074 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7075 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7076 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7077 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7078 { &vnop_setxattr_desc, (VOPFUNC)hfs_readonly_op}, /* set xattr (READONLY) */
7079 { &vnop_removexattr_desc, (VOPFUNC)hfs_readonly_op}, /* remove xattr (READONLY) */
7080 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7081 #if NAMEDSTREAMS
7082 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
7083 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_readonly_op },
7084 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_readonly_op },
7085 #endif
7086 { &vnop_getattrlistbulk_desc, (VOPFUNC)hfs_vnop_getattrlistbulk }, /* getattrlistbulk */
7087 { NULL, (VOPFUNC)NULL }
7088 };
7089
7090 struct vnodeopv_desc hfs_std_vnodeop_opv_desc =
7091 { &hfs_std_vnodeop_p, hfs_standard_vnodeop_entries };
7092 #endif
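/*
 * Illustrative sketch (not part of this file): because the HFS Standard
 * table above maps create/write/rename/etc. to hfs_readonly_op, a mutating
 * call against such a volume surfaces as EROFS.  The mount path below is
 * hypothetical.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

void
hfs_std_is_readonly_example(void)
{
	int fd = open("/Volumes/OldHFSStd/newfile", O_CREAT | O_WRONLY, 0644);

	if (fd < 0 && errno == EROFS) {
		/* vnop_create was dispatched to hfs_readonly_op. */
		printf("HFS Standard volume is read-only (EROFS)\n");
	} else if (fd >= 0) {
		close(fd);
	}
}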
7093
7094 /* VNOP table for HFS+ */
7095 struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
7096 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7097 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
7098 { &vnop_create_desc, (VOPFUNC)hfs_vnop_create }, /* create */
7099 { &vnop_mknod_desc, (VOPFUNC)hfs_vnop_mknod }, /* mknod */
7100 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
7101 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
7102 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7103 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7104 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
7105 { &vnop_write_desc, (VOPFUNC)hfs_vnop_write }, /* write */
7106 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
7107 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
7108 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
7109 { &vnop_exchange_desc, (VOPFUNC)hfs_vnop_exchange }, /* exchange */
7110 { &vnop_mmap_desc, (VOPFUNC)hfs_vnop_mmap }, /* mmap */
7111 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7112 { &vnop_remove_desc, (VOPFUNC)hfs_vnop_remove }, /* remove */
7113 { &vnop_link_desc, (VOPFUNC)hfs_vnop_link }, /* link */
7114 { &vnop_rename_desc, (VOPFUNC)hfs_vnop_rename }, /* rename */
7115 { &vnop_mkdir_desc, (VOPFUNC)hfs_vnop_mkdir }, /* mkdir */
7116 { &vnop_rmdir_desc, (VOPFUNC)hfs_vnop_rmdir }, /* rmdir */
7117 { &vnop_symlink_desc, (VOPFUNC)hfs_vnop_symlink }, /* symlink */
7118 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
7119 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
7120 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
7121 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7122 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7123 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
7124 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
7125 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7126 { &vnop_allocate_desc, (VOPFUNC)hfs_vnop_allocate }, /* allocate */
7127 #if CONFIG_SEARCHFS
7128 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
7129 #else
7130 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
7131 #endif
7132 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite }, /* bwrite */
7133 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
7134 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* pageout */
7135 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7136 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7137 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7138 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7139 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7140 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7141 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7142 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7143 #if NAMEDSTREAMS
7144 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
7145 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_vnop_makenamedstream },
7146 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_vnop_removenamedstream },
7147 #endif
7148 { &vnop_getattrlistbulk_desc, (VOPFUNC)hfs_vnop_getattrlistbulk }, /* getattrlistbulk */
7149 { &vnop_mnomap_desc, (VOPFUNC)hfs_vnop_mnomap },
7150 { NULL, (VOPFUNC)NULL }
7151 };
7152
7153 struct vnodeopv_desc hfs_vnodeop_opv_desc =
7154 { &hfs_vnodeop_p, hfs_vnodeop_entries };
7155
7156
7157 /* Spec Op vnop table for HFS+ */
7158 int (**hfs_specop_p)(void *);
7159 struct vnodeopv_entry_desc hfs_specop_entries[] = {
7160 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7161 { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */
7162 { &vnop_create_desc, (VOPFUNC)spec_create }, /* create */
7163 { &vnop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */
7164 { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */
7165 { &vnop_close_desc, (VOPFUNC)hfsspec_close }, /* close */
7166 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7167 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7168 { &vnop_read_desc, (VOPFUNC)hfsspec_read }, /* read */
7169 { &vnop_write_desc, (VOPFUNC)hfsspec_write }, /* write */
7170 { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */
7171 { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */
7172 { &vnop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */
7173 { &vnop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */
7174 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7175 { &vnop_remove_desc, (VOPFUNC)spec_remove }, /* remove */
7176 { &vnop_link_desc, (VOPFUNC)spec_link }, /* link */
7177 { &vnop_rename_desc, (VOPFUNC)spec_rename }, /* rename */
7178 { &vnop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */
7179 { &vnop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */
7180 { &vnop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */
7181 { &vnop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */
7182 { &vnop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */
7183 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7184 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7185 { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */
7186 { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */
7187 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7188 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
7189 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
7190 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
7191 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7192 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7193 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7194 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7195 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7196 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7197 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7198 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
7199 };
7200 struct vnodeopv_desc hfs_specop_opv_desc =
7201 { &hfs_specop_p, hfs_specop_entries };
7202
7203 #if FIFO
7204 /* HFS+ FIFO VNOP table */
7205 int (**hfs_fifoop_p)(void *);
7206 struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
7207 { &vnop_default_desc, (VOPFUNC)vn_default_error },
7208 { &vnop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */
7209 { &vnop_create_desc, (VOPFUNC)fifo_create }, /* create */
7210 { &vnop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */
7211 { &vnop_open_desc, (VOPFUNC)fifo_open }, /* open */
7212 { &vnop_close_desc, (VOPFUNC)hfsfifo_close }, /* close */
7213 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
7214 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
7215 { &vnop_read_desc, (VOPFUNC)hfsfifo_read }, /* read */
7216 { &vnop_write_desc, (VOPFUNC)hfsfifo_write }, /* write */
7217 { &vnop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */
7218 { &vnop_select_desc, (VOPFUNC)fifo_select }, /* select */
7219 { &vnop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */
7220 { &vnop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */
7221 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
7222 { &vnop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */
7223 { &vnop_link_desc, (VOPFUNC)fifo_link }, /* link */
7224 { &vnop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */
7225 { &vnop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */
7226 { &vnop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */
7227 { &vnop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */
7228 { &vnop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */
7229 { &vnop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */
7230 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
7231 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
7232 { &vnop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */
7233 { &vnop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */
7234 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
7235 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
7236 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
7237 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
7238 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
7239 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
7240 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
7241 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
7242 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
7243 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
7244 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
7245 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
7246 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
7247 };
7248 struct vnodeopv_desc hfs_fifoop_opv_desc =
7249 { &hfs_fifoop_p, hfs_fifoop_entries };
7250 #endif /* FIFO */
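/*
 * Illustrative sketch (not part of this file): vnodeopv_desc tables like the
 * HFS+, spec, and fifo tables above are handed to the VFS through
 * vfs_fsadd() when a file system registers.  "example_vfsops", the file
 * system name, and the flag choice are assumptions for the sketch, not the
 * actual hfs registration code.
 */
#include <sys/mount.h>
#include <sys/vnode.h>

extern struct vfsops example_vfsops;          /* hypothetical vfsops table */
extern struct vnodeopv_desc hfs_vnodeop_opv_desc;
extern struct vnodeopv_desc hfs_specop_opv_desc;
extern struct vnodeopv_desc hfs_fifoop_opv_desc;

static struct vnodeopv_desc *example_opv_descs[] = {
	&hfs_vnodeop_opv_desc,
	&hfs_specop_opv_desc,
	&hfs_fifoop_opv_desc,
};

static vfstable_t example_vfs_handle;

static int
register_example_fs(void)
{
	struct vfs_fsentry vfe = {
		.vfe_vfsops   = &example_vfsops,
		.vfe_vopcnt   = sizeof(example_opv_descs) / sizeof(example_opv_descs[0]),
		.vfe_opvdescs = example_opv_descs,
		.vfe_fsname   = "examplefs",
		/* Flag choice is illustrative; real file systems pick what they support. */
		.vfe_flags    = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY,
	};

	/* On success, the handle can later be passed to vfs_fsremove(). */
	return vfs_fsadd(&vfe, &example_vfs_handle);
}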
7251
7252
7253