apple/xnu (release xnu-2050.18.24), file bsd/hfs/hfs_vnops.c
1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/systm.h>
30 #include <sys/param.h>
31 #include <sys/kernel.h>
32 #include <sys/file_internal.h>
33 #include <sys/dirent.h>
34 #include <sys/stat.h>
35 #include <sys/buf.h>
36 #include <sys/buf_internal.h>
37 #include <sys/mount.h>
38 #include <sys/vnode_if.h>
39 #include <sys/vnode_internal.h>
40 #include <sys/malloc.h>
41 #include <sys/ubc.h>
42 #include <sys/ubc_internal.h>
43 #include <sys/paths.h>
44 #include <sys/quota.h>
45 #include <sys/time.h>
46 #include <sys/disk.h>
47 #include <sys/kauth.h>
48 #include <sys/uio_internal.h>
49 #include <sys/fsctl.h>
50 #include <sys/cprotect.h>
51 #include <sys/xattr.h>
52 #include <string.h>
53
54 #include <miscfs/specfs/specdev.h>
55 #include <miscfs/fifofs/fifo.h>
56 #include <vfs/vfs_support.h>
57 #include <machine/spl.h>
58
59 #include <sys/kdebug.h>
60 #include <sys/sysctl.h>
61
62 #include "hfs.h"
63 #include "hfs_catalog.h"
64 #include "hfs_cnode.h"
65 #include "hfs_dbg.h"
66 #include "hfs_mount.h"
67 #include "hfs_quota.h"
68 #include "hfs_endian.h"
69
70 #include "hfscommon/headers/BTreesInternal.h"
71 #include "hfscommon/headers/FileMgrInternal.h"
72
73 #define KNDETACH_VNLOCKED 0x00000001
74
75 /* Global vfs data structures for hfs */
76
77 /* Always do F_FULLFSYNC? 1=yes, 0=no (the default is 'no', for "various" reasons) */
78 int always_do_fullfsync = 0;
79 SYSCTL_DECL(_vfs_generic);
80 SYSCTL_INT (_vfs_generic, OID_AUTO, always_do_fullfsync, CTLFLAG_RW | CTLFLAG_LOCKED, &always_do_fullfsync, 0, "always F_FULLFSYNC when fsync is called");
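/*
 * Quick usage sketch: this knob can be flipped at runtime, roughly
 *     sysctl vfs.generic.always_do_fullfsync=1
 * (the sysctl name is assumed from the _vfs_generic parent and the OID
 * declared above).
 */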
81
82 int hfs_makenode(struct vnode *dvp, struct vnode **vpp,
83 struct componentname *cnp, struct vnode_attr *vap,
84 vfs_context_t ctx);
85 int hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p);
86 int hfs_metasync_all(struct hfsmount *hfsmp);
87
88 int hfs_removedir(struct vnode *, struct vnode *, struct componentname *,
89 int, int);
90 int hfs_removefile(struct vnode *, struct vnode *, struct componentname *,
91 int, int, int, struct vnode *, int);
92
93 /* Used here and in cnode teardown -- for symlinks */
94 int hfs_removefile_callback(struct buf *bp, void *hfsmp);
95
96 int hfs_movedata (struct vnode *, struct vnode*);
97 static int hfs_move_fork (struct filefork *srcfork, struct cnode *src,
98 struct filefork *dstfork, struct cnode *dst);
99
100 #if FIFO
101 static int hfsfifo_read(struct vnop_read_args *);
102 static int hfsfifo_write(struct vnop_write_args *);
103 static int hfsfifo_close(struct vnop_close_args *);
104
105 extern int (**fifo_vnodeop_p)(void *);
106 #endif /* FIFO */
107
108 int hfs_vnop_close(struct vnop_close_args*);
109 int hfs_vnop_create(struct vnop_create_args*);
110 int hfs_vnop_exchange(struct vnop_exchange_args*);
111 int hfs_vnop_fsync(struct vnop_fsync_args*);
112 int hfs_vnop_mkdir(struct vnop_mkdir_args*);
113 int hfs_vnop_mknod(struct vnop_mknod_args*);
114 int hfs_vnop_getattr(struct vnop_getattr_args*);
115 int hfs_vnop_open(struct vnop_open_args*);
116 int hfs_vnop_readdir(struct vnop_readdir_args*);
117 int hfs_vnop_remove(struct vnop_remove_args*);
118 int hfs_vnop_rename(struct vnop_rename_args*);
119 int hfs_vnop_rmdir(struct vnop_rmdir_args*);
120 int hfs_vnop_symlink(struct vnop_symlink_args*);
121 int hfs_vnop_setattr(struct vnop_setattr_args*);
122 int hfs_vnop_readlink(struct vnop_readlink_args *);
123 int hfs_vnop_pathconf(struct vnop_pathconf_args *);
124 int hfs_vnop_whiteout(struct vnop_whiteout_args *);
125 int hfs_vnop_mmap(struct vnop_mmap_args *ap);
126 int hfsspec_read(struct vnop_read_args *);
127 int hfsspec_write(struct vnop_write_args *);
128 int hfsspec_close(struct vnop_close_args *);
129
130 /* Options for hfs_removedir and hfs_removefile */
131 #define HFSRM_SKIP_RESERVE 0x01
132
133
134
135
136 /*****************************************************************************
137 *
138 * Common Operations on vnodes
139 *
140 *****************************************************************************/
141
142 /*
143 * Is the given cnode either the .journal or .journal_info_block file on
144 * a volume with an active journal? Many VNOPs use this to deny access
145 * to those files.
146 *
147 * Note: the .journal file on a volume with an external journal still
148 * returns true here, even though it does not actually hold the contents
149 * of the volume's journal.
150 */
151 static _Bool
152 hfs_is_journal_file(struct hfsmount *hfsmp, struct cnode *cp)
153 {
154 if (hfsmp->jnl != NULL &&
155 (cp->c_fileid == hfsmp->hfs_jnlinfoblkid ||
156 cp->c_fileid == hfsmp->hfs_jnlfileid)) {
157 return true;
158 } else {
159 return false;
160 }
161 }
162
163 /*
164 * Create a regular file.
165 */
166 int
167 hfs_vnop_create(struct vnop_create_args *ap)
168 {
169 int error;
170
171 again:
172 error = hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
173
174 /*
175 * We speculatively skipped the original lookup of the leaf
176 * for CREATE. Since it exists, go get it as long as they
177 * didn't want an exclusive create.
178 */
179 if ((error == EEXIST) && !(ap->a_vap->va_vaflags & VA_EXCLUSIVE)) {
180 struct vnop_lookup_args args;
181
182 args.a_desc = &vnop_lookup_desc;
183 args.a_dvp = ap->a_dvp;
184 args.a_vpp = ap->a_vpp;
185 args.a_cnp = ap->a_cnp;
186 args.a_context = ap->a_context;
187 args.a_cnp->cn_nameiop = LOOKUP;
188 error = hfs_vnop_lookup(&args);
189 /*
190 * We can also race with remove for this file.
191 */
192 if (error == ENOENT) {
193 goto again;
194 }
195
196 /* Make sure it was a file. */
197 if ((error == 0) && !vnode_isreg(*args.a_vpp)) {
198 vnode_put(*args.a_vpp);
199 *args.a_vpp = NULLVP;
200 error = EEXIST;
201 }
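/* Restore the caller's componentname operation, which was switched to LOOKUP above. */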
202 args.a_cnp->cn_nameiop = CREATE;
203 }
204 return (error);
205 }
206
207 /*
208 * Make device special file.
209 */
210 int
211 hfs_vnop_mknod(struct vnop_mknod_args *ap)
212 {
213 struct vnode_attr *vap = ap->a_vap;
214 struct vnode *dvp = ap->a_dvp;
215 struct vnode **vpp = ap->a_vpp;
216 struct cnode *cp;
217 int error;
218
219 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord) {
220 return (ENOTSUP);
221 }
222
223 /* Create the vnode */
224 error = hfs_makenode(dvp, vpp, ap->a_cnp, vap, ap->a_context);
225 if (error)
226 return (error);
227
228 cp = VTOC(*vpp);
229 cp->c_touch_acctime = TRUE;
230 cp->c_touch_chgtime = TRUE;
231 cp->c_touch_modtime = TRUE;
232
233 if ((vap->va_rdev != VNOVAL) &&
234 (vap->va_type == VBLK || vap->va_type == VCHR))
235 cp->c_rdev = vap->va_rdev;
236
237 return (0);
238 }
239
240 #if HFS_COMPRESSION
241 /*
242 * hfs_ref_data_vp(): returns the data fork vnode for a given cnode.
243 * In the (hopefully rare) case where the data fork vnode is not
244 * present, it will use hfs_vget() to create a new vnode for the
245 * data fork.
246 *
247 * NOTE: If successful and a vnode is returned, the caller is responsible
248 * for releasing the returned vnode with vnode_rele().
249 */
250 static int
251 hfs_ref_data_vp(struct cnode *cp, struct vnode **data_vp, int skiplock)
252 {
253 int vref = 0;
254
255 if (!data_vp || !cp) /* sanity check incoming parameters */
256 return EINVAL;
257
258 /* take the cnode shared lock here, unless the caller used the skiplock parameter to tell us not to */
259
260 if (!skiplock) hfs_lock(cp, HFS_SHARED_LOCK);
261 struct vnode *c_vp = cp->c_vp;
262 if (c_vp) {
263 /* we already have a data vnode */
264 *data_vp = c_vp;
265 vref = vnode_ref(*data_vp);
266 if (!skiplock) hfs_unlock(cp);
267 if (vref == 0) {
268 return 0;
269 }
270 return EINVAL;
271 }
272 /* no data fork vnode in the cnode, so ask hfs for one. */
273
274 if (!cp->c_rsrc_vp) {
275 /* if we don't have either a c_vp or c_rsrc_vp, we can't really do anything useful */
276 *data_vp = NULL;
277 if (!skiplock) hfs_unlock(cp);
278 return EINVAL;
279 }
280
281 if (0 == hfs_vget(VTOHFS(cp->c_rsrc_vp), cp->c_cnid, data_vp, 1, 0) &&
282 NULL != *data_vp) {
283 vref = vnode_ref(*data_vp);
284 vnode_put(*data_vp);
285 if (!skiplock) hfs_unlock(cp);
286 if (vref == 0) {
287 return 0;
288 }
289 return EINVAL;
290 }
291 /* there was an error getting the vnode */
292 *data_vp = NULL;
293 if (!skiplock) hfs_unlock(cp);
294 return EINVAL;
295 }
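/*
 * Typical caller pattern (a sketch; hfs_file_is_compressed() below does this):
 *     struct vnode *data_vp = NULL;
 *     if (hfs_ref_data_vp(cp, &data_vp, skiplock) == 0 && data_vp) {
 *             ... use data_vp ...
 *             vnode_rele(data_vp);
 *     }
 */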
296
297 /*
298 * hfs_lazy_init_decmpfs_cnode(): returns the decmpfs_cnode for a cnode,
299 * allocating it if necessary; returns NULL if there was an allocation error
300 */
301 static decmpfs_cnode *
302 hfs_lazy_init_decmpfs_cnode(struct cnode *cp)
303 {
304 if (!cp->c_decmp) {
305 decmpfs_cnode *dp = NULL;
306 MALLOC_ZONE(dp, decmpfs_cnode *, sizeof(decmpfs_cnode), M_DECMPFS_CNODE, M_WAITOK);
307 if (!dp) {
308 /* error allocating a decmpfs cnode */
309 return NULL;
310 }
311 decmpfs_cnode_init(dp);
312 if (!OSCompareAndSwapPtr(NULL, dp, (void * volatile *)&cp->c_decmp)) {
313 /* another thread got here first, so free the decmpfs_cnode we allocated */
314 decmpfs_cnode_destroy(dp);
315 FREE_ZONE(dp, sizeof(*dp), M_DECMPFS_CNODE);
316 }
317 }
318
319 return cp->c_decmp;
320 }
321
322 /*
323 * hfs_file_is_compressed(): returns 1 if the file is compressed, and 0 (zero) if not.
324 * if the file's compressed flag is set, makes sure that the decmpfs_cnode field
325 * is allocated by calling hfs_lazy_init_decmpfs_cnode(), then makes sure it is populated,
326 * or else fills it in via the decmpfs_file_is_compressed() function.
327 */
328 int
329 hfs_file_is_compressed(struct cnode *cp, int skiplock)
330 {
331 int ret = 0;
332
333 /* fast check to see if file is compressed. If flag is clear, just answer no */
334 if (!(cp->c_bsdflags & UF_COMPRESSED)) {
335 return 0;
336 }
337
338 decmpfs_cnode *dp = hfs_lazy_init_decmpfs_cnode(cp);
339 if (!dp) {
340 /* error allocating a decmpfs cnode, treat the file as uncompressed */
341 return 0;
342 }
343
344 /* flag was set, see if the decmpfs_cnode state is valid (zero == invalid) */
345 uint32_t decmpfs_state = decmpfs_cnode_get_vnode_state(dp);
346 switch(decmpfs_state) {
347 case FILE_IS_COMPRESSED:
348 case FILE_IS_CONVERTING: /* treat decompressing files as if they are compressed */
349 return 1;
350 case FILE_IS_NOT_COMPRESSED:
351 return 0;
352 /* otherwise the state is not cached yet */
353 }
354
355 /* decmpfs hasn't seen this file yet, so call decmpfs_file_is_compressed() to init the decmpfs_cnode struct */
356 struct vnode *data_vp = NULL;
357 if (0 == hfs_ref_data_vp(cp, &data_vp, skiplock)) {
358 if (data_vp) {
359 ret = decmpfs_file_is_compressed(data_vp, VTOCMP(data_vp)); // fill in decmpfs_cnode
360 vnode_rele(data_vp);
361 }
362 }
363 return ret;
364 }
365
366 /* hfs_uncompressed_size_of_compressed_file() - get the uncompressed size of the file.
367 * if the caller has passed a valid vnode (has a ref count > 0), then hfsmp and fid are not required.
368 * if the caller doesn't have a vnode, pass NULL in vp, and pass valid hfsmp and fid.
369 * the file's size is returned in *size (required)
370 * if the indicated file is a directory (or something that doesn't have a data fork), then this call
371 * will return an error and the caller should fall back to treating the item as an uncompressed file
372 */
373 int
374 hfs_uncompressed_size_of_compressed_file(struct hfsmount *hfsmp, struct vnode *vp, cnid_t fid, off_t *size, int skiplock)
375 {
376 int ret = 0;
377 int putaway = 0; /* flag to remember if we used hfs_vget() */
378
379 if (!size) {
380 return EINVAL; /* no place to put the file size */
381 }
382
383 if (NULL == vp) {
384 if (!hfsmp || !fid) { /* make sure we have the required parameters */
385 return EINVAL;
386 }
387 if (0 != hfs_vget(hfsmp, fid, &vp, skiplock, 0)) { /* vnode is null, use hfs_vget() to get it */
388 vp = NULL;
389 } else {
390 putaway = 1; /* note that hfs_vget() was used to acquire the vnode */
391 }
392 }
393 /* this double check for compression (hfs_file_is_compressed)
394 * ensures the cached size is present in case decmpfs hasn't
395 * encountered this node yet.
396 */
397 if (vp) {
398 if (hfs_file_is_compressed(VTOC(vp), skiplock) ) {
399 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
400 } else {
401 if (VTOCMP(vp) && VTOCMP(vp)->cmp_type >= CMP_MAX) {
402 if (VTOCMP(vp)->cmp_type != DATALESS_CMPFS_TYPE) {
403 // if we don't recognize this type, just use the real data fork size
404 if (VTOC(vp)->c_datafork) {
405 *size = VTOC(vp)->c_datafork->ff_size;
406 ret = 0;
407 } else {
408 ret = EINVAL;
409 }
410 } else {
411 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
412 ret = 0;
413 }
414 } else {
415 ret = EINVAL;
416 }
417 }
418 }
419
420 if (putaway) { /* did we use hfs_vget() to get this vnode? */
421 vnode_put(vp); /* if so, release it and set it to null */
422 vp = NULL;
423 }
424 return ret;
425 }
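/*
 * Example caller pattern (a sketch, mirroring the hfs_vnop_getattr() path
 * below): with a usable vnode in hand, hfsmp and fid can be NULL/0:
 *     off_t usize;
 *     if (hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &usize, 0) == 0)
 *             ... usize now holds the uncompressed length ...
 * With no vnode, pass vp == NULL and supply a valid hfsmp and file ID instead.
 */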
426
427 int
428 hfs_hides_rsrc(vfs_context_t ctx, struct cnode *cp, int skiplock)
429 {
430 if (ctx == decmpfs_ctx)
431 return 0;
432 if (!hfs_file_is_compressed(cp, skiplock))
433 return 0;
434 return decmpfs_hides_rsrc(ctx, cp->c_decmp);
435 }
436
437 int
438 hfs_hides_xattr(vfs_context_t ctx, struct cnode *cp, const char *name, int skiplock)
439 {
440 if (ctx == decmpfs_ctx)
441 return 0;
442 if (!hfs_file_is_compressed(cp, skiplock))
443 return 0;
444 return decmpfs_hides_xattr(ctx, cp->c_decmp, name);
445 }
446 #endif /* HFS_COMPRESSION */
447
448 /*
449 * Open a file/directory.
450 */
451 int
452 hfs_vnop_open(struct vnop_open_args *ap)
453 {
454 struct vnode *vp = ap->a_vp;
455 struct filefork *fp;
456 struct timeval tv;
457 int error;
458 static int past_bootup = 0;
459 struct cnode *cp = VTOC(vp);
460 struct hfsmount *hfsmp = VTOHFS(vp);
461
462 #if HFS_COMPRESSION
463 if (ap->a_mode & FWRITE) {
464 /* open for write */
465 if ( hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
466 /* opening a compressed file for write, so convert it to decompressed */
467 struct vnode *data_vp = NULL;
468 error = hfs_ref_data_vp(cp, &data_vp, 1); /* 1 == don't take the cnode lock */
469 if (0 == error) {
470 if (data_vp) {
471 error = decmpfs_decompress_file(data_vp, VTOCMP(data_vp), -1, 1, 0);
472 vnode_rele(data_vp);
473 } else {
474 error = EINVAL;
475 }
476 }
477 if (error != 0)
478 return error;
479 }
480 } else {
481 /* open for read */
482 if (hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
483 if (VNODE_IS_RSRC(vp)) {
484 /* opening the resource fork of a compressed file, so nothing to do */
485 } else {
486 /* opening a compressed file for read, make sure it validates */
487 error = decmpfs_validate_compressed_file(vp, VTOCMP(vp));
488 if (error != 0)
489 return error;
490 }
491 }
492 }
493 #endif
494
495 /*
496 * Files marked append-only must be opened for appending.
497 */
498 if ((cp->c_bsdflags & APPEND) && !vnode_isdir(vp) &&
499 (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
500 return (EPERM);
501
502 if (vnode_isreg(vp) && !UBCINFOEXISTS(vp))
503 return (EBUSY); /* file is in use by the kernel */
504
505 /* Don't allow journal to be opened externally. */
506 if (hfs_is_journal_file(hfsmp, cp))
507 return (EPERM);
508
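/*
 * The remaining work (quota initialization and the de-fragmentation
 * heuristic below) only applies to regular, non-busy files on a writable,
 * journaled volume; for anything else we are done.
 */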
509 if ((hfsmp->hfs_flags & HFS_READ_ONLY) ||
510 (hfsmp->jnl == NULL) ||
511 #if NAMEDSTREAMS
512 !vnode_isreg(vp) || vnode_isinuse(vp, 0) || vnode_isnamedstream(vp)) {
513 #else
514 !vnode_isreg(vp) || vnode_isinuse(vp, 0)) {
515 #endif
516 return (0);
517 }
518
519 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK)))
520 return (error);
521
522 #if QUOTA
523 /* If we're going to write to the file, initialize quotas. */
524 if ((ap->a_mode & FWRITE) && (hfsmp->hfs_flags & HFS_QUOTAS))
525 (void)hfs_getinoquota(cp);
526 #endif /* QUOTA */
527
528 /*
529 * On the first (non-busy) open of a fragmented
530 * file, attempt to de-frag it (if it's less than 20MB).
531 */
532 fp = VTOF(vp);
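/*
 * ff_extents[7] is the last of the eight extent descriptors kept in the
 * catalog record; if it is populated, the fork has at least eight extents
 * (any further ones live in the extents overflow file), so treat the file
 * as fragmented.
 */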
533 if (fp->ff_blocks &&
534 fp->ff_extents[7].blockCount != 0 &&
535 fp->ff_size <= (20 * 1024 * 1024)) {
536 int no_mods = 0;
537 struct timeval now;
538 /*
539 * Wait until system bootup is done (3 min).
540 * And don't relocate a file that's been modified
541 * within the past minute -- this can lead to
542 * system thrashing.
543 */
544
545 if (!past_bootup) {
546 microuptime(&tv);
547 if (tv.tv_sec > (60*3)) {
548 past_bootup = 1;
549 }
550 }
551
552 microtime(&now);
553 if ((now.tv_sec - cp->c_mtime) > 60) {
554 no_mods = 1;
555 }
556
557 if (past_bootup && no_mods) {
558 (void) hfs_relocate(vp, hfsmp->nextAllocation + 4096,
559 vfs_context_ucred(ap->a_context),
560 vfs_context_proc(ap->a_context));
561 }
562 }
563
564 hfs_unlock(cp);
565
566 return (0);
567 }
568
569
570 /*
571 * Close a file/directory.
572 */
573 int
574 hfs_vnop_close(ap)
575 struct vnop_close_args /* {
576 struct vnode *a_vp;
577 int a_fflag;
578 vfs_context_t a_context;
579 } */ *ap;
580 {
581 register struct vnode *vp = ap->a_vp;
582 register struct cnode *cp;
583 struct proc *p = vfs_context_proc(ap->a_context);
584 struct hfsmount *hfsmp;
585 int busy;
586 int tooktrunclock = 0;
587 int knownrefs = 0;
588
589 if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0)
590 return (0);
591 cp = VTOC(vp);
592 hfsmp = VTOHFS(vp);
593
594 /*
595 * If the rsrc fork is a named stream, it can cause the data fork to
596 * stay around, preventing de-allocation of these blocks.
597 * Do checks for truncation on close. Purge extra extents if they exist.
598 * Make sure the vp is not a directory, and that it has a resource fork,
599 * and that resource fork is also a named stream.
600 */
601
602 if ((vp->v_type == VREG) && (cp->c_rsrc_vp)
603 && (vnode_isnamedstream(cp->c_rsrc_vp))) {
604 uint32_t blks;
605
606 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
607 /*
608 * If there are extra blocks and there are only 2 refs on
609 * this vp (ourselves + rsrc fork holding ref on us), go ahead
610 * and try to truncate.
611 */
612 if ((blks < VTOF(vp)->ff_blocks) && (!vnode_isinuse(vp, 2))) {
613 // release cnode lock; must acquire truncate lock BEFORE cnode lock
614 hfs_unlock(cp);
615
616 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
617 tooktrunclock = 1;
618
619 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0) {
620 hfs_unlock_truncate(cp, 0);
621 // bail out if we can't re-acquire cnode lock
622 return 0;
623 }
624 // now re-test to make sure it's still valid
625 if (cp->c_rsrc_vp) {
626 knownrefs = 1 + vnode_isnamedstream(cp->c_rsrc_vp);
627 if (!vnode_isinuse(vp, knownrefs)){
628 // now we can truncate the file, if necessary
629 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
630 if (blks < VTOF(vp)->ff_blocks){
631 (void) hfs_truncate(vp, VTOF(vp)->ff_size, IO_NDELAY, 0, 0, ap->a_context);
632 }
633 }
634 }
635 }
636 }
637
638
639 // if we froze the fs and we're exiting, then "thaw" the fs
640 if (hfsmp->hfs_freezing_proc == p && proc_exiting(p)) {
641 hfsmp->hfs_freezing_proc = NULL;
642 hfs_unlock_global (hfsmp);
643 lck_rw_unlock_exclusive(&hfsmp->hfs_insync);
644 }
645
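/* busy != 0 means someone other than this close still has the vnode in use (roughly, a use count beyond our own reference) */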
646 busy = vnode_isinuse(vp, 1);
647
648 if (busy) {
649 hfs_touchtimes(VTOHFS(vp), cp);
650 }
651 if (vnode_isdir(vp)) {
652 hfs_reldirhints(cp, busy);
653 } else if (vnode_issystem(vp) && !busy) {
654 vnode_recycle(vp);
655 }
656
657 if (tooktrunclock){
658 hfs_unlock_truncate(cp, 0);
659 }
660 hfs_unlock(cp);
661
662 if (ap->a_fflag & FWASWRITTEN) {
663 hfs_sync_ejectable(hfsmp);
664 }
665
666 return (0);
667 }
668
669 /*
670 * Get basic attributes.
671 */
672 int
673 hfs_vnop_getattr(struct vnop_getattr_args *ap)
674 {
675 #define VNODE_ATTR_TIMES \
676 (VNODE_ATTR_va_access_time|VNODE_ATTR_va_change_time|VNODE_ATTR_va_modify_time)
677 #define VNODE_ATTR_AUTH \
678 (VNODE_ATTR_va_mode | VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | \
679 VNODE_ATTR_va_flags | VNODE_ATTR_va_acl)
680
681 struct vnode *vp = ap->a_vp;
682 struct vnode_attr *vap = ap->a_vap;
683 struct vnode *rvp = NULLVP;
684 struct hfsmount *hfsmp;
685 struct cnode *cp;
686 uint64_t data_size;
687 enum vtype v_type;
688 int error = 0;
689 cp = VTOC(vp);
690
691 #if HFS_COMPRESSION
692 /* we need to inspect the decmpfs state of the file before we take the hfs cnode lock */
693 int compressed = 0;
694 int hide_size = 0;
695 off_t uncompressed_size = -1;
696 if (VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_alloc) || VATTR_IS_ACTIVE(vap, va_data_alloc) || VATTR_IS_ACTIVE(vap, va_total_size)) {
697 /* we only care about whether the file is compressed if asked for the uncompressed size */
698 if (VNODE_IS_RSRC(vp)) {
699 /* if it's a resource fork, decmpfs may want us to hide the size */
700 hide_size = hfs_hides_rsrc(ap->a_context, cp, 0);
701 } else {
702 /* if it's a data fork, we need to know if it was compressed so we can report the uncompressed size */
703 compressed = hfs_file_is_compressed(cp, 0);
704 }
705 if ((VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_size))) {
706 // if it's compressed
707 if (compressed || (!VNODE_IS_RSRC(vp) && cp->c_decmp && cp->c_decmp->cmp_type >= CMP_MAX)) {
708 if (0 != hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &uncompressed_size, 0)) {
709 /* failed to get the uncompressed size, we'll check for this later */
710 uncompressed_size = -1;
711 } else {
712 // fake that it's compressed
713 compressed = 1;
714 }
715 }
716 }
717 }
718 #endif
719
720 /*
721 * Shortcut for vnode_authorize path. Each of the attributes
722 * in this set is updated atomically so we don't need to take
723 * the cnode lock to access them.
724 */
725 if ((vap->va_active & ~VNODE_ATTR_AUTH) == 0) {
726 /* Make sure file still exists. */
727 if (cp->c_flag & C_NOEXISTS)
728 return (ENOENT);
729
730 vap->va_uid = cp->c_uid;
731 vap->va_gid = cp->c_gid;
732 vap->va_mode = cp->c_mode;
733 vap->va_flags = cp->c_bsdflags;
734 vap->va_supported |= VNODE_ATTR_AUTH & ~VNODE_ATTR_va_acl;
735
736 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
737 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
738 VATTR_SET_SUPPORTED(vap, va_acl);
739 }
740
741 return (0);
742 }
743
744 hfsmp = VTOHFS(vp);
745 v_type = vnode_vtype(vp);
746 /*
747 * If time attributes are requested and we have cnode times
748 * that require updating, then acquire an exclusive lock on
749 * the cnode before updating the times. Otherwise we can
750 * just acquire a shared lock.
751 */
752 if ((vap->va_active & VNODE_ATTR_TIMES) &&
753 (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime)) {
754 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK)))
755 return (error);
756 hfs_touchtimes(hfsmp, cp);
757 }
758 else {
759 if ((error = hfs_lock(cp, HFS_SHARED_LOCK)))
760 return (error);
761 }
762
763 if (v_type == VDIR) {
764 data_size = (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE;
765
766 if (VATTR_IS_ACTIVE(vap, va_nlink)) {
767 int nlink;
768
769 /*
770 * For directories, the va_nlink is essentially a count
771 * of the ".." references to a directory plus the "."
772 * reference and the directory itself. So for HFS+ this
773 * becomes the sub-directory count plus two.
774 *
775 * In the absence of a sub-directory count we use the
776 * directory's item count. This will be too high in
777 * most cases since it also includes files.
778 */
779 if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
780 (cp->c_attr.ca_recflags & kHFSHasFolderCountMask))
781 nlink = cp->c_attr.ca_dircount; /* implied ".." entries */
782 else
783 nlink = cp->c_entries;
784
785 /* Account for ourself and our "." entry */
786 nlink += 2;
787 /* Hide our private directories. */
788 if (cp->c_cnid == kHFSRootFolderID) {
789 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0) {
790 --nlink;
791 }
792 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0) {
793 --nlink;
794 }
795 }
796 VATTR_RETURN(vap, va_nlink, (u_int64_t)nlink);
797 }
798 if (VATTR_IS_ACTIVE(vap, va_nchildren)) {
799 int entries;
800
801 entries = cp->c_entries;
802 /* Hide our private files and directories. */
803 if (cp->c_cnid == kHFSRootFolderID) {
804 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0)
805 --entries;
806 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0)
807 --entries;
808 if (hfsmp->jnl || ((hfsmp->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))
809 entries -= 2; /* hide the journal files */
810 }
811 VATTR_RETURN(vap, va_nchildren, entries);
812 }
813 /*
814 * The va_dirlinkcount is the count of real directory hard links.
815 * (i.e. it's not the sum of the implied "." and ".." references)
816 */
817 if (VATTR_IS_ACTIVE(vap, va_dirlinkcount)) {
818 VATTR_RETURN(vap, va_dirlinkcount, (uint32_t)cp->c_linkcount);
819 }
820 } else /* !VDIR */ {
821 data_size = VCTOF(vp, cp)->ff_size;
822
823 VATTR_RETURN(vap, va_nlink, (u_int64_t)cp->c_linkcount);
824 if (VATTR_IS_ACTIVE(vap, va_data_alloc)) {
825 u_int64_t blocks;
826
827 #if HFS_COMPRESSION
828 if (hide_size) {
829 VATTR_RETURN(vap, va_data_alloc, 0);
830 } else if (compressed) {
831 /* for compressed files, we report all allocated blocks as belonging to the data fork */
832 blocks = cp->c_blocks;
833 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
834 }
835 else
836 #endif
837 {
838 blocks = VCTOF(vp, cp)->ff_blocks;
839 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
840 }
841 }
842 }
843
844 /* conditional because 64-bit arithmetic can be expensive */
845 if (VATTR_IS_ACTIVE(vap, va_total_size)) {
846 if (v_type == VDIR) {
847 VATTR_RETURN(vap, va_total_size, (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE);
848 } else {
849 u_int64_t total_size = ~0ULL;
850 struct cnode *rcp;
851 #if HFS_COMPRESSION
852 if (hide_size) {
853 /* we're hiding the size of this file, so just return 0 */
854 total_size = 0;
855 } else if (compressed) {
856 if (uncompressed_size == -1) {
857 /*
858 * We failed to get the uncompressed size above,
859 * so we'll fall back to the standard path below
860 * since total_size is still -1
861 */
862 } else {
863 /* use the uncompressed size we fetched above */
864 total_size = uncompressed_size;
865 }
866 }
867 #endif
868 if (total_size == ~0ULL) {
869 if (cp->c_datafork) {
870 total_size = cp->c_datafork->ff_size;
871 }
872
873 if (cp->c_blocks - VTOF(vp)->ff_blocks) {
874 /* We deal with rsrc fork vnode iocount at the end of the function */
875 error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE, FALSE);
876 if (error) {
877 /*
878 * Note that we call hfs_vgetrsrc with error_on_unlinked
879 * set to FALSE. This is because we may be invoked via
880 * fstat() on an open-unlinked file descriptor and we must
881 * continue to support access to the rsrc fork until it disappears.
882 * The code at the end of this function will be
883 * responsible for releasing the iocount generated by
884 * hfs_vgetrsrc. This is because we can't drop the iocount
885 * without unlocking the cnode first.
886 */
887 goto out;
888 }
889
890 rcp = VTOC(rvp);
891 if (rcp && rcp->c_rsrcfork) {
892 total_size += rcp->c_rsrcfork->ff_size;
893 }
894 }
895 }
896
897 VATTR_RETURN(vap, va_total_size, total_size);
898 }
899 }
900 if (VATTR_IS_ACTIVE(vap, va_total_alloc)) {
901 if (v_type == VDIR) {
902 VATTR_RETURN(vap, va_total_alloc, 0);
903 } else {
904 VATTR_RETURN(vap, va_total_alloc, (u_int64_t)cp->c_blocks * (u_int64_t)hfsmp->blockSize);
905 }
906 }
907
908 /*
909 * If the VFS wants extended security data, and we know that we
910 * don't have any (because it never told us it was setting any)
911 * then we can return the supported bit and no data. If we do
912 * have extended security, we can just leave the bit alone and
913 * the VFS will use the fallback path to fetch it.
914 */
915 if (VATTR_IS_ACTIVE(vap, va_acl)) {
916 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
917 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
918 VATTR_SET_SUPPORTED(vap, va_acl);
919 }
920 }
921 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
922 /* Access times are lazily updated, get current time if needed */
923 if (cp->c_touch_acctime) {
924 struct timeval tv;
925
926 microtime(&tv);
927 vap->va_access_time.tv_sec = tv.tv_sec;
928 } else {
929 vap->va_access_time.tv_sec = cp->c_atime;
930 }
931 vap->va_access_time.tv_nsec = 0;
932 VATTR_SET_SUPPORTED(vap, va_access_time);
933 }
934 vap->va_create_time.tv_sec = cp->c_itime;
935 vap->va_create_time.tv_nsec = 0;
936 vap->va_modify_time.tv_sec = cp->c_mtime;
937 vap->va_modify_time.tv_nsec = 0;
938 vap->va_change_time.tv_sec = cp->c_ctime;
939 vap->va_change_time.tv_nsec = 0;
940 vap->va_backup_time.tv_sec = cp->c_btime;
941 vap->va_backup_time.tv_nsec = 0;
942
943 /* See if we need to emit the date added field to the user */
944 if (VATTR_IS_ACTIVE(vap, va_addedtime)) {
945 u_int32_t dateadded = hfs_get_dateadded (cp);
946 if (dateadded) {
947 vap->va_addedtime.tv_sec = dateadded;
948 vap->va_addedtime.tv_nsec = 0;
949 VATTR_SET_SUPPORTED (vap, va_addedtime);
950 }
951 }
952
953 /* XXX is this really a good 'optimal I/O size'? */
954 vap->va_iosize = hfsmp->hfs_logBlockSize;
955 vap->va_uid = cp->c_uid;
956 vap->va_gid = cp->c_gid;
957 vap->va_mode = cp->c_mode;
958 vap->va_flags = cp->c_bsdflags;
959
960 /*
961 * Exporting file IDs from HFS Plus:
962 *
963 * For "normal" files the c_fileid is the same value as the
964 * c_cnid. But for hard link files, they are different - the
965 * c_cnid belongs to the active directory entry (ie the link)
966 * and the c_fileid is for the actual inode (ie the data file).
967 *
968 * The stat call (getattr) uses va_fileid and the Carbon APIs,
969 * which are hardlink-ignorant, will ask for va_linkid.
970 */
971 vap->va_fileid = (u_int64_t)cp->c_fileid;
972 /*
973 * We need to use the origin cache for both hardlinked files
974 * and directories. Hardlinked directories have multiple cnids
975 * and parents (one per link). Hardlinked files also have their
976 * own parents and link IDs separate from the indirect inode number.
977 * If we don't use the cache, we could end up vending the wrong ID
978 * because the cnode will only reflect the link that was looked up most recently.
979 */
980 if (cp->c_flag & C_HARDLINK) {
981 vap->va_linkid = (u_int64_t)hfs_currentcnid(cp);
982 vap->va_parentid = (u_int64_t)hfs_currentparent(cp);
983 } else {
984 vap->va_linkid = (u_int64_t)cp->c_cnid;
985 vap->va_parentid = (u_int64_t)cp->c_parentcnid;
986 }
987 vap->va_fsid = hfsmp->hfs_raw_dev;
988 vap->va_filerev = 0;
989 vap->va_encoding = cp->c_encoding;
990 vap->va_rdev = (v_type == VBLK || v_type == VCHR) ? cp->c_rdev : 0;
991 #if HFS_COMPRESSION
992 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
993 if (hide_size)
994 vap->va_data_size = 0;
995 else if (compressed) {
996 if (uncompressed_size == -1) {
997 /* failed to get the uncompressed size above, so just return data_size */
998 vap->va_data_size = data_size;
999 } else {
1000 /* use the uncompressed size we fetched above */
1001 vap->va_data_size = uncompressed_size;
1002 }
1003 } else
1004 vap->va_data_size = data_size;
1005 // vap->va_supported |= VNODE_ATTR_va_data_size;
1006 VATTR_SET_SUPPORTED(vap, va_data_size);
1007 }
1008 #else
1009 vap->va_data_size = data_size;
1010 vap->va_supported |= VNODE_ATTR_va_data_size;
1011 #endif
1012
1013 /* Mark them all at once instead of individual VATTR_SET_SUPPORTED calls. */
1014 vap->va_supported |= VNODE_ATTR_va_create_time | VNODE_ATTR_va_modify_time |
1015 VNODE_ATTR_va_change_time| VNODE_ATTR_va_backup_time |
1016 VNODE_ATTR_va_iosize | VNODE_ATTR_va_uid |
1017 VNODE_ATTR_va_gid | VNODE_ATTR_va_mode |
1018 VNODE_ATTR_va_flags |VNODE_ATTR_va_fileid |
1019 VNODE_ATTR_va_linkid | VNODE_ATTR_va_parentid |
1020 VNODE_ATTR_va_fsid | VNODE_ATTR_va_filerev |
1021 VNODE_ATTR_va_encoding | VNODE_ATTR_va_rdev;
1022
1023 /* If this is the root, let VFS find out the mount name, which
1024 * may be different from the real name.  Otherwise we need to take care
1025 * of hardlinked files, which may need to be looked up, if necessary.
1026 */
1027 if (VATTR_IS_ACTIVE(vap, va_name) && (cp->c_cnid != kHFSRootFolderID)) {
1028 struct cat_desc linkdesc;
1029 int lockflags;
1030 int uselinkdesc = 0;
1031 cnid_t nextlinkid = 0;
1032 cnid_t prevlinkid = 0;
1033
1034 /* Get the name for ATTR_CMN_NAME. We need to take special care for hardlinks
1035 * here because the info. for the link ID requested by getattrlist may be
1036 * different than what's currently in the cnode. This is because the cnode
1037 * will be filled in with the information for the most recent link ID that went
1038 * through namei/lookup(). If there are competing lookups for hardlinks that point
1039 * to the same inode, one (or more) getattrlist calls could be handed incorrect name information.
1040 * Also, we need to beware of open-unlinked files which could have a namelen of 0.
1041 */
1042
1043 if ((cp->c_flag & C_HARDLINK) &&
1044 ((cp->c_desc.cd_namelen == 0) || (vap->va_linkid != cp->c_cnid))) {
1045 /* If we have no name and our link ID is the raw inode number, then we may
1046 * have an open-unlinked file. Go to the next link in this case.
1047 */
1048 if ((cp->c_desc.cd_namelen == 0) && (vap->va_linkid == cp->c_fileid)) {
1049 if ((error = hfs_lookup_siblinglinks(hfsmp, vap->va_linkid, &prevlinkid, &nextlinkid))){
1050 goto out;
1051 }
1052 }
1053 else {
1054 /* just use link obtained from vap above */
1055 nextlinkid = vap->va_linkid;
1056 }
1057
1058 /* We need to probe the catalog for the descriptor corresponding to the link ID
1059 * stored in nextlinkid. Note that we don't know if we have the exclusive lock
1060 * for the cnode here, so we can't just update the descriptor. Instead,
1061 * we should just store the descriptor's value locally and then use it to pass
1062 * out the name value as needed below.
1063 */
1064 if (nextlinkid){
1065 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
1066 error = cat_findname(hfsmp, nextlinkid, &linkdesc);
1067 hfs_systemfile_unlock(hfsmp, lockflags);
1068 if (error == 0) {
1069 uselinkdesc = 1;
1070 }
1071 }
1072 }
1073
1074 /* By this point, we've either patched up the name above and the c_desc
1075 * points to the correct data, or it already did, in which case we just proceed
1076 * by copying the name into the vap. Note that we will never set va_name to
1077 * supported if nextlinkid is never initialized. This could happen in the degenerate
1078 * case above involving the raw inode number, where it has no nextlinkid. In this case
1079 * we will simply not mark the name bit as supported.
1080 */
1081 if (uselinkdesc) {
1082 strlcpy(vap->va_name, (const char*) linkdesc.cd_nameptr, MAXPATHLEN);
1083 VATTR_SET_SUPPORTED(vap, va_name);
1084 cat_releasedesc(&linkdesc);
1085 }
1086 else if (cp->c_desc.cd_namelen) {
1087 strlcpy(vap->va_name, (const char*) cp->c_desc.cd_nameptr, MAXPATHLEN);
1088 VATTR_SET_SUPPORTED(vap, va_name);
1089 }
1090 }
1091
1092 out:
1093 hfs_unlock(cp);
1094 /*
1095 * We need to vnode_put the rsrc fork vnode only *after* we've released
1096 * the cnode lock, since vnode_put can trigger an inactive call, which
1097 * will go back into HFS and try to acquire a cnode lock.
1098 */
1099 if (rvp) {
1100 vnode_put (rvp);
1101 }
1102
1103 return (error);
1104 }
1105
1106 int
1107 hfs_vnop_setattr(ap)
1108 struct vnop_setattr_args /* {
1109 struct vnode *a_vp;
1110 struct vnode_attr *a_vap;
1111 vfs_context_t a_context;
1112 } */ *ap;
1113 {
1114 struct vnode_attr *vap = ap->a_vap;
1115 struct vnode *vp = ap->a_vp;
1116 struct cnode *cp = NULL;
1117 struct hfsmount *hfsmp;
1118 kauth_cred_t cred = vfs_context_ucred(ap->a_context);
1119 struct proc *p = vfs_context_proc(ap->a_context);
1120 int error = 0;
1121 uid_t nuid;
1122 gid_t ngid;
1123 time_t orig_ctime;
1124
1125 orig_ctime = VTOC(vp)->c_ctime;
1126
1127 #if HFS_COMPRESSION
1128 int decmpfs_reset_state = 0;
1129 /*
1130 * We call decmpfs_update_attributes even if the file is not compressed,
1131 * because we want to update the incoming flags if the xattrs are invalid.
1132 */
1133 error = decmpfs_update_attributes(vp, vap);
1134 if (error)
1135 return error;
1136
1137 //
1138 // if this is not a size-changing setattr and it is not just
1139 // an atime update, then check for a snapshot.
1140 //
1141 if (!VATTR_IS_ACTIVE(vap, va_data_size) && !(vap->va_active == VNODE_ATTR_va_access_time)) {
1142 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_METADATA_MOD, NULL);
1143 }
1144 #endif
1145
1146
1147 #if CONFIG_PROTECT
1148 if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
1149 return (error);
1150 }
1151 #endif /* CONFIG_PROTECT */
1152
1153 hfsmp = VTOHFS(vp);
1154
1155 /* Don't allow modification of the journal. */
1156 if (hfs_is_journal_file(hfsmp, VTOC(vp))) {
1157 return (EPERM);
1158 }
1159
1160 /*
1161 * File size change request.
1162 * We are guaranteed that this is not a directory, and that
1163 * the filesystem object is writeable.
1164 *
1165 * NOTE: HFS COMPRESSION depends on the data_size being set *before* the bsd flags are updated
1166 */
1167 VATTR_SET_SUPPORTED(vap, va_data_size);
1168 if (VATTR_IS_ACTIVE(vap, va_data_size) && !vnode_islnk(vp)) {
1169 #if HFS_COMPRESSION
1170 /* keep the compressed state locked until we're done truncating the file */
1171 decmpfs_cnode *dp = VTOCMP(vp);
1172 if (!dp) {
1173 /*
1174 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1175 * is filled in; we need a decmpfs_cnode to lock out decmpfs state changes
1176 * on this file while it's truncating
1177 */
1178 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1179 if (!dp) {
1180 /* failed to allocate a decmpfs_cnode */
1181 return ENOMEM; /* what should this be? */
1182 }
1183 }
1184
1185 check_for_tracked_file(vp, orig_ctime, vap->va_data_size == 0 ? NAMESPACE_HANDLER_TRUNCATE_OP|NAMESPACE_HANDLER_DELETE_OP : NAMESPACE_HANDLER_TRUNCATE_OP, NULL);
1186
1187 decmpfs_lock_compressed_data(dp, 1);
1188 if (hfs_file_is_compressed(VTOC(vp), 1)) {
1189 error = decmpfs_decompress_file(vp, dp, -1/*vap->va_data_size*/, 0, 1);
1190 if (error != 0) {
1191 decmpfs_unlock_compressed_data(dp, 1);
1192 return error;
1193 }
1194 }
1195 #endif
1196
1197 /* Take truncate lock before taking cnode lock. */
1198 hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK);
1199
1200 /* Perform the ubc_setsize before taking the cnode lock. */
1201 ubc_setsize(vp, vap->va_data_size);
1202
1203 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) {
1204 hfs_unlock_truncate(VTOC(vp), 0);
1205 #if HFS_COMPRESSION
1206 decmpfs_unlock_compressed_data(dp, 1);
1207 #endif
1208 return (error);
1209 }
1210 cp = VTOC(vp);
1211
1212 error = hfs_truncate(vp, vap->va_data_size, vap->va_vaflags & 0xffff, 1, 0, ap->a_context);
1213
1214 hfs_unlock_truncate(cp, 0);
1215 #if HFS_COMPRESSION
1216 decmpfs_unlock_compressed_data(dp, 1);
1217 #endif
1218 if (error)
1219 goto out;
1220 }
1221 if (cp == NULL) {
1222 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
1223 return (error);
1224 cp = VTOC(vp);
1225 }
1226
1227 /*
1228 * If it is just an access time update request by itself
1229 * we know the request is from kernel level code, and we
1230 * can delay it without being as worried about consistency.
1231 * This change speeds up mmaps, in the rare case that they
1232 * get caught behind a sync.
1233 */
1234
1235 if (vap->va_active == VNODE_ATTR_va_access_time) {
1236 cp->c_touch_acctime=TRUE;
1237 goto out;
1238 }
1239
1240
1241
1242 /*
1243 * Owner/group change request.
1244 * We are guaranteed that the new owner/group is valid and legal.
1245 */
1246 VATTR_SET_SUPPORTED(vap, va_uid);
1247 VATTR_SET_SUPPORTED(vap, va_gid);
1248 nuid = VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uid_t)VNOVAL;
1249 ngid = VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (gid_t)VNOVAL;
1250 if (((nuid != (uid_t)VNOVAL) || (ngid != (gid_t)VNOVAL)) &&
1251 ((error = hfs_chown(vp, nuid, ngid, cred, p)) != 0))
1252 goto out;
1253
1254 /*
1255 * Mode change request.
1256 * We are guaranteed that the mode value is valid and that in
1257 * conjunction with the owner and group, this change is legal.
1258 */
1259 VATTR_SET_SUPPORTED(vap, va_mode);
1260 if (VATTR_IS_ACTIVE(vap, va_mode) &&
1261 ((error = hfs_chmod(vp, (int)vap->va_mode, cred, p)) != 0))
1262 goto out;
1263
1264 /*
1265 * File flags change.
1266 * We are guaranteed that only flags allowed to change given the
1267 * current securelevel are being changed.
1268 */
1269 VATTR_SET_SUPPORTED(vap, va_flags);
1270 if (VATTR_IS_ACTIVE(vap, va_flags)) {
1271 u_int16_t *fdFlags;
1272
1273 #if HFS_COMPRESSION
1274 if ((cp->c_bsdflags ^ vap->va_flags) & UF_COMPRESSED) {
1275 /*
1276 * the UF_COMPRESSED was toggled, so reset our cached compressed state
1277 * but we don't want to actually do the update until we've released the cnode lock down below
1278 * NOTE: turning the flag off doesn't actually decompress the file, so that we can
1279 * turn off the flag and look at the "raw" file for debugging purposes
1280 */
1281 decmpfs_reset_state = 1;
1282 }
1283 #endif
1284
1285 cp->c_bsdflags = vap->va_flags;
1286 cp->c_touch_chgtime = TRUE;
1287
1288 /*
1289 * Mirror the UF_HIDDEN flag to the invisible bit of the Finder Info.
1290 *
1291 * The fdFlags for files and frFlags for folders are both 8 bytes
1292 * into the userInfo (the first 16 bytes of the Finder Info). They
1293 * are both 16-bit fields.
1294 */
1295 fdFlags = (u_int16_t *) &cp->c_finderinfo[8];
1296 if (vap->va_flags & UF_HIDDEN)
1297 *fdFlags |= OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1298 else
1299 *fdFlags &= ~OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1300 }
1301
1302 /*
1303 * Timestamp updates.
1304 */
1305 VATTR_SET_SUPPORTED(vap, va_create_time);
1306 VATTR_SET_SUPPORTED(vap, va_access_time);
1307 VATTR_SET_SUPPORTED(vap, va_modify_time);
1308 VATTR_SET_SUPPORTED(vap, va_backup_time);
1309 VATTR_SET_SUPPORTED(vap, va_change_time);
1310 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
1311 VATTR_IS_ACTIVE(vap, va_access_time) ||
1312 VATTR_IS_ACTIVE(vap, va_modify_time) ||
1313 VATTR_IS_ACTIVE(vap, va_backup_time)) {
1314 if (VATTR_IS_ACTIVE(vap, va_create_time))
1315 cp->c_itime = vap->va_create_time.tv_sec;
1316 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
1317 cp->c_atime = vap->va_access_time.tv_sec;
1318 cp->c_touch_acctime = FALSE;
1319 }
1320 if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
1321 cp->c_mtime = vap->va_modify_time.tv_sec;
1322 cp->c_touch_modtime = FALSE;
1323 cp->c_touch_chgtime = TRUE;
1324
1325 /*
1326 * The utimes system call can reset the modification
1327 * time but it doesn't know about HFS create times.
1328 * So we need to ensure that the creation time is
1329 * always at least as old as the modification time.
1330 */
1331 if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
1332 (cp->c_cnid != kHFSRootFolderID) &&
1333 (cp->c_mtime < cp->c_itime)) {
1334 cp->c_itime = cp->c_mtime;
1335 }
1336 }
1337 if (VATTR_IS_ACTIVE(vap, va_backup_time))
1338 cp->c_btime = vap->va_backup_time.tv_sec;
1339 cp->c_flag |= C_MODIFIED;
1340 }
1341
1342 /*
1343 * Set name encoding.
1344 */
1345 VATTR_SET_SUPPORTED(vap, va_encoding);
1346 if (VATTR_IS_ACTIVE(vap, va_encoding)) {
1347 cp->c_encoding = vap->va_encoding;
1348 hfs_setencodingbits(hfsmp, cp->c_encoding);
1349 }
1350
1351 if ((error = hfs_update(vp, TRUE)) != 0)
1352 goto out;
1353 out:
1354 if (cp) {
1355 /* Purge origin cache for cnode, since caller now has correct link ID for it
1356 * We purge it here since it was acquired for us during lookup, and we no longer need it.
1357 */
1358 if ((cp->c_flag & C_HARDLINK) && (vp->v_type != VDIR)){
1359 hfs_relorigin(cp, 0);
1360 }
1361
1362 hfs_unlock(cp);
1363 #if HFS_COMPRESSION
1364 if (decmpfs_reset_state) {
1365 /*
1366 * we've changed the UF_COMPRESSED flag, so reset the decmpfs state for this cnode
1367 * but don't do it while holding the hfs cnode lock
1368 */
1369 decmpfs_cnode *dp = VTOCMP(vp);
1370 if (!dp) {
1371 /*
1372 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1373 * is filled in; we need a decmpfs_cnode to prevent decmpfs state changes
1374 * on this file if it's locked
1375 */
1376 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1377 if (!dp) {
1378 /* failed to allocate a decmpfs_cnode */
1379 return ENOMEM; /* what should this be? */
1380 }
1381 }
1382 decmpfs_cnode_set_vnode_state(dp, FILE_TYPE_UNKNOWN, 0);
1383 }
1384 #endif
1385 }
1386 return (error);
1387 }
1388
1389
1390 /*
1391 * Change the mode on a file.
1392 * cnode must be locked before calling.
1393 */
1394 int
1395 hfs_chmod(struct vnode *vp, int mode, __unused kauth_cred_t cred, __unused struct proc *p)
1396 {
1397 register struct cnode *cp = VTOC(vp);
1398
1399 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1400 return (0);
1401
1402 // Don't allow modification of the journal or journal_info_block
1403 if (hfs_is_journal_file(VTOHFS(vp), cp)) {
1404 return EPERM;
1405 }
1406
1407 #if OVERRIDE_UNKNOWN_PERMISSIONS
1408 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS) {
1409 return (0);
1410 };
1411 #endif
1412 cp->c_mode &= ~ALLPERMS;
1413 cp->c_mode |= (mode & ALLPERMS);
1414 cp->c_touch_chgtime = TRUE;
1415 return (0);
1416 }
1417
1418
1419 int
1420 hfs_write_access(struct vnode *vp, kauth_cred_t cred, struct proc *p, Boolean considerFlags)
1421 {
1422 struct cnode *cp = VTOC(vp);
1423 int retval = 0;
1424 int is_member;
1425
1426 /*
1427 * Disallow write attempts on read-only file systems;
1428 * unless the file is a socket, fifo, or a block or
1429 * character device resident on the file system.
1430 */
1431 switch (vnode_vtype(vp)) {
1432 case VDIR:
1433 case VLNK:
1434 case VREG:
1435 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
1436 return (EROFS);
1437 break;
1438 default:
1439 break;
1440 }
1441
1442 /* If immutable bit set, nobody gets to write it. */
1443 if (considerFlags && (cp->c_bsdflags & IMMUTABLE))
1444 return (EPERM);
1445
1446 /* Otherwise, user id 0 always gets access. */
1447 if (!suser(cred, NULL))
1448 return (0);
1449
1450 /* Otherwise, check the owner. */
1451 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
1452 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
1453
1454 /* Otherwise, check the groups. */
1455 if (kauth_cred_ismember_gid(cred, cp->c_gid, &is_member) == 0 && is_member) {
1456 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
1457 }
1458
1459 /* Otherwise, check everyone else. */
1460 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
1461 }
1462
1463
1464 /*
1465 * Perform chown operation on cnode cp;
1466 * cnode must be locked prior to call.
1467 */
1468 int
1469 #if !QUOTA
1470 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, __unused kauth_cred_t cred,
1471 __unused struct proc *p)
1472 #else
1473 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred,
1474 __unused struct proc *p)
1475 #endif
1476 {
1477 register struct cnode *cp = VTOC(vp);
1478 uid_t ouid;
1479 gid_t ogid;
1480 #if QUOTA
1481 int error = 0;
1482 register int i;
1483 int64_t change;
1484 #endif /* QUOTA */
1485
1486 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1487 return (ENOTSUP);
1488
1489 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS)
1490 return (0);
1491
1492 if (uid == (uid_t)VNOVAL)
1493 uid = cp->c_uid;
1494 if (gid == (gid_t)VNOVAL)
1495 gid = cp->c_gid;
1496
1497 #if 0 /* we are guaranteed that this is already the case */
1498 /*
1499 * If we don't own the file, are trying to change the owner
1500 * of the file, or are not a member of the target group,
1501 * the caller must be superuser or the call fails.
1502 */
1503 if ((kauth_cred_getuid(cred) != cp->c_uid || uid != cp->c_uid ||
1504 (gid != cp->c_gid &&
1505 (kauth_cred_ismember_gid(cred, gid, &is_member) || !is_member))) &&
1506 (error = suser(cred, 0)))
1507 return (error);
1508 #endif
1509
1510 ogid = cp->c_gid;
1511 ouid = cp->c_uid;
1512 #if QUOTA
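/*
 * Quota bookkeeping (the classic BSD chown pattern): dquot references for
 * IDs that are not changing are dropped first, so the debits below only
 * touch the quota that actually changes.  The usage is then debited from
 * the old IDs, the new IDs are installed, and the same usage is charged to
 * the new IDs; if that charge fails, the old IDs are restored and
 * re-charged so the accounting stays consistent.
 */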
1513 if ((error = hfs_getinoquota(cp)))
1514 return (error);
1515 if (ouid == uid) {
1516 dqrele(cp->c_dquot[USRQUOTA]);
1517 cp->c_dquot[USRQUOTA] = NODQUOT;
1518 }
1519 if (ogid == gid) {
1520 dqrele(cp->c_dquot[GRPQUOTA]);
1521 cp->c_dquot[GRPQUOTA] = NODQUOT;
1522 }
1523
1524 /*
1525 * Eventually need to account for (fake) a block per directory
1526 * if (vnode_isdir(vp))
1527 * change = VTOHFS(vp)->blockSize;
1528 * else
1529 */
1530
1531 change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
1532 (void) hfs_chkdq(cp, -change, cred, CHOWN);
1533 (void) hfs_chkiq(cp, -1, cred, CHOWN);
1534 for (i = 0; i < MAXQUOTAS; i++) {
1535 dqrele(cp->c_dquot[i]);
1536 cp->c_dquot[i] = NODQUOT;
1537 }
1538 #endif /* QUOTA */
1539 cp->c_gid = gid;
1540 cp->c_uid = uid;
1541 #if QUOTA
1542 if ((error = hfs_getinoquota(cp)) == 0) {
1543 if (ouid == uid) {
1544 dqrele(cp->c_dquot[USRQUOTA]);
1545 cp->c_dquot[USRQUOTA] = NODQUOT;
1546 }
1547 if (ogid == gid) {
1548 dqrele(cp->c_dquot[GRPQUOTA]);
1549 cp->c_dquot[GRPQUOTA] = NODQUOT;
1550 }
1551 if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
1552 if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
1553 goto good;
1554 else
1555 (void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
1556 }
1557 for (i = 0; i < MAXQUOTAS; i++) {
1558 dqrele(cp->c_dquot[i]);
1559 cp->c_dquot[i] = NODQUOT;
1560 }
1561 }
1562 cp->c_gid = ogid;
1563 cp->c_uid = ouid;
1564 if (hfs_getinoquota(cp) == 0) {
1565 if (ouid == uid) {
1566 dqrele(cp->c_dquot[USRQUOTA]);
1567 cp->c_dquot[USRQUOTA] = NODQUOT;
1568 }
1569 if (ogid == gid) {
1570 dqrele(cp->c_dquot[GRPQUOTA]);
1571 cp->c_dquot[GRPQUOTA] = NODQUOT;
1572 }
1573 (void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
1574 (void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
1575 (void) hfs_getinoquota(cp);
1576 }
1577 return (error);
1578 good:
1579 if (hfs_getinoquota(cp))
1580 panic("hfs_chown: lost quota");
1581 #endif /* QUOTA */
1582
1583
1584 /*
1585 * According to the SUSv3 standard, chown() shall mark
1586 * the file's st_ctime field for update.
1587 * (No exceptions mentioned.)
1588 */
1589 cp->c_touch_chgtime = TRUE;
1590 return (0);
1591 }
1592
1593
1594 /*
1595 * The hfs_exchange routine swaps the fork data in two files by
1596 * exchanging some of the information in the cnode. It is used
1597 * to preserve the file ID when updating an existing file, in
1598 * case the file is being tracked through its file ID. Typically
1599 * it's used after creating a new file during a safe-save.
1600 */
1601 int
1602 hfs_vnop_exchange(ap)
1603 struct vnop_exchange_args /* {
1604 struct vnode *a_fvp;
1605 struct vnode *a_tvp;
1606 int a_options;
1607 vfs_context_t a_context;
1608 } */ *ap;
1609 {
1610 struct vnode *from_vp = ap->a_fvp;
1611 struct vnode *to_vp = ap->a_tvp;
1612 struct cnode *from_cp;
1613 struct cnode *to_cp;
1614 struct hfsmount *hfsmp;
1615 struct cat_desc tempdesc;
1616 struct cat_attr tempattr;
1617 const unsigned char *from_nameptr;
1618 const unsigned char *to_nameptr;
1619 char from_iname[32];
1620 char to_iname[32];
1621 u_int32_t tempflag;
1622 cnid_t from_parid;
1623 cnid_t to_parid;
1624 int lockflags;
1625 int error = 0, started_tr = 0, got_cookie = 0;
1626 cat_cookie_t cookie;
1627 time_t orig_from_ctime, orig_to_ctime;
1628
1629 /* The files must be on the same volume. */
1630 if (vnode_mount(from_vp) != vnode_mount(to_vp))
1631 return (EXDEV);
1632
1633 if (from_vp == to_vp)
1634 return (EINVAL);
1635
1636 orig_from_ctime = VTOC(from_vp)->c_ctime;
1637 orig_to_ctime = VTOC(to_vp)->c_ctime;
1638
1639
1640 #if CONFIG_PROTECT
1641 /*
1642 * Do not allow exchangedata/F_MOVEDATAEXTENTS on data-protected filesystems
1643 * because the EAs will not be swapped. As a result, the persistent keys would not
1644 * match and the files will be garbage.
1645 */
1646 if (cp_fs_protected (vnode_mount(from_vp))) {
1647 return EINVAL;
1648 }
1649 #endif
1650
1651 #if HFS_COMPRESSION
1652 if ( hfs_file_is_compressed(VTOC(from_vp), 0) ) {
1653 if ( 0 != ( error = decmpfs_decompress_file(from_vp, VTOCMP(from_vp), -1, 0, 1) ) ) {
1654 return error;
1655 }
1656 }
1657
1658 if ( hfs_file_is_compressed(VTOC(to_vp), 0) ) {
1659 if ( 0 != ( error = decmpfs_decompress_file(to_vp, VTOCMP(to_vp), -1, 0, 1) ) ) {
1660 return error;
1661 }
1662 }
1663 #endif // HFS_COMPRESSION
1664
1665 /*
1666 * Normally, we want to notify the user handlers about the event,
1667 * except if it's a handler driving the event.
1668 */
1669 if ((ap->a_options & FSOPT_EXCHANGE_DATA_ONLY) == 0) {
1670 check_for_tracked_file(from_vp, orig_from_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
1671 check_for_tracked_file(to_vp, orig_to_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
1672 } else {
1673 /*
1674 * We're doing a data-swap.
1675 * Take the truncate lock/cnode lock, then verify there are no mmap references.
1676 * Issue a hfs_filedone to flush out all of the remaining state for this file.
1677 * Allow the rest of the codeflow to re-acquire the cnode locks in order.
1678 */
1679
1680 hfs_lock_truncate (VTOC(from_vp), HFS_SHARED_LOCK);
1681
1682 if ((error = hfs_lock(VTOC(from_vp), HFS_EXCLUSIVE_LOCK))) {
1683 hfs_unlock_truncate (VTOC(from_vp), 0);
1684 return error;
1685 }
1686
1687 /* Verify the source file is not in use by anyone besides us (including mmap refs) */
1688 if (vnode_isinuse(from_vp, 1)) {
1689 error = EBUSY;
1690 hfs_unlock(VTOC(from_vp));
1691 hfs_unlock_truncate (VTOC(from_vp), 0);
1692 return error;
1693 }
1694
1695 /* Flush out the data in the source file */
1696 VTOC(from_vp)->c_flag |= C_SWAPINPROGRESS;
1697 error = hfs_filedone (from_vp, ap->a_context);
1698 VTOC(from_vp)->c_flag &= ~C_SWAPINPROGRESS;
1699 hfs_unlock(VTOC(from_vp));
1700 hfs_unlock_truncate(VTOC(from_vp), 0);
1701
1702 if (error) {
1703 return error;
1704 }
1705 }
1706
1707 if ((error = hfs_lockpair(VTOC(from_vp), VTOC(to_vp), HFS_EXCLUSIVE_LOCK)))
1708 return (error);
1709
1710 from_cp = VTOC(from_vp);
1711 to_cp = VTOC(to_vp);
1712 hfsmp = VTOHFS(from_vp);
1713
1714 /* Only normal files can be exchanged. */
1715 if (!vnode_isreg(from_vp) || !vnode_isreg(to_vp) ||
1716 VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp)) {
1717 error = EINVAL;
1718 goto exit;
1719 }
1720
1721 // Don't allow modification of the journal or journal_info_block
1722 if (hfs_is_journal_file(hfsmp, from_cp) ||
1723 hfs_is_journal_file(hfsmp, to_cp)) {
1724 error = EPERM;
1725 goto exit;
1726 }
1727
1728 /*
1729 * Ok, now that all of the pre-flighting is done, call the underlying
1730 * function if needed.
1731 */
1732 if (ap->a_options & FSOPT_EXCHANGE_DATA_ONLY) {
1733 error = hfs_movedata(from_vp, to_vp);
1734 goto exit;
1735 }
1736
1737
1738 if ((error = hfs_start_transaction(hfsmp)) != 0) {
1739 goto exit;
1740 }
1741 started_tr = 1;
1742
1743 /*
1744 * Reserve some space in the Catalog file.
1745 */
1746 if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, vfs_context_proc(ap->a_context)))) {
1747 goto exit;
1748 }
1749 got_cookie = 1;
1750
1751 /* The backend code always tries to delete the virtual
1752 * extent id for exchanging files so we need to lock
1753 * the extents b-tree.
1754 */
1755 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
1756
1757 /* Account for the location of the catalog objects. */
1758 if (from_cp->c_flag & C_HARDLINK) {
1759 MAKE_INODE_NAME(from_iname, sizeof(from_iname),
1760 from_cp->c_attr.ca_linkref);
1761 from_nameptr = (unsigned char *)from_iname;
1762 from_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
1763 from_cp->c_hint = 0;
1764 } else {
1765 from_nameptr = from_cp->c_desc.cd_nameptr;
1766 from_parid = from_cp->c_parentcnid;
1767 }
1768 if (to_cp->c_flag & C_HARDLINK) {
1769 MAKE_INODE_NAME(to_iname, sizeof(to_iname),
1770 to_cp->c_attr.ca_linkref);
1771 to_nameptr = (unsigned char *)to_iname;
1772 to_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
1773 to_cp->c_hint = 0;
1774 } else {
1775 to_nameptr = to_cp->c_desc.cd_nameptr;
1776 to_parid = to_cp->c_parentcnid;
1777 }
1778
1779 /* Do the exchange */
1780 error = ExchangeFileIDs(hfsmp, from_nameptr, to_nameptr, from_parid,
1781 to_parid, from_cp->c_hint, to_cp->c_hint);
1782 hfs_systemfile_unlock(hfsmp, lockflags);
1783
1784 /*
1785 * Note that we don't need to exchange any extended attributes
1786 * since the attributes are keyed by file ID.
1787 */
1788
1789 if (error != E_NONE) {
1790 error = MacToVFSError(error);
1791 goto exit;
1792 }
1793
1794 /* Purge the vnodes from the name cache */
1795 if (from_vp)
1796 cache_purge(from_vp);
1797 if (to_vp)
1798 cache_purge(to_vp);
1799
1800 /* Save a copy of from attributes before swapping. */
1801 bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
1802 bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));
1803 tempflag = from_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
1804
1805 /*
1806 * Swap the descriptors and all non-fork related attributes.
1807 * (except the modify date)
1808 */
1809 bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));
1810
1811 from_cp->c_hint = 0;
1812 from_cp->c_fileid = from_cp->c_cnid;
1813 from_cp->c_itime = to_cp->c_itime;
1814 from_cp->c_btime = to_cp->c_btime;
1815 from_cp->c_atime = to_cp->c_atime;
1816 from_cp->c_ctime = to_cp->c_ctime;
1817 from_cp->c_gid = to_cp->c_gid;
1818 from_cp->c_uid = to_cp->c_uid;
1819 from_cp->c_bsdflags = to_cp->c_bsdflags;
1820 from_cp->c_mode = to_cp->c_mode;
1821 from_cp->c_linkcount = to_cp->c_linkcount;
1822 from_cp->c_flag = to_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
1823 from_cp->c_attr.ca_recflags = to_cp->c_attr.ca_recflags;
1824 bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);
1825
1826 bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
1827 to_cp->c_hint = 0;
1828 to_cp->c_fileid = to_cp->c_cnid;
1829 to_cp->c_itime = tempattr.ca_itime;
1830 to_cp->c_btime = tempattr.ca_btime;
1831 to_cp->c_atime = tempattr.ca_atime;
1832 to_cp->c_ctime = tempattr.ca_ctime;
1833 to_cp->c_gid = tempattr.ca_gid;
1834 to_cp->c_uid = tempattr.ca_uid;
1835 to_cp->c_bsdflags = tempattr.ca_flags;
1836 to_cp->c_mode = tempattr.ca_mode;
1837 to_cp->c_linkcount = tempattr.ca_linkcount;
1838 to_cp->c_flag = tempflag;
1839 to_cp->c_attr.ca_recflags = tempattr.ca_recflags;
1840 bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);
1841
1842 /* Rehash the cnodes using their new file IDs */
1843 hfs_chash_rehash(hfsmp, from_cp, to_cp);
1844
1845 /*
1846 * When a file moves out of "Cleanup At Startup"
1847 * we can drop its NODUMP status.
1848 */
1849 if ((from_cp->c_bsdflags & UF_NODUMP) &&
1850 (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
1851 from_cp->c_bsdflags &= ~UF_NODUMP;
1852 from_cp->c_touch_chgtime = TRUE;
1853 }
1854 if ((to_cp->c_bsdflags & UF_NODUMP) &&
1855 (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
1856 to_cp->c_bsdflags &= ~UF_NODUMP;
1857 to_cp->c_touch_chgtime = TRUE;
1858 }
1859
1860 exit:
1861 if (got_cookie) {
1862 cat_postflight(hfsmp, &cookie, vfs_context_proc(ap->a_context));
1863 }
1864 if (started_tr) {
1865 hfs_end_transaction(hfsmp);
1866 }
1867
1868 hfs_unlockpair(from_cp, to_cp);
1869 return (error);
1870 }
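/*
 * (hfs_vnop_exchange is reached through VNOP_EXCHANGE, which backs the
 * exchangedata(2) system call from user space.)
 */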
1871
1872 int
1873 hfs_vnop_mmap(struct vnop_mmap_args *ap)
1874 {
1875 struct vnode *vp = ap->a_vp;
1876 int error;
1877
1878 if (VNODE_IS_RSRC(vp)) {
1879 /* allow pageins of the resource fork */
1880 } else {
1881 int compressed = hfs_file_is_compressed(VTOC(vp), 1); /* 1 == don't take the cnode lock */
1882 time_t orig_ctime = VTOC(vp)->c_ctime;
1883
1884 if (!compressed && (VTOC(vp)->c_bsdflags & UF_COMPRESSED)) {
1885 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
1886 if (error != 0) {
1887 return error;
1888 }
1889 }
1890
1891 if (ap->a_fflags & PROT_WRITE) {
1892 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
1893 }
1894 }
1895
1896 //
1897 // NOTE: we return ENOTSUP because we want the cluster layer
1898 // to actually do all the real work.
1899 //
1900 return (ENOTSUP);
1901 }
1902
1903 /*
1904 * hfs_movedata
1905 *
1906 * This is a non-symmetric variant of exchangedata. In this function,
1907 * the contents of the fork in from_vp are moved to the fork
1908 * specified by to_vp.
1909 *
1910 * The cnodes pointed to by 'from_vp' and 'to_vp' must be locked.
1911 *
1912 * The vnode pointed to by 'to_vp' *must* be empty prior to invoking this function.
1913 * We impose this restriction because we may not be able to fully delete the entire
1914 * file's contents in a single transaction, particularly if it has a lot of extents.
1915 * In the normal file deletion codepath, the file is screened for two conditions:
1916 * 1) bigger than 400MB, and 2) more than 8 extents. If so, the file is relocated to
1917 * the hidden directory and the deletion is broken up into multiple truncates. We can't
1918 * do that here because both files need to exist in the namespace. The main reason this
1919 * is imposed is that we may have to touch a whole lot of bitmap blocks if there are
1920 * many extents.
1921 *
1922 * Any data written to 'from_vp' after this call completes is not guaranteed
1923 * to be moved.
1924 *
1925 * Arguments:
1926 * vnode from_vp: source file
1927 * vnode to_vp: destination file; must be empty
1928 *
1929 * Returns:
1930 * EFBIG - Destination file was not empty
1931 * 0 - success
1932 *
1933 *
1934 */
1935 int hfs_movedata (struct vnode *from_vp, struct vnode *to_vp) {
1936
1937 struct cnode *from_cp;
1938 struct cnode *to_cp;
1939 struct hfsmount *hfsmp = NULL;
1940 int error = 0;
1941 int started_tr = 0;
1942 int lockflags = 0;
1943 int overflow_blocks;
1944 int rsrc = 0;
1945
1946
1947 /* Get the HFS pointers */
1948 from_cp = VTOC(from_vp);
1949 to_cp = VTOC(to_vp);
1950 hfsmp = VTOHFS(from_vp);
1951
1952 /* Verify that neither source/dest file is open-unlinked */
1953 if (from_cp->c_flag & (C_DELETED | C_NOEXISTS)) {
1954 error = EBUSY;
1955 goto movedata_exit;
1956 }
1957
1958 if (to_cp->c_flag & (C_DELETED | C_NOEXISTS)) {
1959 error = EBUSY;
1960 goto movedata_exit;
1961 }
1962
1963 /*
1964 * Verify the source file is not in use by anyone besides us.
1965 *
1966 * This function is typically invoked by a namespace handler
1967 * process responding to a temporarily stalled system call.
1968 * The FD that it is working off of is opened O_EVTONLY, so
1969 * it really has no active usecounts (the kusecount from O_EVTONLY
1970 * is subtracted from the total usecounts).
1971 *
1972 * As a result, we shouldn't have any active usecounts against
1973 * this vnode when we go to check it below.
1974 */
1975 if (vnode_isinuse(from_vp, 0)) {
1976 error = EBUSY;
1977 goto movedata_exit;
1978 }
1979
1980 if (from_cp->c_rsrc_vp == from_vp) {
1981 rsrc = 1;
1982 }
1983
1984 /*
1985 * We assume that the destination file is already empty.
1986 * Verify that it is.
1987 */
1988 if (rsrc) {
1989 if (to_cp->c_rsrcfork->ff_size > 0) {
1990 error = EFBIG;
1991 goto movedata_exit;
1992 }
1993 }
1994 else {
1995 if (to_cp->c_datafork->ff_size > 0) {
1996 error = EFBIG;
1997 goto movedata_exit;
1998 }
1999 }
2000
2001 /* If the source has the rsrc open, make sure the destination is also the rsrc */
2002 if (rsrc) {
2003 if (to_vp != to_cp->c_rsrc_vp) {
2004 error = EINVAL;
2005 goto movedata_exit;
2006 }
2007 }
2008 else {
2009 /* Verify that both forks are data forks */
2010 if (to_vp != to_cp->c_vp) {
2011 error = EINVAL;
2012 goto movedata_exit;
2013 }
2014 }
2015
2016 /*
2017 * See if the source file has overflow extents. If it doesn't, we don't
2018 * need to call into MoveData, and the catalog will be enough.
2019 */
2020 if (rsrc) {
2021 overflow_blocks = overflow_extents(from_cp->c_rsrcfork);
2022 }
2023 else {
2024 overflow_blocks = overflow_extents(from_cp->c_datafork);
2025 }
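/*
 * (overflow_blocks is nonzero only when the fork has extents beyond the
 * eight descriptors kept in its catalog record; hfs_move_fork() copies
 * those eight directly, and MoveData() below handles any overflow records.)
 */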
2026
2027 if ((error = hfs_start_transaction (hfsmp)) != 0) {
2028 goto movedata_exit;
2029 }
2030 started_tr = 1;
2031
2032 /* Lock the system files: catalog, extents, attributes */
2033 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
2034
2035 /* Copy over any catalog allocation data into the new spot. */
2036 if (rsrc) {
2037 if ((error = hfs_move_fork (from_cp->c_rsrcfork, from_cp, to_cp->c_rsrcfork, to_cp))){
2038 hfs_systemfile_unlock(hfsmp, lockflags);
2039 goto movedata_exit;
2040 }
2041 }
2042 else {
2043 if ((error = hfs_move_fork (from_cp->c_datafork, from_cp, to_cp->c_datafork, to_cp))) {
2044 hfs_systemfile_unlock(hfsmp, lockflags);
2045 goto movedata_exit;
2046 }
2047 }
2048
2049 /*
2050 * Note that because all we're doing is moving the extents around, we can
2051 * probably do this in a single transaction: Each extent record (group of 8)
2052 	 * is 64 bytes. An extent overflow B-Tree node is typically 4k. This means
2053 	 * each node can hold roughly 60 extent records (== 480 extents).
2054 	 *
2055 	 * If a file was massively fragmented and had 20k extents, this means we'd
2056 	 * roughly touch 20k/480 == 41 to 42 nodes, plus the index nodes, for each half
2057 	 * of the operation (inserting or deleting). So if we're manipulating 80-100
2058 * nodes, this is basically 320k of data to write to the journal in
2059 * a bad case.
2060 */
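/*
 * (Spelled out: 20k extents / 8 per record = 2500 records; 2500 / ~60
 * records per node ~= 42 leaf nodes for each half of the operation, so
 * ~80-100 nodes once both halves and the index nodes are counted;
 * 80 nodes * 4k ~= 320k of journaled data.)
 */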
2061 if (overflow_blocks != 0) {
2062 if (rsrc) {
2063 error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 1);
2064 }
2065 else {
2066 error = MoveData (hfsmp, from_cp->c_cnid, to_cp->c_cnid, 0);
2067 }
2068 }
2069
2070 if (error) {
2071 /* Reverse the operation. Copy the fork data back into the source */
2072 if (rsrc) {
2073 hfs_move_fork (to_cp->c_rsrcfork, to_cp, from_cp->c_rsrcfork, from_cp);
2074 }
2075 else {
2076 hfs_move_fork (to_cp->c_datafork, to_cp, from_cp->c_datafork, from_cp);
2077 }
2078 }
2079 else {
2080 struct cat_fork *src_data = NULL;
2081 struct cat_fork *src_rsrc = NULL;
2082 struct cat_fork *dst_data = NULL;
2083 struct cat_fork *dst_rsrc = NULL;
2084
2085 	/* Touch the times */
2086 to_cp->c_touch_acctime = TRUE;
2087 to_cp->c_touch_chgtime = TRUE;
2088 to_cp->c_touch_modtime = TRUE;
2089
2090 from_cp->c_touch_acctime = TRUE;
2091 from_cp->c_touch_chgtime = TRUE;
2092 from_cp->c_touch_modtime = TRUE;
2093
2094 hfs_touchtimes(hfsmp, to_cp);
2095 hfs_touchtimes(hfsmp, from_cp);
2096
2097 if (from_cp->c_datafork) {
2098 src_data = &from_cp->c_datafork->ff_data;
2099 }
2100 if (from_cp->c_rsrcfork) {
2101 src_rsrc = &from_cp->c_rsrcfork->ff_data;
2102 }
2103
2104 if (to_cp->c_datafork) {
2105 dst_data = &to_cp->c_datafork->ff_data;
2106 }
2107 if (to_cp->c_rsrcfork) {
2108 dst_rsrc = &to_cp->c_rsrcfork->ff_data;
2109 }
2110
2111 /* Update the catalog nodes */
2112 (void) cat_update(hfsmp, &from_cp->c_desc, &from_cp->c_attr,
2113 src_data, src_rsrc);
2114
2115 (void) cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr,
2116 dst_data, dst_rsrc);
2117
2118 }
2119 /* unlock the system files */
2120 hfs_systemfile_unlock(hfsmp, lockflags);
2121
2122
2123 movedata_exit:
2124 if (started_tr) {
2125 hfs_end_transaction(hfsmp);
2126 }
2127
2128 return error;
2129
2130 }
2131
2132 /*
2133 * Copy all of the catalog and runtime data in srcfork to dstfork.
2134 *
2135 * This allows us to maintain the invalid ranges across the movedata operation so
2136  * we don't need to force out all of the pending IO right now. In addition, we
2137  * move all of the non-overflow (in-catalog) extents into the destination here.
2138 */
2139 static int hfs_move_fork (struct filefork *srcfork, struct cnode *src_cp,
2140 struct filefork *dstfork, struct cnode *dst_cp) {
2141 struct rl_entry *invalid_range;
2142 int size = sizeof(struct HFSPlusExtentDescriptor);
2143 size = size * kHFSPlusExtentDensity;
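/* 8 descriptors (kHFSPlusExtentDensity) of 8 bytes each: the 64-byte
 * in-catalog extent record that is memcpy'd below. */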
2144
2145 /* If the dstfork has any invalid ranges, bail out */
2146 invalid_range = TAILQ_FIRST(&dstfork->ff_invalidranges);
2147 if (invalid_range != NULL) {
2148 return EFBIG;
2149 }
2150
2151 if (dstfork->ff_data.cf_size != 0 || dstfork->ff_data.cf_new_size != 0) {
2152 return EFBIG;
2153 }
2154
2155 /* First copy the invalid ranges */
2156 while ((invalid_range = TAILQ_FIRST(&srcfork->ff_invalidranges))) {
2157 off_t start = invalid_range->rl_start;
2158 off_t end = invalid_range->rl_end;
2159
2160 /* Remove it from the srcfork and add it to dstfork */
2161 rl_remove(start, end, &srcfork->ff_invalidranges);
2162 rl_add(start, end, &dstfork->ff_invalidranges);
2163 }
2164
2165 /*
2166 * Ignore the ff_union. We don't move symlinks or system files.
2167 * Now copy the in-catalog extent information
2168 */
2169 dstfork->ff_data.cf_size = srcfork->ff_data.cf_size;
2170 dstfork->ff_data.cf_new_size = srcfork->ff_data.cf_new_size;
2171 dstfork->ff_data.cf_vblocks = srcfork->ff_data.cf_vblocks;
2172 dstfork->ff_data.cf_blocks = srcfork->ff_data.cf_blocks;
2173
2174 /* just memcpy the whole array of extents to the new location. */
2175 memcpy (dstfork->ff_data.cf_extents, srcfork->ff_data.cf_extents, size);
2176
2177 /*
2178 * Copy the cnode attribute data.
2179 *
2180 */
2181 src_cp->c_blocks -= srcfork->ff_data.cf_vblocks;
2182 src_cp->c_blocks -= srcfork->ff_data.cf_blocks;
2183
2184 dst_cp->c_blocks += srcfork->ff_data.cf_vblocks;
2185 dst_cp->c_blocks += srcfork->ff_data.cf_blocks;
2186
2187 /* Now delete the entries in the source fork */
2188 srcfork->ff_data.cf_size = 0;
2189 srcfork->ff_data.cf_new_size = 0;
2190 srcfork->ff_data.cf_union.cfu_bytesread = 0;
2191 srcfork->ff_data.cf_vblocks = 0;
2192 srcfork->ff_data.cf_blocks = 0;
2193
2194 /* Zero out the old extents */
2195 bzero (srcfork->ff_data.cf_extents, size);
2196 return 0;
2197 }
2198
2199
2200 /*
2201 * cnode must be locked
2202 */
2203 int
2204 hfs_fsync(struct vnode *vp, int waitfor, int fullsync, struct proc *p)
2205 {
2206 struct cnode *cp = VTOC(vp);
2207 struct filefork *fp = NULL;
2208 int retval = 0;
2209 struct hfsmount *hfsmp = VTOHFS(vp);
2210 struct rl_entry *invalid_range;
2211 struct timeval tv;
2212 int waitdata; /* attributes necessary for data retrieval */
2213 int wait; /* all other attributes (e.g. atime, etc.) */
2214 int lockflag;
2215 int took_trunc_lock = 0;
2216 int locked_buffers = 0;
2217
2218 /*
2219 * Applications which only care about data integrity rather than full
2220 * file integrity may opt out of (delay) expensive metadata update
2221 * operations as a performance optimization.
2222 */
2223 wait = (waitfor == MNT_WAIT);
2224 waitdata = (waitfor == MNT_DWAIT) | wait;
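/* MNT_DWAIT pushes only the file's data (cluster_push/buf flush below);
 * MNT_WAIT additionally forces the metadata updates (hfs_update/hfs_metasync). */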
2225 if (always_do_fullfsync)
2226 fullsync = 1;
2227
2228 /* HFS directories don't have any data blocks. */
2229 if (vnode_isdir(vp))
2230 goto metasync;
2231 fp = VTOF(vp);
2232
2233 /*
2234 	 * For system files, flush the B-tree header, and
2235 	 * for regular files, write out any clusters.
2236 */
2237 if (vnode_issystem(vp)) {
2238 if (VTOF(vp)->fcbBTCBPtr != NULL) {
2239 // XXXdbg
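// With a journal, dirty b-tree nodes are committed as part of journal
// transactions, so the explicit flush is only needed on non-journaled volumes.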
2240 if (hfsmp->jnl == NULL) {
2241 BTFlushPath(VTOF(vp));
2242 }
2243 }
2244 } else if (UBCINFOEXISTS(vp)) {
2245 hfs_unlock(cp);
2246 hfs_lock_truncate(cp, HFS_SHARED_LOCK);
2247 took_trunc_lock = 1;
2248
2249 if (fp->ff_unallocblocks != 0) {
2250 hfs_unlock_truncate(cp, 0);
2251
2252 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
2253 }
2254 /* Don't hold cnode lock when calling into cluster layer. */
2255 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
2256
2257 hfs_lock(cp, HFS_FORCE_LOCK);
2258 }
2259 /*
2260 * When MNT_WAIT is requested and the zero fill timeout
2261 * has expired then we must explicitly zero out any areas
2262 * that are currently marked invalid (holes).
2263 *
2264 * Files with NODUMP can bypass zero filling here.
2265 */
2266 if (fp && (((cp->c_flag & C_ALWAYS_ZEROFILL) && !TAILQ_EMPTY(&fp->ff_invalidranges)) ||
2267 ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
2268 ((cp->c_bsdflags & UF_NODUMP) == 0) &&
2269 UBCINFOEXISTS(vp) && (vnode_issystem(vp) ==0) &&
2270 cp->c_zftimeout != 0))) {
2271
2272 microuptime(&tv);
2273 if ((cp->c_flag & C_ALWAYS_ZEROFILL) == 0 && !fullsync && tv.tv_sec < (long)cp->c_zftimeout) {
2274 /* Remember that a force sync was requested. */
2275 cp->c_flag |= C_ZFWANTSYNC;
2276 goto datasync;
2277 }
2278 if (!TAILQ_EMPTY(&fp->ff_invalidranges)) {
2279 if (!took_trunc_lock || (cp->c_truncatelockowner == HFS_SHARED_OWNER)) {
2280 hfs_unlock(cp);
2281 if (took_trunc_lock) {
2282 hfs_unlock_truncate(cp, 0);
2283 }
2284 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
2285 hfs_lock(cp, HFS_FORCE_LOCK);
2286 took_trunc_lock = 1;
2287 }
2288 while ((invalid_range = TAILQ_FIRST(&fp->ff_invalidranges))) {
2289 off_t start = invalid_range->rl_start;
2290 off_t end = invalid_range->rl_end;
2291
2292 /* The range about to be written must be validated
2293 * first, so that VNOP_BLOCKMAP() will return the
2294 * appropriate mapping for the cluster code:
2295 */
2296 rl_remove(start, end, &fp->ff_invalidranges);
2297
2298 /* Don't hold cnode lock when calling into cluster layer. */
2299 hfs_unlock(cp);
2300 (void) cluster_write(vp, (struct uio *) 0,
2301 fp->ff_size, end + 1, start, (off_t)0,
2302 IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
2303 hfs_lock(cp, HFS_FORCE_LOCK);
2304 cp->c_flag |= C_MODIFIED;
2305 }
2306 hfs_unlock(cp);
2307 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
2308 hfs_lock(cp, HFS_FORCE_LOCK);
2309 }
2310 cp->c_flag &= ~C_ZFWANTSYNC;
2311 cp->c_zftimeout = 0;
2312 }
2313 datasync:
2314 if (took_trunc_lock) {
2315 hfs_unlock_truncate(cp, 0);
2316 took_trunc_lock = 0;
2317 }
2318 /*
2319 	 * if we have a journal and if journal_active() returns != 0, then
2320 * we shouldn't do anything to a locked block (because it is part
2321 * of a transaction). otherwise we'll just go through the normal
2322 * code path and flush the buffer. note journal_active() can return
2323 * -1 if the journal is invalid -- however we still need to skip any
2324 * locked blocks as they get cleaned up when we finish the transaction
2325 * or close the journal.
2326 */
2327 // if (hfsmp->jnl && journal_active(hfsmp->jnl) >= 0)
2328 if (hfsmp->jnl)
2329 lockflag = BUF_SKIP_LOCKED;
2330 else
2331 lockflag = 0;
2332
2333 /*
2334 * Flush all dirty buffers associated with a vnode.
2335 * Record how many of them were dirty AND locked (if necessary).
2336 */
2337 locked_buffers = buf_flushdirtyblks_skipinfo(vp, waitdata, lockflag, "hfs_fsync");
2338 if ((lockflag & BUF_SKIP_LOCKED) && (locked_buffers) && (vnode_vtype(vp) == VLNK)) {
2339 /*
2340 * If there are dirty symlink buffers, then we may need to take action
2341 * to prevent issues later on if we are journaled. If we're fsyncing a
2342 * symlink vnode then we are in one of three cases:
2343 *
2344 * 1) automatic sync has fired. In this case, we don't want the behavior to change.
2345 *
2346 * 2) Someone has opened the FD for the symlink (not what it points to)
2347 * and has issued an fsync against it. This should be rare, and we don't
2348 * want the behavior to change.
2349 *
2350 * 3) We are being called by a vclean which is trying to reclaim this
2351 * symlink vnode. If this is the case, then allowing this fsync to
2352 * proceed WITHOUT flushing the journal could result in the vclean
2353 * invalidating the buffer's blocks before the journal transaction is
2354 * written to disk. To prevent this, we force a journal flush
2355 * if the vnode is in the middle of a recycle (VL_TERMINATE or VL_DEAD is set).
2356 */
2357 if (vnode_isrecycled(vp)) {
2358 fullsync = 1;
2359 }
2360 }
2361
2362 metasync:
2363 if (vnode_isreg(vp) && vnode_issystem(vp)) {
2364 if (VTOF(vp)->fcbBTCBPtr != NULL) {
2365 microuptime(&tv);
2366 BTSetLastSync(VTOF(vp), tv.tv_sec);
2367 }
2368 cp->c_touch_acctime = FALSE;
2369 cp->c_touch_chgtime = FALSE;
2370 cp->c_touch_modtime = FALSE;
2371 } else if ( !(vp->v_flag & VSWAP) ) /* User file */ {
2372 retval = hfs_update(vp, wait);
2373
2374 /*
2375 * When MNT_WAIT is requested push out the catalog record for
2376 * this file. If they asked for a full fsync, we can skip this
2377 * because the journal_flush or hfs_metasync_all will push out
2378 * all of the metadata changes.
2379 */
2380 if ((retval == 0) && wait && !fullsync && cp->c_hint &&
2381 !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
2382 hfs_metasync(VTOHFS(vp), (daddr64_t)cp->c_hint, p);
2383 }
2384
2385 /*
2386 * If this was a full fsync, make sure all metadata
2387 * changes get to stable storage.
2388 */
2389 if (fullsync) {
2390 if (hfsmp->jnl) {
2391 hfs_journal_flush(hfsmp, FALSE);
2392
2393 if (journal_uses_fua(hfsmp->jnl)) {
2394 /*
2395 * the journal_flush did NOT issue a sync track cache command,
2396 * and the fullsync indicates we are supposed to flush all cached
2397 * data to the media, so issue the sync track cache command
2398 * explicitly
2399 */
2400 VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL);
2401 }
2402 } else {
2403 retval = hfs_metasync_all(hfsmp);
2404 /* XXX need to pass context! */
2405 VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL);
2406 }
2407 }
2408 }
2409
2410 return (retval);
2411 }
2412
2413
2414 /* Sync an hfs catalog b-tree node */
2415 int
2416 hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p)
2417 {
2418 vnode_t vp;
2419 buf_t bp;
2420 int lockflags;
2421
2422 vp = HFSTOVCB(hfsmp)->catalogRefNum;
2423
2424 // XXXdbg - don't need to do this on a journaled volume
2425 if (hfsmp->jnl) {
2426 return 0;
2427 }
2428
2429 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
2430 /*
2431 * Look for a matching node that has been delayed
2432 * but is not part of a set (B_LOCKED).
2433 *
2434 * BLK_ONLYVALID causes buf_getblk to return a
2435 * buf_t for the daddr64_t specified only if it's
2436 * currently resident in the cache... the size
2437 * parameter to buf_getblk is ignored when this flag
2438 * is set
2439 */
2440 bp = buf_getblk(vp, node, 0, 0, 0, BLK_META | BLK_ONLYVALID);
2441
2442 if (bp) {
2443 if ((buf_flags(bp) & (B_LOCKED | B_DELWRI)) == B_DELWRI)
2444 (void) VNOP_BWRITE(bp);
2445 else
2446 buf_brelse(bp);
2447 }
2448
2449 hfs_systemfile_unlock(hfsmp, lockflags);
2450
2451 return (0);
2452 }
2453
2454
2455 /*
2456 * Sync all hfs B-trees. Use this instead of journal_flush for a volume
2457 * without a journal. Note that the volume bitmap does not get written;
2458 * we rely on fsck_hfs to fix that up (which it can do without any loss
2459 * of data).
2460 */
2461 int
2462 hfs_metasync_all(struct hfsmount *hfsmp)
2463 {
2464 int lockflags;
2465
2466 /* Lock all of the B-trees so we get a mutually consistent state */
2467 lockflags = hfs_systemfile_lock(hfsmp,
2468 SFL_CATALOG|SFL_EXTENTS|SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
2469
2470 /* Sync each of the B-trees */
2471 if (hfsmp->hfs_catalog_vp)
2472 hfs_btsync(hfsmp->hfs_catalog_vp, 0);
2473 if (hfsmp->hfs_extents_vp)
2474 hfs_btsync(hfsmp->hfs_extents_vp, 0);
2475 if (hfsmp->hfs_attribute_vp)
2476 hfs_btsync(hfsmp->hfs_attribute_vp, 0);
2477
2478 /* Wait for all of the writes to complete */
2479 if (hfsmp->hfs_catalog_vp)
2480 vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_metasync_all");
2481 if (hfsmp->hfs_extents_vp)
2482 vnode_waitforwrites(hfsmp->hfs_extents_vp, 0, 0, 0, "hfs_metasync_all");
2483 if (hfsmp->hfs_attribute_vp)
2484 vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs_metasync_all");
2485
2486 hfs_systemfile_unlock(hfsmp, lockflags);
2487
2488 return 0;
2489 }
2490
2491
2492 /*ARGSUSED 1*/
2493 static int
2494 hfs_btsync_callback(struct buf *bp, __unused void *dummy)
2495 {
2496 buf_clearflags(bp, B_LOCKED);
2497 (void) buf_bawrite(bp);
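/* buf_bawrite issues an asynchronous write; returning BUF_CLAIMED tells
 * buf_iterate that this callback has disposed of the buffer. */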
2498
2499 return(BUF_CLAIMED);
2500 }
2501
2502
2503 int
2504 hfs_btsync(struct vnode *vp, int sync_transaction)
2505 {
2506 struct cnode *cp = VTOC(vp);
2507 struct timeval tv;
2508 int flags = 0;
2509
2510 if (sync_transaction)
2511 flags |= BUF_SKIP_NONLOCKED;
2512 /*
2513 * Flush all dirty buffers associated with b-tree.
2514 */
2515 buf_iterate(vp, hfs_btsync_callback, flags, 0);
2516
2517 microuptime(&tv);
2518 if (vnode_issystem(vp) && (VTOF(vp)->fcbBTCBPtr != NULL))
2519 (void) BTSetLastSync(VTOF(vp), tv.tv_sec);
2520 cp->c_touch_acctime = FALSE;
2521 cp->c_touch_chgtime = FALSE;
2522 cp->c_touch_modtime = FALSE;
2523
2524 return 0;
2525 }
2526
2527 /*
2528 * Remove a directory.
2529 */
2530 int
2531 hfs_vnop_rmdir(ap)
2532 struct vnop_rmdir_args /* {
2533 struct vnode *a_dvp;
2534 struct vnode *a_vp;
2535 struct componentname *a_cnp;
2536 vfs_context_t a_context;
2537 } */ *ap;
2538 {
2539 struct vnode *dvp = ap->a_dvp;
2540 struct vnode *vp = ap->a_vp;
2541 struct cnode *dcp = VTOC(dvp);
2542 struct cnode *cp = VTOC(vp);
2543 int error;
2544 time_t orig_ctime;
2545
2546 orig_ctime = VTOC(vp)->c_ctime;
2547
2548 if (!S_ISDIR(cp->c_mode)) {
2549 return (ENOTDIR);
2550 }
2551 if (dvp == vp) {
2552 return (EINVAL);
2553 }
2554
2555 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
2556 cp = VTOC(vp);
2557
2558 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
2559 return (error);
2560 }
2561
2562 /* Check for a race with rmdir on the parent directory */
2563 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
2564 hfs_unlockpair (dcp, cp);
2565 return ENOENT;
2566 }
2567 error = hfs_removedir(dvp, vp, ap->a_cnp, 0, 0);
2568
2569 hfs_unlockpair(dcp, cp);
2570
2571 return (error);
2572 }
2573
2574 /*
2575 * Remove a directory
2576 *
2577 * Both dvp and vp cnodes are locked
2578 */
2579 int
2580 hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
2581 int skip_reserve, int only_unlink)
2582 {
2583 struct cnode *cp;
2584 struct cnode *dcp;
2585 struct hfsmount * hfsmp;
2586 struct cat_desc desc;
2587 int lockflags;
2588 int error = 0, started_tr = 0;
2589
2590 cp = VTOC(vp);
2591 dcp = VTOC(dvp);
2592 hfsmp = VTOHFS(vp);
2593
2594 if (dcp == cp) {
2595 return (EINVAL); /* cannot remove "." */
2596 }
2597 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
2598 return (0);
2599 }
2600 if (cp->c_entries != 0) {
2601 return (ENOTEMPTY);
2602 }
2603
2604 /*
2605 	 * If the directory is open or in use (e.g. opendir() or current working
2606 	 * directory for some process), wait for inactive/reclaim to actually
2607 	 * remove the cnode from the catalog. Both inactive and reclaim codepaths are capable
2608 * of removing open-unlinked directories from the catalog, as well as getting rid
2609 * of EAs still on the element. So change only_unlink to true, so that it will get
2610 * cleaned up below.
2611 *
2612 * Otherwise, we can get into a weird old mess where the directory has C_DELETED,
2613 * but it really means C_NOEXISTS because the item was actually removed from the
2614 * catalog. Then when we try to remove the entry from the catalog later on, it won't
2615 * really be there anymore.
2616 */
2617 if (vnode_isinuse(vp, 0)) {
2618 only_unlink = 1;
2619 }
2620
2621 /* Deal with directory hardlinks */
2622 if (cp->c_flag & C_HARDLINK) {
2623 /*
2624 * Note that if we have a directory which was a hardlink at any point,
2625 * its actual directory data is stored in the directory inode in the hidden
2626 * directory rather than the leaf element(s) present in the namespace.
2627 *
2628 * If there are still other hardlinks to this directory,
2629 * then we'll just eliminate this particular link and the vnode will still exist.
2630 * If this is the last link to an empty directory, then we'll open-unlink the
2631 * directory and it will be only tagged with C_DELETED (as opposed to C_NOEXISTS).
2632 *
2633 * We could also return EBUSY here.
2634 */
2635
2636 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
2637 }
2638
2639 /*
2640 * In a few cases, we may want to allow the directory to persist in an
2641 * open-unlinked state. If the directory is being open-unlinked (still has usecount
2642 * references), or if it has EAs, or if it was being deleted as part of a rename,
2643 * then we go ahead and move it to the hidden directory.
2644 *
2645 * If the directory is being open-unlinked, then we want to keep the catalog entry
2646 * alive so that future EA calls and fchmod/fstat etc. do not cause issues later.
2647 *
2648 * If the directory had EAs, then we want to use the open-unlink trick so that the
2649 * EA removal is not done in one giant transaction. Otherwise, it could cause a panic
2650 * due to overflowing the journal.
2651 *
2652 * Finally, if it was deleted as part of a rename, we move it to the hidden directory
2653 * in order to maintain rename atomicity.
2654 *
2655 * Note that the allow_dirs argument to hfs_removefile specifies that it is
2656 * supposed to handle directories for this case.
2657 */
2658
2659 if (((hfsmp->hfs_attribute_vp != NULL) &&
2660 ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0)) ||
2661 (only_unlink != 0)) {
2662
2663 int ret = hfs_removefile(dvp, vp, cnp, 0, 0, 1, NULL, only_unlink);
2664 /*
2665 * Even though hfs_vnop_rename calls vnode_recycle for us on tvp we call
2666 * it here just in case we were invoked by rmdir() on a directory that had
2667 * EAs. To ensure that we start reclaiming the space as soon as possible,
2668 * we call vnode_recycle on the directory.
2669 */
2670 vnode_recycle(vp);
2671
2672 return ret;
2673
2674 }
2675
2676 dcp->c_flag |= C_DIR_MODIFICATION;
2677
2678 #if QUOTA
2679 if (hfsmp->hfs_flags & HFS_QUOTAS)
2680 (void)hfs_getinoquota(cp);
2681 #endif
2682 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2683 goto out;
2684 }
2685 started_tr = 1;
2686
2687 /*
2688 * Verify the directory is empty (and valid).
2689 * (Rmdir ".." won't be valid since
2690 * ".." will contain a reference to
2691 * the current directory and thus be
2692 * non-empty.)
2693 */
2694 if ((dcp->c_bsdflags & APPEND) || (cp->c_bsdflags & (IMMUTABLE | APPEND))) {
2695 error = EPERM;
2696 goto out;
2697 }
2698
2699 /* Remove the entry from the namei cache: */
2700 cache_purge(vp);
2701
2702 /*
2703 * Protect against a race with rename by using the component
2704 * name passed in and parent id from dvp (instead of using
2705 * the cp->c_desc which may have changed).
2706 */
2707 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
2708 desc.cd_namelen = cnp->cn_namelen;
2709 desc.cd_parentcnid = dcp->c_fileid;
2710 desc.cd_cnid = cp->c_cnid;
2711 desc.cd_flags = CD_ISDIR;
2712 desc.cd_encoding = cp->c_encoding;
2713 desc.cd_hint = 0;
2714
2715 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
2716 error = 0;
2717 goto out;
2718 }
2719
2720 /* Remove entry from catalog */
2721 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
2722
2723 if (!skip_reserve) {
2724 /*
2725 * Reserve some space in the Catalog file.
2726 */
2727 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
2728 hfs_systemfile_unlock(hfsmp, lockflags);
2729 goto out;
2730 }
2731 }
2732
2733 error = cat_delete(hfsmp, &desc, &cp->c_attr);
2734 if (error == 0) {
2735 /* The parent lost a child */
2736 if (dcp->c_entries > 0)
2737 dcp->c_entries--;
2738 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
2739 dcp->c_dirchangecnt++;
2740 dcp->c_touch_chgtime = TRUE;
2741 dcp->c_touch_modtime = TRUE;
2742 hfs_touchtimes(hfsmp, cp);
2743 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
2744 cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE);
2745 }
2746
2747 hfs_systemfile_unlock(hfsmp, lockflags);
2748
2749 if (error)
2750 goto out;
2751
2752 #if QUOTA
2753 if (hfsmp->hfs_flags & HFS_QUOTAS)
2754 (void)hfs_chkiq(cp, -1, NOCRED, 0);
2755 #endif /* QUOTA */
2756
2757 hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));
2758
2759 /* Mark C_NOEXISTS since the catalog entry is now gone */
2760 cp->c_flag |= C_NOEXISTS;
2761 out:
2762 dcp->c_flag &= ~C_DIR_MODIFICATION;
2763 wakeup((caddr_t)&dcp->c_flag);
2764
2765 if (started_tr) {
2766 hfs_end_transaction(hfsmp);
2767 }
2768
2769 return (error);
2770 }
2771
2772
2773 /*
2774 * Remove a file or link.
2775 */
2776 int
2777 hfs_vnop_remove(ap)
2778 struct vnop_remove_args /* {
2779 struct vnode *a_dvp;
2780 struct vnode *a_vp;
2781 struct componentname *a_cnp;
2782 int a_flags;
2783 vfs_context_t a_context;
2784 } */ *ap;
2785 {
2786 struct vnode *dvp = ap->a_dvp;
2787 struct vnode *vp = ap->a_vp;
2788 struct cnode *dcp = VTOC(dvp);
2789 struct cnode *cp;
2790 struct vnode *rvp = NULL;
2791 int error=0, recycle_rsrc=0;
2792 time_t orig_ctime;
2793 uint32_t rsrc_vid = 0;
2794
2795 if (dvp == vp) {
2796 return (EINVAL);
2797 }
2798
2799 orig_ctime = VTOC(vp)->c_ctime;
2800 if ( (!vnode_isnamedstream(vp)) && ((ap->a_flags & VNODE_REMOVE_SKIP_NAMESPACE_EVENT) == 0)) {
2801 error = check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
2802 if (error) {
2803 // XXXdbg - decide on a policy for handling namespace handler failures!
2804 // for now we just let them proceed.
2805 }
2806 }
2807 error = 0;
2808
2809 cp = VTOC(vp);
2810
2811 relock:
2812
2813 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
2814
2815 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
2816 hfs_unlock_truncate(cp, 0);
2817 if (rvp) {
2818 vnode_put (rvp);
2819 }
2820 return (error);
2821 }
2822
2823 /*
2824 	 * Lazily determine whether there is a valid resource fork
2825 	 * vnode attached to 'cp', if it is a regular file or symlink.
2826 * If the vnode does not exist, then we may proceed without having to
2827 * create it.
2828 *
2829 * If, however, it does exist, then we need to acquire an iocount on the
2830 * vnode after acquiring its vid. This ensures that if we have to do I/O
2831 * against it, it can't get recycled from underneath us in the middle
2832 * of this call.
2833 *
2834 * Note: this function may be invoked for directory hardlinks, so just skip these
2835 * steps if 'vp' is a directory.
2836 */
2837
2838
2839 if ((vp->v_type == VLNK) || (vp->v_type == VREG)) {
2840 if ((cp->c_rsrc_vp) && (rvp == NULL)) {
2841 /* We need to acquire the rsrc vnode */
2842 rvp = cp->c_rsrc_vp;
2843 rsrc_vid = vnode_vid (rvp);
2844
2845 /* Unlock everything to acquire iocount on the rsrc vnode */
2846 hfs_unlock_truncate (cp, 0);
2847 hfs_unlockpair (dcp, cp);
2848
2849 /* Use the vid to maintain identity on rvp */
2850 if (vnode_getwithvid(rvp, rsrc_vid)) {
2851 /*
2852 * If this fails, then it was recycled or
2853 * reclaimed in the interim. Reset fields and
2854 * start over.
2855 */
2856 rvp = NULL;
2857 rsrc_vid = 0;
2858 }
2859 goto relock;
2860 }
2861 }
2862
2863 /*
2864 	 * Check to see if we raced rmdir for the parent directory;
2865 	 * hfs_removefile already checks for a race on vp/cp.
2866 */
2867 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
2868 error = ENOENT;
2869 goto rm_done;
2870 }
2871
2872 error = hfs_removefile(dvp, vp, ap->a_cnp, ap->a_flags, 0, 0, NULL, 0);
2873
2874 /*
2875 * If the remove succeeded in deleting the file, then we may need to mark
2876 * the resource fork for recycle so that it is reclaimed as quickly
2877 * as possible. If it were not recycled quickly, then this resource fork
2878 * vnode could keep a v_parent reference on the data fork, which prevents it
2879 * from going through reclaim (by giving it extra usecounts), except in the force-
2880 * unmount case.
2881 *
2882 * However, a caveat: we need to continue to supply resource fork
2883 * access to open-unlinked files even if the resource fork is not open. This is
2884 * a requirement for the compressed files work. Luckily, hfs_vgetrsrc will handle
2885 * this already if the data fork has been re-parented to the hidden directory.
2886 *
2887 * As a result, all we really need to do here is mark the resource fork vnode
2888 * for recycle. If it goes out of core, it can be brought in again if needed.
2889 * If the cnode was instead marked C_NOEXISTS, then there wouldn't be any
2890 * more work.
2891 */
2892 if ((error == 0) && (rvp)) {
2893 recycle_rsrc = 1;
2894 }
2895
2896 /*
2897 * Drop the truncate lock before unlocking the cnode
2898 * (which can potentially perform a vnode_put and
2899 * recycle the vnode which in turn might require the
2900 * truncate lock)
2901 */
2902 rm_done:
2903 hfs_unlock_truncate(cp, 0);
2904 hfs_unlockpair(dcp, cp);
2905
2906 if (recycle_rsrc) {
2907 /* inactive or reclaim on rvp will clean up the blocks from the rsrc fork */
2908 vnode_recycle(rvp);
2909 }
2910
2911 if (rvp) {
2912 /* drop iocount on rsrc fork, was obtained at beginning of fxn */
2913 vnode_put(rvp);
2914 }
2915
2916 return (error);
2917 }
2918
2919
2920 int
2921 hfs_removefile_callback(struct buf *bp, void *hfsmp) {
2922
2923 if ( !(buf_flags(bp) & B_META))
2924 panic("hfs: symlink bp @ %p is not marked meta-data!\n", bp);
2925 /*
2926 * it's part of the current transaction, kill it.
2927 */
2928 journal_kill_block(((struct hfsmount *)hfsmp)->jnl, bp);
2929
2930 return (BUF_CLAIMED);
2931 }
2932
2933 /*
2934 * hfs_removefile
2935 *
2936 * Similar to hfs_vnop_remove except there are additional options.
2937 * This function may be used to remove directories if they have
2938 * lots of EA's -- note the 'allow_dirs' argument.
2939 *
2940 * This function is able to delete blocks & fork data for the resource
2941 * fork even if it does not exist in core (and have a backing vnode).
2942 * It should infer the correct behavior based on the number of blocks
2943  * in the cnode and whether or not the resource fork pointer exists.
2944  * As a result, one need only pass in the 'vp' corresponding to the
2945 * data fork of this file (or main vnode in the case of a directory).
2946 * Passing in a resource fork will result in an error.
2947 *
2948 * Because we do not create any vnodes in this function, we are not at
2949 * risk of deadlocking against ourselves by double-locking.
2950 *
2951 * Requires cnode and truncate locks to be held.
2952 */
2953 int
2954 hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
2955 int flags, int skip_reserve, int allow_dirs,
2956 __unused struct vnode *rvp, int only_unlink)
2957 {
2958 struct cnode *cp;
2959 struct cnode *dcp;
2960 struct vnode *rsrc_vp = NULL;
2961 struct hfsmount *hfsmp;
2962 struct cat_desc desc;
2963 struct timeval tv;
2964 int dataforkbusy = 0;
2965 int rsrcforkbusy = 0;
2966 int lockflags;
2967 int error = 0;
2968 int started_tr = 0;
2969 int isbigfile = 0, defer_remove=0, isdir=0;
2970 int update_vh = 0;
2971
2972 cp = VTOC(vp);
2973 dcp = VTOC(dvp);
2974 hfsmp = VTOHFS(vp);
2975
2976 /* Check if we lost a race post lookup. */
2977 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
2978 return (0);
2979 }
2980
2981 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
2982 return 0;
2983 }
2984
2985 /* Make sure a remove is permitted */
2986 if (VNODE_IS_RSRC(vp)) {
2987 return (EPERM);
2988 }
2989 else {
2990 /*
2991 * We know it's a data fork.
2992 * Probe the cnode to see if we have a valid resource fork
2993 * in hand or not.
2994 */
2995 rsrc_vp = cp->c_rsrc_vp;
2996 }
2997
2998 /* Don't allow deleting the journal or journal_info_block. */
2999 if (hfs_is_journal_file(hfsmp, cp)) {
3000 return (EPERM);
3001 }
3002
3003 /*
3004 * If removing a symlink, then we need to ensure that the
3005 * data blocks for the symlink are not still in-flight or pending.
3006 * If so, we will unlink the symlink here, making its blocks
3007 * available for re-allocation by a subsequent transaction. That is OK, but
3008 	 * then the I/O for the data blocks could go out before the journal
3009 	 * transaction that created them was flushed, leading to I/O ordering issues.
3010 */
3011 if (vp->v_type == VLNK) {
3012 /*
3013 * This will block if the asynchronous journal flush is in progress.
3014 * If this symlink is not being renamed over and doesn't have any open FDs,
3015 * then we'll remove it from the journal's bufs below in kill_block.
3016 */
3017 buf_wait_for_shadow_io (vp, 0);
3018 }
3019
3020 /*
3021 * Hard links require special handling.
3022 */
3023 if (cp->c_flag & C_HARDLINK) {
3024 if ((flags & VNODE_REMOVE_NODELETEBUSY) && vnode_isinuse(vp, 0)) {
3025 return (EBUSY);
3026 } else {
3027 /* A directory hard link with a link count of one is
3028 * treated as a regular directory. Therefore it should
3029 * only be removed using rmdir().
3030 */
3031 if ((vnode_isdir(vp) == 1) && (cp->c_linkcount == 1) &&
3032 (allow_dirs == 0)) {
3033 return (EPERM);
3034 }
3035 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
3036 }
3037 }
3038
3039 /* Directories should call hfs_rmdir! (unless they have a lot of attributes) */
3040 if (vnode_isdir(vp)) {
3041 if (allow_dirs == 0)
3042 return (EPERM); /* POSIX */
3043 isdir = 1;
3044 }
3045 /* Sanity check the parent ids. */
3046 if ((cp->c_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
3047 (cp->c_parentcnid != dcp->c_fileid)) {
3048 return (EINVAL);
3049 }
3050
3051 dcp->c_flag |= C_DIR_MODIFICATION;
3052
3053 // this guy is going away so mark him as such
3054 cp->c_flag |= C_DELETED;
3055
3056
3057 /* Remove our entry from the namei cache. */
3058 cache_purge(vp);
3059
3060 /*
3061 * If the caller was operating on a file (as opposed to a
3062 * directory with EAs), then we need to figure out
3063 * whether or not it has a valid resource fork vnode.
3064 *
3065 * If there was a valid resource fork vnode, then we need
3066 * to use hfs_truncate to eliminate its data. If there is
3067 * no vnode, then we hold the cnode lock which would
3068 * prevent it from being created. As a result,
3069 * we can use the data deletion functions which do not
3070 * require that a cnode/vnode pair exist.
3071 */
3072
3073 /* Check if this file is being used. */
3074 if (isdir == 0) {
3075 dataforkbusy = vnode_isinuse(vp, 0);
3076 /*
3077 		 * At this point, we know that 'vp' points to
3078 		 * a data fork because we checked it up front. And if
3079 * there is no rsrc fork, rsrc_vp will be NULL.
3080 */
3081 if (rsrc_vp && (cp->c_blocks - VTOF(vp)->ff_blocks)) {
3082 rsrcforkbusy = vnode_isinuse(rsrc_vp, 0);
3083 }
3084 }
3085
3086 /* Check if we have to break the deletion into multiple pieces. */
3087 if (isdir == 0) {
3088 isbigfile = ((cp->c_datafork->ff_size >= HFS_BIGFILE_SIZE) && overflow_extents(VTOF(vp)));
3089 }
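/* (HFS_BIGFILE_SIZE is the large-file threshold, roughly 400MB per the
 * hfs_movedata header comment above; big fragmented files are staged through
 * the hidden directory so their deletion can span multiple transactions.) */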
3090
3091 	/* Check if the file has xattrs. If it does, we'll have to delete them in
3092 	 * individual transactions in case there are too many. */
3093 if ((hfsmp->hfs_attribute_vp != NULL) &&
3094 (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) {
3095 defer_remove = 1;
3096 }
3097
3098 /* If we are explicitly told to only unlink item and move to hidden dir, then do it */
3099 if (only_unlink) {
3100 defer_remove = 1;
3101 }
3102
3103 /*
3104 * Carbon semantics prohibit deleting busy files.
3105 * (enforced when VNODE_REMOVE_NODELETEBUSY is requested)
3106 */
3107 if (dataforkbusy || rsrcforkbusy) {
3108 if ((flags & VNODE_REMOVE_NODELETEBUSY) ||
3109 (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid == 0)) {
3110 error = EBUSY;
3111 goto out;
3112 }
3113 }
3114
3115 #if QUOTA
3116 if (hfsmp->hfs_flags & HFS_QUOTAS)
3117 (void)hfs_getinoquota(cp);
3118 #endif /* QUOTA */
3119
3120 /*
3121 * Do a ubc_setsize to indicate we need to wipe contents if:
3122 * 1) item is a regular file.
3123 * 2) Neither fork is busy AND we are not told to unlink this.
3124 *
3125 * We need to check for the defer_remove since it can be set without
3126 * having a busy data or rsrc fork
3127 */
3128 if (isdir == 0 && (!dataforkbusy || !rsrcforkbusy) && (defer_remove == 0)) {
3129 /*
3130 * A ubc_setsize can cause a pagein so defer it
3131 * until after the cnode lock is dropped. The
3132 * cnode lock cannot be dropped/reacquired here
3133 * since we might already hold the journal lock.
3134 */
3135 if (!dataforkbusy && cp->c_datafork->ff_blocks && !isbigfile) {
3136 cp->c_flag |= C_NEED_DATA_SETSIZE;
3137 }
3138 if (!rsrcforkbusy && rsrc_vp) {
3139 cp->c_flag |= C_NEED_RSRC_SETSIZE;
3140 }
3141 }
3142
3143 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3144 goto out;
3145 }
3146 started_tr = 1;
3147
3148 // XXXdbg - if we're journaled, kill any dirty symlink buffers
3149 if (hfsmp->jnl && vnode_islnk(vp) && (defer_remove == 0)) {
3150 buf_iterate(vp, hfs_removefile_callback, BUF_SKIP_NONLOCKED, (void *)hfsmp);
3151 }
3152
3153 /*
3154 * Prepare to truncate any non-busy forks. Busy forks will
3155 * get truncated when their vnode goes inactive.
3156 * Note that we will only enter this region if we
3157 * can avoid creating an open-unlinked file. If
3158 	 * either fork is busy, we will have to create an
3159 	 * open-unlinked file.
3160 *
3161 * Since we are deleting the file, we need to stagger the runtime
3162 * modifications to do things in such a way that a crash won't
3163 * result in us getting overlapped extents or any other
3164 * bad inconsistencies. As such, we call prepare_release_storage
3165 * which updates the UBC, updates quota information, and releases
3166 * any loaned blocks that belong to this file. No actual
3167 * truncation or bitmap manipulation is done until *AFTER*
3168 * the catalog record is removed.
3169 */
3170 if (isdir == 0 && (!dataforkbusy && !rsrcforkbusy) && (only_unlink == 0)) {
3171
3172 if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) {
3173
3174 error = hfs_prepare_release_storage (hfsmp, vp);
3175 if (error) {
3176 goto out;
3177 }
3178 update_vh = 1;
3179 }
3180
3181 /*
3182 * If the resource fork vnode does not exist, we can skip this step.
3183 */
3184 if (!rsrcforkbusy && rsrc_vp) {
3185 error = hfs_prepare_release_storage (hfsmp, rsrc_vp);
3186 if (error) {
3187 goto out;
3188 }
3189 update_vh = 1;
3190 }
3191 }
3192
3193 /*
3194 * Protect against a race with rename by using the component
3195 * name passed in and parent id from dvp (instead of using
3196 * the cp->c_desc which may have changed). Also, be aware that
3197 * because we allow directories to be passed in, we need to special case
3198 * this temporary descriptor in case we were handed a directory.
3199 */
3200 if (isdir) {
3201 desc.cd_flags = CD_ISDIR;
3202 }
3203 else {
3204 desc.cd_flags = 0;
3205 }
3206 desc.cd_encoding = cp->c_desc.cd_encoding;
3207 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3208 desc.cd_namelen = cnp->cn_namelen;
3209 desc.cd_parentcnid = dcp->c_fileid;
3210 desc.cd_hint = cp->c_desc.cd_hint;
3211 desc.cd_cnid = cp->c_cnid;
3212 microtime(&tv);
3213
3214 /*
3215 * There are two cases to consider:
3216 * 1. File/Dir is busy/big/defer_remove ==> move/rename the file/dir
3217 * 2. File is not in use ==> remove the file
3218 *
3219 * We can get a directory in case 1 because it may have had lots of attributes,
3220 * which need to get removed here.
3221 */
3222 if (dataforkbusy || rsrcforkbusy || isbigfile || defer_remove) {
3223 char delname[32];
3224 struct cat_desc to_desc;
3225 struct cat_desc todir_desc;
3226
3227 /*
3228 * Orphan this file or directory (move to hidden directory).
3229 * Again, we need to take care that we treat directories as directories,
3230 		 * and files as files. Because directories with attributes can be passed in,
3231 		 * check to make sure that we have a directory or a file before filling in the
3232 * temporary descriptor's flags. We keep orphaned directories AND files in
3233 * the FILE_HARDLINKS private directory since we're generalizing over all
3234 * orphaned filesystem objects.
3235 */
3236 bzero(&todir_desc, sizeof(todir_desc));
3237 todir_desc.cd_parentcnid = 2;
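/* 2 == kHFSRootFolderID: the hidden private directory is a child of the volume root */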
3238
3239 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
3240 bzero(&to_desc, sizeof(to_desc));
3241 to_desc.cd_nameptr = (const u_int8_t *)delname;
3242 to_desc.cd_namelen = strlen(delname);
3243 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
3244 if (isdir) {
3245 to_desc.cd_flags = CD_ISDIR;
3246 }
3247 else {
3248 to_desc.cd_flags = 0;
3249 }
3250 to_desc.cd_cnid = cp->c_cnid;
3251
3252 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
3253 if (!skip_reserve) {
3254 if ((error = cat_preflight(hfsmp, CAT_RENAME, NULL, 0))) {
3255 hfs_systemfile_unlock(hfsmp, lockflags);
3256 goto out;
3257 }
3258 }
3259
3260 error = cat_rename(hfsmp, &desc, &todir_desc,
3261 &to_desc, (struct cat_desc *)NULL);
3262
3263 if (error == 0) {
3264 hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries++;
3265 if (isdir == 1) {
3266 INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
3267 }
3268 (void) cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
3269 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
3270
3271 /* Update the parent directory */
3272 if (dcp->c_entries > 0)
3273 dcp->c_entries--;
3274 if (isdir == 1) {
3275 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
3276 }
3277 dcp->c_dirchangecnt++;
3278 dcp->c_ctime = tv.tv_sec;
3279 dcp->c_mtime = tv.tv_sec;
3280 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
3281
3282 /* Update the file or directory's state */
3283 cp->c_flag |= C_DELETED;
3284 cp->c_ctime = tv.tv_sec;
3285 --cp->c_linkcount;
3286 (void) cat_update(hfsmp, &to_desc, &cp->c_attr, NULL, NULL);
3287 }
3288 hfs_systemfile_unlock(hfsmp, lockflags);
3289 if (error)
3290 goto out;
3291
3292 }
3293 else {
3294 /*
3295 * Nobody is using this item; we can safely remove everything.
3296 */
3297 struct filefork *temp_rsrc_fork = NULL;
3298 #if QUOTA
3299 off_t savedbytes;
3300 int blksize = hfsmp->blockSize;
3301 #endif
3302 u_int32_t fileid = cp->c_fileid;
3303
3304 /*
3305 * Figure out if we need to read the resource fork data into
3306 * core before wiping out the catalog record.
3307 *
3308 * 1) Must not be a directory
3309 * 2) cnode's c_rsrcfork ptr must be NULL.
3310 * 3) rsrc fork must have actual blocks
3311 */
3312 if ((isdir == 0) && (cp->c_rsrcfork == NULL) &&
3313 (cp->c_blocks - VTOF(vp)->ff_blocks)) {
3314 /*
3315 * The resource fork vnode & filefork did not exist.
3316 * Create a temporary one for use in this function only.
3317 */
3318 MALLOC_ZONE (temp_rsrc_fork, struct filefork *, sizeof (struct filefork), M_HFSFORK, M_WAITOK);
3319 bzero(temp_rsrc_fork, sizeof(struct filefork));
3320 temp_rsrc_fork->ff_cp = cp;
3321 rl_init(&temp_rsrc_fork->ff_invalidranges);
3322 }
3323
3324 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3325
3326 /* Look up the resource fork first, if necessary */
3327 if (temp_rsrc_fork) {
3328 error = cat_lookup (hfsmp, &desc, 1, (struct cat_desc*) NULL,
3329 (struct cat_attr*) NULL, &temp_rsrc_fork->ff_data, NULL);
3330 if (error) {
3331 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
3332 hfs_systemfile_unlock (hfsmp, lockflags);
3333 goto out;
3334 }
3335 }
3336
3337 if (!skip_reserve) {
3338 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
3339 if (temp_rsrc_fork) {
3340 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
3341 }
3342 hfs_systemfile_unlock(hfsmp, lockflags);
3343 goto out;
3344 }
3345 }
3346
3347 error = cat_delete(hfsmp, &desc, &cp->c_attr);
3348
3349 if (error && error != ENXIO && error != ENOENT) {
3350 printf("hfs_removefile: deleting file %s (%d), err: %d\n",
3351 cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, error);
3352 }
3353
3354 if (error == 0) {
3355 /* Update the parent directory */
3356 if (dcp->c_entries > 0)
3357 dcp->c_entries--;
3358 dcp->c_dirchangecnt++;
3359 dcp->c_ctime = tv.tv_sec;
3360 dcp->c_mtime = tv.tv_sec;
3361 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
3362 }
3363 hfs_systemfile_unlock(hfsmp, lockflags);
3364
3365 if (error) {
3366 if (temp_rsrc_fork) {
3367 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
3368 }
3369 goto out;
3370 }
3371
3372 /*
3373 * Now that we've wiped out the catalog record, the file effectively doesn't
3374 * exist anymore. So update the quota records to reflect the loss of the
3375 * data fork and the resource fork.
3376 */
3377 #if QUOTA
3378 if (cp->c_datafork->ff_blocks > 0) {
3379 savedbytes = ((off_t)cp->c_datafork->ff_blocks * (off_t)blksize);
3380 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3381 }
3382
3383 /*
3384 * We may have just deleted the catalog record for a resource fork even
3385 * though it did not exist in core as a vnode. However, just because there
3386 * was a resource fork pointer in the cnode does not mean that it had any blocks.
3387 */
3388 if (temp_rsrc_fork || cp->c_rsrcfork) {
3389 if (cp->c_rsrcfork) {
3390 if (cp->c_rsrcfork->ff_blocks > 0) {
3391 savedbytes = ((off_t)cp->c_rsrcfork->ff_blocks * (off_t)blksize);
3392 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3393 }
3394 }
3395 else {
3396 /* we must have used a temporary fork */
3397 savedbytes = ((off_t)temp_rsrc_fork->ff_blocks * (off_t)blksize);
3398 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3399 }
3400 }
3401
3402 if (hfsmp->hfs_flags & HFS_QUOTAS) {
3403 (void)hfs_chkiq(cp, -1, NOCRED, 0);
3404 }
3405 #endif
3406
3407 /*
3408 * If we didn't get any errors deleting the catalog entry, then go ahead
3409 * and release the backing store now. The filefork pointers are still valid.
3410 */
3411 if (temp_rsrc_fork) {
3412 error = hfs_release_storage (hfsmp, cp->c_datafork, temp_rsrc_fork, fileid);
3413 }
3414 else {
3415 /* if cp->c_rsrcfork == NULL, hfs_release_storage will skip over it. */
3416 error = hfs_release_storage (hfsmp, cp->c_datafork, cp->c_rsrcfork, fileid);
3417 }
3418 if (error) {
3419 /*
3420 * If we encountered an error updating the extents and bitmap,
3421 * mark the volume inconsistent. At this point, the catalog record has
3422 * already been deleted, so we can't recover it at this point. We need
3423 * to proceed and update the volume header and mark the cnode C_NOEXISTS.
3424 * The subsequent fsck should be able to recover the free space for us.
3425 */
3426 hfs_mark_volume_inconsistent(hfsmp);
3427 }
3428 else {
3429 /* reset update_vh to 0, since hfs_release_storage should have done it for us */
3430 update_vh = 0;
3431 }
3432
3433 /* Get rid of the temporary rsrc fork */
3434 if (temp_rsrc_fork) {
3435 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
3436 }
3437
3438 cp->c_flag |= C_NOEXISTS;
3439 cp->c_flag &= ~C_DELETED;
3440
3441 cp->c_touch_chgtime = TRUE; /* XXX needed ? */
3442 --cp->c_linkcount;
3443
3444 /*
3445 * We must never get a directory if we're in this else block. We could
3446 * accidentally drop the number of files in the volume header if we did.
3447 */
3448 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
3449
3450 }
3451
3452 /*
3453 * All done with this cnode's descriptor...
3454 *
3455 * Note: all future catalog calls for this cnode must be by
3456 * fileid only. This is OK for HFS (which doesn't have file
3457 * thread records) since HFS doesn't support the removal of
3458 * busy files.
3459 */
3460 cat_releasedesc(&cp->c_desc);
3461
3462 out:
3463 if (error) {
3464 cp->c_flag &= ~C_DELETED;
3465 }
3466
3467 if (update_vh) {
3468 /*
3469 * If we bailed out earlier, we may need to update the volume header
3470 * to deal with the borrowed blocks accounting.
3471 */
3472 hfs_volupdate (hfsmp, VOL_UPDATE, 0);
3473 }
3474
3475 if (started_tr) {
3476 hfs_end_transaction(hfsmp);
3477 }
3478
3479 dcp->c_flag &= ~C_DIR_MODIFICATION;
3480 wakeup((caddr_t)&dcp->c_flag);
3481
3482 return (error);
3483 }
3484
3485
3486 __private_extern__ void
3487 replace_desc(struct cnode *cp, struct cat_desc *cdp)
3488 {
3489 // fixes 4348457 and 4463138
3490 if (&cp->c_desc == cdp) {
3491 return;
3492 }
3493
3494 /* First release allocated name buffer */
3495 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
3496 const u_int8_t *name = cp->c_desc.cd_nameptr;
3497
3498 cp->c_desc.cd_nameptr = 0;
3499 cp->c_desc.cd_namelen = 0;
3500 cp->c_desc.cd_flags &= ~CD_HASBUF;
3501 vfs_removename((const char *)name);
3502 }
3503 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
3504
3505 /* Cnode now owns the name buffer */
3506 cdp->cd_nameptr = 0;
3507 cdp->cd_namelen = 0;
3508 cdp->cd_flags &= ~CD_HASBUF;
3509 }
3510
3511
3512 /*
3513 * Rename a cnode.
3514 *
3515 * The VFS layer guarantees that:
3516 * - source and destination will either both be directories, or
3517 * both not be directories.
3518 * - all the vnodes are from the same file system
3519 *
3520  * When the target is a directory, HFS must ensure that it is empty.
3521 *
3522 * Note that this function requires up to 6 vnodes in order to work properly
3523 * if it is operating on files (and not on directories). This is because only
3524 * files can have resource forks, and we now require iocounts to be held on the
3525 * vnodes corresponding to the resource forks (if applicable) as well as
3526 * the files or directories undergoing rename. The problem with not holding
3527 * iocounts on the resource fork vnodes is that it can lead to a deadlock
3528 * situation: The rsrc fork of the source file may be recycled and reclaimed
3529 * in order to provide a vnode for the destination file's rsrc fork. Since
3530 * data and rsrc forks share the same cnode, we'd eventually try to lock the
3531 * source file's cnode in order to sync its rsrc fork to disk, but it's already
3532 * been locked. By taking the rsrc fork vnodes up front we ensure that they
3533 * cannot be recycled, and that the situation mentioned above cannot happen.
3534 */
3535 int
3536 hfs_vnop_rename(ap)
3537 struct vnop_rename_args /* {
3538 struct vnode *a_fdvp;
3539 struct vnode *a_fvp;
3540 struct componentname *a_fcnp;
3541 struct vnode *a_tdvp;
3542 struct vnode *a_tvp;
3543 struct componentname *a_tcnp;
3544 vfs_context_t a_context;
3545 } */ *ap;
3546 {
3547 struct vnode *tvp = ap->a_tvp;
3548 struct vnode *tdvp = ap->a_tdvp;
3549 struct vnode *fvp = ap->a_fvp;
3550 struct vnode *fdvp = ap->a_fdvp;
3551 /*
3552 * Note that we only need locals for the target/destination's
3553 * resource fork vnode (and only if necessary). We don't care if the
3554 * source has a resource fork vnode or not.
3555 */
3556 struct vnode *tvp_rsrc = NULLVP;
3557 uint32_t tvp_rsrc_vid = 0;
3558 struct componentname *tcnp = ap->a_tcnp;
3559 struct componentname *fcnp = ap->a_fcnp;
3560 struct proc *p = vfs_context_proc(ap->a_context);
3561 struct cnode *fcp;
3562 struct cnode *fdcp;
3563 struct cnode *tdcp;
3564 struct cnode *tcp;
3565 struct cnode *error_cnode;
3566 struct cat_desc from_desc;
3567 struct cat_desc to_desc;
3568 struct cat_desc out_desc;
3569 struct hfsmount *hfsmp;
3570 cat_cookie_t cookie;
3571 int tvp_deleted = 0;
3572 int started_tr = 0, got_cookie = 0;
3573 int took_trunc_lock = 0;
3574 int lockflags;
3575 int error;
3576 time_t orig_from_ctime, orig_to_ctime;
3577 int emit_rename = 1;
3578 int emit_delete = 1;
3579
3580 orig_from_ctime = VTOC(fvp)->c_ctime;
3581 if (tvp && VTOC(tvp)) {
3582 orig_to_ctime = VTOC(tvp)->c_ctime;
3583 } else {
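/* no existing target; use a sentinel ctime value */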
3584 orig_to_ctime = ~0;
3585 }
3586
3587 hfsmp = VTOHFS(tdvp);
3588 /*
3589 * Do special case checks here. If fvp == tvp then we need to check the
3590 * cnode with locks held.
3591 */
3592 if (fvp == tvp) {
3593 int is_hardlink = 0;
3594 /*
3595 * In this case, we do *NOT* ever emit a DELETE event.
3596 * We may not necessarily emit a RENAME event
3597 */
3598 emit_delete = 0;
3599 if ((error = hfs_lock(VTOC(fvp), HFS_SHARED_LOCK))) {
3600 return error;
3601 }
3602 /* Check to see if the item is a hardlink or not */
3603 is_hardlink = (VTOC(fvp)->c_flag & C_HARDLINK);
3604 hfs_unlock (VTOC(fvp));
3605
3606 /*
3607 * If the item is not a hardlink, then case sensitivity must be off; two distinct
3608 * names can only resolve to the same cnode if they are case variants.
3609 */
3610 if (is_hardlink) {
3611 emit_rename = 0;
3612 /*
3613 * Hardlinks are a little trickier. We only want to emit a rename event
3614 * if the item is a hardlink, the parent directories are the same, case sensitivity
3615 * is off, and the case folded names are the same. See the fvp == tvp case below for more
3616 * info.
3617 */
3618
3619 if ((fdvp == tdvp) && ((hfsmp->hfs_flags & HFS_CASE_SENSITIVE) == 0)) {
3620 if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
3621 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
3622 /* Then in this case only it is ok to emit a rename */
3623 emit_rename = 1;
3624 }
3625 }
3626 }
3627 }
3628 if (emit_rename) {
3629 check_for_tracked_file(fvp, orig_from_ctime, NAMESPACE_HANDLER_RENAME_OP, NULL);
3630 }
3631
3632 if (tvp && VTOC(tvp)) {
3633 if (emit_delete) {
3634 check_for_tracked_file(tvp, orig_to_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
3635 }
3636 }
3637
3638 retry:
3639 /* When tvp exists, take the truncate lock for hfs_removefile(). */
3640 if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) {
3641 hfs_lock_truncate(VTOC(tvp), HFS_EXCLUSIVE_LOCK);
3642 took_trunc_lock = 1;
3643 }
3644
3645 error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL,
3646 HFS_EXCLUSIVE_LOCK, &error_cnode);
3647 if (error) {
3648 if (took_trunc_lock) {
3649 hfs_unlock_truncate(VTOC(tvp), 0);
3650 took_trunc_lock = 0;
3651 }
3652
3653 /*
3654 * We hit an error path. If we were trying to re-acquire the locks
3655 * after coming through here once, we might have already obtained
3656 * an iocount on tvp's resource fork vnode. Drop that before dealing
3657 * with the failure. Note this is safe -- since we are in an
3658 * error handling path, we can't be holding the cnode locks.
3659 */
3660 if (tvp_rsrc) {
3661 vnode_put (tvp_rsrc);
3662 tvp_rsrc_vid = 0;
3663 tvp_rsrc = NULL;
3664 }
3665
3666 /*
3667 * tvp might no longer exist. If the cause of the lock failure
3668 * was tvp, then we can try again with tvp/tcp set to NULL.
3669 * This is ok because the vfs syscall will vnode_put the vnodes
3670 * after we return from hfs_vnop_rename.
3671 */
3672 if ((error == ENOENT) && (tvp != NULL) && (error_cnode == VTOC(tvp))) {
3673 tcp = NULL;
3674 tvp = NULL;
3675 goto retry;
3676 }
3677
3678 return (error);
3679 }
3680
3681 fdcp = VTOC(fdvp);
3682 fcp = VTOC(fvp);
3683 tdcp = VTOC(tdvp);
3684 tcp = tvp ? VTOC(tvp) : NULL;
3685
3686 /*
3687 * Acquire iocounts on the destination's resource fork vnode
3688 * if necessary. If dst/src are files and the dst has a resource
3689 * fork vnode, then we need to try and acquire an iocount on the rsrc vnode.
3690 * If it does not exist, then we don't care and can skip it.
3691 */
3692 if ((vnode_isreg(fvp)) || (vnode_islnk(fvp))) {
3693 if ((tvp) && (tcp->c_rsrc_vp) && (tvp_rsrc == NULL)) {
3694 tvp_rsrc = tcp->c_rsrc_vp;
3695 /*
3696 * We can look at the vid here because we're holding the
3697 * cnode lock on the underlying cnode for this rsrc vnode.
3698 */
3699 tvp_rsrc_vid = vnode_vid (tvp_rsrc);
3700
3701 /* Unlock everything to acquire iocount on this rsrc vnode */
3702 if (took_trunc_lock) {
3703 hfs_unlock_truncate (VTOC(tvp), 0);
3704 took_trunc_lock = 0;
3705 }
3706 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
3707
3708 if (vnode_getwithvid (tvp_rsrc, tvp_rsrc_vid)) {
3709 /* iocount acquisition failed. Reset fields and start over.. */
3710 tvp_rsrc_vid = 0;
3711 tvp_rsrc = NULL;
3712 }
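/* Whether or not the iocount was obtained, all locks were dropped above, so restart the lock sequence from the top. */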
3713 goto retry;
3714 }
3715 }
3716
3717 /* Ensure we didn't race src or dst parent directories with rmdir. */
3718 if (fdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
3719 error = ENOENT;
3720 goto out;
3721 }
3722
3723 if (tdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
3724 error = ENOENT;
3725 goto out;
3726 }
3727
3728
3729 /* Check for a race against unlink. The hfs_valid_cnode checks validate
3730 * the parent/child relationship with fdcp and tdcp, as well as the
3731 * component name of the target cnodes.
3732 */
3733 if ((fcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, fdvp, fcnp, fcp->c_fileid, NULL, &error)) {
3734 error = ENOENT;
3735 goto out;
3736 }
3737
3738 if (tcp && ((tcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, tdvp, tcnp, tcp->c_fileid, NULL, &error))) {
3739 //
3740 // hmm, the destination vnode isn't valid any more.
3741 // in this case we can just drop him and pretend he
3742 // never existed in the first place.
3743 //
3744 if (took_trunc_lock) {
3745 hfs_unlock_truncate(VTOC(tvp), 0);
3746 took_trunc_lock = 0;
3747 }
3748 error = 0;
3749
3750 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
3751
3752 tcp = NULL;
3753 tvp = NULL;
3754
3755 // retry the locking with tvp null'ed out
3756 goto retry;
3757 }
3758
3759 fdcp->c_flag |= C_DIR_MODIFICATION;
3760 if (fdvp != tdvp) {
3761 tdcp->c_flag |= C_DIR_MODIFICATION;
3762 }
3763
3764 /*
3765 * Disallow renaming of a directory hard link if the source and
3766 * destination parent directories are different, or a directory whose
3767 * descendant is a directory hard link and one of the ancestors
3768 * of the destination directory is a directory hard link.
3769 */
3770 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
3771 if (fcp->c_flag & C_HARDLINK) {
3772 error = EPERM;
3773 goto out;
3774 }
3775 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
3776 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
3777 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
3778 error = EPERM;
3779 hfs_systemfile_unlock(hfsmp, lockflags);
3780 goto out;
3781 }
3782 hfs_systemfile_unlock(hfsmp, lockflags);
3783 }
3784 }
3785
3786 /*
3787 * The following edge case is caught here:
3788 * (to cannot be a descendant of from)
3789 *
3790 * o fdvp
3791 * /
3792 * /
3793 * o fvp
3794 * \
3795 * \
3796 * o tdvp
3797 * /
3798 * /
3799 * o tvp
3800 */
3801 if (tdcp->c_parentcnid == fcp->c_fileid) {
3802 error = EINVAL;
3803 goto out;
3804 }
3805
3806 /*
3807 * The following two edge cases are caught here:
3808 * (note tvp is not empty)
3809 *
3810 * o tdvp o tdvp
3811 * / /
3812 * / /
3813 * o tvp tvp o fdvp
3814 * \ \
3815 * \ \
3816 * o fdvp o fvp
3817 * /
3818 * /
3819 * o fvp
3820 */
3821 if (tvp && vnode_isdir(tvp) && (tcp->c_entries != 0) && fvp != tvp) {
3822 error = ENOTEMPTY;
3823 goto out;
3824 }
3825
3826 /*
3827 * The following edge case is caught here:
3828 * (the from child and parent are the same)
3829 *
3830 * o tdvp
3831 * /
3832 * /
3833 * fdvp o fvp
3834 */
3835 if (fdvp == fvp) {
3836 error = EINVAL;
3837 goto out;
3838 }
3839
3840 /*
3841 * Make sure "from" vnode and its parent are changeable.
3842 */
3843 if ((fcp->c_bsdflags & (IMMUTABLE | APPEND)) || (fdcp->c_bsdflags & APPEND)) {
3844 error = EPERM;
3845 goto out;
3846 }
3847
3848 /*
3849 * If the destination parent directory is "sticky", then the
3850 * user must own the parent directory, or the destination of
3851 * the rename, otherwise the destination may not be changed
3852 * (except by root). This implements append-only directories.
3853 *
3854 * Note that checks for immutable and write access are done
3855 * by the call to hfs_removefile.
3856 */
3857 if (tvp && (tdcp->c_mode & S_ISTXT) &&
3858 (suser(vfs_context_ucred(tcnp->cn_context), NULL)) &&
3859 (kauth_cred_getuid(vfs_context_ucred(tcnp->cn_context)) != tdcp->c_uid) &&
3860 (hfs_owner_rights(hfsmp, tcp->c_uid, vfs_context_ucred(tcnp->cn_context), p, false)) ) {
3861 error = EPERM;
3862 goto out;
3863 }
3864
3865 /* Don't allow modification of the journal or journal_info_block */
3866 if (hfs_is_journal_file(hfsmp, fcp) ||
3867 (tcp && hfs_is_journal_file(hfsmp, tcp))) {
3868 error = EPERM;
3869 goto out;
3870 }
3871
3872 #if QUOTA
3873 if (tvp)
3874 (void)hfs_getinoquota(tcp);
3875 #endif
3876 /* Preflighting done, take fvp out of the name space. */
3877 cache_purge(fvp);
3878
3879 bzero(&from_desc, sizeof(from_desc));
3880 from_desc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
3881 from_desc.cd_namelen = fcnp->cn_namelen;
3882 from_desc.cd_parentcnid = fdcp->c_fileid;
3883 from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
3884 from_desc.cd_cnid = fcp->c_cnid;
3885
3886 bzero(&to_desc, sizeof(to_desc));
3887 to_desc.cd_nameptr = (const u_int8_t *)tcnp->cn_nameptr;
3888 to_desc.cd_namelen = tcnp->cn_namelen;
3889 to_desc.cd_parentcnid = tdcp->c_fileid;
3890 to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
3891 to_desc.cd_cnid = fcp->c_cnid;
3892
3893 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3894 goto out;
3895 }
3896 started_tr = 1;
3897
3898 /* hfs_vnop_link() and hfs_vnop_rename() set kHFSHasChildLinkMask
3899 * inside a journal transaction and without holding a cnode lock.
3900 * Since setting this bit depends on being inside a journal transaction for
3901 * concurrency, check this bit again after we start the journal transaction for the rename
3902 * to ensure that this directory does not have any descendant that
3903 * is a directory hard link.
3904 */
3905 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
3906 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
3907 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
3908 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
3909 error = EPERM;
3910 hfs_systemfile_unlock(hfsmp, lockflags);
3911 goto out;
3912 }
3913 hfs_systemfile_unlock(hfsmp, lockflags);
3914 }
3915 }
3916
3917 // if it's a hardlink then re-lookup the name so
3918 // that we get the correct cnid in from_desc (see
3919 // the comment in hfs_removefile for more details)
3920 //
3921 if (fcp->c_flag & C_HARDLINK) {
3922 struct cat_desc tmpdesc;
3923 cnid_t real_cnid;
3924
3925 tmpdesc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
3926 tmpdesc.cd_namelen = fcnp->cn_namelen;
3927 tmpdesc.cd_parentcnid = fdcp->c_fileid;
3928 tmpdesc.cd_hint = fdcp->c_childhint;
3929 tmpdesc.cd_flags = fcp->c_desc.cd_flags & CD_ISDIR;
3930 tmpdesc.cd_encoding = 0;
3931
3932 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
3933
3934 if (cat_lookup(hfsmp, &tmpdesc, 0, NULL, NULL, NULL, &real_cnid) != 0) {
3935 hfs_systemfile_unlock(hfsmp, lockflags);
3936 goto out;
3937 }
3938
3939 // use the real cnid instead of whatever happened to be there
3940 from_desc.cd_cnid = real_cnid;
3941 hfs_systemfile_unlock(hfsmp, lockflags);
3942 }
3943
3944 /*
3945 * Reserve some space in the Catalog file.
3946 */
3947 if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) {
3948 goto out;
3949 }
3950 got_cookie = 1;
3951
3952 /*
3953 * If the destination exists then it may need to be removed.
3954 *
3955 * Due to HFS's locking system, we should always move the
3956 * existing 'tvp' element to the hidden directory in hfs_vnop_rename.
3957 * Because the VNOP_LOOKUP call enters and exits the filesystem independently
3958 * of the actual vnop that it was trying to do (stat, link, readlink),
3959 * we must release the cnode lock of that element during the interim to
3960 * do MAC checking, vnode authorization, and other calls. In that time,
3961 * the item can be deleted (or renamed over). However, only in the rename
3962 * case is it inappropriate to return ENOENT from any of those calls. Either
3963 * the call should return information about the old element (stale), or get
3964 * information about the newer element that we are about to write in its place.
3965 *
3966 * HFS lookup has been modified to detect a rename and re-drive its
3967 * lookup internally. For other calls that have already succeeded in
3968 * their lookup call and are waiting to acquire the cnode lock in order
3969 * to proceed, that cnode lock will not fail due to the cnode being marked
3970 * C_NOEXISTS, because it won't have been marked as such. It will only
3971 * have C_DELETED. Thus, they will simply act on the stale open-unlinked
3972 * element. All future callers will get the new element.
3973 *
3974 * To implement this behavior, we pass the "only_unlink" argument to
3975 * hfs_removefile and hfs_removedir. This will result in the vnode acting
3976 * as though it is open-unlinked. Additionally, when we are done moving the
3977 * element to the hidden directory, we vnode_recycle the target so that it is
3978 * reclaimed as soon as possible. Reclaim and inactive are both
3979 * capable of clearing out unused blocks for an open-unlinked file or dir.
3980 */
3981 if (tvp) {
3982 /*
3983 * When fvp matches tvp they could be case variants
3984 * or matching hard links.
3985 */
3986 if (fvp == tvp) {
3987 if (!(fcp->c_flag & C_HARDLINK)) {
3988 /*
3989 * If they're not hardlinks, then fvp == tvp must mean we
3990 * are using case-insensitive HFS because case-sensitive would
3991 * not use the same vnode for both. In this case we just update
3992 * the catalog for: a -> A
3993 */
3994 goto skip_rm; /* simple case variant */
3995
3996 }
3997 /* For all cases below, we must be using hardlinks */
3998 else if ((fdvp != tdvp) ||
3999 (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) {
4000 /*
4001 * If the parent directories are not the same, AND the two items
4002 * are hardlinks, posix says to do nothing:
4003 * dir1/fred <-> dir2/bob and the op was mv dir1/fred -> dir2/bob
4004 * We just return 0 in this case.
4005 *
4006 * If case sensitivity is on, and we are using hardlinks
4007 * then renaming is supposed to do nothing.
4008 * dir1/fred <-> dir2/FRED, and op == mv dir1/fred -> dir2/FRED
4009 */
4010 goto out; /* matching hardlinks, nothing to do */
4011
4012 } else if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
4013 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
4014 /*
4015 * If we get here, then the following must be true:
4016 * a) We are running case-insensitive HFS+.
4017 * b) Both paths 'fvp' and 'tvp' are in the same parent directory.
4018 * c) the two names are case-variants of each other.
4019 *
4020 * In this case, we are really only dealing with a single catalog record
4021 * whose name is being updated.
4022 *
4023 * op is dir1/fred -> dir1/FRED
4024 *
4025 * We need to special case the name matching, because if
4026 * dir1/fred <-> dir1/bob were the two links, and the
4027 * op was dir1/fred -> dir1/bob,
4028 * that would fail/do nothing.
4029 */
4030 goto skip_rm; /* case-variant hardlink in the same dir */
4031 } else {
4032 goto out; /* matching hardlink, nothing to do */
4033 }
4034 }
4035
4036
4037 if (vnode_isdir(tvp)) {
4038 /*
4039 * hfs_removedir will eventually call hfs_removefile on the directory
4040 * we're working on, because only hfs_removefile does the renaming of the
4041 * item to the hidden directory. The directory will stay around in the
4042 * hidden directory with C_DELETED until it gets an inactive or a reclaim.
4043 * That way, we can destroy all of the EAs as needed and allow new ones to be
4044 * written.
4045 */
4046 error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE, 1);
4047 }
4048 else {
4049 error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0, NULL, 1);
4050
4051 /*
4052 * If the destination file had a resource fork vnode, then we need to get rid of
4053 * its blocks when there are no more references to it. Because the call to
4054 * hfs_removefile above always open-unlinks things, we need to force an inactive/reclaim
4055 * on the resource fork vnode, in order to prevent block leaks. Otherwise,
4056 * the resource fork vnode could prevent the data fork vnode from going out of scope
4057 * because it holds a v_parent reference on it. So we mark it for termination
4058 * with a call to vnode_recycle. hfs_vnop_reclaim has been modified so that it
4059 * can clean up the blocks of open-unlinked files and resource forks.
4060 *
4061 * We can safely call vnode_recycle on the resource fork because we took an iocount
4062 * reference on it at the beginning of the function.
4063 */
4064
4065 if ((error == 0) && (tcp->c_flag & C_DELETED) && (tvp_rsrc)) {
4066 vnode_recycle(tvp_rsrc);
4067 }
4068 }
4069
4070 if (error) {
4071 goto out;
4072 }
4073
4074 tvp_deleted = 1;
4075
4076 /* Mark 'tcp' as being deleted due to a rename */
4077 tcp->c_flag |= C_RENAMED;
4078
4079 /*
4080 * Aggressively mark tvp/tcp for termination to ensure that we recover all blocks
4081 * as quickly as possible.
4082 */
4083 vnode_recycle(tvp);
4084 }
4085 skip_rm:
4086 /*
4087 * All done with tvp and fvp.
4088 *
4089 * We also jump to this point if there was no destination observed during lookup and namei.
4090 * However, because only iocounts are held at the VFS layer, there is nothing preventing a
4091 * competing thread from racing us and creating a file or dir at the destination of this rename
4092 * operation. If this occurs, it may cause us to get a spurious EEXIST out of the cat_rename
4093 * call below. To preserve rename's atomicity, we need to signal VFS to re-drive the
4094 * namei/lookup and restart the rename operation. EEXIST is an allowable errno to be bubbled
4095 * out of the rename syscall, but not for this reason, since it is a synonym errno for ENOTEMPTY.
4096 * To signal VFS, we return ERECYCLE (which is also used for lookup restarts). This errno
4097 * will be swallowed and it will restart the operation.
4098 */
4099
4100 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
4101 error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);
4102 hfs_systemfile_unlock(hfsmp, lockflags);
4103
4104 if (error) {
4105 if (error == EEXIST) {
4106 error = ERECYCLE;
4107 }
4108 goto out;
4109 }
4110
4111 /* Invalidate negative cache entries in the destination directory */
4112 if (tdcp->c_flag & C_NEG_ENTRIES) {
4113 cache_purge_negatives(tdvp);
4114 tdcp->c_flag &= ~C_NEG_ENTRIES;
4115 }
4116
4117 /* Update cnode's catalog descriptor */
4118 replace_desc(fcp, &out_desc);
4119 fcp->c_parentcnid = tdcp->c_fileid;
4120 fcp->c_hint = 0;
4121
4122 /* Now indicate this cnode needs to have date-added written to the finderinfo */
4123 fcp->c_flag |= C_NEEDS_DATEADDED;
4124 (void) hfs_update (fvp, 0);
4125
4126
4127 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_RMDIR : VOL_RMFILE,
4128 (fdcp->c_cnid == kHFSRootFolderID));
4129 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_MKDIR : VOL_MKFILE,
4130 (tdcp->c_cnid == kHFSRootFolderID));
4131
4132 /* Update both parent directories. */
4133 if (fdvp != tdvp) {
4134 if (vnode_isdir(fvp)) {
4135 /* If the source directory has directory hard link
4136 * descendants, set the kHFSHasChildLinkBit in the
4137 * destination parent hierarchy
4138 */
4139 if ((fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) &&
4140 !(tdcp->c_attr.ca_recflags & kHFSHasChildLinkMask)) {
4141
4142 tdcp->c_attr.ca_recflags |= kHFSHasChildLinkMask;
4143
4144 error = cat_set_childlinkbit(hfsmp, tdcp->c_parentcnid);
4145 if (error) {
4146 printf ("hfs_vnop_rename: error updating parent chain for %u\n", tdcp->c_cnid);
4147 error = 0;
4148 }
4149 }
4150 INC_FOLDERCOUNT(hfsmp, tdcp->c_attr);
4151 DEC_FOLDERCOUNT(hfsmp, fdcp->c_attr);
4152 }
4153 tdcp->c_entries++;
4154 tdcp->c_dirchangecnt++;
4155 if (fdcp->c_entries > 0)
4156 fdcp->c_entries--;
4157 fdcp->c_dirchangecnt++;
4158 fdcp->c_touch_chgtime = TRUE;
4159 fdcp->c_touch_modtime = TRUE;
4160
4161 fdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
4162 (void) hfs_update(fdvp, 0);
4163 }
4164 tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
4165 tdcp->c_touch_chgtime = TRUE;
4166 tdcp->c_touch_modtime = TRUE;
4167
4168 tdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
4169 (void) hfs_update(tdvp, 0);
4170
4171 /* Update the vnode's name now that the rename has completed. */
4172 vnode_update_identity(fvp, tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
4173 tcnp->cn_hash, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME));
4174
4175 /*
4176 * At this point, we may have a resource fork vnode attached to the
4177 * 'from' vnode. If it exists, we will want to update its name, because
4178 * it contains the old name + _PATH_RSRCFORKSPEC. ("/..namedfork/rsrc").
4179 *
4180 * Note that the only thing we need to update here is the name attached to
4181 * the vnode, since a resource fork vnode does not have a separate resource
4182 * cnode -- it's still 'fcp'.
4183 */
4184 if (fcp->c_rsrc_vp) {
4185 char* rsrc_path = NULL;
4186 int len;
4187
4188 /* Create a new temporary buffer that's going to hold the new name */
4189 MALLOC_ZONE (rsrc_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
4190 len = snprintf (rsrc_path, MAXPATHLEN, "%s%s", tcnp->cn_nameptr, _PATH_RSRCFORKSPEC);
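/* snprintf returns the length the full string would have required, which can exceed the buffer, so clamp it below */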
4191 len = MIN(len, MAXPATHLEN);
4192
4193 /*
4194 * vnode_update_identity will do the following for us:
4195 * 1) release reference on the existing rsrc vnode's name.
4196 * 2) copy/insert new name into the name cache
4197 * 3) attach the new name to the resource vnode
4198 * 4) update the vnode's vid
4199 */
4200 vnode_update_identity (fcp->c_rsrc_vp, fvp, rsrc_path, len, 0, (VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE));
4201
4202 /* Free the memory associated with the resource fork's name */
4203 FREE_ZONE (rsrc_path, MAXPATHLEN, M_NAMEI);
4204 }
4205 out:
4206 if (got_cookie) {
4207 cat_postflight(hfsmp, &cookie, p);
4208 }
4209 if (started_tr) {
4210 hfs_end_transaction(hfsmp);
4211 }
4212
4213 fdcp->c_flag &= ~C_DIR_MODIFICATION;
4214 wakeup((caddr_t)&fdcp->c_flag);
4215 if (fdvp != tdvp) {
4216 tdcp->c_flag &= ~C_DIR_MODIFICATION;
4217 wakeup((caddr_t)&tdcp->c_flag);
4218 }
4219
4220 if (took_trunc_lock) {
4221 hfs_unlock_truncate(VTOC(tvp), 0);
4222 }
4223
4224 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4225
4226 /* Now vnode_put the resource fork vnode if necessary */
4227 if (tvp_rsrc) {
4228 vnode_put(tvp_rsrc);
4229 tvp_rsrc = NULL;
4230 }
4231
4232 /* After tvp is removed the only acceptable error is EIO */
4233 if (error && tvp_deleted)
4234 error = EIO;
4235
4236 return (error);
4237 }
4238
4239
4240 /*
4241 * Make a directory.
4242 */
4243 int
4244 hfs_vnop_mkdir(struct vnop_mkdir_args *ap)
4245 {
4246 /***** HACK ALERT ********/
4247 ap->a_cnp->cn_flags |= MAKEENTRY;
4248 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
4249 }
4250
4251
4252 /*
4253 * Create a symbolic link.
4254 */
4255 int
4256 hfs_vnop_symlink(struct vnop_symlink_args *ap)
4257 {
4258 struct vnode **vpp = ap->a_vpp;
4259 struct vnode *dvp = ap->a_dvp;
4260 struct vnode *vp = NULL;
4261 struct cnode *cp = NULL;
4262 struct hfsmount *hfsmp;
4263 struct filefork *fp;
4264 struct buf *bp = NULL;
4265 char *datap;
4266 int started_tr = 0;
4267 u_int32_t len;
4268 int error;
4269
4270 /* HFS standard disks don't support symbolic links */
4271 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord)
4272 return (ENOTSUP);
4273
4274 /* Check for empty target name */
4275 if (ap->a_target[0] == 0)
4276 return (EINVAL);
4277
4278 hfsmp = VTOHFS(dvp);
4279 len = strlen(ap->a_target);
4280
4281 /* Check for free space */
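/* The link target is stored in the symlink's allocated blocks, so we need at least 'len' bytes free */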
4282 if (((u_int64_t)hfs_freeblks(hfsmp, 0) * (u_int64_t)hfsmp->blockSize) < len) {
4283 return (ENOSPC);
4284 }
4285
4286 /* Create the vnode */
4287 ap->a_vap->va_mode |= S_IFLNK;
4288 if ((error = hfs_makenode(dvp, vpp, ap->a_cnp, ap->a_vap, ap->a_context))) {
4289 goto out;
4290 }
4291 vp = *vpp;
4292 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) {
4293 goto out;
4294 }
4295 cp = VTOC(vp);
4296 fp = VTOF(vp);
4297
4298 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
4299 goto out;
4300 }
4301
4302 #if QUOTA
4303 (void)hfs_getinoquota(cp);
4304 #endif /* QUOTA */
4305
4306 if ((error = hfs_start_transaction(hfsmp)) != 0) {
4307 goto out;
4308 }
4309 started_tr = 1;
4310
4311 /*
4312 * Allocate space for the link.
4313 *
4314 * Since we're already inside a transaction,
4315 * tell hfs_truncate to skip the ubc_setsize.
4316 *
4317 * Don't need truncate lock since a symlink is treated as a system file.
4318 */
4319 error = hfs_truncate(vp, len, IO_NOZEROFILL, 1, 0, ap->a_context);
4320
4321 /* On errors, remove the symlink file */
4322 if (error) {
4323 /*
4324 * End the transaction so we don't re-take the cnode lock
4325 * below while inside a transaction (lock order violation).
4326 */
4327 hfs_end_transaction(hfsmp);
4328
4329 /* hfs_removefile() requires holding the truncate lock */
4330 hfs_unlock(cp);
4331 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
4332 hfs_lock(cp, HFS_FORCE_LOCK);
4333
4334 if (hfs_start_transaction(hfsmp) != 0) {
4335 started_tr = 0;
4336 hfs_unlock_truncate(cp, TRUE);
4337 goto out;
4338 }
4339
4340 (void) hfs_removefile(dvp, vp, ap->a_cnp, 0, 0, 0, NULL, 0);
4341 hfs_unlock_truncate(cp, 0);
4342 goto out;
4343 }
4344
4345 /* Write the link to disk */
4346 bp = buf_getblk(vp, (daddr64_t)0, roundup((int)fp->ff_size, hfsmp->hfs_physical_block_size),
4347 0, 0, BLK_META);
4348 if (hfsmp->jnl) {
4349 journal_modify_block_start(hfsmp->jnl, bp);
4350 }
4351 datap = (char *)buf_dataptr(bp);
4352 bzero(datap, buf_size(bp));
4353 bcopy(ap->a_target, datap, len);
4354
4355 if (hfsmp->jnl) {
4356 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
4357 } else {
4358 buf_bawrite(bp);
4359 }
4360 /*
4361 * We deferred the ubc_setsize for hfs_truncate
4362 * since we were inside a transaction.
4363 *
4364 * We don't need to drop the cnode lock here
4365 * since this is a symlink.
4366 */
4367 ubc_setsize(vp, len);
4368 out:
4369 if (started_tr)
4370 hfs_end_transaction(hfsmp);
4371 if ((cp != NULL) && (vp != NULL)) {
4372 hfs_unlock(cp);
4373 }
4374 if (error) {
4375 if (vp) {
4376 vnode_put(vp);
4377 }
4378 *vpp = NULL;
4379 }
4380 return (error);
4381 }
4382
4383
4384 /* structures to hold a "." or ".." directory entry */
4385 struct hfs_stddotentry {
4386 u_int32_t d_fileno; /* unique file number */
4387 u_int16_t d_reclen; /* length of this structure */
4388 u_int8_t d_type; /* dirent file type */
4389 u_int8_t d_namlen; /* len of filename */
4390 char d_name[4]; /* "." or ".." */
4391 };
4392
4393 struct hfs_extdotentry {
4394 u_int64_t d_fileno; /* unique file number */
4395 u_int64_t d_seekoff; /* seek offset (optional, used by servers) */
4396 u_int16_t d_reclen; /* length of this structure */
4397 u_int16_t d_namlen; /* len of filename */
4398 u_int8_t d_type; /* dirent file type */
4399 u_char d_name[3]; /* "." or ".." */
4400 };
4401
4402 typedef union {
4403 struct hfs_stddotentry std;
4404 struct hfs_extdotentry ext;
4405 } hfs_dotentry_t;
4406
4407 /*
4408 * hfs_vnop_readdir reads directory entries into the buffer pointed
4409 * to by uio, in a filesystem independent format. Up to uio_resid
4410 * bytes of data can be transferred. The data in the buffer is a
4411 * series of packed dirent structures where each one contains the
4412 * following entries:
4413 *
4414 * u_int32_t d_fileno; // file number of entry
4415 * u_int16_t d_reclen; // length of this record
4416 * u_int8_t d_type; // file type
4417 * u_int8_t d_namlen; // length of string in d_name
4418 * char d_name[MAXNAMELEN+1]; // null terminated file name
4419 *
4420 * The current position (uio_offset) refers to the next block of
4421 * entries. The offset can only be set to a value previously
4422 * returned by hfs_vnop_readdir or zero. This offset does not have
4423 * to match the number of bytes returned (in uio_resid).
4424 *
4425 * In fact, the offset used by HFS is essentially an index (26 bits)
4426 * with a tag (6 bits). The tag is for associating the next request
4427 * with the current request. This enables us to have multiple threads
4428 * reading the directory while the directory is also being modified.
4429 *
4430 * Each tag/index pair is tied to a unique directory hint. The hint
4431 * contains information (filename) needed to build the catalog b-tree
4432 * key for finding the next set of entries.
4433 *
4434 * If the directory is marked as deleted-but-in-use (cp->c_flag & C_DELETED),
4435 * do NOT synthesize entries for "." and "..".
4436 */
4437 int
4438 hfs_vnop_readdir(ap)
4439 struct vnop_readdir_args /* {
4440 vnode_t a_vp;
4441 uio_t a_uio;
4442 int a_flags;
4443 int *a_eofflag;
4444 int *a_numdirent;
4445 vfs_context_t a_context;
4446 } */ *ap;
4447 {
4448 struct vnode *vp = ap->a_vp;
4449 uio_t uio = ap->a_uio;
4450 struct cnode *cp;
4451 struct hfsmount *hfsmp;
4452 directoryhint_t *dirhint = NULL;
4453 directoryhint_t localhint;
4454 off_t offset;
4455 off_t startoffset;
4456 int error = 0;
4457 int eofflag = 0;
4458 user_addr_t user_start = 0;
4459 user_size_t user_len = 0;
4460 int index;
4461 unsigned int tag;
4462 int items;
4463 int lockflags;
4464 int extended;
4465 int nfs_cookies;
4466 cnid_t cnid_hint = 0;
4467
4468 items = 0;
4469 startoffset = offset = uio_offset(uio);
4470 extended = (ap->a_flags & VNODE_READDIR_EXTENDED);
4471 nfs_cookies = extended && (ap->a_flags & VNODE_READDIR_REQSEEKOFF);
4472
4473 /* Sanity check the uio data. */
4474 if (uio_iovcnt(uio) > 1)
4475 return (EINVAL);
4476
4477 if (VTOC(vp)->c_bsdflags & UF_COMPRESSED) {
4478 int compressed = hfs_file_is_compressed(VTOC(vp), 0); /* 0 == take the cnode lock */
4479 if (VTOCMP(vp) != NULL && !compressed) {
4480 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
4481 if (error) {
4482 return error;
4483 }
4484 }
4485 }
4486
4487 cp = VTOC(vp);
4488 hfsmp = VTOHFS(vp);
4489
4490 /* Note that the dirhint calls require an exclusive lock. */
4491 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
4492 return (error);
4493
4494 /* Pick up cnid hint (if any). */
4495 if (nfs_cookies) {
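/* NFS cookies carry the cnid hint in the upper 32 bits of the offset; extract it, then strip it from the offset used below */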
4496 cnid_hint = (cnid_t)(uio_offset(uio) >> 32);
4497 uio_setoffset(uio, uio_offset(uio) & 0x00000000ffffffffLL);
4498 if (cnid_hint == INT_MAX) { /* searching past the last item */
4499 eofflag = 1;
4500 goto out;
4501 }
4502 }
4503 /*
4504 * Synthesize entries for "." and "..", unless the directory has
4505 * been deleted, but not closed yet (lazy delete in progress).
4506 */
4507 if (offset == 0 && !(cp->c_flag & C_DELETED)) {
4508 hfs_dotentry_t dotentry[2];
4509 size_t uiosize;
4510
4511 if (extended) {
4512 struct hfs_extdotentry *entry = &dotentry[0].ext;
4513
4514 entry->d_fileno = cp->c_cnid;
4515 entry->d_reclen = sizeof(struct hfs_extdotentry);
4516 entry->d_type = DT_DIR;
4517 entry->d_namlen = 1;
4518 entry->d_name[0] = '.';
4519 entry->d_name[1] = '\0';
4520 entry->d_name[2] = '\0';
4521 entry->d_seekoff = 1;
4522
4523 ++entry;
4524 entry->d_fileno = cp->c_parentcnid;
4525 entry->d_reclen = sizeof(struct hfs_extdotentry);
4526 entry->d_type = DT_DIR;
4527 entry->d_namlen = 2;
4528 entry->d_name[0] = '.';
4529 entry->d_name[1] = '.';
4530 entry->d_name[2] = '\0';
4531 entry->d_seekoff = 2;
4532 uiosize = 2 * sizeof(struct hfs_extdotentry);
4533 } else {
4534 struct hfs_stddotentry *entry = &dotentry[0].std;
4535
4536 entry->d_fileno = cp->c_cnid;
4537 entry->d_reclen = sizeof(struct hfs_stddotentry);
4538 entry->d_type = DT_DIR;
4539 entry->d_namlen = 1;
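/* zero the 4-byte d_name field before filling in the name */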
4540 *(int *)&entry->d_name[0] = 0;
4541 entry->d_name[0] = '.';
4542
4543 ++entry;
4544 entry->d_fileno = cp->c_parentcnid;
4545 entry->d_reclen = sizeof(struct hfs_stddotentry);
4546 entry->d_type = DT_DIR;
4547 entry->d_namlen = 2;
4548 *(int *)&entry->d_name[0] = 0;
4549 entry->d_name[0] = '.';
4550 entry->d_name[1] = '.';
4551 uiosize = 2 * sizeof(struct hfs_stddotentry);
4552 }
4553 if ((error = uiomove((caddr_t)&dotentry, uiosize, uio))) {
4554 goto out;
4555 }
4556 offset += 2;
4557 }
4558
4559 /* If there are no real entries then we're done. */
4560 if (cp->c_entries == 0) {
4561 error = 0;
4562 eofflag = 1;
4563 uio_setoffset(uio, offset);
4564 goto seekoffcalc;
4565 }
4566
4567 //
4568 // We have to lock the user's buffer here so that we won't
4569 // fault on it after we've acquired a shared lock on the
4570 // catalog file. The issue is that you can get a 3-way
4571 // deadlock if someone else starts a transaction and then
4572 // tries to lock the catalog file but can't because we're
4573 // here and we can't service our page fault because VM is
4574 // blocked trying to start a transaction as a result of
4575 // trying to free up pages for our page fault. It's messy
4576 // but it does happen on dual-processors that are paging
4577 // heavily (see radar 3082639 for more info). By locking
4578 // the buffer up-front we prevent ourselves from faulting
4579 // while holding the shared catalog file lock.
4580 //
4581 // Fortunately this and hfs_search() are the only two places
4582 // currently (10/30/02) that can fault on user data with a
4583 // shared lock on the catalog file.
4584 //
4585 if (hfsmp->jnl && uio_isuserspace(uio)) {
4586 user_start = uio_curriovbase(uio);
4587 user_len = uio_curriovlen(uio);
4588
4589 if ((error = vslock(user_start, user_len)) != 0) {
4590 user_start = 0;
4591 goto out;
4592 }
4593 }
4594 /* Convert offset into a catalog directory index. */
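/* The "- 2" accounts for the synthesized "." and ".." entries emitted above */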
4595 index = (offset & HFS_INDEX_MASK) - 2;
4596 tag = offset & ~HFS_INDEX_MASK;
4597
4598 /* Lock catalog during cat_findname and cat_getdirentries. */
4599 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4600
4601 /* When called from NFS, try and resolve a cnid hint. */
4602 if (nfs_cookies && cnid_hint != 0) {
4603 if (cat_findname(hfsmp, cnid_hint, &localhint.dh_desc) == 0) {
4604 if ( localhint.dh_desc.cd_parentcnid == cp->c_fileid) {
4605 localhint.dh_index = index - 1;
4606 localhint.dh_time = 0;
4607 bzero(&localhint.dh_link, sizeof(localhint.dh_link));
4608 dirhint = &localhint; /* don't forget to release the descriptor */
4609 } else {
4610 cat_releasedesc(&localhint.dh_desc);
4611 }
4612 }
4613 }
4614
4615 /* Get a directory hint (cnode must be locked exclusive) */
4616 if (dirhint == NULL) {
4617 dirhint = hfs_getdirhint(cp, ((index - 1) & HFS_INDEX_MASK) | tag, 0);
4618
4619 /* Hide tag from catalog layer. */
4620 dirhint->dh_index &= HFS_INDEX_MASK;
4621 if (dirhint->dh_index == HFS_INDEX_MASK) {
4622 dirhint->dh_index = -1;
4623 }
4624 }
4625
4626 if (index == 0) {
4627 dirhint->dh_threadhint = cp->c_dirthreadhint;
4628 }
4629 else {
4630 /*
4631 * If we have a non-zero index, there is a possibility that during the last
4632 * call to hfs_vnop_readdir we hit EOF for this directory. If that is the case
4633 * then we don't want to return any new entries for the caller. Just return 0
4634 * items, mark the eofflag, and bail out. Because we won't have done any work, the
4635 * code at the end of the function will release the dirhint for us.
4636 *
4637 * Don't forget to unlock the catalog lock on the way out, too.
4638 */
4639 if (dirhint->dh_desc.cd_flags & CD_EOF) {
4640 error = 0;
4641 eofflag = 1;
4642 uio_setoffset(uio, startoffset);
4643 hfs_systemfile_unlock (hfsmp, lockflags);
4644
4645 goto seekoffcalc;
4646 }
4647 }
4648
4649 /* Pack the buffer with dirent entries. */
4650 error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, ap->a_flags, &items, &eofflag);
4651
4652 if (index == 0 && error == 0) {
4653 cp->c_dirthreadhint = dirhint->dh_threadhint;
4654 }
4655
4656 hfs_systemfile_unlock(hfsmp, lockflags);
4657
4658 if (error != 0) {
4659 goto out;
4660 }
4661
4662 /* Get index to the next item */
4663 index += items;
4664
4665 if (items >= (int)cp->c_entries) {
4666 eofflag = 1;
4667 }
4668
4669 /* Convert catalog directory index back into an offset. */
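/* The tag must be non-zero so later requests can be matched to this directory hint; the returned offset is (tag << HFS_INDEX_BITS) | (index + 2). */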
4670 while (tag == 0)
4671 tag = (++cp->c_dirhinttag) << HFS_INDEX_BITS;
4672 uio_setoffset(uio, (index + 2) | tag);
4673 dirhint->dh_index |= tag;
4674
4675 seekoffcalc:
4676 cp->c_touch_acctime = TRUE;
4677
4678 if (ap->a_numdirent) {
4679 if (startoffset == 0)
4680 items += 2;
4681 *ap->a_numdirent = items;
4682 }
4683
4684 out:
4685 if (user_start) {
4686 vsunlock(user_start, user_len, TRUE);
4687 }
4688 /* If we didn't do anything then go ahead and dump the hint. */
4689 if ((dirhint != NULL) &&
4690 (dirhint != &localhint) &&
4691 (uio_offset(uio) == startoffset)) {
4692 hfs_reldirhint(cp, dirhint);
4693 eofflag = 1;
4694 }
4695 if (ap->a_eofflag) {
4696 *ap->a_eofflag = eofflag;
4697 }
4698 if (dirhint == &localhint) {
4699 cat_releasedesc(&localhint.dh_desc);
4700 }
4701 hfs_unlock(cp);
4702 return (error);
4703 }
4704
4705
4706 /*
4707 * Read contents of a symbolic link.
4708 */
4709 int
4710 hfs_vnop_readlink(ap)
4711 struct vnop_readlink_args /* {
4712 struct vnode *a_vp;
4713 struct uio *a_uio;
4714 vfs_context_t a_context;
4715 } */ *ap;
4716 {
4717 struct vnode *vp = ap->a_vp;
4718 struct cnode *cp;
4719 struct filefork *fp;
4720 int error;
4721
4722 if (!vnode_islnk(vp))
4723 return (EINVAL);
4724
4725 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
4726 return (error);
4727 cp = VTOC(vp);
4728 fp = VTOF(vp);
4729
4730 /* Zero length sym links are not allowed */
4731 if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
4732 error = EINVAL;
4733 goto exit;
4734 }
4735
4736 /* Cache the path so we don't waste buffer cache resources */
4737 if (fp->ff_symlinkptr == NULL) {
4738 struct buf *bp = NULL;
4739
4740 MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK);
4741 if (fp->ff_symlinkptr == NULL) {
4742 error = ENOMEM;
4743 goto exit;
4744 }
4745 error = (int)buf_meta_bread(vp, (daddr64_t)0,
4746 roundup((int)fp->ff_size, VTOHFS(vp)->hfs_physical_block_size),
4747 vfs_context_ucred(ap->a_context), &bp);
4748 if (error) {
4749 if (bp)
4750 buf_brelse(bp);
4751 if (fp->ff_symlinkptr) {
4752 FREE(fp->ff_symlinkptr, M_TEMP);
4753 fp->ff_symlinkptr = NULL;
4754 }
4755 goto exit;
4756 }
4757 bcopy((char *)buf_dataptr(bp), fp->ff_symlinkptr, (size_t)fp->ff_size);
4758
4759 if (VTOHFS(vp)->jnl && (buf_flags(bp) & B_LOCKED) == 0) {
4760 buf_markinvalid(bp); /* data no longer needed */
4761 }
4762 buf_brelse(bp);
4763 }
4764 error = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
4765
4766 /*
4767 * Keep track of blocks read
4768 */
4769 if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (error == 0)) {
4770
4771 /*
4772 * If this file hasn't been seen since the start of
4773 * the current sampling period then start over.
4774 */
4775 if (cp->c_atime < VTOHFS(vp)->hfc_timebase)
4776 VTOF(vp)->ff_bytesread = fp->ff_size;
4777 else
4778 VTOF(vp)->ff_bytesread += fp->ff_size;
4779
4780 // if (VTOF(vp)->ff_bytesread > fp->ff_size)
4781 // cp->c_touch_acctime = TRUE;
4782 }
4783
4784 exit:
4785 hfs_unlock(cp);
4786 return (error);
4787 }
4788
4789
4790 /*
4791 * Get configurable pathname variables.
4792 */
4793 int
4794 hfs_vnop_pathconf(ap)
4795 struct vnop_pathconf_args /* {
4796 struct vnode *a_vp;
4797 int a_name;
4798 int *a_retval;
4799 vfs_context_t a_context;
4800 } */ *ap;
4801 {
4802 switch (ap->a_name) {
4803 case _PC_LINK_MAX:
4804 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD)
4805 *ap->a_retval = 1;
4806 else
4807 *ap->a_retval = HFS_LINK_MAX;
4808 break;
4809 case _PC_NAME_MAX:
4810 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD)
4811 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
4812 else
4813 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
4814 break;
4815 case _PC_PATH_MAX:
4816 *ap->a_retval = PATH_MAX; /* 1024 */
4817 break;
4818 case _PC_PIPE_BUF:
4819 *ap->a_retval = PIPE_BUF;
4820 break;
4821 case _PC_CHOWN_RESTRICTED:
4822 *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */
4823 break;
4824 case _PC_NO_TRUNC:
4825 *ap->a_retval = 200112; /* _POSIX_NO_TRUNC */
4826 break;
4827 case _PC_NAME_CHARS_MAX:
4828 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD)
4829 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
4830 else
4831 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
4832 break;
4833 case _PC_CASE_SENSITIVE:
4834 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE)
4835 *ap->a_retval = 1;
4836 else
4837 *ap->a_retval = 0;
4838 break;
4839 case _PC_CASE_PRESERVING:
4840 *ap->a_retval = 1;
4841 break;
4842 case _PC_FILESIZEBITS:
4843 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD)
4844 *ap->a_retval = 32;
4845 else
4846 *ap->a_retval = 64; /* number of bits to store max file size */
4847 break;
4848 case _PC_XATTR_SIZE_BITS:
4849 /* Number of bits to store maximum extended attribute size */
4850 *ap->a_retval = HFS_XATTR_SIZE_BITS;
4851 break;
4852 default:
4853 return (EINVAL);
4854 }
4855
4856 return (0);
4857 }
4858
4859
4860 /*
4861 * Update a cnode's on-disk metadata.
4862 *
4863 * If waitfor is set, then wait for the disk write of
4864 * the node to complete.
4865 *
4866 * The cnode must be locked exclusive
4867 */
4868 int
4869 hfs_update(struct vnode *vp, __unused int waitfor)
4870 {
4871 struct cnode *cp = VTOC(vp);
4872 struct proc *p;
4873 struct cat_fork *dataforkp = NULL;
4874 struct cat_fork *rsrcforkp = NULL;
4875 struct cat_fork datafork;
4876 struct cat_fork rsrcfork;
4877 struct hfsmount *hfsmp;
4878 int lockflags;
4879 int error;
4880
4881 p = current_proc();
4882 hfsmp = VTOHFS(vp);
4883
4884 if (((vnode_issystem(vp) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))) ||
4885 hfsmp->hfs_catalog_vp == NULL){
4886 return (0);
4887 }
4888 if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (cp->c_mode == 0)) {
4889 cp->c_flag &= ~C_MODIFIED;
4890 cp->c_touch_acctime = 0;
4891 cp->c_touch_chgtime = 0;
4892 cp->c_touch_modtime = 0;
4893 return (0);
4894 }
4895
4896 hfs_touchtimes(hfsmp, cp);
4897
4898 /* Nothing to update. */
4899 if ((cp->c_flag & (C_MODIFIED | C_FORCEUPDATE)) == 0) {
4900 return (0);
4901 }
4902
4903 if (cp->c_datafork)
4904 dataforkp = &cp->c_datafork->ff_data;
4905 if (cp->c_rsrcfork)
4906 rsrcforkp = &cp->c_rsrcfork->ff_data;
4907
4908 /*
4909 * For delayed allocations, updates are
4910 * postponed until an fsync or the file
4911 * gets written to disk.
4912 *
4913 * Deleted files can defer meta data updates until inactive.
4914 *
4915 * If we're ever called with the C_FORCEUPDATE flag though
4916 * we have to do the update.
4917 */
4918 if (ISSET(cp->c_flag, C_FORCEUPDATE) == 0 &&
4919 (ISSET(cp->c_flag, C_DELETED) ||
4920 (dataforkp && cp->c_datafork->ff_unallocblocks) ||
4921 (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks))) {
4922 // cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE);
4923 cp->c_flag |= C_MODIFIED;
4924
4925 return (0);
4926 }
4927
4928 if ((error = hfs_start_transaction(hfsmp)) != 0) {
4929 return error;
4930 }
4931
4932 /*
4933 * Modify the values passed to cat_update based on whether or not
4934 * the file has invalid ranges or borrowed blocks.
4935 */
4936 if (dataforkp) {
4937 off_t numbytes = 0;
4938
4939 /* copy the datafork into a temporary copy so we don't pollute the cnode's */
4940 bcopy(dataforkp, &datafork, sizeof(datafork));
4941 dataforkp = &datafork;
4942
4943 /*
4944 * If there are borrowed blocks, ensure that they are subtracted
4945 * from the total block count before writing the cnode entry to disk.
4946 * Only extents that have actually been marked allocated in the bitmap
4947 * should be reflected in the total block count for this fork.
4948 */
4949 if (cp->c_datafork->ff_unallocblocks != 0) {
4950 // make sure that we don't assign a negative block count
4951 if (cp->c_datafork->ff_blocks < cp->c_datafork->ff_unallocblocks) {
4952 panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
4953 cp->c_datafork->ff_blocks, cp->c_datafork->ff_unallocblocks);
4954 }
4955
4956 /* Also cap the LEOF to the total number of bytes that are allocated. */
4957 datafork.cf_blocks = (cp->c_datafork->ff_blocks - cp->c_datafork->ff_unallocblocks);
4958 datafork.cf_size = datafork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
4959 }
4960
4961 /*
4962 * For files with invalid ranges (holes) the on-disk
4963 * field representing the size of the file (cf_size)
4964 * must be no larger than the start of the first hole.
4965 * However, note that if the first invalid range exists
4966 * solely within borrowed blocks, then our LEOF and block
4967 * count should both be zero. As a result, set it to the
4968 * min of the current cf_size and the start of the first
4969 * invalid range, because it may have already been reduced
4970 * to zero by the borrowed blocks check above.
4971 */
4972 if (!TAILQ_EMPTY(&cp->c_datafork->ff_invalidranges)) {
4973 numbytes = TAILQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start;
4974 datafork.cf_size = MIN((numbytes), (datafork.cf_size));
4975 }
4976 }
4977
4978 /*
4979 * For resource forks with delayed allocations, make sure
4980 * the block count and file size match the number of blocks
4981 * actually allocated to the file on disk.
4982 */
4983 if (rsrcforkp && (cp->c_rsrcfork->ff_unallocblocks != 0)) {
4984 bcopy(rsrcforkp, &rsrcfork, sizeof(rsrcfork));
4985 rsrcfork.cf_blocks = (cp->c_rsrcfork->ff_blocks - cp->c_rsrcfork->ff_unallocblocks);
4986 rsrcfork.cf_size = rsrcfork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
4987 rsrcforkp = &rsrcfork;
4988 }
4989
4990 /*
4991 * Lock the Catalog b-tree file.
4992 */
4993 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
4994
4995 /* XXX - waitfor is not enforced */
4996 error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);
4997
4998 hfs_systemfile_unlock(hfsmp, lockflags);
4999
5000 /* After the updates are finished, clear the flags */
5001 cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE);
5002
5003 hfs_end_transaction(hfsmp);
5004
5005 return (error);
5006 }
5007
5008 /*
5009 * Allocate a new node
5010 * Note - This function does not create and return a vnode for whiteout creation.
5011 */
5012 int
5013 hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
5014 struct vnode_attr *vap, vfs_context_t ctx)
5015 {
5016 struct cnode *cp = NULL;
5017 struct cnode *dcp = NULL;
5018 struct vnode *tvp;
5019 struct hfsmount *hfsmp;
5020 struct cat_desc in_desc, out_desc;
5021 struct cat_attr attr;
5022 struct timeval tv;
5023 int lockflags;
5024 int error, started_tr = 0;
5025 enum vtype vnodetype;
5026 int mode;
5027 int newvnode_flags = 0;
5028 u_int32_t gnv_flags = 0;
5029 int protectable_target = 0;
5030
5031 #if CONFIG_PROTECT
5032 struct cprotect *entry = NULL;
5033 uint32_t cp_class = 0;
5034 if (VATTR_IS_ACTIVE(vap, va_dataprotect_class)) {
5035 cp_class = vap->va_dataprotect_class;
5036 }
5037 int protected_mount = 0;
5038 #endif
5039
5040
5041 if ((error = hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK)))
5042 return (error);
5043
5044 /* set the cnode pointer only after successfully acquiring lock */
5045 dcp = VTOC(dvp);
5046
5047 /* Don't allow creation of new entries in open-unlinked directories */
5048 if ((error = hfs_checkdeleted(dcp))) {
5049 hfs_unlock(dcp);
5050 return error;
5051 }
5052
5053 dcp->c_flag |= C_DIR_MODIFICATION;
5054
5055 hfsmp = VTOHFS(dvp);
5056
5057 *vpp = NULL;
5058 tvp = NULL;
5059 out_desc.cd_flags = 0;
5060 out_desc.cd_nameptr = NULL;
5061
5062 vnodetype = vap->va_type;
5063 if (vnodetype == VNON)
5064 vnodetype = VREG;
5065 mode = MAKEIMODE(vnodetype, vap->va_mode);
5066
5067 if (S_ISDIR (mode) || S_ISREG (mode)) {
5068 protectable_target = 1;
5069 }
5070
5071
5072 /* Check if we're out of usable disk space. */
5073 if ((hfs_freeblks(hfsmp, 1) == 0) && (vfs_context_suser(ctx) != 0)) {
5074 error = ENOSPC;
5075 goto exit;
5076 }
5077
5078 microtime(&tv);
5079
5080 /* Setup the default attributes */
5081 bzero(&attr, sizeof(attr));
5082 attr.ca_mode = mode;
5083 attr.ca_linkcount = 1;
5084 if (VATTR_IS_ACTIVE(vap, va_rdev)) {
5085 attr.ca_rdev = vap->va_rdev;
5086 }
5087 if (VATTR_IS_ACTIVE(vap, va_create_time)) {
5088 VATTR_SET_SUPPORTED(vap, va_create_time);
5089 attr.ca_itime = vap->va_create_time.tv_sec;
5090 } else {
5091 attr.ca_itime = tv.tv_sec;
5092 }
5093 if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
5094 attr.ca_itime += 3600; /* Same as what hfs_update does */
5095 }
5096 attr.ca_atime = attr.ca_ctime = attr.ca_mtime = attr.ca_itime;
5097 attr.ca_atimeondisk = attr.ca_atime;
5098 if (VATTR_IS_ACTIVE(vap, va_flags)) {
5099 VATTR_SET_SUPPORTED(vap, va_flags);
5100 attr.ca_flags = vap->va_flags;
5101 }
5102
5103 /*
5104 * HFS+ only: all files get ThreadExists
5105 * HFSX only: dirs get HasFolderCount
5106 */
5107 if (!(hfsmp->hfs_flags & HFS_STANDARD)) {
5108 if (vnodetype == VDIR) {
5109 if (hfsmp->hfs_flags & HFS_FOLDERCOUNT)
5110 attr.ca_recflags = kHFSHasFolderCountMask;
5111 } else {
5112 attr.ca_recflags = kHFSThreadExistsMask;
5113 }
5114 }
5115
5116 #if CONFIG_PROTECT
5117 if (cp_fs_protected(hfsmp->hfs_mp)) {
5118 protected_mount = 1;
5119 }
5120 /*
5121 * On a content-protected HFS+/HFSX filesystem, files and directories
5122 * cannot be created without atomically setting/creating the EA that
5123 * contains the protection class metadata and keys at the same time, in
5124 * the same transaction. As a result, pre-set the "EAs exist" flag
5125 * on the cat_attr for protectable catalog record creations. This will
5126 * cause the cnode creation routine in hfs_getnewvnode to mark the cnode
5127 * as having EAs.
5128 */
5129 if ((protected_mount) && (protectable_target)) {
5130 attr.ca_recflags |= kHFSHasAttributesMask;
5131 }
5132 #endif
5133
5134
5135 /*
5136 * Add the date added to the item. See above, as
5137 * all of the dates are set to the itime.
5138 */
5139 hfs_write_dateadded (&attr, attr.ca_atime);
5140
5141 attr.ca_uid = vap->va_uid;
5142 attr.ca_gid = vap->va_gid;
5143 VATTR_SET_SUPPORTED(vap, va_mode);
5144 VATTR_SET_SUPPORTED(vap, va_uid);
5145 VATTR_SET_SUPPORTED(vap, va_gid);
5146
5147 #if QUOTA
5148 /* check to see if this node's creation would cause us to go over
5149 * quota. If so, abort this operation.
5150 */
5151 if (hfsmp->hfs_flags & HFS_QUOTAS) {
5152 if ((error = hfs_quotacheck(hfsmp, 1, attr.ca_uid, attr.ca_gid,
5153 vfs_context_ucred(ctx)))) {
5154 goto exit;
5155 }
5156 }
5157 #endif
5158
5159
5160 /* Tag symlinks with a type and creator. */
5161 if (vnodetype == VLNK) {
5162 struct FndrFileInfo *fip;
5163
5164 fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
5165 fip->fdType = SWAP_BE32(kSymLinkFileType);
5166 fip->fdCreator = SWAP_BE32(kSymLinkCreator);
5167 }
5168 if (cnp->cn_flags & ISWHITEOUT)
5169 attr.ca_flags |= UF_OPAQUE;
5170
5171 /* Setup the descriptor */
5172 in_desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
5173 in_desc.cd_namelen = cnp->cn_namelen;
5174 in_desc.cd_parentcnid = dcp->c_fileid;
5175 in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
5176 in_desc.cd_hint = dcp->c_childhint;
5177 in_desc.cd_encoding = 0;
5178
5179 #if CONFIG_PROTECT
5180 /*
5181 * To preserve file creation atomicity with regard to the content protection EA,
5182 * we must create the file in the catalog and then write out the EA in the same
5183 * transaction. Pre-flight any operations that we can (such as allocating/preparing
5184 * the buffer, wrapping the keys) before we start the txn and take the requisite
5185 * b-tree locks. We pass '0' as the fileid because we do not know it yet.
5186 */
5187 if ((protected_mount) && (protectable_target)) {
5188 error = cp_entry_create_keys (&entry, dcp, hfsmp, cp_class, 0, attr.ca_mode);
5189 if (error) {
5190 goto exit;
5191 }
5192 }
5193 #endif
5194
5195 if ((error = hfs_start_transaction(hfsmp)) != 0) {
5196 goto exit;
5197 }
5198 started_tr = 1;
5199
5200 // have to also lock the attribute file because cat_create() needs
5201 // to check that any fileID it wants to use does not have orphaned
5202 // attributes in it.
5203 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
5204
5205 /* Reserve some space in the Catalog file. */
5206 if ((error = cat_preflight(hfsmp, CAT_CREATE, NULL, 0))) {
5207 hfs_systemfile_unlock(hfsmp, lockflags);
5208 goto exit;
5209 }
5210 error = cat_create(hfsmp, &in_desc, &attr, &out_desc);
5211 if (error == 0) {
5212 /* Update the parent directory */
5213 dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
5214 dcp->c_entries++;
5215 if (vnodetype == VDIR) {
5216 INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
5217 }
5218 dcp->c_dirchangecnt++;
5219 dcp->c_ctime = tv.tv_sec;
5220 dcp->c_mtime = tv.tv_sec;
5221 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
5222
5223 #if CONFIG_PROTECT
5224 /*
5225 * If we are creating a content protected file, now is when
5226 * we create the EA. We must create it in the same transaction
5227 * that creates the file. We can also guarantee that the file
5228 * MUST exist because we are still holding the catalog lock
5229 * at this point.
5230 */
5231 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
5232 error = cp_setxattr (NULL, entry, hfsmp, attr.ca_fileid, XATTR_CREATE);
5233
5234 if (error) {
5235 int delete_err;
5236 /*
5237 * If we fail the EA creation, then we need to delete the file.
5238 * Luckily, we are still holding all of the right locks.
5239 */
5240 delete_err = cat_delete (hfsmp, &out_desc, &attr);
5241 if (delete_err == 0) {
5242 /* Update the parent directory */
5243 if (dcp->c_entries > 0)
5244 dcp->c_entries--;
5245 dcp->c_dirchangecnt++;
5246 dcp->c_ctime = tv.tv_sec;
5247 dcp->c_mtime = tv.tv_sec;
5248 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
5249 }
5250
5251 /* Emit EINVAL if we fail to create the EA */
5252 error = EINVAL;
5253 }
5254 }
5255 #endif
5256 }
5257 hfs_systemfile_unlock(hfsmp, lockflags);
5258 if (error)
5259 goto exit;
5260
5261 /* Invalidate negative cache entries in the directory */
5262 if (dcp->c_flag & C_NEG_ENTRIES) {
5263 cache_purge_negatives(dvp);
5264 dcp->c_flag &= ~C_NEG_ENTRIES;
5265 }
5266
5267 hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
5268 (dcp->c_cnid == kHFSRootFolderID));
5269
5270 // XXXdbg
5271 // have to end the transaction here before we call hfs_getnewvnode()
5272 // because that can cause us to try and reclaim a vnode on a different
5273 // file system which could cause us to start a transaction which can
5274 // deadlock with someone on that other file system (since we could be
5275 // holding two transaction locks as well as various vnodes and we did
5276 // not obtain the locks on them in the proper order).
5277 //
5278 // NOTE: this means that if the quota check fails or we have to update
5279 // the change time on a block-special device that those changes
5280 // will happen as part of independent transactions.
5281 //
5282 if (started_tr) {
5283 hfs_end_transaction(hfsmp);
5284 started_tr = 0;
5285 }
5286
5287 #if CONFIG_PROTECT
5288 /*
5289 * At this point, the EA must have been written successfully.
5290 * Update the MKB with the data for the cached key, then destroy it. This may
5291 * prevent information leakage by ensuring the cached key is only unwrapped
5292 * to perform file I/O, and only when that I/O is allowed.
5293 */
5294
5295 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
5296 cp_update_mkb (entry, attr.ca_fileid);
5297 cp_entry_destroy (&entry);
5298 }
5299 #endif
5300
5301 /* Do not create vnode for whiteouts */
5302 if (S_ISWHT(mode)) {
5303 goto exit;
5304 }
5305
5306 gnv_flags |= GNV_CREATE;
5307
5308 /*
5309 * Create a vnode for the object just created.
5310 *
5311 * NOTE: Maintaining the cnode lock on the parent directory is important,
5312 * as it prevents race conditions where other threads want to look up entries
5313 * in the directory and/or add things as we are in the process of creating
5314 * the vnode below. However, this has the potential for causing a
5315 * double-lock panic when dealing with shadow files on an HFS boot partition.
5316 * The panic could occur if we are not cleaning up after ourselves properly
5317 * when done with a shadow file or in the error cases. The error would occur if we
5318 * try to create a new vnode, and then end up reclaiming another shadow vnode to
5319 * create the new one. However, if everything is working properly, this should
5320 * be a non-issue as we would never enter that reclaim codepath.
5321 *
5322 * The cnode is locked on successful return.
5323 */
5324 error = hfs_getnewvnode(hfsmp, dvp, cnp, &out_desc, gnv_flags, &attr,
5325 NULL, &tvp, &newvnode_flags);
5326 if (error)
5327 goto exit;
5328
5329 cp = VTOC(tvp);
5330 *vpp = tvp;
5331
5332 #if QUOTA
5333 /*
5334 * Once we create this vnode, we need to initialize its quota data
5335 * structures, if necessary. We know that it is OK to go ahead and
5336 * initialize because we already validated earlier (through the hfs_quotacheck
5337 * function) that creating this cnode/vnode would not push us over quota.
5338 */
5339 if (hfsmp->hfs_flags & HFS_QUOTAS) {
5340 (void) hfs_getinoquota(cp);
5341 }
5342 #endif
5343
5344 exit:
5345 cat_releasedesc(&out_desc);
5346
5347 #if CONFIG_PROTECT
5348 /*
5349 * We may have jumped here while handling one of the error cases above.
5350 * If we haven't already freed the temporary CP entry used to initialize
5351 * the file atomically, then free it now. cp_entry_destroy should have
5352 * nulled out the pointer if it was already called.
5353 */
5354 if (entry) {
5355 cp_entry_destroy (&entry);
5356 }
5357 #endif
5358
5359 /*
5360 * Make sure we release cnode lock on dcp.
5361 */
5362 if (dcp) {
5363 dcp->c_flag &= ~C_DIR_MODIFICATION;
5364 wakeup((caddr_t)&dcp->c_flag);
5365
5366 hfs_unlock(dcp);
5367 }
5368 if (error == 0 && cp != NULL) {
5369 hfs_unlock(cp);
5370 }
5371 if (started_tr) {
5372 hfs_end_transaction(hfsmp);
5373 started_tr = 0;
5374 }
5375
5376 return (error);
5377 }
5378
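/*
 * Editor's illustrative sketch -- not part of the original source and guarded
 * out of the build. It only shows the calling convention for hfs_makenode(),
 * i.e. how a create-style VNOP is expected to funnel its arguments into it
 * (compare the CREATE case of hfs_vnop_whiteout() later in this file). The
 * function name below is hypothetical.
 */
#if 0
static int
example_vnop_create(struct vnop_create_args *ap)
{
	/* ap->a_vap carries the vnode_attr describing the new object */
	return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp,
			    ap->a_vap, ap->a_context);
}
#endif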
5379
5380 /*
5381 * hfs_vgetrsrc acquires a resource fork vnode corresponding to the cnode that is
5382 * found in 'vp'. The rsrc fork vnode is returned with the cnode locked and an
5383 * iocount held on the rsrc vnode.
5384 *
5385 * *rvpp is an output argument for returning the pointer to the resource fork vnode.
5386 * In most cases, the resource fork vnode will not be set if we return an error.
5387 * However, if error_on_unlinked is set, we may have already acquired the resource fork vnode
5388 * before we discover the error (the file has gone open-unlinked). In this case only,
5389 * we may return a vnode in the output argument despite an error.
5390 *
5391 * If can_drop_lock is set, then it is safe for this function to temporarily drop
5392 * and then re-acquire the cnode lock. We may need to do this, for example, in order to
5393 * acquire an iocount or promote our lock.
5394 *
5395 * error_on_unlinked indicates that we should return an error if we discover that
5396 * the cnode has gone open-unlinked (C_DELETED or C_NOEXISTS is set in the cnode
5397 * flags). This is only necessary if can_drop_lock is true; otherwise there's
5398 * really no reason to double-check the cnode for errors.
5399 */
5400
5401 int
5402 hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp,
5403 int can_drop_lock, int error_on_unlinked)
5404 {
5405 struct vnode *rvp;
5406 struct vnode *dvp = NULLVP;
5407 struct cnode *cp = VTOC(vp);
5408 int error;
5409 int vid;
5410 int delete_status = 0;
5411
5412 if (vnode_vtype(vp) == VDIR) {
5413 return EINVAL;
5414 }
5415
5416 /*
5417 * Need to check the status of the cnode to validate it hasn't gone
5418 * open-unlinked on us before we can actually do work with it.
5419 */
5420 delete_status = hfs_checkdeleted(cp);
5421 if ((delete_status) && (error_on_unlinked)) {
5422 return delete_status;
5423 }
5424
5425 restart:
5426 /* Attempt to use existing vnode */
5427 if ((rvp = cp->c_rsrc_vp)) {
5428 vid = vnode_vid(rvp);
5429
5430 /*
5431 * It is not safe to hold the cnode lock when calling vnode_getwithvid()
5432 * for the alternate fork -- vnode_getwithvid() could deadlock waiting
5433 * for a VL_WANTTERM while another thread has an iocount on the alternate
5434 * fork vnode and is attempting to acquire the common cnode lock.
5435 *
5436 * But it's also not safe to drop the cnode lock when we're holding
5437 * multiple cnode locks, as during an hfs_removefile() operation,
5438 * since we could lock out of order when re-acquiring the cnode lock.
5439 *
5440 * So we can only drop the lock here if it's safe to drop it -- which is
5441 * most of the time, the exception being hfs_removefile().
5442 */
5443 if (can_drop_lock)
5444 hfs_unlock(cp);
5445
5446 error = vnode_getwithvid(rvp, vid);
5447
5448 if (can_drop_lock) {
5449 (void) hfs_lock(cp, HFS_FORCE_LOCK);
5450
5451 /*
5452 * When we relinquished our cnode lock, the cnode could have raced
5453 * with a delete and gotten deleted. If the caller did not want
5454 * us to ignore open-unlinked files, then re-check the C_DELETED
5455 * state and see if we need to return an ENOENT here because the item
5456 * got deleted in the intervening time.
5457 */
5458 if (error_on_unlinked) {
5459 if ((delete_status = hfs_checkdeleted(cp))) {
5460 /*
5461 * If error == 0, this means that we succeeded in acquiring an iocount on the
5462 * rsrc fork vnode. However, if we're in this block of code, that means that we noticed
5463 * that the cnode has gone open-unlinked. In this case, the caller requested that we
5464 * not do any other work and return an errno. The caller will be responsible for
5465 * dropping the iocount we just acquired because we can't do it until we've released
5466 * the cnode lock.
5467 */
5468 if (error == 0) {
5469 *rvpp = rvp;
5470 }
5471 return delete_status;
5472 }
5473 }
5474
5475 /*
5476 * When our lock was relinquished, the resource fork
5477 * could have been recycled. Check for this and try
5478 * again.
5479 */
5480 if (error == ENOENT)
5481 goto restart;
5482 }
5483 if (error) {
5484 const char * name = (const char *)VTOC(vp)->c_desc.cd_nameptr;
5485
5486 if (name)
5487 printf("hfs_vgetrsrc: couldn't get resource"
5488 " fork for %s, err %d\n", name, error);
5489 return (error);
5490 }
5491 } else {
5492 struct cat_fork rsrcfork;
5493 struct componentname cn;
5494 struct cat_desc *descptr = NULL;
5495 struct cat_desc to_desc;
5496 char delname[32];
5497 int lockflags;
5498 int newvnode_flags = 0;
5499
5500 /*
5501 * Make sure the cnode lock is exclusive; if not, upgrade it.
5502 *
5503 * We assume that we were called from a read-only VNOP (getattr)
5504 * and that it's safe to have the cnode lock dropped and reacquired.
5505 */
5506 if (cp->c_lockowner != current_thread()) {
5507 if (!can_drop_lock) {
5508 return (EINVAL);
5509 }
5510 /*
5511 * If the upgrade fails we lose the lock and
5512 * have to take the exclusive lock on our own.
5513 */
5514 if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE)
5515 lck_rw_lock_exclusive(&cp->c_rwlock);
5516 cp->c_lockowner = current_thread();
5517 }
5518
5519 /*
5520 * hfs_vgetrsrc may be invoked for a cnode that has already been marked
5521 * C_DELETED. This is because we need to continue to provide rsrc
5522 * fork access to open-unlinked files. In this case, build a fake descriptor
5523 * as in hfs_removefile. If we don't do this, buildkey will fail in
5524 * cat_lookup because this cnode has no name in its descriptor. However,
5525 * only do this if the caller did not specify that they wanted us to
5526 * error out upon encountering open-unlinked files.
5527 */
5528
5529 if ((error_on_unlinked) && (can_drop_lock)) {
5530 if ((error = hfs_checkdeleted(cp))) {
5531 return error;
5532 }
5533 }
5534
5535 if ((cp->c_flag & C_DELETED ) && (cp->c_desc.cd_namelen == 0)) {
5536 bzero (&to_desc, sizeof(to_desc));
5537 bzero (delname, 32);
5538 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
5539 to_desc.cd_nameptr = (const u_int8_t*) delname;
5540 to_desc.cd_namelen = strlen(delname);
5541 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
5542 to_desc.cd_flags = 0;
5543 to_desc.cd_cnid = cp->c_cnid;
5544
5545 descptr = &to_desc;
5546 }
5547 else {
5548 descptr = &cp->c_desc;
5549 }
5550
5551
5552 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
5553
5554 /* Get resource fork data */
5555 error = cat_lookup(hfsmp, descptr, 1, (struct cat_desc *)0,
5556 (struct cat_attr *)0, &rsrcfork, NULL);
5557
5558 hfs_systemfile_unlock(hfsmp, lockflags);
5559 if (error) {
5560 return (error);
5561 }
5562 /*
5563 * Supply hfs_getnewvnode with a component name.
5564 */
5565 cn.cn_pnbuf = NULL;
5566 if (descptr->cd_nameptr) {
5567 MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
5568 cn.cn_nameiop = LOOKUP;
5569 cn.cn_flags = ISLASTCN | HASBUF;
5570 cn.cn_context = NULL;
5571 cn.cn_pnlen = MAXPATHLEN;
5572 cn.cn_nameptr = cn.cn_pnbuf;
5573 cn.cn_hash = 0;
5574 cn.cn_consume = 0;
5575 cn.cn_namelen = snprintf(cn.cn_nameptr, MAXPATHLEN,
5576 "%s%s", descptr->cd_nameptr,
5577 _PATH_RSRCFORKSPEC);
5578 }
5579 dvp = vnode_getparent(vp);
5580 error = hfs_getnewvnode(hfsmp, dvp, cn.cn_pnbuf ? &cn : NULL,
5581 descptr, GNV_WANTRSRC | GNV_SKIPLOCK, &cp->c_attr,
5582 &rsrcfork, &rvp, &newvnode_flags);
5583 if (dvp)
5584 vnode_put(dvp);
5585 if (cn.cn_pnbuf)
5586 FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
5587 if (error)
5588 return (error);
5589 }
5590
5591 *rvpp = rvp;
5592 return (0);
5593 }
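
/*
 * Editor's illustrative sketch -- not part of the original source and guarded
 * out of the build. It shows the typical caller-side pattern for
 * hfs_vgetrsrc(): take the cnode lock, ask for the resource fork vnode, then
 * drop the cnode lock and, when finished, drop the iocount on the returned
 * vnode. The function name below is hypothetical.
 */
#if 0
static int
example_use_rsrc_fork(struct hfsmount *hfsmp, struct vnode *vp)
{
	struct vnode *rvp = NULLVP;
	struct cnode *cp = VTOC(vp);
	int error;

	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK)))
		return (error);

	/* can_drop_lock = 1, error_on_unlinked = 0 for this simple case */
	error = hfs_vgetrsrc(hfsmp, vp, &rvp, 1, 0);
	hfs_unlock(cp);
	if (error)
		return (error);

	/* ... do I/O against rvp here ... */

	vnode_put(rvp);	/* drop the iocount granted by hfs_vgetrsrc */
	return (0);
}
#endif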
5594
5595 /*
5596 * Wrapper for special device reads
5597 */
5598 int
5599 hfsspec_read(ap)
5600 struct vnop_read_args /* {
5601 struct vnode *a_vp;
5602 struct uio *a_uio;
5603 int a_ioflag;
5604 vfs_context_t a_context;
5605 } */ *ap;
5606 {
5607 /*
5608 * Set access flag.
5609 */
5610 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
5611 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_read), ap));
5612 }
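
/*
 * Editor's note (added for clarity, not part of the original source): the
 * hfsspec_* wrappers in this file only update cnode time bookkeeping and then
 * forward the call into the specfs vnop table.  VOCALL(spec_vnodeop_p,
 * VOFFSET(vnop_read), ap) looks up the read entry in spec_vnodeop_p using the
 * vnop descriptor's table offset and invokes it with the original argument
 * block; the hfsfifo_* wrappers below do the same against fifo_vnodeop_p.
 */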
5613
5614 /*
5615 * Wrapper for special device writes
5616 */
5617 int
5618 hfsspec_write(ap)
5619 struct vnop_write_args /* {
5620 struct vnode *a_vp;
5621 struct uio *a_uio;
5622 int a_ioflag;
5623 vfs_context_t a_context;
5624 } */ *ap;
5625 {
5626 /*
5627 * Set update and change flags.
5628 */
5629 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
5630 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
5631 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_write), ap));
5632 }
5633
5634 /*
5635 * Wrapper for special device close
5636 *
5637 * Update the times on the cnode then do device close.
5638 */
5639 int
5640 hfsspec_close(ap)
5641 struct vnop_close_args /* {
5642 struct vnode *a_vp;
5643 int a_fflag;
5644 vfs_context_t a_context;
5645 } */ *ap;
5646 {
5647 struct vnode *vp = ap->a_vp;
5648 struct cnode *cp;
5649
5650 if (vnode_isinuse(ap->a_vp, 0)) {
5651 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) {
5652 cp = VTOC(vp);
5653 hfs_touchtimes(VTOHFS(vp), cp);
5654 hfs_unlock(cp);
5655 }
5656 }
5657 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_close), ap));
5658 }
5659
5660 #if FIFO
5661 /*
5662 * Wrapper for fifo reads
5663 */
5664 static int
5665 hfsfifo_read(ap)
5666 struct vnop_read_args /* {
5667 struct vnode *a_vp;
5668 struct uio *a_uio;
5669 int a_ioflag;
5670 vfs_context_t a_context;
5671 } */ *ap;
5672 {
5673 /*
5674 * Set access flag.
5675 */
5676 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
5677 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_read), ap));
5678 }
5679
5680 /*
5681 * Wrapper for fifo writes
5682 */
5683 static int
5684 hfsfifo_write(ap)
5685 struct vnop_write_args /* {
5686 struct vnode *a_vp;
5687 struct uio *a_uio;
5688 int a_ioflag;
5689 vfs_context_t a_context;
5690 } */ *ap;
5691 {
5692 /*
5693 * Set update and change flags.
5694 */
5695 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
5696 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
5697 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_write), ap));
5698 }
5699
5700 /*
5701 * Wrapper for fifo close
5702 *
5703 * Update the times on the cnode, then do the FIFO close.
5704 */
5705 static int
5706 hfsfifo_close(ap)
5707 struct vnop_close_args /* {
5708 struct vnode *a_vp;
5709 int a_fflag;
5710 vfs_context_t a_context;
5711 } */ *ap;
5712 {
5713 struct vnode *vp = ap->a_vp;
5714 struct cnode *cp;
5715
5716 if (vnode_isinuse(ap->a_vp, 1)) {
5717 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) {
5718 cp = VTOC(vp);
5719 hfs_touchtimes(VTOHFS(vp), cp);
5720 hfs_unlock(cp);
5721 }
5722 }
5723 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_close), ap));
5724 }
5725
5726
5727 #endif /* FIFO */
5728
5729 /*
5730 * Synchronize a file's in-core state with that on disk.
5731 */
5732 int
5733 hfs_vnop_fsync(ap)
5734 struct vnop_fsync_args /* {
5735 struct vnode *a_vp;
5736 int a_waitfor;
5737 vfs_context_t a_context;
5738 } */ *ap;
5739 {
5740 struct vnode* vp = ap->a_vp;
5741 int error;
5742
5743 /* Note: We check the hfs flags instead of the vfs mount flag because during
5744 * a read-write update, hfs marks itself read-write much earlier than the
5745 * vfs does; checking the hfs flag therefore avoids skipping certain writes,
5746 * such as zeroing out unused nodes, creating the hotfiles btree, etc.
5747 */
5748 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) {
5749 return 0;
5750 }
5751
5752 #if CONFIG_PROTECT
5753 if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
5754 return (error);
5755 }
5756 #endif /* CONFIG_PROTECT */
5757
5758 /*
5759 * We need to allow ENOENT lock errors since the unlink
5760 * system call can call VNOP_FSYNC during vclean.
5761 */
5762 error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK);
5763 if (error)
5764 return (0);
5765
5766 error = hfs_fsync(vp, ap->a_waitfor, 0, vfs_context_proc(ap->a_context));
5767
5768 hfs_unlock(VTOC(vp));
5769 return (error);
5770 }
5771
5772
5773 int
5774 hfs_vnop_whiteout(ap)
5775 struct vnop_whiteout_args /* {
5776 struct vnode *a_dvp;
5777 struct componentname *a_cnp;
5778 int a_flags;
5779 vfs_context_t a_context;
5780 } */ *ap;
5781 {
5782 int error = 0;
5783 struct vnode *vp = NULL;
5784 struct vnode_attr va;
5785 struct vnop_lookup_args lookup_args;
5786 struct vnop_remove_args remove_args;
5787 struct hfsmount *hfsmp;
5788
5789 hfsmp = VTOHFS(ap->a_dvp);
5790 if (hfsmp->hfs_flags & HFS_STANDARD) {
5791 error = ENOTSUP;
5792 goto exit;
5793 }
5794
5795 switch (ap->a_flags) {
5796 case LOOKUP:
5797 error = 0;
5798 break;
5799
5800 case CREATE:
5801 VATTR_INIT(&va);
5802 VATTR_SET(&va, va_type, VREG);
5803 VATTR_SET(&va, va_mode, S_IFWHT);
5804 VATTR_SET(&va, va_uid, 0);
5805 VATTR_SET(&va, va_gid, 0);
5806
5807 error = hfs_makenode(ap->a_dvp, &vp, ap->a_cnp, &va, ap->a_context);
5808 /* No need to release the vnode as no vnode is created for whiteouts */
5809 break;
5810
5811 case DELETE:
5812 lookup_args.a_dvp = ap->a_dvp;
5813 lookup_args.a_vpp = &vp;
5814 lookup_args.a_cnp = ap->a_cnp;
5815 lookup_args.a_context = ap->a_context;
5816
5817 error = hfs_vnop_lookup(&lookup_args);
5818 if (error) {
5819 break;
5820 }
5821
5822 remove_args.a_dvp = ap->a_dvp;
5823 remove_args.a_vp = vp;
5824 remove_args.a_cnp = ap->a_cnp;
5825 remove_args.a_flags = 0;
5826 remove_args.a_context = ap->a_context;
5827
5828 error = hfs_vnop_remove(&remove_args);
5829 vnode_put(vp);
5830 break;
5831
5832 default:
5833 panic("hfs_vnop_whiteout: unknown operation (flag = %x)\n", ap->a_flags);
5834 };
5835
5836 exit:
5837 return (error);
5838 }
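
/*
 * Editor's note (added for clarity, not part of the original source):
 * whiteout entries are used by union-style mounts to mask a name that exists
 * on a lower layer.  hfs_vnop_whiteout() therefore only creates, looks up, or
 * removes a catalog record whose mode is S_IFWHT; as the code above notes, no
 * vnode is ever materialized for a whiteout.
 */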
5839
5840 int (**hfs_vnodeop_p)(void *);
5841 int (**hfs_std_vnodeop_p) (void *);
5842
5843 #define VOPFUNC int (*)(void *)
5844
5845 static int hfs_readonly_op (__unused void* ap) { return (EROFS); }
5846
5847 /*
5848 * In 10.6 and later, HFS Standard is read-only and deprecated. The vnop table below
5849 * is used for HFS Standard volumes to block out operations that would modify the file system.
5850 */
5851
5852 struct vnodeopv_entry_desc hfs_standard_vnodeop_entries[] = {
5853 { &vnop_default_desc, (VOPFUNC)vn_default_error },
5854 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
5855 { &vnop_create_desc, (VOPFUNC)hfs_readonly_op }, /* create (READONLY) */
5856 { &vnop_mknod_desc, (VOPFUNC)hfs_readonly_op }, /* mknod (READONLY) */
5857 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
5858 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
5859 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
5860 { &vnop_setattr_desc, (VOPFUNC)hfs_readonly_op }, /* setattr */
5861 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
5862 { &vnop_write_desc, (VOPFUNC)hfs_readonly_op }, /* write (READONLY) */
5863 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
5864 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
5865 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
5866 { &vnop_exchange_desc, (VOPFUNC)hfs_readonly_op }, /* exchange (READONLY) */
5867 { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */
5868 { &vnop_fsync_desc, (VOPFUNC)hfs_readonly_op}, /* fsync (READONLY) */
5869 { &vnop_remove_desc, (VOPFUNC)hfs_readonly_op }, /* remove (READONLY) */
5870 { &vnop_link_desc, (VOPFUNC)hfs_readonly_op }, /* link (READONLY) */
5871 { &vnop_rename_desc, (VOPFUNC)hfs_readonly_op }, /* rename (READONLY) */
5872 { &vnop_mkdir_desc, (VOPFUNC)hfs_readonly_op }, /* mkdir (READONLY) */
5873 { &vnop_rmdir_desc, (VOPFUNC)hfs_readonly_op }, /* rmdir (READONLY) */
5874 { &vnop_symlink_desc, (VOPFUNC)hfs_readonly_op }, /* symlink (READONLY) */
5875 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
5876 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
5877 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
5878 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
5879 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
5880 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
5881 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
5882 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
5883 { &vnop_allocate_desc, (VOPFUNC)hfs_readonly_op }, /* allocate (READONLY) */
5884 #if CONFIG_SEARCHFS
5885 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
5886 #else
5887 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
5888 #endif
5889 { &vnop_bwrite_desc, (VOPFUNC)hfs_readonly_op }, /* bwrite (READONLY) */
5890 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
5891 { &vnop_pageout_desc,(VOPFUNC) hfs_readonly_op }, /* pageout (READONLY) */
5892 { &vnop_copyfile_desc, (VOPFUNC)hfs_readonly_op }, /* copyfile (READONLY)*/
5893 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
5894 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
5895 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
5896 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
5897 { &vnop_setxattr_desc, (VOPFUNC)hfs_readonly_op}, /* set xattr (READONLY) */
5898 { &vnop_removexattr_desc, (VOPFUNC)hfs_readonly_op}, /* remove xattr (READONLY) */
5899 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
5900 { &vnop_whiteout_desc, (VOPFUNC)hfs_readonly_op}, /* whiteout (READONLY) */
5901 #if NAMEDSTREAMS
5902 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
5903 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_readonly_op },
5904 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_readonly_op },
5905 #endif
5906 { NULL, (VOPFUNC)NULL }
5907 };
5908
5909 struct vnodeopv_desc hfs_std_vnodeop_opv_desc =
5910 { &hfs_std_vnodeop_p, hfs_standard_vnodeop_entries };
5911
5912
5913 /* VNOP table for HFS+ */
5914 struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
5915 { &vnop_default_desc, (VOPFUNC)vn_default_error },
5916 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
5917 { &vnop_create_desc, (VOPFUNC)hfs_vnop_create }, /* create */
5918 { &vnop_mknod_desc, (VOPFUNC)hfs_vnop_mknod }, /* mknod */
5919 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
5920 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
5921 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
5922 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
5923 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
5924 { &vnop_write_desc, (VOPFUNC)hfs_vnop_write }, /* write */
5925 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
5926 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
5927 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
5928 { &vnop_exchange_desc, (VOPFUNC)hfs_vnop_exchange }, /* exchange */
5929 { &vnop_mmap_desc, (VOPFUNC)hfs_vnop_mmap }, /* mmap */
5930 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
5931 { &vnop_remove_desc, (VOPFUNC)hfs_vnop_remove }, /* remove */
5932 { &vnop_link_desc, (VOPFUNC)hfs_vnop_link }, /* link */
5933 { &vnop_rename_desc, (VOPFUNC)hfs_vnop_rename }, /* rename */
5934 { &vnop_mkdir_desc, (VOPFUNC)hfs_vnop_mkdir }, /* mkdir */
5935 { &vnop_rmdir_desc, (VOPFUNC)hfs_vnop_rmdir }, /* rmdir */
5936 { &vnop_symlink_desc, (VOPFUNC)hfs_vnop_symlink }, /* symlink */
5937 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
5938 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
5939 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
5940 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
5941 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
5942 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
5943 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
5944 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
5945 { &vnop_allocate_desc, (VOPFUNC)hfs_vnop_allocate }, /* allocate */
5946 #if CONFIG_SEARCHFS
5947 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
5948 #else
5949 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
5950 #endif
5951 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite }, /* bwrite */
5952 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
5953 { &vnop_pageout_desc,(VOPFUNC) hfs_vnop_pageout }, /* pageout */
5954 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
5955 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
5956 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
5957 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
5958 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
5959 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
5960 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
5961 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
5962 { &vnop_whiteout_desc, (VOPFUNC)hfs_vnop_whiteout},
5963 #if NAMEDSTREAMS
5964 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
5965 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_vnop_makenamedstream },
5966 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_vnop_removenamedstream },
5967 #endif
5968 { NULL, (VOPFUNC)NULL }
5969 };
5970
5971 struct vnodeopv_desc hfs_vnodeop_opv_desc =
5972 { &hfs_vnodeop_p, hfs_vnodeop_entries };
5973
5974
5975 /* Spec Op vnop table for HFS+ */
5976 int (**hfs_specop_p)(void *);
5977 struct vnodeopv_entry_desc hfs_specop_entries[] = {
5978 { &vnop_default_desc, (VOPFUNC)vn_default_error },
5979 { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */
5980 { &vnop_create_desc, (VOPFUNC)spec_create }, /* create */
5981 { &vnop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */
5982 { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */
5983 { &vnop_close_desc, (VOPFUNC)hfsspec_close }, /* close */
5984 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
5985 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
5986 { &vnop_read_desc, (VOPFUNC)hfsspec_read }, /* read */
5987 { &vnop_write_desc, (VOPFUNC)hfsspec_write }, /* write */
5988 { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */
5989 { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */
5990 { &vnop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */
5991 { &vnop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */
5992 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
5993 { &vnop_remove_desc, (VOPFUNC)spec_remove }, /* remove */
5994 { &vnop_link_desc, (VOPFUNC)spec_link }, /* link */
5995 { &vnop_rename_desc, (VOPFUNC)spec_rename }, /* rename */
5996 { &vnop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */
5997 { &vnop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */
5998 { &vnop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */
5999 { &vnop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */
6000 { &vnop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */
6001 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
6002 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
6003 { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */
6004 { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */
6005 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
6006 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
6007 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
6008 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
6009 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
6010 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
6011 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
6012 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
6013 };
6014 struct vnodeopv_desc hfs_specop_opv_desc =
6015 { &hfs_specop_p, hfs_specop_entries };
6016
6017 #if FIFO
6018 /* HFS+ FIFO VNOP table */
6019 int (**hfs_fifoop_p)(void *);
6020 struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
6021 { &vnop_default_desc, (VOPFUNC)vn_default_error },
6022 { &vnop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */
6023 { &vnop_create_desc, (VOPFUNC)fifo_create }, /* create */
6024 { &vnop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */
6025 { &vnop_open_desc, (VOPFUNC)fifo_open }, /* open */
6026 { &vnop_close_desc, (VOPFUNC)hfsfifo_close }, /* close */
6027 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
6028 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
6029 { &vnop_read_desc, (VOPFUNC)hfsfifo_read }, /* read */
6030 { &vnop_write_desc, (VOPFUNC)hfsfifo_write }, /* write */
6031 { &vnop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */
6032 { &vnop_select_desc, (VOPFUNC)fifo_select }, /* select */
6033 { &vnop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */
6034 { &vnop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */
6035 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
6036 { &vnop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */
6037 { &vnop_link_desc, (VOPFUNC)fifo_link }, /* link */
6038 { &vnop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */
6039 { &vnop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */
6040 { &vnop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */
6041 { &vnop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */
6042 { &vnop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */
6043 { &vnop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */
6044 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
6045 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
6046 { &vnop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */
6047 { &vnop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */
6048 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
6049 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
6050 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
6051 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
6052 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
6053 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
6054 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
6055 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
6056 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
6057 };
6058 struct vnodeopv_desc hfs_fifoop_opv_desc =
6059 { &hfs_fifoop_p, hfs_fifoop_entries };
6060 #endif /* FIFO */
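
/*
 * Editor's illustrative sketch -- not part of the original source and guarded
 * out of the build. The vnodeopv_desc structures above are handed to the VFS
 * when the file system registers; the filled-in operation vectors
 * (hfs_vnodeop_p, hfs_std_vnodeop_p, hfs_specop_p, hfs_fifoop_p) are then
 * chosen per vnode at creation time based on the vnode's type, roughly along
 * these lines (the 'vtype' and 'hfs_standard' names below are hypothetical):
 */
#if 0
	int (***vops)(void *);

	if (vtype == VFIFO)
		vops = &hfs_fifoop_p;		/* FIFO wrappers */
	else if (vtype == VBLK || vtype == VCHR)
		vops = &hfs_specop_p;		/* special-device wrappers */
	else if (hfs_standard)
		vops = &hfs_std_vnodeop_p;	/* read-only HFS Standard table */
	else
		vops = &hfs_vnodeop_p;		/* regular HFS+ table */
#endif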
6061
6062
6063