/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*	@(#)hfs_readwrite.c	1.0
 *
 *	(c) 1998-2001 Apple Computer, Inc.  All Rights Reserved
 *
 *	hfs_readwrite.c -- vnode operations to deal with reading and writing files.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/buf_internal.h>
#include <sys/kauth.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/vfs_context.h>
#include <sys/fsevents.h>
#include <kern/kalloc.h>
#include <sys/sysctl.h>
#include <sys/fsctl.h>
#include <sys/mount_internal.h>
#include <sys/file_internal.h>

#include <miscfs/specfs/specdev.h>

#include <sys/ubc_internal.h>

#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>

#include <sys/kdebug.h>

#include "hfs_attrlist.h"
#include "hfs_endian.h"
#include "hfs_fsctl.h"
#include "hfs_quota.h"
#include "hfscommon/headers/FileMgrInternal.h"
#include "hfscommon/headers/BTreesInternal.h"
#include "hfs_cnode.h"
#define can_cluster(size)  ((((size & (4096-1))) == 0) && (size <= (MAXPHYSIO/2)))

enum {
	MAXHFSFILESIZE = 0x7FFFFFFF		/* this needs to go in the mount structure */
};

/* from bsd/hfs/hfs_vfsops.c */
extern int hfs_vfs_vget (struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context);

static int  hfs_clonefile(struct vnode *, int, int, int);
static int  hfs_clonesysfile(struct vnode *, int, int, int, kauth_cred_t, struct proc *);
static int  hfs_minorupdate(struct vnode *vp);
static int  do_hfs_truncate(struct vnode *vp, off_t length, int flags, int skip, vfs_context_t context);

/* from bsd/hfs/hfs_vnops.c */
extern decmpfs_cnode* hfs_lazy_init_decmpfs_cnode (struct cnode *cp);

int flush_cache_on_write = 0;
SYSCTL_INT (_kern, OID_AUTO, flush_cache_on_write, CTLFLAG_RW | CTLFLAG_LOCKED,
	&flush_cache_on_write, 0, "always flush the drive cache on writes to uncached files");
/*
 * Read data from a file.
 */
hfs_vnop_read(struct vnop_read_args *ap)
	/*
	   struct vnop_read_args {
	       struct vnodeop_desc *a_desc;
	       vfs_context_t a_context;
	   };
	 */
	uio_t uio = ap->a_uio;
	struct vnode *vp = ap->a_vp;
	struct hfsmount *hfsmp;
	off_t start_resid = uio_resid(uio);
	off_t offset = uio_offset(uio);
	int took_truncate_lock = 0;
	int throttled_count = 0;

	/* Preflight checks */
	if (!vnode_isreg(vp)) {
		/* can only read regular files */
	if (start_resid == 0)
		return (0);		/* Nothing left to do */
		return (EINVAL);	/* can't read from a negative offset */

	if ((ap->a_ioflag & (IO_SKIP_ENCRYPTION|IO_SYSCALL_DISPATCH)) ==
			(IO_SKIP_ENCRYPTION|IO_SYSCALL_DISPATCH)) {
		/* Don't allow unencrypted io request from user space */

	if (VNODE_IS_RSRC(vp)) {
		if (hfs_hides_rsrc(ap->a_context, VTOC(vp), 1)) { /* 1 == don't take the cnode lock */
		/* otherwise read the resource fork normally */
		int compressed = hfs_file_is_compressed(VTOC(vp), 1); /* 1 == don't take the cnode lock */
			retval = decmpfs_read_compressed(ap, &compressed, VTOCMP(vp));
				/* successful read, update the access time */
				VTOC(vp)->c_touch_acctime = TRUE;

				/* compressed files are not hot file candidates */
				if (VTOHFS(vp)->hfc_stage == HFC_RECORDING) {
					VTOF(vp)->ff_bytesread = 0;
			/* otherwise the file was converted back to a regular file while we were reading it */
	} else if ((VTOC(vp)->c_bsdflags & UF_COMPRESSED)) {
		error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
#endif /* HFS_COMPRESSION */

	if ((retval = cp_handle_vnop (vp, CP_READ_ACCESS, ap->a_ioflag)) != 0) {

	/*
	 * If this read request originated from a syscall (as opposed to
	 * an in-kernel page fault or something), then set it up for
	 * throttle checks.
	 */
	if (ap->a_ioflag & IO_SYSCALL_DISPATCH) {
		io_throttle = IO_RETURN_ON_THROTTLE;

	/* Protect against a size change. */
	hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	filesize = fp->ff_size;
	filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize;

	/*
	 * Check the file size. Note that per POSIX spec, we return 0 at
	 * file EOF, so attempting a read at an offset that is too big
	 * should just return 0 on HFS+. Since the return value was initialized
	 * to 0 above, we just jump to exit.  HFS Standard has its own behavior.
	 */
	if (offset > filesize) {
		if ((hfsmp->hfs_flags & HFS_STANDARD) &&
		    (offset > (off_t)MAXHFSFILESIZE)) {

	KERNEL_DEBUG(HFSDBG_READ | DBG_FUNC_START,
		(int)uio_offset(uio), uio_resid(uio), (int)filesize, (int)filebytes, 0);

	retval = cluster_read(vp, uio, filesize, ap->a_ioflag | io_throttle);

	cp->c_touch_acctime = TRUE;

	KERNEL_DEBUG(HFSDBG_READ | DBG_FUNC_END,
		(int)uio_offset(uio), uio_resid(uio), (int)filesize, (int)filebytes, 0);

	/*
	 * Keep track of blocks read.
	 */
	if (hfsmp->hfc_stage == HFC_RECORDING && retval == 0) {
		int took_cnode_lock = 0;

		bytesread = start_resid - uio_resid(uio);

		/* When ff_bytesread exceeds 32-bits, update it behind the cnode lock. */
		if ((fp->ff_bytesread + bytesread) > 0x00000000ffffffff) {
			hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
		/*
		 * If this file hasn't been seen since the start of
		 * the current sampling period then start over.
		 */
		if (cp->c_atime < hfsmp->hfc_timebase) {
			fp->ff_bytesread = bytesread;
			cp->c_atime = tv.tv_sec;
			fp->ff_bytesread += bytesread;

	if (took_truncate_lock) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	if (retval == EAGAIN) {
		throttle_lowpri_io(1);
	if (throttled_count) {
		throttle_info_reset_window((uthread_t)get_bsdthread_info(current_thread()));
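/*
 * Illustrative note (not in the original source): the EOF behavior described in
 * hfs_vnop_read above means a read that starts at or past end-of-file simply
 * returns 0 bytes on HFS+ rather than failing.  A minimal user-space sketch,
 * assuming fd and file_size are a hypothetical open file and its length:
 *
 *	char buf[64];
 *	// offset well past EOF: expect a return value of 0, not an error
 *	ssize_t n = pread(fd, buf, sizeof(buf), file_size + 4096);
 */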
/*
 * Write data to a file.
 */
hfs_vnop_write(struct vnop_write_args *ap)
	uio_t uio = ap->a_uio;
	struct vnode *vp = ap->a_vp;
	struct hfsmount *hfsmp;
	kauth_cred_t cred = NULL;
	off_t bytesToAdd = 0;
	off_t actualBytesAdded;
	int ioflag = ap->a_ioflag;
	int cnode_locked = 0;
	int partialwrite = 0;
	time_t orig_ctime = VTOC(vp)->c_ctime;
	int took_truncate_lock = 0;
	int io_return_on_throttle = 0;
	int throttled_count = 0;
	struct rl_entry *invalid_range;

	if ( hfs_file_is_compressed(VTOC(vp), 1) ) { /* 1 == don't take the cnode lock */
		int state = decmpfs_cnode_get_vnode_state(VTOCMP(vp));
			case FILE_IS_COMPRESSED:
			case FILE_IS_CONVERTING:
				/* if FILE_IS_CONVERTING, we allow writes but do not
				   bother with snapshots or else we will deadlock.
				 */
				printf("invalid state %d for compressed file\n", state);
	} else if ((VTOC(vp)->c_bsdflags & UF_COMPRESSED)) {
		error = check_for_dataless_file(vp, NAMESPACE_HANDLER_WRITE_OP);

	check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_WRITE_OP, uio);

	if ((ioflag & (IO_SKIP_ENCRYPTION|IO_SYSCALL_DISPATCH)) ==
			(IO_SKIP_ENCRYPTION|IO_SYSCALL_DISPATCH)) {
		/* Don't allow unencrypted io request from user space */

	resid = uio_resid(uio);
	offset = uio_offset(uio);

	if (!vnode_isreg(vp))
		return (EPERM);		/* Can only write regular files */

	if ((retval = cp_handle_vnop (vp, CP_WRITE_ACCESS, 0)) != 0) {

	eflags = kEFDeferMask;	/* defer file block allocations */
	/*
	 * When the underlying device is sparse and space
	 * is low (< 8MB), stop doing delayed allocations
	 * and begin doing synchronous I/O.
	 */
	if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) &&
	    (hfs_freeblks(hfsmp, 0) < 2048)) {
		eflags &= ~kEFDeferMask;
#endif /* HFS_SPARSE_DEV */

	if ((ioflag & (IO_SINGLE_WRITER | IO_SYSCALL_DISPATCH)) ==
			(IO_SINGLE_WRITER | IO_SYSCALL_DISPATCH)) {
		io_return_on_throttle = IO_RETURN_ON_THROTTLE;

	/*
	 * Protect against a size change.
	 *
	 * Note: If took_truncate_lock is true, then we previously got the lock shared
	 * but needed to upgrade to exclusive.  So try getting it exclusive from the
	 * start.
	 */
	if (ioflag & IO_APPEND || took_truncate_lock) {
		hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	if (ioflag & IO_APPEND) {
		uio_setoffset(uio, fp->ff_size);
		offset = fp->ff_size;
	if ((cp->c_bsdflags & APPEND) && offset != fp->ff_size) {

	origFileSize = fp->ff_size;
	writelimit = offset + resid;
	filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize;

	/*
	 * We may need an exclusive truncate lock for several reasons, all
	 * of which are because we may be writing to a (portion of a) block
	 * for the first time, and we need to make sure no readers see the
	 * prior, uninitialized contents of the block.  The cases are:
	 *
	 * 1. We have unallocated (delayed allocation) blocks.  We may be
	 *    allocating new blocks to the file and writing to them.
	 *    (A more precise check would be whether the range we're writing
	 *    to contains delayed allocation blocks.)
	 * 2. We need to extend the file.  The bytes between the old EOF
	 *    and the new EOF are not yet initialized.  This is important
	 *    even if we're not allocating new blocks to the file.  If the
	 *    old EOF and new EOF are in the same block, we still need to
	 *    protect that range of bytes until they are written for the
	 *    first time.
	 * 3. The write overlaps some invalid ranges (delayed zero fill; that
	 *    part of the file has been allocated, but not yet written).
	 *
	 * If we had a shared lock with the above cases, we need to try to upgrade
	 * to an exclusive lock.  If the upgrade fails, we will lose the shared
	 * lock, and will need to take the truncate lock again; the took_truncate_lock
	 * flag will still be set, causing us to try for an exclusive lock next time.
	 *
	 * NOTE: Testing for #3 (delayed zero fill) needs to be done while the cnode
	 * lock is held, since it protects the range lists.
	 */
	if ((cp->c_truncatelockowner == HFS_SHARED_OWNER) &&
	    ((fp->ff_unallocblocks != 0) ||
	     (writelimit > origFileSize))) {
		if (lck_rw_lock_shared_to_exclusive(&cp->c_truncatelock) == FALSE) {
			/*
			 * Lock upgrade failed and we lost our shared lock, try again.
			 * Note: we do not set took_truncate_lock=0 here.  Leaving it
			 * set to 1 will cause us to try to get the lock exclusive.
			 */
			/* Store the owner in the c_truncatelockowner field if we successfully upgrade */
			cp->c_truncatelockowner = current_thread();

	if ( (retval = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {

	/*
	 * Now that we have the cnode lock, see if there are delayed zero fill ranges
	 * overlapping our write.  If so, we need the truncate lock exclusive (see above).
	 */
	if ((cp->c_truncatelockowner == HFS_SHARED_OWNER) &&
	    (rl_scan(&fp->ff_invalidranges, offset, writelimit-1, &invalid_range) != RL_NOOVERLAP)) {
		/*
		 * When testing, it appeared that calling lck_rw_lock_shared_to_exclusive() causes
		 * a deadlock, rather than simply returning failure.  (That is, it apparently does
		 * not behave like a "try_lock").  Since this condition is rare, just drop the
		 * cnode lock and try again.  Since took_truncate_lock is set, we will
		 * automatically take the truncate lock exclusive.
		 */
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);

	KERNEL_DEBUG(HFSDBG_WRITE | DBG_FUNC_START,
		     (int)offset, uio_resid(uio), (int)fp->ff_size,
		     (int)filebytes, 0);

	/* Check if we do not need to extend the file */
	if (writelimit <= filebytes) {

	cred = vfs_context_ucred(ap->a_context);
	bytesToAdd = writelimit - filebytes;

	retval = hfs_chkdq(cp, (int64_t)(roundup(bytesToAdd, hfsmp->blockSize)),
			   cred, 0);

	if (hfs_start_transaction(hfsmp) != 0) {

	while (writelimit > filebytes) {
		bytesToAdd = writelimit - filebytes;
		if (cred && suser(cred, NULL) != 0)
			eflags |= kEFReserveMask;

		/* Protect extents b-tree and allocation bitmap */
		lockflags = SFL_BITMAP;
		if (overflow_extents(fp))
			lockflags |= SFL_EXTENTS;
		lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

		/* Files that are changing size are not hot file candidates. */
		if (hfsmp->hfc_stage == HFC_RECORDING) {
			fp->ff_bytesread = 0;
		retval = MacToVFSError(ExtendFileC (hfsmp, (FCB*)fp, bytesToAdd,
						    0, eflags, &actualBytesAdded));

		hfs_systemfile_unlock(hfsmp, lockflags);

		if ((actualBytesAdded == 0) && (retval == E_NONE))
		if (retval != E_NONE)
		filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize;
		KERNEL_DEBUG(HFSDBG_WRITE | DBG_FUNC_NONE,
			(int)offset, uio_resid(uio), (int)fp->ff_size, (int)filebytes, 0);

	(void) hfs_update(vp, TRUE);
	(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
	(void) hfs_end_transaction(hfsmp);

	/*
	 * If we didn't grow the file enough try a partial write.
	 * POSIX expects this behavior.
	 */
	if ((retval == ENOSPC) && (filebytes > offset)) {
		uio_setresid(uio, (uio_resid(uio) - bytesToAdd));
		writelimit = filebytes;

	if (retval == E_NONE) {
		if (writelimit > fp->ff_size)
			filesize = writelimit;
			filesize = fp->ff_size;

		lflag = ioflag & ~(IO_TAILZEROFILL | IO_HEADZEROFILL | IO_NOZEROVALID | IO_NOZERODIRTY);
		if (offset <= fp->ff_size) {
			zero_off = offset & ~PAGE_MASK_64;

			/* Check whether the area between the zero_off and the start
			   of the transfer is invalid and should be zero-filled
			   as part of the transfer:
			 */
			if (offset > zero_off) {
				if (rl_scan(&fp->ff_invalidranges, zero_off, offset - 1, &invalid_range) != RL_NOOVERLAP)
					lflag |= IO_HEADZEROFILL;

			off_t eof_page_base = fp->ff_size & ~PAGE_MASK_64;

			/* The bytes between fp->ff_size and uio->uio_offset must never be
			   read without being zeroed.  The current last block is filled with zeroes
			   if it holds valid data but in all cases merely do a little bookkeeping
			   to track the area from the end of the current last page to the start of
			   the area actually written.  For the same reason only the bytes up to the
			   start of the page where this write will start is invalidated; any remainder
			   before uio->uio_offset is explicitly zeroed as part of the cluster_write.

			   Note that inval_start, the start of the page after the current EOF,
			   may be past the start of the write, in which case the zeroing
			   will be handled by the cluster_write of the actual data.
			 */
			inval_start = (fp->ff_size + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
			inval_end = offset & ~PAGE_MASK_64;
			zero_off = fp->ff_size;

			if ((fp->ff_size & PAGE_MASK_64) &&
			    (rl_scan(&fp->ff_invalidranges,
				     zero_off,
				     fp->ff_size - 1,
				     &invalid_range) != RL_NOOVERLAP)) {
				/* The page containing the EOF is not valid, so the
				   entire page must be made inaccessible now.  If the write
				   starts on a page beyond the page containing the eof
				   (inval_end > eof_page_base), add the
				   whole page to the range to be invalidated.  Otherwise
				   (i.e. if the write starts on the same page), zero-fill
				   the entire page explicitly now:
				 */
				if (inval_end > eof_page_base) {
					inval_start = eof_page_base;
					zero_off = eof_page_base;

			if (inval_start < inval_end) {
				/* There's some range of data that's going to be marked invalid */

				if (zero_off < inval_start) {
					/* The pages between inval_start and inval_end are going to be invalidated,
					   and the actual write will start on a page past inval_end.  Now's the last
					   chance to zero-fill the page containing the EOF:
					 */
					retval = cluster_write(vp, (uio_t) 0,
							fp->ff_size, inval_start,
							zero_off, (off_t)0,
							lflag | IO_HEADZEROFILL | IO_NOZERODIRTY);
					hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
					if (retval) goto ioerr_exit;
					offset = uio_offset(uio);

				/* Mark the remaining area of the newly allocated space as invalid: */
				rl_add(inval_start, inval_end - 1 , &fp->ff_invalidranges);
				cp->c_zftimeout = tv.tv_sec + ZFTIMELIMIT;
				zero_off = fp->ff_size = inval_end;

			if (offset > zero_off) lflag |= IO_HEADZEROFILL;

		/* Check to see whether the area between the end of the write and the end of
		   the page it falls in is invalid and should be zero-filled as part of the transfer:
		 */
		tail_off = (writelimit + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
		if (tail_off > filesize) tail_off = filesize;
		if (tail_off > writelimit) {
			if (rl_scan(&fp->ff_invalidranges, writelimit, tail_off - 1, &invalid_range) != RL_NOOVERLAP) {
				lflag |= IO_TAILZEROFILL;

		/*
		 * if the write starts beyond the current EOF (possibly advanced in the
		 * zeroing of the last block, above), then we'll zero fill from the current EOF
		 * to where the write begins:
		 *
		 * NOTE: If (and ONLY if) the portion of the file about to be written is
		 * before the current EOF it might be marked as invalid now and must be
		 * made readable (removed from the invalid ranges) before cluster_write
		 * tries to write it.
		 */
		io_start = (lflag & IO_HEADZEROFILL) ? zero_off : offset;
		if (io_start < fp->ff_size) {
			io_end = (lflag & IO_TAILZEROFILL) ? tail_off : writelimit;
			rl_remove(io_start, io_end - 1, &fp->ff_invalidranges);

		/*
		 * We need to tell UBC the fork's new size BEFORE calling
		 * cluster_write, in case any of the new pages need to be
		 * paged out before cluster_write completes (which does happen
		 * in embedded systems due to extreme memory pressure).
		 * Similarly, we need to tell hfs_vnop_pageout what the new EOF
		 * will be, so that it can pass that on to cluster_pageout, and
		 * allow those pageouts.
		 *
		 * We don't update ff_size yet since we don't want pageins to
		 * be able to see uninitialized data between the old and new
		 * EOF, until cluster_write has completed and initialized that
		 * part of the file.
		 *
		 * The vnode pager relies on the file size last given to UBC via
		 * ubc_setsize.  hfs_vnop_pageout relies on fp->ff_new_size or
		 * ff_size (whichever is larger).  NOTE: ff_new_size is always
		 * zero, unless we are extending the file via write.
		 */
		if (filesize > fp->ff_size) {
			fp->ff_new_size = filesize;
			ubc_setsize(vp, filesize);

		retval = cluster_write(vp, uio, fp->ff_size, filesize, zero_off,
				tail_off, lflag | IO_NOZERODIRTY | io_return_on_throttle);

		fp->ff_new_size = 0;	/* no longer extending; use ff_size */

		if (retval == EAGAIN) {
			/*
			 * EAGAIN indicates that we still have I/O to do, but
			 * that we now need to be throttled
			 */
			if (resid != uio_resid(uio)) {
				/*
				 * did manage to do some I/O before returning EAGAIN
				 */
				resid = uio_resid(uio);
				offset = uio_offset(uio);

				cp->c_touch_chgtime = TRUE;
				cp->c_touch_modtime = TRUE;
				hfs_incr_gencount(cp);

			if (filesize > fp->ff_size) {
				/*
				 * we called ubc_setsize before the call to
				 * cluster_write... since we only partially
				 * completed the I/O, we need to
				 * re-adjust our idea of the filesize based
				 * on what actually got written...
				 */
				ubc_setsize(vp, offset);

				fp->ff_size = offset;

			if (filesize > origFileSize) {
				ubc_setsize(vp, origFileSize);

			if (filesize > origFileSize) {
				fp->ff_size = filesize;

				/* Files that are changing size are not hot file candidates. */
				if (hfsmp->hfc_stage == HFC_RECORDING) {
					fp->ff_bytesread = 0;
			fp->ff_new_size = 0;	/* ff_size now has the correct size */

		uio_setresid(uio, (uio_resid(uio) + bytesToAdd));

	// XXXdbg - see radar 4871353 for more info
	if (flush_cache_on_write && ((ioflag & IO_NOCACHE) || vnode_isnocache(vp))) {
		VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL);

	if (resid > uio_resid(uio)) {
		hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
		cp->c_touch_chgtime = TRUE;
		cp->c_touch_modtime = TRUE;
		hfs_incr_gencount(cp);

		/*
		 * If we successfully wrote any data, and we are not the superuser
		 * we clear the setuid and setgid bits as a precaution against
		 * tampering.
		 */
		if (cp->c_mode & (S_ISUID | S_ISGID)) {
			cred = vfs_context_ucred(ap->a_context);
			if (cred && suser(cred, NULL)) {
				cp->c_mode &= ~(S_ISUID | S_ISGID);

	if (ioflag & IO_UNIT) {
		(void)hfs_truncate(vp, origFileSize, ioflag & IO_SYNC,
				   0, ap->a_context);
		uio_setoffset(uio, (uio_offset(uio) - (resid - uio_resid(uio))));
		uio_setresid(uio, resid);
		filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize;
	} else if ((ioflag & IO_SYNC) && (resid > uio_resid(uio)))
		retval = hfs_update(vp, TRUE);

	/* Updating vcbWrCnt doesn't need to be atomic. */

	KERNEL_DEBUG(HFSDBG_WRITE | DBG_FUNC_END,
		(int)uio_offset(uio), uio_resid(uio), (int)fp->ff_size, (int)filebytes, 0);

	if (took_truncate_lock) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	if (retval == EAGAIN) {
		throttle_lowpri_io(1);
	if (throttled_count) {
		throttle_info_reset_window((uthread_t)get_bsdthread_info(current_thread()));
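/*
 * Illustrative note (not in the original source): the invalid-range bookkeeping
 * in hfs_vnop_write above rounds with PAGE_MASK_64.  For example, with 4 KiB
 * pages, an existing EOF of 0x1234 and a write offset of 0x5678:
 *
 *	inval_start = (0x1234 + 0xFFF) & ~0xFFFULL;	// 0x2000, first page past EOF
 *	inval_end   = 0x5678 & ~0xFFFULL;		// 0x5000, page where the write starts
 *
 * so the range [0x2000, 0x5000) is added to ff_invalidranges for delayed zero
 * fill, while the tail of the EOF page itself is zeroed as part of the write.
 */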
/* support for the "bulk-access" fcntl */

#define CACHE_LEVELS 16
#define NUM_CACHE_ENTRIES (64*16)
#define PARENT_IDS_FLAG 0x100

struct access_cache {
	int cachehits;	/* these two for statistics gathering */
	unsigned int *acache;
	unsigned char *haveaccess;

struct access_t {
	uid_t     uid;			/* IN: effective user id */
	short     flags;		/* IN: access requested (i.e. R_OK) */
	short     num_groups;		/* IN: number of groups user belongs to */
	int       num_files;		/* IN: number of files to process */
	int       *file_ids;		/* IN: array of file ids */
	gid_t     *groups;		/* IN: array of groups */
	short     *access;		/* OUT: access info for each file (0 for 'has access') */
} __attribute__((unavailable)); // this structure is for reference purposes only

struct user32_access_t {
	uid_t     uid;			/* IN: effective user id */
	short     flags;		/* IN: access requested (i.e. R_OK) */
	short     num_groups;		/* IN: number of groups user belongs to */
	int       num_files;		/* IN: number of files to process */
	user32_addr_t file_ids;		/* IN: array of file ids */
	user32_addr_t groups;		/* IN: array of groups */
	user32_addr_t access;		/* OUT: access info for each file (0 for 'has access') */

struct user64_access_t {
	uid_t     uid;			/* IN: effective user id */
	short     flags;		/* IN: access requested (i.e. R_OK) */
	short     num_groups;		/* IN: number of groups user belongs to */
	int       num_files;		/* IN: number of files to process */
	user64_addr_t file_ids;		/* IN: array of file ids */
	user64_addr_t groups;		/* IN: array of groups */
	user64_addr_t access;		/* OUT: access info for each file (0 for 'has access') */

// these are the "extended" versions of the above structures
// note that it is crucial that they be different sized than
// the regular version
struct ext_access_t {
	uint32_t   flags;		/* IN: access requested (i.e. R_OK) */
	uint32_t   num_files;		/* IN: number of files to process */
	uint32_t   map_size;		/* IN: size of the bit map */
	uint32_t   *file_ids;		/* IN: Array of file ids */
	char       *bitmap;		/* OUT: hash-bitmap of interesting directory ids */
	short      *access;		/* OUT: access info for each file (0 for 'has access') */
	uint32_t   num_parents;		/* future use */
	cnid_t     *parents;		/* future use */
} __attribute__((unavailable)); // this structure is for reference purposes only

struct user32_ext_access_t {
	uint32_t   flags;		/* IN: access requested (i.e. R_OK) */
	uint32_t   num_files;		/* IN: number of files to process */
	uint32_t   map_size;		/* IN: size of the bit map */
	user32_addr_t file_ids;		/* IN: Array of file ids */
	user32_addr_t bitmap;		/* OUT: hash-bitmap of interesting directory ids */
	user32_addr_t access;		/* OUT: access info for each file (0 for 'has access') */
	uint32_t   num_parents;		/* future use */
	user32_addr_t parents;		/* future use */

struct user64_ext_access_t {
	uint32_t      flags;		/* IN: access requested (i.e. R_OK) */
	uint32_t      num_files;	/* IN: number of files to process */
	uint32_t      map_size;		/* IN: size of the bit map */
	user64_addr_t file_ids;		/* IN: array of file ids */
	user64_addr_t bitmap;		/* OUT: hash-bitmap of interesting directory ids */
	user64_addr_t access;		/* OUT: access info for each file (0 for 'has access') */
	uint32_t      num_parents;	/* future use */
	user64_addr_t parents;		/* future use */
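/*
 * Illustrative note (not in the original source): user space drives this path
 * through fsctl(2) on the volume, handing in one of the layouts above.  A
 * minimal, hypothetical sketch -- the request constant comes from hfs_fsctl.h,
 * the field layout mirrors the reference struct access_t shown above, and
 * nfiles/ids/results are caller-provided buffers:
 *
 *	struct access_t args = {
 *		.uid       = geteuid(),
 *		.flags     = R_OK,
 *		.num_files = nfiles,
 *		.file_ids  = ids,	// array of cnids to test
 *		.access    = results,	// filled in with 0 or an errno per file
 *	};
 *	// HFS_BULKACCESS_FSCTL is the request handled in hfs_vnop_ioctl below
 *	fsctl("/Volumes/MyHFS", HFS_BULKACCESS_FSCTL, &args, 0);
 */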
/*
 * Perform a binary search for the given parent_id. Return value is
 * the index if there is a match.  If no_match_indexp is non-NULL it
 * will be assigned with the index to insert the item (even if it was
 * not found).
 */
static int cache_binSearch(cnid_t *array, unsigned int hi, cnid_t parent_id, int *no_match_indexp)
		unsigned int mid = ((hi - lo)/2) + lo;
		unsigned int this_id = array[mid];

		if (parent_id == this_id) {
		if (parent_id < this_id) {
		if (parent_id > this_id) {

	/* check if lo and hi converged on the match */
	if (parent_id == array[hi]) {
	if (no_match_indexp) {
		*no_match_indexp = hi;

lookup_bucket(struct access_cache *cache, int *indexp, cnid_t parent_id)
	int index, no_match_index;

	if (cache->numcached == 0) {
		return 0;	// table is empty, so insert at index=0 and report no match

	if (cache->numcached > NUM_CACHE_ENTRIES) {
		cache->numcached = NUM_CACHE_ENTRIES;

	hi = cache->numcached - 1;

	index = cache_binSearch(cache->acache, hi, parent_id, &no_match_index);

	/* if no existing entry found, find index for new one */
		index = no_match_index;
/*
 * Add a node to the access_cache at the given index (or do a lookup first
 * to find the index if -1 is passed in).  We currently do a replace rather
 * than an insert if the cache is full.
 */
add_node(struct access_cache *cache, int index, cnid_t nodeID, int access)
	int lookup_index = -1;

	/* need to do a lookup first if -1 passed for index */
		if (lookup_bucket(cache, &lookup_index, nodeID)) {
			if (cache->haveaccess[lookup_index] != access && cache->haveaccess[lookup_index] == ESRCH) {
				// only update an entry if the previous access was ESRCH (i.e. a scope checking error)
				cache->haveaccess[lookup_index] = access;

			/* mission accomplished */
			index = lookup_index;

	/* if the cache is full, do a replace rather than an insert */
	if (cache->numcached >= NUM_CACHE_ENTRIES) {
		cache->numcached = NUM_CACHE_ENTRIES-1;

		if (index > cache->numcached) {
			index = cache->numcached;

	if (index < cache->numcached && index < NUM_CACHE_ENTRIES && nodeID > cache->acache[index]) {

	if (index >= 0 && index < cache->numcached) {
		/* only do bcopy if we're inserting */
		bcopy( cache->acache+index, cache->acache+(index+1), (cache->numcached - index)*sizeof(int) );
		bcopy( cache->haveaccess+index, cache->haveaccess+(index+1), (cache->numcached - index)*sizeof(unsigned char) );

	cache->acache[index] = nodeID;
	cache->haveaccess[index] = access;
snoop_callback(const cnode_t *cp, void *arg)
	struct cinfo *cip = arg;

	cip->uid = cp->c_uid;
	cip->gid = cp->c_gid;
	cip->mode = cp->c_mode;
	cip->parentcnid = cp->c_parentcnid;
	cip->recflags = cp->c_attr.ca_recflags;

/*
 * Lookup the cnid's attr info (uid, gid, and mode) as well as its parent id. If the item
 * isn't incore, then go to the catalog.
 */
do_attr_lookup(struct hfsmount *hfsmp, struct access_cache *cache, cnid_t cnid,
    struct cnode *skip_cp, CatalogKey *keyp, struct cat_attr *cnattrp)

	/* if this id matches the one the fsctl was called with, skip the lookup */
	if (cnid == skip_cp->c_cnid) {
		cnattrp->ca_uid = skip_cp->c_uid;
		cnattrp->ca_gid = skip_cp->c_gid;
		cnattrp->ca_mode = skip_cp->c_mode;
		cnattrp->ca_recflags = skip_cp->c_attr.ca_recflags;
		keyp->hfsPlus.parentID = skip_cp->c_parentcnid;

		struct cinfo c_info;

		/* otherwise, check the cnode hash in case the file/dir is incore */
		error = hfs_chash_snoop(hfsmp, cnid, 0, snoop_callback, &c_info);

		if (error == EACCES) {
		} else if (!error) {
			cnattrp->ca_uid = c_info.uid;
			cnattrp->ca_gid = c_info.gid;
			cnattrp->ca_mode = c_info.mode;
			cnattrp->ca_recflags = c_info.recflags;
			keyp->hfsPlus.parentID = c_info.parentcnid;

			if (throttle_io_will_be_throttled(-1, HFSTOVFS(hfsmp)))
				throttle_lowpri_io(1);

			lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);

			/* lookup this cnid in the catalog */
			error = cat_getkeyplusattr(hfsmp, cnid, keyp, cnattrp);

			hfs_systemfile_unlock(hfsmp, lockflags);
/*
 * Compute whether we have access to the given directory (nodeID) and all its parents. Cache
 * up to CACHE_LEVELS as we progress towards the root.
 */
do_access_check(struct hfsmount *hfsmp, int *err, struct access_cache *cache, HFSCatalogNodeID nodeID,
    struct cnode *skip_cp, struct proc *theProcPtr, kauth_cred_t myp_ucred,
    struct vfs_context *my_context,
    char *bitmap,
    uint32_t map_size,
    cnid_t *parents,
    uint32_t num_parents)
	HFSCatalogNodeID thisNodeID;
	unsigned int myPerms;
	struct cat_attr cnattr;
	int cache_index = -1, scope_index = -1, scope_idx_start = -1;
	int i = 0, ids_to_cache = 0;
	int parent_ids[CACHE_LEVELS];

	thisNodeID = nodeID;
	while (thisNodeID >= kRootDirID) {
		myResult = 0;	/* default to "no access" */

		/* check the cache before resorting to hitting the catalog */

		/* ASSUMPTION: access info of cached entries is "final"... i.e. no need
		 * to look any further after hitting cached dir */

		if (lookup_bucket(cache, &cache_index, thisNodeID)) {
			myErr = cache->haveaccess[cache_index];
			if (scope_index != -1) {
				if (myErr == ESRCH) {
				scope_index = 0;	// so we'll just use the cache result
				scope_idx_start = ids_to_cache;
			myResult = (myErr == 0) ? 1 : 0;
			goto ExitThisRoutine;

			tmp = cache_binSearch(parents, num_parents-1, thisNodeID, NULL);
			if (scope_index == -1)
			if (tmp != -1 && scope_idx_start == -1 && ids_to_cache < CACHE_LEVELS) {
				scope_idx_start = ids_to_cache;

		/* remember which parents we want to cache */
		if (ids_to_cache < CACHE_LEVELS) {
			parent_ids[ids_to_cache] = thisNodeID;
		// Inefficient (using modulo) and we might want to use a hash function, not rely on the node id to be "nice"...
		if (bitmap && map_size) {
			bitmap[(thisNodeID/8)%(map_size)]|=(1<<(thisNodeID&7));

		/* do the lookup (checks the cnode hash, then the catalog) */
		myErr = do_attr_lookup(hfsmp, cache, thisNodeID, skip_cp, &catkey, &cnattr);
			goto ExitThisRoutine;	/* no access */

		/* Root always gets access. */
		if (suser(myp_ucred, NULL) == 0) {
			thisNodeID = catkey.hfsPlus.parentID;

		// if the thing has acl's, do the full permission check
		if ((cnattr.ca_recflags & kHFSHasSecurityMask) != 0) {
			/* get the vnode for this cnid */
			myErr = hfs_vget(hfsmp, thisNodeID, &vp, 0, 0);
				goto ExitThisRoutine;

			thisNodeID = VTOC(vp)->c_parentcnid;

			hfs_unlock(VTOC(vp));

			if (vnode_vtype(vp) == VDIR) {
				myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), my_context);
				myErr = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, my_context);
				goto ExitThisRoutine;

			int mode = cnattr.ca_mode & S_IFMT;
			myPerms = DerivePermissionSummary(cnattr.ca_uid, cnattr.ca_gid, cnattr.ca_mode, hfsmp->hfs_mp, myp_ucred, theProcPtr);

			if (mode == S_IFDIR) {
				flags = R_OK | X_OK;
			if ( (myPerms & flags) != flags) {
				goto ExitThisRoutine;	/* no access */

			/* up the hierarchy we go */
			thisNodeID = catkey.hfsPlus.parentID;

	/* if here, we have access to this node */

	if (parents && myErr == 0 && scope_index == -1) {

	/* cache the parent directory(ies) */
	for (i = 0; i < ids_to_cache; i++) {
		if (myErr == 0 && parents && (scope_idx_start == -1 || i > scope_idx_start)) {
			add_node(cache, -1, parent_ids[i], ESRCH);
			add_node(cache, -1, parent_ids[i], myErr);
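/*
 * Illustrative note (not in the original source): the parent bitmap built in
 * do_access_check above is a simple modulo hash.  For example, with
 * map_size = 16 and thisNodeID = 20:
 *
 *	bitmap[(20 / 8) % 16] |= 1 << (20 & 7);	// sets bit 4 of byte 2
 *
 * so the caller can later test the same byte/bit to decide whether a directory
 * id is "interesting" without another kernel round trip.
 */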
do_bulk_access_check(struct hfsmount *hfsmp, struct vnode *vp,
    struct vnop_ioctl_args *ap, int arg_size, vfs_context_t context)
	/*
	 * NOTE: on entry, the vnode has an io_ref. In case this vnode
	 * happens to be in our list of file_ids, we'll note it so that we
	 * avoid calling hfs_chashget_nowait() on that id as that
	 * will cause a "locking against myself" panic.
	 */
	Boolean check_leaf = true;

	struct user64_ext_access_t *user_access_structp;
	struct user64_ext_access_t tmp_user_access;
	struct access_cache cache;

	int error = 0, prev_parent_check_ok=1;
	unsigned int num_files = 0;
	int num_parents = 0;
	cnid_t *parents=NULL;
	cnid_t prevParent_cnid = 0;
	unsigned int myPerms;
	struct cat_attr cnattr;
	struct cnode *skip_cp = VTOC(vp);
	kauth_cred_t cred = vfs_context_ucred(context);
	proc_t p = vfs_context_proc(context);

	is64bit = proc_is64bit(p);

	/* initialize the local cache and buffers */
	cache.numcached = 0;
	cache.cachehits = 0;
	cache.acache = NULL;
	cache.haveaccess = NULL;

	/* struct copyin done during dispatch... need to copy file_id array separately */
	if (ap->a_data == NULL) {
		goto err_exit_bulk_access;

		if (arg_size != sizeof(struct user64_ext_access_t)) {
			goto err_exit_bulk_access;

		user_access_structp = (struct user64_ext_access_t *)ap->a_data;

	} else if (arg_size == sizeof(struct user32_access_t)) {
		struct user32_access_t *accessp = (struct user32_access_t *)ap->a_data;

		// convert an old style bulk-access struct to the new style
		tmp_user_access.flags = accessp->flags;
		tmp_user_access.num_files = accessp->num_files;
		tmp_user_access.map_size = 0;
		tmp_user_access.file_ids = CAST_USER_ADDR_T(accessp->file_ids);
		tmp_user_access.bitmap = USER_ADDR_NULL;
		tmp_user_access.access = CAST_USER_ADDR_T(accessp->access);
		tmp_user_access.num_parents = 0;
		user_access_structp = &tmp_user_access;

	} else if (arg_size == sizeof(struct user32_ext_access_t)) {
		struct user32_ext_access_t *accessp = (struct user32_ext_access_t *)ap->a_data;

		// up-cast from a 32-bit version of the struct
		tmp_user_access.flags = accessp->flags;
		tmp_user_access.num_files = accessp->num_files;
		tmp_user_access.map_size = accessp->map_size;
		tmp_user_access.num_parents = accessp->num_parents;

		tmp_user_access.file_ids = CAST_USER_ADDR_T(accessp->file_ids);
		tmp_user_access.bitmap = CAST_USER_ADDR_T(accessp->bitmap);
		tmp_user_access.access = CAST_USER_ADDR_T(accessp->access);
		tmp_user_access.parents = CAST_USER_ADDR_T(accessp->parents);

		user_access_structp = &tmp_user_access;
		goto err_exit_bulk_access;

	map_size = user_access_structp->map_size;
	num_files = user_access_structp->num_files;
	num_parents = user_access_structp->num_parents;

	if (num_files < 1) {
		goto err_exit_bulk_access;
	if (num_files > 1024) {
		goto err_exit_bulk_access;
	if (num_parents > 1024) {
		goto err_exit_bulk_access;

	file_ids = (int *) kalloc(sizeof(int) * num_files);
	access = (short *) kalloc(sizeof(short) * num_files);
		bitmap = (char *) kalloc(sizeof(char) * map_size);
		parents = (cnid_t *) kalloc(sizeof(cnid_t) * num_parents);

	cache.acache = (unsigned int *) kalloc(sizeof(int) * NUM_CACHE_ENTRIES);
	cache.haveaccess = (unsigned char *) kalloc(sizeof(unsigned char) * NUM_CACHE_ENTRIES);

	if (file_ids == NULL || access == NULL || (map_size != 0 && bitmap == NULL) || cache.acache == NULL || cache.haveaccess == NULL) {
			kfree(file_ids, sizeof(int) * num_files);
			kfree(bitmap, sizeof(char) * map_size);
			kfree(access, sizeof(short) * num_files);
			kfree(cache.acache, sizeof(int) * NUM_CACHE_ENTRIES);
		if (cache.haveaccess) {
			kfree(cache.haveaccess, sizeof(unsigned char) * NUM_CACHE_ENTRIES);
			kfree(parents, sizeof(cnid_t) * num_parents);

	// make sure the bitmap is zero'ed out...
		bzero(bitmap, (sizeof(char) * map_size));

	if ((error = copyin(user_access_structp->file_ids, (caddr_t)file_ids,
		num_files * sizeof(int)))) {
		goto err_exit_bulk_access;

	if ((error = copyin(user_access_structp->parents, (caddr_t)parents,
		num_parents * sizeof(cnid_t)))) {
		goto err_exit_bulk_access;

	flags = user_access_structp->flags;
	if ((flags & (F_OK | R_OK | W_OK | X_OK)) == 0) {

	/* check if we've been passed leaf node ids or parent ids */
	if (flags & PARENT_IDS_FLAG) {

	/* Check access to each file_id passed in */
	for (i = 0; i < num_files; i++) {
		cnid = (cnid_t) file_ids[i];

		/* root always has access */
		if ((!parents) && (!suser(cred, NULL))) {

			/* do the lookup (checks the cnode hash, then the catalog) */
			error = do_attr_lookup(hfsmp, &cache, cnid, skip_cp, &catkey, &cnattr);
				access[i] = (short) error;

				// Check if the leaf matches one of the parent scopes
				leaf_index = cache_binSearch(parents, num_parents-1, cnid, NULL);
				if (leaf_index >= 0 && parents[leaf_index] == cnid)
					prev_parent_check_ok = 0;
				else if (leaf_index >= 0)
					prev_parent_check_ok = 1;

			// if the thing has acl's, do the full permission check
			if ((cnattr.ca_recflags & kHFSHasSecurityMask) != 0) {
				/* get the vnode for this cnid */
				myErr = hfs_vget(hfsmp, cnid, &cvp, 0, 0);

				hfs_unlock(VTOC(cvp));

				if (vnode_vtype(cvp) == VDIR) {
					myErr = vnode_authorize(cvp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), context);
					myErr = vnode_authorize(cvp, NULL, KAUTH_VNODE_READ_DATA, context);

				/* before calling CheckAccess(), check the target file for read access */
				myPerms = DerivePermissionSummary(cnattr.ca_uid, cnattr.ca_gid,
					cnattr.ca_mode, hfsmp->hfs_mp, cred, p);

				/* fail fast if no access */
				if ((myPerms & flags) == 0) {
			/* we were passed an array of parent ids */
			catkey.hfsPlus.parentID = cnid;

		/* if the last guy had the same parent and had access, we're done */
		if (i > 0 && catkey.hfsPlus.parentID == prevParent_cnid && access[i-1] == 0 && prev_parent_check_ok) {

		myaccess = do_access_check(hfsmp, &error, &cache, catkey.hfsPlus.parentID,
			skip_cp, p, cred, context, bitmap, map_size, parents, num_parents);

		if (myaccess || (error == ESRCH && leaf_index != -1)) {
			access[i] = 0;	// have access.. no errors to report
			access[i] = (error != 0 ? (short) error : EACCES);

		prevParent_cnid = catkey.hfsPlus.parentID;

	/* copyout the access array */
	if ((error = copyout((caddr_t)access, user_access_structp->access,
		num_files * sizeof (short)))) {
		goto err_exit_bulk_access;
	if (map_size && bitmap) {
		if ((error = copyout((caddr_t)bitmap, user_access_structp->bitmap,
			map_size * sizeof (char)))) {
			goto err_exit_bulk_access;

err_exit_bulk_access:

		kfree(file_ids, sizeof(int) * num_files);
		kfree(parents, sizeof(cnid_t) * num_parents);
		kfree(bitmap, sizeof(char) * map_size);
		kfree(access, sizeof(short) * num_files);
		kfree(cache.acache, sizeof(int) * NUM_CACHE_ENTRIES);
	if (cache.haveaccess)
		kfree(cache.haveaccess, sizeof(unsigned char) * NUM_CACHE_ENTRIES);

/* end "bulk-access" support */
/*
 * Control filesystem operating characteristics.
 */
hfs_vnop_ioctl( struct vnop_ioctl_args /* {
		vfs_context_t a_context;
	} */ *ap)
	struct vnode * vp = ap->a_vp;
	struct hfsmount *hfsmp = VTOHFS(vp);
	vfs_context_t context = ap->a_context;
	kauth_cred_t cred = vfs_context_ucred(context);
	proc_t p = vfs_context_proc(context);
	struct vfsstatfs *vfsp;
	off_t jnl_start, jnl_size;
	struct hfs_journal_info *jip;
	off_t uncompressed_size = -1;
	int decmpfs_error = 0;

	if (ap->a_command == F_RDADVISE) {
		/* we need to inspect the decmpfs state of the file as early as possible */
		compressed = hfs_file_is_compressed(VTOC(vp), 0);
			if (VNODE_IS_RSRC(vp)) {
				/* if this is the resource fork, treat it as if it were empty */
				uncompressed_size = 0;
				decmpfs_error = hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &uncompressed_size, 0);
				if (decmpfs_error != 0) {
					/* failed to get the uncompressed size, we'll check for this later */
					uncompressed_size = -1;
#endif /* HFS_COMPRESSION */

	is64bit = proc_is64bit(p);

	if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
#endif /* CONFIG_PROTECT */

	switch (ap->a_command) {

		struct vnode *file_vp;

		/* Caller must be owner of file system. */
		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
		/* Target vnode must be file system's root. */
		if (!vnode_isvroot(vp)) {
		bufptr = (char *)ap->a_data;
		cnid = strtoul(bufptr, NULL, 10);
		if (ap->a_fflag & HFS_GETPATH_VOLUME_RELATIVE) {
			flags |= BUILDPATH_VOLUME_RELATIVE;

		/* We need to call hfs_vfs_vget to leverage the code that will
		 * fix the origin list for us if needed, as opposed to calling
		 * hfs_vget, since we will need the parent for build_path call.
		 */
		if ((error = hfs_vfs_vget(HFSTOVFS(hfsmp), cnid, &file_vp, context))) {
		error = build_path(file_vp, bufptr, sizeof(pathname_t), &outlen, flags, context);
	case HFS_TRANSFER_DOCUMENT_ID:
		struct cnode *cp = NULL;
		u_int32_t to_fd = *(u_int32_t *)ap->a_data;
		struct fileproc *to_fp;
		struct vnode *to_vp;
		struct cnode *to_cp;

		if ((error = fp_getfvp(p, to_fd, &to_fp, &to_vp)) != 0) {
			//printf("could not get the vnode for fd %d (err %d)\n", to_fd, error);
		if ( (error = vnode_getwithref(to_vp)) ) {

		if (VTOHFS(to_vp) != hfsmp) {
			goto transfer_cleanup;

		int need_unlock = 1;
		to_cp = VTOC(to_vp);
		error = hfs_lockpair(cp, to_cp, HFS_EXCLUSIVE_LOCK);
			//printf("could not lock the pair of cnodes (error %d)\n", error);
			goto transfer_cleanup;

		if (!(cp->c_bsdflags & UF_TRACKED)) {
		} else if (to_cp->c_bsdflags & UF_TRACKED) {
			// if the destination is already tracked, return an error
			// as otherwise it's a silent deletion of the target's
			// document-id
		} else if (S_ISDIR(cp->c_attr.ca_mode) || S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
			// we can use the FndrExtendedFileInfo because the doc-id is the first
			// thing in both it and the ExtendedDirInfo struct which is fixed in
			// format and can not change layout
			struct FndrExtendedFileInfo *f_extinfo = (struct FndrExtendedFileInfo *)((u_int8_t*)cp->c_finderinfo + 16);
			struct FndrExtendedFileInfo *to_extinfo = (struct FndrExtendedFileInfo *)((u_int8_t*)to_cp->c_finderinfo + 16);

			if (f_extinfo->document_id == 0) {
				hfs_unlockpair(cp, to_cp);	// have to unlock to be able to get a new-id

				if ((error = hfs_generate_document_id(hfsmp, &new_id)) == 0) {
					// re-lock the pair now that we have the document-id
					hfs_lockpair(cp, to_cp, HFS_EXCLUSIVE_LOCK);
					f_extinfo->document_id = new_id;
					goto transfer_cleanup;

			to_extinfo->document_id = f_extinfo->document_id;
			f_extinfo->document_id = 0;
			//printf("TRANSFERRING: doc-id %d from ino %d to ino %d\n", to_extinfo->document_id, cp->c_fileid, to_cp->c_fileid);

			// make sure the destination is also UF_TRACKED
			to_cp->c_bsdflags |= UF_TRACKED;
			cp->c_bsdflags &= ~UF_TRACKED;

			// mark the cnodes dirty
			cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
			to_cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;

			if ((error = hfs_start_transaction(hfsmp)) == 0) {
				lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);

				(void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
				(void) cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr, NULL, NULL);

				hfs_systemfile_unlock (hfsmp, lockflags);
				(void) hfs_end_transaction(hfsmp);

			add_fsevent(FSE_DOCID_CHANGED, context,
				    FSE_ARG_DEV, hfsmp->hfs_raw_dev,
				    FSE_ARG_INO, (ino64_t)cp->c_fileid,	// src inode #
				    FSE_ARG_INO, (ino64_t)to_cp->c_fileid,	// dst inode #
				    FSE_ARG_INT32, to_extinfo->document_id,
				    FSE_ARG_DONE);

			hfs_unlockpair(cp, to_cp);	// unlock this so we can send the fsevents

			if (need_fsevent(FSE_STAT_CHANGED, vp)) {
				add_fsevent(FSE_STAT_CHANGED, context, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
			if (need_fsevent(FSE_STAT_CHANGED, to_vp)) {
				add_fsevent(FSE_STAT_CHANGED, context, FSE_ARG_VNODE, to_vp, FSE_ARG_DONE);

			hfs_unlockpair(cp, to_cp);	// unlock this so we can send the fsevents

		hfs_unlockpair(cp, to_cp);
		/* Caller must be owner of file system. */
		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
		/* Target vnode must be file system's root. */
		if (!vnode_isvroot(vp)) {
		linkfileid = *(cnid_t *)ap->a_data;
		if (linkfileid < kHFSFirstUserCatalogNodeID) {
		if ((error = hfs_lookup_siblinglinks(hfsmp, linkfileid, &prevlinkid, &nextlinkid))) {
		if (ap->a_command == HFS_NEXT_LINK) {
			*(cnid_t *)ap->a_data = nextlinkid;
			*(cnid_t *)ap->a_data = prevlinkid;

	case HFS_RESIZE_PROGRESS: {
		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
			return (EACCES);	/* must be owner of file system */
		if (!vnode_isvroot(vp)) {
		/* file system must not be mounted read-only */
		if (hfsmp->hfs_flags & HFS_READ_ONLY) {

		return hfs_resize_progress(hfsmp, (u_int32_t *)ap->a_data);

	case HFS_RESIZE_VOLUME: {
		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
			return (EACCES);	/* must be owner of file system */
		if (!vnode_isvroot(vp)) {
		/* filesystem must not be mounted read only */
		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		newsize = *(u_int64_t *)ap->a_data;
		cursize = (u_int64_t)hfsmp->totalBlocks * (u_int64_t)hfsmp->blockSize;

		if (newsize > cursize) {
			return hfs_extendfs(hfsmp, *(u_int64_t *)ap->a_data, context);
		} else if (newsize < cursize) {
			return hfs_truncatefs(hfsmp, *(u_int64_t *)ap->a_data, context);

	case HFS_CHANGE_NEXT_ALLOCATION: {
		int error = 0;		/* Assume success */

		if (vnode_vfsisrdonly(vp)) {
		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
			return (EACCES);	/* must be owner of file system */
		if (!vnode_isvroot(vp)) {
		hfs_lock_mount(hfsmp);
		location = *(u_int32_t *)ap->a_data;
		if ((location >= hfsmp->allocLimit) &&
			(location != HFS_NO_UPDATE_NEXT_ALLOCATION)) {
			goto fail_change_next_allocation;
		/* Return previous value. */
		*(u_int32_t *)ap->a_data = hfsmp->nextAllocation;
		if (location == HFS_NO_UPDATE_NEXT_ALLOCATION) {
			/* On magic value for location, set nextAllocation to next block
			 * after metadata zone and set flag in mount structure to indicate
			 * that nextAllocation should not be updated again.
			 */
			if (hfsmp->hfs_metazone_end != 0) {
				HFS_UPDATE_NEXT_ALLOCATION(hfsmp, hfsmp->hfs_metazone_end + 1);
			hfsmp->hfs_flags |= HFS_SKIP_UPDATE_NEXT_ALLOCATION;
			hfsmp->hfs_flags &= ~HFS_SKIP_UPDATE_NEXT_ALLOCATION;
			HFS_UPDATE_NEXT_ALLOCATION(hfsmp, location);
		MarkVCBDirty(hfsmp);
fail_change_next_allocation:
		hfs_unlock_mount(hfsmp);
	case HFS_SETBACKINGSTOREINFO: {
		struct vnode * bsfs_rootvp;
		struct vnode * di_vp;
		struct hfs_backingstoreinfo *bsdata;

		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
			return (EACCES);	/* must be owner of file system */
		bsdata = (struct hfs_backingstoreinfo *)ap->a_data;
		if (bsdata == NULL) {
		if ((error = file_vnode(bsdata->backingfd, &di_vp))) {
		if ((error = vnode_getwithref(di_vp))) {
			file_drop(bsdata->backingfd);

		if (vnode_mount(vp) == vnode_mount(di_vp)) {
			(void)vnode_put(di_vp);
			file_drop(bsdata->backingfd);

		/*
		 * Obtain the backing fs root vnode and keep a reference
		 * on it.  This reference will be dropped in hfs_unmount.
		 */
		error = VFS_ROOT(vnode_mount(di_vp), &bsfs_rootvp, NULL);	/* XXX use context! */
			(void)vnode_put(di_vp);
			file_drop(bsdata->backingfd);
		vnode_ref(bsfs_rootvp);
		vnode_put(bsfs_rootvp);

		hfs_lock_mount(hfsmp);
		hfsmp->hfs_backingfs_rootvp = bsfs_rootvp;
		hfsmp->hfs_flags |= HFS_HAS_SPARSE_DEVICE;
		hfsmp->hfs_sparsebandblks = bsdata->bandsize / hfsmp->blockSize * 4;
		hfs_unlock_mount(hfsmp);

		/* We check the MNTK_VIRTUALDEV bit instead of marking the dependent process */

		/*
		 * If the sparse image is on a sparse image file (as opposed to a sparse
		 * bundle), then we may need to limit the free space to the maximum size
		 * of a file on that volume.  So we query (using pathconf), and if we get
		 * a meaningful result, we cache the number of blocks for later use in
		 * hfs_freeblks().
		 */
		hfsmp->hfs_backingfs_maxblocks = 0;
		if (vnode_vtype(di_vp) == VREG) {
			terr = vn_pathconf(di_vp, _PC_FILESIZEBITS, &hostbits, context);
			if (terr == 0 && hostbits != 0 && hostbits < 64) {
				u_int64_t hostfilesizemax = ((u_int64_t)1) << hostbits;

				hfsmp->hfs_backingfs_maxblocks = hostfilesizemax / hfsmp->blockSize;

		/* The free extent cache is managed differently for sparse devices.
		 * There is a window between when the volume is mounted and when the
		 * device is marked as sparse, so the free extent cache for this
		 * volume is currently initialized as normal volume (sorted by block
		 * count).  Reset the cache so that it will be rebuilt again
		 * for sparse device (sorted by start block).
		 */
		ResetVCBFreeExtCache(hfsmp);

		(void)vnode_put(di_vp);
		file_drop(bsdata->backingfd);

	case HFS_CLRBACKINGSTOREINFO: {
		struct vnode * tmpvp;

		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
			return (EACCES);	/* must be owner of file system */
		if (hfsmp->hfs_flags & HFS_READ_ONLY) {

		if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) &&
			hfsmp->hfs_backingfs_rootvp) {

			hfs_lock_mount(hfsmp);
			hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE;
			tmpvp = hfsmp->hfs_backingfs_rootvp;
			hfsmp->hfs_backingfs_rootvp = NULLVP;
			hfsmp->hfs_sparsebandblks = 0;
			hfs_unlock_mount(hfsmp);
#endif /* HFS_SPARSE_DEV */
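/*
 * Illustrative note (not in the original source): the clamp computed in
 * HFS_SETBACKINGSTOREINFO above is straightforward arithmetic.  For example,
 * if the backing volume reports _PC_FILESIZEBITS == 32 and the HFS volume
 * uses 4096-byte allocation blocks:
 *
 *	u_int64_t hostfilesizemax = 1ULL << 32;		// 4 GiB max file size
 *	u_int64_t maxblocks = hostfilesizemax / 4096;	// 1048576 blocks
 *
 * so later free-space reporting never promises more space than the sparse
 * image file on the backing volume could ever grow to hold.
 */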
	/* Change the next CNID stored in the VH */
	case HFS_CHANGE_NEXTCNID: {
		int error = 0;		/* Assume success */

		if (vnode_vfsisrdonly(vp)) {
		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
			return (EACCES);	/* must be owner of file system */
		fileid = *(u_int32_t *)ap->a_data;

		/* Must have catalog lock excl. to advance the CNID pointer */
		lockflags = hfs_systemfile_lock (hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);

		hfs_lock_mount(hfsmp);

		/* If it is less than the current next CNID, force the wraparound bit to be set */
		if (fileid < hfsmp->vcbNxtCNID) {

		/* Return previous value. */
		*(u_int32_t *)ap->a_data = hfsmp->vcbNxtCNID;

		hfsmp->vcbNxtCNID = fileid;
			hfsmp->vcbAtrb |= kHFSCatalogNodeIDsReusedMask;

		MarkVCBDirty(hfsmp);
		hfs_unlock_mount(hfsmp);
		hfs_systemfile_unlock (hfsmp, lockflags);

		mp = vnode_mount(vp);
		hfsmp = VFSTOHFS(mp);

		vfsp = vfs_statfs(mp);

		if (kauth_cred_getuid(cred) != vfsp->f_owner &&
			!kauth_cred_issuser(cred))

		return hfs_freeze(hfsmp);

		vfsp = vfs_statfs(vnode_mount(vp));
		if (kauth_cred_getuid(cred) != vfsp->f_owner &&
			!kauth_cred_issuser(cred))

		return hfs_thaw(hfsmp, current_proc());

	case HFS_BULKACCESS_FSCTL: {
		if (hfsmp->hfs_flags & HFS_STANDARD) {
			size = sizeof(struct user64_access_t);
			size = sizeof(struct user32_access_t);

		return do_bulk_access_check(hfsmp, vp, ap, size, context);

	case HFS_EXT_BULKACCESS_FSCTL: {
		if (hfsmp->hfs_flags & HFS_STANDARD) {
			size = sizeof(struct user64_ext_access_t);
			size = sizeof(struct user32_ext_access_t);

		return do_bulk_access_check(hfsmp, vp, ap, size, context);

	case HFS_SET_XATTREXTENTS_STATE: {
		if (ap->a_data == NULL) {
		state = *(int *)ap->a_data;

		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		/* Super-user can enable or disable extent-based extended
		 * attribute support on a volume
		 * Note: Starting Mac OS X 10.7, extent-based extended attributes
		 * are enabled by default, so any change will be transient only
		 * till the volume is remounted.
		 */
		if (!kauth_cred_issuser(kauth_cred_get())) {
		if (state == 0 || state == 1)
			return hfs_set_volxattr(hfsmp, HFS_SET_XATTREXTENTS_STATE, state);
2168 case F_SETSTATICCONTENT
: {
2170 int enable_static
= 0;
2171 struct cnode
*cp
= NULL
;
2173 * lock the cnode, decorate the cnode flag, and bail out.
2174 * VFS should have already authenticated the caller for us.
2179 * Note that even though ap->a_data is of type caddr_t,
2180 * the fcntl layer at the syscall handler will pass in NULL
2181 * or 1 depending on what the argument supplied to the fcntl
2182 * was. So it is in fact correct to check the ap->a_data
2183 * argument for zero or non-zero value when deciding whether or not
2184 * to enable the static bit in the cnode.
2188 if (hfsmp
->hfs_flags
& HFS_READ_ONLY
) {
2193 error
= hfs_lock (cp
, HFS_EXCLUSIVE_LOCK
, HFS_LOCK_DEFAULT
);
2195 if (enable_static
) {
2196 cp
->c_flag
|= C_SSD_STATIC
;
2199 cp
->c_flag
&= ~C_SSD_STATIC
;
	case F_SET_GREEDY_MODE: {
		int error;
		int enable_greedy_mode = 0;
		struct cnode *cp = NULL;
		/*
		 * lock the cnode, decorate the cnode flag, and bail out.
		 * VFS should have already authenticated the caller for us.
		 */

		if (ap->a_data) {
			/*
			 * Note that even though ap->a_data is of type caddr_t,
			 * the fcntl layer at the syscall handler will pass in NULL
			 * or 1 depending on what the argument supplied to the fcntl
			 * was. So it is in fact correct to check the ap->a_data
			 * argument for zero or non-zero value when deciding whether or not
			 * to enable the greedy mode bit in the cnode.
			 */
			enable_greedy_mode = 1;
		}
		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
			return EROFS;
		}
		cp = VTOC(vp);

		error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		if (error == 0) {
			if (enable_greedy_mode) {
				cp->c_flag |= C_SSD_GREEDY_MODE;
			} else {
				cp->c_flag &= ~C_SSD_GREEDY_MODE;
			}
			hfs_unlock (cp);
		}
		return error;
	}
	case F_SETIOTYPE: {
		int error;
		uint32_t iotypeflag = 0;
		struct cnode *cp = NULL;
		/*
		 * lock the cnode, decorate the cnode flag, and bail out.
		 * VFS should have already authenticated the caller for us.
		 */

		if (ap->a_data == NULL) {
			return EINVAL;
		}

		/*
		 * Note that even though ap->a_data is of type caddr_t, we
		 * can only use 32 bits of flag values.
		 */
		iotypeflag = (uint32_t) ap->a_data;
		switch (iotypeflag) {
			case F_IOTYPE_ISOCHRONOUS:
				break;
			default:
				return EINVAL;
		}

		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
			return EROFS;
		}
		cp = VTOC(vp);

		error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		if (error == 0) {
			switch (iotypeflag) {
				case F_IOTYPE_ISOCHRONOUS:
					cp->c_flag |= C_IO_ISOCHRONOUS;
					break;
				default:
					break;
			}
			hfs_unlock (cp);
		}
		return error;
	}
	case F_MAKECOMPRESSED: {
		int error = 0;
		uint32_t gen_counter;
		struct cnode *cp = NULL;
		int reset_decmp = 0;

		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
			return EROFS;
		}

		/*
		 * acquire & lock the cnode.
		 * VFS should have already authenticated the caller for us.
		 */

		if (ap->a_data) {
			/*
			 * Cast the pointer into a uint32_t so we can extract the
			 * supplied generation counter.
			 */
			gen_counter = *((uint32_t*)ap->a_data);
		} else {
			return EINVAL;
		}

		cp = VTOC(vp);

		/* Grab truncate lock first; we may truncate the file */
		hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

		error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		if (error) {
			hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
			return error;
		}

		/* Are there any other usecounts/FDs? */
		if (vnode_isinuse(vp, 1)) {
			hfs_unlock(cp);
			hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
			return EBUSY;
		}

		/* now we have the cnode locked down; Validate arguments */
		if (cp->c_attr.ca_flags & (UF_IMMUTABLE | UF_COMPRESSED)) {
			/* EINVAL if you are trying to manipulate an IMMUTABLE file */
			hfs_unlock(cp);
			hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
			return EINVAL;
		}

		if ((hfs_get_gencount (cp)) == gen_counter) {
			/*
			 * OK, the gen_counter matched. Go for it:
			 * Toggle state bits, truncate file, and suppress mtime update.
			 */
			reset_decmp = 1;
			cp->c_bsdflags |= UF_COMPRESSED;

			error = hfs_truncate(vp, 0, IO_NDELAY, HFS_TRUNCATE_SKIPTIMES,
			                     ap->a_context);
		} else {
			error = ESTALE;
		}

		/* Unlock cnode before executing decmpfs ; they may need to get an EA */
		hfs_unlock(cp);

		/*
		 * Reset the decmp state while still holding the truncate lock. We need to
		 * serialize here against a listxattr on this node which may occur at any
		 * time.
		 *
		 * Even if '0/skiplock' is passed in 2nd argument to hfs_file_is_compressed,
		 * that will still potentially require getting the com.apple.decmpfs EA. If the
		 * EA is required, then we can't hold the cnode lock, because the getxattr call is
		 * generic(through VFS), and can't pass along any info telling it that we're already
		 * holding it (the lock). If we don't serialize, then we risk listxattr stopping
		 * and trying to fill in the hfs_file_is_compressed info during the callback
		 * operation, which will result in deadlock against the b-tree node.
		 *
		 * So, to serialize against listxattr (which will grab buf_t meta references on
		 * the b-tree blocks), we hold the truncate lock as we're manipulating the
		 * decmpfs payload.
		 */
		if ((reset_decmp) && (error == 0)) {
			decmpfs_cnode *dp = VTOCMP (vp);
			if (dp != NULL) {
				decmpfs_cnode_set_vnode_state(dp, FILE_TYPE_UNKNOWN, 0);
			}

			/* Initialize the decmpfs node as needed */
			(void) hfs_file_is_compressed (cp, 0); /* ok to take lock */
		}

		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);

		return error;
	}
	case F_SETBACKINGSTORE: {
		int error = 0;

		/*
		 * See comment in F_SETSTATICCONTENT re: using
		 * a null check for a_data.
		 */
		if (ap->a_data) {
			error = hfs_set_backingstore (vp, 1);
		} else {
			error = hfs_set_backingstore (vp, 0);
		}
		return error;
	}

	case F_GETPATH_MTMINFO: {
		int error = 0;
		int *data = (int*) ap->a_data;

		/* Ask if this is a backingstore vnode */
		error = hfs_is_backingstore (vp, data);
		return error;
	}
	case F_FULLFSYNC: {
		int error;

		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
			return (EROFS);
		}
		error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		if (error == 0) {
			error = hfs_fsync(vp, MNT_WAIT, TRUE, p);
			hfs_unlock(VTOC(vp));
		}

		return error;
	}
	case F_CHKCLEAN: {
		register struct cnode *cp;
		int error;

		if (!vnode_isreg(vp))
			return EINVAL;

		error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		if (error == 0) {
			cp = VTOC(vp);
			/*
			 * used by regression test to determine if
			 * all the dirty pages (via write) have been cleaned
			 * after a call to 'fsync'.
			 */
			error = is_file_clean(vp, VTOF(vp)->ff_size);
			hfs_unlock(cp);
		}
		return (error);
	}
	case F_RDADVISE: {
		register struct radvisory *ra;
		struct filefork *fp;
		int error = 0;

		if (!vnode_isreg(vp))
			return EINVAL;

		ra = (struct radvisory *)(ap->a_data);
		fp = VTOF(vp);

		/* Protect against a size change. */
		hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

#if HFS_COMPRESSION
		if (compressed && (uncompressed_size == -1)) {
			/* fetching the uncompressed size failed above, so return the error */
			error = decmpfs_error;
		} else if ((compressed && (ra->ra_offset >= uncompressed_size)) ||
		           (!compressed && (ra->ra_offset >= fp->ff_size))) {
			error = EFBIG;
		}
#else /* HFS_COMPRESSION */
		if (ra->ra_offset >= fp->ff_size) {
			error = EFBIG;
		}
#endif /* HFS_COMPRESSION */
		else {
			error = advisory_read(vp, fp->ff_size, ra->ra_offset, ra->ra_count);
		}

		hfs_unlock_truncate(VTOC(vp), HFS_LOCK_DEFAULT);
		return (error);
	}
	case _IOC(IOC_OUT,'h', 4, 0):     /* Create date in local time */
	{
		if (is64bit) {
			*(user_time_t *)(ap->a_data) = (user_time_t) (to_bsd_time(VTOVCB(vp)->localCreateDate));
		} else {
			*(user32_time_t *)(ap->a_data) = (user32_time_t) (to_bsd_time(VTOVCB(vp)->localCreateDate));
		}
		return 0;
	}
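	/*
	 * The fsctls that follow report the mount times and the free-space
	 * notification thresholds.  The SET variants validate their input so
	 * that the ordering dangerlimit < warninglimit < desiredlevel is
	 * preserved; out-of-order values are rejected.
	 */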
	case SPOTLIGHT_FSCTL_GET_MOUNT_TIME:
		*(uint32_t *)ap->a_data = hfsmp->hfs_mount_time;
		break;

	case SPOTLIGHT_FSCTL_GET_LAST_MTIME:
		*(uint32_t *)ap->a_data = hfsmp->hfs_last_mounted_mtime;
		break;

	case HFS_FSCTL_GET_VERY_LOW_DISK:
		*(uint32_t*)ap->a_data = hfsmp->hfs_freespace_notify_dangerlimit;
		break;

	case HFS_FSCTL_SET_VERY_LOW_DISK:
		if (*(uint32_t *)ap->a_data >= hfsmp->hfs_freespace_notify_warninglimit) {
			return EINVAL;
		}

		hfsmp->hfs_freespace_notify_dangerlimit = *(uint32_t *)ap->a_data;
		break;

	case HFS_FSCTL_GET_LOW_DISK:
		*(uint32_t*)ap->a_data = hfsmp->hfs_freespace_notify_warninglimit;
		break;

	case HFS_FSCTL_SET_LOW_DISK:
		if (   *(uint32_t *)ap->a_data >= hfsmp->hfs_freespace_notify_desiredlevel
		    || *(uint32_t *)ap->a_data <= hfsmp->hfs_freespace_notify_dangerlimit) {
			return EINVAL;
		}

		hfsmp->hfs_freespace_notify_warninglimit = *(uint32_t *)ap->a_data;
		break;

	case HFS_FSCTL_GET_DESIRED_DISK:
		*(uint32_t*)ap->a_data = hfsmp->hfs_freespace_notify_desiredlevel;
		break;

	case HFS_FSCTL_SET_DESIRED_DISK:
		if (*(uint32_t *)ap->a_data <= hfsmp->hfs_freespace_notify_warninglimit) {
			return EINVAL;
		}

		hfsmp->hfs_freespace_notify_desiredlevel = *(uint32_t *)ap->a_data;
		break;

	case HFS_VOLUME_STATUS:
		*(uint32_t *)ap->a_data = hfsmp->hfs_notification_conditions;
		break;
	case HFS_SET_BOOT_INFO:
		if (!vnode_isvroot(vp))
			return (EINVAL);
		if (!kauth_cred_issuser(cred) && (kauth_cred_getuid(cred) != vfs_statfs(HFSTOVFS(hfsmp))->f_owner))
			return (EACCES);	/* must be superuser or owner of filesystem */
		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
			return (EROFS);
		}
		hfs_lock_mount (hfsmp);
		bcopy(ap->a_data, &hfsmp->vcbFndrInfo, sizeof(hfsmp->vcbFndrInfo));
		hfs_unlock_mount (hfsmp);
		(void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
		break;
	case HFS_GET_BOOT_INFO:
		if (!vnode_isvroot(vp))
			return (EINVAL);
		hfs_lock_mount (hfsmp);
		bcopy(&hfsmp->vcbFndrInfo, ap->a_data, sizeof(hfsmp->vcbFndrInfo));
		hfs_unlock_mount(hfsmp);
		break;
	case HFS_MARK_BOOT_CORRUPT:
		/* Mark the boot volume corrupt by setting
		 * kHFSVolumeInconsistentBit in the volume header.  This will
		 * force fsck_hfs on next mount.
		 */
		if (!kauth_cred_issuser(kauth_cred_get())) {
			return EACCES;
		}

		/* Allowed only on the root vnode of the boot volume */
		if (!(vfs_flags(HFSTOVFS(hfsmp)) & MNT_ROOTFS) ||
		    !vnode_isvroot(vp)) {
			return EINVAL;
		}
		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
			return (EROFS);
		}
		printf ("hfs_vnop_ioctl: Marking the boot volume corrupt.\n");
		hfs_mark_inconsistent(hfsmp, HFS_FSCK_FORCED);
		break;
	case HFS_FSCTL_GET_JOURNAL_INFO:
		jip = (struct hfs_journal_info*)ap->a_data;

		if (hfsmp->jnl == NULL) {
			jnl_start = 0;
			jnl_size  = 0;
		} else {
			jnl_start = (off_t)(hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset;
			jnl_size  = (off_t)hfsmp->jnl_size;
		}

		jip->jstart = jnl_start;
		jip->jsize = jnl_size;
		break;
	case HFS_SET_ALWAYS_ZEROFILL: {
		struct cnode *cp = VTOC(vp);

		if (*(int *)ap->a_data) {
			cp->c_flag |= C_ALWAYS_ZEROFILL;
		} else {
			cp->c_flag &= ~C_ALWAYS_ZEROFILL;
		}
		break;
	}
	case HFS_DISABLE_METAZONE: {
		/* Only root can disable metadata zone */
		if (!kauth_cred_issuser(kauth_cred_get())) {
			return EACCES;
		}
		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
			return (EROFS);
		}

		/* Disable metadata zone now */
		(void) hfs_metadatazone_init(hfsmp, true);
		printf ("hfs: Disabling metadata zone on %s\n", hfsmp->vcbVN);
		break;
	}
	case HFS_FSINFO_METADATA_BLOCKS: {
		int error;
		struct hfsinfo_metadata *hinfo;

		hinfo = (struct hfsinfo_metadata *)ap->a_data;

		/* Get information about number of metadata blocks */
		error = hfs_getinfo_metadata_blocks(hfsmp, hinfo);
		if (error) {
			return error;
		}

		break;
	}
	case HFS_CS_FREESPACE_TRIM: {
		int error = 0;
		int lockflags = 0;

		/* Only root allowed */
		if (!kauth_cred_issuser(kauth_cred_get())) {
			return EACCES;
		}

		/*
		 * This core functionality is similar to hfs_scan_blocks().
		 * The main difference is that hfs_scan_blocks() is called
		 * as part of mount where we are assured that the journal is
		 * empty to start with.  This fcntl() can be called on a
		 * mounted volume, therefore it has to flush the content of
		 * the journal as well as ensure the state of summary table.
		 *
		 * This fcntl scans over the entire allocation bitmap,
		 * creates list of all the free blocks, and issues TRIM
		 * down to the underlying device.  This can take long time
		 * as it can generate up to 512MB of read I/O.
		 */

		if ((hfsmp->hfs_flags & HFS_SUMMARY_TABLE) == 0) {
			error = hfs_init_summary(hfsmp);
			if (error) {
				printf("hfs: fsctl() could not initialize summary table for %s\n", hfsmp->vcbVN);
				return error;
			}
		}

		/*
		 * The journal maintains list of recently deallocated blocks to
		 * issue DKIOCUNMAPs when the corresponding journal transaction is
		 * flushed to the disk.  To avoid any race conditions, we only
		 * want one active trim list and only one thread issuing DKIOCUNMAPs.
		 * Therefore we make sure that the journal trim list is sync'ed,
		 * empty, and not modifiable for the duration of our scan.
		 *
		 * Take the journal lock before flushing the journal to the disk.
		 * We will keep on holding the journal lock till we don't get the
		 * bitmap lock to make sure that no new journal transactions can
		 * start.  This will make sure that the journal trim list is not
		 * modified after the journal flush and before getting bitmap lock.
		 * We can release the journal lock after we acquire the bitmap
		 * lock as it will prevent any further block deallocations.
		 */
		hfs_journal_lock(hfsmp);

		/* Flush the journal and wait for all I/Os to finish up */
		error = hfs_journal_flush(hfsmp, TRUE);
		if (error) {
			hfs_journal_unlock(hfsmp);
			return error;
		}

		/* Take bitmap lock to ensure it is not being modified */
		lockflags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK);

		/* Release the journal lock */
		hfs_journal_unlock(hfsmp);

		/*
		 * ScanUnmapBlocks reads the bitmap in large block size
		 * (up to 1MB) unlike the runtime which reads the bitmap
		 * in the 4K block size.  This can cause buf_t collisions
		 * and potential data corruption.  To avoid this, we
		 * invalidate all the existing buffers associated with
		 * the bitmap vnode before scanning it.
		 *
		 * Note: ScanUnmapBlock() cleans up all the buffers
		 * after itself, so there won't be any large buffers left
		 * for us to clean up after it returns.
		 */
		error = buf_invalidateblks(hfsmp->hfs_allocation_vp, 0, 0, 0);
		if (error) {
			hfs_systemfile_unlock(hfsmp, lockflags);
			return error;
		}

		/* Traverse bitmap and issue DKIOCUNMAPs */
		error = ScanUnmapBlocks(hfsmp);
		hfs_systemfile_unlock(hfsmp, lockflags);
		if (error) {
			return error;
		}

		break;
	}

	default:
		return (ENOTTY);
	}

	return 0;
}
/*
 * Select for HFS filesystem.
 */
int
hfs_vnop_select(__unused struct vnop_select_args *ap)
/*
	struct vnop_select_args {
		vnode_t a_vp;
		int  a_which;
		int  a_fflags;
		void *a_wql;
		vfs_context_t a_context;
	};
*/
{
	/*
	 * We should really check to see if I/O is possible.
	 */
	return (1);
}
/*
 * Converts a logical block number to a physical block, and optionally returns
 * the amount of remaining blocks in a run. The logical block is based on hfsNode.logBlockSize.
 * The physical block number is based on the device block size, currently its 512.
 * The block run is returned in logical blocks, and is the REMAINING amount of blocks.
 */
int
hfs_bmap(struct vnode *vp, daddr_t bn, struct vnode **vpp, daddr64_t *bnp, unsigned int *runp)
{
	struct filefork *fp = VTOF(vp);
	struct hfsmount *hfsmp = VTOHFS(vp);
	int retval = E_NONE;
	u_int32_t logBlockSize;
	size_t bytesContAvail = 0;
	off_t blockposition;
	int lockExtBtree;
	int lockflags;

	/*
	 * Check for underlying vnode requests and ensure that logical
	 * to physical mapping is requested.
	 */
	if (vpp != NULL)
		*vpp = hfsmp->hfs_devvp;
	if (bnp == NULL)
		return (0);

	logBlockSize = GetLogicalBlockSize(vp);
	blockposition = (off_t)bn * logBlockSize;

	lockExtBtree = overflow_extents(fp);

	if (lockExtBtree)
		lockflags = hfs_systemfile_lock(hfsmp, SFL_EXTENTS, HFS_EXCLUSIVE_LOCK);

	retval = MacToVFSError(
			MapFileBlockC (HFSTOVCB(hfsmp),
				       (FCB*)fp,
				       MAXPHYSIO,
				       blockposition,
				       bnp,
				       &bytesContAvail));

	if (lockExtBtree)
		hfs_systemfile_unlock(hfsmp, lockflags);

	if (retval == E_NONE) {
		/* Figure out how many read ahead blocks there are */
		if (runp != NULL) {
			if (can_cluster(logBlockSize)) {
				/* Make sure this result never goes negative: */
				*runp = (bytesContAvail < logBlockSize) ? 0 : (bytesContAvail / logBlockSize) - 1;
			} else {
				*runp = 0;
			}
		}
	}
	return (retval);
}
/*
 * Convert logical block number to file offset.
 */
int
hfs_vnop_blktooff(struct vnop_blktooff_args *ap)
/*
	struct vnop_blktooff_args {
		vnode_t a_vp;
		daddr64_t a_lblkno;
		off_t *a_offset;
	};
*/
{
	if (ap->a_vp == NULL)
		return (EINVAL);

	*ap->a_offset = (off_t)ap->a_lblkno * (off_t)GetLogicalBlockSize(ap->a_vp);

	return(0);
}
/*
 * Convert file offset to logical block number.
 */
int
hfs_vnop_offtoblk(struct vnop_offtoblk_args *ap)
/*
	struct vnop_offtoblk_args {
		vnode_t a_vp;
		off_t a_offset;
		daddr64_t *a_lblkno;
	};
*/
{
	if (ap->a_vp == NULL)
		return (EINVAL);

	*ap->a_lblkno = (daddr64_t)(ap->a_offset / (off_t)GetLogicalBlockSize(ap->a_vp));

	return(0);
}
/*
 * Map file offset to physical block number.
 *
 * If this function is called for write operation, and if the file
 * had virtual blocks allocated (delayed allocation), real blocks
 * are allocated by calling ExtendFileC().
 *
 * If this function is called for read operation, and if the file
 * had virtual blocks allocated (delayed allocation), no change
 * to the size of file is done, and if required, rangelist is
 * searched for mapping.
 *
 * System file cnodes are expected to be locked (shared or exclusive).
 */
int
hfs_vnop_blockmap(struct vnop_blockmap_args *ap)
/*
	struct vnop_blockmap_args {
		vnode_t a_vp;
		off_t a_foffset;
		size_t a_size;
		daddr64_t *a_bpn;
		size_t *a_run;
		void *a_poff;
		int a_flags;
		vfs_context_t a_context;
	};
*/
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp;
	struct hfsmount *hfsmp;
	size_t bytesContAvail = 0;
	int retval = E_NONE;
	int syslocks = 0;
	int lockflags = 0;
	struct rl_entry *invalid_range;
	enum rl_overlaptype overlaptype;
	int started_tr = 0;
	int tooklock = 0;

#if HFS_COMPRESSION
	if (VNODE_IS_RSRC(vp)) {
		/* allow blockmaps to the resource fork */
	} else {
		if ( hfs_file_is_compressed(VTOC(vp), 1) ) { /* 1 == don't take the cnode lock */
			int state = decmpfs_cnode_get_vnode_state(VTOCMP(vp));
			switch(state) {
				case FILE_IS_COMPRESSED:
					return ENOTSUP;
				case FILE_IS_CONVERTING:
					/* if FILE_IS_CONVERTING, we allow blockmap */
					break;
				default:
					printf("invalid state %d for compressed file\n", state);
					/* fall through */
			}
		}
	}
#endif /* HFS_COMPRESSION */

	/* Do not allow blockmap operation on a directory */
	if (vnode_isdir(vp)) {
		return ENOTSUP;
	}

	/*
	 * Check for underlying vnode requests and ensure that logical
	 * to physical mapping is requested.
	 */
	if (ap->a_bpn == NULL)
		return (0);

	if ( !vnode_issystem(vp) && !vnode_islnk(vp) && !vnode_isswap(vp)) {
		if (VTOC(vp)->c_lockowner != current_thread()) {
			hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
			tooklock = 1;
		}
	}
	hfsmp = VTOHFS(vp);
	cp = VTOC(vp);
	fp = VTOF(vp);

retry:
	/* Check virtual blocks only when performing write operation */
	if ((ap->a_flags & VNODE_WRITE) && (fp->ff_unallocblocks != 0)) {
		if (hfs_start_transaction(hfsmp) != 0) {
			retval = EINVAL;
			goto exit;
		} else {
			started_tr = 1;
		}
		syslocks = SFL_EXTENTS | SFL_BITMAP;

	} else if (overflow_extents(fp)) {
		syslocks = SFL_EXTENTS;
	}

	if (syslocks)
		lockflags = hfs_systemfile_lock(hfsmp, syslocks, HFS_EXCLUSIVE_LOCK);

	/*
	 * Check for any delayed allocations.
	 */
	if ((ap->a_flags & VNODE_WRITE) && (fp->ff_unallocblocks != 0)) {
		int64_t actbytes;
		u_int32_t loanedBlocks;

		//
		// Make sure we have a transaction.  It's possible
		// that we came in and fp->ff_unallocblocks was zero
		// but during the time we blocked acquiring the extents
		// btree, ff_unallocblocks became non-zero and so we
		// will need to start a transaction.
		//
		if (started_tr == 0) {
			if (syslocks) {
				hfs_systemfile_unlock(hfsmp, lockflags);
				syslocks = 0;
			}
			goto retry;
		}

		/*
		 * Note: ExtendFileC will Release any blocks on loan and
		 * acquire real blocks.  So we ask to extend by zero bytes
		 * since ExtendFileC will account for the virtual blocks.
		 */

		loanedBlocks = fp->ff_unallocblocks;
		retval = ExtendFileC(hfsmp, (FCB*)fp, 0, 0,
				     kEFAllMask | kEFNoClumpMask, &actbytes);

		if (retval) {
			fp->ff_unallocblocks = loanedBlocks;
			cp->c_blocks += loanedBlocks;
			fp->ff_blocks += loanedBlocks;

			hfs_lock_mount (hfsmp);
			hfsmp->loanedBlocks += loanedBlocks;
			hfs_unlock_mount (hfsmp);

			hfs_systemfile_unlock(hfsmp, lockflags);
			cp->c_flag |= C_MODIFIED;
			if (started_tr) {
				(void) hfs_update(vp, TRUE);
				(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);

				hfs_end_transaction(hfsmp);
				started_tr = 0;
			}
			goto exit;
		}
	}

	retval = MapFileBlockC(hfsmp, (FCB *)fp, ap->a_size, ap->a_foffset,
	                       ap->a_bpn, &bytesContAvail);
	if (syslocks) {
		hfs_systemfile_unlock(hfsmp, lockflags);
		syslocks = 0;
	}

	if (started_tr) {
		(void) hfs_update(vp, TRUE);
		(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
		hfs_end_transaction(hfsmp);
		started_tr = 0;
	}

	if (retval) {
		/* On write, always return error because virtual blocks, if any,
		 * should have been allocated in ExtendFileC().  We do not
		 * allocate virtual blocks on read, therefore return error
		 * only if no virtual blocks are allocated.  Otherwise we search
		 * rangelist for zero-fills.
		 */
		if ((MacToVFSError(retval) != ERANGE) ||
		    (ap->a_flags & VNODE_WRITE) ||
		    ((ap->a_flags & VNODE_READ) && (fp->ff_unallocblocks == 0))) {
			goto exit;
		}

		/* Validate if the start offset is within logical file size */
		if (ap->a_foffset >= fp->ff_size) {
			goto exit;
		}

		/*
		 * At this point, we have encountered a failure during
		 * MapFileBlockC that resulted in ERANGE, and we are not servicing
		 * a write, and there are borrowed blocks.
		 *
		 * However, the cluster layer will not call blockmap for
		 * blocks that are borrowed and in-cache.  We have to assume that
		 * because we observed ERANGE being emitted from MapFileBlockC, this
		 * extent range is not valid on-disk.  So we treat this as a
		 * mapping that needs to be zero-filled prior to reading.
		 *
		 * Note that under certain circumstances (such as non-contiguous
		 * userland VM mappings in the calling process), cluster_io
		 * may be forced to split a large I/O driven by hfs_vnop_write
		 * into multiple sub-I/Os that necessitate a RMW cycle.  If this is
		 * the case here, then we have already removed the invalid range list
		 * mapping prior to getting to this blockmap call, so we should not
		 * search the invalid rangelist for this byte range.
		 */

		bytesContAvail = fp->ff_size - ap->a_foffset;
		/*
		 * Clip the contiguous available bytes to, at most, the allowable
		 * maximum or the amount requested.
		 */
		if (bytesContAvail > ap->a_size) {
			bytesContAvail = ap->a_size;
		}

		*ap->a_bpn = (daddr64_t) -1;
		retval = 0;

		goto exit;
	}

	/* MapFileC() found a valid extent in the filefork.  Search the
	 * mapping information further for invalid file ranges.
	 */
	overlaptype = rl_scan(&fp->ff_invalidranges, ap->a_foffset,
	                      ap->a_foffset + (off_t)bytesContAvail - 1,
	                      &invalid_range);
	if (overlaptype != RL_NOOVERLAP) {
		switch(overlaptype) {
		case RL_MATCHINGOVERLAP:
		case RL_OVERLAPCONTAINSRANGE:
		case RL_OVERLAPSTARTSBEFORE:
			/* There's no valid block for this byte offset */
			*ap->a_bpn = (daddr64_t)-1;
			/* There's no point limiting the amount to be returned
			 * if the invalid range that was hit extends all the way
			 * to the EOF (i.e. there's no valid bytes between the
			 * end of this range and the file's EOF):
			 */
			if (((off_t)fp->ff_size > (invalid_range->rl_end + 1)) &&
			    ((size_t)(invalid_range->rl_end + 1 - ap->a_foffset) < bytesContAvail)) {
				bytesContAvail = invalid_range->rl_end + 1 - ap->a_foffset;
			}
			break;

		case RL_OVERLAPISCONTAINED:
		case RL_OVERLAPENDSAFTER:
			/* The range of interest hits an invalid block before the end: */
			if (invalid_range->rl_start == ap->a_foffset) {
				/* There's actually no valid information to be had starting here: */
				*ap->a_bpn = (daddr64_t)-1;
				if (((off_t)fp->ff_size > (invalid_range->rl_end + 1)) &&
				    ((size_t)(invalid_range->rl_end + 1 - ap->a_foffset) < bytesContAvail)) {
					bytesContAvail = invalid_range->rl_end + 1 - ap->a_foffset;
				}
			} else {
				bytesContAvail = invalid_range->rl_start - ap->a_foffset;
			}
			break;

		default:
			break;
		}
		if (bytesContAvail > ap->a_size)
			bytesContAvail = ap->a_size;
	}

exit:
	if (retval == 0) {
		if (ap->a_run)
			*ap->a_run = bytesContAvail;

		if (ap->a_poff)
			*(int *)ap->a_poff = 0;
	}

	if (tooklock)
		hfs_unlock(cp);

	return (MacToVFSError(retval));
}
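/*
 * hfs_vnop_strategy (below) decorates the buf_t before handing it to
 * buf_strategy(): per-cnode I/O hints (C_SSD_STATIC, C_SSD_GREEDY_MODE,
 * C_IO_ISOCHRONOUS) are copied onto the buffer attributes so lower layers
 * can see them, and on content-protected volumes the cnode's protection
 * entry is attached so the payload can be encrypted or decrypted in flight.
 */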
/*
 * prepare and issue the I/O
 * buf_strategy knows how to deal
 * with requests that require
 * fragmented I/Os
 */
int
hfs_vnop_strategy(struct vnop_strategy_args *ap)
{
	buf_t bp = ap->a_bp;
	vnode_t vp = buf_vnode(bp);
	int error = 0;

	/* Mark buffer as containing static data if cnode flag set */
	if (VTOC(vp)->c_flag & C_SSD_STATIC) {
		buf_markstatic(bp);
	}

	/* Mark buffer as containing greedy-mode data if cnode flag set */
	if (VTOC(vp)->c_flag & C_SSD_GREEDY_MODE) {
		bufattr_markgreedymode(&bp->b_attr);
	}

	/* mark buffer as containing burst mode data if cnode flag set */
	if (VTOC(vp)->c_flag & C_IO_ISOCHRONOUS) {
		bufattr_markisochronous(&bp->b_attr);
	}

#if CONFIG_PROTECT
	cnode_t *cp = NULL;

	if ((!bufattr_rawencrypted(&bp->b_attr)) &&
	    ((cp = cp_get_protected_cnode(vp)) != NULL)) {
		/*
		 * We rely upon the truncate lock to protect the
		 * CP cache key from getting tossed prior to our IO finishing here.
		 * Nearly all cluster io calls to manipulate file payload from HFS
		 * take the truncate lock before calling into the cluster
		 * layer to ensure the file size does not change, or that they
		 * have exclusive right to change the EOF of the file.
		 * That same guarantee protects us here since the code that
		 * deals with CP lock events must now take the truncate lock
		 * before doing anything.
		 *
		 * There is 1 exception here:
		 * 1) One exception should be the VM swapfile IO, because HFS will
		 * funnel the VNOP_PAGEOUT directly into a cluster_pageout call for the
		 * swapfile code only without holding the truncate lock.  This is because
		 * individual swapfiles are maintained at fixed-length sizes by the VM code.
		 * In non-swapfile IO we use PAGEOUT_V2 semantics which allow us to
		 * create our own UPL and thus take the truncate lock before calling
		 * into the cluster layer.  In that case, however, we are not concerned
		 * with the CP blob being wiped out in the middle of the IO
		 * because there isn't anything to toss; the VM swapfile key stays
		 * in-core as long as the file is open.
		 */

		/*
		 * Last chance: If this data protected I/O does not have unwrapped keys
		 * present, then try to get them.  We already know that it should, by this point.
		 */
		if (cp->c_cpentry->cp_flags & (CP_KEY_FLUSHED | CP_NEEDS_KEYS)) {
			int io_op = ( (buf_flags(bp) & B_READ) ? CP_READ_ACCESS : CP_WRITE_ACCESS );
			if ((error = cp_handle_vnop(vp, io_op, 0)) != 0) {
				/*
				 * We have to be careful here.  By this point in the I/O path, VM or the cluster
				 * engine has prepared a buf_t with the proper file offsets and all the rest,
				 * so simply erroring out will result in us leaking this particular buf_t.
				 * We need to properly decorate the buf_t just as buf_strategy would so as
				 * to make it appear that the I/O errored out with the particular error code.
				 */
				buf_seterror (bp, error);
				buf_biodone(bp);
				return error;
			}
		}

		/*
		 * For filesystem resize, we may not have access to the underlying
		 * file's cache key for whatever reason (device may be locked).  However,
		 * we do not need it since we are going to use the temporary HFS-wide resize key
		 * which is generated once we start relocating file content.  If this file's I/O
		 * should be done using the resize key, it will have been supplied already, so
		 * do not attach the file's cp blob to the buffer.
		 */
		if ((cp->c_cpentry->cp_flags & CP_RELOCATION_INFLIGHT) == 0) {
			buf_setcpaddr(bp, cp->c_cpentry);
		}
	}
#endif /* CONFIG_PROTECT */

	error = buf_strategy(VTOHFS(vp)->hfs_devvp, ap);

	return error;
}
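/*
 * hfs_minorupdate: clear the cnode's modified flag and touch-time requests so
 * a truncate done with HFS_TRUNCATE_SKIPUPDATE does not push timestamp or
 * metadata updates out to disk.
 */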
int
hfs_minorupdate(struct vnode *vp) {
	struct cnode *cp = VTOC(vp);
	cp->c_flag &= ~C_MODIFIED;
	cp->c_touch_acctime = 0;
	cp->c_touch_chgtime = 0;
	cp->c_touch_modtime = 0;

	return 0;
}
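/*
 * do_hfs_truncate: grow or shrink the fork backing 'vp' to 'length' bytes.
 * 'truncateflags' may carry HFS_TRUNCATE_SKIPUPDATE (use hfs_minorupdate
 * instead of a full hfs_update) and HFS_TRUNCATE_SKIPTIMES (suppress the
 * modtime and generation-count updates).  Callers (hfs_truncate) take the
 * cnode lock before invoking it.
 */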
int
do_hfs_truncate(struct vnode *vp, off_t length, int flags, int truncateflags, vfs_context_t context)
{
	register struct cnode *cp = VTOC(vp);
	struct filefork *fp = VTOF(vp);
	kauth_cred_t cred = vfs_context_ucred(context);
	off_t actualBytesAdded;
	off_t filebytes;
	int64_t bytesToAdd;
	u_int32_t fileblocks;
	int blksize;
	struct hfsmount *hfsmp;
	int lockflags;
	int eflags;
	int retval = E_NONE;
	int skipupdate = (truncateflags & HFS_TRUNCATE_SKIPUPDATE);
	int suppress_times = (truncateflags & HFS_TRUNCATE_SKIPTIMES);

	blksize = VTOVCB(vp)->blockSize;
	fileblocks = fp->ff_blocks;
	filebytes = (off_t)fileblocks * (off_t)blksize;

	KERNEL_DEBUG(HFSDBG_TRUNCATE | DBG_FUNC_START,
		 (int)length, (int)fp->ff_size, (int)filebytes, 0, 0);

	/* This should only happen with a corrupt filesystem */
	if ((off_t)fp->ff_size < 0)
		return (EINVAL);

	if ((!ISHFSPLUS(VTOVCB(vp))) && (length > (off_t)MAXHFSFILESIZE))
		return (EFBIG);

	hfsmp = VTOHFS(vp);

	/* Files that are changing size are not hot file candidates. */
	if (hfsmp->hfc_stage == HFC_RECORDING) {
		fp->ff_bytesread = 0;
	}

	/*
	 * We cannot just check if fp->ff_size == length (as an optimization)
	 * since there may be extra physical blocks that also need truncation.
	 */
#if QUOTA
	if ((retval = hfs_getinoquota(cp)))
		return(retval);
#endif /* QUOTA */

	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of ff_size is 0, length will be at least 1.
	 */
	if (length > (off_t)fp->ff_size) {
#if QUOTA
		retval = hfs_chkdq(cp, (int64_t)(roundup(length - filebytes, blksize)),
				   cred, 0);
		if (retval)
			goto Err_Exit;
#endif /* QUOTA */
		/*
		 * If we don't have enough physical space then
		 * we need to extend the physical size.
		 */
		if (length > filebytes) {
			u_int32_t blockHint = 0;

			/* All or nothing and don't round up to clumpsize. */
			eflags = kEFAllMask | kEFNoClumpMask;

			if (cred && (suser(cred, NULL) != 0)) {
				eflags |= kEFReserveMask;  /* keep a reserve */
			}

			/*
			 * Allocate Journal and Quota files in metadata zone.
			 */
			if (filebytes == 0 &&
			    hfsmp->hfs_flags & HFS_METADATA_ZONE &&
			    hfs_virtualmetafile(cp)) {
				eflags |= kEFMetadataMask;
				blockHint = hfsmp->hfs_metazone_start;
			}
			if (hfs_start_transaction(hfsmp) != 0) {
				retval = EINVAL;
				goto Err_Exit;
			}

			/* Protect extents b-tree and allocation bitmap */
			lockflags = SFL_BITMAP;
			if (overflow_extents(fp))
				lockflags |= SFL_EXTENTS;
			lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

			/*
			 * Keep growing the file as long as the current EOF is
			 * less than the desired value.
			 */
			while ((length > filebytes) && (retval == E_NONE)) {
				bytesToAdd = length - filebytes;
				retval = MacToVFSError(ExtendFileC(VTOVCB(vp),
								   (FCB*)fp,
								   bytesToAdd,
								   blockHint,
								   eflags,
								   &actualBytesAdded));

				filebytes = (off_t)fp->ff_blocks * (off_t)blksize;
				if (actualBytesAdded == 0 && retval == E_NONE) {
					if (length > filebytes)
						length = filebytes;
					break;
				}
			} /* endwhile */

			hfs_systemfile_unlock(hfsmp, lockflags);

			if (skipupdate) {
				(void) hfs_minorupdate(vp);
			} else {
				(void) hfs_update(vp, TRUE);
				(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
			}

			hfs_end_transaction(hfsmp);

			if (retval)
				goto Err_Exit;
		}

		KERNEL_DEBUG(HFSDBG_TRUNCATE | DBG_FUNC_NONE,
			(int)length, (int)fp->ff_size, (int)filebytes, 0, 0);

		if (ISSET(flags, IO_NOZEROFILL)) {
			// An optimisation for the hibernation file
			if (vnode_isswap(vp))
				rl_remove_all(&fp->ff_invalidranges);
		} else {
			if (UBCINFOEXISTS(vp) && (vnode_issystem(vp) == 0) && retval == E_NONE) {
				struct rl_entry *invalid_range;
				off_t zero_limit;
				struct timeval tv;

				zero_limit = (fp->ff_size + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
				if (length < zero_limit) zero_limit = length;

				if (length > (off_t)fp->ff_size) {
					/* Extending the file: time to fill out the current last page w. zeroes? */
					if ((fp->ff_size & PAGE_MASK_64) &&
					    (rl_scan(&fp->ff_invalidranges, fp->ff_size & ~PAGE_MASK_64,
					    fp->ff_size - 1, &invalid_range) == RL_NOOVERLAP)) {

						/* There's some valid data at the start of the (current) last page
						   of the file, so zero out the remainder of that page to ensure the
						   entire page contains valid data.  Since there is no invalid range
						   possible past the (current) eof, there's no need to remove anything
						   from the invalid range list before calling cluster_write(): */
						hfs_unlock(cp);
						retval = cluster_write(vp, (struct uio *) 0, fp->ff_size, zero_limit,
								fp->ff_size, (off_t)0,
								(flags & IO_SYNC) | IO_HEADZEROFILL | IO_NOZERODIRTY);
						hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
						if (retval) goto Err_Exit;

						/* Merely invalidate the remaining area, if necessary: */
						if (length > zero_limit) {
							microuptime(&tv);
							rl_add(zero_limit, length - 1, &fp->ff_invalidranges);
							cp->c_zftimeout = tv.tv_sec + ZFTIMELIMIT;
						}
					} else {
						/* The page containing the (current) eof is invalid: just add the
						   remainder of the page to the invalid list, along with the area
						   being newly allocated:
						 */
						microuptime(&tv);
						rl_add(fp->ff_size, length - 1, &fp->ff_invalidranges);
						cp->c_zftimeout = tv.tv_sec + ZFTIMELIMIT;
					}
				}
			} else {
				panic("hfs_truncate: invoked on non-UBC object?!");
			}
		}
		if (suppress_times == 0) {
			cp->c_touch_modtime = TRUE;
		}
		fp->ff_size = length;

	} else { /* Shorten the size of the file */

		// An optimisation for the hibernation file
		if (ISSET(flags, IO_NOZEROFILL) && vnode_isswap(vp)) {
			rl_remove_all(&fp->ff_invalidranges);
		} else if ((off_t)fp->ff_size > length) {
			/* Any space previously marked as invalid is now irrelevant: */
			rl_remove(length, fp->ff_size - 1, &fp->ff_invalidranges);
		}

		/*
		 * Account for any unmapped blocks. Note that the new
		 * file length can still end up with unmapped blocks.
		 */
		if (fp->ff_unallocblocks > 0) {
			u_int32_t finalblks;
			u_int32_t loanedBlocks;

			hfs_lock_mount(hfsmp);
			loanedBlocks = fp->ff_unallocblocks;
			cp->c_blocks -= loanedBlocks;
			fp->ff_blocks -= loanedBlocks;
			fp->ff_unallocblocks = 0;

			hfsmp->loanedBlocks -= loanedBlocks;

			finalblks = (length + blksize - 1) / blksize;
			if (finalblks > fp->ff_blocks) {
				/* calculate required unmapped blocks */
				loanedBlocks = finalblks - fp->ff_blocks;
				hfsmp->loanedBlocks += loanedBlocks;

				fp->ff_unallocblocks = loanedBlocks;
				cp->c_blocks += loanedBlocks;
				fp->ff_blocks += loanedBlocks;
			}
			hfs_unlock_mount (hfsmp);
		}

		off_t savedbytes = ((off_t)fp->ff_blocks * (off_t)blksize);

		if (hfs_start_transaction(hfsmp) != 0) {
			retval = EINVAL;
			goto Err_Exit;
		}

		if (fp->ff_unallocblocks == 0) {
			/* Protect extents b-tree and allocation bitmap */
			lockflags = SFL_BITMAP;
			if (overflow_extents(fp))
				lockflags |= SFL_EXTENTS;
			lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

			retval = MacToVFSError(TruncateFileC(VTOVCB(vp), (FCB*)fp, length, 0,
							     FORK_IS_RSRC (fp), FTOC(fp)->c_fileid, false));

			hfs_systemfile_unlock(hfsmp, lockflags);
		}
		if (retval == 0) {
			fp->ff_size = length;
		}
		if (skipupdate) {
			(void) hfs_minorupdate(vp);
		} else {
			(void) hfs_update(vp, TRUE);
			(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
		}

		hfs_end_transaction(hfsmp);

		filebytes = (off_t)fp->ff_blocks * (off_t)blksize;
		if (retval)
			goto Err_Exit;
#if QUOTA
		/* These are bytesreleased */
		(void) hfs_chkdq(cp, (int64_t)-(savedbytes - filebytes), NOCRED, 0);
#endif /* QUOTA */

		/*
		 * Only set update flag if the logical length changes & we aren't
		 * suppressing modtime updates.
		 */
		if (((off_t)fp->ff_size != length) && (suppress_times == 0)) {
			cp->c_touch_modtime = TRUE;
		}
		fp->ff_size = length;
	}
	if (cp->c_mode & (S_ISUID | S_ISGID)) {
		if (!vfs_context_issuser(context)) {
			cp->c_mode &= ~(S_ISUID | S_ISGID);
		}
	}
	if (skipupdate) {
		retval = hfs_minorupdate(vp);
	} else {
		cp->c_touch_chgtime = TRUE;	/* status changed */
		if (suppress_times == 0) {
			cp->c_touch_modtime = TRUE;	/* file data was modified */

			/*
			 * If we are not suppressing the modtime update, then
			 * update the gen count as well.
			 */
			if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK (cp->c_attr.ca_mode)) {
				hfs_incr_gencount(cp);
			}
		}

		retval = hfs_update(vp, MNT_WAIT);
	}
	if (retval) {
		KERNEL_DEBUG(HFSDBG_TRUNCATE | DBG_FUNC_NONE,
			-1, -1, -1, retval, 0);
	}

Err_Exit:

	KERNEL_DEBUG(HFSDBG_TRUNCATE | DBG_FUNC_END,
		 (int)length, (int)fp->ff_size, (int)filebytes, retval, 0);

	return (retval);
}
/*
 * Preparation which must be done prior to deleting the catalog record
 * of a file or directory.  In order to make the on-disk as safe as possible,
 * we remove the catalog entry before releasing the bitmap blocks and the
 * overflow extent records.  However, some work must be done prior to deleting
 * the catalog record.
 *
 * When calling this function, the cnode must exist both in memory and on-disk.
 * If there are both resource fork and data fork vnodes, this function should
 * be called on both.
 */
int
hfs_prepare_release_storage (struct hfsmount *hfsmp, struct vnode *vp) {

	struct filefork *fp = VTOF(vp);
	struct cnode *cp = VTOC(vp);
#if QUOTA
	int retval = 0;
#endif /* QUOTA */

	/* Cannot truncate an HFS directory! */
	if (vnode_isdir(vp)) {
		return (EISDIR);
	}

	/*
	 * See the comment below in hfs_truncate for why we need to call
	 * setsize here.  Essentially we want to avoid pending IO if we
	 * already know that the blocks are going to be released here.
	 * This function is only called when totally removing all storage for a file, so
	 * we can take a shortcut and immediately setsize (0);
	 */
	hfs_ubc_setsize(vp, 0, false);

	/* This should only happen with a corrupt filesystem */
	if ((off_t)fp->ff_size < 0)
		return (EINVAL);

	/*
	 * We cannot just check if fp->ff_size == length (as an optimization)
	 * since there may be extra physical blocks that also need truncation.
	 */
#if QUOTA
	if ((retval = hfs_getinoquota(cp))) {
		return(retval);
	}
#endif /* QUOTA */

	/* Wipe out any invalid ranges which have yet to be backed by disk */
	rl_remove(0, fp->ff_size - 1, &fp->ff_invalidranges);

	/*
	 * Account for any unmapped blocks. Since we're deleting the
	 * entire file, we don't have to worry about just shrinking
	 * to a smaller number of borrowed blocks.
	 */
	if (fp->ff_unallocblocks > 0) {
		u_int32_t loanedBlocks;

		hfs_lock_mount (hfsmp);
		loanedBlocks = fp->ff_unallocblocks;
		cp->c_blocks -= loanedBlocks;
		fp->ff_blocks -= loanedBlocks;
		fp->ff_unallocblocks = 0;

		hfsmp->loanedBlocks -= loanedBlocks;

		hfs_unlock_mount (hfsmp);
	}

	return 0;
}
/*
 * Special wrapper around calling TruncateFileC.  This function is useable
 * even when the catalog record does not exist any longer, making it ideal
 * for use when deleting a file.  The simplification here is that we know
 * that we are releasing all blocks.
 *
 * Note that this function may be called when there is no vnode backing
 * the file fork in question.  We may call this from hfs_vnop_inactive
 * to clear out resource fork data (and may not want to clear out the data
 * fork yet).  As a result, we pointer-check both sets of inputs before
 * doing anything with them.
 *
 * The caller is responsible for saving off a copy of the filefork(s)
 * embedded within the cnode prior to calling this function.  The pointers
 * supplied as arguments must be valid even if the cnode is no longer valid.
 */
int
hfs_release_storage (struct hfsmount *hfsmp, struct filefork *datafork,
		     struct filefork *rsrcfork, u_int32_t fileid) {

	off_t filebytes;
	u_int32_t fileblocks;
	int blksize = 0;
	int error = 0;
	int lockflags;

	blksize = hfsmp->blockSize;

	/* Data fork */
	if (datafork) {
		datafork->ff_size = 0;

		fileblocks = datafork->ff_blocks;
		filebytes = (off_t)fileblocks * (off_t)blksize;

		/* We killed invalid ranges and loaned blocks before we removed the catalog entry */

		while (filebytes > 0) {
			if (filebytes > HFS_BIGFILE_SIZE) {
				filebytes -= HFS_BIGFILE_SIZE;
			} else {
				filebytes = 0;
			}

			/* Start a transaction, and wipe out as many blocks as we can in this iteration */
			if (hfs_start_transaction(hfsmp) != 0) {
				error = EINVAL;
				break;
			}

			if (datafork->ff_unallocblocks == 0) {
				/* Protect extents b-tree and allocation bitmap */
				lockflags = SFL_BITMAP;
				if (overflow_extents(datafork))
					lockflags |= SFL_EXTENTS;
				lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

				error = MacToVFSError(TruncateFileC(HFSTOVCB(hfsmp), datafork, filebytes, 1, 0, fileid, false));

				hfs_systemfile_unlock(hfsmp, lockflags);
			}
			(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);

			/* Finish the transaction and start over if necessary */
			hfs_end_transaction(hfsmp);

			if (error) {
				break;
			}
		}
	}

	/* Resource fork */
	if (error == 0 && rsrcfork) {
		rsrcfork->ff_size = 0;

		fileblocks = rsrcfork->ff_blocks;
		filebytes = (off_t)fileblocks * (off_t)blksize;

		/* We killed invalid ranges and loaned blocks before we removed the catalog entry */

		while (filebytes > 0) {
			if (filebytes > HFS_BIGFILE_SIZE) {
				filebytes -= HFS_BIGFILE_SIZE;
			} else {
				filebytes = 0;
			}

			/* Start a transaction, and wipe out as many blocks as we can in this iteration */
			if (hfs_start_transaction(hfsmp) != 0) {
				error = EINVAL;
				break;
			}

			if (rsrcfork->ff_unallocblocks == 0) {
				/* Protect extents b-tree and allocation bitmap */
				lockflags = SFL_BITMAP;
				if (overflow_extents(rsrcfork))
					lockflags |= SFL_EXTENTS;
				lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

				error = MacToVFSError(TruncateFileC(HFSTOVCB(hfsmp), rsrcfork, filebytes, 1, 1, fileid, false));

				hfs_systemfile_unlock(hfsmp, lockflags);
			}
			(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);

			/* Finish the transaction and start over if necessary */
			hfs_end_transaction(hfsmp);

			if (error) {
				break;
			}
		}
	}

	return error;
}
errno_t hfs_ubc_setsize(vnode_t vp, off_t len, bool have_cnode_lock)
{
	errno_t error;

	/*
	 * Call ubc_setsize to give the VM subsystem a chance to do
	 * whatever it needs to with existing pages before we delete
	 * blocks.  Note that symlinks don't use the UBC so we'll
	 * get back ENOENT in that case.
	 */
	if (have_cnode_lock) {
		error = ubc_setsize_ex(vp, len, UBC_SETSIZE_NO_FS_REENTRY);
		if (error == EAGAIN) {
			cnode_t *cp = VTOC(vp);

			if (cp->c_truncatelockowner != current_thread()) {
#if DEVELOPMENT || DEBUG
				panic("hfs: hfs_ubc_setsize called without exclusive truncate lock!");
#else
				printf("hfs: hfs_ubc_setsize called without exclusive truncate lock!\n");
#endif
			}

			hfs_unlock(cp);
			error = ubc_setsize_ex(vp, len, 0);
			hfs_lock_always(cp, HFS_EXCLUSIVE_LOCK);
		}
	} else
		error = ubc_setsize_ex(vp, len, 0);

	return error == ENOENT ? 0 : error;
}
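/*
 * hfs_truncate (below) drives do_hfs_truncate in HFS_BIGFILE_SIZE sized
 * steps so that a single journal transaction never has to cover an
 * arbitrarily large size change, and re-syncs the UBC size with
 * hfs_ubc_setsize both before and after the change.
 */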
/*
 * Truncate a cnode to at most length size, freeing (or adding) the
 * disk blocks.
 */
int
hfs_truncate(struct vnode *vp, off_t length, int flags,
	     int truncateflags, vfs_context_t context)
{
	struct filefork *fp = VTOF(vp);
	off_t filebytes;
	u_int32_t fileblocks;
	int blksize;
	errno_t error = 0;
	struct cnode *cp = VTOC(vp);

	/* Cannot truncate an HFS directory! */
	if (vnode_isdir(vp)) {
		return (EISDIR);
	}
	/* A swap file cannot change size. */
	if (vnode_isswap(vp) && length && !ISSET(flags, IO_NOAUTH)) {
		return (EPERM);
	}

	blksize = VTOVCB(vp)->blockSize;
	fileblocks = fp->ff_blocks;
	filebytes = (off_t)fileblocks * (off_t)blksize;

	bool caller_has_cnode_lock = (cp->c_lockowner == current_thread());

	error = hfs_ubc_setsize(vp, length, caller_has_cnode_lock);
	if (error)
		return error;

	if (!caller_has_cnode_lock) {
		error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		if (error)
			return error;
	}

	// have to loop truncating or growing files that are
	// really big because otherwise transactions can get
	// enormous and consume too many kernel resources.

	if (length < filebytes) {
		while (filebytes > length) {
			if ((filebytes - length) > HFS_BIGFILE_SIZE) {
				filebytes -= HFS_BIGFILE_SIZE;
			} else {
				filebytes = length;
			}
			cp->c_flag |= C_FORCEUPDATE;
			error = do_hfs_truncate(vp, filebytes, flags, truncateflags, context);
			if (error)
				break;
		}
	} else if (length > filebytes) {
		while (filebytes < length) {
			if ((length - filebytes) > HFS_BIGFILE_SIZE) {
				filebytes += HFS_BIGFILE_SIZE;
			} else {
				filebytes = length;
			}
			cp->c_flag |= C_FORCEUPDATE;
			error = do_hfs_truncate(vp, filebytes, flags, truncateflags, context);
			if (error)
				break;
		}
	} else /* Same logical size */ {

		error = do_hfs_truncate(vp, length, flags, truncateflags, context);
	}
	/* Files that are changing size are not hot file candidates. */
	if (VTOHFS(vp)->hfc_stage == HFC_RECORDING) {
		fp->ff_bytesread = 0;
	}

	if (!caller_has_cnode_lock)
		hfs_unlock(cp);

	// Make sure UBC's size matches up (in case we didn't completely succeed)
	errno_t err2 = hfs_ubc_setsize(vp, fp->ff_size, caller_has_cnode_lock);
	if (!error)
		error = err2;

	return error;
}
/*
 * Preallocate file storage space.
 */
int
hfs_vnop_allocate(struct vnop_allocate_args /* {
		vnode_t a_vp;
		off_t a_length;
		u_int32_t  a_flags;
		off_t *a_bytesallocated;
		off_t a_offset;
		vfs_context_t a_context;
	} */ *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp;
	ExtendedVCB *vcb;
	off_t length = ap->a_length;
	off_t startingPEOF;
	off_t moreBytesRequested;
	off_t actualBytesAdded;
	off_t filebytes;
	u_int32_t fileblocks;
	int retval, retval2;
	u_int32_t blockHint;
	u_int32_t extendFlags;   /* For call to ExtendFileC */
	struct hfsmount *hfsmp;
	kauth_cred_t cred = vfs_context_ucred(ap->a_context);
	int lockflags;
	time_t orig_ctime;

	*(ap->a_bytesallocated) = 0;

	if (!vnode_isreg(vp))
		return (EISDIR);
	if (length < (off_t)0)
		return (EINVAL);

	cp = VTOC(vp);

	orig_ctime = VTOC(vp)->c_ctime;

	check_for_tracked_file(vp, orig_ctime, ap->a_length == 0 ? NAMESPACE_HANDLER_TRUNCATE_OP|NAMESPACE_HANDLER_DELETE_OP : NAMESPACE_HANDLER_TRUNCATE_OP, NULL);

	hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

	if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
		goto Err_Exit;
	}

	fp = VTOF(vp);
	hfsmp = VTOHFS(vp);
	vcb = VTOVCB(vp);

	fileblocks = fp->ff_blocks;
	filebytes = (off_t)fileblocks * (off_t)vcb->blockSize;

	if ((ap->a_flags & ALLOCATEFROMVOL) && (length < filebytes)) {
		retval = EINVAL;
		goto Err_Exit;
	}

	/* Fill in the flags word for the call to Extend the file */

	extendFlags = kEFNoClumpMask;
	if (ap->a_flags & ALLOCATECONTIG)
		extendFlags |= kEFContigMask;
	if (ap->a_flags & ALLOCATEALL)
		extendFlags |= kEFAllMask;
	if (cred && suser(cred, NULL) != 0)
		extendFlags |= kEFReserveMask;
	if (hfs_virtualmetafile(cp))
		extendFlags |= kEFMetadataMask;

	retval = E_NONE;
	blockHint = 0;
	startingPEOF = filebytes;

	if (ap->a_flags & ALLOCATEFROMPEOF)
		length += filebytes;
	else if (ap->a_flags & ALLOCATEFROMVOL)
		blockHint = ap->a_offset / VTOVCB(vp)->blockSize;

	/* If no changes are necessary, then we're done */
	if (filebytes == length)
		goto Std_Exit;

	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of filebytes is 0, length will be at least 1.
	 */
	if (length > filebytes) {
		off_t total_bytes_added = 0, orig_request_size;

		orig_request_size = moreBytesRequested = length - filebytes;

#if QUOTA
		retval = hfs_chkdq(cp,
				(int64_t)(roundup(moreBytesRequested, vcb->blockSize)),
				cred, 0);
		if (retval)
			goto Err_Exit;
#endif /* QUOTA */

		/*
		 * Metadata zone checks.
		 */
		if (hfsmp->hfs_flags & HFS_METADATA_ZONE) {
			/*
			 * Allocate Journal and Quota files in metadata zone.
			 */
			if (hfs_virtualmetafile(cp)) {
				blockHint = hfsmp->hfs_metazone_start;
			} else if ((blockHint >= hfsmp->hfs_metazone_start) &&
				   (blockHint <= hfsmp->hfs_metazone_end)) {
				/*
				 * Move blockHint outside metadata zone.
				 */
				blockHint = hfsmp->hfs_metazone_end + 1;
			}
		}

		while ((length > filebytes) && (retval == E_NONE)) {
			off_t bytesRequested;

			if (hfs_start_transaction(hfsmp) != 0) {
				retval = EINVAL;
				goto Err_Exit;
			}

			/* Protect extents b-tree and allocation bitmap */
			lockflags = SFL_BITMAP;
			if (overflow_extents(fp))
				lockflags |= SFL_EXTENTS;
			lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

			if (moreBytesRequested >= HFS_BIGFILE_SIZE) {
				bytesRequested = HFS_BIGFILE_SIZE;
			} else {
				bytesRequested = moreBytesRequested;
			}

			if (extendFlags & kEFContigMask) {
				// if we're on a sparse device, this will force it to do a
				// full scan to find the space needed.
				hfsmp->hfs_flags &= ~HFS_DID_CONTIG_SCAN;
			}

			retval = MacToVFSError(ExtendFileC(vcb,
							   (FCB*)fp,
							   bytesRequested,
							   blockHint,
							   extendFlags,
							   &actualBytesAdded));

			if (retval == E_NONE) {
				*(ap->a_bytesallocated) += actualBytesAdded;
				total_bytes_added += actualBytesAdded;
				moreBytesRequested -= actualBytesAdded;
				if (blockHint != 0) {
					blockHint += actualBytesAdded / vcb->blockSize;
				}
			}
			filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize;

			hfs_systemfile_unlock(hfsmp, lockflags);

			if (hfsmp->jnl) {
				(void) hfs_update(vp, TRUE);
				(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
			}

			hfs_end_transaction(hfsmp);
		}

		/*
		 * if we get an error and no changes were made then exit
		 * otherwise we must do the hfs_update to reflect the changes
		 */
		if (retval && (startingPEOF == filebytes))
			goto Err_Exit;

		/*
		 * Adjust actualBytesAdded to be allocation block aligned, not
		 * clump size aligned.
		 * NOTE: So what we are reporting does not affect reality
		 * until the file is closed, when we truncate the file to allocation
		 * block size.
		 */
		if (total_bytes_added != 0 && orig_request_size < total_bytes_added)
			*(ap->a_bytesallocated) =
				roundup(orig_request_size, (off_t)vcb->blockSize);

	} else { /* Shorten the size of the file */

		/*
		 * N.B. At present, this code is never called.  If and when we
		 * do start using it, it looks like there might be slightly
		 * strange semantics with the file size: it's possible for the
		 * file size to *increase* e.g. if current file size is 5,
		 * length is 1024 and filebytes is 4096, the file size will
		 * end up being 1024 bytes.  This isn't necessarily a problem
		 * but it's not consistent with the code above which doesn't
		 * change the file size.
		 */

		retval = hfs_truncate(vp, length, 0, 0, ap->a_context);
		filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize;

		/*
		 * if we get an error and no changes were made then exit
		 * otherwise we must do the hfs_update to reflect the changes
		 */
		if (retval && (startingPEOF == filebytes)) goto Err_Exit;
#if QUOTA
		/* These are bytesreleased */
		(void) hfs_chkdq(cp, (int64_t)-((startingPEOF - filebytes)), NOCRED,0);
#endif /* QUOTA */

		if (fp->ff_size > filebytes) {
			fp->ff_size = filebytes;

			hfs_ubc_setsize(vp, fp->ff_size, true);
		}
	}

Std_Exit:
	cp->c_touch_chgtime = TRUE;
	cp->c_touch_modtime = TRUE;
	retval2 = hfs_update(vp, MNT_WAIT);

	if (retval == 0)
		retval = retval2;
Err_Exit:
	hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	hfs_unlock(cp);
	return (retval);
}
4155 * Pagein for HFS filesystem
4158 hfs_vnop_pagein(struct vnop_pagein_args
*ap
)
4160 struct vnop_pagein_args {
4163 vm_offset_t a_pl_offset,
4167 vfs_context_t a_context;
4173 struct filefork
*fp
;
4176 upl_page_info_t
*pl
;
4178 off_t page_needed_f_offset
;
4183 boolean_t truncate_lock_held
= FALSE
;
4184 boolean_t file_converted
= FALSE
;
4192 if ((error
= cp_handle_vnop(vp
, CP_READ_ACCESS
| CP_WRITE_ACCESS
, 0)) != 0) {
4194 * If we errored here, then this means that one of two things occurred:
4195 * 1. there was a problem with the decryption of the key.
4196 * 2. the device is locked and we are not allowed to access this particular file.
4198 * Either way, this means that we need to shut down this upl now. As long as
4199 * the pl pointer is NULL (meaning that we're supposed to create the UPL ourselves)
4200 * then we create a upl and immediately abort it.
4202 if (ap
->a_pl
== NULL
) {
4203 /* create the upl */
4204 ubc_create_upl (vp
, ap
->a_f_offset
, ap
->a_size
, &upl
, &pl
,
4205 UPL_UBC_PAGEIN
| UPL_RET_ONLY_ABSENT
);
4206 /* mark the range as needed so it doesn't immediately get discarded upon abort */
4207 ubc_upl_range_needed (upl
, ap
->a_pl_offset
/ PAGE_SIZE
, 1);
4209 /* Abort the range */
4210 ubc_upl_abort_range (upl
, 0, ap
->a_size
, UPL_ABORT_FREE_ON_EMPTY
| UPL_ABORT_ERROR
);
4216 #endif /* CONFIG_PROTECT */
4218 if (ap
->a_pl
!= NULL
) {
4220 * this can only happen for swap files now that
4221 * we're asking for V2 paging behavior...
4222 * so don't need to worry about decompression, or
4223 * keeping track of blocks read or taking the truncate lock
4225 error
= cluster_pagein(vp
, ap
->a_pl
, ap
->a_pl_offset
, ap
->a_f_offset
,
4226 ap
->a_size
, (off_t
)fp
->ff_size
, ap
->a_flags
);
4230 page_needed_f_offset
= ap
->a_f_offset
+ ap
->a_pl_offset
;
4234 * take truncate lock (shared/recursive) to guard against
4235 * zero-fill thru fsync interfering, but only for v2
4237 * the HFS_RECURSE_TRUNCLOCK arg indicates that we want the
4238 * lock shared and we are allowed to recurse 1 level if this thread already
4239 * owns the lock exclusively... this can legally occur
4240 * if we are doing a shrinking ftruncate against a file
4241 * that is mapped private, and the pages being truncated
4242 * do not currently exist in the cache... in that case
4243 * we will have to page-in the missing pages in order
4244 * to provide them to the private mapping... we must
4245 * also call hfs_unlock_truncate with a postive been_recursed
4246 * arg to indicate that if we have recursed, there is no need to drop
4247 * the lock. Allowing this simple recursion is necessary
4248 * in order to avoid a certain deadlock... since the ftruncate
4249 * already holds the truncate lock exclusively, if we try
4250 * to acquire it shared to protect the pagein path, we will
4253 * NOTE: The if () block below is a workaround in order to prevent a
4254 * VM deadlock. See rdar://7853471.
4256 * If we are in a forced unmount, then launchd will still have the
4257 * dyld_shared_cache file mapped as it is trying to reboot. If we
4258 * take the truncate lock here to service a page fault, then our
4259 * thread could deadlock with the forced-unmount. The forced unmount
4260 * thread will try to reclaim the dyld_shared_cache vnode, but since it's
4261 * marked C_DELETED, it will call ubc_setsize(0). As a result, the unmount
4262 * thread will think it needs to copy all of the data out of the file
4263 * and into a VM copy object. If we hold the cnode lock here, then that
4264 * VM operation will not be able to proceed, because we'll set a busy page
4265 * before attempting to grab the lock. Note that this isn't as simple as "don't
4266 * call ubc_setsize" because doing that would just shift the problem to the
4267 * ubc_msync done before the vnode is reclaimed.
4269 * So, if a forced unmount on this volume is in flight AND the cnode is
4270 * marked C_DELETED, then just go ahead and do the page in without taking
4271 * the lock (thus suspending pagein_v2 semantics temporarily). Since it's on a file
4272 * that is not going to be available on the next mount, this seems like a
4273 * OK solution from a correctness point of view, even though it is hacky.
4275 if (vfs_isforce(vp
->v_mount
)) {
4276 if (cp
->c_flag
& C_DELETED
) {
4277 /* If we don't get it, then just go ahead and operate without the lock */
4278 truncate_lock_held
= hfs_try_trunclock(cp
, HFS_SHARED_LOCK
, HFS_LOCK_SKIP_IF_EXCLUSIVE
);
4282 hfs_lock_truncate(cp
, HFS_SHARED_LOCK
, HFS_LOCK_SKIP_IF_EXCLUSIVE
);
4283 truncate_lock_held
= TRUE
;
4286 kret
= ubc_create_upl(vp
, ap
->a_f_offset
, ap
->a_size
, &upl
, &pl
, UPL_UBC_PAGEIN
| UPL_RET_ONLY_ABSENT
);
4288 if ((kret
!= KERN_SUCCESS
) || (upl
== (upl_t
) NULL
)) {
4292 ubc_upl_range_needed(upl
, ap
->a_pl_offset
/ PAGE_SIZE
, 1);
4294 upl_size
= isize
= ap
->a_size
;
4297 * Scan from the back to find the last page in the UPL, so that we
4298 * aren't looking at a UPL that may have already been freed by the
4299 * preceding aborts/completions.
4301 for (pg_index
= ((isize
) / PAGE_SIZE
); pg_index
> 0;) {
4302 if (upl_page_present(pl
, --pg_index
))
4304 if (pg_index
== 0) {
4306 * no absent pages were found in the range specified
4307 * just abort the UPL to get rid of it and then we're done
4309 ubc_upl_abort_range(upl
, 0, isize
, UPL_ABORT_FREE_ON_EMPTY
);
4314 * initialize the offset variables before we touch the UPL.
4315 * f_offset is the position into the file, in bytes
4316 * offset is the position into the UPL, in bytes
4317 * pg_index is the pg# of the UPL we're operating on
4318 * isize is the offset into the UPL of the last page that is present.
4320 isize
= ((pg_index
+ 1) * PAGE_SIZE
);
4323 f_offset
= ap
->a_f_offset
;
4329 if ( !upl_page_present(pl
, pg_index
)) {
4331 * we asked for RET_ONLY_ABSENT, so it's possible
4332 * to get back empty slots in the UPL.
4333 * just skip over them
4335 f_offset
+= PAGE_SIZE
;
4336 offset
+= PAGE_SIZE
;
4343 * We know that we have at least one absent page.
4344 * Now checking to see how many in a row we have
4347 xsize
= isize
- PAGE_SIZE
;
4350 if ( !upl_page_present(pl
, pg_index
+ num_of_pages
))
4355 xsize
= num_of_pages
* PAGE_SIZE
;
4358 if (VNODE_IS_RSRC(vp
)) {
4359 /* allow pageins of the resource fork */
4361 int compressed
= hfs_file_is_compressed(VTOC(vp
), 1); /* 1 == don't take the cnode lock */
4365 if (truncate_lock_held
) {
4367 * can't hold the truncate lock when calling into the decmpfs layer
4368 * since it calls back into this layer... even though we're only
4369 * holding the lock in shared mode, and the re-entrant path only
4370 * takes the lock shared, we can deadlock if some other thread
4371 * tries to grab the lock exclusively in between.
4373 hfs_unlock_truncate(cp
, HFS_LOCK_SKIP_IF_EXCLUSIVE
);
4374 truncate_lock_held
= FALSE
;
4377 ap
->a_pl_offset
= offset
;
4378 ap
->a_f_offset
= f_offset
;
4381 error
= decmpfs_pagein_compressed(ap
, &compressed
, VTOCMP(vp
));
4383 * note that decpfs_pagein_compressed can change the state of
4384 * 'compressed'... it will set it to 0 if the file is no longer
4385 * compressed once the compression lock is successfully taken
4386 * i.e. we would block on that lock while the file is being inflated
4390 /* successful page-in, update the access time */
4391 VTOC(vp
)->c_touch_acctime
= TRUE
;
4393 /* compressed files are not hot file candidates */
4394 if (VTOHFS(vp
)->hfc_stage
== HFC_RECORDING
) {
4395 fp
->ff_bytesread
= 0;
4397 } else if (error
== EAGAIN
) {
4399 * EAGAIN indicates someone else already holds the compression lock...
4400 * to avoid deadlocking, we'll abort this range of pages with an
4401 * indication that the pagein needs to be redriven
4403 ubc_upl_abort_range(upl
, (upl_offset_t
) offset
, xsize
, UPL_ABORT_FREE_ON_EMPTY
| UPL_ABORT_RESTART
);
4404 } else if (error
== ENOSPC
) {
4406 if (upl_size
== PAGE_SIZE
)
4407 panic("decmpfs_pagein_compressed: couldn't ubc_upl_map a single page\n");
4409 ubc_upl_abort_range(upl
, (upl_offset_t
) offset
, isize
, UPL_ABORT_FREE_ON_EMPTY
);
4411 ap
->a_size
= PAGE_SIZE
;
4413 ap
->a_pl_offset
= 0;
4414 ap
->a_f_offset
= page_needed_f_offset
;
4418 goto pagein_next_range
;
4422 * Set file_converted only if the file became decompressed while we were
4423 * paging in. If it were still compressed, we would re-start the loop using the goto
4424 * in the above block. This avoid us overloading truncate_lock_held as our retry_pagein
4425 * condition below, since we could have avoided taking the truncate lock to prevent
4426 * a deadlock in the force unmount case.
4428 file_converted
= TRUE
;
4431 if (file_converted
== TRUE
) {
4433 * the file was converted back to a regular file after we first saw it as compressed
4434 * we need to abort the upl, retake the truncate lock, recreate the UPL and start over
4435 * reset a_size so that we consider what remains of the original request
4436 * and null out a_upl and a_pl_offset.
4438 * We should only be able to get into this block if the decmpfs_pagein_compressed
4439 * successfully decompressed the range in question for this file.
4441 ubc_upl_abort_range(upl
, (upl_offset_t
) offset
, isize
, UPL_ABORT_FREE_ON_EMPTY
);
4445 ap
->a_pl_offset
= 0;
4447 /* Reset file_converted back to false so that we don't infinite-loop. */
4448 file_converted
= FALSE
;
	error = cluster_pagein(vp, upl, offset, f_offset, xsize, (off_t)fp->ff_size, ap->a_flags);

	/*
	 * Keep track of blocks read.
	 */
	if ( !vnode_isswap(vp) && VTOHFS(vp)->hfc_stage == HFC_RECORDING && error == 0) {
		int bytesread;
		int took_cnode_lock = 0;

		if (ap->a_f_offset == 0 && fp->ff_size < PAGE_SIZE)
			bytesread = fp->ff_size;
		else
			bytesread = xsize;

		/* When ff_bytesread exceeds 32-bits, update it behind the cnode lock. */
		if ((fp->ff_bytesread + bytesread) > 0x00000000ffffffff && cp->c_lockowner != current_thread()) {
			hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
			took_cnode_lock = 1;
		}
		/*
		 * If this file hasn't been seen since the start of
		 * the current sampling period then start over.
		 */
		if (cp->c_atime < VTOHFS(vp)->hfc_timebase) {
			struct timeval tv;

			fp->ff_bytesread = bytesread;
			microtime(&tv);
			cp->c_atime = tv.tv_sec;
		} else {
			fp->ff_bytesread += bytesread;
		}
		cp->c_touch_acctime = TRUE;
		if (took_cnode_lock)
			hfs_unlock(cp);
	}
	pg_index += num_of_pages;
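	/*
	 * Illustrative sketch (compiled out, hypothetical names): per-file read
	 * accounting that starts over at the beginning of each hot-file sampling
	 * period, mirroring the ff_bytesread / hfc_timebase logic above.
	 */
#if 0
#include <stdint.h>
#include <time.h>

struct demo_file {
	uint64_t bytesread;	/* bytes read during the current sampling period */
	time_t   atime;		/* last access time */
};

static void demo_account_read(struct demo_file *f, uint64_t bytes, time_t period_start)
{
	if (f->atime < period_start)
		f->bytesread = bytes;	/* first read this period: start over */
	else
		f->bytesread += bytes;	/* otherwise accumulate */
	f->atime = time(NULL);
}
#endif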
	if (truncate_lock_held == TRUE) {
		/* Note 1 is passed to hfs_unlock_truncate in been_recursed argument */
		hfs_unlock_truncate(cp, HFS_LOCK_SKIP_IF_EXCLUSIVE);
	}
/*
 * Pageout for HFS filesystem.
 */
int
hfs_vnop_pageout(struct vnop_pageout_args *ap)
/*
	struct vnop_pageout_args {
	   vnode_t a_vp,
	   upl_t a_pl,
	   vm_offset_t a_pl_offset,
	   off_t a_f_offset,
	   size_t a_size,
	   int a_flags,
	   vfs_context_t a_context;
	};
*/
{
	vnode_t vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp;
	int retval = 0;
	off_t filesize;
	upl_t upl;
	upl_page_info_t *pl;
	vm_offset_t a_pl_offset;
	int a_flags;
	int is_pageoutv2 = 0;
	kern_return_t kret;

	cp = VTOC(vp);
	fp = VTOF(vp);
	/*
	 * Figure out where the file ends, for pageout purposes.  If
	 * ff_new_size > ff_size, then we're in the middle of extending the
	 * file via a write, so it is safe (and necessary) that we be able
	 * to pageout up to that point.
	 */
	filesize = fp->ff_size;
	if (fp->ff_new_size > filesize)
		filesize = fp->ff_new_size;

	a_flags = ap->a_flags;
	a_pl_offset = ap->a_pl_offset;

	/*
	 * we can tell if we're getting the new or old behavior from the UPL
	 */
	if ((upl = ap->a_pl) == NULL) {
		int request_flags;

		is_pageoutv2 = 1;
		/*
		 * we're in control of any UPL we commit
		 * make sure someone hasn't accidentally passed in UPL_NOCOMMIT
		 */
		a_flags &= ~UPL_NOCOMMIT;

		/*
		 * For V2 semantics, we want to take the cnode truncate lock
		 * shared to guard against the file size changing via zero-filling.
		 *
		 * However, we have to be careful because we may be invoked
		 * via the ubc_msync path to write out dirty mmap'd pages
		 * in response to a lock event on a content-protected
		 * filesystem (e.g. to write out class A files).
		 * As a result, we want to take the truncate lock 'SHARED' with
		 * the mini-recursion locktype so that we don't deadlock/panic
		 * because we may be already holding the truncate lock exclusive to force any other
		 * IOs to have blocked behind us.
		 */
		hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_SKIP_IF_EXCLUSIVE);

		if (a_flags & UPL_MSYNC) {
			request_flags = UPL_UBC_MSYNC | UPL_RET_ONLY_DIRTY;
		} else {
			request_flags = UPL_UBC_PAGEOUT | UPL_RET_ONLY_DIRTY;
		}

		kret = ubc_create_upl(vp, ap->a_f_offset, ap->a_size, &upl, &pl, request_flags);

		if ((kret != KERN_SUCCESS) || (upl == (upl_t) NULL)) {
			retval = EINVAL;
			goto pageout_done;
		}
	}
	/*
	 * from this point forward upl points at the UPL we're working with
	 * it was either passed in or we successfully created it
	 */

	/*
	 * Now that HFS is opting into VFC_VFSVNOP_PAGEOUTV2, we may need to operate on our own
	 * UPL instead of relying on the UPL passed into us.  We go ahead and do that here,
	 * scanning for dirty ranges.  We'll issue our own N cluster_pageout calls, for
	 * N dirty ranges in the UPL.  Note that this is almost a direct copy of the
	 * logic in vnode_pageout except that we need to do it after grabbing the truncate
	 * lock in HFS so that we don't lock invert ourselves.
	 *
	 * Note that we can still get into this function on behalf of the default pager with
	 * non-V2 behavior (swapfiles).  However in that case, we did not grab locks above
	 * since fsync and other writing threads will grab the locks, then mark the
	 * relevant pages as busy.  But the pageout codepath marks the pages as busy,
	 * and THEN would attempt to grab the truncate lock, which would result in deadlock.  So
	 * we do not try to grab anything for the pre-V2 case, which should only be accessed
	 * by the paging/VM system.
	 */
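	/*
	 * Illustrative sketch (compiled out, hypothetical names): walk a UPL-like
	 * dirty-page map, find each run of contiguous dirty pages, and issue one
	 * write per run, which is what the V2 loop below does with cluster_pageout().
	 */
#if 0
#include <stdbool.h>
#include <stddef.h>

static void demo_pageout_runs(const bool *dirty, size_t npages,
                              void (*write_run)(size_t first_pg, size_t pg_count))
{
	size_t pg = 0;

	while (pg < npages) {
		if (!dirty[pg]) {			/* skip clean/absent slots */
			pg++;
			continue;
		}
		size_t run = 1;
		while (pg + run < npages && dirty[pg + run])
			run++;				/* extend the dirty run */
		write_run(pg, run);
		pg += run;
	}
}
#endif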
	if (is_pageoutv2) {
		off_t f_offset;
		int offset;
		int isize;
		int pg_index;
		int num_of_pages;
		int xsize;
		int error;
		int error_ret = 0;

		isize = ap->a_size;
		f_offset = ap->a_f_offset;

		/*
		 * Scan from the back to find the last page in the UPL, so that we
		 * aren't looking at a UPL that may have already been freed by the
		 * preceding aborts/completions.
		 */
		for (pg_index = ((isize) / PAGE_SIZE); pg_index > 0;) {
			if (upl_page_present(pl, --pg_index))
				break;
			if (pg_index == 0) {
				ubc_upl_abort_range(upl, 0, isize, UPL_ABORT_FREE_ON_EMPTY);
				goto pageout_done;
			}
		}

		/*
		 * initialize the offset variables before we touch the UPL.
		 * a_f_offset is the position into the file, in bytes
		 * offset is the position into the UPL, in bytes
		 * pg_index is the pg# of the UPL we're operating on.
		 * isize is the offset into the UPL of the last non-clean page.
		 */
		isize = ((pg_index + 1) * PAGE_SIZE);

		offset = 0;
		pg_index = 0;

		while (isize) {
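		/*
		 * Illustrative sketch (compiled out, hypothetical names): find the end
		 * of the last present page by scanning a presence map from the back,
		 * as the loop above does with upl_page_present().
		 */
#if 0
#include <stdbool.h>
#include <stddef.h>

static size_t demo_last_present_end(const bool *present, size_t npages)
{
	size_t pg;

	for (pg = npages; pg > 0; pg--) {
		if (present[pg - 1])
			return pg;	/* pages [0, pg) cover every present page */
	}
	return 0;			/* nothing present at all */
}
#endif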
			if ( !upl_page_present(pl, pg_index)) {
				/*
				 * we asked for RET_ONLY_DIRTY, so it's possible
				 * to get back empty slots in the UPL.
				 * just skip over them
				 */
				f_offset += PAGE_SIZE;
				offset += PAGE_SIZE;
				isize -= PAGE_SIZE;
				pg_index++;

				continue;
			}
			if ( !upl_dirty_page(pl, pg_index)) {
				panic ("hfs_vnop_pageout: unforeseen clean page @ index %d for UPL %p\n", pg_index, upl);
			}

			/*
			 * We know that we have at least one dirty page.
			 * Now checking to see how many in a row we have
			 */
			num_of_pages = 1;
			xsize = isize - PAGE_SIZE;

			while (xsize) {
				if ( !upl_dirty_page(pl, pg_index + num_of_pages))
					break;
				num_of_pages++;
				xsize -= PAGE_SIZE;
			}
			xsize = num_of_pages * PAGE_SIZE;
			if (!vnode_isswap(vp)) {
				off_t end_of_range;
				int tooklock = 0;

				if (cp->c_lockowner != current_thread()) {
					if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
						/*
						 * we're in the v2 path, so we are the
						 * owner of the UPL... we may have already
						 * processed some of the UPL, so abort it
						 * from the current working offset to the
						 * end of the UPL
						 */
						ubc_upl_abort_range(upl,
						                    offset,
						                    ap->a_size - offset,
						                    UPL_ABORT_FREE_ON_EMPTY);
						goto pageout_done;
					}
					tooklock = 1;
				}
				end_of_range = f_offset + xsize - 1;

				if (end_of_range >= filesize) {
					end_of_range = (off_t)(filesize - 1);
				}
				if (f_offset < filesize) {
					rl_remove(f_offset, end_of_range, &fp->ff_invalidranges);
					cp->c_flag |= C_MODIFIED; /* leof is dirty */
				}
				if (tooklock) {
					hfs_unlock(cp);
				}
			}
			if ((error = cluster_pageout(vp, upl, offset, f_offset,
			                             xsize, filesize, a_flags))) {
				if (error_ret == 0)
					error_ret = error;
			}

			f_offset += xsize;
			offset += xsize;
			isize -= xsize;
			pg_index += num_of_pages;
		}
		/* capture errnos bubbled out of cluster_pageout if they occurred */
		if (error_ret != 0) {
			retval = error_ret;
		}
	} /* end block for v2 pageout behavior */
	else {
		if (!vnode_isswap(vp)) {
			off_t end_of_range;
			int tooklock = 0;

			if (cp->c_lockowner != current_thread()) {
				if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
					if (!(a_flags & UPL_NOCOMMIT)) {
						ubc_upl_abort_range(upl,
						                    a_pl_offset,
						                    ap->a_size,
						                    UPL_ABORT_FREE_ON_EMPTY);
					}
					goto pageout_done;
				}
				tooklock = 1;
			}
			end_of_range = ap->a_f_offset + ap->a_size - 1;

			if (end_of_range >= filesize) {
				end_of_range = (off_t)(filesize - 1);
			}
			if (ap->a_f_offset < filesize) {
				rl_remove(ap->a_f_offset, end_of_range, &fp->ff_invalidranges);
				cp->c_flag |= C_MODIFIED; /* leof is dirty */
			}
			if (tooklock) {
				hfs_unlock(cp);
			}
		}
		/*
		 * just call cluster_pageout for old pre-v2 behavior
		 */
		retval = cluster_pageout(vp, upl, a_pl_offset, ap->a_f_offset,
		                         ap->a_size, filesize, a_flags);
	}
	/*
	 * If data was written, update the modification time of the file
	 * but only if it's mapped writable; we will have touched the
	 * modification time for direct writes.
	 */
	if (retval == 0 && (ubc_is_mapped_writable(vp)
	                    || ISSET(cp->c_flag, C_MIGHT_BE_DIRTY_FROM_MAPPING))) {
		hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

		// Check again with lock
		bool mapped_writable = ubc_is_mapped_writable(vp);
		if (mapped_writable
		    || ISSET(cp->c_flag, C_MIGHT_BE_DIRTY_FROM_MAPPING)) {
			cp->c_touch_modtime = TRUE;
			cp->c_touch_chgtime = TRUE;

			/*
			 * We only need to increment the generation counter if
			 * it's currently mapped writable because we incremented
			 * the counter in hfs_vnop_mnomap.
			 */
			if (mapped_writable)
				hfs_incr_gencount(VTOC(vp));

			/*
			 * If setuid or setgid bits are set and this process is
			 * not the superuser then clear the setuid and setgid bits
			 * as a precaution against tampering.
			 */
			if ((cp->c_mode & (S_ISUID | S_ISGID)) &&
			    (vfs_context_suser(ap->a_context) != 0)) {
				cp->c_mode &= ~(S_ISUID | S_ISGID);
			}
		}

		hfs_unlock(cp);
	}

pageout_done:
	if (is_pageoutv2) {
		/*
		 * Release the truncate lock.  Note that because
		 * we may have taken the lock recursively by
		 * being invoked via ubc_msync due to lockdown,
		 * we should release it recursively, too.
		 */
		hfs_unlock_truncate(cp, HFS_LOCK_SKIP_IF_EXCLUSIVE);
	}
	return (retval);
}
/*
 * Intercept B-Tree node writes to unswap them if necessary.
 */
int
hfs_vnop_bwrite(struct vnop_bwrite_args *ap)
{
	int retval = 0;
	register struct buf *bp = ap->a_bp;
	register struct vnode *vp = buf_vnode(bp);
	BlockDescriptor block;

	/* Trap B-Tree writes */
	if ((VTOC(vp)->c_fileid == kHFSExtentsFileID) ||
	    (VTOC(vp)->c_fileid == kHFSCatalogFileID) ||
	    (VTOC(vp)->c_fileid == kHFSAttributesFileID) ||
	    (vp == VTOHFS(vp)->hfc_filevp)) {
		/*
		 * Swap and validate the node if it is in native byte order.
		 * This is always true on big endian, so we always validate
		 * before writing here.  On little endian, the node typically has
		 * been swapped and validated when it was written to the journal,
		 * so we won't do anything here.
		 */
		if (((u_int16_t *)((char *)buf_dataptr(bp) + buf_count(bp) - 2))[0] == 0x000e) {
			/* Prepare the block pointer */
			block.blockHeader = bp;
			block.buffer = (char *)buf_dataptr(bp);
			block.blockNum = buf_lblkno(bp);
			/* not found in cache ==> came from disk */
			block.blockReadFromDisk = (buf_fromcache(bp) == 0);
			block.blockSize = buf_count(bp);

			/* Endian un-swap B-Tree node */
			retval = hfs_swap_BTNode (&block, vp, kSwapBTNodeHostToBig, false);
			if (retval)
				panic("hfs_vnop_bwrite: about to write corrupt node!\n");
		}
	}
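	/*
	 * Illustrative sketch (compiled out, hypothetical names): the trailing 16
	 * bits of a B-tree node hold the offset of record 0, which immediately
	 * follows the node descriptor, i.e. sizeof(BTNodeDescriptor) == 14
	 * (0x000e).  Reading that value in host order, as the check above does,
	 * tells us whether the node is still in host byte order and needs
	 * swapping, or has already been swapped to big-endian.
	 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static bool demo_node_is_host_order(const void *node, size_t node_size)
{
	uint16_t first_rec_offset;

	memcpy(&first_rec_offset,
	       (const char *)node + node_size - sizeof(first_rec_offset),
	       sizeof(first_rec_offset));
	return first_rec_offset == 0x000e;
}
#endif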
	/* This buffer shouldn't be locked anymore but if it is clear it */
	if ((buf_flags(bp) & B_LOCKED)) {
		if (VTOHFS(vp)->jnl) {
			panic("hfs: CLEARING the lock bit on bp %p\n", bp);
		}
		buf_clearflags(bp, B_LOCKED);
	}
	retval = vn_bwrite (ap);

	return (retval);
}
/*
 * Relocate a file to a new location on disk
 *  cnode must be locked on entry
 *
 * Relocation occurs by cloning the file's data from its
 * current set of blocks to a new set of blocks. During
 * the relocation all of the blocks (old and new) are
 * owned by the file.
 *
 * -----------------     -----------------
 * |///////////////|     |               |     STEP 1 (acquire new blocks)
 * -----------------     -----------------
 *
 * -----------------     -----------------
 * |///////////////|     |///////////////|     STEP 2 (clone data)
 * -----------------     -----------------
 *
 *                       -----------------
 *                       |///////////////|     STEP 3 (head truncate blocks)
 *                       -----------------
 *
 * During steps 2 and 3 page-outs to file offsets less
 * than or equal to N are suspended.
 *
 * During step 3 page-ins to the file get suspended.
 */
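/*
 * Illustrative outline of the relocation scheme pictured above (compiled out,
 * hypothetical names); the real steps are ExtendFileC(), hfs_clonefile() /
 * hfs_clonesysfile(), and HeadTruncateFile() in the function below.
 */
#if 0
struct demo_fork {
	long long size;		/* logical length, in bytes */
	long      blocks;	/* allocation blocks currently owned */
};

static int demo_extend(struct demo_fork *f, long long bytes);		/* STEP 1 */
static int demo_clone(struct demo_fork *f, long src_blk, long dst_blk);	/* STEP 2 */
static int demo_head_truncate(struct demo_fork *f, long blocks);	/* STEP 3 */

static int demo_relocate(struct demo_fork *f, long blksize)
{
	long headblks = f->blocks;
	long datablks = (long)((f->size + blksize - 1) / blksize);
	int  error;

	if ((error = demo_extend(f, (long long)datablks * blksize)))	/* acquire new blocks */
		return error;
	if ((error = demo_clone(f, 0, headblks)))			/* copy data into them */
		return error;
	return demo_head_truncate(f, headblks);				/* cut off the old blocks */
}
#endif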
int
hfs_relocate(struct vnode *vp, u_int32_t blockHint, kauth_cred_t cred,
             struct proc *p)
{
	struct cnode *cp;
	struct filefork *fp;
	struct hfsmount *hfsmp;
	u_int32_t headblks;
	u_int32_t datablks;
	u_int32_t blksize;
	u_int32_t growsize;
	u_int32_t nextallocsave;
	daddr64_t sector_a, sector_b;
	int eflags;
	off_t newbytes;
	int retval;
	int lockflags = 0;
	int took_trunc_lock = 0;
	int started_tr = 0;
	enum vtype vnodetype;

	vnodetype = vnode_vtype(vp);
	if (vnodetype != VREG) {
		/* Not allowed to move symlinks. */
		return (EPERM);
	}
	cp = VTOC(vp);
	fp = VTOF(vp);
	hfsmp = VTOHFS(vp);

	if (hfsmp->hfs_flags & HFS_FRAGMENTED_FREESPACE) {
		return (ENOSPC);
	}
	if (fp->ff_unallocblocks)
		return (EINVAL);

#if CONFIG_PROTECT
	/*
	 * <rdar://problem/9118426>
	 * Disable HFS file relocation on content-protected filesystems
	 */
	if (cp_fs_protected (hfsmp->hfs_mp)) {
		return EINVAL;
	}
#endif
	/* If it's an SSD, also disable HFS relocation */
	if (hfsmp->hfs_flags & HFS_SSD) {
		return EINVAL;
	}

	blksize = hfsmp->blockSize;
	if (blockHint == 0)
		blockHint = hfsmp->nextAllocation;

	if (fp->ff_size > 0x7fffffff) {
		return (EFBIG);
	}

	// We do not believe that this call to hfs_fsync() is
	// necessary and it causes a journal transaction
	// deadlock so we are removing it.
	//
	//if (vnodetype == VREG && !vnode_issystem(vp)) {
	//	retval = hfs_fsync(vp, MNT_WAIT, 0, p);
	//}

	if (!vnode_issystem(vp) && (vnodetype != VLNK)) {
		hfs_unlock(cp);
		hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		/* Force lock since callers expect lock to be held. */
		if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS))) {
			hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
			return (retval);
		}
		/* No need to continue if file was removed. */
		if (cp->c_flag & C_NOEXISTS) {
			hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
			return (ENOENT);
		}
		took_trunc_lock = 1;
	}
	headblks = fp->ff_blocks;
	datablks = howmany(fp->ff_size, blksize);
	growsize = datablks * blksize;
	eflags = kEFContigMask | kEFAllMask | kEFNoClumpMask;
	if (blockHint >= hfsmp->hfs_metazone_start &&
	    blockHint <= hfsmp->hfs_metazone_end)
		eflags |= kEFMetadataMask;
	if (hfs_start_transaction(hfsmp) != 0) {
		if (took_trunc_lock)
			hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		return (EINVAL);
	}
	started_tr = 1;

	/*
	 * Protect the extents b-tree and the allocation bitmap
	 * during MapFileBlockC and ExtendFileC operations.
	 */
	lockflags = SFL_BITMAP;
	if (overflow_extents(fp))
		lockflags |= SFL_EXTENTS;
	lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

	retval = MapFileBlockC(hfsmp, (FCB *)fp, 1, growsize - 1, &sector_a, NULL);
	if (retval) {
		retval = MacToVFSError(retval);
		goto out;
	}

	/*
	 * STEP 1 - acquire new allocation blocks.
	 */
	nextallocsave = hfsmp->nextAllocation;
	retval = ExtendFileC(hfsmp, (FCB *)fp, growsize, blockHint, eflags, &newbytes);
	if (eflags & kEFMetadataMask) {
		hfs_lock_mount(hfsmp);
		HFS_UPDATE_NEXT_ALLOCATION(hfsmp, nextallocsave);
		MarkVCBDirty(hfsmp);
		hfs_unlock_mount(hfsmp);
	}

	retval = MacToVFSError(retval);
	if (retval == 0) {
		cp->c_flag |= C_MODIFIED;
		if (newbytes < growsize) {
			retval = ENOSPC;
			goto restore;
		} else if (fp->ff_blocks < (headblks + datablks)) {
			printf("hfs_relocate: allocation failed id=%u, vol=%s\n", cp->c_cnid, hfsmp->vcbVN);
			retval = ENOSPC;
			goto restore;
		}

		retval = MapFileBlockC(hfsmp, (FCB *)fp, 1, growsize, &sector_b, NULL);
		if (retval) {
			retval = MacToVFSError(retval);
		} else if ((sector_a + 1) == sector_b) {
			retval = ENOSPC;
			goto restore;
		} else if ((eflags & kEFMetadataMask) &&
		           ((((u_int64_t)sector_b * hfsmp->hfs_logical_block_size) / blksize) >
		             hfsmp->hfs_metazone_end)) {
			const char * filestr;
			char emptystr = '\0';

			if (cp->c_desc.cd_nameptr != NULL) {
				filestr = (const char *)&cp->c_desc.cd_nameptr[0];
			} else if (vnode_name(vp) != NULL) {
				filestr = vnode_name(vp);
			} else {
				filestr = &emptystr;
			}
			retval = ENOSPC;
			goto restore;
		}
	}
	/* Done with system locks and journal for now. */
	hfs_systemfile_unlock(hfsmp, lockflags);
	lockflags = 0;
	hfs_end_transaction(hfsmp);
	started_tr = 0;

	if (retval) {
		/*
		 * Check to see if failure is due to excessive fragmentation.
		 */
		if ((retval == ENOSPC) &&
		    (hfs_freeblks(hfsmp, 0) > (datablks * 2))) {
			hfsmp->hfs_flags |= HFS_FRAGMENTED_FREESPACE;
		}
		goto out;
	}
	/*
	 * STEP 2 - clone file data into the new allocation blocks.
	 */

	if (vnodetype == VLNK)
		retval = EPERM;
	else if (vnode_issystem(vp))
		retval = hfs_clonesysfile(vp, headblks, datablks, blksize, cred, p);
	else
		retval = hfs_clonefile(vp, headblks, datablks, blksize);

	/* Start transaction for step 3 or for a restore. */
	if (hfs_start_transaction(hfsmp) != 0) {
		retval = EINVAL;
		goto out;
	}
	started_tr = 1;

	/*
	 * STEP 3 - switch to cloned data and remove old blocks.
	 */
	lockflags = SFL_BITMAP;
	if (overflow_extents(fp))
		lockflags |= SFL_EXTENTS;
	lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

	retval = HeadTruncateFile(hfsmp, (FCB *)fp, headblks);

	hfs_systemfile_unlock(hfsmp, lockflags);
	lockflags = 0;
	if (retval)
		goto restore;
out:
	if (took_trunc_lock)
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);

	if (lockflags) {
		hfs_systemfile_unlock(hfsmp, lockflags);
		lockflags = 0;
	}

	/* Push cnode's new extent data to disk. */
	if (retval == 0) {
		(void) hfs_update(vp, MNT_WAIT);
	}
	if (cp->c_cnid < kHFSFirstUserCatalogNodeID)
		(void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
	else
		(void) hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0);
exit:
	if (started_tr)
		hfs_end_transaction(hfsmp);

	return (retval);

restore:
	if (fp->ff_blocks == headblks) {
		if (took_trunc_lock)
			hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		goto exit;
	}
	/*
	 * Give back any newly allocated space.
	 */
	if (lockflags == 0) {
		lockflags = SFL_BITMAP;
		if (overflow_extents(fp))
			lockflags |= SFL_EXTENTS;
		lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);
	}

	(void) TruncateFileC(hfsmp, (FCB *)fp, fp->ff_size, 0, FORK_IS_RSRC(fp),
	                     FTOC(fp)->c_fileid, false);

	hfs_systemfile_unlock(hfsmp, lockflags);
	lockflags = 0;

	if (took_trunc_lock)
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	goto exit;
}
/*
 * Clone a file's data within the file.
 *
 */
static int
hfs_clonefile(struct vnode *vp, int blkstart, int blkcnt, int blksize)
{
	caddr_t bufp;
	size_t bufsize;
	size_t copysize;
	size_t iosize;
	size_t offset;
	off_t writebase;
	uio_t auio;
	int error = 0;

	writebase = blkstart * blksize;
	copysize = blkcnt * blksize;
	iosize = bufsize = MIN(copysize, 128 * 1024);
	offset = 0;
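	/*
	 * Illustrative userspace analogue (compiled out, hypothetical names):
	 * copy the first 'copysize' bytes of a file to offset 'writebase' within
	 * the same file, 128 KiB at a time, much like the cluster_read() /
	 * cluster_write() loop below does in-kernel.
	 */
#if 0
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>

static int demo_copy_within_file(int fd, off_t writebase, off_t copysize)
{
	size_t bufsize = copysize < 128 * 1024 ? (size_t)copysize : 128 * 1024;
	char *buf = malloc(bufsize);
	off_t offset = 0;
	int error = 0;

	if (buf == NULL)
		return -1;
	while (offset < copysize && error == 0) {
		ssize_t n = (copysize - offset) < (off_t)bufsize ?
		            (ssize_t)(copysize - offset) : (ssize_t)bufsize;

		if (pread(fd, buf, (size_t)n, offset) != n ||
		    pwrite(fd, buf, (size_t)n, writebase + offset) != n)
			error = -1;		/* short read/write or I/O error */
		else
			offset += n;
	}
	free(buf);
	return error;
}
#endif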
	hfs_unlock(VTOC(vp));

#if CONFIG_PROTECT
	if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
		hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
		return (error);
	}
#endif /* CONFIG_PROTECT */

	if (kmem_alloc(kernel_map, (vm_offset_t *)&bufp, bufsize)) {
		hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
		return (ENOMEM);
	}

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);

	while (offset < copysize) {
		iosize = MIN(copysize - offset, iosize);

		uio_reset(auio, offset, UIO_SYSSPACE, UIO_READ);
		uio_addiov(auio, (uintptr_t)bufp, iosize);

		error = cluster_read(vp, auio, copysize, IO_NOCACHE);
		if (error) {
			printf("hfs_clonefile: cluster_read failed - %d\n", error);
			break;
		}
		if (uio_resid(auio) != 0) {
			printf("hfs_clonefile: cluster_read: uio_resid = %lld\n", (int64_t)uio_resid(auio));
			error = EIO;
			break;
		}

		uio_reset(auio, writebase + offset, UIO_SYSSPACE, UIO_WRITE);
		uio_addiov(auio, (uintptr_t)bufp, iosize);

		error = cluster_write(vp, auio, writebase + offset,
		                      writebase + offset + iosize,
		                      uio_offset(auio), 0, IO_NOCACHE | IO_SYNC);
		if (error) {
			printf("hfs_clonefile: cluster_write failed - %d\n", error);
			break;
		}
		if (uio_resid(auio) != 0) {
			printf("hfs_clonefile: cluster_write failed - uio_resid not zero\n");
			error = EIO;
			break;
		}
		offset += iosize;
	}
	uio_free(auio);

	if ((blksize & PAGE_MASK)) {
		/*
		 * since the copy may not have started on a PAGE
		 * boundary (or may not have ended on one), we
		 * may have pages left in the cache since NOCACHE
		 * will let partially written pages linger...
		 * lets just flush the entire range to make sure
		 * we don't have any pages left that are beyond
		 * (or intersect) the real LEOF of this file
		 */
		ubc_msync(vp, writebase, writebase + offset, NULL, UBC_INVALIDATE | UBC_PUSHDIRTY);
	} else {
		/*
		 * No need to call ubc_msync or hfs_invalbuf
		 * since the file was copied using IO_NOCACHE and
		 * the copy was done starting and ending on a page
		 * boundary in the file.
		 */
	}
	kmem_free(kernel_map, (vm_offset_t)bufp, bufsize);

	hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
	return (error);
}
/*
 * Clone a system (metadata) file.
 *
 */
static int
hfs_clonesysfile(struct vnode *vp, int blkstart, int blkcnt, int blksize,
                 kauth_cred_t cred, struct proc *p)
{
	caddr_t bufp;
	char *offset;
	size_t bufsize;
	size_t iosize;
	struct buf *bp = NULL;
	daddr64_t blkno;
	daddr64_t blk;
	daddr64_t start_blk;
	daddr64_t last_blk;
	int breadcnt;
	int i;
	int error = 0;

	iosize = GetLogicalBlockSize(vp);
	bufsize = MIN(blkcnt * blksize, 1024 * 1024) & ~(iosize - 1);
	breadcnt = bufsize / iosize;
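	/*
	 * Illustrative note (compiled out, hypothetical name): the masking above
	 * rounds bufsize down to a multiple of iosize, which only works because
	 * the logical block size is a power of two.
	 */
#if 0
#include <stddef.h>

static size_t demo_round_down_pow2(size_t value, size_t alignment /* power of two */)
{
	return value & ~(alignment - 1);
}
/* e.g. demo_round_down_pow2(1048876, 4096) == 1048576 */
#endif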
	if (kmem_alloc(kernel_map, (vm_offset_t *)&bufp, bufsize)) {
		return (ENOMEM);
	}
	start_blk = ((daddr64_t)blkstart * blksize) / iosize;
	last_blk = ((daddr64_t)blkcnt * blksize) / iosize;
	blkno = 0;

	while (blkno < last_blk) {
		/*
		 * Read up to a megabyte
		 */
		offset = bufp;
		for (i = 0, blk = blkno; (i < breadcnt) && (blk < last_blk); ++i, ++blk) {
			error = (int)buf_meta_bread(vp, blk, iosize, cred, &bp);
			if (error) {
				printf("hfs_clonesysfile: meta_bread error %d\n", error);
				goto out;
			}
			if (buf_count(bp) != iosize) {
				printf("hfs_clonesysfile: b_bcount is only %d\n", buf_count(bp));
				goto out;
			}
			bcopy((char *)buf_dataptr(bp), offset, iosize);

			buf_markinvalid(bp);
			buf_brelse(bp);
			bp = NULL;

			offset += iosize;
		}

		/*
		 * Write up to a megabyte
		 */
		offset = bufp;
		for (i = 0; (i < breadcnt) && (blkno < last_blk); ++i, ++blkno) {
			bp = buf_getblk(vp, start_blk + blkno, iosize, 0, 0, BLK_META);
			if (bp == NULL) {
				printf("hfs_clonesysfile: getblk failed on blk %qd\n", start_blk + blkno);
				error = EIO;
				goto out;
			}
			bcopy(offset, (char *)buf_dataptr(bp), iosize);
			error = (int)buf_bwrite(bp);
			bp = NULL;
			if (error)
				goto out;

			offset += iosize;
		}
	}
out:
	if (bp) {
		buf_brelse(bp);
	}

	kmem_free(kernel_map, (vm_offset_t)bufp, bufsize);

	error = hfs_fsync(vp, MNT_WAIT, 0, p);

	return (error);
}