/*
 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* @(#)hfs_readwrite.c	1.0
 *
 * (c) 1998-2001 Apple Computer, Inc.  All Rights Reserved
 *
 * hfs_readwrite.c -- vnode operations to deal with reading and writing files.
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/buf_internal.h>
#include <sys/kauth.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/vfs_context.h>
#include <sys/fsevents.h>
#include <kern/kalloc.h>
#include <sys/sysctl.h>
#include <sys/fsctl.h>
#include <sys/mount_internal.h>
#include <sys/file_internal.h>
#include <miscfs/specfs/specdev.h>
#include <sys/ubc_internal.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <sys/kdebug.h>
#include "hfs_attrlist.h"
#include "hfs_endian.h"
#include "hfs_fsctl.h"
#include "hfs_quota.h"
#include "hfscommon/headers/FileMgrInternal.h"
#include "hfscommon/headers/BTreesInternal.h"
#include "hfs_cnode.h"
#define can_cluster(size)  ((((size & (4096-1))) == 0) && (size <= (MAXPHYSIO/2)))
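/*
 * can_cluster(size): a transfer qualifies for cluster I/O only when its size
 * is an exact multiple of 4096 bytes and no larger than MAXPHYSIO/2.
 */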
enum {
	MAXHFSFILESIZE = 0x7FFFFFFF		/* this needs to go in the mount structure */
};

/* from bsd/hfs/hfs_vfsops.c */
extern int hfs_vfs_vget (struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context);

static int  hfs_clonefile(struct vnode *, int, int, int);
static int  hfs_clonesysfile(struct vnode *, int, int, int, kauth_cred_t, struct proc *);
static int  hfs_minorupdate(struct vnode *vp);
static int  do_hfs_truncate(struct vnode *vp, off_t length, int flags, int skip, vfs_context_t context);

/* from bsd/hfs/hfs_vnops.c */
extern decmpfs_cnode* hfs_lazy_init_decmpfs_cnode (struct cnode *cp);

int flush_cache_on_write = 0;
SYSCTL_INT (_kern, OID_AUTO, flush_cache_on_write, CTLFLAG_RW | CTLFLAG_LOCKED, &flush_cache_on_write, 0, "always flush the drive cache on writes to uncached files");
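/*
 * The SYSCTL_INT above publishes this flag as "kern.flush_cache_on_write".
 * An illustrative (hypothetical, userspace) sketch of toggling it with the
 * standard sysctl(3) interface:
 *
 *	#include <sys/sysctl.h>
 *
 *	int on = 1;
 *	sysctlbyname("kern.flush_cache_on_write", NULL, NULL, &on, sizeof(on));
 */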
/*
 * Read data from a file.
 */
int
hfs_vnop_read(struct vnop_read_args *ap)
{
	/*
	   struct vnop_read_args {
		struct vnodeop_desc *a_desc;
		vfs_context_t a_context;
	   };
	 */
	uio_t uio = ap->a_uio;
	struct vnode *vp = ap->a_vp;
	struct hfsmount *hfsmp;
	off_t start_resid = uio_resid(uio);
	off_t offset = uio_offset(uio);
	int took_truncate_lock = 0;
	int throttled_count = 0;

	/* Preflight checks */
	if (!vnode_isreg(vp)) {
		/* can only read regular files */
	}
	if (start_resid == 0)
		return (0);		/* Nothing left to do */
	if (offset < 0)
		return (EINVAL);	/* can't read from a negative offset */

	if ((ap->a_ioflag & (IO_SKIP_ENCRYPTION|IO_SYSCALL_DISPATCH)) ==
	    (IO_SKIP_ENCRYPTION|IO_SYSCALL_DISPATCH)) {
		/* Don't allow unencrypted io request from user space */
	}

	if (VNODE_IS_RSRC(vp)) {
		if (hfs_hides_rsrc(ap->a_context, VTOC(vp), 1)) { /* 1 == don't take the cnode lock */
		}
		/* otherwise read the resource fork normally */
		int compressed = hfs_file_is_compressed(VTOC(vp), 1); /* 1 == don't take the cnode lock */

		retval = decmpfs_read_compressed(ap, &compressed, VTOCMP(vp));

		/* successful read, update the access time */
		VTOC(vp)->c_touch_acctime = TRUE;

		/* compressed files are not hot file candidates */
		if (VTOHFS(vp)->hfc_stage == HFC_RECORDING) {
			VTOF(vp)->ff_bytesread = 0;
		}
		/* otherwise the file was converted back to a regular file while we were reading it */
	} else if ((VTOC(vp)->c_bsdflags & UF_COMPRESSED)) {
		error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
#endif /* HFS_COMPRESSION */

	if ((retval = cp_handle_vnop (vp, CP_READ_ACCESS, ap->a_ioflag)) != 0) {
	}

	/*
	 * If this read request originated from a syscall (as opposed to
	 * an in-kernel page fault or something), then set it up for
	 * throttle checks.
	 */
	if (ap->a_ioflag & IO_SYSCALL_DISPATCH) {
		io_throttle = IO_RETURN_ON_THROTTLE;
	}

	/* Protect against a size change. */
	hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	took_truncate_lock = 1;

	filesize = fp->ff_size;
	filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize;

	/*
	 * Check the file size. Note that per POSIX spec, we return 0 at
	 * file EOF, so attempting a read at an offset that is too big
	 * should just return 0 on HFS+. Since the return value was initialized
	 * to 0 above, we just jump to exit.  HFS Standard has its own behavior.
	 */
	if (offset > filesize) {
		if ((hfsmp->hfs_flags & HFS_STANDARD) &&
		    (offset > (off_t)MAXHFSFILESIZE)) {
		}
	}

	KERNEL_DEBUG(HFSDBG_READ | DBG_FUNC_START,
		(int)uio_offset(uio), uio_resid(uio), (int)filesize, (int)filebytes, 0);

	retval = cluster_read(vp, uio, filesize, ap->a_ioflag | io_throttle);

	cp->c_touch_acctime = TRUE;

	KERNEL_DEBUG(HFSDBG_READ | DBG_FUNC_END,
		(int)uio_offset(uio), uio_resid(uio), (int)filesize, (int)filebytes, 0);

	/*
	 * Keep track of blocks read.
	 */
	if (hfsmp->hfc_stage == HFC_RECORDING && retval == 0) {
		int took_cnode_lock = 0;

		bytesread = start_resid - uio_resid(uio);

		/* When ff_bytesread exceeds 32-bits, update it behind the cnode lock. */
		if ((fp->ff_bytesread + bytesread) > 0x00000000ffffffff) {
			hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
		}
		/*
		 * If this file hasn't been seen since the start of
		 * the current sampling period then start over.
		 */
		if (cp->c_atime < hfsmp->hfc_timebase) {
			fp->ff_bytesread = bytesread;
			cp->c_atime = tv.tv_sec;
		} else {
			fp->ff_bytesread += bytesread;
		}
	}

	if (took_truncate_lock) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	}
	if (retval == EAGAIN) {
		throttle_lowpri_io(1);
	}
	if (throttled_count) {
		throttle_info_reset_window((uthread_t)get_bsdthread_info(current_thread()));
	}
/*
 * Write data to a file.
 */
int
hfs_vnop_write(struct vnop_write_args *ap)
{
	uio_t uio = ap->a_uio;
	struct vnode *vp = ap->a_vp;
	struct hfsmount *hfsmp;
	kauth_cred_t cred = NULL;
	off_t bytesToAdd = 0;
	off_t actualBytesAdded;
	int ioflag = ap->a_ioflag;
	int cnode_locked = 0;
	int partialwrite = 0;
	time_t orig_ctime = VTOC(vp)->c_ctime;
	int took_truncate_lock = 0;
	int io_return_on_throttle = 0;
	int throttled_count = 0;
	struct rl_entry *invalid_range;

	if ( hfs_file_is_compressed(VTOC(vp), 1) ) { /* 1 == don't take the cnode lock */
		int state = decmpfs_cnode_get_vnode_state(VTOCMP(vp));
			case FILE_IS_COMPRESSED:
			case FILE_IS_CONVERTING:
				/* if FILE_IS_CONVERTING, we allow writes but do not
				   bother with snapshots or else we will deadlock.
				 */
				printf("invalid state %d for compressed file\n", state);
	} else if ((VTOC(vp)->c_bsdflags & UF_COMPRESSED)) {
		error = check_for_dataless_file(vp, NAMESPACE_HANDLER_WRITE_OP);

	check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_WRITE_OP, uio);

	if ((ioflag & (IO_SKIP_ENCRYPTION|IO_SYSCALL_DISPATCH)) ==
	    (IO_SKIP_ENCRYPTION|IO_SYSCALL_DISPATCH)) {
		/* Don't allow unencrypted io request from user space */
	}

	resid = uio_resid(uio);
	offset = uio_offset(uio);

	if (!vnode_isreg(vp))
		return (EPERM);		/* Can only write regular files */

	if ((retval = cp_handle_vnop (vp, CP_WRITE_ACCESS, 0)) != 0) {
	}

	eflags = kEFDeferMask;	/* defer file block allocations */

	/*
	 * When the underlying device is sparse and space
	 * is low (< 8MB), stop doing delayed allocations
	 * and begin doing synchronous I/O.
	 */
	if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) &&
	    (hfs_freeblks(hfsmp, 0) < 2048)) {
		eflags &= ~kEFDeferMask;
	}
#endif /* HFS_SPARSE_DEV */

	if ((ioflag & (IO_SINGLE_WRITER | IO_SYSCALL_DISPATCH)) ==
	    (IO_SINGLE_WRITER | IO_SYSCALL_DISPATCH)) {
		io_return_on_throttle = IO_RETURN_ON_THROTTLE;
	}
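	/*
	 * With IO_RETURN_ON_THROTTLE set, the cluster_write() below may return
	 * EAGAIN instead of blocking; the EAGAIN handling near the end of this
	 * function then calls throttle_lowpri_io(1) and the write is retried.
	 */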
	/*
	 * Protect against a size change.
	 *
	 * Note: If took_truncate_lock is true, then we previously got the lock shared
	 * but needed to upgrade to exclusive.  So try getting it exclusive from the
	 * start.
	 */
	if (ioflag & IO_APPEND || took_truncate_lock) {
		hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	} else {
		hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
	}
	took_truncate_lock = 1;

	if (ioflag & IO_APPEND) {
		uio_setoffset(uio, fp->ff_size);
		offset = fp->ff_size;
	}
	if ((cp->c_bsdflags & APPEND) && offset != fp->ff_size) {
	}

	origFileSize = fp->ff_size;
	writelimit = offset + resid;
	filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize;

	/*
	 * We may need an exclusive truncate lock for several reasons, all
	 * of which are because we may be writing to a (portion of a) block
	 * for the first time, and we need to make sure no readers see the
	 * prior, uninitialized contents of the block.  The cases are:
	 *
	 * 1. We have unallocated (delayed allocation) blocks.  We may be
	 *    allocating new blocks to the file and writing to them.
	 *    (A more precise check would be whether the range we're writing
	 *    to contains delayed allocation blocks.)
	 * 2. We need to extend the file.  The bytes between the old EOF
	 *    and the new EOF are not yet initialized.  This is important
	 *    even if we're not allocating new blocks to the file.  If the
	 *    old EOF and new EOF are in the same block, we still need to
	 *    protect that range of bytes until they are written for the
	 *    first time.
	 * 3. The write overlaps some invalid ranges (delayed zero fill; that
	 *    part of the file has been allocated, but not yet written).
	 *
	 * If we had a shared lock with the above cases, we need to try to upgrade
	 * to an exclusive lock.  If the upgrade fails, we will lose the shared
	 * lock, and will need to take the truncate lock again; the took_truncate_lock
	 * flag will still be set, causing us to try for an exclusive lock next time.
	 *
	 * NOTE: Testing for #3 (delayed zero fill) needs to be done while the cnode
	 * lock is held, since it protects the range lists.
	 */
	if ((cp->c_truncatelockowner == HFS_SHARED_OWNER) &&
	    ((fp->ff_unallocblocks != 0) ||
	     (writelimit > origFileSize))) {
		if (lck_rw_lock_shared_to_exclusive(&cp->c_truncatelock) == FALSE) {
			/*
			 * Lock upgrade failed and we lost our shared lock, try again.
			 * Note: we do not set took_truncate_lock=0 here.  Leaving it
			 * set to 1 will cause us to try to get the lock exclusive.
			 */
		} else {
			/* Store the owner in the c_truncatelockowner field if we successfully upgrade */
			cp->c_truncatelockowner = current_thread();
		}
	}

	if ( (retval = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
	}

	/*
	 * Now that we have the cnode lock, see if there are delayed zero fill ranges
	 * overlapping our write.  If so, we need the truncate lock exclusive (see above).
	 */
	if ((cp->c_truncatelockowner == HFS_SHARED_OWNER) &&
	    (rl_scan(&fp->ff_invalidranges, offset, writelimit-1, &invalid_range) != RL_NOOVERLAP)) {
		/*
		 * When testing, it appeared that calling lck_rw_lock_shared_to_exclusive() causes
		 * a deadlock, rather than simply returning failure.  (That is, it apparently does
		 * not behave like a "try_lock").  Since this condition is rare, just drop the
		 * cnode lock and try again.  Since took_truncate_lock is set, we will
		 * automatically take the truncate lock exclusive.
		 */
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	}

	KERNEL_DEBUG(HFSDBG_WRITE | DBG_FUNC_START,
		(int)offset, uio_resid(uio), (int)fp->ff_size, (int)filebytes, 0);

	/* Check if we do not need to extend the file */
	if (writelimit <= filebytes) {
	}

	cred = vfs_context_ucred(ap->a_context);
	bytesToAdd = writelimit - filebytes;

	retval = hfs_chkdq(cp, (int64_t)(roundup(bytesToAdd, hfsmp->blockSize)), cred, 0);

	if (hfs_start_transaction(hfsmp) != 0) {
	}

	while (writelimit > filebytes) {
		bytesToAdd = writelimit - filebytes;
		if (cred && suser(cred, NULL) != 0)
			eflags |= kEFReserveMask;

		/* Protect extents b-tree and allocation bitmap */
		lockflags = SFL_BITMAP;
		if (overflow_extents(fp))
			lockflags |= SFL_EXTENTS;
		lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

		/* Files that are changing size are not hot file candidates. */
		if (hfsmp->hfc_stage == HFC_RECORDING) {
			fp->ff_bytesread = 0;
		}
		retval = MacToVFSError(ExtendFileC (hfsmp, (FCB*)fp, bytesToAdd,
				0, eflags, &actualBytesAdded));

		hfs_systemfile_unlock(hfsmp, lockflags);

		if ((actualBytesAdded == 0) && (retval == E_NONE))
		if (retval != E_NONE)
		filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize;
		KERNEL_DEBUG(HFSDBG_WRITE | DBG_FUNC_NONE,
			(int)offset, uio_resid(uio), (int)fp->ff_size, (int)filebytes, 0);
	}
	(void) hfs_update(vp, TRUE);
	(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
	(void) hfs_end_transaction(hfsmp);

	/*
	 * If we didn't grow the file enough try a partial write.
	 * POSIX expects this behavior.
	 */
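	/*
	 * Illustrative numbers only: if 100 KB were requested but only enough
	 * blocks for 40 KB could be allocated, the resid is trimmed so that the
	 * first 40 KB are written and a short count is reported to the caller,
	 * rather than failing the entire write with ENOSPC.
	 */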
	if ((retval == ENOSPC) && (filebytes > offset)) {
		uio_setresid(uio, (uio_resid(uio) - bytesToAdd));
		writelimit = filebytes;
	}

	if (retval == E_NONE) {
		if (writelimit > fp->ff_size)
			filesize = writelimit;
		else
			filesize = fp->ff_size;

		lflag = ioflag & ~(IO_TAILZEROFILL | IO_HEADZEROFILL | IO_NOZEROVALID | IO_NOZERODIRTY);

		if (offset <= fp->ff_size) {
			zero_off = offset & ~PAGE_MASK_64;

			/* Check whether the area between the zero_off and the start
			   of the transfer is invalid and should be zero-filled
			   as part of the transfer:
			 */
			if (offset > zero_off) {
				if (rl_scan(&fp->ff_invalidranges, zero_off, offset - 1, &invalid_range) != RL_NOOVERLAP)
					lflag |= IO_HEADZEROFILL;
			}

			off_t eof_page_base = fp->ff_size & ~PAGE_MASK_64;

			/* The bytes between fp->ff_size and uio->uio_offset must never be
			   read without being zeroed.  The current last block is filled with zeroes
			   if it holds valid data but in all cases merely do a little bookkeeping
			   to track the area from the end of the current last page to the start of
			   the area actually written.  For the same reason only the bytes up to the
			   start of the page where this write will start is invalidated; any remainder
			   before uio->uio_offset is explicitly zeroed as part of the cluster_write.

			   Note that inval_start, the start of the page after the current EOF,
			   may be past the start of the write, in which case the zeroing
			   will be handled by the cluster_write of the actual data.
			 */
			inval_start = (fp->ff_size + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
			inval_end = offset & ~PAGE_MASK_64;
			zero_off = fp->ff_size;

			if ((fp->ff_size & PAGE_MASK_64) &&
			    (rl_scan(&fp->ff_invalidranges,
					eof_page_base,
					fp->ff_size - 1,
					&invalid_range) != RL_NOOVERLAP)) {
				/* The page containing the EOF is not valid, so the
				   entire page must be made inaccessible now.  If the write
				   starts on a page beyond the page containing the eof
				   (inval_end > eof_page_base), add the
				   whole page to the range to be invalidated.  Otherwise
				   (i.e. if the write starts on the same page), zero-fill
				   the entire page explicitly now:
				 */
				if (inval_end > eof_page_base) {
					inval_start = eof_page_base;
				} else {
					zero_off = eof_page_base;
				}
			}

			if (inval_start < inval_end) {
				/* There's some range of data that's going to be marked invalid */
				if (zero_off < inval_start) {
					/* The pages between inval_start and inval_end are going to be invalidated,
					   and the actual write will start on a page past inval_end.  Now's the last
					   chance to zero-fill the page containing the EOF:
					 */
					retval = cluster_write(vp, (uio_t) 0,
							fp->ff_size, inval_start,
							zero_off, (off_t)0,
							lflag | IO_HEADZEROFILL | IO_NOZERODIRTY);
					hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
					if (retval) goto ioerr_exit;
					offset = uio_offset(uio);
				}

				/* Mark the remaining area of the newly allocated space as invalid: */
				rl_add(inval_start, inval_end - 1 , &fp->ff_invalidranges);
				cp->c_zftimeout = tv.tv_sec + ZFTIMELIMIT;
				zero_off = fp->ff_size = inval_end;
			}

			if (offset > zero_off) lflag |= IO_HEADZEROFILL;
		}

		/* Check to see whether the area between the end of the write and the end of
		   the page it falls in is invalid and should be zero-filled as part of the transfer:
		 */
		tail_off = (writelimit + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
		if (tail_off > filesize) tail_off = filesize;
		if (tail_off > writelimit) {
			if (rl_scan(&fp->ff_invalidranges, writelimit, tail_off - 1, &invalid_range) != RL_NOOVERLAP) {
				lflag |= IO_TAILZEROFILL;
			}
		}

		/*
		 * if the write starts beyond the current EOF (possibly advanced in the
		 * zeroing of the last block, above), then we'll zero fill from the current EOF
		 * to where the write begins:
		 *
		 * NOTE: If (and ONLY if) the portion of the file about to be written is
		 *   before the current EOF it might be marked as invalid now and must be
		 *   made readable (removed from the invalid ranges) before cluster_write
		 *   tries to write it.
		 */
		io_start = (lflag & IO_HEADZEROFILL) ? zero_off : offset;
		if (io_start < fp->ff_size) {
			io_end = (lflag & IO_TAILZEROFILL) ? tail_off : writelimit;
			rl_remove(io_start, io_end - 1, &fp->ff_invalidranges);
		}

		/*
		 * We need to tell UBC the fork's new size BEFORE calling
		 * cluster_write, in case any of the new pages need to be
		 * paged out before cluster_write completes (which does happen
		 * in embedded systems due to extreme memory pressure).
		 * Similarly, we need to tell hfs_vnop_pageout what the new EOF
		 * will be, so that it can pass that on to cluster_pageout, and
		 * allow those pageouts.
		 *
		 * We don't update ff_size yet since we don't want pageins to
		 * be able to see uninitialized data between the old and new
		 * EOF, until cluster_write has completed and initialized that
		 * part of the file.
		 *
		 * The vnode pager relies on the file size last given to UBC via
		 * ubc_setsize.  hfs_vnop_pageout relies on fp->ff_new_size or
		 * ff_size (whichever is larger).  NOTE: ff_new_size is always
		 * zero, unless we are extending the file via write.
		 */
		if (filesize > fp->ff_size) {
			fp->ff_new_size = filesize;
			ubc_setsize(vp, filesize);
		}
		retval = cluster_write(vp, uio, fp->ff_size, filesize, zero_off,
				tail_off, lflag | IO_NOZERODIRTY | io_return_on_throttle);

			fp->ff_new_size = 0;	/* no longer extending; use ff_size */

			if (retval == EAGAIN) {
				/*
				 * EAGAIN indicates that we still have I/O to do, but
				 * that we now need to be throttled
				 */
				if (resid != uio_resid(uio)) {
					/*
					 * did manage to do some I/O before returning EAGAIN
					 */
					resid = uio_resid(uio);
					offset = uio_offset(uio);

					cp->c_touch_chgtime = TRUE;
					cp->c_touch_modtime = TRUE;
					hfs_incr_gencount(cp);
				}
				if (filesize > fp->ff_size) {
					/*
					 * we called ubc_setsize before the call to
					 * cluster_write... since we only partially
					 * completed the I/O, we need to
					 * re-adjust our idea of the filesize based
					 * on our interim EOF
					 */
					ubc_setsize(vp, offset);
					fp->ff_size = offset;
				}
			}
			if (filesize > origFileSize) {
				ubc_setsize(vp, origFileSize);
			}

		if (filesize > origFileSize) {
			fp->ff_size = filesize;

			/* Files that are changing size are not hot file candidates. */
			if (hfsmp->hfc_stage == HFC_RECORDING) {
				fp->ff_bytesread = 0;
			}
		}
		fp->ff_new_size = 0;	/* ff_size now has the correct size */
	}
	uio_setresid(uio, (uio_resid(uio) + bytesToAdd));

	// XXXdbg - see radar 4871353 for more info
	if (flush_cache_on_write && ((ioflag & IO_NOCACHE) || vnode_isnocache(vp))) {
		VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL);
	}

	if (resid > uio_resid(uio)) {
		hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

		cp->c_touch_chgtime = TRUE;
		cp->c_touch_modtime = TRUE;
		hfs_incr_gencount(cp);

		/*
		 * If we successfully wrote any data, and we are not the superuser
		 * we clear the setuid and setgid bits as a precaution against
		 * tampering.
		 */
		if (cp->c_mode & (S_ISUID | S_ISGID)) {
			cred = vfs_context_ucred(ap->a_context);
			if (cred && suser(cred, NULL)) {
				cp->c_mode &= ~(S_ISUID | S_ISGID);
			}
		}
	}
	if (ioflag & IO_UNIT) {
		(void)hfs_truncate(vp, origFileSize, ioflag & IO_SYNC,
		uio_setoffset(uio, (uio_offset(uio) - (resid - uio_resid(uio))));
		uio_setresid(uio, resid);
		filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize;
	} else if ((ioflag & IO_SYNC) && (resid > uio_resid(uio)))
		retval = hfs_update(vp, TRUE);

	/* Updating vcbWrCnt doesn't need to be atomic. */

	KERNEL_DEBUG(HFSDBG_WRITE | DBG_FUNC_END,
		(int)uio_offset(uio), uio_resid(uio), (int)fp->ff_size, (int)filebytes, 0);

	if (took_truncate_lock) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	}
	if (retval == EAGAIN) {
		throttle_lowpri_io(1);
	}
	if (throttled_count) {
		throttle_info_reset_window((uthread_t)get_bsdthread_info(current_thread()));
	}
/* support for the "bulk-access" fcntl */

#define CACHE_LEVELS 			16
#define NUM_CACHE_ENTRIES 		(64*16)
#define PARENT_IDS_FLAG 		0x100
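/*
 * With these values the access cache holds NUM_CACHE_ENTRIES = 64*16 = 1024
 * entries, and at most CACHE_LEVELS (16) ancestor directory ids are recorded
 * per access check (see parent_ids[] in do_access_check below).
 */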
struct access_cache {
	int numcached;
	int cachehits; /* these two for statistics gathering */
	int lookups;
	unsigned int *acache;
	unsigned char *haveaccess;
};
struct access_t {
	uid_t		uid;		/* IN: effective user id */
	short		flags;		/* IN: access requested (i.e. R_OK) */
	short		num_groups;	/* IN: number of groups user belongs to */
	int		num_files;	/* IN: number of files to process */
	int		*file_ids;	/* IN: array of file ids */
	gid_t		*groups;	/* IN: array of groups */
	short		*access;	/* OUT: access info for each file (0 for 'has access') */
} __attribute__((unavailable)); // this structure is for reference purposes only

struct user32_access_t {
	uid_t		uid;		/* IN: effective user id */
	short		flags;		/* IN: access requested (i.e. R_OK) */
	short		num_groups;	/* IN: number of groups user belongs to */
	int		num_files;	/* IN: number of files to process */
	user32_addr_t	file_ids;	/* IN: array of file ids */
	user32_addr_t	groups;		/* IN: array of groups */
	user32_addr_t	access;		/* OUT: access info for each file (0 for 'has access') */
};

struct user64_access_t {
	uid_t		uid;		/* IN: effective user id */
	short		flags;		/* IN: access requested (i.e. R_OK) */
	short		num_groups;	/* IN: number of groups user belongs to */
	int		num_files;	/* IN: number of files to process */
	user64_addr_t	file_ids;	/* IN: array of file ids */
	user64_addr_t	groups;		/* IN: array of groups */
	user64_addr_t	access;		/* OUT: access info for each file (0 for 'has access') */
};

// these are the "extended" versions of the above structures
// note that it is crucial that they be different sized than
// the regular version
struct ext_access_t {
	uint32_t	flags;		/* IN: access requested (i.e. R_OK) */
	uint32_t	num_files;	/* IN: number of files to process */
	uint32_t	map_size;	/* IN: size of the bit map */
	uint32_t	*file_ids;	/* IN: array of file ids */
	char		*bitmap;	/* OUT: hash-bitmap of interesting directory ids */
	short		*access;	/* OUT: access info for each file (0 for 'has access') */
	uint32_t	num_parents;	/* future use */
	cnid_t		*parents;	/* future use */
} __attribute__((unavailable)); // this structure is for reference purposes only

struct user32_ext_access_t {
	uint32_t	flags;		/* IN: access requested (i.e. R_OK) */
	uint32_t	num_files;	/* IN: number of files to process */
	uint32_t	map_size;	/* IN: size of the bit map */
	user32_addr_t	file_ids;	/* IN: array of file ids */
	user32_addr_t	bitmap;		/* OUT: hash-bitmap of interesting directory ids */
	user32_addr_t	access;		/* OUT: access info for each file (0 for 'has access') */
	uint32_t	num_parents;	/* future use */
	user32_addr_t	parents;	/* future use */
};

struct user64_ext_access_t {
	uint32_t	flags;		/* IN: access requested (i.e. R_OK) */
	uint32_t	num_files;	/* IN: number of files to process */
	uint32_t	map_size;	/* IN: size of the bit map */
	user64_addr_t	file_ids;	/* IN: array of file ids */
	user64_addr_t	bitmap;		/* OUT: hash-bitmap of interesting directory ids */
	user64_addr_t	access;		/* OUT: access info for each file (0 for 'has access') */
	uint32_t	num_parents;	/* future use */
	user64_addr_t	parents;	/* future use */
};
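/*
 * Which of these layouts a caller handed in is inferred purely from the size
 * of the fcntl/ioctl argument: do_bulk_access_check() below compares arg_size
 * against sizeof(struct user32_access_t), sizeof(struct user32_ext_access_t)
 * and sizeof(struct user64_ext_access_t), and up-converts the 32-bit variants
 * into a user64_ext_access_t before doing any work.  That is why the extended
 * structures must differ in size from the non-extended ones.
 */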
/*
 * Perform a binary search for the given parent_id. Return value is
 * the index if there is a match.  If no_match_indexp is non-NULL it
 * will be assigned with the index to insert the item (even if it was
 * not found).
 */
static int cache_binSearch(cnid_t *array, unsigned int hi, cnid_t parent_id, int *no_match_indexp)
{
		unsigned int mid = ((hi - lo)/2) + lo;
		unsigned int this_id = array[mid];

		if (parent_id == this_id) {
		}
		if (parent_id < this_id) {
		}
		if (parent_id > this_id) {
		}

	/* check if lo and hi converged on the match */
	if (parent_id == array[hi]) {
	}
	if (no_match_indexp) {
		*no_match_indexp = hi;
	}
}

static int
lookup_bucket(struct access_cache *cache, int *indexp, cnid_t parent_id)
{
	int index, no_match_index;

	if (cache->numcached == 0) {
		return 0; // table is empty, so insert at index=0 and report no match
	}

	if (cache->numcached > NUM_CACHE_ENTRIES) {
		cache->numcached = NUM_CACHE_ENTRIES;
	}

	hi = cache->numcached - 1;

	index = cache_binSearch(cache->acache, hi, parent_id, &no_match_index);

	/* if no existing entry found, find index for new one */
		index = no_match_index;
}

/*
 * Add a node to the access_cache at the given index (or do a lookup first
 * to find the index if -1 is passed in).  We currently do a replace rather
 * than an insert if the cache is full.
 */
static void
add_node(struct access_cache *cache, int index, cnid_t nodeID, int access)
{
	int lookup_index = -1;

	/* need to do a lookup first if -1 passed for index */
		if (lookup_bucket(cache, &lookup_index, nodeID)) {
			if (cache->haveaccess[lookup_index] != access && cache->haveaccess[lookup_index] == ESRCH) {
				// only update an entry if the previous access was ESRCH (i.e. a scope checking error)
				cache->haveaccess[lookup_index] = access;
			}

			/* mission accomplished */
		}
			index = lookup_index;

	/* if the cache is full, do a replace rather than an insert */
	if (cache->numcached >= NUM_CACHE_ENTRIES) {
		cache->numcached = NUM_CACHE_ENTRIES-1;

		if (index > cache->numcached) {
			index = cache->numcached;
		}
	}

	if (index < cache->numcached && index < NUM_CACHE_ENTRIES && nodeID > cache->acache[index]) {
	}

	if (index >= 0 && index < cache->numcached) {
		/* only do bcopy if we're inserting */
		bcopy( cache->acache+index, cache->acache+(index+1), (cache->numcached - index)*sizeof(int) );
		bcopy( cache->haveaccess+index, cache->haveaccess+(index+1), (cache->numcached - index)*sizeof(unsigned char) );
	}

	cache->acache[index] = nodeID;
	cache->haveaccess[index] = access;
}

static int
snoop_callback(const cnode_t *cp, void *arg)
{
	struct cinfo *cip = arg;

	cip->uid = cp->c_uid;
	cip->gid = cp->c_gid;
	cip->mode = cp->c_mode;
	cip->parentcnid = cp->c_parentcnid;
	cip->recflags = cp->c_attr.ca_recflags;
}

/*
 * Lookup the cnid's attr info (uid, gid, and mode) as well as its parent id. If the item
 * isn't incore, then go to the catalog.
 */
static int
do_attr_lookup(struct hfsmount *hfsmp, struct access_cache *cache, cnid_t cnid,
	struct cnode *skip_cp, CatalogKey *keyp, struct cat_attr *cnattrp)
{
	/* if this id matches the one the fsctl was called with, skip the lookup */
	if (cnid == skip_cp->c_cnid) {
		cnattrp->ca_uid = skip_cp->c_uid;
		cnattrp->ca_gid = skip_cp->c_gid;
		cnattrp->ca_mode = skip_cp->c_mode;
		cnattrp->ca_recflags = skip_cp->c_attr.ca_recflags;
		keyp->hfsPlus.parentID = skip_cp->c_parentcnid;
	} else {
		struct cinfo c_info;

		/* otherwise, check the cnode hash in case the file/dir is incore */
		error = hfs_chash_snoop(hfsmp, cnid, 0, snoop_callback, &c_info);

		if (error == EACCES) {
		} else if (!error) {
			cnattrp->ca_uid = c_info.uid;
			cnattrp->ca_gid = c_info.gid;
			cnattrp->ca_mode = c_info.mode;
			cnattrp->ca_recflags = c_info.recflags;
			keyp->hfsPlus.parentID = c_info.parentcnid;
		}

		if (throttle_io_will_be_throttled(-1, HFSTOVFS(hfsmp)))
			throttle_lowpri_io(1);

		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);

		/* lookup this cnid in the catalog */
		error = cat_getkeyplusattr(hfsmp, cnid, keyp, cnattrp);

		hfs_systemfile_unlock(hfsmp, lockflags);
	}
}

/*
 * Compute whether we have access to the given directory (nodeID) and all its parents. Cache
 * up to CACHE_LEVELS as we progress towards the root.
 */
static int
do_access_check(struct hfsmount *hfsmp, int *err, struct access_cache *cache, HFSCatalogNodeID nodeID,
	struct cnode *skip_cp, struct proc *theProcPtr, kauth_cred_t myp_ucred,
	struct vfs_context *my_context,
	char *bitmap,
	uint32_t map_size,
	cnid_t *parents,
	uint32_t num_parents)
{
	HFSCatalogNodeID thisNodeID;
	unsigned int myPerms;
	struct cat_attr cnattr;
	int cache_index = -1, scope_index = -1, scope_idx_start = -1;
	int i = 0, ids_to_cache = 0;
	int parent_ids[CACHE_LEVELS];

	thisNodeID = nodeID;
	while (thisNodeID >= kRootDirID) {
		myResult = 0;	/* default to "no access" */

		/* check the cache before resorting to hitting the catalog */

		/* ASSUMPTION: access info of cached entries is "final"... i.e. no need
		 * to look any further after hitting cached dir */

		if (lookup_bucket(cache, &cache_index, thisNodeID)) {
			myErr = cache->haveaccess[cache_index];
			if (scope_index != -1) {
				if (myErr == ESRCH) {
				}
			} else {
				scope_index = 0;	// so we'll just use the cache result
				scope_idx_start = ids_to_cache;
			}
			myResult = (myErr == 0) ? 1 : 0;
			goto ExitThisRoutine;
		}

			tmp = cache_binSearch(parents, num_parents-1, thisNodeID, NULL);
			if (scope_index == -1)
			if (tmp != -1 && scope_idx_start == -1 && ids_to_cache < CACHE_LEVELS) {
				scope_idx_start = ids_to_cache;
			}

		/* remember which parents we want to cache */
		if (ids_to_cache < CACHE_LEVELS) {
			parent_ids[ids_to_cache] = thisNodeID;
		}
		// Inefficient (using modulo) and we might want to use a hash function, not rely on the node id to be "nice"...
		if (bitmap && map_size) {
			bitmap[(thisNodeID/8)%(map_size)] |= (1<<(thisNodeID&7));
		}
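		/*
		 * Layout note: directory id N hashes to byte (N/8) % map_size and
		 * bit (N & 7) within that byte.  A (hypothetical) userspace consumer
		 * of the returned bitmap would test the same bit, e.g.
		 *     if (bitmap[(id / 8) % map_size] & (1 << (id & 7))) ...
		 * Because of the modulo, different ids can collide on one bit, so a
		 * set bit only means "possibly interesting".
		 */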
		/* do the lookup (checks the cnode hash, then the catalog) */
		myErr = do_attr_lookup(hfsmp, cache, thisNodeID, skip_cp, &catkey, &cnattr);
			goto ExitThisRoutine;	/* no access */

		/* Root always gets access. */
		if (suser(myp_ucred, NULL) == 0) {
			thisNodeID = catkey.hfsPlus.parentID;
		}

		// if the thing has acl's, do the full permission check
		if ((cnattr.ca_recflags & kHFSHasSecurityMask) != 0) {
			/* get the vnode for this cnid */
			myErr = hfs_vget(hfsmp, thisNodeID, &vp, 0, 0);
				goto ExitThisRoutine;

			thisNodeID = VTOC(vp)->c_parentcnid;

			hfs_unlock(VTOC(vp));

			if (vnode_vtype(vp) == VDIR) {
				myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), my_context);
			} else {
				myErr = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, my_context);
			}
				goto ExitThisRoutine;
		} else {
			int mode = cnattr.ca_mode & S_IFMT;
			myPerms = DerivePermissionSummary(cnattr.ca_uid, cnattr.ca_gid, cnattr.ca_mode, hfsmp->hfs_mp, myp_ucred, theProcPtr);

			if (mode == S_IFDIR) {
				flags = R_OK | X_OK;
			}
			if ( (myPerms & flags) != flags) {
				goto ExitThisRoutine;	/* no access */
			}

			/* up the hierarchy we go */
			thisNodeID = catkey.hfsPlus.parentID;
		}
	}

	/* if here, we have access to this node */

	if (parents && myErr == 0 && scope_index == -1) {
	}

	/* cache the parent directory(ies) */
	for (i = 0; i < ids_to_cache; i++) {
		if (myErr == 0 && parents && (scope_idx_start == -1 || i > scope_idx_start)) {
			add_node(cache, -1, parent_ids[i], ESRCH);
		} else {
			add_node(cache, -1, parent_ids[i], myErr);
		}
	}
}

static int
do_bulk_access_check(struct hfsmount *hfsmp, struct vnode *vp,
	struct vnop_ioctl_args *ap, int arg_size, vfs_context_t context)
{
	/*
	 * NOTE: on entry, the vnode has an io_ref. In case this vnode
	 * happens to be in our list of file_ids, we'll note it and
	 * avoid calling hfs_chashget_nowait() on that id as that
	 * will cause a "locking against myself" panic.
	 */
	Boolean check_leaf = true;

	struct user64_ext_access_t *user_access_structp;
	struct user64_ext_access_t tmp_user_access;
	struct access_cache cache;

	int error = 0, prev_parent_check_ok=1;
	unsigned int num_files = 0;
	int num_parents = 0;
	cnid_t *parents = NULL;
	cnid_t prevParent_cnid = 0;
	unsigned int myPerms;
	struct cat_attr cnattr;
	struct cnode *skip_cp = VTOC(vp);
	kauth_cred_t cred = vfs_context_ucred(context);
	proc_t p = vfs_context_proc(context);

	is64bit = proc_is64bit(p);

	/* initialize the local cache and buffers */
	cache.numcached = 0;
	cache.cachehits = 0;
	cache.acache = NULL;
	cache.haveaccess = NULL;

	/* struct copyin done during dispatch... need to copy file_id array separately */
	if (ap->a_data == NULL) {
		goto err_exit_bulk_access;
	}

		if (arg_size != sizeof(struct user64_ext_access_t)) {
			goto err_exit_bulk_access;
		}

		user_access_structp = (struct user64_ext_access_t *)ap->a_data;

	} else if (arg_size == sizeof(struct user32_access_t)) {
		struct user32_access_t *accessp = (struct user32_access_t *)ap->a_data;

		// convert an old style bulk-access struct to the new style
		tmp_user_access.flags = accessp->flags;
		tmp_user_access.num_files = accessp->num_files;
		tmp_user_access.map_size = 0;
		tmp_user_access.file_ids = CAST_USER_ADDR_T(accessp->file_ids);
		tmp_user_access.bitmap = USER_ADDR_NULL;
		tmp_user_access.access = CAST_USER_ADDR_T(accessp->access);
		tmp_user_access.num_parents = 0;
		user_access_structp = &tmp_user_access;

	} else if (arg_size == sizeof(struct user32_ext_access_t)) {
		struct user32_ext_access_t *accessp = (struct user32_ext_access_t *)ap->a_data;

		// up-cast from a 32-bit version of the struct
		tmp_user_access.flags = accessp->flags;
		tmp_user_access.num_files = accessp->num_files;
		tmp_user_access.map_size = accessp->map_size;
		tmp_user_access.num_parents = accessp->num_parents;

		tmp_user_access.file_ids = CAST_USER_ADDR_T(accessp->file_ids);
		tmp_user_access.bitmap = CAST_USER_ADDR_T(accessp->bitmap);
		tmp_user_access.access = CAST_USER_ADDR_T(accessp->access);
		tmp_user_access.parents = CAST_USER_ADDR_T(accessp->parents);

		user_access_structp = &tmp_user_access;
	} else {
		goto err_exit_bulk_access;
	}

	map_size = user_access_structp->map_size;
	num_files = user_access_structp->num_files;
	num_parents = user_access_structp->num_parents;

	if (num_files < 1) {
		goto err_exit_bulk_access;
	}
	if (num_files > 1024) {
		goto err_exit_bulk_access;
	}
	if (num_parents > 1024) {
		goto err_exit_bulk_access;
	}

	file_ids = (int *) kalloc(sizeof(int) * num_files);
	access = (short *) kalloc(sizeof(short) * num_files);
		bitmap = (char *) kalloc(sizeof(char) * map_size);
		parents = (cnid_t *) kalloc(sizeof(cnid_t) * num_parents);
	cache.acache = (unsigned int *) kalloc(sizeof(int) * NUM_CACHE_ENTRIES);
	cache.haveaccess = (unsigned char *) kalloc(sizeof(unsigned char) * NUM_CACHE_ENTRIES);

	if (file_ids == NULL || access == NULL || (map_size != 0 && bitmap == NULL) || cache.acache == NULL || cache.haveaccess == NULL) {
			kfree(file_ids, sizeof(int) * num_files);
			kfree(bitmap, sizeof(char) * map_size);
			kfree(access, sizeof(short) * num_files);
			kfree(cache.acache, sizeof(int) * NUM_CACHE_ENTRIES);
		if (cache.haveaccess) {
			kfree(cache.haveaccess, sizeof(unsigned char) * NUM_CACHE_ENTRIES);
		}
			kfree(parents, sizeof(cnid_t) * num_parents);
	}

	// make sure the bitmap is zero'ed out...
		bzero(bitmap, (sizeof(char) * map_size));

	if ((error = copyin(user_access_structp->file_ids, (caddr_t)file_ids,
			num_files * sizeof(int)))) {
		goto err_exit_bulk_access;
	}

	if ((error = copyin(user_access_structp->parents, (caddr_t)parents,
			num_parents * sizeof(cnid_t)))) {
		goto err_exit_bulk_access;
	}

	flags = user_access_structp->flags;
	if ((flags & (F_OK | R_OK | W_OK | X_OK)) == 0) {
	}

	/* check if we've been passed leaf node ids or parent ids */
	if (flags & PARENT_IDS_FLAG) {
	}

	/* Check access to each file_id passed in */
	for (i = 0; i < num_files; i++) {
		cnid = (cnid_t) file_ids[i];

		/* root always has access */
		if ((!parents) && (!suser(cred, NULL))) {
		}

			/* do the lookup (checks the cnode hash, then the catalog) */
			error = do_attr_lookup(hfsmp, &cache, cnid, skip_cp, &catkey, &cnattr);
				access[i] = (short) error;

				// Check if the leaf matches one of the parent scopes
				leaf_index = cache_binSearch(parents, num_parents-1, cnid, NULL);
				if (leaf_index >= 0 && parents[leaf_index] == cnid)
					prev_parent_check_ok = 0;
				else if (leaf_index >= 0)
					prev_parent_check_ok = 1;

			// if the thing has acl's, do the full permission check
			if ((cnattr.ca_recflags & kHFSHasSecurityMask) != 0) {
				/* get the vnode for this cnid */
				myErr = hfs_vget(hfsmp, cnid, &cvp, 0, 0);

				hfs_unlock(VTOC(cvp));

				if (vnode_vtype(cvp) == VDIR) {
					myErr = vnode_authorize(cvp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), context);
				} else {
					myErr = vnode_authorize(cvp, NULL, KAUTH_VNODE_READ_DATA, context);
				}
			} else {
				/* before calling CheckAccess(), check the target file for read access */
				myPerms = DerivePermissionSummary(cnattr.ca_uid, cnattr.ca_gid,
					cnattr.ca_mode, hfsmp->hfs_mp, cred, p);

				/* fail fast if no access */
				if ((myPerms & flags) == 0) {
				}
			}
		} else {
			/* we were passed an array of parent ids */
			catkey.hfsPlus.parentID = cnid;
		}

		/* if the last guy had the same parent and had access, we're done */
		if (i > 0 && catkey.hfsPlus.parentID == prevParent_cnid && access[i-1] == 0 && prev_parent_check_ok) {
		}

		myaccess = do_access_check(hfsmp, &error, &cache, catkey.hfsPlus.parentID,
			skip_cp, p, cred, context, bitmap, map_size, parents, num_parents);

		if (myaccess || (error == ESRCH && leaf_index != -1)) {
			access[i] = 0;	// have access.. no errors to report
		} else {
			access[i] = (error != 0 ? (short) error : EACCES);
		}

		prevParent_cnid = catkey.hfsPlus.parentID;
	}

	/* copyout the access array */
	if ((error = copyout((caddr_t)access, user_access_structp->access,
			num_files * sizeof (short)))) {
		goto err_exit_bulk_access;
	}
	if (map_size && bitmap) {
		if ((error = copyout((caddr_t)bitmap, user_access_structp->bitmap,
				map_size * sizeof (char)))) {
			goto err_exit_bulk_access;
		}
	}

err_exit_bulk_access:

		kfree(file_ids, sizeof(int) * num_files);
		kfree(parents, sizeof(cnid_t) * num_parents);
		kfree(bitmap, sizeof(char) * map_size);
		kfree(access, sizeof(short) * num_files);
		kfree(cache.acache, sizeof(int) * NUM_CACHE_ENTRIES);
	if (cache.haveaccess)
		kfree(cache.haveaccess, sizeof(unsigned char) * NUM_CACHE_ENTRIES);
}

/* end "bulk-access" support */
/*
 * Control filesystem operating characteristics.
 */
int
hfs_vnop_ioctl( struct vnop_ioctl_args /* {
		vfs_context_t a_context;
	} */ *ap)
{
	struct vnode * vp = ap->a_vp;
	struct hfsmount *hfsmp = VTOHFS(vp);
	vfs_context_t context = ap->a_context;
	kauth_cred_t cred = vfs_context_ucred(context);
	proc_t p = vfs_context_proc(context);
	struct vfsstatfs *vfsp;
	off_t jnl_start, jnl_size;
	struct hfs_journal_info *jip;
	off_t uncompressed_size = -1;
	int decmpfs_error = 0;

	if (ap->a_command == F_RDADVISE) {
		/* we need to inspect the decmpfs state of the file as early as possible */
		compressed = hfs_file_is_compressed(VTOC(vp), 0);
			if (VNODE_IS_RSRC(vp)) {
				/* if this is the resource fork, treat it as if it were empty */
				uncompressed_size = 0;
			} else {
				decmpfs_error = hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &uncompressed_size, 0);
				if (decmpfs_error != 0) {
					/* failed to get the uncompressed size, we'll check for this later */
					uncompressed_size = -1;
				}
			}
	}
#endif /* HFS_COMPRESSION */

	is64bit = proc_is64bit(p);

	if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
	}
#endif /* CONFIG_PROTECT */

	switch (ap->a_command) {

		struct vnode *file_vp;

		/* Caller must be owner of file system. */
		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
		}
		/* Target vnode must be file system's root. */
		if (!vnode_isvroot(vp)) {
		}
		bufptr = (char *)ap->a_data;
		cnid = strtoul(bufptr, NULL, 10);
		if (ap->a_fflag & HFS_GETPATH_VOLUME_RELATIVE) {
			flags |= BUILDPATH_VOLUME_RELATIVE;
		}

		/* We need to call hfs_vfs_vget to leverage the code that will
		 * fix the origin list for us if needed, as opposed to calling
		 * hfs_vget, since we will need the parent for build_path call.
		 */
		if ((error = hfs_vfs_vget(HFSTOVFS(hfsmp), cnid, &file_vp, context))) {
		}
		error = build_path(file_vp, bufptr, sizeof(pathname_t), &outlen, flags, context);

	case HFS_TRANSFER_DOCUMENT_ID:
	{
		struct cnode *cp = NULL;
		u_int32_t to_fd = *(u_int32_t *)ap->a_data;
		struct fileproc *to_fp;
		struct vnode *to_vp;
		struct cnode *to_cp;

		if ((error = fp_getfvp(p, to_fd, &to_fp, &to_vp)) != 0) {
			//printf("could not get the vnode for fd %d (err %d)\n", to_fd, error);
		}
		if ( (error = vnode_getwithref(to_vp)) ) {
		}

		if (VTOHFS(to_vp) != hfsmp) {
			goto transfer_cleanup;
		}

		int need_unlock = 1;
		to_cp = VTOC(to_vp);
		error = hfs_lockpair(cp, to_cp, HFS_EXCLUSIVE_LOCK);
			//printf("could not lock the pair of cnodes (error %d)\n", error);
			goto transfer_cleanup;

		if (!(cp->c_bsdflags & UF_TRACKED)) {
		} else if (to_cp->c_bsdflags & UF_TRACKED) {
			// if the destination is already tracked, return an error
			// as otherwise it's a silent deletion of the target's
			// document-id
		} else if (S_ISDIR(cp->c_attr.ca_mode) || S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
			// we can use the FndrExtendedFileInfo because the doc-id is the first
			// thing in both it and the ExtendedDirInfo struct which is fixed in
			// format and can not change layout
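			// (The casts below read the second, "extended" half of the 32-byte
			// Finder info area, i.e. offset 16, where document_id is the first
			// field for both files and directories.)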
			struct FndrExtendedFileInfo *f_extinfo = (struct FndrExtendedFileInfo *)((u_int8_t*)cp->c_finderinfo + 16);
			struct FndrExtendedFileInfo *to_extinfo = (struct FndrExtendedFileInfo *)((u_int8_t*)to_cp->c_finderinfo + 16);

			if (f_extinfo->document_id == 0) {
				hfs_unlockpair(cp, to_cp);	// have to unlock to be able to get a new-id

				if ((error = hfs_generate_document_id(hfsmp, &new_id)) == 0) {
					// re-lock the pair now that we have the document-id
					hfs_lockpair(cp, to_cp, HFS_EXCLUSIVE_LOCK);
					f_extinfo->document_id = new_id;
				} else {
					goto transfer_cleanup;
				}
			}

			to_extinfo->document_id = f_extinfo->document_id;
			f_extinfo->document_id = 0;
			//printf("TRANSFERRING: doc-id %d from ino %d to ino %d\n", to_extinfo->document_id, cp->c_fileid, to_cp->c_fileid);

			// make sure the destination is also UF_TRACKED
			to_cp->c_bsdflags |= UF_TRACKED;
			cp->c_bsdflags &= ~UF_TRACKED;

			// mark the cnodes dirty
			cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;
			to_cp->c_flag |= C_MODIFIED | C_FORCEUPDATE;

			if ((error = hfs_start_transaction(hfsmp)) == 0) {
				lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);

				(void) cat_update(hfsmp, &cp->c_desc, &cp->c_attr, NULL, NULL);
				(void) cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr, NULL, NULL);

				hfs_systemfile_unlock (hfsmp, lockflags);
				(void) hfs_end_transaction(hfsmp);
			}

			add_fsevent(FSE_DOCID_CHANGED, context,
				FSE_ARG_DEV,   hfsmp->hfs_raw_dev,
				FSE_ARG_INO,   (ino64_t)cp->c_fileid,	// src inode #
				FSE_ARG_INO,   (ino64_t)to_cp->c_fileid,	// dst inode #
				FSE_ARG_INT32, to_extinfo->document_id,

			hfs_unlockpair(cp, to_cp);	// unlock this so we can send the fsevents

			if (need_fsevent(FSE_STAT_CHANGED, vp)) {
				add_fsevent(FSE_STAT_CHANGED, context, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
			}
			if (need_fsevent(FSE_STAT_CHANGED, to_vp)) {
				add_fsevent(FSE_STAT_CHANGED, context, FSE_ARG_VNODE, to_vp, FSE_ARG_DONE);
			}

			hfs_unlockpair(cp, to_cp);	// unlock this so we can send the fsevents
		}
			hfs_unlockpair(cp, to_cp);

		/* Caller must be owner of file system. */
		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
		}
		/* Target vnode must be file system's root. */
		if (!vnode_isvroot(vp)) {
		}
		linkfileid = *(cnid_t *)ap->a_data;
		if (linkfileid < kHFSFirstUserCatalogNodeID) {
		}
		if ((error = hfs_lookup_siblinglinks(hfsmp, linkfileid, &prevlinkid, &nextlinkid))) {
		}
		if (ap->a_command == HFS_NEXT_LINK) {
			*(cnid_t *)ap->a_data = nextlinkid;
		} else {
			*(cnid_t *)ap->a_data = prevlinkid;
		}

	case HFS_RESIZE_PROGRESS: {

		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
			return (EACCES); /* must be owner of file system */
		}
		if (!vnode_isvroot(vp)) {
		}
		/* file system must not be mounted read-only */
		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		}

		return hfs_resize_progress(hfsmp, (u_int32_t *)ap->a_data);
	}

	case HFS_RESIZE_VOLUME: {

		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
			return (EACCES); /* must be owner of file system */
		}
		if (!vnode_isvroot(vp)) {
		}

		/* filesystem must not be mounted read only */
		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		}
		newsize = *(u_int64_t *)ap->a_data;
		cursize = (u_int64_t)hfsmp->totalBlocks * (u_int64_t)hfsmp->blockSize;

		if (newsize > cursize) {
			return hfs_extendfs(hfsmp, *(u_int64_t *)ap->a_data, context);
		} else if (newsize < cursize) {
			return hfs_truncatefs(hfsmp, *(u_int64_t *)ap->a_data, context);
		}
	}

	case HFS_CHANGE_NEXT_ALLOCATION: {
		int error = 0;		/* Assume success */

		if (vnode_vfsisrdonly(vp)) {
		}
		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
			return (EACCES); /* must be owner of file system */
		}
		if (!vnode_isvroot(vp)) {
		}
		hfs_lock_mount(hfsmp);
		location = *(u_int32_t *)ap->a_data;
		if ((location >= hfsmp->allocLimit) &&
			(location != HFS_NO_UPDATE_NEXT_ALLOCATION)) {
			goto fail_change_next_allocation;
		}
		/* Return previous value. */
		*(u_int32_t *)ap->a_data = hfsmp->nextAllocation;
		if (location == HFS_NO_UPDATE_NEXT_ALLOCATION) {
			/* On magic value for location, set nextAllocation to next block
			 * after metadata zone and set flag in mount structure to indicate
			 * that nextAllocation should not be updated again.
			 */
			if (hfsmp->hfs_metazone_end != 0) {
				HFS_UPDATE_NEXT_ALLOCATION(hfsmp, hfsmp->hfs_metazone_end + 1);
			}
			hfsmp->hfs_flags |= HFS_SKIP_UPDATE_NEXT_ALLOCATION;
		} else {
			hfsmp->hfs_flags &= ~HFS_SKIP_UPDATE_NEXT_ALLOCATION;
			HFS_UPDATE_NEXT_ALLOCATION(hfsmp, location);
		}
		MarkVCBDirty(hfsmp);
fail_change_next_allocation:
		hfs_unlock_mount(hfsmp);
	}

	case HFS_SETBACKINGSTOREINFO: {
		struct vnode * bsfs_rootvp;
		struct vnode * di_vp;
		struct hfs_backingstoreinfo *bsdata;

		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		}
		if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
		}
		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
			return (EACCES); /* must be owner of file system */
		}
		bsdata = (struct hfs_backingstoreinfo *)ap->a_data;
		if (bsdata == NULL) {
		}
		if ((error = file_vnode(bsdata->backingfd, &di_vp))) {
		}
		if ((error = vnode_getwithref(di_vp))) {
			file_drop(bsdata->backingfd);
		}

		if (vnode_mount(vp) == vnode_mount(di_vp)) {
			(void)vnode_put(di_vp);
			file_drop(bsdata->backingfd);
		}

		/*
		 * Obtain the backing fs root vnode and keep a reference
		 * on it.  This reference will be dropped in hfs_unmount.
		 */
		error = VFS_ROOT(vnode_mount(di_vp), &bsfs_rootvp, NULL);	/* XXX use context! */
			(void)vnode_put(di_vp);
			file_drop(bsdata->backingfd);

		vnode_ref(bsfs_rootvp);
		vnode_put(bsfs_rootvp);

		hfs_lock_mount(hfsmp);
		hfsmp->hfs_backingfs_rootvp = bsfs_rootvp;
		hfsmp->hfs_flags |= HFS_HAS_SPARSE_DEVICE;
		hfsmp->hfs_sparsebandblks = bsdata->bandsize / hfsmp->blockSize * 4;
		hfs_unlock_mount(hfsmp);

		/* We check the MNTK_VIRTUALDEV bit instead of marking the dependent process */

		/*
		 * If the sparse image is on a sparse image file (as opposed to a sparse
		 * bundle), then we may need to limit the free space to the maximum size
		 * of a file on that volume.  So we query (using pathconf), and if we get
		 * a meaningful result, we cache the number of blocks for later use.
		 */
		hfsmp->hfs_backingfs_maxblocks = 0;
		if (vnode_vtype(di_vp) == VREG) {
			terr = vn_pathconf(di_vp, _PC_FILESIZEBITS, &hostbits, context);
			if (terr == 0 && hostbits != 0 && hostbits < 64) {
				u_int64_t hostfilesizemax = ((u_int64_t)1) << hostbits;

				hfsmp->hfs_backingfs_maxblocks = hostfilesizemax / hfsmp->blockSize;
			}
		}

		/* The free extent cache is managed differently for sparse devices.
		 * There is a window between which the volume is mounted and the
		 * device is marked as sparse, so the free extent cache for this
		 * volume is currently initialized as normal volume (sorted by block
		 * count).  Reset the cache so that it will be rebuilt again
		 * for sparse device (sorted by start block).
		 */
		ResetVCBFreeExtCache(hfsmp);

		(void)vnode_put(di_vp);
		file_drop(bsdata->backingfd);
	}

	case HFS_CLRBACKINGSTOREINFO: {
		struct vnode * tmpvp;

		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
			return (EACCES); /* must be owner of file system */
		}
		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		}

		if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) &&
		    hfsmp->hfs_backingfs_rootvp) {

			hfs_lock_mount(hfsmp);
			hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE;
			tmpvp = hfsmp->hfs_backingfs_rootvp;
			hfsmp->hfs_backingfs_rootvp = NULLVP;
			hfsmp->hfs_sparsebandblks = 0;
			hfs_unlock_mount(hfsmp);
		}
	}
#endif /* HFS_SPARSE_DEV */

	/* Change the next CNID stored in the VH */
	case HFS_CHANGE_NEXTCNID: {
		int error = 0;		/* Assume success */

		if (vnode_vfsisrdonly(vp)) {
		}
		vfsp = vfs_statfs(HFSTOVFS(hfsmp));
		if (suser(cred, NULL) &&
			kauth_cred_getuid(cred) != vfsp->f_owner) {
			return (EACCES); /* must be owner of file system */
		}

		fileid = *(u_int32_t *)ap->a_data;

		/* Must have catalog lock excl. to advance the CNID pointer */
		lockflags = hfs_systemfile_lock (hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);

		hfs_lock_mount(hfsmp);

		/* If it is less than the current next CNID, force the wraparound bit to be set */
		if (fileid < hfsmp->vcbNxtCNID) {
		}

		/* Return previous value. */
		*(u_int32_t *)ap->a_data = hfsmp->vcbNxtCNID;

		hfsmp->vcbNxtCNID = fileid;

			hfsmp->vcbAtrb |= kHFSCatalogNodeIDsReusedMask;

		MarkVCBDirty(hfsmp);
		hfs_unlock_mount(hfsmp);
		hfs_systemfile_unlock (hfsmp, lockflags);
	}

		mp = vnode_mount(vp);
		hfsmp = VFSTOHFS(mp);

		vfsp = vfs_statfs(mp);

		if (kauth_cred_getuid(cred) != vfsp->f_owner &&
		    !kauth_cred_issuser(cred))

		return hfs_freeze(hfsmp);

		vfsp = vfs_statfs(vnode_mount(vp));
		if (kauth_cred_getuid(cred) != vfsp->f_owner &&
		    !kauth_cred_issuser(cred))

		return hfs_thaw(hfsmp, current_proc());

	case HFS_BULKACCESS_FSCTL: {

		if (hfsmp->hfs_flags & HFS_STANDARD) {
		}

			size = sizeof(struct user64_access_t);
			size = sizeof(struct user32_access_t);

		return do_bulk_access_check(hfsmp, vp, ap, size, context);
	}

	case HFS_EXT_BULKACCESS_FSCTL: {

		if (hfsmp->hfs_flags & HFS_STANDARD) {
		}

			size = sizeof(struct user64_ext_access_t);
			size = sizeof(struct user32_ext_access_t);

		return do_bulk_access_check(hfsmp, vp, ap, size, context);
	}

	case HFS_SET_XATTREXTENTS_STATE: {

		if (ap->a_data == NULL) {
		}

		state = *(int *)ap->a_data;

		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		}

		/* Super-user can enable or disable extent-based extended
		 * attribute support on a volume
		 * Note: Starting Mac OS X 10.7, extent-based extended attributes
		 * are enabled by default, so any change will be transient only
		 * till the volume is remounted.
		 */
		if (!kauth_cred_issuser(kauth_cred_get())) {
		}
		if (state == 0 || state == 1)
			return hfs_set_volxattr(hfsmp, HFS_SET_XATTREXTENTS_STATE, state);
	}

	case F_SETSTATICCONTENT: {
		int enable_static = 0;
		struct cnode *cp = NULL;
		/*
		 * lock the cnode, decorate the cnode flag, and bail out.
		 * VFS should have already authenticated the caller for us.
		 */

		/*
		 * Note that even though ap->a_data is of type caddr_t,
		 * the fcntl layer at the syscall handler will pass in NULL
		 * or 1 depending on what the argument supplied to the fcntl
		 * was.  So it is in fact correct to check the ap->a_data
		 * argument for zero or non-zero value when deciding whether or not
		 * to enable the static bit in the cnode.
		 */
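		/*
		 * Illustrative only (assumes the private F_SETSTATICCONTENT fcntl is
		 * reachable from user space): a caller would request the hint roughly as
		 *     fcntl(fd, F_SETSTATICCONTENT, 1);   // non-zero argument: set
		 *     fcntl(fd, F_SETSTATICCONTENT, 0);   // zero argument: clear
		 */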
2188 if (hfsmp
->hfs_flags
& HFS_READ_ONLY
) {
2193 error
= hfs_lock (cp
, HFS_EXCLUSIVE_LOCK
, HFS_LOCK_DEFAULT
);
2195 if (enable_static
) {
2196 cp
->c_flag
|= C_SSD_STATIC
;
2199 cp
->c_flag
&= ~C_SSD_STATIC
;
2206 case F_SET_GREEDY_MODE
: {
2208 int enable_greedy_mode
= 0;
2209 struct cnode
*cp
= NULL
;
2211 * lock the cnode, decorate the cnode flag, and bail out.
2212 * VFS should have already authenticated the caller for us.
2217 * Note that even though ap->a_data is of type caddr_t,
2218 * the fcntl layer at the syscall handler will pass in NULL
2219 * or 1 depending on what the argument supplied to the fcntl
2220 * was. So it is in fact correct to check the ap->a_data
2221 * argument for zero or non-zero value when deciding whether or not
2222 * to enable the greedy mode bit in the cnode.
2224 enable_greedy_mode
= 1;
2226 if (hfsmp
->hfs_flags
& HFS_READ_ONLY
) {
2231 error
= hfs_lock (cp
, HFS_EXCLUSIVE_LOCK
, HFS_LOCK_DEFAULT
);
2233 if (enable_greedy_mode
) {
2234 cp
->c_flag
|= C_SSD_GREEDY_MODE
;
2237 cp
->c_flag
&= ~C_SSD_GREEDY_MODE
;
	case F_SETIOTYPE: {
		uint32_t iotypeflag = 0;

		struct cnode *cp = NULL;
		/*
		 * lock the cnode, decorate the cnode flag, and bail out.
		 * VFS should have already authenticated the caller for us.
		 */

		if (ap->a_data == NULL) {

		/*
		 * Note that even though ap->a_data is of type caddr_t, we
		 * can only use 32 bits of flag values.
		 */
		iotypeflag = (uint32_t) ap->a_data;
		switch (iotypeflag) {
			case F_IOTYPE_ISOCHRONOUS:

		if (hfsmp->hfs_flags & HFS_READ_ONLY) {

		error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

		switch (iotypeflag) {
			case F_IOTYPE_ISOCHRONOUS:
				cp->c_flag |= C_IO_ISOCHRONOUS;
	case F_MAKECOMPRESSED: {
		uint32_t gen_counter;
		struct cnode *cp = NULL;
		int reset_decmp = 0;

		if (hfsmp->hfs_flags & HFS_READ_ONLY) {

		/*
		 * acquire & lock the cnode.
		 * VFS should have already authenticated the caller for us.
		 */

		/*
		 * Cast the pointer into a uint32_t so we can extract the
		 * supplied generation counter.
		 */
		gen_counter = *((uint32_t*)ap->a_data);

		/* Grab truncate lock first; we may truncate the file */
		hfs_lock_truncate (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

		error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

			hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);

		/* Are there any other usecounts/FDs? */
		if (vnode_isinuse(vp, 1)) {

			hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);

		/* now we have the cnode locked down; Validate arguments */
		if (cp->c_attr.ca_flags & (UF_IMMUTABLE | UF_COMPRESSED)) {
			/* EINVAL if you are trying to manipulate an IMMUTABLE file */

			hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);

		if ((hfs_get_gencount (cp)) == gen_counter) {
			/*
			 * OK, the gen_counter matched. Go for it:
			 * Toggle state bits, truncate file, and suppress mtime update
			 */

			cp->c_bsdflags |= UF_COMPRESSED;

			error = hfs_truncate(vp, 0, IO_NDELAY, HFS_TRUNCATE_SKIPTIMES,

		/* Unlock cnode before executing decmpfs; they may need to get an EA */

		/*
		 * Reset the decmp state while still holding the truncate lock. We need to
		 * serialize here against a listxattr on this node which may occur at any
		 *
		 * Even if '0/skiplock' is passed in 2nd argument to hfs_file_is_compressed,
		 * that will still potentially require getting the com.apple.decmpfs EA. If the
		 * EA is required, then we can't hold the cnode lock, because the getxattr call is
		 * generic (through VFS), and can't pass along any info telling it that we're already
		 * holding it (the lock). If we don't serialize, then we risk listxattr stopping
		 * and trying to fill in the hfs_file_is_compressed info during the callback
		 * operation, which will result in deadlock against the b-tree node.
		 *
		 * So, to serialize against listxattr (which will grab buf_t meta references on
		 * the b-tree blocks), we hold the truncate lock as we're manipulating the
		 */
		if ((reset_decmp) && (error == 0)) {
			decmpfs_cnode *dp = VTOCMP (vp);

				decmpfs_cnode_set_vnode_state(dp, FILE_TYPE_UNKNOWN, 0);

			/* Initialize the decmpfs node as needed */
			(void) hfs_file_is_compressed (cp, 0); /* ok to take lock */

		hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
	case F_SETBACKINGSTORE: {

		/*
		 * See comment in F_SETSTATICCONTENT re: using
		 * a null check for a_data
		 */

			error = hfs_set_backingstore (vp, 1);

			error = hfs_set_backingstore (vp, 0);

	case F_GETPATH_MTMINFO: {

		int *data = (int*) ap->a_data;

		/* Ask if this is a backingstore vnode */
		error = hfs_is_backingstore (vp, data);
		if (hfsmp->hfs_flags & HFS_READ_ONLY) {

		error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

		error = hfs_fsync(vp, MNT_WAIT, TRUE, p);
		hfs_unlock(VTOC(vp));

		register struct cnode *cp;

		if (!vnode_isreg(vp))

		error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

		/*
		 * used by regression test to determine if
		 * all the dirty pages (via write) have been cleaned
		 * after a call to 'fsync'.
		 */
		error = is_file_clean(vp, VTOF(vp)->ff_size);

		register struct radvisory *ra;
		struct filefork *fp;

		if (!vnode_isreg(vp))

		ra = (struct radvisory *)(ap->a_data);

		/* Protect against a size change. */
		hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

		if (compressed && (uncompressed_size == -1)) {
			/* fetching the uncompressed size failed above, so return the error */
			error = decmpfs_error;
		} else if ((compressed && (ra->ra_offset >= uncompressed_size)) ||
			   (!compressed && (ra->ra_offset >= fp->ff_size))) {

#else /* HFS_COMPRESSION */
		if (ra->ra_offset >= fp->ff_size) {

#endif /* HFS_COMPRESSION */

		error = advisory_read(vp, fp->ff_size, ra->ra_offset, ra->ra_count);

		hfs_unlock_truncate(VTOC(vp), HFS_LOCK_DEFAULT);
	case _IOC(IOC_OUT,'h', 4, 0):     /* Create date in local time */

			*(user_time_t *)(ap->a_data) = (user_time_t) (to_bsd_time(VTOVCB(vp)->localCreateDate));

			*(user32_time_t *)(ap->a_data) = (user32_time_t) (to_bsd_time(VTOVCB(vp)->localCreateDate));

	case SPOTLIGHT_FSCTL_GET_MOUNT_TIME:
		*(uint32_t *)ap->a_data = hfsmp->hfs_mount_time;

	case SPOTLIGHT_FSCTL_GET_LAST_MTIME:
		*(uint32_t *)ap->a_data = hfsmp->hfs_last_mounted_mtime;

	case HFS_FSCTL_GET_VERY_LOW_DISK:
		*(uint32_t*)ap->a_data = hfsmp->hfs_freespace_notify_dangerlimit;

	case HFS_FSCTL_SET_VERY_LOW_DISK:
		if (*(uint32_t *)ap->a_data >= hfsmp->hfs_freespace_notify_warninglimit) {

		hfsmp->hfs_freespace_notify_dangerlimit = *(uint32_t *)ap->a_data;

	case HFS_FSCTL_GET_LOW_DISK:
		*(uint32_t*)ap->a_data = hfsmp->hfs_freespace_notify_warninglimit;

	case HFS_FSCTL_SET_LOW_DISK:
		if (   *(uint32_t *)ap->a_data >= hfsmp->hfs_freespace_notify_desiredlevel
		    || *(uint32_t *)ap->a_data <= hfsmp->hfs_freespace_notify_dangerlimit) {

		hfsmp->hfs_freespace_notify_warninglimit = *(uint32_t *)ap->a_data;

	case HFS_FSCTL_GET_DESIRED_DISK:
		*(uint32_t*)ap->a_data = hfsmp->hfs_freespace_notify_desiredlevel;

	case HFS_FSCTL_SET_DESIRED_DISK:
		if (*(uint32_t *)ap->a_data <= hfsmp->hfs_freespace_notify_warninglimit) {

		hfsmp->hfs_freespace_notify_desiredlevel = *(uint32_t *)ap->a_data;
	case HFS_VOLUME_STATUS:
		*(uint32_t *)ap->a_data = hfsmp->hfs_notification_conditions;

	case HFS_SET_BOOT_INFO:
		if (!vnode_isvroot(vp))

		if (!kauth_cred_issuser(cred) && (kauth_cred_getuid(cred) != vfs_statfs(HFSTOVFS(hfsmp))->f_owner))
			return(EACCES);	/* must be superuser or owner of filesystem */
		if (hfsmp->hfs_flags & HFS_READ_ONLY) {

		hfs_lock_mount (hfsmp);
		bcopy(ap->a_data, &hfsmp->vcbFndrInfo, sizeof(hfsmp->vcbFndrInfo));
		hfs_unlock_mount (hfsmp);
		(void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);

	case HFS_GET_BOOT_INFO:
		if (!vnode_isvroot(vp))

		hfs_lock_mount (hfsmp);
		bcopy(&hfsmp->vcbFndrInfo, ap->a_data, sizeof(hfsmp->vcbFndrInfo));
		hfs_unlock_mount(hfsmp);

	case HFS_MARK_BOOT_CORRUPT:
		/* Mark the boot volume corrupt by setting
		 * kHFSVolumeInconsistentBit in the volume header. This will
		 * force fsck_hfs on next mount.
		 */
		if (!kauth_cred_issuser(kauth_cred_get())) {

		/* Allowed only on the root vnode of the boot volume */
		if (!(vfs_flags(HFSTOVFS(hfsmp)) & MNT_ROOTFS) ||
		    !vnode_isvroot(vp)) {

		if (hfsmp->hfs_flags & HFS_READ_ONLY) {

		printf ("hfs_vnop_ioctl: Marking the boot volume corrupt.\n");
		hfs_mark_inconsistent(hfsmp, HFS_FSCK_FORCED);
	case HFS_FSCTL_GET_JOURNAL_INFO:
		jip = (struct hfs_journal_info*)ap->a_data;

		if (hfsmp->jnl == NULL) {

		jnl_start = (off_t)(hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset;
		jnl_size = (off_t)hfsmp->jnl_size;

		jip->jstart = jnl_start;
		jip->jsize = jnl_size;

	case HFS_SET_ALWAYS_ZEROFILL: {
		struct cnode *cp = VTOC(vp);

		if (*(int *)ap->a_data) {
			cp->c_flag |= C_ALWAYS_ZEROFILL;

			cp->c_flag &= ~C_ALWAYS_ZEROFILL;

	case HFS_DISABLE_METAZONE: {
		/* Only root can disable metadata zone */
		if (!kauth_cred_issuser(kauth_cred_get())) {

		if (hfsmp->hfs_flags & HFS_READ_ONLY) {

		/* Disable metadata zone now */
		(void) hfs_metadatazone_init(hfsmp, true);
		printf ("hfs: Disabling metadata zone on %s\n", hfsmp->vcbVN);

	case HFS_FSINFO_METADATA_BLOCKS: {

		struct hfsinfo_metadata *hinfo;

		hinfo = (struct hfsinfo_metadata *)ap->a_data;

		/* Get information about number of metadata blocks */
		error = hfs_getinfo_metadata_blocks(hfsmp, hinfo);

	case HFS_GET_FSINFO: {
		hfs_fsinfo *fsinfo = (hfs_fsinfo *)ap->a_data;

		/* Only root is allowed to get fsinfo */
		if (!kauth_cred_issuser(kauth_cred_get())) {

		/*
		 * Make sure that the caller's version number matches with
		 * the kernel's version number. This will make sure that
		 * if the structures being read/written into are changed
		 * by the kernel, the caller will not read incorrect data.
		 *
		 * The first three fields --- request_type, version and
		 * flags are the same for all the hfs_fsinfo structures, so
		 * we can access the version number by assuming any
		 * structure for now.
		 */
		if (fsinfo->header.version != HFS_FSINFO_VERSION) {

		/* Make sure that the current file system is not marked inconsistent */
		if (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) {

		return hfs_get_fsinfo(hfsmp, ap->a_data);
	case HFS_CS_FREESPACE_TRIM: {

		/* Only root allowed */
		if (!kauth_cred_issuser(kauth_cred_get())) {

		/*
		 * This core functionality is similar to hfs_scan_blocks().
		 * The main difference is that hfs_scan_blocks() is called
		 * as part of mount where we are assured that the journal is
		 * empty to start with. This fcntl() can be called on a
		 * mounted volume, therefore it has to flush the content of
		 * the journal as well as ensure the state of summary table.
		 *
		 * This fcntl scans over the entire allocation bitmap,
		 * creates a list of all the free blocks, and issues TRIM
		 * down to the underlying device. This can take a long time
		 * as it can generate up to 512MB of read I/O.
		 */

		if ((hfsmp->hfs_flags & HFS_SUMMARY_TABLE) == 0) {
			error = hfs_init_summary(hfsmp);

				printf("hfs: fsctl() could not initialize summary table for %s\n", hfsmp->vcbVN);

		/*
		 * The journal maintains list of recently deallocated blocks to
		 * issue DKIOCUNMAPs when the corresponding journal transaction is
		 * flushed to the disk. To avoid any race conditions, we only
		 * want one active trim list and only one thread issuing DKIOCUNMAPs.
		 * Therefore we make sure that the journal trim list is sync'ed,
		 * empty, and not modifiable for the duration of our scan.
		 *
		 * Take the journal lock before flushing the journal to the disk.
		 * We will keep holding the journal lock until we acquire the
		 * bitmap lock, to make sure that no new journal transactions can
		 * start. This will make sure that the journal trim list is not
		 * modified after the journal flush and before getting bitmap lock.
		 * We can release the journal lock after we acquire the bitmap
		 * lock as it will prevent any further block deallocations.
		 */
		hfs_journal_lock(hfsmp);

		/* Flush the journal and wait for all I/Os to finish up */
		error = hfs_journal_flush(hfsmp, TRUE);

			hfs_journal_unlock(hfsmp);

		/* Take bitmap lock to ensure it is not being modified */
		lockflags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK);

		/* Release the journal lock */
		hfs_journal_unlock(hfsmp);

		/*
		 * ScanUnmapBlocks reads the bitmap in large block size
		 * (up to 1MB) unlike the runtime which reads the bitmap
		 * in the 4K block size. This can cause buf_t collisions
		 * and potential data corruption. To avoid this, we
		 * invalidate all the existing buffers associated with
		 * the bitmap vnode before scanning it.
		 *
		 * Note: ScanUnmapBlocks() cleans up all the buffers
		 * after itself, so there won't be any large buffers left
		 * for us to clean up after it returns.
		 */
		error = buf_invalidateblks(hfsmp->hfs_allocation_vp, 0, 0, 0);

			hfs_systemfile_unlock(hfsmp, lockflags);

		/* Traverse bitmap and issue DKIOCUNMAPs */
		error = ScanUnmapBlocks(hfsmp);

		hfs_systemfile_unlock(hfsmp, lockflags);
hfs_vnop_select(__unused struct vnop_select_args *ap)
/*
	struct vnop_select_args {
		vfs_context_t a_context;
	};
*/
	/*
	 * We should really check to see if I/O is possible.
	 */

/*
 * Converts a logical block number to a physical block, and optionally returns
 * the amount of remaining blocks in a run. The logical block is based on hfsNode.logBlockSize.
 * The physical block number is based on the device block size, currently 512 bytes.
 * The block run is returned in logical blocks, and is the REMAINING amount of blocks
 */
hfs_bmap(struct vnode *vp, daddr_t bn, struct vnode **vpp, daddr64_t *bnp, unsigned int *runp)

	struct filefork *fp = VTOF(vp);
	struct hfsmount *hfsmp = VTOHFS(vp);
	int retval = E_NONE;
	u_int32_t logBlockSize;
	size_t bytesContAvail = 0;
	off_t blockposition;

	/*
	 * Check for underlying vnode requests and ensure that logical
	 * to physical mapping is requested.
	 */

		*vpp = hfsmp->hfs_devvp;

	logBlockSize = GetLogicalBlockSize(vp);
	blockposition = (off_t)bn * logBlockSize;

	lockExtBtree = overflow_extents(fp);

		lockflags = hfs_systemfile_lock(hfsmp, SFL_EXTENTS, HFS_EXCLUSIVE_LOCK);

	retval = MacToVFSError(
			MapFileBlockC (HFSTOVCB(hfsmp),

		hfs_systemfile_unlock(hfsmp, lockflags);

	if (retval == E_NONE) {
		/* Figure out how many read ahead blocks there are */

		if (can_cluster(logBlockSize)) {
			/* Make sure this result never goes negative: */
			*runp = (bytesContAvail < logBlockSize) ? 0 : (bytesContAvail / logBlockSize) - 1;
/*
 * Convert logical block number to file offset.
 */
hfs_vnop_blktooff(struct vnop_blktooff_args *ap)
/*
	struct vnop_blktooff_args {
	};
*/

	if (ap->a_vp == NULL)

	*ap->a_offset = (off_t)ap->a_lblkno * (off_t)GetLogicalBlockSize(ap->a_vp);

/*
 * Convert file offset to logical block number.
 */
hfs_vnop_offtoblk(struct vnop_offtoblk_args *ap)
/*
	struct vnop_offtoblk_args {
		daddr64_t *a_lblkno;
	};
*/

	if (ap->a_vp == NULL)

	*ap->a_lblkno = (daddr64_t)(ap->a_offset / (off_t)GetLogicalBlockSize(ap->a_vp));
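/*
 * The two conversions above are inverses modulo truncation: with a 4096-byte
 * logical block size, block 3 maps to offset 12288, and any offset in the
 * range 12288..16383 maps back to logical block 3.
 */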
/*
 * Map file offset to physical block number.
 *
 * If this function is called for write operation, and if the file
 * had virtual blocks allocated (delayed allocation), real blocks
 * are allocated by calling ExtendFileC().
 *
 * If this function is called for read operation, and if the file
 * had virtual blocks allocated (delayed allocation), no change
 * to the size of file is done, and if required, rangelist is
 * searched for mapping.
 *
 * System file cnodes are expected to be locked (shared or exclusive).
 */
hfs_vnop_blockmap(struct vnop_blockmap_args *ap)
/*
	struct vnop_blockmap_args {
		vfs_context_t a_context;
	};
*/

	struct vnode *vp = ap->a_vp;

	struct filefork *fp;
	struct hfsmount *hfsmp;
	size_t bytesContAvail = 0;
	int retval = E_NONE;

	struct rl_entry *invalid_range;
	enum rl_overlaptype overlaptype;

	if (VNODE_IS_RSRC(vp)) {
		/* allow blockmaps to the resource fork */

	if ( hfs_file_is_compressed(VTOC(vp), 1) ) { /* 1 == don't take the cnode lock */
		int state = decmpfs_cnode_get_vnode_state(VTOCMP(vp));

			case FILE_IS_COMPRESSED:

			case FILE_IS_CONVERTING:
				/* if FILE_IS_CONVERTING, we allow blockmap */

				printf("invalid state %d for compressed file\n", state);

#endif /* HFS_COMPRESSION */

	/* Do not allow blockmap operation on a directory */
	if (vnode_isdir(vp)) {

	/*
	 * Check for underlying vnode requests and ensure that logical
	 * to physical mapping is requested.
	 */
	if (ap->a_bpn == NULL)

	if ( !vnode_issystem(vp) && !vnode_islnk(vp) && !vnode_isswap(vp)) {
		if (VTOC(vp)->c_lockowner != current_thread()) {
			hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

	/* Check virtual blocks only when performing write operation */
	if ((ap->a_flags & VNODE_WRITE) && (fp->ff_unallocblocks != 0)) {
		if (hfs_start_transaction(hfsmp) != 0) {

		syslocks = SFL_EXTENTS | SFL_BITMAP;

	} else if (overflow_extents(fp)) {
		syslocks = SFL_EXTENTS;

		lockflags = hfs_systemfile_lock(hfsmp, syslocks, HFS_EXCLUSIVE_LOCK);

	/*
	 * Check for any delayed allocations.
	 */
	if ((ap->a_flags & VNODE_WRITE) && (fp->ff_unallocblocks != 0)) {

		u_int32_t loanedBlocks;

		//
		// Make sure we have a transaction.  It's possible
		// that we came in and fp->ff_unallocblocks was zero
		// but during the time we blocked acquiring the extents
		// btree, ff_unallocblocks became non-zero and so we
		// will need to start a transaction.
		//
		if (started_tr == 0) {

				hfs_systemfile_unlock(hfsmp, lockflags);

		/*
		 * Note: ExtendFileC will Release any blocks on loan and
		 * acquire real blocks.  So we ask to extend by zero bytes
		 * since ExtendFileC will account for the virtual blocks.
		 */

		loanedBlocks = fp->ff_unallocblocks;
		retval = ExtendFileC(hfsmp, (FCB*)fp, 0, 0,
				     kEFAllMask | kEFNoClumpMask, &actbytes);

			fp->ff_unallocblocks = loanedBlocks;
			cp->c_blocks += loanedBlocks;
			fp->ff_blocks += loanedBlocks;

			hfs_lock_mount (hfsmp);
			hfsmp->loanedBlocks += loanedBlocks;
			hfs_unlock_mount (hfsmp);

			hfs_systemfile_unlock(hfsmp, lockflags);
			cp->c_flag |= C_MODIFIED;

				(void) hfs_update(vp, TRUE);
				(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);

				hfs_end_transaction(hfsmp);

	retval = MapFileBlockC(hfsmp, (FCB *)fp, ap->a_size, ap->a_foffset,
			       ap->a_bpn, &bytesContAvail);

		hfs_systemfile_unlock(hfsmp, lockflags);

		(void) hfs_update(vp, TRUE);
		(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
		hfs_end_transaction(hfsmp);

	/* On write, always return error because virtual blocks, if any,
	 * should have been allocated in ExtendFileC().  We do not
	 * allocate virtual blocks on read, therefore return error
	 * only if no virtual blocks are allocated.  Otherwise we search
	 * rangelist for zero-fills
	 */
	if ((MacToVFSError(retval) != ERANGE) ||
	    (ap->a_flags & VNODE_WRITE) ||
	    ((ap->a_flags & VNODE_READ) && (fp->ff_unallocblocks == 0))) {

	/* Validate if the start offset is within logical file size */
	if (ap->a_foffset >= fp->ff_size) {

	/*
	 * At this point, we have encountered a failure during
	 * MapFileBlockC that resulted in ERANGE, and we are not servicing
	 * a write, and there are borrowed blocks.
	 *
	 * However, the cluster layer will not call blockmap for
	 * blocks that are borrowed and in-cache.  We have to assume that
	 * because we observed ERANGE being emitted from MapFileBlockC, this
	 * extent range is not valid on-disk.  So we treat this as a
	 * mapping that needs to be zero-filled prior to reading.
	 *
	 * Note that under certain circumstances (such as non-contiguous
	 * userland VM mappings in the calling process), cluster_io
	 * may be forced to split a large I/O driven by hfs_vnop_write
	 * into multiple sub-I/Os that necessitate a RMW cycle.  If this is
	 * the case here, then we have already removed the invalid range list
	 * mapping prior to getting to this blockmap call, so we should not
	 * search the invalid rangelist for this byte range.
	 */

	bytesContAvail = fp->ff_size - ap->a_foffset;
	/*
	 * Clip the contiguous available bytes to, at most, the allowable
	 * maximum or the amount requested.
	 */

	if (bytesContAvail > ap->a_size) {
		bytesContAvail = ap->a_size;

	*ap->a_bpn = (daddr64_t) -1;

	/* MapFileBlockC() found a valid extent in the filefork.  Search the
	 * mapping information further for invalid file ranges
	 */
	overlaptype = rl_scan(&fp->ff_invalidranges, ap->a_foffset,
			      ap->a_foffset + (off_t)bytesContAvail - 1,

	if (overlaptype != RL_NOOVERLAP) {
		switch(overlaptype) {
		case RL_MATCHINGOVERLAP:
		case RL_OVERLAPCONTAINSRANGE:
		case RL_OVERLAPSTARTSBEFORE:
			/* There's no valid block for this byte offset */
			*ap->a_bpn = (daddr64_t)-1;
			/* There's no point limiting the amount to be returned
			 * if the invalid range that was hit extends all the way
			 * to the EOF (i.e. there's no valid bytes between the
			 * end of this range and the file's EOF):
			 */
			if (((off_t)fp->ff_size > (invalid_range->rl_end + 1)) &&
			    ((size_t)(invalid_range->rl_end + 1 - ap->a_foffset) < bytesContAvail)) {
				bytesContAvail = invalid_range->rl_end + 1 - ap->a_foffset;

		case RL_OVERLAPISCONTAINED:
		case RL_OVERLAPENDSAFTER:
			/* The range of interest hits an invalid block before the end: */
			if (invalid_range->rl_start == ap->a_foffset) {
				/* There's actually no valid information to be had starting here: */
				*ap->a_bpn = (daddr64_t)-1;
				if (((off_t)fp->ff_size > (invalid_range->rl_end + 1)) &&
				    ((size_t)(invalid_range->rl_end + 1 - ap->a_foffset) < bytesContAvail)) {
					bytesContAvail = invalid_range->rl_end + 1 - ap->a_foffset;

				bytesContAvail = invalid_range->rl_start - ap->a_foffset;

	if (bytesContAvail > ap->a_size)
		bytesContAvail = ap->a_size;

		*ap->a_run = bytesContAvail;

		*(int *)ap->a_poff = 0;

	return (MacToVFSError(retval));
/*
 * prepare and issue the I/O
 * buf_strategy knows how to deal
 * with requests that require
 */
hfs_vnop_strategy(struct vnop_strategy_args *ap)

	buf_t	bp = ap->a_bp;
	vnode_t	vp = buf_vnode(bp);

	/* Mark buffer as containing static data if cnode flag set */
	if (VTOC(vp)->c_flag & C_SSD_STATIC) {

	/* Mark buffer for greedy-mode writes if cnode flag set */
	if (VTOC(vp)->c_flag & C_SSD_GREEDY_MODE) {
		bufattr_markgreedymode(&bp->b_attr);

	/* mark buffer as containing burst mode data if cnode flag set */
	if (VTOC(vp)->c_flag & C_IO_ISOCHRONOUS) {
		bufattr_markisochronous(&bp->b_attr);

	if ((!bufattr_rawencrypted(&bp->b_attr)) &&
	    ((cp = cp_get_protected_cnode(vp)) != NULL)) {
		/*
		 * We rely upon the truncate lock to protect the
		 * CP cache key from getting tossed prior to our IO finishing here.
		 * Nearly all cluster io calls to manipulate file payload from HFS
		 * take the truncate lock before calling into the cluster
		 * layer to ensure the file size does not change, or that they
		 * have exclusive right to change the EOF of the file.
		 * That same guarantee protects us here since the code that
		 * deals with CP lock events must now take the truncate lock
		 * before doing anything.
		 *
		 * There is 1 exception here:
		 * 1) One exception should be the VM swapfile IO, because HFS will
		 * funnel the VNOP_PAGEOUT directly into a cluster_pageout call for the
		 * swapfile code only without holding the truncate lock.  This is because
		 * individual swapfiles are maintained at fixed-length sizes by the VM code.
		 * In non-swapfile IO we use PAGEOUT_V2 semantics which allow us to
		 * create our own UPL and thus take the truncate lock before calling
		 * into the cluster layer.  In that case, however, we are not concerned
		 * with the CP blob being wiped out in the middle of the IO
		 * because there isn't anything to toss; the VM swapfile key stays
		 * in-core as long as the file is open.
		 */

		/*
		 * Last chance: If this data protected I/O does not have unwrapped keys
		 * present, then try to get them.  We already know that it should, by this point.
		 */
		if (cp->c_cpentry->cp_flags & (CP_KEY_FLUSHED | CP_NEEDS_KEYS)) {
			int io_op = ( (buf_flags(bp) & B_READ) ? CP_READ_ACCESS : CP_WRITE_ACCESS);
			if ((error = cp_handle_vnop(vp, io_op, 0)) != 0) {
				/*
				 * We have to be careful here.  By this point in the I/O path, VM or the cluster
				 * engine has prepared a buf_t with the proper file offsets and all the rest,
				 * so simply erroring out will result in us leaking this particular buf_t.
				 * We need to properly decorate the buf_t just as buf_strategy would so as
				 * to make it appear that the I/O errored out with the particular error code.
				 */
				buf_seterror (bp, error);

		/*
		 * For filesystem resize, we may not have access to the underlying
		 * file's cache key for whatever reason (device may be locked).  However,
		 * we do not need it since we are going to use the temporary HFS-wide resize key
		 * which is generated once we start relocating file content.  If this file's I/O
		 * should be done using the resize key, it will have been supplied already, so
		 * do not attach the file's cp blob to the buffer.
		 */
		if ((cp->c_cpentry->cp_flags & CP_RELOCATION_INFLIGHT) == 0) {
			buf_setcpaddr(bp, cp->c_cpentry);

#endif /* CONFIG_PROTECT */

	error = buf_strategy(VTOHFS(vp)->hfs_devvp, ap);
hfs_minorupdate(struct vnode *vp) {
	struct cnode *cp = VTOC(vp);
	cp->c_flag &= ~C_MODIFIED;
	cp->c_touch_acctime = 0;
	cp->c_touch_chgtime = 0;
	cp->c_touch_modtime = 0;
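/*
 * hfs_minorupdate clears the cnode's modified flag and any pending
 * access/change/modification time requests; the truncate paths call it
 * in place of hfs_update when HFS_TRUNCATE_SKIPUPDATE is requested, so
 * the on-disk times are left untouched.
 */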
do_hfs_truncate(struct vnode *vp, off_t length, int flags, int truncateflags, vfs_context_t context)

	register struct cnode *cp = VTOC(vp);
	struct filefork *fp = VTOF(vp);
	kauth_cred_t cred = vfs_context_ucred(context);

	off_t actualBytesAdded;

	u_int32_t fileblocks;

	struct hfsmount *hfsmp;

	int skipupdate = (truncateflags & HFS_TRUNCATE_SKIPUPDATE);
	int suppress_times = (truncateflags & HFS_TRUNCATE_SKIPTIMES);

	blksize = VTOVCB(vp)->blockSize;
	fileblocks = fp->ff_blocks;
	filebytes = (off_t)fileblocks * (off_t)blksize;

	KERNEL_DEBUG(HFSDBG_TRUNCATE | DBG_FUNC_START,
		     (int)length, (int)fp->ff_size, (int)filebytes, 0, 0);

	/* This should only happen with a corrupt filesystem */
	if ((off_t)fp->ff_size < 0)

	if ((!ISHFSPLUS(VTOVCB(vp))) && (length > (off_t)MAXHFSFILESIZE))

	/* Files that are changing size are not hot file candidates. */
	if (hfsmp->hfc_stage == HFC_RECORDING) {
		fp->ff_bytesread = 0;

	/*
	 * We cannot just check if fp->ff_size == length (as an optimization)
	 * since there may be extra physical blocks that also need truncation.
	 */
	if ((retval = hfs_getinoquota(cp)))

	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of ff_size is 0, length will be at least 1.
	 */
	if (length > (off_t)fp->ff_size) {

		retval = hfs_chkdq(cp, (int64_t)(roundup(length - filebytes, blksize)),

		/*
		 * If we don't have enough physical space then
		 * we need to extend the physical size.
		 */
		if (length > filebytes) {

			u_int32_t blockHint = 0;

			/* All or nothing and don't round up to clumpsize. */
			eflags = kEFAllMask | kEFNoClumpMask;

			if (cred && (suser(cred, NULL) != 0)) {
				eflags |= kEFReserveMask;  /* keep a reserve */

			/*
			 * Allocate Journal and Quota files in metadata zone.
			 */
			if (filebytes == 0 &&
			    hfsmp->hfs_flags & HFS_METADATA_ZONE &&
			    hfs_virtualmetafile(cp)) {
				eflags |= kEFMetadataMask;
				blockHint = hfsmp->hfs_metazone_start;

			if (hfs_start_transaction(hfsmp) != 0) {

			/* Protect extents b-tree and allocation bitmap */
			lockflags = SFL_BITMAP;
			if (overflow_extents(fp))
				lockflags |= SFL_EXTENTS;
			lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

			/*
			 * Keep growing the file as long as the current EOF is
			 * less than the desired value.
			 */
			while ((length > filebytes) && (retval == E_NONE)) {
				bytesToAdd = length - filebytes;
				retval = MacToVFSError(ExtendFileC(VTOVCB(vp),

								   &actualBytesAdded));

				filebytes = (off_t)fp->ff_blocks * (off_t)blksize;
				if (actualBytesAdded == 0 && retval == E_NONE) {
					if (length > filebytes)

			hfs_systemfile_unlock(hfsmp, lockflags);

				(void) hfs_minorupdate(vp);

				(void) hfs_update(vp, TRUE);
				(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);

			hfs_end_transaction(hfsmp);

		KERNEL_DEBUG(HFSDBG_TRUNCATE | DBG_FUNC_NONE,
			     (int)length, (int)fp->ff_size, (int)filebytes, 0, 0);

		if (ISSET(flags, IO_NOZEROFILL)) {
			// An optimisation for the hibernation file
			if (vnode_isswap(vp))
				rl_remove_all(&fp->ff_invalidranges);

			if (UBCINFOEXISTS(vp) && (vnode_issystem(vp) == 0) && retval == E_NONE) {
				struct rl_entry *invalid_range;

				zero_limit = (fp->ff_size + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
				if (length < zero_limit) zero_limit = length;

				if (length > (off_t)fp->ff_size) {

					/* Extending the file: time to fill out the current last page w. zeroes? */
					if ((fp->ff_size & PAGE_MASK_64) &&
					    (rl_scan(&fp->ff_invalidranges, fp->ff_size & ~PAGE_MASK_64,
						     fp->ff_size - 1, &invalid_range) == RL_NOOVERLAP)) {

						/* There's some valid data at the start of the (current) last page
						   of the file, so zero out the remainder of that page to ensure the
						   entire page contains valid data.  Since there is no invalid range
						   possible past the (current) eof, there's no need to remove anything
						   from the invalid range list before calling cluster_write(): */

						retval = cluster_write(vp, (struct uio *) 0, fp->ff_size, zero_limit,
								       fp->ff_size, (off_t)0,
								       (flags & IO_SYNC) | IO_HEADZEROFILL | IO_NOZERODIRTY);
						hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
						if (retval) goto Err_Exit;

					/* Merely invalidate the remaining area, if necessary: */
					if (length > zero_limit) {

						rl_add(zero_limit, length - 1, &fp->ff_invalidranges);
						cp->c_zftimeout = tv.tv_sec + ZFTIMELIMIT;

					/* The page containing the (current) eof is invalid: just add the
					   remainder of the page to the invalid list, along with the area
					   being newly allocated:
					 */

					rl_add(fp->ff_size, length - 1, &fp->ff_invalidranges);
					cp->c_zftimeout = tv.tv_sec + ZFTIMELIMIT;

				panic("hfs_truncate: invoked on non-UBC object?!");

		if (suppress_times == 0) {
			cp->c_touch_modtime = TRUE;

		fp->ff_size = length;

	} else { /* Shorten the size of the file */

		// An optimisation for the hibernation file
		if (ISSET(flags, IO_NOZEROFILL) && vnode_isswap(vp)) {
			rl_remove_all(&fp->ff_invalidranges);
		} else if ((off_t)fp->ff_size > length) {
			/* Any space previously marked as invalid is now irrelevant: */
			rl_remove(length, fp->ff_size - 1, &fp->ff_invalidranges);

		/*
		 * Account for any unmapped blocks. Note that the new
		 * file length can still end up with unmapped blocks.
		 */
		if (fp->ff_unallocblocks > 0) {
			u_int32_t finalblks;
			u_int32_t loanedBlocks;

			hfs_lock_mount(hfsmp);
			loanedBlocks = fp->ff_unallocblocks;
			cp->c_blocks -= loanedBlocks;
			fp->ff_blocks -= loanedBlocks;
			fp->ff_unallocblocks = 0;

			hfsmp->loanedBlocks -= loanedBlocks;

			finalblks = (length + blksize - 1) / blksize;
			if (finalblks > fp->ff_blocks) {
				/* calculate required unmapped blocks */
				loanedBlocks = finalblks - fp->ff_blocks;
				hfsmp->loanedBlocks += loanedBlocks;

				fp->ff_unallocblocks = loanedBlocks;
				cp->c_blocks += loanedBlocks;
				fp->ff_blocks += loanedBlocks;

			hfs_unlock_mount (hfsmp);

			off_t savedbytes = ((off_t)fp->ff_blocks * (off_t)blksize);

			if (hfs_start_transaction(hfsmp) != 0) {

			if (fp->ff_unallocblocks == 0) {
				/* Protect extents b-tree and allocation bitmap */
				lockflags = SFL_BITMAP;
				if (overflow_extents(fp))
					lockflags |= SFL_EXTENTS;
				lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

				retval = MacToVFSError(TruncateFileC(VTOVCB(vp), (FCB*)fp, length, 0,
								     FORK_IS_RSRC (fp), FTOC(fp)->c_fileid, false));

				hfs_systemfile_unlock(hfsmp, lockflags);

				fp->ff_size = length;

				(void) hfs_minorupdate(vp);

				(void) hfs_update(vp, TRUE);
				(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);

			hfs_end_transaction(hfsmp);

			filebytes = (off_t)fp->ff_blocks * (off_t)blksize;

		/* These are bytesreleased */
		(void) hfs_chkdq(cp, (int64_t)-(savedbytes - filebytes), NOCRED, 0);

		/*
		 * Only set update flag if the logical length changes & we aren't
		 * suppressing modtime updates.
		 */
		if (((off_t)fp->ff_size != length) && (suppress_times == 0)) {
			cp->c_touch_modtime = TRUE;

		fp->ff_size = length;

	if (cp->c_mode & (S_ISUID | S_ISGID)) {
		if (!vfs_context_issuser(context)) {
			cp->c_mode &= ~(S_ISUID | S_ISGID);

		retval = hfs_minorupdate(vp);

		cp->c_touch_chgtime = TRUE;	/* status changed */
		if (suppress_times == 0) {
			cp->c_touch_modtime = TRUE;	/* file data was modified */

			/*
			 * If we are not suppressing the modtime update, then
			 * update the gen count as well.
			 */
			if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK (cp->c_attr.ca_mode)) {
				hfs_incr_gencount(cp);

		retval = hfs_update(vp, MNT_WAIT);

			KERNEL_DEBUG(HFSDBG_TRUNCATE | DBG_FUNC_NONE,
				     -1, -1, -1, retval, 0);

	KERNEL_DEBUG(HFSDBG_TRUNCATE | DBG_FUNC_END,
		     (int)length, (int)fp->ff_size, (int)filebytes, retval, 0);
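/*
 * do_hfs_truncate shrinks or grows a fork by a single step; hfs_truncate
 * (below) drives it in HFS_BIGFILE_SIZE increments so that no individual
 * journal transaction becomes too large.
 */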
/*
 * Preparation which must be done prior to deleting the catalog record
 * of a file or directory.  In order to make the on-disk state as safe as
 * possible, we remove the catalog entry before releasing the bitmap blocks
 * and the overflow extent records.  However, some work must be done prior
 * to deleting the catalog record.
 *
 * When calling this function, the cnode must exist both in memory and on-disk.
 * If there are both resource fork and data fork vnodes, this function should
 * be called on both.
 */
hfs_prepare_release_storage (struct hfsmount *hfsmp, struct vnode *vp) {

	struct filefork *fp = VTOF(vp);
	struct cnode *cp = VTOC(vp);

	/* Cannot truncate an HFS directory! */
	if (vnode_isdir(vp)) {

	/*
	 * See the comment below in hfs_truncate for why we need to call
	 * setsize here.  Essentially we want to avoid pending IO if we
	 * already know that the blocks are going to be released here.
	 * This function is only called when totally removing all storage for a file, so
	 * we can take a shortcut and immediately setsize (0);
	 */

	/* This should only happen with a corrupt filesystem */
	if ((off_t)fp->ff_size < 0)

	/*
	 * We cannot just check if fp->ff_size == length (as an optimization)
	 * since there may be extra physical blocks that also need truncation.
	 */
	if ((retval = hfs_getinoquota(cp))) {

	/* Wipe out any invalid ranges which have yet to be backed by disk */
	rl_remove(0, fp->ff_size - 1, &fp->ff_invalidranges);

	/*
	 * Account for any unmapped blocks. Since we're deleting the
	 * entire file, we don't have to worry about just shrinking
	 * to a smaller number of borrowed blocks.
	 */
	if (fp->ff_unallocblocks > 0) {
		u_int32_t loanedBlocks;

		hfs_lock_mount (hfsmp);
		loanedBlocks = fp->ff_unallocblocks;
		cp->c_blocks -= loanedBlocks;
		fp->ff_blocks -= loanedBlocks;
		fp->ff_unallocblocks = 0;

		hfsmp->loanedBlocks -= loanedBlocks;

		hfs_unlock_mount (hfsmp);
/*
 * Special wrapper around calling TruncateFileC.  This function is usable
 * even when the catalog record does not exist any longer, making it ideal
 * for use when deleting a file.  The simplification here is that we know
 * that we are releasing all blocks.
 *
 * Note that this function may be called when there is no vnode backing
 * the file fork in question.  We may call this from hfs_vnop_inactive
 * to clear out resource fork data (and may not want to clear out the data
 * fork yet).  As a result, we pointer-check both sets of inputs before
 * doing anything with them.
 *
 * The caller is responsible for saving off a copy of the filefork(s)
 * embedded within the cnode prior to calling this function.  The pointers
 * supplied as arguments must be valid even if the cnode is no longer valid.
 */
hfs_release_storage (struct hfsmount *hfsmp, struct filefork *datafork,
		     struct filefork *rsrcfork, u_int32_t fileid) {

	u_int32_t fileblocks;

	blksize = hfsmp->blockSize;

		datafork->ff_size = 0;

		fileblocks = datafork->ff_blocks;
		filebytes = (off_t)fileblocks * (off_t)blksize;

		/* We killed invalid ranges and loaned blocks before we removed the catalog entry */

		while (filebytes > 0) {
			if (filebytes > HFS_BIGFILE_SIZE) {
				filebytes -= HFS_BIGFILE_SIZE;

			/* Start a transaction, and wipe out as many blocks as we can in this iteration */
			if (hfs_start_transaction(hfsmp) != 0) {

			if (datafork->ff_unallocblocks == 0) {
				/* Protect extents b-tree and allocation bitmap */
				lockflags = SFL_BITMAP;
				if (overflow_extents(datafork))
					lockflags |= SFL_EXTENTS;
				lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

				error = MacToVFSError(TruncateFileC(HFSTOVCB(hfsmp), datafork, filebytes, 1, 0, fileid, false));

				hfs_systemfile_unlock(hfsmp, lockflags);

			(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);

			/* Finish the transaction and start over if necessary */
			hfs_end_transaction(hfsmp);

	if (error == 0 && rsrcfork) {
		rsrcfork->ff_size = 0;

		fileblocks = rsrcfork->ff_blocks;
		filebytes = (off_t)fileblocks * (off_t)blksize;

		/* We killed invalid ranges and loaned blocks before we removed the catalog entry */

		while (filebytes > 0) {
			if (filebytes > HFS_BIGFILE_SIZE) {
				filebytes -= HFS_BIGFILE_SIZE;

			/* Start a transaction, and wipe out as many blocks as we can in this iteration */
			if (hfs_start_transaction(hfsmp) != 0) {

			if (rsrcfork->ff_unallocblocks == 0) {
				/* Protect extents b-tree and allocation bitmap */
				lockflags = SFL_BITMAP;
				if (overflow_extents(rsrcfork))
					lockflags |= SFL_EXTENTS;
				lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

				error = MacToVFSError(TruncateFileC(HFSTOVCB(hfsmp), rsrcfork, filebytes, 1, 1, fileid, false));

				hfs_systemfile_unlock(hfsmp, lockflags);

			(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);

			/* Finish the transaction and start over if necessary */
			hfs_end_transaction(hfsmp);
errno_t hfs_ubc_setsize(vnode_t vp, off_t len, bool have_cnode_lock)

	/*
	 * Call ubc_setsize to give the VM subsystem a chance to do
	 * whatever it needs to with existing pages before we delete
	 * blocks.  Note that symlinks don't use the UBC so we'll
	 * get back ENOENT in that case.
	 */
	if (have_cnode_lock) {
		error = ubc_setsize_ex(vp, len, UBC_SETSIZE_NO_FS_REENTRY);
		if (error == EAGAIN) {
			cnode_t *cp = VTOC(vp);

			if (cp->c_truncatelockowner != current_thread()) {
#if DEVELOPMENT || DEBUG
				panic("hfs: hfs_ubc_setsize called without exclusive truncate lock!");

				printf("hfs: hfs_ubc_setsize called without exclusive truncate lock!\n");

			error = ubc_setsize_ex(vp, len, 0);
			hfs_lock_always(cp, HFS_EXCLUSIVE_LOCK);

		error = ubc_setsize_ex(vp, len, 0);

	return error == ENOENT ? 0 : error;
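/*
 * Because ENOENT from ubc_setsize_ex merely means the vnode has no UBC
 * information (e.g. a symlink), it is translated to success above; callers
 * only see genuine failures from the VM layer.
 */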
/*
 * Truncate a cnode to at most length size, freeing (or adding) the
 */
hfs_truncate(struct vnode *vp, off_t length, int flags,
	     int truncateflags, vfs_context_t context)

	struct filefork *fp = VTOF(vp);

	u_int32_t fileblocks;

	struct cnode *cp = VTOC(vp);

	/* Cannot truncate an HFS directory! */
	if (vnode_isdir(vp)) {

	/* A swap file cannot change size. */
	if (vnode_isswap(vp) && length && !ISSET(flags, IO_NOAUTH)) {

	blksize = VTOVCB(vp)->blockSize;
	fileblocks = fp->ff_blocks;
	filebytes = (off_t)fileblocks * (off_t)blksize;

	bool caller_has_cnode_lock = (cp->c_lockowner == current_thread());

	error = hfs_ubc_setsize(vp, length, caller_has_cnode_lock);

	if (!caller_has_cnode_lock) {
		error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

	// have to loop truncating or growing files that are
	// really big because otherwise transactions can get
	// enormous and consume too many kernel resources.

	if (length < filebytes) {
		while (filebytes > length) {
			if ((filebytes - length) > HFS_BIGFILE_SIZE) {
				filebytes -= HFS_BIGFILE_SIZE;

			cp->c_flag |= C_FORCEUPDATE;
			error = do_hfs_truncate(vp, filebytes, flags, truncateflags, context);

	} else if (length > filebytes) {
		while (filebytes < length) {
			if ((length - filebytes) > HFS_BIGFILE_SIZE) {
				filebytes += HFS_BIGFILE_SIZE;

			cp->c_flag |= C_FORCEUPDATE;
			error = do_hfs_truncate(vp, filebytes, flags, truncateflags, context);

	} else /* Same logical size */ {

		error = do_hfs_truncate(vp, length, flags, truncateflags, context);

	/* Files that are changing size are not hot file candidates. */
	if (VTOHFS(vp)->hfc_stage == HFC_RECORDING) {
		fp->ff_bytesread = 0;

	if (!caller_has_cnode_lock)

	// Make sure UBC's size matches up (in case we didn't completely succeed)
	errno_t err2 = hfs_ubc_setsize(vp, fp->ff_size, caller_has_cnode_lock);
/*
 * Preallocate file storage space.
 */
hfs_vnop_allocate(struct vnop_allocate_args /* {
		off_t *a_bytesallocated;
		vfs_context_t a_context;
	} */ *ap)

	struct vnode *vp = ap->a_vp;

	struct filefork *fp;

	off_t length = ap->a_length;

	off_t moreBytesRequested;
	off_t actualBytesAdded;

	u_int32_t fileblocks;
	int retval, retval2;
	u_int32_t blockHint;
	u_int32_t extendFlags;   /* For call to ExtendFileC */
	struct hfsmount *hfsmp;
	kauth_cred_t cred = vfs_context_ucred(ap->a_context);

	*(ap->a_bytesallocated) = 0;

	if (!vnode_isreg(vp))

	if (length < (off_t)0)

	orig_ctime = VTOC(vp)->c_ctime;

	check_for_tracked_file(vp, orig_ctime, ap->a_length == 0 ? NAMESPACE_HANDLER_TRUNCATE_OP|NAMESPACE_HANDLER_DELETE_OP : NAMESPACE_HANDLER_TRUNCATE_OP, NULL);

	hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

	if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {

	fileblocks = fp->ff_blocks;
	filebytes = (off_t)fileblocks * (off_t)vcb->blockSize;

	if ((ap->a_flags & ALLOCATEFROMVOL) && (length < filebytes)) {

	/* Fill in the flags word for the call to Extend the file */

	extendFlags = kEFNoClumpMask;
	if (ap->a_flags & ALLOCATECONTIG)
		extendFlags |= kEFContigMask;
	if (ap->a_flags & ALLOCATEALL)
		extendFlags |= kEFAllMask;
	if (cred && suser(cred, NULL) != 0)
		extendFlags |= kEFReserveMask;
	if (hfs_virtualmetafile(cp))
		extendFlags |= kEFMetadataMask;

	startingPEOF = filebytes;

	if (ap->a_flags & ALLOCATEFROMPEOF)
		length += filebytes;
	else if (ap->a_flags & ALLOCATEFROMVOL)
		blockHint = ap->a_offset / VTOVCB(vp)->blockSize;

	/* If no changes are necessary, then we're done */
	if (filebytes == length)

	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of filebytes is 0, length will be at least 1.
	 */
	if (length > filebytes) {
		off_t total_bytes_added = 0, orig_request_size;

		orig_request_size = moreBytesRequested = length - filebytes;

		retval = hfs_chkdq(cp,
				   (int64_t)(roundup(moreBytesRequested, vcb->blockSize)),

		/*
		 * Metadata zone checks.
		 */
		if (hfsmp->hfs_flags & HFS_METADATA_ZONE) {
			/*
			 * Allocate Journal and Quota files in metadata zone.
			 */
			if (hfs_virtualmetafile(cp)) {
				blockHint = hfsmp->hfs_metazone_start;
			} else if ((blockHint >= hfsmp->hfs_metazone_start) &&
				   (blockHint <= hfsmp->hfs_metazone_end)) {
				/*
				 * Move blockHint outside metadata zone.
				 */
				blockHint = hfsmp->hfs_metazone_end + 1;

		while ((length > filebytes) && (retval == E_NONE)) {
			off_t bytesRequested;

			if (hfs_start_transaction(hfsmp) != 0) {

			/* Protect extents b-tree and allocation bitmap */
			lockflags = SFL_BITMAP;
			if (overflow_extents(fp))
				lockflags |= SFL_EXTENTS;
			lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

			if (moreBytesRequested >= HFS_BIGFILE_SIZE) {
				bytesRequested = HFS_BIGFILE_SIZE;

				bytesRequested = moreBytesRequested;

			if (extendFlags & kEFContigMask) {
				// if we're on a sparse device, this will force it to do a
				// full scan to find the space needed.
				hfsmp->hfs_flags &= ~HFS_DID_CONTIG_SCAN;

			retval = MacToVFSError(ExtendFileC(vcb,

							   &actualBytesAdded));

			if (retval == E_NONE) {
				*(ap->a_bytesallocated) += actualBytesAdded;
				total_bytes_added += actualBytesAdded;
				moreBytesRequested -= actualBytesAdded;
				if (blockHint != 0) {
					blockHint += actualBytesAdded / vcb->blockSize;

			filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize;

			hfs_systemfile_unlock(hfsmp, lockflags);

				(void) hfs_update(vp, TRUE);
				(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);

			hfs_end_transaction(hfsmp);

		/*
		 * if we get an error and no changes were made then exit
		 * otherwise we must do the hfs_update to reflect the changes
		 */
		if (retval && (startingPEOF == filebytes))

		/*
		 * Adjust actualBytesAdded to be allocation block aligned, not
		 * clump size aligned.
		 * NOTE: So what we are reporting does not affect reality
		 * until the file is closed, when we truncate the file to allocation
		 */
		if (total_bytes_added != 0 && orig_request_size < total_bytes_added)
			*(ap->a_bytesallocated) =
				roundup(orig_request_size, (off_t)vcb->blockSize);

	} else { /* Shorten the size of the file */

		/*
		 * N.B. At present, this code is never called.  If and when we
		 * do start using it, it looks like there might be slightly
		 * strange semantics with the file size: it's possible for the
		 * file size to *increase* e.g. if current file size is 5,
		 * length is 1024 and filebytes is 4096, the file size will
		 * end up being 1024 bytes.  This isn't necessarily a problem
		 * but it's not consistent with the code above which doesn't
		 * change the file size.
		 */

		retval = hfs_truncate(vp, length, 0, 0, ap->a_context);
		filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize;

		/*
		 * if we get an error and no changes were made then exit
		 * otherwise we must do the hfs_update to reflect the changes
		 */
		if (retval && (startingPEOF == filebytes)) goto Err_Exit;

		/* These are bytesreleased */
		(void) hfs_chkdq(cp, (int64_t)-((startingPEOF - filebytes)), NOCRED,0);

		if (fp->ff_size > filebytes) {
			fp->ff_size = filebytes;

			hfs_ubc_setsize(vp, fp->ff_size, true);

	cp->c_touch_chgtime = TRUE;
	cp->c_touch_modtime = TRUE;
	retval2 = hfs_update(vp, MNT_WAIT);

	hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
4186 * Pagein for HFS filesystem
4189 hfs_vnop_pagein(struct vnop_pagein_args
*ap
)
4191 struct vnop_pagein_args {
4194 vm_offset_t a_pl_offset,
4198 vfs_context_t a_context;
4204 struct filefork
*fp
;
4207 upl_page_info_t
*pl
;
4209 off_t page_needed_f_offset
;
4214 boolean_t truncate_lock_held
= FALSE
;
4215 boolean_t file_converted
= FALSE
;
4223 if ((error
= cp_handle_vnop(vp
, CP_READ_ACCESS
| CP_WRITE_ACCESS
, 0)) != 0) {
4225 * If we errored here, then this means that one of two things occurred:
4226 * 1. there was a problem with the decryption of the key.
4227 * 2. the device is locked and we are not allowed to access this particular file.
4229 * Either way, this means that we need to shut down this upl now. As long as
4230 * the pl pointer is NULL (meaning that we're supposed to create the UPL ourselves)
4231 * then we create a upl and immediately abort it.
4233 if (ap
->a_pl
== NULL
) {
4234 /* create the upl */
4235 ubc_create_upl (vp
, ap
->a_f_offset
, ap
->a_size
, &upl
, &pl
,
4236 UPL_UBC_PAGEIN
| UPL_RET_ONLY_ABSENT
);
4237 /* mark the range as needed so it doesn't immediately get discarded upon abort */
4238 ubc_upl_range_needed (upl
, ap
->a_pl_offset
/ PAGE_SIZE
, 1);
4240 /* Abort the range */
4241 ubc_upl_abort_range (upl
, 0, ap
->a_size
, UPL_ABORT_FREE_ON_EMPTY
| UPL_ABORT_ERROR
);
4247 #endif /* CONFIG_PROTECT */
4249 if (ap
->a_pl
!= NULL
) {
4251 * this can only happen for swap files now that
4252 * we're asking for V2 paging behavior...
4253 * so don't need to worry about decompression, or
4254 * keeping track of blocks read or taking the truncate lock
4256 error
= cluster_pagein(vp
, ap
->a_pl
, ap
->a_pl_offset
, ap
->a_f_offset
,
4257 ap
->a_size
, (off_t
)fp
->ff_size
, ap
->a_flags
);
4261 page_needed_f_offset
= ap
->a_f_offset
+ ap
->a_pl_offset
;
4265 * take truncate lock (shared/recursive) to guard against
4266 * zero-fill thru fsync interfering, but only for v2
4268 * the HFS_RECURSE_TRUNCLOCK arg indicates that we want the
4269 * lock shared and we are allowed to recurse 1 level if this thread already
4270 * owns the lock exclusively... this can legally occur
4271 * if we are doing a shrinking ftruncate against a file
4272 * that is mapped private, and the pages being truncated
4273 * do not currently exist in the cache... in that case
4274 * we will have to page-in the missing pages in order
4275 * to provide them to the private mapping... we must
4276 * also call hfs_unlock_truncate with a postive been_recursed
4277 * arg to indicate that if we have recursed, there is no need to drop
4278 * the lock. Allowing this simple recursion is necessary
4279 * in order to avoid a certain deadlock... since the ftruncate
4280 * already holds the truncate lock exclusively, if we try
4281 * to acquire it shared to protect the pagein path, we will
4284 * NOTE: The if () block below is a workaround in order to prevent a
4285 * VM deadlock. See rdar://7853471.
4287 * If we are in a forced unmount, then launchd will still have the
4288 * dyld_shared_cache file mapped as it is trying to reboot. If we
4289 * take the truncate lock here to service a page fault, then our
4290 * thread could deadlock with the forced-unmount. The forced unmount
4291 * thread will try to reclaim the dyld_shared_cache vnode, but since it's
4292 * marked C_DELETED, it will call ubc_setsize(0). As a result, the unmount
4293 * thread will think it needs to copy all of the data out of the file
4294 * and into a VM copy object. If we hold the cnode lock here, then that
4295 * VM operation will not be able to proceed, because we'll set a busy page
4296 * before attempting to grab the lock. Note that this isn't as simple as "don't
4297 * call ubc_setsize" because doing that would just shift the problem to the
4298 * ubc_msync done before the vnode is reclaimed.
4300 * So, if a forced unmount on this volume is in flight AND the cnode is
4301 * marked C_DELETED, then just go ahead and do the page in without taking
4302 * the lock (thus suspending pagein_v2 semantics temporarily). Since it's on a file
4303 * that is not going to be available on the next mount, this seems like a
4304 * OK solution from a correctness point of view, even though it is hacky.
4306 if (vfs_isforce(vp
->v_mount
)) {
4307 if (cp
->c_flag
& C_DELETED
) {
4308 /* If we don't get it, then just go ahead and operate without the lock */
4309 truncate_lock_held
= hfs_try_trunclock(cp
, HFS_SHARED_LOCK
, HFS_LOCK_SKIP_IF_EXCLUSIVE
);
4313 hfs_lock_truncate(cp
, HFS_SHARED_LOCK
, HFS_LOCK_SKIP_IF_EXCLUSIVE
);
4314 truncate_lock_held
= TRUE
;
4317 kret
= ubc_create_upl(vp
, ap
->a_f_offset
, ap
->a_size
, &upl
, &pl
, UPL_UBC_PAGEIN
| UPL_RET_ONLY_ABSENT
);
4319 if ((kret
!= KERN_SUCCESS
) || (upl
== (upl_t
) NULL
)) {
4323 ubc_upl_range_needed(upl
, ap
->a_pl_offset
/ PAGE_SIZE
, 1);
4325 upl_size
= isize
= ap
->a_size
;
4328 * Scan from the back to find the last page in the UPL, so that we
4329 * aren't looking at a UPL that may have already been freed by the
4330 * preceding aborts/completions.
4332 for (pg_index
= ((isize
) / PAGE_SIZE
); pg_index
> 0;) {
4333 if (upl_page_present(pl
, --pg_index
))
4335 if (pg_index
== 0) {
4337 * no absent pages were found in the range specified
4338 * just abort the UPL to get rid of it and then we're done
4340 ubc_upl_abort_range(upl
, 0, isize
, UPL_ABORT_FREE_ON_EMPTY
);
4345 * initialize the offset variables before we touch the UPL.
4346 * f_offset is the position into the file, in bytes
4347 * offset is the position into the UPL, in bytes
4348 * pg_index is the pg# of the UPL we're operating on
4349 * isize is the offset into the UPL of the last page that is present.
4351 isize
= ((pg_index
+ 1) * PAGE_SIZE
);
4354 f_offset
= ap
->a_f_offset
;
4360 if ( !upl_page_present(pl
, pg_index
)) {
4362 * we asked for RET_ONLY_ABSENT, so it's possible
4363 * to get back empty slots in the UPL.
4364 * just skip over them
4366 f_offset
+= PAGE_SIZE
;
4367 offset
+= PAGE_SIZE
;
4374 * We know that we have at least one absent page.
4375 * Now checking to see how many in a row we have
4378 xsize
= isize
- PAGE_SIZE
;
4381 if ( !upl_page_present(pl
, pg_index
+ num_of_pages
))
4386 xsize
= num_of_pages
* PAGE_SIZE
;
#if HFS_COMPRESSION
		if (VNODE_IS_RSRC(vp)) {
			/* allow pageins of the resource fork */
		} else {
			int compressed = hfs_file_is_compressed(VTOC(vp), 1); /* 1 == don't take the cnode lock */

			if (compressed) {
				if (truncate_lock_held) {
					/*
					 * can't hold the truncate lock when calling into the decmpfs layer
					 * since it calls back into this layer... even though we're only
					 * holding the lock in shared mode, and the re-entrant path only
					 * takes the lock shared, we can deadlock if some other thread
					 * tries to grab the lock exclusively in between.
					 */
					hfs_unlock_truncate(cp, HFS_LOCK_SKIP_IF_EXCLUSIVE);
					truncate_lock_held = FALSE;
				}
				ap->a_pl = upl;
				ap->a_pl_offset = offset;
				ap->a_f_offset = f_offset;
				ap->a_size = xsize;

				error = decmpfs_pagein_compressed(ap, &compressed, VTOCMP(vp));
				/*
				 * note that decmpfs_pagein_compressed can change the state of
				 * 'compressed'... it will set it to 0 if the file is no longer
				 * compressed once the compression lock is successfully taken
				 * i.e. we would block on that lock while the file is being inflated
				 */
				if (compressed) {
					if (error == 0) {
						/* successful page-in, update the access time */
						VTOC(vp)->c_touch_acctime = TRUE;

						/* compressed files are not hot file candidates */
						if (VTOHFS(vp)->hfc_stage == HFC_RECORDING) {
							fp->ff_bytesread = 0;
						}
					} else if (error == EAGAIN) {
						/*
						 * EAGAIN indicates someone else already holds the compression lock...
						 * to avoid deadlocking, we'll abort this range of pages with an
						 * indication that the pagein needs to be redriven
						 */
						ubc_upl_abort_range(upl, (upl_offset_t) offset, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_RESTART);
					} else if (error == ENOSPC) {

						if (upl_size == PAGE_SIZE)
							panic("decmpfs_pagein_compressed: couldn't ubc_upl_map a single page\n");

						ubc_upl_abort_range(upl, (upl_offset_t) offset, isize, UPL_ABORT_FREE_ON_EMPTY);

						ap->a_size = PAGE_SIZE;
						ap->a_pl = NULL;
						ap->a_pl_offset = 0;
						ap->a_f_offset = page_needed_f_offset;

						goto retry_pagein;
					}
					goto pagein_next_range;
				}
				else {
					/*
					 * Set file_converted only if the file became decompressed while we were
					 * paging in.  If it were still compressed, we would re-start the loop using the goto
					 * in the above block.  This avoids us overloading truncate_lock_held as our retry_pagein
					 * condition below, since we could have avoided taking the truncate lock to prevent
					 * a deadlock in the force unmount case.
					 */
					file_converted = TRUE;
				}
			}
			if (file_converted == TRUE) {
				/*
				 * the file was converted back to a regular file after we first saw it as compressed
				 * we need to abort the upl, retake the truncate lock, recreate the UPL and start over
				 * reset a_size so that we consider what remains of the original request
				 * and null out a_upl and a_pl_offset.
				 *
				 * We should only be able to get into this block if the decmpfs_pagein_compressed
				 * successfully decompressed the range in question for this file.
				 */
				ubc_upl_abort_range(upl, (upl_offset_t) offset, isize, UPL_ABORT_FREE_ON_EMPTY);

				ap->a_size = isize;
				ap->a_pl = NULL;
				ap->a_pl_offset = 0;

				/* Reset file_converted back to false so that we don't infinite-loop. */
				file_converted = FALSE;
				goto retry_pagein;
			}
		}
#endif
		error = cluster_pagein(vp, upl, offset, f_offset, xsize, (off_t)fp->ff_size, ap->a_flags);
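		/*
		 * Added note (not in the original source): fp->ff_size is passed as
		 * the file size so the cluster layer knows where valid data ends and
		 * can zero-fill the tail of the last page beyond EOF instead of
		 * reading it from disk.
		 */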
		/*
		 * Keep track of blocks read.
		 */
		if ( !vnode_isswap(vp) && VTOHFS(vp)->hfc_stage == HFC_RECORDING && error == 0) {
			int bytesread;
			int took_cnode_lock = 0;

			if (ap->a_f_offset == 0 && fp->ff_size < PAGE_SIZE)
				bytesread = fp->ff_size;
			else
				bytesread = xsize;
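			/*
			 * Explanatory note (not in the original source): for a file
			 * smaller than one page, only ff_size bytes are credited to
			 * the hot-file tally rather than the full page that was
			 * actually paged in, so tiny files are not over-counted
			 * during the HFC_RECORDING sampling period.
			 */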
			/* When ff_bytesread exceeds 32-bits, update it behind the cnode lock. */
			if ((fp->ff_bytesread + bytesread) > 0x00000000ffffffff && cp->c_lockowner != current_thread()) {
				hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
				took_cnode_lock = 1;
			}
			/*
			 * If this file hasn't been seen since the start of
			 * the current sampling period then start over.
			 */
			if (cp->c_atime < VTOHFS(vp)->hfc_timebase) {
				struct timeval tv;

				fp->ff_bytesread = bytesread;
				microtime(&tv);
				cp->c_atime = tv.tv_sec;
			} else {
				fp->ff_bytesread += bytesread;
			}
			cp->c_touch_acctime = TRUE;
			if (took_cnode_lock)
				hfs_unlock(cp);
		}
pagein_next_range:
		f_offset += xsize;
		offset   += xsize;
		isize    -= xsize;
		pg_index += num_of_pages;

		error = 0;
	}

pagein_done:
	if (truncate_lock_held == TRUE) {
		/* Note 1 is passed to hfs_unlock_truncate in been_recursed argument */
		hfs_unlock_truncate(cp, HFS_LOCK_SKIP_IF_EXCLUSIVE);
	}

	return (error);
}
/*
 * Pageout for HFS filesystem.
 */
int
hfs_vnop_pageout(struct vnop_pageout_args *ap)
/*
	struct vnop_pageout_args {
	   vnode_t       a_vp,
	   upl_t         a_pl,
	   vm_offset_t   a_pl_offset,
	   off_t         a_f_offset,
	   size_t        a_size,
	   int           a_flags,
	   vfs_context_t a_context;
	};
*/
{
	vnode_t vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp;
	int retval = 0;
	off_t filesize;
	upl_t upl;
	upl_page_info_t* pl;
	vm_offset_t a_pl_offset;
	int a_flags;
	int is_pageoutv2 = 0;
	kern_return_t kret;
	int error = 0;
	int error_ret = 0;

	cp = VTOC(vp);
	fp = VTOF(vp);

	/*
	 * Figure out where the file ends, for pageout purposes.  If
	 * ff_new_size > ff_size, then we're in the middle of extending the
	 * file via a write, so it is safe (and necessary) that we be able
	 * to pageout up to that point.
	 */
	filesize = fp->ff_size;
	if (fp->ff_new_size > filesize)
		filesize = fp->ff_new_size;

	a_flags = ap->a_flags;
	a_pl_offset = ap->a_pl_offset;

	/*
	 * we can tell if we're getting the new or old behavior from the UPL
	 */
	if ((upl = ap->a_pl) == NULL) {
		int request_flags;

		is_pageoutv2 = 1;
		/*
		 * we're in control of any UPL we commit
		 * make sure someone hasn't accidentally passed in UPL_NOCOMMIT
		 */
		a_flags &= ~UPL_NOCOMMIT;
		a_pl_offset = 0;

		/*
		 * For V2 semantics, we want to take the cnode truncate lock
		 * shared to guard against the file size changing via zero-filling.
		 *
		 * However, we have to be careful because we may be invoked
		 * via the ubc_msync path to write out dirty mmap'd pages
		 * in response to a lock event on a content-protected
		 * filesystem (e.g. to write out class A files).
		 * As a result, we want to take the truncate lock 'SHARED' with
		 * the mini-recursion locktype so that we don't deadlock/panic
		 * because we may be already holding the truncate lock exclusive to force any other
		 * IOs to have blocked behind us.
		 */
		hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_SKIP_IF_EXCLUSIVE);

		if (a_flags & UPL_MSYNC) {
			request_flags = UPL_UBC_MSYNC | UPL_RET_ONLY_DIRTY;
		}
		else {
			request_flags = UPL_UBC_PAGEOUT | UPL_RET_ONLY_DIRTY;
		}

		kret = ubc_create_upl(vp, ap->a_f_offset, ap->a_size, &upl, &pl, request_flags);

		if ((kret != KERN_SUCCESS) || (upl == (upl_t) NULL)) {
			retval = EINVAL;
			goto pageout_done;
		}
	}
	/*
	 * from this point forward upl points at the UPL we're working with
	 * it was either passed in or we successfully created it
	 */

	/*
	 * Now that HFS is opting into VFC_VFSVNOP_PAGEOUTV2, we may need to operate on our own
	 * UPL instead of relying on the UPL passed into us.  We go ahead and do that here,
	 * scanning for dirty ranges.  We'll issue our own N cluster_pageout calls, for
	 * N dirty ranges in the UPL.  Note that this is almost a direct copy of the
	 * logic in vnode_pageout except that we need to do it after grabbing the truncate
	 * lock in HFS so that we don't lock invert ourselves.
	 *
	 * Note that we can still get into this function on behalf of the default pager with
	 * non-V2 behavior (swapfiles).  However in that case, we did not grab locks above
	 * since fsync and other writing threads will grab the locks, then mark the
	 * relevant pages as busy.  But the pageout codepath marks the pages as busy,
	 * and THEN would attempt to grab the truncate lock, which would result in deadlock.  So
	 * we do not try to grab anything for the pre-V2 case, which should only be accessed
	 * by the paging/VM system.
	 */
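	/*
	 * Added note (assumption, not from the original source): the V2 behavior
	 * referred to above is advertised via the VFC_VFSVNOP_PAGEOUTV2 capability
	 * flag at filesystem registration time; when it is set, the VM/UBC layer
	 * calls VNOP_PAGEOUT with a NULL UPL and leaves it to the filesystem to
	 * create and commit its own UPL, which is what the is_pageoutv2 path in
	 * this function does.
	 */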
	if (is_pageoutv2) {
		off_t f_offset;
		int offset;
		int isize;
		int pg_index;

		isize = ap->a_size;

		f_offset = ap->a_f_offset;

		/*
		 * Scan from the back to find the last page in the UPL, so that we
		 * aren't looking at a UPL that may have already been freed by the
		 * preceding aborts/completions.
		 */
		for (pg_index = ((isize) / PAGE_SIZE); pg_index > 0;) {
			if (upl_page_present(pl, --pg_index))
				break;
			if (pg_index == 0) {
				ubc_upl_abort_range(upl, 0, isize, UPL_ABORT_FREE_ON_EMPTY);
				goto pageout_done;
			}
		}

		/*
		 * initialize the offset variables before we touch the UPL.
		 * a_f_offset is the position into the file, in bytes
		 * offset is the position into the UPL, in bytes
		 * pg_index is the pg# of the UPL we're operating on.
		 * isize is the offset into the UPL of the last non-clean page.
		 */
		isize = ((pg_index + 1) * PAGE_SIZE);
		offset = 0;
		pg_index = 0;

		while (isize) {
			int  xsize;
			int  num_of_pages;

			if ( !upl_page_present(pl, pg_index)) {
				/*
				 * we asked for RET_ONLY_DIRTY, so it's possible
				 * to get back empty slots in the UPL.
				 * just skip over them
				 */
				f_offset += PAGE_SIZE;
				offset   += PAGE_SIZE;
				isize    -= PAGE_SIZE;
				pg_index++;

				continue;
			}
			if ( !upl_dirty_page(pl, pg_index)) {
				panic ("hfs_vnop_pageout: unforeseen clean page @ index %d for UPL %p\n", pg_index, upl);
			}

			/*
			 * We know that we have at least one dirty page.
			 * Now checking to see how many in a row we have
			 */
			num_of_pages = 1;
			xsize = isize - PAGE_SIZE;

			while (xsize) {
				if ( !upl_dirty_page(pl, pg_index + num_of_pages))
					break;
				num_of_pages++;
				xsize -= PAGE_SIZE;
			}
			xsize = num_of_pages * PAGE_SIZE;

			if (!vnode_isswap(vp)) {
				off_t end_of_range;
				int tooklock;

				tooklock = 0;

				if (cp->c_lockowner != current_thread()) {
					if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
						/*
						 * we're in the v2 path, so we are the
						 * owner of the UPL... we may have already
						 * processed some of the UPL, so abort it
						 * from the current working offset to the
						 * end of the UPL
						 */
						ubc_upl_abort_range(upl,
						                    offset,
						                    ap->a_size - offset,
						                    UPL_ABORT_FREE_ON_EMPTY);
						goto pageout_done;
					}
					tooklock = 1;
				}
				end_of_range = f_offset + xsize - 1;

				if (end_of_range >= filesize) {
					end_of_range = (off_t)(filesize - 1);
				}
				if (f_offset < filesize) {
					rl_remove(f_offset, end_of_range, &fp->ff_invalidranges);
					cp->c_flag |= C_MODIFIED;  /* leof is dirty */
				}
				if (tooklock) {
					hfs_unlock(cp);
				}
			}
			if ((error = cluster_pageout(vp, upl, offset, f_offset,
			                             xsize, filesize, a_flags))) {
				if (error_ret == 0)
					error_ret = error;
			}
			f_offset += xsize;
			offset   += xsize;
			isize    -= xsize;
			pg_index += num_of_pages;
		}
		/* capture errnos bubbled out of cluster_pageout if they occurred */
		if (error_ret != 0) {
			retval = error_ret;
		}
	} /* end block for v2 pageout behavior */
	else {
		if (!vnode_isswap(vp)) {
			off_t end_of_range;
			int tooklock = 0;

			if (cp->c_lockowner != current_thread()) {
				if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
					if (!(a_flags & UPL_NOCOMMIT)) {
						ubc_upl_abort_range(upl,
						                    a_pl_offset,
						                    ap->a_size,
						                    UPL_ABORT_FREE_ON_EMPTY);
					}
					goto pageout_done;
				}
				tooklock = 1;
			}
			end_of_range = ap->a_f_offset + ap->a_size - 1;

			if (end_of_range >= filesize) {
				end_of_range = (off_t)(filesize - 1);
			}
			if (ap->a_f_offset < filesize) {
				rl_remove(ap->a_f_offset, end_of_range, &fp->ff_invalidranges);
				cp->c_flag |= C_MODIFIED;  /* leof is dirty */
			}

			if (tooklock) {
				hfs_unlock(cp);
			}
		}
		/*
		 * just call cluster_pageout for old pre-v2 behavior
		 */
		retval = cluster_pageout(vp, upl, a_pl_offset, ap->a_f_offset,
		                         ap->a_size, filesize, a_flags);
	}
	/*
	 * If data was written, update the modification time of the file
	 * but only if it's mapped writable; we will have touched the
	 * modification time for direct writes.
	 */
	if (retval == 0 && (ubc_is_mapped_writable(vp)
	                    || ISSET(cp->c_flag, C_MIGHT_BE_DIRTY_FROM_MAPPING))) {
		hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);

		// Check again with lock
		bool mapped_writable = ubc_is_mapped_writable(vp);
		if (mapped_writable
		    || ISSET(cp->c_flag, C_MIGHT_BE_DIRTY_FROM_MAPPING)) {
			cp->c_touch_modtime = TRUE;
			cp->c_touch_chgtime = TRUE;

			/*
			 * We only need to increment the generation counter if
			 * it's currently mapped writable because we incremented
			 * the counter in hfs_vnop_mnomap.
			 */
			if (mapped_writable)
				hfs_incr_gencount(VTOC(vp));

			/*
			 * If setuid or setgid bits are set and this process is
			 * not the superuser then clear the setuid and setgid bits
			 * as a precaution against tampering.
			 */
			if ((cp->c_mode & (S_ISUID | S_ISGID)) &&
			    (vfs_context_suser(ap->a_context) != 0)) {
				cp->c_mode &= ~(S_ISUID | S_ISGID);
			}
		}

		hfs_unlock(cp);
	}

pageout_done:
	if (is_pageoutv2) {
		/*
		 * Release the truncate lock.  Note that because
		 * we may have taken the lock recursively by
		 * being invoked via ubc_msync due to lockdown,
		 * we should release it recursively, too.
		 */
		hfs_unlock_truncate(cp, HFS_LOCK_SKIP_IF_EXCLUSIVE);
	}

	return (retval);
}
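#if 0
/*
 * Illustrative sketch (not part of the original source): in the V2 path this
 * vnop builds its own UPL, so a flush of dirty mmap'd pages driven through
 * ubc_msync() reaches it with a NULL a_pl and UPL_MSYNC set in a_flags.  A
 * hypothetical in-kernel caller flushing a range of a vnode might look like
 * this; the helper name is made up for illustration.
 */
static int
hfs_flush_mapped_range_example(vnode_t vp, off_t start, off_t end)
{
	/* pushes dirty pages; ends up in hfs_vnop_pageout() via VNOP_PAGEOUT */
	return ubc_msync(vp, start, end, NULL, UBC_PUSHDIRTY | UBC_SYNC);
}
#endif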
/*
 * Intercept B-Tree node writes to unswap them if necessary.
 */
int
hfs_vnop_bwrite(struct vnop_bwrite_args *ap)
{
	int retval = 0;
	register struct buf *bp = ap->a_bp;
	register struct vnode *vp = buf_vnode(bp);
	BlockDescriptor block;

	/* Trap B-Tree writes */
	if ((VTOC(vp)->c_fileid == kHFSExtentsFileID) ||
	    (VTOC(vp)->c_fileid == kHFSCatalogFileID) ||
	    (VTOC(vp)->c_fileid == kHFSAttributesFileID) ||
	    (vp == VTOHFS(vp)->hfc_filevp)) {

		/*
		 * Swap and validate the node if it is in native byte order.
		 * This is always true on big endian, so we always validate
		 * before writing here.  On little endian, the node typically has
		 * been swapped and validated when it was written to the journal,
		 * so we won't do anything here.
		 */
		if (((u_int16_t *)((char *)buf_dataptr(bp) + buf_count(bp) - 2))[0] == 0x000e) {
			/* Prepare the block pointer */
			block.blockHeader = bp;
			block.buffer = (char *)buf_dataptr(bp);
			block.blockNum = buf_lblkno(bp);
			/* not found in cache ==> came from disk */
			block.blockReadFromDisk = (buf_fromcache(bp) == 0);
			block.blockSize = buf_count(bp);

			/* Endian un-swap B-Tree node */
			retval = hfs_swap_BTNode (&block, vp, kSwapBTNodeHostToBig, false);
			if (retval)
				panic("hfs_vnop_bwrite: about to write corrupt node!\n");
		}
	}

	/* This buffer shouldn't be locked anymore but if it is clear it */
	if ((buf_flags(bp) & B_LOCKED)) {
		if (VTOHFS(vp)->jnl) {
			panic("hfs: CLEARING the lock bit on bp %p\n", bp);
		}
		buf_clearflags(bp, B_LOCKED);
	}
	retval = vn_bwrite (ap);

	return (retval);
}
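#if 0
/*
 * Illustrative sketch (not part of the original source): the 0x000e test above
 * relies on the B-tree node layout, in which the last two bytes of a node hold
 * the offset of record 0.  That offset is always sizeof(BTNodeDescriptor),
 * i.e. 14 (0x000e), so finding 0x000e there means the node is still in host
 * byte order and must be swapped before it is written out.  A standalone form
 * of the same check (hypothetical helper name):
 */
static int
btree_node_is_host_order(struct buf *bp)
{
	u_int16_t rec0_offset;

	rec0_offset = ((u_int16_t *)((char *)buf_dataptr(bp) + buf_count(bp) - 2))[0];
	return (rec0_offset == 0x000e);
}
#endif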
/*
 * Relocate a file to a new location on disk
 *  cnode must be locked on entry
 *
 * Relocation occurs by cloning the file's data from its
 * current set of blocks to a new set of blocks. During
 * the relocation all of the blocks (old and new) are
 * owned by the file.
 *
 * -----------------     -----------------
 * |///////////////|     |               |     STEP 1 (acquire new blocks)
 * -----------------     -----------------
 *
 * -----------------     -----------------
 * |///////////////|     |///////////////|     STEP 2 (clone data)
 * -----------------     -----------------
 *
 *                       -----------------
 *                       |///////////////|     STEP 3 (head truncate blocks)
 *                       -----------------
 *
 * During steps 2 and 3 page-outs to file offsets less
 * than or equal to N are suspended.
 *
 * During step 3 page-ins to the file get suspended.
 */
int
hfs_relocate(struct vnode *vp, u_int32_t blockHint, kauth_cred_t cred,
             struct proc *p)
{
	struct cnode *cp;
	struct filefork *fp;
	struct hfsmount *hfsmp;
	u_int32_t headblks;
	u_int32_t datablks;
	u_int32_t blksize;
	u_int32_t growsize;
	u_int32_t nextallocsave;
	daddr64_t sector_a, sector_b;
	int eflags;
	off_t newbytes;
	int retval;
	int lockflags = 0;
	int took_trunc_lock = 0;
	int started_tr = 0;
	enum vtype vnodetype;

	vnodetype = vnode_vtype(vp);
	if (vnodetype != VREG) {
		/* Not allowed to move symlinks. */
		return (EPERM);
	}

	hfsmp = VTOHFS(vp);
	if (hfsmp->hfs_flags & HFS_FRAGMENTED_FREESPACE) {
		return (ENOSPC);
	}

	cp = VTOC(vp);
	fp = VTOF(vp);
	if (fp->ff_unallocblocks)
		return (EINVAL);

#if CONFIG_PROTECT
	/*
	 * <rdar://problem/9118426>
	 * Disable HFS file relocation on content-protected filesystems
	 */
	if (cp_fs_protected (hfsmp->hfs_mp)) {
		return EINVAL;
	}
#endif
	/* If it's an SSD, also disable HFS relocation */
	if (hfsmp->hfs_flags & HFS_SSD) {
		return EINVAL;
	}

	blksize = hfsmp->blockSize;
	if (blockHint == 0)
		blockHint = hfsmp->nextAllocation;

	if (fp->ff_size > 0x7fffffff) {
		return (EFBIG);
	}

	//
	// We do not believe that this call to hfs_fsync() is
	// necessary and it causes a journal transaction
	// deadlock so we are removing it.
	//
	//if (vnodetype == VREG && !vnode_issystem(vp)) {
	//	retval = hfs_fsync(vp, MNT_WAIT, 0, p);
	//	if (retval)
	//		return (retval);
	//}

	if (!vnode_issystem(vp) && (vnodetype != VLNK)) {
		hfs_unlock(cp);
		hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
		/* Force lock since callers expect the lock to be held. */
		if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS))) {
			hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
			return (retval);
		}
		/* No need to continue if file was removed. */
		if (cp->c_flag & C_NOEXISTS) {
			hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
			return (ENOENT);
		}
		took_trunc_lock = 1;
	}
	headblks = fp->ff_blocks;
	datablks = howmany(fp->ff_size, blksize);
	growsize = datablks * blksize;
	eflags = kEFContigMask | kEFAllMask | kEFNoClumpMask;
	if (blockHint >= hfsmp->hfs_metazone_start &&
	    blockHint <= hfsmp->hfs_metazone_end)
		eflags |= kEFMetadataMask;

	if (hfs_start_transaction(hfsmp) != 0) {
		if (took_trunc_lock)
			hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		return (EINVAL);
	}
	started_tr = 1;
	/*
	 * Protect the extents b-tree and the allocation bitmap
	 * during MapFileBlockC and ExtendFileC operations.
	 */
	lockflags = SFL_BITMAP;
	if (overflow_extents(fp))
		lockflags |= SFL_EXTENTS;
	lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

	retval = MapFileBlockC(hfsmp, (FCB *)fp, 1, growsize - 1, &sector_a, NULL);
	if (retval) {
		retval = MacToVFSError(retval);
		goto out;
	}

	/*
	 * STEP 1 - acquire new allocation blocks.
	 */
	nextallocsave = hfsmp->nextAllocation;
	retval = ExtendFileC(hfsmp, (FCB*)fp, growsize, blockHint, eflags, &newbytes);
	if (eflags & kEFMetadataMask) {
		hfs_lock_mount(hfsmp);
		HFS_UPDATE_NEXT_ALLOCATION(hfsmp, nextallocsave);
		MarkVCBDirty(hfsmp);
		hfs_unlock_mount(hfsmp);
	}

	retval = MacToVFSError(retval);
	if (retval == 0) {
		cp->c_flag |= C_MODIFIED;
		if (newbytes < growsize) {
			retval = ENOSPC;
			goto restore;
		} else if (fp->ff_blocks < (headblks + datablks)) {
			printf("hfs_relocate: allocation failed id=%u, vol=%s\n", cp->c_cnid, hfsmp->vcbVN);
			retval = ENOSPC;
			goto restore;
		}

		retval = MapFileBlockC(hfsmp, (FCB *)fp, 1, growsize, &sector_b, NULL);
		if (retval) {
			retval = MacToVFSError(retval);
		} else if ((sector_a + 1) == sector_b) {
			retval = ENOSPC;
			goto restore;
		} else if ((eflags & kEFMetadataMask) &&
		           ((((u_int64_t)sector_b * hfsmp->hfs_logical_block_size) / blksize) >
		              hfsmp->hfs_metazone_end)) {
#if 0
			const char * filestr;
			char emptystr = '\0';

			if (cp->c_desc.cd_nameptr != NULL) {
				filestr = (const char *)&cp->c_desc.cd_nameptr[0];
			} else if (vnode_name(vp) != NULL) {
				filestr = vnode_name(vp);
			} else {
				filestr = &emptystr;
			}
#endif
			retval = ENOSPC;
			goto restore;
		}
	}
	/* Done with system locks and journal for now. */
	hfs_systemfile_unlock(hfsmp, lockflags);
	lockflags = 0;
	hfs_end_transaction(hfsmp);
	started_tr = 0;

	if (retval) {
		/*
		 * Check to see if failure is due to excessive fragmentation.
		 */
		if ((retval == ENOSPC) &&
		    (hfs_freeblks(hfsmp, 0) > (datablks * 2))) {
			hfsmp->hfs_flags |= HFS_FRAGMENTED_FREESPACE;
		}
		goto out;
	}
	/*
	 * STEP 2 - clone file data into the new allocation blocks.
	 */
	if (vnodetype == VLNK)
		retval = EPERM;
	else if (vnode_issystem(vp))
		retval = hfs_clonesysfile(vp, headblks, datablks, blksize, cred, p);
	else
		retval = hfs_clonefile(vp, headblks, datablks, blksize);

	/* Start transaction for step 3 or for a restore. */
	if (hfs_start_transaction(hfsmp) != 0) {
		retval = EINVAL;
		goto out;
	}
	started_tr = 1;
	if (retval)
		goto restore;

	/*
	 * STEP 3 - switch to cloned data and remove old blocks.
	 */
	lockflags = SFL_BITMAP;
	if (overflow_extents(fp))
		lockflags |= SFL_EXTENTS;
	lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

	retval = HeadTruncateFile(hfsmp, (FCB*)fp, headblks);

	hfs_systemfile_unlock(hfsmp, lockflags);
	lockflags = 0;
	if (retval)
		goto restore;
out:
	if (took_trunc_lock)
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);

	if (lockflags) {
		hfs_systemfile_unlock(hfsmp, lockflags);
		lockflags = 0;
	}

	/* Push cnode's new extent data to disk. */
	if (retval == 0) {
		(void) hfs_update(vp, MNT_WAIT);
	}
	if (hfsmp->jnl) {
		if (cp->c_cnid < kHFSFirstUserCatalogNodeID)
			(void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
		else
			(void) hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0);
	}
exit:
	if (started_tr)
		hfs_end_transaction(hfsmp);

	return (retval);

restore:
	if (fp->ff_blocks == headblks) {
		if (took_trunc_lock)
			hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		goto exit;
	}
	/*
	 * Give back any newly allocated space.
	 */
	if (lockflags == 0) {
		lockflags = SFL_BITMAP;
		if (overflow_extents(fp))
			lockflags |= SFL_EXTENTS;
		lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);
	}

	(void) TruncateFileC(hfsmp, (FCB*)fp, fp->ff_size, 0, FORK_IS_RSRC(fp),
	                     FTOC(fp)->c_fileid, false);

	hfs_systemfile_unlock(hfsmp, lockflags);
	lockflags = 0;

	if (took_trunc_lock)
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
	goto exit;
}
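#if 0
/*
 * Illustrative sketch (not part of the original source): per the block comment
 * above hfs_relocate(), the caller must already hold the cnode lock.  A
 * hypothetical caller relocating a file with no particular placement hint
 * (blockHint == 0 falls back to the volume's nextAllocation) might therefore
 * look like this; the helper name is made up for illustration.
 */
static int
relocate_file_example(struct vnode *vp, vfs_context_t ctx)
{
	int error;

	hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	error = hfs_relocate(vp, 0, vfs_context_ucred(ctx), vfs_context_proc(ctx));
	hfs_unlock(VTOC(vp));

	return (error);
}
#endif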
/*
 * Clone a file's data within the file.
 *
 */
static int
hfs_clonefile(struct vnode *vp, int blkstart, int blkcnt, int blksize)
{
	caddr_t bufp;
	size_t bufsize;
	size_t copysize;
	size_t iosize;
	size_t offset;
	off_t writebase;
	uio_t auio;
	int error = 0;

	writebase = blkstart * blksize;
	copysize = blkcnt * blksize;
	iosize = bufsize = MIN(copysize, 128 * 1024);
	offset = 0;
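	/*
	 * Worked example (explanatory note, not in the original source): cloning
	 * 256 allocation blocks of 4 KB gives copysize == 1 MB; the staging
	 * buffer is capped at 128 KB, so the loop below performs eight
	 * read/write passes of 128 KB each.
	 */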
	hfs_unlock(VTOC(vp));

#if CONFIG_PROTECT
	if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
		hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
		return (error);
	}
#endif /* CONFIG_PROTECT */

	if (kmem_alloc(kernel_map, (vm_offset_t *)&bufp, bufsize)) {
		hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
		return (ENOMEM);
	}

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);

	while (offset < copysize) {
		iosize = MIN(copysize - offset, iosize);

		uio_reset(auio, offset, UIO_SYSSPACE, UIO_READ);
		uio_addiov(auio, (uintptr_t)bufp, iosize);

		error = cluster_read(vp, auio, copysize, IO_NOCACHE);
		if (error) {
			printf("hfs_clonefile: cluster_read failed - %d\n", error);
			break;
		}
		if (uio_resid(auio) != 0) {
			printf("hfs_clonefile: cluster_read: uio_resid = %lld\n", (int64_t)uio_resid(auio));
			error = EIO;
			break;
		}

		uio_reset(auio, writebase + offset, UIO_SYSSPACE, UIO_WRITE);
		uio_addiov(auio, (uintptr_t)bufp, iosize);

		error = cluster_write(vp, auio, writebase + offset,
		                      writebase + offset + iosize,
		                      uio_offset(auio), 0, IO_NOCACHE | IO_SYNC);
		if (error) {
			printf("hfs_clonefile: cluster_write failed - %d\n", error);
			break;
		}
		if (uio_resid(auio) != 0) {
			printf("hfs_clonefile: cluster_write failed - uio_resid not zero\n");
			error = EIO;
			break;
		}
		offset += iosize;
	}
	uio_free(auio);

	if ((blksize & PAGE_MASK)) {
		/*
		 * since the copy may not have started on a PAGE
		 * boundary (or may not have ended on one), we
		 * may have pages left in the cache since NOCACHE
		 * will let partially written pages linger...
		 * let's just flush the entire range to make sure
		 * we don't have any pages left that are beyond
		 * (or intersect) the real LEOF of this file
		 */
		ubc_msync(vp, writebase, writebase + offset, NULL, UBC_INVALIDATE | UBC_PUSHDIRTY);
	} else {
		/*
		 * No need to call ubc_msync or hfs_invalbuf
		 * since the file was copied using IO_NOCACHE and
		 * the copy was done starting and ending on a page
		 * boundary in the file.
		 */
	}
	kmem_free(kernel_map, (vm_offset_t)bufp, bufsize);

	hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
	return (error);
}
/*
 * Clone a system (metadata) file.
 *
 */
static int
hfs_clonesysfile(struct vnode *vp, int blkstart, int blkcnt, int blksize,
                 kauth_cred_t cred, struct proc *p)
{
	caddr_t bufp;
	char *offset;
	size_t bufsize;
	size_t iosize;
	struct buf *bp = NULL;
	daddr64_t blkno;
	daddr64_t blk;
	daddr64_t start_blk;
	daddr64_t last_blk;
	int breadcnt;
	int i;
	int error = 0;

	iosize = GetLogicalBlockSize(vp);
	bufsize = MIN(blkcnt * blksize, 1024 * 1024) & ~(iosize - 1);
	breadcnt = bufsize / iosize;
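	/*
	 * Worked example (explanatory note, not in the original source): with a
	 * 4 KB logical block size and at least 1 MB of data to clone, bufsize is
	 * capped at 1 MB and breadcnt == 256, i.e. up to 256 metadata buffers are
	 * read and then written back per pass of the loop below.
	 */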
	if (kmem_alloc(kernel_map, (vm_offset_t *)&bufp, bufsize)) {
		return (ENOMEM);
	}
	start_blk = ((daddr64_t)blkstart * blksize) / iosize;
	last_blk  = ((daddr64_t)blkcnt * blksize) / iosize;
	blkno = 0;

	while (blkno < last_blk) {
		/*
		 * Read up to a megabyte
		 */
		offset = bufp;
		for (i = 0, blk = blkno; (i < breadcnt) && (blk < last_blk); ++i, ++blk) {
			error = (int)buf_meta_bread(vp, blk, iosize, cred, &bp);
			if (error) {
				printf("hfs_clonesysfile: meta_bread error %d\n", error);
				goto out;
			}
			if (buf_count(bp) != iosize) {
				printf("hfs_clonesysfile: b_bcount is only %d\n", buf_count(bp));
				goto out;
			}
			bcopy((char *)buf_dataptr(bp), offset, iosize);

			buf_markinvalid(bp);
			buf_brelse(bp);
			bp = NULL;

			offset += iosize;
		}

		/*
		 * Write up to a megabyte
		 */
		offset = bufp;
		for (i = 0; (i < breadcnt) && (blkno < last_blk); ++i, ++blkno) {
			bp = buf_getblk(vp, start_blk + blkno, iosize, 0, 0, BLK_META);
			if (bp == NULL) {
				printf("hfs_clonesysfile: getblk failed on blk %qd\n", start_blk + blkno);
				error = EIO;
				goto out;
			}
			bcopy(offset, (char *)buf_dataptr(bp), iosize);
			error = (int)buf_bwrite(bp);
			bp = NULL;
			if (error)
				goto out;
			offset += iosize;
		}
	}
out:
	if (bp) {
		buf_brelse(bp);
	}

	kmem_free(kernel_map, (vm_offset_t)bufp, bufsize);

	error = hfs_fsync(vp, MNT_WAIT, 0, p);

	return (error);
}