/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*	@(#)hfs_readwrite.c	1.0
 *
 *	(c) 1990, 1992 NeXT Computer, Inc.  All Rights Reserved
 *	(c) 1998 Apple Computer, Inc.  All Rights Reserved
 *
 *	hfs_readwrite.c -- vnode operations to deal with reading and writing files.
 *
 *	MODIFICATION HISTORY:
 *	9-Nov-1999	Scott Roberts	hfs_allocate now returns sizes based on allocation block boundaries (#2398794)
 *	3-Feb-1999	Pat Dirks	Merged in Joe's change to hfs_truncate to skip vinvalbuf if LEOF isn't changing (#2302796)
 *					Removed superfluous (and potentially dangerous) second call to vinvalbuf() in hfs_truncate.
 *	2-Dec-1998	Pat Dirks	Added support for read/write bootstrap ioctls.
 *	10-Nov-1998	Pat Dirks	Changed read/write/truncate logic to optimize block sizes for first extents of a file.
 *					Changed hfs_strategy to correct I/O sizes from cluster code I/O requests in light of
 *					different block sizing.  Changed bexpand to handle RELEASE_BUFFER flag.
 *	22-Sep-1998	Don Brady	Changed truncate zero-fill to use bwrite after several bawrites have been queued.
 *	11-Sep-1998	Pat Dirks	Fixed buffering logic to not rely on B_CACHE, which is set for empty buffers that
 *					have been pre-read by cluster_read (use b_validend > 0 instead).
 *	27-Aug-1998	Pat Dirks	Changed hfs_truncate to use cluster_write in place of bawrite where possible.
 *	25-Aug-1998	Pat Dirks	Changed hfs_write to do small device-block aligned writes into buffers without doing
 *					read-ahead of the buffer.  Added bexpand to deal with incomplete [dirty] buffers.
 *					Fixed can_cluster macro to use MAXPHYSIO instead of MAXBSIZE.
 *	19-Aug-1998	Don Brady	Remove optimization in hfs_truncate that prevented extra physical blocks from
 *					being truncated (radar #2265750). Also set fcb->fcbEOF before calling vinvalbuf.
 *	7-Jul-1998	Pat Dirks	Added code to honor IO_NOZEROFILL in hfs_truncate.
 *	16-Jul-1998	Don Brady	In hfs_bmap use MAXPHYSIO instead of MAXBSIZE when calling MapFileBlockC (radar #2263753).
 *	16-Jul-1998	Don Brady	Fix error handling in hfs_allocate (radar #2252265).
 *	04-Jul-1998	chw		Synchronized options in hfs_allocate with flags in call to ExtendFileC
 *	25-Jun-1998	Don Brady	Add missing blockNo incrementing to zero fill loop in hfs_truncate.
 *	22-Jun-1998	Don Brady	Add bp = NULL assignment after brelse in hfs_read.
 *	4-Jun-1998	Pat Dirks	Split off from hfs_vnodeops.c
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>

//#include <mach/machine/vm_types.h>
#include <sys/vnode.h>

#include <miscfs/specfs/specdev.h>

#include <vm/vm_pageout.h>

#include <sys/kdebug.h>

#include "hfs_endian.h"
#include "hfscommon/headers/FileMgrInternal.h"
#include "hfscommon/headers/BTreesInternal.h"
#define can_cluster(size)  ((((size & (4096-1))) == 0) && (size <= (MAXPHYSIO/2)))

enum {
	MAXHFSFILESIZE = 0x7FFFFFFF	/* this needs to go in the mount structure */
};
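/*
 * Note on can_cluster(): a block size qualifies for the cluster I/O path
 * only if it is an exact multiple of 4K and no larger than half of
 * MAXPHYSIO.  For example, with a MAXPHYSIO of 128K an 8192-byte block
 * qualifies (8192 & 4095 == 0 and 8192 <= 64K) while a 6144-byte block
 * does not (6144 & 4095 == 2048).  (Illustrative value; MAXPHYSIO is
 * platform-defined.)
 */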
extern u_int32_t GetLogicalBlockSize(struct vnode *vp);

#if DBG_VOP_TEST_LOCKS
extern void DbgVopTest(int maxSlots, int retval, VopDbgStoreRec *VopDbgStore, char *funcname);
#endif

void debug_check_blocksizes(struct vnode *vp);
/*****************************************************************************
*
*	Operations on vnodes
*
*****************************************************************************/
/*
 *	INOUT struct uio *uio;
 *	IN struct ucred *cred;
 */
int
hfs_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp;
	register struct uio *uio;
	u_long fragSize, moveSize, startOffset, ioxfersize;
	long devBlockSize = 0;
	off_t bytesRemaining;

	DBG_FUNC_NAME("hfs_read");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));
	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS);
	mode = hp->h_meta->h_mode;

	if (uio->uio_rw != UIO_READ)
		panic("%s: mode", funcname);

	/* Can only read files */
	if (ap->a_vp->v_type != VREG && ap->a_vp->v_type != VLNK) {
		DBG_VOP_LOCKS_TEST(EISDIR);
		return (EISDIR);
	}

	DBG_RW(("\tfile size Ox%X\n", (u_int)fcb->fcbEOF));
	DBG_RW(("\tstarting at offset Ox%X of file, length Ox%X\n", (u_int)uio->uio_offset, (u_int)uio->uio_resid));

	debug_check_blocksizes(vp);

	/*
	 * If they didn't ask for any data, then we are done.
	 */
	if (uio->uio_resid == 0) {
		DBG_VOP_LOCKS_TEST(E_NONE);
		return (E_NONE);
	}

	/* can't read from a negative offset */
	if (uio->uio_offset < 0) {
		DBG_VOP_LOCKS_TEST(EINVAL);
		return (EINVAL);
	}

	if (uio->uio_offset > fcb->fcbEOF) {
		if ( (!ISHFSPLUS(VTOVCB(vp))) && (uio->uio_offset > (off_t)MAXHFSFILESIZE))
			retval = EFBIG;
		else
			retval = E_NONE;

		DBG_VOP_LOCKS_TEST(retval);
		return (retval);
	}
	VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 12)) | DBG_FUNC_START,
		(int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0);

	retval = cluster_read(vp, uio, (off_t)fcb->fcbEOF, devBlockSize, 0);
	for (retval = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {

		if ((bytesRemaining = (fcb->fcbEOF - uio->uio_offset)) <= 0)
			break;

		logBlockNo  = (daddr_t)(uio->uio_offset / PAGE_SIZE_64);
		startOffset = (u_long) (uio->uio_offset & PAGE_MASK_64);
		fragSize    = PAGE_SIZE;

		if (((logBlockNo * PAGE_SIZE) + fragSize) < fcb->fcbEOF)
			ioxfersize = fragSize;
		else {
			ioxfersize = fcb->fcbEOF - (logBlockNo * PAGE_SIZE);
			ioxfersize = (ioxfersize + (devBlockSize - 1)) & ~(devBlockSize - 1);
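			/*
			 * The partial page at EOF is rounded up to a whole
			 * number of device blocks, e.g. with devBlockSize = 512
			 * a 1000-byte tail becomes a 1024-byte transfer:
			 * (1000 + 511) & ~511 == 1024.
			 */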
		}

		DBG_RW(("\tat logBlockNo Ox%X, with Ox%lX left to read\n", logBlockNo, (UInt32)uio->uio_resid));
		moveSize = ioxfersize;
		DBG_RW(("\tmoveSize = Ox%lX; ioxfersize = Ox%lX; startOffset = Ox%lX.\n",
			moveSize, ioxfersize, startOffset));
		DBG_ASSERT(moveSize >= startOffset);
		moveSize -= startOffset;

		if (bytesRemaining < moveSize)
			moveSize = bytesRemaining;

		if (uio->uio_resid < moveSize) {
			moveSize = uio->uio_resid;
			DBG_RW(("\treducing moveSize to Ox%lX (uio->uio_resid).\n", moveSize));
		}

		DBG_RW(("\tat logBlockNo Ox%X, extent of Ox%lX, xfer of Ox%lX; moveSize = Ox%lX\n", logBlockNo, fragSize, ioxfersize, moveSize));

		if (( uio->uio_offset + fragSize) >= fcb->fcbEOF) {
			retval = bread(vp, logBlockNo, ioxfersize, NOCRED, &bp);

		} else if (logBlockNo - 1 == vp->v_lastr && !(vp->v_flag & VRAOFF)) {
			daddr_t nextLogBlockNo = logBlockNo + 1;
			int nextsize;

			if (((nextLogBlockNo * PAGE_SIZE) +
			     (daddr_t)fragSize) < fcb->fcbEOF)
				nextsize = fragSize;
			else {
				nextsize = fcb->fcbEOF - (nextLogBlockNo * PAGE_SIZE);
				nextsize = (nextsize + (devBlockSize - 1)) & ~(devBlockSize - 1);
			}
			retval = breadn(vp, logBlockNo, ioxfersize, &nextLogBlockNo, &nextsize, 1, NOCRED, &bp);
		} else {
			retval = bread(vp, logBlockNo, ioxfersize, NOCRED, &bp);
		}
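		/*
		 * Note on the branch above: logBlockNo - 1 == vp->v_lastr means
		 * this request immediately follows the last block read on this
		 * vnode, i.e. a sequential access pattern, so breadn() is used
		 * to start an asynchronous read-ahead of the next logical block
		 * while the requested block is read; VRAOFF suppresses the
		 * read-ahead.
		 */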
		if (retval != E_NONE) {
			if (bp) {
				brelse(bp);
				bp = NULL;
			}
			break;
		}
		vp->v_lastr = logBlockNo;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		ioxfersize -= bp->b_resid;

		if (ioxfersize < moveSize) {	/* XXX PPD This should take the offset into account, too! */
			moveSize = ioxfersize;
		}
		if ((startOffset + moveSize) > bp->b_bcount)
			panic("hfs_read: bad startOffset or moveSize\n");

		DBG_RW(("\tcopying Ox%lX bytes from %lX; resid = Ox%lX...\n", moveSize, (char *)bp->b_data + startOffset, bp->b_resid));

		if ((retval = uiomove((caddr_t)bp->b_data + startOffset, (int)moveSize, uio)))
			break;

		if (S_ISREG(mode) &&
		    (((startOffset + moveSize) == fragSize) || (uio->uio_offset == fcb->fcbEOF))) {
			bp->b_flags |= B_AGE;
		}

		DBG_ASSERT(bp->b_bcount == bp->b_validend);

		brelse(bp);
		/* Start of loop resets bp to NULL before reaching outside this block... */
	}

	if (bp != NULL) {
		DBG_ASSERT(bp->b_bcount == bp->b_validend);
		brelse(bp);
	}

	if (HTOVCB(hp)->vcbSigWord == kHFSPlusSigWord)
		hp->h_nodeflags |= IN_ACCESS;

	DBG_VOP_LOCKS_TEST(retval);

	debug_check_blocksizes(vp);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 12)) | DBG_FUNC_END,
		(int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0);

	return (retval);
}
/*
 * Write data to a file or directory.
 *	INOUT struct uio *uio;
 *	IN struct ucred *cred;
 */
int
hfs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct hfsnode *hp = VTOH(ap->a_vp);
	struct uio *uio = ap->a_uio;
	struct vnode *vp = ap->a_vp;
	FCB *fcb = HTOFCB(hp);
	ExtendedVCB *vcb = HTOVCB(hp);
	long devBlockSize = 0;
	off_t origFileSize, currOffset, writelimit, bytesToAdd;
	off_t actualBytesAdded;
	u_long blkoffset, resid, xfersize, clearSize;

	DBG_FUNC_NAME("hfs_write");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));
	DBG_RW(("\thfsnode 0x%x (%s)\n", (u_int)hp, H_NAME(hp)));
	DBG_RW(("\tstarting at offset Ox%lX of file, length Ox%lX\n", (UInt32)uio->uio_offset, (UInt32)uio->uio_resid));

	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS);
	dev = hp->h_meta->h_devvp;

	debug_check_blocksizes(vp);

	if (uio->uio_offset < 0) {
		DBG_VOP_LOCKS_TEST(EINVAL);
		return (EINVAL);
	}

	if (uio->uio_resid == 0) {
		DBG_VOP_LOCKS_TEST(E_NONE);
		return (E_NONE);
	}

	if (ap->a_vp->v_type != VREG && ap->a_vp->v_type != VLNK) {	/* Can only write files */
		DBG_VOP_LOCKS_TEST(EISDIR);
		return (EISDIR);
	}

	if (uio->uio_rw != UIO_WRITE)
		panic("%s: mode", funcname);

	ioflag = ap->a_ioflag;

	if (ioflag & IO_APPEND)
		uio->uio_offset = fcb->fcbEOF;
	if ((hp->h_meta->h_pflags & APPEND) && uio->uio_offset != fcb->fcbEOF)
		return (EPERM);

	writelimit = uio->uio_offset + uio->uio_resid;
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vp->v_type == VREG && p &&
	    writelimit > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}

	VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize);

	resid = uio->uio_resid;
	origFileSize = fcb->fcbPLen;
	flags = ioflag & IO_SYNC ? B_SYNC : 0;

	DBG_RW(("\tLEOF is 0x%lX, PEOF is 0x%lX.\n", fcb->fcbEOF, fcb->fcbPLen));

	/*
	   NOTE: In the following loop there are two positions tracked:
	   currOffset is the current I/O starting offset.  currOffset is never >LEOF; the
	   LEOF is nudged along with currOffset as data is zeroed or written.
	   uio->uio_offset is the start of the current I/O operation.  It may be arbitrarily
	   beyond currOffset.

	   The following is true at all times:

	   currOffset <= LEOF <= uio->uio_offset <= writelimit
	 */
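	/*
	 * Example: with LEOF = 0x1000 and a write of 0x200 bytes at offset
	 * 0x3000, currOffset starts at MIN(0x3000, 0x1000) = 0x1000 and the
	 * invariant reads 0x1000 <= 0x1000 <= 0x3000 <= 0x3200; the gap from
	 * 0x1000 to 0x3000 is zero filled as the loop advances currOffset.
	 */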
	currOffset = MIN(uio->uio_offset, fcb->fcbEOF);

	DBG_RW(("\tstarting I/O loop at 0x%lX.\n", (u_long)currOffset));

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_START,
		(int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0);
	if (fcb->fcbEOF > fcb->fcbMaxEOF)
		fcb->fcbMaxEOF = fcb->fcbEOF;

	/* Now test if we need to extend the file */
	/* Doing so will adjust the fcbPLen for us */

	while (writelimit > (off_t)fcb->fcbPLen) {

		bytesToAdd = writelimit - fcb->fcbPLen;
		DBG_RW(("\textending file by 0x%lX bytes; 0x%lX blocks free",
			(unsigned long)bytesToAdd, (unsigned long)vcb->freeBlocks));

		/* lock extents b-tree (also protects volume bitmap) */
		retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, cp);
		if (retval != E_NONE)
			break;

		retval = MacToVFSError(
			ExtendFileC (vcb,
				     fcb,
				     bytesToAdd,
				     kEFContigMask,
				     &actualBytesAdded));

		(void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, cp);
		DBG_VOP_CONT(("\tactual bytes added = 0x%lX bytes, retval = %d...\n", actualBytesAdded, retval));
		if ((actualBytesAdded == 0) && (retval == E_NONE))
			retval = ENOSPC;
		if (retval != E_NONE)
			break;
	}

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_NONE,
		(int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0);
	if (UBCISVALID(vp) && retval == E_NONE) {
		off_t filesize;
		off_t zero_off;
		int lflag;

		if (writelimit > fcb->fcbEOF)
			filesize = writelimit;
		else
			filesize = fcb->fcbEOF;

		lflag = (ioflag & IO_SYNC);

		if (uio->uio_offset > fcb->fcbMaxEOF) {
			zero_off = fcb->fcbMaxEOF;
			lflag |= IO_HEADZEROFILL;
		} else
			zero_off = (off_t)0;

		/*
		 * if the write starts beyond the current EOF then
		 * we'll zero fill from the current EOF to where the write begins
		 */
		retval = cluster_write(vp, uio, fcb->fcbEOF, filesize, zero_off,
				(off_t)0, devBlockSize, lflag);

		if (uio->uio_offset > fcb->fcbEOF) {
			fcb->fcbEOF = uio->uio_offset;

			if (fcb->fcbEOF > fcb->fcbMaxEOF)
				fcb->fcbMaxEOF = fcb->fcbEOF;

			ubc_setsize(vp, (off_t)fcb->fcbEOF);	/* XXX check errors */
		}
		if (resid > uio->uio_resid)
			hp->h_nodeflags |= IN_CHANGE | IN_UPDATE;
	} else {
		while (retval == E_NONE && uio->uio_resid > 0) {

			logBlockNo = currOffset / PAGE_SIZE;
			blkoffset  = currOffset & PAGE_MASK;

			if (((off_t)(fcb->fcbPLen) - currOffset) < PAGE_SIZE_64)
				fragSize = (off_t)(fcb->fcbPLen) - ((off_t)logBlockNo * PAGE_SIZE_64);
			else
				fragSize = PAGE_SIZE;
			xfersize = fragSize - blkoffset;

			DBG_RW(("\tcurrOffset = Ox%lX, logBlockNo = Ox%X, blkoffset = Ox%lX, xfersize = Ox%lX, fragSize = Ox%lX.\n",
				(unsigned long)currOffset, logBlockNo, blkoffset, xfersize, fragSize));

			/* Make any adjustments for boundary conditions */
			if (currOffset + (off_t)xfersize > writelimit) {
				xfersize = writelimit - currOffset;
				DBG_RW(("\ttrimming xfersize to 0x%lX to match writelimit (uio_resid)...\n", xfersize));
			}
			/*
			 * There is no need to read into bp if:
			 * We start on a block boundary and will overwrite the whole block
			 */
			if ((blkoffset == 0) && (xfersize >= fragSize)) {
				DBG_RW(("\tRequesting %ld-byte block Ox%lX w/o read...\n", fragSize, (long)logBlockNo));

				bp = getblk(vp, logBlockNo, fragSize, 0, 0, BLK_READ);

				if (bp->b_blkno == -1) {
					brelse(bp);
					retval = EIO;		/* XXX */
					break;
				}
			} else if (currOffset == fcb->fcbEOF && blkoffset == 0) {
				bp = getblk(vp, logBlockNo, fragSize, 0, 0, BLK_READ);

				if (bp->b_blkno == -1) {
					brelse(bp);
					retval = EIO;		/* XXX */
					break;
				}
			} else {
				/*
				 * This I/O transfer is not sufficiently aligned, so read the affected block into a buffer:
				 */
				DBG_VOP(("\tRequesting block Ox%X, size = 0x%08lX...\n", logBlockNo, fragSize));
				retval = bread(vp, logBlockNo, fragSize, ap->a_cred, &bp);

				if (retval != E_NONE) {
					if (bp)
						brelse(bp);
					break;
				}
			}
			/* See if we are starting to write within file boundaries:
			   If not, then we need to present a "hole" for the area between
			   the current EOF and the start of the current I/O operation:

			   Note that currOffset is only less than uio_offset if uio_offset > LEOF...
			 */
			if (uio->uio_offset > currOffset) {
				clearSize = MIN(uio->uio_offset - currOffset, xfersize);
				DBG_RW(("\tzeroing Ox%lX bytes Ox%lX bytes into block Ox%X...\n", clearSize, blkoffset, logBlockNo));
				bzero(bp->b_data + blkoffset, clearSize);
				currOffset += clearSize;
				blkoffset += clearSize;
				xfersize -= clearSize;
			}
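			/*
			 * Example of the hole fill above: writing at uio_offset
			 * 0x2100 with currOffset at 0x2000 (the old LEOF) zeroes
			 * MIN(0x100, xfersize) bytes of the block first, then
			 * advances currOffset and blkoffset past the zeroed
			 * region so the uiomove below lands at the requested
			 * offset.
			 */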
			DBG_RW(("\tCopying Ox%lX bytes Ox%lX bytes into block Ox%X... ioflag == 0x%X\n",
				xfersize, blkoffset, logBlockNo, ioflag));
			retval = uiomove((caddr_t)bp->b_data + blkoffset, (int)xfersize, uio);
			currOffset += xfersize;

			DBG_ASSERT((bp->b_bcount % devBlockSize) == 0);

			if (ioflag & IO_SYNC) {
				(void)VOP_BWRITE(bp);
				//DBG_RW(("\tissuing bwrite\n"));
			} else if ((xfersize + blkoffset) == fragSize) {
				//DBG_RW(("\tissuing bawrite\n"));
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else {
				//DBG_RW(("\tissuing bdwrite\n"));
				bdwrite(bp);
			}
			/* Update the EOF if we just extended the file
			   (the PEOF has already been moved out and the block mapping table has been updated): */
			if (currOffset > fcb->fcbEOF) {
				DBG_VOP(("\textending EOF to 0x%lX...\n", (UInt32)fcb->fcbEOF));
				fcb->fcbEOF = currOffset;

				if (fcb->fcbEOF > fcb->fcbMaxEOF)
					fcb->fcbMaxEOF = fcb->fcbEOF;

				ubc_setsize(vp, (off_t)fcb->fcbEOF);	/* XXX check errors */
			}

			if (retval || (resid == 0))
				break;
			hp->h_nodeflags |= IN_CHANGE | IN_UPDATE;
		}
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
		hp->h_meta->h_mode &= ~(ISUID | ISGID);
	if (retval) {
		if (ioflag & IO_UNIT) {
			(void)VOP_TRUNCATE(vp, origFileSize,
				ioflag & IO_SYNC, ap->a_cred, uio->uio_procp);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) {
		tv = time;
		retval = VOP_UPDATE(vp, &tv, &tv, 1);
	}

	debug_check_blocksizes(vp);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_END,
		(int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0);

	DBG_VOP_LOCKS_TEST(retval);
	return (retval);
}
/*
 *	IN struct ucred *cred;
 */
int
hfs_ioctl(ap)
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		int a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	DBG_FUNC_NAME("hfs_ioctl");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));

	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS);
	switch (ap->a_command) {

	case 1:
	{	register struct hfsnode *hp;
		register struct vnode *vp;
		register struct radvisory *ra;
		FCB *fcb;
		int devBlockSize = 0;
		int error;

		vp = ap->a_vp;
		hp = VTOH(vp);
		fcb = HTOFCB(hp);

		VOP_LEASE(vp, ap->a_p, ap->a_cred, LEASE_READ);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);

		ra = (struct radvisory *)(ap->a_data);

		if (ra->ra_offset >= fcb->fcbEOF) {
			VOP_UNLOCK(vp, 0, ap->a_p);
			DBG_VOP_LOCKS_TEST(EFBIG);
			return (EFBIG);
		}

		VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize);

		error = advisory_read(vp, fcb->fcbEOF, ra->ra_offset, ra->ra_count, devBlockSize);
		VOP_UNLOCK(vp, 0, ap->a_p);

		DBG_VOP_LOCKS_TEST(error);
		return (error);
	}
	case 2: /* F_READBOOTBLOCKS */
	case 3: /* F_WRITEBOOTBLOCKS */
	{
		struct vnode *vp = ap->a_vp;
		struct hfsnode *hp = VTOH(vp);
		struct fbootstraptransfer *btd = (struct fbootstraptransfer *)ap->a_data;
		int devBlockSize;
		int error;
		struct iovec aiov;
		struct uio auio;
		daddr_t blockNumber;
		u_long blockOffset;
		u_long xfersize;
		struct buf *bp;

		if ((vp->v_flag & VROOT) == 0) return EINVAL;
		if (btd->fbt_offset + btd->fbt_length > 1024) return EINVAL;

		aiov.iov_base = btd->fbt_buffer;
		aiov.iov_len = btd->fbt_length;

		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = btd->fbt_offset;
		auio.uio_resid = btd->fbt_length;
		auio.uio_segflg = UIO_USERSPACE;
		auio.uio_rw = (ap->a_command == 3) ? UIO_WRITE : UIO_READ; /* F_WRITEBOOTSTRAP / F_READBOOTSTRAP */
		auio.uio_procp = ap->a_p;

		VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize);
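		/*
		 * The bootstrap area lives in the first 1024 bytes of the
		 * volume (enforced by the length check above), so the transfer
		 * goes directly through the device vnode one device block at a
		 * time: each pass reads the block containing uio_offset,
		 * copies the overlapping piece, and writes the block back for
		 * F_WRITEBOOTBLOCKS or releases it for a read.
		 */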
		while (auio.uio_resid > 0) {
			blockNumber = auio.uio_offset / devBlockSize;
			error = bread(hp->h_meta->h_devvp, blockNumber, devBlockSize, ap->a_cred, &bp);
			if (error) {
				brelse(bp);
				return error;
			}

			blockOffset = auio.uio_offset % devBlockSize;
			xfersize = devBlockSize - blockOffset;
			error = uiomove((caddr_t)bp->b_data + blockOffset, (int)xfersize, &auio);
			if (error) {
				brelse(bp);
				return error;
			}
			if (auio.uio_rw == UIO_WRITE) {
				error = VOP_BWRITE(bp);
				if (error) return error;
			} else {
				brelse(bp);
			}
		}
	}
	break;

	default:
		DBG_VOP_LOCKS_TEST(ENOTTY);
		return (ENOTTY);
	}
}
int
hfs_select(ap)
	struct vop_select_args /* {
		struct vnode *a_vp;
		int a_which;
		int a_fflags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	DBG_FUNC_NAME("hfs_select");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));

	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS);

	/*
	 * We should really check to see if I/O is possible.
	 */
	DBG_VOP_LOCKS_TEST(1);
	return (1);
}
/*
 * NB Currently unsupported.
 *	IN struct ucred *cred;
 */
int
hfs_mmap(ap)
	struct vop_mmap_args /* {
		struct vnode *a_vp;
		int a_fflags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	DBG_FUNC_NAME("hfs_mmap");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));

	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS);

	DBG_VOP_LOCKS_TEST(EINVAL);
	return (EINVAL);
}
/*
 * Nothing to do, so just return.
 *
 * # Needs work: Is newoff right?  What's it mean?
 *	IN struct ucred *cred;
 */
int
hfs_seek(ap)
	struct vop_seek_args /* {
		struct vnode *a_vp;
		off_t a_oldoff;
		off_t a_newoff;
		struct ucred *a_cred;
	} */ *ap;
{
	DBG_FUNC_NAME("hfs_seek");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));

	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS);

	DBG_VOP_LOCKS_TEST(E_NONE);
	return (E_NONE);
}
/*
 * Bmap converts the logical block number of a file to its physical block
 * number on the disk.
 *
 * vp  - address of the vnode of the file
 * bn  - which logical block to convert to a physical block number.
 * vpp - returns the vnode for the block special file holding the filesystem
 *	 containing the file of interest
 * bnp - address of where to return the filesystem physical block number
 *
 *	OUT struct vnode **vpp;
 */
/*
 * Converts a logical block number to a physical block, and optionally returns
 * the amount of remaining blocks in a run.  The logical block is based on hfsNode.logBlockSize.
 * The physical block number is based on the device block size, currently it's 512.
 * The block run is returned in logical blocks, and is the REMAINING amount of blocks
 */
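/*
 * For example, with a logical block size of 4K and a device block size of
 * 512, logical block 3 maps to byte offset 0x3000 within the file, and the
 * physical block number returned in *bnp is expressed in 512-byte device
 * blocks.  (Illustrative sizes; the logical block size comes from
 * GetLogicalBlockSize().)
 */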
int
hfs_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap;
{
	struct hfsnode *hp = VTOH(ap->a_vp);
	struct hfsmount *hfsmp = VTOHFS(ap->a_vp);
	int retval = E_NONE;
	daddr_t logBlockSize;
	size_t bytesContAvail = 0;
	struct proc *p = NULL;
	int lockExtBtree;

	DBG_FUNC_NAME("hfs_bmap");
	DBG_VOP_LOCKS_DECL(2);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);

	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS);
	if (ap->a_vpp != NULL) {
		DBG_VOP_LOCKS_INIT(1,*ap->a_vpp, VOPDBG_IGNORE, VOPDBG_UNLOCKED, VOPDBG_IGNORE, VOPDBG_POS);
	} else {
		DBG_VOP_LOCKS_INIT(1,NULL, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS);
	}

	DBG_IO(("\tMapped blk %d --> ", ap->a_bn));
	/*
	 * Check for underlying vnode requests and ensure that logical
	 * to physical mapping is requested.
	 */
	if (ap->a_vpp != NULL)
		*ap->a_vpp = VTOH(ap->a_vp)->h_meta->h_devvp;
	if (ap->a_bnp == NULL)
		return (0);

	lockExtBtree = hasOverflowExtents(hp);
	if (lockExtBtree) {
		retval = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE | LK_CANRECURSE, p);
		if (retval)
			return (retval);
	}

	logBlockSize = GetLogicalBlockSize(ap->a_vp);

	retval = MacToVFSError(
			MapFileBlockC (HFSTOVCB(hfsmp),
				       HTOFCB(hp),
				       MAXPHYSIO,
				       (off_t)(ap->a_bn * logBlockSize),
				       ap->a_bnp,
				       &bytesContAvail));

	if (lockExtBtree) (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p);

	if (retval == E_NONE) {
		/* Figure out how many read ahead blocks there are */
		if (ap->a_runp != NULL) {
			if (can_cluster(logBlockSize)) {
				/* Make sure this result never goes negative: */
				*ap->a_runp = (bytesContAvail < logBlockSize) ? 0 : (bytesContAvail / logBlockSize) - 1;
			}
		}
	}

	DBG_IO(("%d:%d.\n", *ap->a_bnp, (bytesContAvail < logBlockSize) ? 0 : (bytesContAvail / logBlockSize) - 1));

	DBG_VOP_LOCKS_TEST(retval);

	DBG_ASSERT((*ap->a_runp * logBlockSize) < bytesContAvail);		/* At least *ap->a_runp blocks left and ... */
	if (can_cluster(logBlockSize)) {
		DBG_ASSERT(bytesContAvail - (*ap->a_runp * logBlockSize) < (2*logBlockSize));	/* ... at most 1 logical block accounted for by current block */
											/* ... plus some sub-logical block sized piece */
	}

	return (retval);
}
/* blktooff converts logical block number to file offset */

int
hfs_blktooff(ap)
	struct vop_blktooff_args /* {
		struct vnode *a_vp;
		daddr_t a_lblkno;
		off_t *a_offset;
	} */ *ap;
{
	if (ap->a_vp == NULL)
		return (EINVAL);
	*ap->a_offset = (off_t)ap->a_lblkno * PAGE_SIZE_64;

	return (0);
}
int
hfs_offtoblk(ap)
	struct vop_offtoblk_args /* {
		struct vnode *a_vp;
		off_t a_offset;
		daddr_t *a_lblkno;
	} */ *ap;
{
	if (ap->a_vp == NULL)
		return (EINVAL);
	*ap->a_lblkno = ap->a_offset / PAGE_SIZE_64;

	return (0);
}
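/*
 * Both conversions use the page size as the file's logical block size,
 * e.g. with 4K pages block 5 <-> offset 0x5000, and offsets are simply
 * truncated: offsets 0x5000 through 0x5FFF all map back to block 5.
 */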
int
hfs_cmap(ap)
	struct vop_cmap_args /* {
		struct vnode *a_vp;
		off_t a_foffset;
		size_t a_size;
		daddr_t *a_bpn;
		size_t *a_run;
		void *a_poff;
	} */ *ap;
{
	struct hfsnode *hp = VTOH(ap->a_vp);
	struct hfsmount *hfsmp = VTOHFS(ap->a_vp);
	size_t bytesContAvail = 0;
	int retval = E_NONE;
	int lockExtBtree;
	struct proc *p = NULL;

#define DEBUG_CMAP 0

	DBG_FUNC_NAME("hfs_cmap");
	DBG_VOP_LOCKS_DECL(2);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);

	DBG_VOP_LOCKS_INIT(0, ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS);
	DBG_IO(("\tMapped offset %qx --> ", ap->a_foffset));
	/*
	 * Check for underlying vnode requests and ensure that logical
	 * to physical mapping is requested.
	 */
	if (ap->a_bpn == NULL)
		return (0);

	if (lockExtBtree = hasOverflowExtents(hp))
	{
		if (retval = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE | LK_CANRECURSE, p))
			return (retval);
	}

	retval = MacToVFSError(
			MapFileBlockC (HFSTOVCB(hfsmp),
				       HTOFCB(hp),
				       ap->a_size,
				       ap->a_foffset,
				       ap->a_bpn,
				       &bytesContAvail));

	if (lockExtBtree) (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p);

	if ((retval == E_NONE) && (ap->a_run))
		*ap->a_run = bytesContAvail;

	if (ap->a_poff)
		*(int *)ap->a_poff = 0;

	DBG_IO(("%d:%d.\n", *ap->a_bpn, bytesContAvail));

	DBG_VOP_LOCKS_TEST(retval);
	return (retval);
}
/*
 * Calculate the logical to physical mapping if not done already,
 * then call the device strategy routine.
 *
# IN struct buf *bp;
 */
int
hfs_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	register struct buf *bp = ap->a_bp;
	register struct vnode *vp = bp->b_vp;
	register struct hfsnode *hp;
	int retval = 0;

	DBG_FUNC_NAME("hfs_strategy");

//	DBG_VOP_PRINT_FUNCNAME();DBG_VOP_CONT(("\n"));

	hp = VTOH(vp);
	if ( !(bp->b_flags & B_VECTORLIST)) {

		if (vp->v_type == VBLK || vp->v_type == VCHR)
			panic("hfs_strategy: device vnode passed!");

		if (bp->b_flags & B_PAGELIST) {
			/*
			 * if we have a page list associated with this bp,
			 * then go through cluster_bp since it knows how to
			 * deal with a page request that might span non-contiguous
			 * physical blocks on the disk...
			 */
			retval = cluster_bp(bp);
			vp = hp->h_meta->h_devvp;
			bp->b_dev = vp->v_rdev;

			return (retval);
		}

		/*
		 * If we don't already know the filesystem relative block number
		 * then get it using VOP_BMAP().  If VOP_BMAP() returns the block
		 * number as -1 then we've got a hole in the file.  HFS filesystems
		 * don't allow files with holes, so we shouldn't ever see this.
		 */
		if (bp->b_blkno == bp->b_lblkno) {
			if ((retval = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL))) {
				bp->b_error = retval;
				bp->b_flags |= B_ERROR;
				biodone(bp);
				return (retval);
			}
			if ((long)bp->b_blkno == -1)
				clrbuf(bp);
		}
		if ((long)bp->b_blkno == -1) {
			biodone(bp);
			return (0);
		}

		if (bp->b_validend == 0) {
			/* Record the exact size of the I/O transfer about to be made: */
			DBG_ASSERT(bp->b_validoff == 0);
			bp->b_validend = bp->b_bcount;
			DBG_ASSERT(bp->b_dirtyoff == 0);
		}
	}
	vp = hp->h_meta->h_devvp;
	bp->b_dev = vp->v_rdev;
	DBG_IO(("\t\t>>>%s: continuing w/ vp: 0x%x with logBlk Ox%X and phyBlk Ox%X\n", funcname, (u_int)vp, bp->b_lblkno, bp->b_blkno));

	return VOCALL (vp->v_op, VOFFSET(vop_strategy), ap);
}
/*
#% reallocblks	vp	L L L
#
 vop_reallocblks {
     IN struct vnode *vp;
     IN struct cluster_save *buflist;

 */
int
hfs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	DBG_FUNC_NAME("hfs_reallocblks");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));

	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS);

	/* Currently no support for clustering */		/* XXX */
	DBG_VOP_LOCKS_TEST(ENOSPC);
	return (ENOSPC);
}
/*
#% truncate	vp	L L L
#
 vop_truncate {
     IN struct vnode *vp;
     IN off_t length;
     IN int flags;	(IO_SYNC)
     IN struct ucred *cred;
     IN struct proc *p;
 };
 * Truncate the hfsnode hp to at most length size, freeing (or adding) the
 * blocks.
 */
int hfs_truncate(ap)
	struct vop_truncate_args /* {
		struct vnode *a_vp;
		off_t a_length;
		int a_flags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct hfsnode *hp = VTOH(vp);
	off_t length = ap->a_length;
	off_t actualBytesAdded;

	DBG_FUNC_NAME("hfs_truncate");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));
	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS);
	debug_check_blocksizes(ap->a_vp);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_START,
		(int)length, fcb->fcbEOF, fcb->fcbPLen, 0, 0);

	if (length < 0) {
		DBG_VOP_LOCKS_TEST(EINVAL);
		return (EINVAL);
	}

	if ((!ISHFSPLUS(VTOVCB(vp))) && (length > (off_t)MAXHFSFILESIZE)) {
		DBG_VOP_LOCKS_TEST(EFBIG);
		return (EFBIG);
	}

	if (vp->v_type != VREG && vp->v_type != VLNK) {
		DBG_VOP_LOCKS_TEST(EISDIR);
		return (EISDIR);	/* hfs doesn't support truncating of directories */
	}

	DBG_RW(("%s: truncate from Ox%lX to Ox%X bytes\n", funcname, fcb->fcbPLen, length));

	/*
	 * we cannot just check if fcb->fcbEOF == length (as an optimization)
	 * since there may be extra physical blocks that also need truncation
	 */

	if (fcb->fcbEOF > fcb->fcbMaxEOF)
		fcb->fcbMaxEOF = fcb->fcbEOF;

	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of fcbEOF is 0, length will be at least 1.
	 */
	if (length > fcb->fcbEOF) {
		struct buf *bp=NULL;

		/*
		 * If we don't have enough physical space then
		 * we need to extend the physical size.
		 */
		if (length > fcb->fcbPLen) {
			/* lock extents b-tree (also protects volume bitmap) */
			retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
			if (retval)
				goto Err_Exit;

			while ((length > fcb->fcbPLen) && (retval == E_NONE)) {
				bytesToAdd = length - fcb->fcbPLen;
				retval = MacToVFSError(
					ExtendFileC (HTOVCB(hp),
						     fcb,
						     bytesToAdd,
						     kEFAllMask,	/* allocate all requested bytes or none */
						     &actualBytesAdded));

				if (actualBytesAdded == 0 && retval == E_NONE) {
					if (length > fcb->fcbPLen)
						length = fcb->fcbPLen;
					break;
				}
			}

			(void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, ap->a_p);
		}

		DBG_ASSERT(length <= fcb->fcbPLen);
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_NONE,
			(int)length, fcb->fcbEOF, fcb->fcbPLen, 0, 0);
		if (! (ap->a_flags & IO_NOZEROFILL)) {

			if (UBCISVALID(vp) && retval == E_NONE) {
				u_long devBlockSize;

				if (length > fcb->fcbMaxEOF) {

					VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize);

					retval = cluster_write(vp, (struct uio *) 0, fcb->fcbEOF, length, fcb->fcbMaxEOF,
							(off_t)0, devBlockSize, ((ap->a_flags & IO_SYNC) | IO_HEADZEROFILL));
				}
			} else {
				/*
				 * zero out any new logical space...
				 */
				bytestoclear = length - fcb->fcbEOF;
				filePosition = fcb->fcbEOF;

				while (bytestoclear > 0) {
					logBlockNo = (daddr_t)(filePosition / PAGE_SIZE_64);
					blkOffset = (long)(filePosition & PAGE_MASK_64);

					if (((off_t)(fcb->fcbPLen) - ((off_t)logBlockNo * (off_t)PAGE_SIZE)) < PAGE_SIZE_64)
						logBlockSize = (off_t)(fcb->fcbPLen) - ((off_t)logBlockNo * PAGE_SIZE_64);
					else
						logBlockSize = PAGE_SIZE;

					if (logBlockSize < blkOffset)
						panic("hfs_truncate: bad logBlockSize computed\n");

					blockZeroCount = MIN(bytestoclear, logBlockSize - blkOffset);

					if (blkOffset == 0 && ((bytestoclear >= logBlockSize) || filePosition >= fcb->fcbEOF)) {
						bp = getblk(vp, logBlockNo, logBlockSize, 0, 0, BLK_WRITE);
					} else {
						retval = bread(vp, logBlockNo, logBlockSize, ap->a_cred, &bp);
						if (retval)
							goto Err_Exit;
					}

					bzero((char *)bp->b_data + blkOffset, blockZeroCount);

					bp->b_flags |= B_DIRTY | B_AGE;

					if (ap->a_flags & IO_SYNC)
						VOP_BWRITE(bp);
					else if (logBlockNo % 32)
						bawrite(bp);
					else
						VOP_BWRITE(bp);		/* wait after we issue 32 requests */
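					/*
					 * Every 32nd block (logBlockNo % 32 == 0) is written
					 * synchronously, so no more than 32 asynchronous
					 * zero-fill writes are ever queued ahead of us; the
					 * intervening blocks go out through bawrite().
					 */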
					bytestoclear -= blockZeroCount;
					filePosition += blockZeroCount;
				}
			}
		}

		fcb->fcbEOF = length;

		if (fcb->fcbEOF > fcb->fcbMaxEOF)
			fcb->fcbMaxEOF = fcb->fcbEOF;

		ubc_setsize(vp, (off_t)fcb->fcbEOF);	/* XXX check errors */
	} else { /* Shorten the size of the file */

		if (fcb->fcbEOF > length) {
			/*
			 * Any buffers that are past the truncation point need to be
			 * invalidated (to maintain buffer cache consistency).  For
			 * simplicity, we invalidate all the buffers by calling vinvalbuf.
			 */
			ubc_setsize(vp, (off_t)length);	/* XXX check errors */

			vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA;
			retval = vinvalbuf(vp, vflags, ap->a_cred, ap->a_p, 0, 0);

			/*
			 * For a TBE process the deallocation of the file blocks is
			 * delayed until the file is closed.  And hfs_close calls
			 * truncate with the IO_NDELAY flag set.  So when IO_NDELAY
			 * isn't set, we make sure this isn't a TBE process.
			 */
			if ((ap->a_flags & IO_NDELAY) || (!ISSET(ap->a_p->p_flag, P_TBE))) {

				/* lock extents b-tree (also protects volume bitmap) */
				retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
				if (retval)
					goto Err_Exit;

				retval = MacToVFSError(
					TruncateFileC(HTOVCB(hp), fcb, length, false));

				(void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, ap->a_p);
			}
		}
		fcb->fcbMaxEOF = length;

		fcb->fcbEOF = length;

		if (fcb->fcbFlags & fcbModifiedMask)
			hp->h_nodeflags |= IN_MODIFIED;
	}
	hp->h_nodeflags |= IN_CHANGE | IN_UPDATE;
	retval = VOP_UPDATE(vp, &tv, &tv, MNT_WAIT);
	if (retval)
		DBG_ERR(("Could not update truncate"));

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_NONE,
		-1, -1, -1, retval, 0);

Err_Exit:

	debug_check_blocksizes(ap->a_vp);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_END,
		(int)length, fcb->fcbEOF, fcb->fcbPLen, retval, 0);

	DBG_VOP_LOCKS_TEST(retval);
	return (retval);
}
/*
#% allocate	vp	L L L
#
 vop_allocate {
     IN struct vnode *vp;
     IN off_t length;
     IN int flags;
     IN struct ucred *cred;
     IN struct proc *p;
 };
 * allocate the hfsnode hp to at most length size
 */
int hfs_allocate(ap)
	struct vop_allocate_args /* {
		struct vnode *a_vp;
		off_t a_length;
		u_int32_t a_flags;
		off_t *a_bytesallocated;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct hfsnode *hp = VTOH(vp);
	off_t length = ap->a_length;
	off_t moreBytesRequested;
	off_t actualBytesAdded;
	int retval, retval2;
	UInt32 extendFlags = 0;		/* For call to ExtendFileC */

	DBG_FUNC_NAME("hfs_allocate");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));
	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS);
	/* Set the number of bytes allocated to 0 so that the caller will know that we
	   did nothing.  ExtendFileC will fill this in for us if we actually allocate space */

	*(ap->a_bytesallocated) = 0;

	/* Now for some error checking */

	if (length < (off_t)0) {
		DBG_VOP_LOCKS_TEST(EINVAL);
		return (EINVAL);
	}

	if (vp->v_type != VREG && vp->v_type != VLNK) {
		DBG_VOP_LOCKS_TEST(EISDIR);
		return (EISDIR);	/* hfs doesn't support truncating of directories */
	}

	/* Fill in the flags word for the call to Extend the file */

	if (ap->a_flags & ALLOCATECONTIG) {
		extendFlags |= kEFContigMask;
	}

	if (ap->a_flags & ALLOCATEALL) {
		extendFlags |= kEFAllMask;
	}

	startingPEOF = fcb->fcbPLen;

	if (ap->a_flags & ALLOCATEFROMPEOF) {
		length += fcb->fcbPLen;
	}

	DBG_RW(("%s: allocate from Ox%lX to Ox%X bytes\n", funcname, fcb->fcbPLen, (u_int)length));
	/* If no changes are necessary, then we're done */
	if (fcb->fcbPLen == length)
		goto Std_Exit;

	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of fcbPLen is 0, length will be at least 1.
	 */
	if (length > fcb->fcbPLen) {
		moreBytesRequested = length - fcb->fcbPLen;

		/* lock extents b-tree (also protects volume bitmap) */
		retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
		if (retval) goto Err_Exit;

		retval = MacToVFSError(
			ExtendFileC(HTOVCB(hp),
				    fcb,
				    moreBytesRequested,
				    extendFlags,
				    &actualBytesAdded));

		*(ap->a_bytesallocated) = actualBytesAdded;

		(void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, ap->a_p);

		DBG_ASSERT(length <= fcb->fcbPLen);

		/*
		 * if we get an error and no changes were made then exit
		 * otherwise we must do the VOP_UPDATE to reflect the changes
		 */
		if (retval && (startingPEOF == fcb->fcbPLen)) goto Err_Exit;

		/*
		 * Adjust actualBytesAdded to be allocation block aligned, not
		 * clump size aligned.
		 * NOTE: So what we are reporting does not affect reality
		 * until the file is closed, when we truncate the file to allocation
		 * block size.
		 */

		if ((actualBytesAdded != 0) && (moreBytesRequested < actualBytesAdded)) {
			u_long blks, blocksize;

			blocksize = VTOVCB(vp)->blockSize;
			blks = moreBytesRequested / blocksize;
			if ((blks * blocksize) != moreBytesRequested)
				blks++;

			*(ap->a_bytesallocated) = blks * blocksize;
		}
	} else { /* Shorten the size of the file */

		if (fcb->fcbEOF > length) {
			/*
			 * Any buffers that are past the truncation point need to be
			 * invalidated (to maintain buffer cache consistency).  For
			 * simplicity, we invalidate all the buffers by calling vinvalbuf.
			 */
			vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA;
			(void) vinvalbuf(vp, vflags, ap->a_cred, ap->a_p, 0, 0);
		}

		/* lock extents b-tree (also protects volume bitmap) */
		retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
		if (retval) goto Err_Exit;

		retval = MacToVFSError(
			TruncateFileC(HTOVCB(hp), fcb, length, false));

		(void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, ap->a_p);

		/*
		 * if we get an error and no changes were made then exit
		 * otherwise we must do the VOP_UPDATE to reflect the changes
		 */
		if (retval && (startingPEOF == fcb->fcbPLen)) goto Err_Exit;
		if (fcb->fcbFlags & fcbModifiedMask)
			hp->h_nodeflags |= IN_MODIFIED;

		DBG_ASSERT(length <= fcb->fcbPLen) // DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG

		if (fcb->fcbEOF > fcb->fcbPLen) {
			fcb->fcbEOF = fcb->fcbPLen;
			fcb->fcbMaxEOF = fcb->fcbPLen;

			ubc_setsize(vp, (off_t)fcb->fcbEOF);	/* XXX check errors */
		}
	}

Std_Exit:
	hp->h_nodeflags |= IN_CHANGE | IN_UPDATE;
	retval2 = VOP_UPDATE(vp, &tv, &tv, MNT_WAIT);

	if (retval == 0) retval = retval2;

Err_Exit:
	DBG_VOP_LOCKS_TEST(retval);
	return (retval);
}
/* pagein for HFS filesystem, similar to hfs_read(), but without cluster_read() */
int
hfs_pagein(ap)
	struct vop_pagein_args /* {
		struct vnode *a_vp,
		upl_t a_pl,
		vm_offset_t a_pl_offset,
		off_t a_f_offset,
		size_t a_size,
		struct ucred *a_cred,
		int a_flags
	} */ *ap;
{
	register struct vnode *vp;
	struct hfsnode *hp;
	FCB *fcb;
	long devBlockSize = 0;
	int retval;

	DBG_FUNC_NAME("hfs_pagein");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(vp);DBG_VOP_CONT(("\n"));
	DBG_VOP_LOCKS_INIT(0,vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS);

	vp = ap->a_vp;
	hp = VTOH(vp);
	fcb = HTOFCB(hp);

	if (vp->v_type != VREG && vp->v_type != VLNK)
		panic("hfs_pagein: vp not UBC type\n");

	DBG_VOP(("\tfile size Ox%X\n", (u_int)fcb->fcbEOF));
	DBG_VOP(("\tstarting at offset Ox%X of file, length Ox%X\n", (u_int)ap->a_f_offset, (u_int)ap->a_size));

	debug_check_blocksizes(vp);

	VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize);

	retval = cluster_pagein(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset,
				ap->a_size, (off_t)fcb->fcbEOF, devBlockSize,
				ap->a_flags);

	debug_check_blocksizes(vp);

	DBG_VOP_LOCKS_TEST(retval);
	return (retval);
}
/*
 * pageout for HFS filesystem.
 */
int
hfs_pageout(ap)
	struct vop_pageout_args /* {
		struct vnode *a_vp,
		upl_t a_pl,
		vm_offset_t a_pl_offset,
		off_t a_f_offset,
		size_t a_size,
		struct ucred *a_cred,
		int a_flags
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct hfsnode *hp = VTOH(vp);
	FCB *fcb = HTOFCB(hp);
	int retval;
	long devBlockSize = 0;

	DBG_FUNC_NAME("hfs_pageout");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(vp);DBG_VOP_CONT(("\n"));
	DBG_VOP(("\thfsnode 0x%x (%s)\n", (u_int)hp, H_NAME(hp)));
	DBG_VOP(("\tstarting at offset Ox%lX of file, length Ox%lX\n",
		(UInt32)ap->a_f_offset, (UInt32)ap->a_size));

	DBG_VOP_LOCKS_INIT(0, vp, VOPDBG_LOCKED,
		VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS);
	debug_check_blocksizes(vp);

	if (UBCINVALID(vp))
		panic("hfs_pageout: Not a VREG: vp=%x", vp);

	VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize);

	retval = cluster_pageout(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset, ap->a_size,
				 (off_t)fcb->fcbEOF, devBlockSize, ap->a_flags);

	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (retval == 0 && ap->a_cred && ap->a_cred->cr_uid != 0)
		hp->h_meta->h_mode &= ~(ISUID | ISGID);

	debug_check_blocksizes(vp);

	DBG_VOP_LOCKS_TEST(retval);
	return (retval);
}
/*
 * Intercept B-Tree node writes to unswap them if necessary.
#
# IN struct buf *bp;
 */
int
hfs_bwrite(ap)
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	register struct buf *bp = ap->a_bp;
	register struct vnode *vp = bp->b_vp;
	BlockDescriptor block;
	int retval = 0;

	DBG_FUNC_NAME("hfs_bwrite");

#if BYTE_ORDER == LITTLE_ENDIAN
	/* Trap B-Tree writes */
	if ((H_FILEID(VTOH(vp)) == kHFSExtentsFileID) ||
	    (H_FILEID(VTOH(vp)) == kHFSCatalogFileID)) {

		/* Swap if the B-Tree node is in native byte order */
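		/*
		 * The last two bytes of a B-Tree node hold the offset of the
		 * first record, which is always 0x000e (the 14-byte node
		 * descriptor size).  Reading 0x000e through a native UInt16
		 * load therefore means the node is still in native
		 * (little-endian) order and must be swapped to big-endian
		 * before it goes to disk.
		 */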
		if (((UInt16 *)((char *)bp->b_data + bp->b_bcount - 2))[0] == 0x000e) {
			/* Prepare the block pointer */
			block.blockHeader = bp;
			block.buffer = bp->b_data + IOBYTEOFFSETFORBLK(bp->b_blkno, VTOHFS(vp)->hfs_phys_block_size);
			block.blockReadFromDisk = (bp->b_flags & B_CACHE) == 0;	/* not found in cache ==> came from disk */
			block.blockSize = bp->b_bcount;

			/* Endian un-swap B-Tree node */
			SWAP_BT_NODE (&block, ISHFSPLUS (VTOVCB(vp)), H_FILEID(VTOH(vp)), 1);
		}

		/* We don't check to make sure that it's 0x0e00 because it could be all zeros */
	}
#endif
	retval = vn_bwrite (ap);

	return (retval);
}