Commit | Line | Data |
---|---|---|
1c79356b A |
1 | /* |
2 | * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * The contents of this file constitute Original Code as defined in and | |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
11 | * | |
12 | * This Original Code and all software distributed under the License are | |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the | |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
19 | * | |
20 | * @APPLE_LICENSE_HEADER_END@ | |
21 | */ | |
22 | /* @(#)hfs_readwrite.c 1.0 | |
23 | * | |
24 | * (c) 1990, 1992 NeXT Computer, Inc. All Rights Reserved | |
25 | * (c) 1998 Apple Computer, Inc. All Rights Reserved | |
26 | * | |
27 | * | |
28 | * hfs_readwrite.c -- vnode operations to deal with reading and writing files. | |
29 | * | |
30 | * MODIFICATION HISTORY: | |
31 | * 9-Nov-1999 Scott Roberts hfs_allocate now returns sizes based on allocation block boundaries (#2398794) | |
32 | * 3-Feb-1999 Pat Dirks Merged in Joe's change to hfs_truncate to skip vinvalbuf if LEOF isn't changing (#2302796) | |
33 | * Removed superfluous (and potentially dangerous) second call to vinvalbuf() in hfs_truncate. | |
34 | * 2-Dec-1998 Pat Dirks Added support for read/write bootstrap ioctls. | |
35 | * 10-Nov-1998 Pat Dirks Changed read/write/truncate logic to optimize block sizes for first extents of a file. | |
36 | * Changed hfs_strategy to correct I/O sizes from cluster code I/O requests in light of | |
37 | * different block sizing. Changed bexpand to handle RELEASE_BUFFER flag. | |
38 | * 22-Sep-1998 Don Brady Changed truncate zero-fill to use bwrite after several bawrites have been queued. | |
39 | * 11-Sep-1998 Pat Dirks Fixed buffering logic to not rely on B_CACHE, which is set for empty buffers that | |
40 | * have been pre-read by cluster_read (use b_validend > 0 instead). | |
41 | * 27-Aug-1998 Pat Dirks Changed hfs_truncate to use cluster_write in place of bawrite where possible. | |
42 | * 25-Aug-1998 Pat Dirks Changed hfs_write to do small device-block aligned writes into buffers without doing | |
43 | * read-ahead of the buffer. Added bexpand to deal with incomplete [dirty] buffers. | |
44 | * Fixed can_cluster macro to use MAXPHYSIO instead of MAXBSIZE. | |
45 | * 19-Aug-1998 Don Brady Remove optimization in hfs_truncate that prevented extra physical blocks from | |
46 | * being truncated (radar #2265750). Also set fcb->fcbEOF before calling vinvalbuf. | |
47 | * 7-Jul-1998 Pat Dirks Added code to honor IO_NOZEROFILL in hfs_truncate. | |
48 | * 16-Jul-1998 Don Brady In hfs_bmap use MAXPHYSIO instead of MAXBSIZE when calling MapFileBlockC (radar #2263753). | |
49 | * 16-Jul-1998 Don Brady Fix error handling in hfs_allocate (radar #2252265). | |
50 | * 04-Jul-1998 chw Synchronized options in hfs_allocate with flags in call to ExtendFileC | |
51 | * 25-Jun-1998 Don Brady Add missing blockNo incrementing to zero fill loop in hfs_truncate. | |
52 | * 22-Jun-1998 Don Brady Add bp = NULL assignment after brelse in hfs_read. | |
53 | * 4-Jun-1998 Pat Dirks Split off from hfs_vnodeops.c | |
54 | */ | |
55 | ||
56 | #include <sys/param.h> | |
57 | #include <sys/systm.h> | |
58 | #include <sys/resourcevar.h> | |
59 | #include <sys/kernel.h> | |
60 | #include <sys/fcntl.h> | |
61 | #include <sys/stat.h> | |
62 | #include <sys/buf.h> | |
63 | #include <sys/proc.h> | |
64 | //#include <mach/machine/vm_types.h> | |
65 | #include <sys/vnode.h> | |
66 | #include <sys/uio.h> | |
67 | ||
68 | #include <miscfs/specfs/specdev.h> | |
69 | ||
70 | ||
71 | #include <sys/ubc.h> | |
72 | #include <vm/vm_pageout.h> | |
73 | ||
74 | ||
75 | #include <sys/kdebug.h> | |
76 | ||
77 | #include "hfs.h" | |
78 | #include "hfs_dbg.h" | |
79 | #include "hfs_endian.h" | |
80 | #include "hfscommon/headers/FileMgrInternal.h" | |
81 | #include "hfscommon/headers/BTreesInternal.h" | |
82 | ||
83 | ||
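| /* A transfer size can be clustered when it is a multiple of 4K and no larger than half of MAXPHYSIO. */ | |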
84 | #define can_cluster(size) ((((size & (4096-1))) == 0) && (size <= (MAXPHYSIO/2))) | |
85 | ||
86 | enum { | |
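| /* 0x7FFFFFFF (2 GB - 1): the largest file size supported on plain (non-Plus) HFS volumes. */ | |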
87 | MAXHFSFILESIZE = 0x7FFFFFFF /* this needs to go in the mount structure */ | |
88 | }; | |
89 | ||
90 | extern u_int32_t GetLogicalBlockSize(struct vnode *vp); | |
91 | ||
92 | #if DBG_VOP_TEST_LOCKS | |
93 | extern void DbgVopTest(int maxSlots, int retval, VopDbgStoreRec *VopDbgStore, char *funcname); | |
94 | #endif | |
95 | ||
96 | #if HFS_DIAGNOSTIC | |
97 | void debug_check_blocksizes(struct vnode *vp); | |
98 | #endif | |
99 | ||
100 | /***************************************************************************** | |
101 | * | |
102 | * Operations on vnodes | |
103 | * | |
104 | *****************************************************************************/ | |
105 | ||
106 | /* | |
107 | #% read vp L L L | |
108 | # | |
109 | vop_read { | |
110 | IN struct vnode *vp; | |
111 | INOUT struct uio *uio; | |
112 | IN int ioflag; | |
113 | IN struct ucred *cred; | |
114 | ||
115 | */ | |
116 | ||
117 | int | |
118 | hfs_read(ap) | |
119 | struct vop_read_args /* { | |
120 | struct vnode *a_vp; | |
121 | struct uio *a_uio; | |
122 | int a_ioflag; | |
123 | struct ucred *a_cred; | |
124 | } */ *ap; | |
125 | { | |
126 | register struct vnode *vp; | |
127 | struct hfsnode *hp; | |
128 | register struct uio *uio; | |
129 | struct buf *bp; | |
130 | daddr_t logBlockNo; | |
131 | u_long fragSize, moveSize, startOffset, ioxfersize; | |
132 | long devBlockSize = 0; | |
133 | off_t bytesRemaining; | |
134 | int retval; | |
135 | u_short mode; | |
136 | FCB *fcb; | |
137 | ||
138 | DBG_FUNC_NAME("hfs_read"); | |
139 | DBG_VOP_LOCKS_DECL(1); | |
140 | DBG_VOP_PRINT_FUNCNAME(); | |
141 | DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); | |
142 | DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); | |
143 | ||
144 | vp = ap->a_vp; | |
145 | hp = VTOH(vp); | |
146 | fcb = HTOFCB(hp); | |
147 | mode = hp->h_meta->h_mode; | |
148 | uio = ap->a_uio; | |
149 | ||
150 | #if HFS_DIAGNOSTIC | |
151 | if (uio->uio_rw != UIO_READ) | |
152 | panic("%s: mode", funcname); | |
153 | #endif | |
154 | ||
155 | /* Can only read files */ | |
156 | if (ap->a_vp->v_type != VREG && ap->a_vp->v_type != VLNK) { | |
157 | DBG_VOP_LOCKS_TEST(EISDIR); | |
158 | return (EISDIR); | |
159 | } | |
160 | DBG_RW(("\tfile size Ox%X\n", (u_int)fcb->fcbEOF)); | |
161 | DBG_RW(("\tstarting at offset Ox%X of file, length Ox%X\n", (u_int)uio->uio_offset, (u_int)uio->uio_resid)); | |
162 | ||
163 | #if HFS_DIAGNOSTIC | |
164 | debug_check_blocksizes(vp); | |
165 | #endif | |
166 | ||
167 | /* | |
168 | * If they didn't ask for any data, then we are done. | |
169 | */ | |
170 | if (uio->uio_resid == 0) { | |
171 | DBG_VOP_LOCKS_TEST(E_NONE); | |
172 | return (E_NONE); | |
173 | } | |
174 | ||
175 | /* can't read from a negative offset */ | |
176 | if (uio->uio_offset < 0) { | |
177 | DBG_VOP_LOCKS_TEST(EINVAL); | |
178 | return (EINVAL); | |
179 | } | |
180 | ||
181 | if (uio->uio_offset > fcb->fcbEOF) { | |
182 | if ( (!ISHFSPLUS(VTOVCB(vp))) && (uio->uio_offset > (off_t)MAXHFSFILESIZE)) | |
183 | retval = EFBIG; | |
184 | else | |
185 | retval = E_NONE; | |
186 | ||
187 | DBG_VOP_LOCKS_TEST(retval); | |
188 | return (retval); | |
189 | } | |
190 | ||
191 | VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize); | |
192 | ||
193 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 12)) | DBG_FUNC_START, | |
194 | (int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0); | |
195 | ||
196 | if (UBCISVALID(vp)) | |
197 | retval = cluster_read(vp, uio, (off_t)fcb->fcbEOF, devBlockSize, 0); | |
198 | else { | |
199 | ||
200 | for (retval = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) { | |
201 | ||
202 | if ((bytesRemaining = (fcb->fcbEOF - uio->uio_offset)) <= 0) | |
203 | break; | |
204 | ||
205 | logBlockNo = (daddr_t)(uio->uio_offset / PAGE_SIZE_64); | |
206 | startOffset = (u_long) (uio->uio_offset & PAGE_MASK_64); | |
207 | fragSize = PAGE_SIZE; | |
208 | ||
209 | if (((logBlockNo * PAGE_SIZE) + fragSize) < fcb->fcbEOF) | |
210 | ioxfersize = fragSize; | |
211 | else { | |
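| /* Partial block at EOF: transfer only up to EOF, rounded up to a whole device block. */ | |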
212 | ioxfersize = fcb->fcbEOF - (logBlockNo * PAGE_SIZE); | |
213 | ioxfersize = (ioxfersize + (devBlockSize - 1)) & ~(devBlockSize - 1); | |
214 | } | |
215 | DBG_RW(("\tat logBlockNo Ox%X, with Ox%lX left to read\n", logBlockNo, (UInt32)uio->uio_resid)); | |
216 | moveSize = ioxfersize; | |
217 | DBG_RW(("\tmoveSize = Ox%lX; ioxfersize = Ox%lX; startOffset = Ox%lX.\n", | |
218 | moveSize, ioxfersize, startOffset)); | |
219 | DBG_ASSERT(moveSize >= startOffset); | |
220 | moveSize -= startOffset; | |
221 | ||
222 | if (bytesRemaining < moveSize) | |
223 | moveSize = bytesRemaining; | |
224 | ||
225 | if (uio->uio_resid < moveSize) { | |
226 | moveSize = uio->uio_resid; | |
227 | DBG_RW(("\treducing moveSize to Ox%lX (uio->uio_resid).\n", moveSize)); | |
228 | }; | |
229 | if (moveSize == 0) { | |
230 | break; | |
231 | }; | |
232 | ||
233 | DBG_RW(("\tat logBlockNo Ox%X, extent of Ox%lX, xfer of Ox%lX; moveSize = Ox%lX\n", logBlockNo, fragSize, ioxfersize, moveSize)); | |
234 | ||
235 | if (( uio->uio_offset + fragSize) >= fcb->fcbEOF) { | |
236 | retval = bread(vp, logBlockNo, ioxfersize, NOCRED, &bp); | |
237 | ||
238 | } else if (logBlockNo - 1 == vp->v_lastr && !(vp->v_flag & VRAOFF)) { | |
239 | daddr_t nextLogBlockNo = logBlockNo + 1; | |
240 | int nextsize; | |
241 | ||
242 | if (((nextLogBlockNo * PAGE_SIZE) + | |
243 | (daddr_t)fragSize) < fcb->fcbEOF) | |
244 | nextsize = fragSize; | |
245 | else { | |
246 | nextsize = fcb->fcbEOF - (nextLogBlockNo * PAGE_SIZE); | |
247 | nextsize = (nextsize + (devBlockSize - 1)) & ~(devBlockSize - 1); | |
248 | } | |
249 | retval = breadn(vp, logBlockNo, ioxfersize, &nextLogBlockNo, &nextsize, 1, NOCRED, &bp); | |
250 | } else { | |
251 | retval = bread(vp, logBlockNo, ioxfersize, NOCRED, &bp); | |
252 | }; | |
253 | ||
254 | if (retval != E_NONE) { | |
255 | if (bp) { | |
256 | brelse(bp); | |
257 | bp = NULL; | |
258 | } | |
259 | break; | |
260 | }; | |
261 | vp->v_lastr = logBlockNo; | |
262 | ||
263 | /* | |
264 | * We should only get non-zero b_resid when an I/O error | |
265 | * has occurred, which should cause us to break above. | |
266 | * However, if the short read did not cause an error, | |
267 | * then we want to ensure that we do not uiomove bad | |
268 | * or uninitialized data. | |
269 | */ | |
270 | ioxfersize -= bp->b_resid; | |
271 | ||
272 | if (ioxfersize < moveSize) { /* XXX PPD This should take the offset into account, too! */ | |
273 | if (ioxfersize == 0) | |
274 | break; | |
275 | moveSize = ioxfersize; | |
276 | } | |
277 | if ((startOffset + moveSize) > bp->b_bcount) | |
278 | panic("hfs_read: bad startOffset or moveSize\n"); | |
279 | ||
280 | DBG_RW(("\tcopying Ox%lX bytes from %lX; resid = Ox%lX...\n", moveSize, (char *)bp->b_data + startOffset, bp->b_resid)); | |
281 | ||
282 | if ((retval = uiomove((caddr_t)bp->b_data + startOffset, (int)moveSize, uio))) | |
283 | break; | |
284 | ||
285 | if (S_ISREG(mode) && | |
286 | (((startOffset + moveSize) == fragSize) || (uio->uio_offset == fcb->fcbEOF))) { | |
287 | bp->b_flags |= B_AGE; | |
288 | }; | |
289 | ||
290 | DBG_ASSERT(bp->b_bcount == bp->b_validend); | |
291 | ||
292 | brelse(bp); | |
293 | /* Start of loop resets bp to NULL before reaching outside this block... */ | |
294 | } | |
295 | ||
296 | if (bp != NULL) { | |
297 | DBG_ASSERT(bp->b_bcount == bp->b_validend); | |
298 | brelse(bp); | |
299 | }; | |
300 | } | |
301 | ||
302 | if (HTOVCB(hp)->vcbSigWord == kHFSPlusSigWord) | |
303 | hp->h_nodeflags |= IN_ACCESS; | |
304 | ||
305 | DBG_VOP_LOCKS_TEST(retval); | |
306 | ||
307 | #if HFS_DIAGNOSTIC | |
308 | debug_check_blocksizes(vp); | |
309 | #endif | |
310 | ||
311 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 12)) | DBG_FUNC_END, | |
312 | (int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0); | |
313 | ||
314 | return (retval); | |
315 | } | |
316 | ||
317 | /* | |
318 | * Write data to a file or directory. | |
319 | #% write vp L L L | |
320 | # | |
321 | vop_write { | |
322 | IN struct vnode *vp; | |
323 | INOUT struct uio *uio; | |
324 | IN int ioflag; | |
325 | IN struct ucred *cred; | |
326 | ||
327 | */ | |
328 | int | |
329 | hfs_write(ap) | |
330 | struct vop_write_args /* { | |
331 | struct vnode *a_vp; | |
332 | struct uio *a_uio; | |
333 | int a_ioflag; | |
334 | struct ucred *a_cred; | |
335 | } */ *ap; | |
336 | { | |
337 | struct hfsnode *hp = VTOH(ap->a_vp); | |
338 | struct uio *uio = ap->a_uio; | |
339 | struct vnode *vp = ap->a_vp ; | |
340 | struct vnode *dev; | |
341 | struct buf *bp; | |
342 | struct proc *p, *cp; | |
343 | struct timeval tv; | |
344 | FCB *fcb = HTOFCB(hp); | |
345 | ExtendedVCB *vcb = HTOVCB(hp); | |
346 | long devBlockSize = 0; | |
347 | daddr_t logBlockNo; | |
348 | long fragSize; | |
349 | off_t origFileSize, currOffset, writelimit, bytesToAdd; | |
350 | off_t actualBytesAdded; | |
351 | u_long blkoffset, resid, xfersize, clearSize; | |
352 | int flags, ioflag; | |
353 | int retval; | |
354 | DBG_FUNC_NAME("hfs_write"); | |
355 | DBG_VOP_LOCKS_DECL(1); | |
356 | DBG_VOP_PRINT_FUNCNAME(); | |
357 | DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); | |
358 | DBG_RW(("\thfsnode 0x%x (%s)\n", (u_int)hp, H_NAME(hp))); | |
359 | DBG_RW(("\tstarting at offset Ox%lX of file, length Ox%lX\n", (UInt32)uio->uio_offset, (UInt32)uio->uio_resid)); | |
360 | ||
361 | DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); | |
362 | ||
363 | dev = hp->h_meta->h_devvp; | |
364 | ||
365 | #if HFS_DIAGNOSTIC | |
366 | debug_check_blocksizes(vp); | |
367 | #endif | |
368 | ||
369 | if (uio->uio_offset < 0) { | |
370 | DBG_VOP_LOCKS_TEST(EINVAL); | |
371 | return (EINVAL); | |
372 | } | |
373 | ||
374 | if (uio->uio_resid == 0) { | |
375 | DBG_VOP_LOCKS_TEST(E_NONE); | |
376 | return (E_NONE); | |
377 | } | |
378 | ||
379 | if (ap->a_vp->v_type != VREG && ap->a_vp->v_type != VLNK) { /* Can only write files */ | |
380 | DBG_VOP_LOCKS_TEST(EISDIR); | |
381 | return (EISDIR); | |
382 | }; | |
383 | ||
384 | #if HFS_DIAGNOSTIC | |
385 | if (uio->uio_rw != UIO_WRITE) | |
386 | panic("%s: mode", funcname); | |
387 | #endif | |
388 | ||
389 | ioflag = ap->a_ioflag; | |
390 | uio = ap->a_uio; | |
391 | vp = ap->a_vp; | |
392 | ||
393 | if (ioflag & IO_APPEND) | |
394 | uio->uio_offset = fcb->fcbEOF; | |
395 | if ((hp->h_meta->h_pflags & APPEND) && uio->uio_offset != fcb->fcbEOF) | |
396 | return (EPERM); | |
397 | ||
398 | writelimit = uio->uio_offset + uio->uio_resid; | |
399 | ||
400 | /* | |
401 | * Maybe this should be above the vnode op call, but so long as | |
402 | * file servers have no limits, I don't think it matters. | |
403 | */ | |
404 | p = uio->uio_procp; | |
405 | if (vp->v_type == VREG && p && | |
406 | writelimit > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) { | |
407 | psignal(p, SIGXFSZ); | |
408 | return (EFBIG); | |
409 | }; | |
410 | VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize); | |
411 | ||
412 | resid = uio->uio_resid; | |
413 | origFileSize = fcb->fcbPLen; | |
414 | flags = ioflag & IO_SYNC ? B_SYNC : 0; | |
415 | ||
416 | DBG_RW(("\tLEOF is 0x%lX, PEOF is 0x%lX.\n", fcb->fcbEOF, fcb->fcbPLen)); | |
417 | ||
418 | /* | |
419 | NOTE: In the following loop there are two positions tracked: | |
420 | currOffset is the current I/O starting offset. currOffset is never >LEOF; the | |
421 | LEOF is nudged along with currOffset as data is zeroed or written. | |
422 | uio->uio_offset is the start of the current I/O operation. It may be arbitrarily | |
423 | beyond currOffset. | |
424 | ||
425 | The following is true at all times: | |
426 | ||
427 | currOffset <= LEOF <= uio->uio_offset <= writelimit | |
428 | */ | |
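| /* | |
|  * For example, if LEOF is 0x1000 and a write begins at uio_offset 0x3000, currOffset | |
|  * starts at 0x1000 and the gap from 0x1000 up to 0x3000 is zero-filled before any of | |
|  * the new user data is copied. | |
|  */ | |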
429 | currOffset = MIN(uio->uio_offset, fcb->fcbEOF); | |
430 | ||
431 | DBG_RW(("\tstarting I/O loop at 0x%lX.\n", (u_long)currOffset)); | |
432 | ||
433 | cp = current_proc(); | |
434 | ||
435 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_START, | |
436 | (int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0); | |
437 | retval = 0; | |
438 | ||
439 | if (fcb->fcbEOF > fcb->fcbMaxEOF) | |
440 | fcb->fcbMaxEOF = fcb->fcbEOF; | |
441 | ||
442 | /* Now test if we need to extend the file */ | |
443 | /* Doing so will adjust the fcbPLen for us */ | |
444 | ||
445 | while (writelimit > (off_t)fcb->fcbPLen) { | |
446 | ||
447 | bytesToAdd = writelimit - fcb->fcbPLen; | |
448 | DBG_RW(("\textending file by 0x%lX bytes; 0x%lX blocks free", | |
449 | (unsigned long)bytesToAdd, (unsigned long)vcb->freeBlocks)); | |
450 | ||
451 | /* lock extents b-tree (also protects volume bitmap) */ | |
452 | retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, cp); | |
453 | if (retval != E_NONE) | |
454 | break; | |
455 | ||
456 | retval = MacToVFSError( | |
457 | ExtendFileC (vcb, | |
458 | fcb, | |
459 | bytesToAdd, | |
460 | kEFContigBit, | |
461 | &actualBytesAdded)); | |
462 | ||
463 | (void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, cp); | |
464 | DBG_VOP_CONT(("\tactual bytes added = 0x%lX bytes, retval = %d...\n", actualBytesAdded, retval)); | |
465 | if ((actualBytesAdded == 0) && (retval == E_NONE)) | |
466 | retval = ENOSPC; | |
467 | if (retval != E_NONE) | |
468 | break; | |
469 | ||
470 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_NONE, | |
471 | (int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0); | |
472 | }; | |
473 | ||
474 | if (UBCISVALID(vp) && retval == E_NONE) { | |
475 | off_t filesize; | |
476 | off_t zero_off; | |
477 | int lflag; | |
478 | ||
479 | if (writelimit > fcb->fcbEOF) | |
480 | filesize = writelimit; | |
481 | else | |
482 | filesize = fcb->fcbEOF; | |
483 | ||
484 | lflag = (ioflag & IO_SYNC); | |
485 | ||
486 | if (uio->uio_offset > fcb->fcbMaxEOF) { | |
487 | zero_off = fcb->fcbMaxEOF; | |
488 | lflag |= IO_HEADZEROFILL; | |
489 | } else | |
490 | zero_off = 0; | |
491 | ||
492 | /* | |
493 | * if the write starts beyond the current EOF then | |
494 | * we'll zero fill from the current EOF to where the write begins | |
495 | */ | |
496 | retval = cluster_write(vp, uio, fcb->fcbEOF, filesize, zero_off, | |
497 | (off_t)0, devBlockSize, lflag); | |
498 | ||
499 | if (uio->uio_offset > fcb->fcbEOF) { | |
500 | fcb->fcbEOF = uio->uio_offset; | |
501 | ||
502 | if (fcb->fcbEOF > fcb->fcbMaxEOF) | |
503 | fcb->fcbMaxEOF = fcb->fcbEOF; | |
504 | ||
505 | ubc_setsize(vp, (off_t)fcb->fcbEOF); /* XXX check errors */ | |
506 | } | |
507 | if (resid > uio->uio_resid) | |
508 | hp->h_nodeflags |= IN_CHANGE | IN_UPDATE; | |
509 | ||
510 | } else { | |
511 | ||
512 | while (retval == E_NONE && uio->uio_resid > 0) { | |
513 | ||
514 | logBlockNo = currOffset / PAGE_SIZE; | |
515 | blkoffset = currOffset & PAGE_MASK; | |
516 | ||
517 | if (((off_t)(fcb->fcbPLen) - currOffset) < PAGE_SIZE_64) | |
518 | fragSize = (off_t)(fcb->fcbPLen) - ((off_t)logBlockNo * PAGE_SIZE_64); | |
519 | else | |
520 | fragSize = PAGE_SIZE; | |
521 | xfersize = fragSize - blkoffset; | |
522 | ||
523 | DBG_RW(("\tcurrOffset = Ox%lX, logBlockNo = Ox%X, blkoffset = Ox%lX, xfersize = Ox%lX, fragSize = Ox%lX.\n", | |
524 | (unsigned long)currOffset, logBlockNo, blkoffset, xfersize, fragSize)); | |
525 | ||
526 | /* Make any adjustments for boundary conditions */ | |
527 | if (currOffset + (off_t)xfersize > writelimit) { | |
528 | xfersize = writelimit - currOffset; | |
529 | DBG_RW(("\ttrimming xfersize to 0x%lX to match writelimit (uio_resid)...\n", xfersize)); | |
530 | }; | |
531 | ||
532 | /* | |
533 | * There is no need to read into bp if: | |
534 | * We start on a block boundary and will overwrite the whole block | |
535 | * | |
536 | * OR we start on a block boundary at the current EOF, so the block holds no existing data to preserve | |
537 | */ | |
538 | if ((blkoffset == 0) && (xfersize >= fragSize)) { | |
539 | DBG_RW(("\tRequesting %ld-byte block Ox%lX w/o read...\n", fragSize, (long)logBlockNo)); | |
540 | ||
541 | bp = getblk(vp, logBlockNo, fragSize, 0, 0, BLK_READ); | |
542 | retval = 0; | |
543 | ||
544 | if (bp->b_blkno == -1) { | |
545 | brelse(bp); | |
546 | retval = EIO; /* XXX */ | |
547 | break; | |
548 | } | |
549 | } else { | |
550 | ||
551 | if (currOffset == fcb->fcbEOF && blkoffset == 0) { | |
552 | bp = getblk(vp, logBlockNo, fragSize, 0, 0, BLK_READ); | |
553 | retval = 0; | |
554 | ||
555 | if (bp->b_blkno == -1) { | |
556 | brelse(bp); | |
557 | retval = EIO; /* XXX */ | |
558 | break; | |
559 | } | |
560 | ||
561 | } else { | |
562 | /* | |
563 | * This I/O transfer is not sufficiently aligned, so read the affected block into a buffer: | |
564 | */ | |
565 | DBG_VOP(("\tRequesting block Ox%X, size = 0x%08lX...\n", logBlockNo, fragSize)); | |
566 | retval = bread(vp, logBlockNo, fragSize, ap->a_cred, &bp); | |
567 | ||
568 | if (retval != E_NONE) { | |
569 | if (bp) | |
570 | brelse(bp); | |
571 | break; | |
572 | } | |
573 | } | |
574 | } | |
575 | ||
576 | /* See if we are starting to write within file boundaries: | |
577 | If not, then we need to present a "hole" for the area between | |
578 | the current EOF and the start of the current I/O operation: | |
579 | ||
580 | Note that currOffset is only less than uio_offset if uio_offset > LEOF... | |
581 | */ | |
582 | if (uio->uio_offset > currOffset) { | |
583 | clearSize = MIN(uio->uio_offset - currOffset, xfersize); | |
584 | DBG_RW(("\tzeroing Ox%lX bytes Ox%lX bytes into block Ox%X...\n", clearSize, blkoffset, logBlockNo)); | |
585 | bzero(bp->b_data + blkoffset, clearSize); | |
586 | currOffset += clearSize; | |
587 | blkoffset += clearSize; | |
588 | xfersize -= clearSize; | |
589 | }; | |
590 | ||
591 | if (xfersize > 0) { | |
592 | DBG_RW(("\tCopying Ox%lX bytes Ox%lX bytes into block Ox%X... ioflag == 0x%X\n", | |
593 | xfersize, blkoffset, logBlockNo, ioflag)); | |
594 | retval = uiomove((caddr_t)bp->b_data + blkoffset, (int)xfersize, uio); | |
595 | currOffset += xfersize; | |
596 | }; | |
597 | DBG_ASSERT((bp->b_bcount % devBlockSize) == 0); | |
598 | ||
599 | if (ioflag & IO_SYNC) { | |
600 | (void)VOP_BWRITE(bp); | |
601 | //DBG_RW(("\tissuing bwrite\n")); | |
602 | } else if ((xfersize + blkoffset) == fragSize) { | |
603 | //DBG_RW(("\tissuing bawrite\n")); | |
604 | bp->b_flags |= B_AGE; | |
605 | bawrite(bp); | |
606 | } else { | |
607 | //DBG_RW(("\tissuing bdwrite\n")); | |
608 | bdwrite(bp); | |
609 | }; | |
610 | ||
611 | /* Update the EOF if we just extended the file | |
612 | (the PEOF has already been moved out and the block mapping table has been updated): */ | |
613 | if (currOffset > fcb->fcbEOF) { | |
614 | DBG_VOP(("\textending EOF to 0x%lX...\n", (UInt32)fcb->fcbEOF)); | |
615 | fcb->fcbEOF = currOffset; | |
616 | ||
617 | if (fcb->fcbEOF > fcb->fcbMaxEOF) | |
618 | fcb->fcbMaxEOF = fcb->fcbEOF; | |
619 | ||
620 | if (UBCISVALID(vp)) | |
621 | ubc_setsize(vp, (off_t)fcb->fcbEOF); /* XXX check errors */ | |
622 | }; | |
623 | ||
624 | if (retval || (resid == 0)) | |
625 | break; | |
626 | hp->h_nodeflags |= IN_CHANGE | IN_UPDATE; | |
627 | }; | |
628 | }; | |
629 | /* | |
630 | * If we successfully wrote any data, and we are not the superuser | |
631 | * we clear the setuid and setgid bits as a precaution against | |
632 | * tampering. | |
633 | */ | |
634 | if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0) | |
635 | hp->h_meta->h_mode &= ~(ISUID | ISGID); | |
636 | ||
637 | if (retval) { | |
638 | if (ioflag & IO_UNIT) { | |
639 | (void)VOP_TRUNCATE(vp, origFileSize, | |
640 | ioflag & IO_SYNC, ap->a_cred, uio->uio_procp); | |
641 | uio->uio_offset -= resid - uio->uio_resid; | |
642 | uio->uio_resid = resid; | |
643 | } | |
644 | } else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) { | |
645 | tv = time; | |
646 | retval = VOP_UPDATE(vp, &tv, &tv, 1); | |
647 | } | |
648 | ||
649 | #if HFS_DIAGNOSTIC | |
650 | debug_check_blocksizes(vp); | |
651 | #endif | |
652 | ||
653 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_END, | |
654 | (int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0); | |
655 | ||
656 | DBG_VOP_LOCKS_TEST(retval); | |
657 | return (retval); | |
658 | } | |
659 | ||
660 | ||
661 | /* | |
662 | ||
663 | #% ioctl vp U U U | |
664 | # | |
665 | vop_ioctl { | |
666 | IN struct vnode *vp; | |
667 | IN u_long command; | |
668 | IN caddr_t data; | |
669 | IN int fflag; | |
670 | IN struct ucred *cred; | |
671 | IN struct proc *p; | |
672 | ||
673 | */ | |
674 | ||
675 | ||
676 | /* ARGSUSED */ | |
677 | int | |
678 | hfs_ioctl(ap) | |
679 | struct vop_ioctl_args /* { | |
680 | struct vnode *a_vp; | |
681 | int a_command; | |
682 | caddr_t a_data; | |
683 | int a_fflag; | |
684 | struct ucred *a_cred; | |
685 | struct proc *a_p; | |
686 | } */ *ap; | |
687 | { | |
688 | DBG_FUNC_NAME("hfs_ioctl"); | |
689 | DBG_VOP_LOCKS_DECL(1); | |
690 | DBG_VOP_PRINT_FUNCNAME(); | |
691 | DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); | |
692 | ||
693 | DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS); | |
694 | ||
695 | switch (ap->a_command) { | |
696 | ||
697 | case 1: /* issue an advisory read-ahead request (struct radvisory) */ | |
698 | { register struct hfsnode *hp; | |
699 | register struct vnode *vp; | |
700 | register struct radvisory *ra; | |
701 | FCB *fcb; | |
702 | int devBlockSize = 0; | |
703 | int error; | |
704 | ||
705 | vp = ap->a_vp; | |
706 | ||
707 | VOP_LEASE(vp, ap->a_p, ap->a_cred, LEASE_READ); | |
708 | vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p); | |
709 | ||
710 | ra = (struct radvisory *)(ap->a_data); | |
711 | hp = VTOH(vp); | |
712 | ||
713 | fcb = HTOFCB(hp); | |
714 | ||
715 | if (ra->ra_offset >= fcb->fcbEOF) { | |
716 | VOP_UNLOCK(vp, 0, ap->a_p); | |
717 | DBG_VOP_LOCKS_TEST(EFBIG); | |
718 | return (EFBIG); | |
719 | } | |
720 | VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize); | |
721 | ||
722 | error = advisory_read(vp, fcb->fcbEOF, ra->ra_offset, ra->ra_count, devBlockSize); | |
723 | VOP_UNLOCK(vp, 0, ap->a_p); | |
724 | ||
725 | DBG_VOP_LOCKS_TEST(error); | |
726 | return (error); | |
727 | } | |
728 | ||
729 | case 2: /* F_READBOOTBLOCKS */ | |
730 | case 3: /* F_WRITEBOOTBLOCKS */ | |
731 | { | |
732 | struct vnode *vp = ap->a_vp; | |
733 | struct hfsnode *hp = VTOH(vp); | |
734 | struct fbootstraptransfer *btd = (struct fbootstraptransfer *)ap->a_data; | |
735 | u_long devBlockSize; | |
736 | int error; | |
737 | struct iovec aiov; | |
738 | struct uio auio; | |
739 | u_long blockNumber; | |
740 | u_long blockOffset; | |
741 | u_long xfersize; | |
742 | struct buf *bp; | |
743 | ||
744 | if ((vp->v_flag & VROOT) == 0) return EINVAL; | |
745 | if (btd->fbt_offset + btd->fbt_length > 1024) return EINVAL; | |
746 | ||
747 | aiov.iov_base = btd->fbt_buffer; | |
748 | aiov.iov_len = btd->fbt_length; | |
749 | ||
750 | auio.uio_iov = &aiov; | |
751 | auio.uio_iovcnt = 1; | |
752 | auio.uio_offset = btd->fbt_offset; | |
753 | auio.uio_resid = btd->fbt_length; | |
754 | auio.uio_segflg = UIO_USERSPACE; | |
755 | auio.uio_rw = (ap->a_command == 3) ? UIO_WRITE : UIO_READ; /* F_WRITEBOOTSTRAP / F_READBOOTSTRAP */ | |
756 | auio.uio_procp = ap->a_p; | |
757 | ||
758 | VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize); | |
759 | ||
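| /* Copy the boot area one device block at a time, moving only the portion of each block that overlaps the request. */ | |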
760 | while (auio.uio_resid > 0) { | |
761 | blockNumber = auio.uio_offset / devBlockSize; | |
762 | error = bread(hp->h_meta->h_devvp, blockNumber, devBlockSize, ap->a_cred, &bp); | |
763 | if (error) { | |
764 | if (bp) brelse(bp); | |
765 | return error; | |
766 | }; | |
767 | ||
768 | blockOffset = auio.uio_offset % devBlockSize; | |
769 | xfersize = devBlockSize - blockOffset; | |
770 | error = uiomove((caddr_t)bp->b_data + blockOffset, (int)xfersize, &auio); | |
771 | if (error) { | |
772 | brelse(bp); | |
773 | return error; | |
774 | }; | |
775 | if (auio.uio_rw == UIO_WRITE) { | |
776 | error = VOP_BWRITE(bp); | |
777 | if (error) return error; | |
778 | } else { | |
779 | brelse(bp); | |
780 | }; | |
781 | }; | |
782 | }; | |
783 | return 0; | |
784 | ||
785 | default: | |
786 | DBG_VOP_LOCKS_TEST(ENOTTY); | |
787 | return (ENOTTY); | |
788 | } | |
789 | ||
790 | return 0; | |
791 | } | |
792 | ||
793 | /* ARGSUSED */ | |
794 | int | |
795 | hfs_select(ap) | |
796 | struct vop_select_args /* { | |
797 | struct vnode *a_vp; | |
798 | int a_which; | |
799 | int a_fflags; | |
800 | struct ucred *a_cred; | |
801 | struct proc *a_p; | |
802 | } */ *ap; | |
803 | { | |
804 | DBG_FUNC_NAME("hfs_select"); | |
805 | DBG_VOP_LOCKS_DECL(1); | |
806 | DBG_VOP_PRINT_FUNCNAME(); | |
807 | DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); | |
808 | ||
809 | DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS); | |
810 | ||
811 | /* | |
812 | * We should really check to see if I/O is possible. | |
813 | */ | |
814 | DBG_VOP_LOCKS_TEST(1); | |
815 | return (1); | |
816 | } | |
817 | ||
818 | ||
819 | ||
820 | /* | |
821 | * Mmap a file | |
822 | * | |
823 | * NB Currently unsupported. | |
824 | # XXX - not used | |
825 | # | |
826 | vop_mmap { | |
827 | IN struct vnode *vp; | |
828 | IN int fflags; | |
829 | IN struct ucred *cred; | |
830 | IN struct proc *p; | |
831 | ||
832 | */ | |
833 | ||
834 | /* ARGSUSED */ | |
835 | ||
836 | int | |
837 | hfs_mmap(ap) | |
838 | struct vop_mmap_args /* { | |
839 | struct vnode *a_vp; | |
840 | int a_fflags; | |
841 | struct ucred *a_cred; | |
842 | struct proc *a_p; | |
843 | } */ *ap; | |
844 | { | |
845 | DBG_FUNC_NAME("hfs_mmap"); | |
846 | DBG_VOP_LOCKS_DECL(1); | |
847 | DBG_VOP_PRINT_FUNCNAME(); | |
848 | DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); | |
849 | ||
850 | DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS); | |
851 | ||
852 | DBG_VOP_LOCKS_TEST(EINVAL); | |
853 | return (EINVAL); | |
854 | } | |
855 | ||
856 | ||
857 | ||
858 | /* | |
859 | * Seek on a file | |
860 | * | |
861 | * Nothing to do, so just return. | |
862 | # XXX - not used | |
863 | # Needs work: Is newoff right? What's it mean? | |
864 | # | |
865 | vop_seek { | |
866 | IN struct vnode *vp; | |
867 | IN off_t oldoff; | |
868 | IN off_t newoff; | |
869 | IN struct ucred *cred; | |
870 | */ | |
871 | /* ARGSUSED */ | |
872 | int | |
873 | hfs_seek(ap) | |
874 | struct vop_seek_args /* { | |
875 | struct vnode *a_vp; | |
876 | off_t a_oldoff; | |
877 | off_t a_newoff; | |
878 | struct ucred *a_cred; | |
879 | } */ *ap; | |
880 | { | |
881 | DBG_FUNC_NAME("hfs_seek"); | |
882 | DBG_VOP_LOCKS_DECL(1); | |
883 | DBG_VOP_PRINT_FUNCNAME(); | |
884 | DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); | |
885 | ||
886 | DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS); | |
887 | ||
888 | DBG_VOP_LOCKS_TEST(E_NONE); | |
889 | return (E_NONE); | |
890 | } | |
891 | ||
892 | ||
893 | /* | |
894 | * Bmap converts the logical block number of a file to its physical block | |
895 | * number on the disk. | |
896 | */ | |
897 | ||
898 | /* | |
899 | * vp - address of the vnode for the file | |
900 | * bn - which logical block to convert to a physical block number. | |
901 | * vpp - returns the vnode for the block special file holding the filesystem | |
902 | * containing the file of interest | |
903 | * bnp - address of where to return the filesystem physical block number | |
904 | #% bmap vp L L L | |
905 | #% bmap vpp - U - | |
906 | # | |
907 | vop_bmap { | |
908 | IN struct vnode *vp; | |
909 | IN daddr_t bn; | |
910 | OUT struct vnode **vpp; | |
911 | IN daddr_t *bnp; | |
912 | OUT int *runp; | |
913 | */ | |
914 | /* | |
915 | * Converts a logical block number to a physical block, and optionally returns | |
916 | * the number of remaining blocks in a run. The logical block is based on hfsNode.logBlockSize. | |
917 | * The physical block number is based on the device block size, currently 512 bytes. | |
918 | * The block run is returned in logical blocks, and is the REMAINING number of blocks in the run. | |
919 | */ | |
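| /* | |
|  * For example, with a 4K logical block size and 32K of contiguous bytes remaining in | |
|  * the extent, *ap->a_runp is reported as 7: seven more logical blocks follow the | |
|  * block being mapped. | |
|  */ | |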
920 | ||
921 | int | |
922 | hfs_bmap(ap) | |
923 | struct vop_bmap_args /* { | |
924 | struct vnode *a_vp; | |
925 | daddr_t a_bn; | |
926 | struct vnode **a_vpp; | |
927 | daddr_t *a_bnp; | |
928 | int *a_runp; | |
929 | } */ *ap; | |
930 | { | |
931 | struct hfsnode *hp = VTOH(ap->a_vp); | |
932 | struct hfsmount *hfsmp = VTOHFS(ap->a_vp); | |
933 | int retval = E_NONE; | |
934 | daddr_t logBlockSize; | |
935 | size_t bytesContAvail = 0; | |
936 | struct proc *p = NULL; | |
937 | int lockExtBtree; | |
938 | ||
939 | #define DEBUG_BMAP 0 | |
940 | #if DEBUG_BMAP | |
941 | DBG_FUNC_NAME("hfs_bmap"); | |
942 | DBG_VOP_LOCKS_DECL(2); | |
943 | DBG_VOP_PRINT_FUNCNAME(); | |
944 | DBG_VOP_PRINT_VNODE_INFO(ap->a_vp); | |
945 | ||
946 | DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); | |
947 | if (ap->a_vpp != NULL) { | |
948 | DBG_VOP_LOCKS_INIT(1,*ap->a_vpp, VOPDBG_IGNORE, VOPDBG_UNLOCKED, VOPDBG_IGNORE, VOPDBG_POS); | |
949 | } else { | |
950 | DBG_VOP_LOCKS_INIT(1,NULL, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS); | |
951 | }; | |
952 | #endif | |
953 | ||
954 | DBG_IO(("\tMapped blk %d --> ", ap->a_bn)); | |
955 | /* | |
956 | * Check for underlying vnode requests and ensure that logical | |
957 | * to physical mapping is requested. | |
958 | */ | |
959 | if (ap->a_vpp != NULL) | |
960 | *ap->a_vpp = VTOH(ap->a_vp)->h_meta->h_devvp; | |
961 | if (ap->a_bnp == NULL) | |
962 | return (0); | |
963 | ||
964 | lockExtBtree = hasOverflowExtents(hp); | |
965 | if (lockExtBtree) | |
966 | { | |
967 | p = current_proc(); | |
968 | retval = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE | LK_CANRECURSE, p); | |
969 | if (retval) | |
970 | return (retval); | |
971 | } | |
972 | ||
973 | logBlockSize = GetLogicalBlockSize(ap->a_vp); | |
974 | ||
975 | retval = MacToVFSError( | |
976 | MapFileBlockC (HFSTOVCB(hfsmp), | |
977 | HTOFCB(hp), | |
978 | MAXPHYSIO, | |
979 | (off_t)(ap->a_bn * logBlockSize), | |
980 | ap->a_bnp, | |
981 | &bytesContAvail)); | |
982 | ||
983 | if (lockExtBtree) (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p); | |
984 | ||
985 | if (retval == E_NONE) { | |
986 | /* Figure out how many read ahead blocks there are */ | |
987 | if (ap->a_runp != NULL) { | |
988 | if (can_cluster(logBlockSize)) { | |
989 | /* Make sure this result never goes negative: */ | |
990 | *ap->a_runp = (bytesContAvail < logBlockSize) ? 0 : (bytesContAvail / logBlockSize) - 1; | |
991 | } else { | |
992 | *ap->a_runp = 0; | |
993 | }; | |
994 | }; | |
995 | }; | |
996 | ||
997 | DBG_IO(("%d:%d.\n", *ap->a_bnp, (bytesContAvail < logBlockSize) ? 0 : (bytesContAvail / logBlockSize) - 1)); | |
998 | ||
999 | #if DEBUG_BMAP | |
1000 | ||
1001 | DBG_VOP_LOCKS_TEST(retval); | |
1002 | #endif | |
1003 | ||
1004 | if (ap->a_runp) { | |
1005 | DBG_ASSERT((*ap->a_runp * logBlockSize) < bytesContAvail); /* At least *ap->a_runp blocks left and ... */ | |
1006 | if (can_cluster(logBlockSize)) { | |
1007 | DBG_ASSERT(bytesContAvail - (*ap->a_runp * logBlockSize) < (2*logBlockSize)); /* ... at most 1 logical block accounted for by current block */ | |
1008 | /* ... plus some sub-logical block sized piece */ | |
1009 | }; | |
1010 | }; | |
1011 | ||
1012 | return (retval); | |
1013 | } | |
1014 | ||
1015 | /* blktooff converts logical block number to file offset */ | |
1016 | ||
1017 | int | |
1018 | hfs_blktooff(ap) | |
1019 | struct vop_blktooff_args /* { | |
1020 | struct vnode *a_vp; | |
1021 | daddr_t a_lblkno; | |
1022 | off_t *a_offset; | |
1023 | } */ *ap; | |
1024 | { | |
1025 | if (ap->a_vp == NULL) | |
1026 | return (EINVAL); | |
1027 | *ap->a_offset = (off_t)ap->a_lblkno * PAGE_SIZE_64; | |
1028 | ||
1029 | return(0); | |
1030 | } | |
1031 | ||
1032 | int | |
1033 | hfs_offtoblk(ap) | |
1034 | struct vop_offtoblk_args /* { | |
1035 | struct vnode *a_vp; | |
1036 | off_t a_offset; | |
1037 | daddr_t *a_lblkno; | |
1038 | } */ *ap; | |
1039 | { | |
1040 | long lbsize, boff; | |
1041 | ||
1042 | if (ap->a_vp == NULL) | |
1043 | return (EINVAL); | |
1044 | *ap->a_lblkno = ap->a_offset / PAGE_SIZE_64; | |
1045 | ||
1046 | return(0); | |
1047 | } | |
1048 | ||
1049 | int | |
1050 | hfs_cmap(ap) | |
1051 | struct vop_cmap_args /* { | |
1052 | struct vnode *a_vp; | |
1053 | off_t a_foffset; | |
1054 | size_t a_size; | |
1055 | daddr_t *a_bpn; | |
1056 | size_t *a_run; | |
1057 | void *a_poff; | |
1058 | } */ *ap; | |
1059 | { | |
1060 | struct hfsnode *hp = VTOH(ap->a_vp); | |
1061 | struct hfsmount *hfsmp = VTOHFS(ap->a_vp); | |
1062 | size_t bytesContAvail = 0; | |
1063 | int retval = E_NONE; | |
1064 | int lockExtBtree; | |
1065 | struct proc *p = NULL; | |
1066 | ||
1067 | #define DEBUG_CMAP 0 | |
1068 | #if DEBUG_CMAP | |
1069 | DBG_FUNC_NAME("hfs_cmap"); | |
1070 | DBG_VOP_LOCKS_DECL(2); | |
1071 | DBG_VOP_PRINT_FUNCNAME(); | |
1072 | DBG_VOP_PRINT_VNODE_INFO(ap->a_vp); | |
1073 | ||
1074 | DBG_VOP_LOCKS_INIT(0, ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); | |
1075 | #endif | |
1076 | ||
1077 | DBG_IO(("\tMapped offset %qx --> ", ap->a_foffset)); | |
1078 | /* | |
1079 | * Check for underlying vnode requests and ensure that logical | |
1080 | * to physical mapping is requested. | |
1081 | */ | |
1082 | if (ap->a_bpn == NULL) | |
1083 | return (0); | |
1084 | ||
1085 | if (lockExtBtree = hasOverflowExtents(hp)) | |
1086 | { | |
1087 | p = current_proc(); | |
1088 | if (retval = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE | LK_CANRECURSE, p)) | |
1089 | return (retval); | |
1090 | } | |
1091 | retval = MacToVFSError( | |
1092 | MapFileBlockC (HFSTOVCB(hfsmp), | |
1093 | HTOFCB(hp), | |
1094 | ap->a_size, | |
1095 | ap->a_foffset, | |
1096 | ap->a_bpn, | |
1097 | &bytesContAvail)); | |
1098 | ||
1099 | if (lockExtBtree) (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p); | |
1100 | ||
1101 | if ((retval == E_NONE) && (ap->a_run)) | |
1102 | *ap->a_run = bytesContAvail; | |
1103 | ||
1104 | if (ap->a_poff) | |
1105 | *(int *)ap->a_poff = 0; | |
1106 | ||
1107 | DBG_IO(("%d:%d.\n", *ap->a_bpn, bytesContAvail)); | |
1108 | ||
1109 | #if DEBUG_CMAP | |
1110 | ||
1111 | DBG_VOP_LOCKS_TEST(retval); | |
1112 | #endif | |
1113 | ||
1114 | return (retval); | |
1115 | ||
1116 | } | |
1117 | ||
1118 | /* | |
1119 | * Calculate the logical to physical mapping if not done already, | |
1120 | * then call the device strategy routine. | |
1121 | # | |
1122 | #vop_strategy { | |
1123 | # IN struct buf *bp; | |
1124 | */ | |
1125 | int | |
1126 | hfs_strategy(ap) | |
1127 | struct vop_strategy_args /* { | |
1128 | struct buf *a_bp; | |
1129 | } */ *ap; | |
1130 | { | |
1131 | register struct buf *bp = ap->a_bp; | |
1132 | register struct vnode *vp = bp->b_vp; | |
1133 | register struct hfsnode *hp; | |
1134 | int retval = 0; | |
1135 | ||
1136 | DBG_FUNC_NAME("hfs_strategy"); | |
1137 | ||
1138 | // DBG_VOP_PRINT_FUNCNAME();DBG_VOP_CONT(("\n")); | |
1139 | ||
1140 | hp = VTOH(vp); | |
1141 | ||
1142 | if ( !(bp->b_flags & B_VECTORLIST)) { | |
1143 | ||
1144 | if (vp->v_type == VBLK || vp->v_type == VCHR) | |
1145 | panic("hfs_strategy: device vnode passed!"); | |
1146 | ||
1147 | if (bp->b_flags & B_PAGELIST) { | |
1148 | /* | |
1149 | * if we have a page list associated with this bp, | |
1150 | * then go through cluster_bp since it knows how to | |
1151 | * deal with a page request that might span non-contiguous | |
1152 | * physical blocks on the disk... | |
1153 | */ | |
1154 | retval = cluster_bp(bp); | |
1155 | vp = hp->h_meta->h_devvp; | |
1156 | bp->b_dev = vp->v_rdev; | |
1157 | ||
1158 | return (retval); | |
1159 | } | |
1160 | /* | |
1161 | * If we don't already know the filesystem relative block number | |
1162 | * then get it using VOP_BMAP(). If VOP_BMAP() returns the block | |
1163 | * number as -1 then we've got a hole in the file. HFS filesystems | |
1164 | * don't allow files with holes, so we shouldn't ever see this. | |
1165 | */ | |
1166 | if (bp->b_blkno == bp->b_lblkno) { | |
1167 | if ((retval = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL))) { | |
1168 | bp->b_error = retval; | |
1169 | bp->b_flags |= B_ERROR; | |
1170 | biodone(bp); | |
1171 | return (retval); | |
1172 | } | |
1173 | if ((long)bp->b_blkno == -1) | |
1174 | clrbuf(bp); | |
1175 | } | |
1176 | if ((long)bp->b_blkno == -1) { | |
1177 | biodone(bp); | |
1178 | return (0); | |
1179 | } | |
1180 | if (bp->b_validend == 0) { | |
1181 | /* Record the exact size of the I/O transfer about to be made: */ | |
1182 | DBG_ASSERT(bp->b_validoff == 0); | |
1183 | bp->b_validend = bp->b_bcount; | |
1184 | DBG_ASSERT(bp->b_dirtyoff == 0); | |
1185 | }; | |
1186 | } | |
1187 | vp = hp->h_meta->h_devvp; | |
1188 | bp->b_dev = vp->v_rdev; | |
1189 | DBG_IO(("\t\t>>>%s: continuing w/ vp: 0x%x with logBlk Ox%X and phyBlk Ox%X\n", funcname, (u_int)vp, bp->b_lblkno, bp->b_blkno)); | |
1190 | ||
1191 | return VOCALL (vp->v_op, VOFFSET(vop_strategy), ap); | |
1192 | } | |
1193 | ||
1194 | ||
1195 | /* | |
1196 | #% reallocblks vp L L L | |
1197 | # | |
1198 | vop_reallocblks { | |
1199 | IN struct vnode *vp; | |
1200 | IN struct cluster_save *buflist; | |
1201 | ||
1202 | */ | |
1203 | ||
1204 | int | |
1205 | hfs_reallocblks(ap) | |
1206 | struct vop_reallocblks_args /* { | |
1207 | struct vnode *a_vp; | |
1208 | struct cluster_save *a_buflist; | |
1209 | } */ *ap; | |
1210 | { | |
1211 | DBG_FUNC_NAME("hfs_reallocblks"); | |
1212 | DBG_VOP_LOCKS_DECL(1); | |
1213 | DBG_VOP_PRINT_FUNCNAME(); | |
1214 | DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); | |
1215 | ||
1216 | DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); | |
1217 | ||
1218 | /* Currently no support for clustering */ /* XXX */ | |
1219 | DBG_VOP_LOCKS_TEST(ENOSPC); | |
1220 | return (ENOSPC); | |
1221 | } | |
1222 | ||
1223 | ||
1224 | ||
1225 | /* | |
1226 | # | |
1227 | #% truncate vp L L L | |
1228 | # | |
1229 | vop_truncate { | |
1230 | IN struct vnode *vp; | |
1231 | IN off_t length; | |
1232 | IN int flags; (IO_SYNC) | |
1233 | IN struct ucred *cred; | |
1234 | IN struct proc *p; | |
1235 | }; | |
1236 | * Truncate the file referenced by hfsnode hp to at most 'length' bytes, freeing | |
1237 | * (or adding) disk blocks as needed. | |
1238 | */ | |
1239 | int hfs_truncate(ap) | |
1240 | struct vop_truncate_args /* { | |
1241 | struct vnode *a_vp; | |
1242 | off_t a_length; | |
1243 | int a_flags; | |
1244 | struct ucred *a_cred; | |
1245 | struct proc *a_p; | |
1246 | } */ *ap; | |
1247 | { | |
1248 | register struct vnode *vp = ap->a_vp; | |
1249 | register struct hfsnode *hp = VTOH(vp); | |
1250 | off_t length = ap->a_length; | |
1251 | long vflags; | |
1252 | struct timeval tv; | |
1253 | int retval; | |
1254 | FCB *fcb; | |
1255 | off_t bytesToAdd; | |
1256 | off_t actualBytesAdded; | |
1257 | DBG_FUNC_NAME("hfs_truncate"); | |
1258 | DBG_VOP_LOCKS_DECL(1); | |
1259 | DBG_VOP_PRINT_FUNCNAME(); | |
1260 | DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); | |
1261 | DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); | |
1262 | ||
1263 | #if HFS_DIAGNOSTIC | |
1264 | debug_check_blocksizes(ap->a_vp); | |
1265 | #endif | |
1266 | ||
1267 | fcb = HTOFCB(hp); | |
1268 | ||
1269 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_START, | |
1270 | (int)length, fcb->fcbEOF, fcb->fcbPLen, 0, 0); | |
1271 | ||
1272 | if (length < 0) { | |
1273 | DBG_VOP_LOCKS_TEST(EINVAL); | |
1274 | return (EINVAL); | |
1275 | } | |
1276 | ||
1277 | if ((!ISHFSPLUS(VTOVCB(vp))) && (length > (off_t)MAXHFSFILESIZE)) { | |
1278 | DBG_VOP_LOCKS_TEST(EFBIG); | |
1279 | return (EFBIG); | |
1280 | } | |
1281 | ||
1282 | if (vp->v_type != VREG && vp->v_type != VLNK) { | |
1283 | DBG_VOP_LOCKS_TEST(EISDIR); | |
1284 | return (EISDIR); /* hfs doesn't support truncating directories */ | |
1285 | } | |
1286 | ||
1287 | tv = time; | |
1288 | retval = E_NONE; | |
1289 | ||
1290 | DBG_RW(("%s: truncate from Ox%lX to Ox%X bytes\n", funcname, fcb->fcbPLen, length)); | |
1291 | ||
1292 | /* | |
1293 | * we cannot just check if fcb->fcbEOF == length (as an optimization) | |
1294 | * since there may be extra physical blocks that also need truncation | |
1295 | */ | |
1296 | ||
1297 | if (fcb->fcbEOF > fcb->fcbMaxEOF) | |
1298 | fcb->fcbMaxEOF = fcb->fcbEOF; | |
1299 | ||
1300 | /* | |
1301 | * Lengthen the size of the file. We must ensure that the | |
1302 | * last byte of the file is allocated. Since the smallest | |
1303 | * value of fcbEOF is 0, length will be at least 1. | |
1304 | */ | |
1305 | if (length > fcb->fcbEOF) { | |
1306 | off_t filePosition; | |
1307 | daddr_t logBlockNo; | |
1308 | long logBlockSize; | |
1309 | long blkOffset; | |
1310 | off_t bytestoclear; | |
1311 | int blockZeroCount; | |
1312 | struct buf *bp=NULL; | |
1313 | ||
1314 | /* | |
1315 | * If we don't have enough physical space then | |
1316 | * we need to extend the physical size. | |
1317 | */ | |
1318 | if (length > fcb->fcbPLen) { | |
1319 | /* lock extents b-tree (also protects volume bitmap) */ | |
1320 | retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p); | |
1321 | if (retval) | |
1322 | goto Err_Exit; | |
1323 | ||
1324 | while ((length > fcb->fcbPLen) && (retval == E_NONE)) { | |
1325 | bytesToAdd = length - fcb->fcbPLen; | |
1326 | retval = MacToVFSError( | |
1327 | ExtendFileC (HTOVCB(hp), | |
1328 | fcb, | |
1329 | bytesToAdd, | |
1330 | kEFAllMask, /* allocate all requested bytes or none */ | |
1331 | &actualBytesAdded)); | |
1332 | ||
1333 | if (actualBytesAdded == 0 && retval == E_NONE) { | |
1334 | if (length > fcb->fcbPLen) | |
1335 | length = fcb->fcbPLen; | |
1336 | break; | |
1337 | } | |
1338 | } | |
1339 | (void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, ap->a_p); | |
1340 | if (retval) | |
1341 | goto Err_Exit; | |
1342 | ||
1343 | DBG_ASSERT(length <= fcb->fcbPLen); | |
1344 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_NONE, | |
1345 | (int)length, fcb->fcbEOF, fcb->fcbPLen, 0, 0); | |
1346 | } | |
1347 | ||
1348 | if (! (ap->a_flags & IO_NOZEROFILL)) { | |
1349 | ||
1350 | if (UBCISVALID(vp) && retval == E_NONE) { | |
1351 | u_long devBlockSize; | |
1352 | ||
1353 | if (length > fcb->fcbMaxEOF) { | |
1354 | ||
1355 | VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize); | |
1356 | ||
1357 | retval = cluster_write(vp, (struct uio *) 0, fcb->fcbEOF, length, fcb->fcbMaxEOF, | |
1358 | (off_t)0, devBlockSize, ((ap->a_flags & IO_SYNC) | IO_HEADZEROFILL)); | |
1359 | ||
1360 | if (retval) | |
1361 | goto Err_Exit; | |
1362 | } | |
1363 | } else { | |
1364 | ||
1365 | /* | |
1366 | * zero out any new logical space... | |
1367 | */ | |
1368 | bytestoclear = length - fcb->fcbEOF; | |
1369 | filePosition = fcb->fcbEOF; | |
1370 | ||
1371 | while (bytestoclear > 0) { | |
1372 | logBlockNo = (daddr_t)(filePosition / PAGE_SIZE_64); | |
1373 | blkOffset = (long)(filePosition & PAGE_MASK_64); | |
1374 | ||
1375 | if (((off_t)(fcb->fcbPLen) - ((off_t)logBlockNo * (off_t)PAGE_SIZE)) < PAGE_SIZE_64) | |
1376 | logBlockSize = (off_t)(fcb->fcbPLen) - ((off_t)logBlockNo * PAGE_SIZE_64); | |
1377 | else | |
1378 | logBlockSize = PAGE_SIZE; | |
1379 | ||
1380 | if (logBlockSize < blkOffset) | |
1381 | panic("hfs_truncate: bad logBlockSize computed\n"); | |
1382 | ||
1383 | blockZeroCount = MIN(bytestoclear, logBlockSize - blkOffset); | |
1384 | ||
1385 | if (blkOffset == 0 && ((bytestoclear >= logBlockSize) || filePosition >= fcb->fcbEOF)) { | |
1386 | bp = getblk(vp, logBlockNo, logBlockSize, 0, 0, BLK_WRITE); | |
1387 | retval = 0; | |
1388 | ||
1389 | } else { | |
1390 | retval = bread(vp, logBlockNo, logBlockSize, ap->a_cred, &bp); | |
1391 | if (retval) { | |
1392 | brelse(bp); | |
1393 | goto Err_Exit; | |
1394 | } | |
1395 | } | |
1396 | bzero((char *)bp->b_data + blkOffset, blockZeroCount); | |
1397 | ||
1398 | bp->b_flags |= B_DIRTY | B_AGE; | |
1399 | ||
1400 | if (ap->a_flags & IO_SYNC) | |
1401 | VOP_BWRITE(bp); | |
1402 | else if (logBlockNo % 32) | |
1403 | bawrite(bp); | |
1404 | else | |
1405 | VOP_BWRITE(bp); /* wait after we issue 32 requests */ | |
1406 | ||
1407 | bytestoclear -= blockZeroCount; | |
1408 | filePosition += blockZeroCount; | |
1409 | } | |
1410 | }; | |
1411 | } | |
1412 | fcb->fcbEOF = length; | |
1413 | ||
1414 | if (fcb->fcbEOF > fcb->fcbMaxEOF) | |
1415 | fcb->fcbMaxEOF = fcb->fcbEOF; | |
1416 | ||
1417 | if (UBCISVALID(vp)) | |
1418 | ubc_setsize(vp, (off_t)fcb->fcbEOF); /* XXX check errors */ | |
1419 | ||
1420 | } else { /* Shorten the size of the file */ | |
1421 | ||
1422 | if (fcb->fcbEOF > length) { | |
1423 | /* | |
1424 | * Any buffers that are past the truncation point need to be | |
1425 | * invalidated (to maintain buffer cache consistency). For | |
1426 | * simplicity, we invalidate all the buffers by calling vinvalbuf. | |
1427 | */ | |
1428 | if (UBCISVALID(vp)) | |
1429 | ubc_setsize(vp, (off_t)length); /* XXX check errors */ | |
1430 | ||
1431 | vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA; | |
1432 | retval = vinvalbuf(vp, vflags, ap->a_cred, ap->a_p, 0, 0); | |
1433 | } | |
1434 | ||
1435 | /* | |
1436 | * For a TBE process the deallocation of the file blocks is | |
1437 | * delayed until the file is closed. And hfs_close calls | |
1438 | * truncate with the IO_NDELAY flag set. So when IO_NDELAY | |
1439 | * isn't set, we make sure this isn't a TBE process. | |
1440 | */ | |
1441 | if ((ap->a_flags & IO_NDELAY) || (!ISSET(ap->a_p->p_flag, P_TBE))) { | |
1442 | ||
1443 | /* lock extents b-tree (also protects volume bitmap) */ | |
1444 | retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p); | |
1445 | if (retval) | |
1446 | goto Err_Exit; | |
1447 | retval = MacToVFSError( | |
1448 | TruncateFileC( | |
1449 | HTOVCB(hp), | |
1450 | fcb, | |
1451 | length, | |
1452 | false)); | |
1453 | (void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, ap->a_p); | |
1454 | if (retval) | |
1455 | goto Err_Exit; | |
1456 | ||
1457 | fcb->fcbMaxEOF = length; | |
1458 | } | |
1459 | fcb->fcbEOF = length; | |
1460 | ||
1461 | if (fcb->fcbFlags & fcbModifiedMask) | |
1462 | hp->h_nodeflags |= IN_MODIFIED; | |
1463 | } | |
1464 | hp->h_nodeflags |= IN_CHANGE | IN_UPDATE; | |
1465 | retval = VOP_UPDATE(vp, &tv, &tv, MNT_WAIT); | |
1466 | if (retval) { | |
1467 | DBG_ERR(("Could not update truncate")); | |
1468 | ||
1469 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_NONE, | |
1470 | -1, -1, -1, retval, 0); | |
1471 | } | |
1472 | Err_Exit:; | |
1473 | ||
1474 | #if HFS_DIAGNOSTIC | |
1475 | debug_check_blocksizes(ap->a_vp); | |
1476 | #endif | |
1477 | ||
1478 | KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_END, | |
1479 | (int)length, fcb->fcbEOF, fcb->fcbPLen, retval, 0); | |
1480 | ||
1481 | DBG_VOP_LOCKS_TEST(retval); | |
1482 | return (retval); | |
1483 | } | |
1484 | ||
1485 | ||
1486 | ||
1487 | /* | |
1488 | # | |
1489 | #% allocate vp L L L | |
1490 | # | |
1491 | vop_allocate { | |
1492 | IN struct vnode *vp; | |
1493 | IN off_t length; | |
1494 | IN int flags; | |
1495 | IN struct ucred *cred; | |
1496 | IN struct proc *p; | |
1497 | }; | |
1498 | * allocate storage for the hfsnode hp up to at most 'length' bytes | |
1499 | */ | |
1500 | int hfs_allocate(ap) | |
1501 | struct vop_allocate_args /* { | |
1502 | struct vnode *a_vp; | |
1503 | off_t a_length; | |
1504 | u_int32_t a_flags; | |
1505 | off_t *a_bytesallocated; | |
1506 | struct ucred *a_cred; | |
1507 | struct proc *a_p; | |
1508 | } */ *ap; | |
1509 | { | |
1510 | register struct vnode *vp = ap->a_vp; | |
1511 | register struct hfsnode *hp = VTOH(vp); | |
1512 | off_t length = ap->a_length; | |
1513 | off_t startingPEOF; | |
1514 | off_t moreBytesRequested; | |
1515 | off_t actualBytesAdded; | |
1516 | long vflags; | |
1517 | struct timeval tv; | |
1518 | int retval, retval2; | |
1519 | FCB *fcb; | |
1520 | UInt32 extendFlags =0; /* For call to ExtendFileC */ | |
1521 | DBG_FUNC_NAME("hfs_allocate"); | |
1522 | DBG_VOP_LOCKS_DECL(1); | |
1523 | DBG_VOP_PRINT_FUNCNAME(); | |
1524 | DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); | |
1525 | DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); | |
1526 | ||
1527 | /* Set the number of bytes allocated to 0 so that the caller will know that we | |
1528 | did nothing. ExtendFileC will fill this in for us if we actually allocate space */ | |
1529 | ||
1530 | *(ap->a_bytesallocated) = 0; | |
1531 | ||
1532 | /* Now for some error checking */ | |
1533 | ||
1534 | if (length < (off_t)0) { | |
1535 | DBG_VOP_LOCKS_TEST(EINVAL); | |
1536 | return (EINVAL); | |
1537 | } | |
1538 | ||
1539 | if (vp->v_type != VREG && vp->v_type != VLNK) { | |
1540 | DBG_VOP_LOCKS_TEST(EISDIR); | |
1541 | return (EISDIR); /* hfs doesn't support truncating directories */ | |
1542 | } | |
1543 | ||
1544 | /* Fill in the flags word for the call to Extend the file */ | |
1545 | ||
1546 | if (ap->a_flags & ALLOCATECONTIG) { | |
1547 | extendFlags |= kEFContigMask; | |
1548 | } | |
1549 | ||
1550 | if (ap->a_flags & ALLOCATEALL) { | |
1551 | extendFlags |= kEFAllMask; | |
1552 | } | |
1553 | ||
1554 | fcb = HTOFCB(hp); | |
1555 | tv = time; | |
1556 | retval = E_NONE; | |
1557 | startingPEOF = fcb->fcbPLen; | |
1558 | ||
1559 | if (ap->a_flags & ALLOCATEFROMPEOF) { | |
1560 | length += fcb->fcbPLen; | |
1561 | } | |
1562 | ||
1563 | DBG_RW(("%s: allocate from Ox%lX to Ox%X bytes\n", funcname, fcb->fcbPLen, (u_int)length)); | |
1564 | ||
1565 | /* If no changes are necessary, then we're done */ | |
1566 | if (fcb->fcbPLen == length) | |
1567 | goto Std_Exit; | |
1568 | ||
1569 | /* | |
1570 | * Lengthen the size of the file. We must ensure that the | |
1571 | * last byte of the file is allocated. Since the smallest | |
1572 | * value of fcbPLen is 0, length will be at least 1. | |
1573 | */ | |
1574 | if (length > fcb->fcbPLen) { | |
1575 | moreBytesRequested = length - fcb->fcbPLen; | |
1576 | ||
1577 | /* lock extents b-tree (also protects volume bitmap) */ | |
1578 | retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p); | |
1579 | if (retval) goto Err_Exit; | |
1580 | ||
1581 | retval = MacToVFSError( | |
1582 | ExtendFileC(HTOVCB(hp), | |
1583 | fcb, | |
1584 | moreBytesRequested, | |
1585 | extendFlags, | |
1586 | &actualBytesAdded)); | |
1587 | ||
1588 | *(ap->a_bytesallocated) = actualBytesAdded; | |
1589 | ||
1590 | (void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, ap->a_p); | |
1591 | ||
1592 | DBG_ASSERT(length <= fcb->fcbPLen); | |
1593 | ||
1594 | /* | |
1595 | * if we get an error and no changes were made, then exit; | |
1596 | * otherwise we must do the VOP_UPDATE to reflect the changes | |
1597 | */ | |
1598 | if (retval && (startingPEOF == fcb->fcbPLen)) goto Err_Exit; | |
1599 | ||
1600 | /* | |
1601 | * Adjust actualBytesAdded to be allocation block aligned, not | |
1602 | * clump size aligned. | |
1603 | * NOTE: So what we are reporting does not affect reality | |
1604 | * until the file is closed, when we truncate the file to allocation | |
1605 | * block size. | |
1606 | */ | |
1607 | ||
1608 | if ((actualBytesAdded != 0) && (moreBytesRequested < actualBytesAdded)) { | |
1609 | u_long blks, blocksize; | |
1610 | ||
1611 | blocksize = VTOVCB(vp)->blockSize; | |
1612 | blks = moreBytesRequested / blocksize; | |
1613 | if ((blks * blocksize) != moreBytesRequested) | |
1614 | blks++; | |
1615 | ||
1616 | *(ap->a_bytesallocated) = blks * blocksize; | |
1617 | } | |
1618 | ||
1619 | } else { /* Shorten the size of the file */ | |
1620 | ||
1621 | if (fcb->fcbEOF > length) { | |
1622 | /* | |
1623 | * Any buffers that are past the truncation point need to be | |
1624 | * invalidated (to maintain buffer cache consistency). For | |
1625 | * simplicity, we invalidate all the buffers by calling vinvalbuf. | |
1626 | */ | |
1627 | vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA; | |
1628 | (void) vinvalbuf(vp, vflags, ap->a_cred, ap->a_p, 0, 0); | |
1629 | } | |
1630 | ||
1631 | /* lock extents b-tree (also protects volume bitmap) */ | |
1632 | retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p); | |
1633 | if (retval) goto Err_Exit; | |
1634 | ||
1635 | retval = MacToVFSError( | |
1636 | TruncateFileC( | |
1637 | HTOVCB(hp), | |
1638 | fcb, | |
1639 | length, | |
1640 | false)); | |
1641 | (void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, ap->a_p); | |
1642 | ||
1643 | /* | |
1644 | * if we get an error and no changes were made, then exit; | |
1645 | * otherwise we must do the VOP_UPDATE to reflect the changes | |
1646 | */ | |
1647 | if (retval && (startingPEOF == fcb->fcbPLen)) goto Err_Exit; | |
1648 | if (fcb->fcbFlags & fcbModifiedMask) | |
1649 | hp->h_nodeflags |= IN_MODIFIED; | |
1650 | ||
1651 | DBG_ASSERT(length <= fcb->fcbPLen); // DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG | |
1652 | ||
1653 | if (fcb->fcbEOF > fcb->fcbPLen) { | |
1654 | fcb->fcbEOF = fcb->fcbPLen; | |
1655 | fcb->fcbMaxEOF = fcb->fcbPLen; | |
1656 | ||
1657 | if (UBCISVALID(vp)) | |
1658 | ubc_setsize(vp, (off_t)fcb->fcbEOF); /* XXX check errors */ | |
1659 | } | |
1660 | } | |
1661 | ||
1662 | Std_Exit: | |
1663 | hp->h_nodeflags |= IN_CHANGE | IN_UPDATE; | |
1664 | retval2 = VOP_UPDATE(vp, &tv, &tv, MNT_WAIT); | |
1665 | ||
1666 | if (retval == 0) retval = retval2; | |
1667 | ||
1668 | Err_Exit: | |
1669 | DBG_VOP_LOCKS_TEST(retval); | |
1670 | return (retval); | |
1671 | } | |
1672 | ||
1673 | ||
1674 | ||
1675 | ||
1676 | /* pagein for HFS filesystem, similar to hfs_read(), but without cluster_read() */ | |
1677 | int | |
1678 | hfs_pagein(ap) | |
1679 | struct vop_pagein_args /* { | |
1680 | struct vnode *a_vp, | |
1681 | upl_t a_pl, | |
1682 | vm_offset_t a_pl_offset, | |
1683 | off_t a_f_offset, | |
1684 | size_t a_size, | |
1685 | struct ucred *a_cred, | |
1686 | int a_flags | |
1687 | } */ *ap; | |
1688 | { | |
1689 | register struct vnode *vp; | |
1690 | struct hfsnode *hp; | |
1691 | FCB *fcb; | |
1692 | long devBlockSize = 0; | |
1693 | int retval; | |
1694 | ||
1695 | DBG_FUNC_NAME("hfs_pagein"); | |
1696 | DBG_VOP_LOCKS_DECL(1); | |
1697 | DBG_VOP_PRINT_FUNCNAME(); | |
1698 | DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); | |
1699 | DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); | |
1700 | ||
1701 | vp = ap->a_vp; | |
1702 | hp = VTOH(vp); | |
1703 | fcb = HTOFCB(hp); | |
1704 | ||
1705 | if (vp->v_type != VREG && vp->v_type != VLNK) | |
1706 | panic("hfs_pagein: vp not UBC type\n"); | |
1707 | ||
1708 | DBG_VOP(("\tfile size Ox%X\n", (u_int)fcb->fcbEOF)); | |
1709 | DBG_VOP(("\tstarting at offset Ox%X of file, length Ox%X\n", (u_int)ap->a_f_offset, (u_int)ap->a_size)); | |
1710 | ||
1711 | #if HFS_DIAGNOSTIC | |
1712 | debug_check_blocksizes(vp); | |
1713 | #endif | |
1714 | ||
1715 | VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize); | |
1716 | ||
1717 | retval = cluster_pagein(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset, | |
1718 | ap->a_size, (off_t)fcb->fcbEOF, devBlockSize, | |
1719 | ap->a_flags); | |
1720 | ||
1721 | #if HFS_DIAGNOSTIC | |
1722 | debug_check_blocksizes(vp); | |
1723 | #endif | |
1724 | DBG_VOP_LOCKS_TEST(retval); | |
1725 | ||
1726 | return (retval); | |
1727 | } | |
1728 | ||
1729 | /* | |
1730 | * pageout for HFS filesystem. | |
1731 | */ | |
1732 | int | |
1733 | hfs_pageout(ap) | |
1734 | struct vop_pageout_args /* { | |
1735 | struct vnode *a_vp, | |
1736 | upl_t a_pl, | |
1737 | vm_offset_t a_pl_offset, | |
1738 | off_t a_f_offset, | |
1739 | size_t a_size, | |
1740 | struct ucred *a_cred, | |
1741 | int a_flags | |
1742 | } */ *ap; | |
1743 | { | |
1744 | struct vnode *vp = ap->a_vp; | |
1745 | struct hfsnode *hp = VTOH(vp); | |
1746 | FCB *fcb = HTOFCB(hp); | |
1747 | int retval; | |
1748 | long devBlockSize = 0; | |
1749 | ||
1750 | DBG_FUNC_NAME("hfs_pageout"); | |
1751 | DBG_VOP_LOCKS_DECL(1); | |
1752 | DBG_VOP_PRINT_FUNCNAME(); | |
1753 | DBG_VOP_PRINT_VNODE_INFO(vp);DBG_VOP_CONT(("\n")); | |
1754 | DBG_VOP(("\thfsnode 0x%x (%s)\n", (u_int)hp, H_NAME(hp))); | |
1755 | DBG_VOP(("\tstarting at offset Ox%lX of file, length Ox%lX\n", | |
1756 | (UInt32)ap->a_f_offset, (UInt32)ap->a_size)); | |
1757 | ||
1758 | DBG_VOP_LOCKS_INIT(0, vp, VOPDBG_LOCKED, | |
1759 | VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); | |
1760 | ||
1761 | #if HFS_DIAGNOSTIC | |
1762 | debug_check_blocksizes(vp); | |
1763 | #endif | |
1764 | ||
1765 | if (UBCINVALID(vp)) | |
1766 | panic("hfs_pageout: Not a VREG: vp=%x", vp); | |
1767 | ||
1768 | VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize); | |
1769 | ||
1770 | retval = cluster_pageout(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset, ap->a_size, | |
1771 | (off_t)fcb->fcbEOF, devBlockSize, ap->a_flags); | |
1772 | /* | |
1773 | * If we successfully wrote any data, and we are not the superuser | |
1774 | * we clear the setuid and setgid bits as a precaution against | |
1775 | * tampering. | |
1776 | */ | |
1777 | if (retval == 0 && ap->a_cred && ap->a_cred->cr_uid != 0) | |
1778 | hp->h_meta->h_mode &= ~(ISUID | ISGID); | |
1779 | ||
1780 | #if HFS_DIAGNOSTIC | |
1781 | debug_check_blocksizes(vp); | |
1782 | #endif | |
1783 | ||
1784 | DBG_VOP_LOCKS_TEST(retval); | |
1785 | return (retval); | |
1786 | } | |
1787 | ||
1788 | /* | |
1789 | * Intercept B-Tree node writes to unswap them if necessary. | |
1790 | # | |
1791 | #vop_bwrite { | |
1792 | # IN struct buf *bp; | |
1793 | */ | |
1794 | int | |
1795 | hfs_bwrite(ap) | |
1796 | struct vop_bwrite_args /* { | |
1797 | struct buf *a_bp; | |
1798 | } */ *ap; | |
1799 | { | |
1800 | register struct buf *bp = ap->a_bp; | |
1801 | register struct vnode *vp = bp->b_vp; | |
1802 | BlockDescriptor block; | |
1803 | int retval = 0; | |
1804 | ||
1805 | DBG_FUNC_NAME("hfs_bwrite"); | |
1806 | ||
1807 | #if BYTE_ORDER == LITTLE_ENDIAN | |
1808 | /* Trap B-Tree writes */ | |
1809 | if ((H_FILEID(VTOH(vp)) == kHFSExtentsFileID) || | |
1810 | (H_FILEID(VTOH(vp)) == kHFSCatalogFileID)) { | |
1811 | ||
1812 | /* Swap if the B-Tree node is in native byte order */ | |
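| /* (The last two bytes of a B-Tree node hold the offset of record 0, which is always | |
|  *  sizeof(BTNodeDescriptor) == 0x000e, so reading 0x000e in host byte order means the | |
|  *  node has not yet been swapped to big-endian disk order.) */ | |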
1813 | if (((UInt16 *)((char *)bp->b_data + bp->b_bcount - 2))[0] == 0x000e) { | |
1814 | /* Prepare the block pointer */ | |
1815 | block.blockHeader = bp; | |
1816 | block.buffer = bp->b_data + IOBYTEOFFSETFORBLK(bp->b_blkno, VTOHFS(vp)->hfs_phys_block_size); | |
1817 | block.blockReadFromDisk = (bp->b_flags & B_CACHE) == 0; /* not found in cache ==> came from disk */ | |
1818 | block.blockSize = bp->b_bcount; | |
1819 | ||
1820 | /* Endian un-swap B-Tree node */ | |
1821 | SWAP_BT_NODE (&block, ISHFSPLUS (VTOVCB(vp)), H_FILEID(VTOH(vp)), 1); | |
1822 | } | |
1823 | ||
1824 | /* We don't check to make sure that it's 0x0e00 because it could be all zeros */ | |
1825 | } | |
1826 | #endif | |
1827 | ||
1828 | retval = vn_bwrite (ap); | |
1829 | ||
1830 | return (retval); | |
1831 | } |