//
//  lf_hfs_readwrite_ops.c
//  livefiles_hfs
//
//  Created by Yakov Ben Zaken on 22/03/2018.
//

#include "lf_hfs_readwrite_ops.h"
#include "lf_hfs_rangelist.h"
#include "lf_hfs_vfsutils.h"
#include "lf_hfs_file_extent_mapping.h"
#include "lf_hfs_vfsops.h"
#include "lf_hfs_cnode.h"
#include "lf_hfs_file_mgr_internal.h"
#include "lf_hfs_utils.h"
#include "lf_hfs_vnops.h"
#include "lf_hfs_raw_read_write.h"

#include <assert.h>

static int do_hfs_truncate(struct vnode *vp, off_t length, int flags, int truncateflags);

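/*
 * Grow or shrink a file fork to exactly `length` bytes.
 *
 * Helper for hfs_truncate(): when the new length is larger than the current
 * EOF it extends the fork inside a transaction via ExtendFileC(), otherwise
 * it releases blocks via TruncateFileC().  Cnode times are updated unless
 * HFS_TRUNCATE_SKIPTIMES is set in truncateflags.
 */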
static int
do_hfs_truncate(struct vnode *vp, off_t length, int flags, int truncateflags)
{
    register struct cnode *cp = VTOC(vp);
    struct filefork *fp = VTOF(vp);
    int retval;
    off_t bytesToAdd;
    off_t actualBytesAdded;
    off_t filebytes;
    u_int32_t fileblocks;
    int blksize;
    struct hfsmount *hfsmp;
    int lockflags;
    int suppress_times = (truncateflags & HFS_TRUNCATE_SKIPTIMES);

    blksize = VTOVCB(vp)->blockSize;
    fileblocks = fp->ff_blocks;
    filebytes = (off_t)fileblocks * (off_t)blksize;

    if (length < 0)
        return (EINVAL);

    /* This should only happen with a corrupt filesystem */
    if ((off_t)fp->ff_size < 0)
        return (EINVAL);

    hfsmp = VTOHFS(vp);

    retval = E_NONE;
    /*
     * Lengthen the size of the file. We must ensure that the
     * last byte of the file is allocated. Since the smallest
     * value of ff_size is 0, length will be at least 1.
     */
    if (length > (off_t)fp->ff_size) {
        /*
         * If we don't have enough physical space then
         * we need to extend the physical size.
         */
        if (length > filebytes) {
            int eflags = kEFReserveMask;
            u_int32_t blockHint = 0;

            /* All or nothing and don't round up to clumpsize. */
            eflags |= kEFAllMask | kEFNoClumpMask;

            if (hfs_start_transaction(hfsmp) != 0) {
                retval = EINVAL;
                goto Err_Exit;
            }

            /* Protect extents b-tree and allocation bitmap */
            lockflags = SFL_BITMAP;
            if (overflow_extents(fp))
                lockflags |= SFL_EXTENTS;
            lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

            /*
             * Keep growing the file as long as the current EOF is
             * less than the desired value.
             */
            while ((length > filebytes) && (retval == E_NONE)) {
                bytesToAdd = length - filebytes;
                retval = MacToVFSError(ExtendFileC(VTOVCB(vp),
                                                   (FCB*)fp,
                                                   bytesToAdd,
                                                   blockHint,
                                                   eflags,
                                                   &actualBytesAdded));

                filebytes = (off_t)fp->ff_blocks * (off_t)blksize;
                if (actualBytesAdded == 0 && retval == E_NONE) {
                    if (length > filebytes)
                        length = filebytes;
                    break;
                }
            } /* endwhile */

            hfs_systemfile_unlock(hfsmp, lockflags);

            if (hfsmp->jnl) {
                hfs_update(vp, 0);
                hfs_volupdate(hfsmp, VOL_UPDATE, 0);
            }

            hfs_end_transaction(hfsmp);

            if (retval)
                goto Err_Exit;
        }

        if (ISSET(flags, IO_NOZEROFILL))
        {
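            /* IO_NOZEROFILL: skip the zero-fill handling done in the else branch below. */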
        }
        else
        {
            if (!vnode_issystem(vp) && retval == E_NONE) {
                if (length > (off_t)fp->ff_size) {
                    struct timeval tv;

                    /* Extending the file: time to fill out the current last page with zeroes? */
                    retval = raw_readwrite_zero_fill_last_block_suffix(vp);
                    if (retval) goto Err_Exit;

                    microuptime(&tv);
                    // Currently disabling the rl_add, since the
                    // data is being filled with 0's, which is valid content for us.
                    // rl_add(fp->ff_size, length - 1, &fp->ff_invalidranges);
                    cp->c_zftimeout = (uint32_t)tv.tv_sec + ZFTIMELIMIT;
                }
            } else {
                LFHFS_LOG(LEVEL_ERROR, "hfs_truncate: invoked on non-UBC object?!");
                hfs_assert(0);
            }
        }
        if (suppress_times == 0) {
            cp->c_touch_modtime = TRUE;
        }
        fp->ff_size = length;

    } else { /* Shorten the size of the file */

        if ((off_t)fp->ff_size > length) {
            /* Any space previously marked as invalid is now irrelevant: */
            rl_remove(length, fp->ff_size - 1, &fp->ff_invalidranges);
        }

        /*
         * Account for any unmapped blocks. Note that the new
         * file length can still end up with unmapped blocks.
         */
        if (fp->ff_unallocblocks > 0) {
            u_int32_t finalblks;
            u_int32_t loanedBlocks;

            hfs_lock_mount(hfsmp);
            loanedBlocks = fp->ff_unallocblocks;
            cp->c_blocks -= loanedBlocks;
            fp->ff_blocks -= loanedBlocks;
            fp->ff_unallocblocks = 0;

            hfsmp->loanedBlocks -= loanedBlocks;

            finalblks = (uint32_t)((length + blksize - 1) / blksize);
            if (finalblks > fp->ff_blocks) {
                /* calculate required unmapped blocks */
                loanedBlocks = finalblks - fp->ff_blocks;
                hfsmp->loanedBlocks += loanedBlocks;

                fp->ff_unallocblocks = loanedBlocks;
                cp->c_blocks += loanedBlocks;
                fp->ff_blocks += loanedBlocks;
            }
            hfs_unlock_mount(hfsmp);
        }
        if (hfs_start_transaction(hfsmp) != 0) {
            retval = EINVAL;
            goto Err_Exit;
        }

        if (fp->ff_unallocblocks == 0) {
            /* Protect extents b-tree and allocation bitmap */
            lockflags = SFL_BITMAP;
            if (overflow_extents(fp))
                lockflags |= SFL_EXTENTS;
            lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

            retval = MacToVFSError(TruncateFileC(VTOVCB(vp), (FCB*)fp, length, 0,
                                                 FORK_IS_RSRC(fp), FTOC(fp)->c_fileid, false));

            hfs_systemfile_unlock(hfsmp, lockflags);
        }
        if (hfsmp->jnl) {
            if (retval == 0) {
                fp->ff_size = length;
            }
            hfs_update(vp, 0);
            hfs_volupdate(hfsmp, VOL_UPDATE, 0);
        }

        hfs_end_transaction(hfsmp);

        if (retval) goto Err_Exit;

        /*
         * Only set update flag if the logical length changes & we aren't
         * suppressing modtime updates.
         */
        if (((off_t)fp->ff_size != length) && (suppress_times == 0)) {
            cp->c_touch_modtime = TRUE;
        }
        fp->ff_size = length;
    }

    cp->c_flag |= C_MODIFIED;
    cp->c_touch_chgtime = TRUE; /* status changed */
    if (suppress_times == 0) {
        cp->c_touch_modtime = TRUE; /* file data was modified */

        /*
         * If we are not suppressing the modtime update, then
         * update the gen count as well.
         */
        if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
            hfs_incr_gencount(cp);
        }
    }

    retval = hfs_update(vp, 0);

Err_Exit:

    return (retval);
}

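/*
 * Map a logical file offset to a device block number (VNOP_BLOCKMAP).
 *
 * For reads, invalid (not yet written) ranges are reported as unallocated
 * (*a_bpn == -1) so the caller zero-fills them; for writes, any delayed
 * (loaned) blocks are converted into real allocations via ExtendFileC()
 * before the mapping is resolved with MapFileBlockC().
 */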
int
hfs_vnop_blockmap(struct vnop_blockmap_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct cnode *cp;
    struct filefork *fp;
    struct hfsmount *hfsmp;
    size_t bytesContAvail = ap->a_size;
    int retval = E_NONE;
    int syslocks = 0;
    int lockflags = 0;
    struct rl_entry *invalid_range;
    enum rl_overlaptype overlaptype;
    int started_tr = 0;
    int tooklock = 0;

    /* Do not allow blockmap operation on a directory */
    if (vnode_isdir(vp)) {
        return (ENOTSUP);
    }

    /*
     * Check for underlying vnode requests and ensure that logical
     * to physical mapping is requested.
     */
    if (ap->a_bpn == NULL)
        return (0);

    hfsmp = VTOHFS(vp);
    cp = VTOC(vp);
    fp = VTOF(vp);

    if ( !vnode_issystem(vp) && !vnode_islnk(vp) ) {
        if (cp->c_lockowner != pthread_self()) {
            hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
            tooklock = 1;
        }

        // For reads, check the invalid ranges
        if (ISSET(ap->a_flags, VNODE_READ)) {
            if (ap->a_foffset >= fp->ff_size) {
                retval = ERANGE;
                goto exit;
            }

            overlaptype = rl_scan(&fp->ff_invalidranges, ap->a_foffset,
                                  ap->a_foffset + (off_t)bytesContAvail - 1,
                                  &invalid_range);
            switch(overlaptype) {
                case RL_MATCHINGOVERLAP:
                case RL_OVERLAPCONTAINSRANGE:
                case RL_OVERLAPSTARTSBEFORE:
                    /* There's no valid block for this byte offset */
                    *ap->a_bpn = (daddr64_t)-1;
                    /* There's no point limiting the amount to be returned
                     * if the invalid range that was hit extends all the way
                     * to the EOF (i.e. there are no valid bytes between the
                     * end of this range and the file's EOF):
                     */
                    if (((off_t)fp->ff_size > (invalid_range->rl_end + 1)) &&
                        ((size_t)(invalid_range->rl_end + 1 - ap->a_foffset) < bytesContAvail)) {
                        bytesContAvail = invalid_range->rl_end + 1 - ap->a_foffset;
                    }

                    retval = 0;
                    goto exit;

                case RL_OVERLAPISCONTAINED:
                case RL_OVERLAPENDSAFTER:
                    /* The range of interest hits an invalid block before the end: */
                    if (invalid_range->rl_start == ap->a_foffset) {
                        /* There's actually no valid information to be had starting here: */
                        *ap->a_bpn = (daddr64_t)-1;
                        if (((off_t)fp->ff_size > (invalid_range->rl_end + 1)) &&
                            ((size_t)(invalid_range->rl_end + 1 - ap->a_foffset) < bytesContAvail)) {
                            bytesContAvail = invalid_range->rl_end + 1 - ap->a_foffset;
                        }

                        retval = 0;
                        goto exit;
                    } else {
                        /*
                         * Sadly, the lower layers don't like us to
                         * return unaligned ranges, so we skip over
                         * any invalid ranges here that are less than
                         * a page: zeroing of those bits is not our
                         * responsibility (it's dealt with elsewhere).
                         */
                        do {
                            off_t rounded_start = (((uint64_t)(invalid_range->rl_start) + (off_t)PAGE_MASK) & ~((off_t)PAGE_MASK));
                            if ((off_t)bytesContAvail < rounded_start - ap->a_foffset)
                                break;
                            if (rounded_start < invalid_range->rl_end + 1) {
                                bytesContAvail = rounded_start - ap->a_foffset;
                                break;
                            }
                        } while ((invalid_range = TAILQ_NEXT(invalid_range,
                                                             rl_link)));
                    }
                    break;

                case RL_NOOVERLAP:
                    break;
            } // switch
        }
    }

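    /*
     * Restart point: if delayed allocations appear while we hold the system
     * file locks without a transaction, we drop the locks and come back here
     * to retake them inside a transaction.
     */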
retry:

    /* Check virtual blocks only when performing write operation */
    if ((ap->a_flags & VNODE_WRITE) && (fp->ff_unallocblocks != 0)) {
        if (hfs_start_transaction(hfsmp) != 0) {
            retval = EINVAL;
            goto exit;
        } else {
            started_tr = 1;
        }
        syslocks = SFL_EXTENTS | SFL_BITMAP;

    } else if (overflow_extents(fp)) {
        syslocks = SFL_EXTENTS;
    }

    if (syslocks)
        lockflags = hfs_systemfile_lock(hfsmp, syslocks, HFS_EXCLUSIVE_LOCK);

    /*
     * Check for any delayed allocations.
     */
    if ((ap->a_flags & VNODE_WRITE) && (fp->ff_unallocblocks != 0)) {
        int64_t actbytes;
        u_int32_t loanedBlocks;

        //
        // Make sure we have a transaction. It's possible
        // that we came in and fp->ff_unallocblocks was zero
        // but during the time we blocked acquiring the extents
        // btree, ff_unallocblocks became non-zero and so we
        // will need to start a transaction.
        //
        if (started_tr == 0) {
            if (syslocks) {
                hfs_systemfile_unlock(hfsmp, lockflags);
                syslocks = 0;
            }
            goto retry;
        }

        /*
         * Note: ExtendFileC will release any blocks on loan and
         * acquire real blocks. So we ask to extend by zero bytes
         * since ExtendFileC will account for the virtual blocks.
         */

        loanedBlocks = fp->ff_unallocblocks;
        retval = ExtendFileC(hfsmp, (FCB*)fp, 0, 0,
                             kEFAllMask | kEFNoClumpMask, &actbytes);

        if (retval) {
            fp->ff_unallocblocks = loanedBlocks;
            cp->c_blocks += loanedBlocks;
            fp->ff_blocks += loanedBlocks;

            hfs_lock_mount(hfsmp);
            hfsmp->loanedBlocks += loanedBlocks;
            hfs_unlock_mount(hfsmp);

            hfs_systemfile_unlock(hfsmp, lockflags);
            cp->c_flag |= C_MODIFIED;
            if (started_tr) {
                (void) hfs_update(vp, 0);
                (void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);

                hfs_end_transaction(hfsmp);
                started_tr = 0;
            }
            goto exit;
        }
    }

    retval = MapFileBlockC(hfsmp, (FCB *)fp, bytesContAvail, ap->a_foffset,
                           ap->a_bpn, &bytesContAvail);
    if (syslocks) {
        hfs_systemfile_unlock(hfsmp, lockflags);
    }

    if (retval) {
        /* On write, always return error because virtual blocks, if any,
         * should have been allocated in ExtendFileC(). We do not
         * allocate virtual blocks on read, therefore return error
         * only if no virtual blocks are allocated. Otherwise we search
         * the rangelist for zero-fills.
         */
        if ((MacToVFSError(retval) != ERANGE) ||
            (ap->a_flags & VNODE_WRITE) ||
            ((ap->a_flags & VNODE_READ) && (fp->ff_unallocblocks == 0))) {
            goto exit;
        }

        /* Validate if the start offset is within logical file size */
        if (ap->a_foffset >= fp->ff_size) {
            goto exit;
        }

        /*
         * At this point, we have encountered a failure during
         * MapFileBlockC that resulted in ERANGE, and we are not
         * servicing a write, and there are borrowed blocks.
         *
         * However, the cluster layer will not call blockmap for
         * blocks that are borrowed and in-cache. We have to assume
         * that because we observed ERANGE being emitted from
         * MapFileBlockC, this extent range is not valid on-disk. So
         * we treat this as a mapping that needs to be zero-filled
         * prior to reading.
         */

        if (fp->ff_size - ap->a_foffset < (off_t)bytesContAvail)
            bytesContAvail = fp->ff_size - ap->a_foffset;

        *ap->a_bpn = (daddr64_t) -1;
        retval = 0;

        goto exit;
    }

exit:
    if (retval == 0) {
        if (ISSET(ap->a_flags, VNODE_WRITE)) {
            struct rl_entry *r = TAILQ_FIRST(&fp->ff_invalidranges);

            // See if we might be overlapping invalid ranges...
            if (r && (ap->a_foffset + (off_t)bytesContAvail) > r->rl_start) {
                /*
                 * Mark the file as needing an update if we think the
                 * on-disk EOF has changed.
                 */
                if (ap->a_foffset <= r->rl_start)
                    SET(cp->c_flag, C_MODIFIED);

                /*
                 * This isn't the ideal place to put this. Ideally, we
                 * should do something *after* we have successfully
                 * written to the range, but that's difficult to do
                 * because we cannot take locks in the callback. At
                 * present, the cluster code will call us with VNODE_WRITE
                 * set just before it's about to write the data so we know
                 * that data is about to be written. If we get an I/O
                 * error at this point then chances are the metadata
                 * update to follow will also have an I/O error so the
                 * risk here is small.
                 */
                rl_remove(ap->a_foffset, ap->a_foffset + bytesContAvail - 1,
                          &fp->ff_invalidranges);

                if (!TAILQ_FIRST(&fp->ff_invalidranges)) {
                    cp->c_flag &= ~C_ZFWANTSYNC;
                    cp->c_zftimeout = 0;
                }
            }
        }

        if (ap->a_run)
            *ap->a_run = bytesContAvail;

        if (ap->a_poff)
            *(int *)ap->a_poff = 0;
    }

    if (started_tr) {
        hfs_update(vp, TRUE);
        hfs_volupdate(hfsmp, VOL_UPDATE, 0);
        hfs_end_transaction(hfsmp);
    }

    if (tooklock)
        hfs_unlock(cp);

    return (MacToVFSError(retval));
}

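/*
 * Prepare a file for having all of its storage released: drop any invalid
 * ranges and return loaned (delayed-allocation) blocks to the mount so the
 * forks can then be truncated by hfs_release_storage().
 */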
int
hfs_prepare_release_storage (struct hfsmount *hfsmp, struct vnode *vp) {

    struct filefork *fp = VTOF(vp);
    struct cnode *cp = VTOC(vp);

    /* Cannot truncate an HFS directory! */
    if (IS_DIR(vp))
    {
        return (EISDIR);
    }

    /* This should only happen with a corrupt filesystem */
    if ((off_t)fp->ff_size < 0)
        return (EINVAL);

    /*
     * We cannot just check if fp->ff_size == length (as an optimization)
     * since there may be extra physical blocks that also need truncation.
     */

    /* Wipe out any invalid ranges which have yet to be backed by disk */
    rl_remove(0, fp->ff_size - 1, &fp->ff_invalidranges);

    /*
     * Account for any unmapped blocks. Since we're deleting the
     * entire file, we don't have to worry about just shrinking
     * to a smaller number of borrowed blocks.
     */
    if (fp->ff_unallocblocks > 0)
    {
        u_int32_t loanedBlocks;

        hfs_lock_mount(hfsmp);
        loanedBlocks = fp->ff_unallocblocks;
        cp->c_blocks -= loanedBlocks;
        fp->ff_blocks -= loanedBlocks;
        fp->ff_unallocblocks = 0;

        hfsmp->loanedBlocks -= loanedBlocks;

        hfs_unlock_mount(hfsmp);
    }

    return 0;
}

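/*
 * Free the on-disk blocks of the data and resource forks of a deleted file,
 * truncating each fork in HFS_BIGFILE_SIZE chunks so that no single
 * transaction grows too large.
 */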
int
hfs_release_storage (struct hfsmount *hfsmp, struct filefork *datafork, struct filefork *rsrcfork, u_int32_t fileid)
{
    int error = 0;
    int blksize = hfsmp->blockSize;

    /* Data Fork */
    if (datafork)
    {
        datafork->ff_size = 0;

        u_int32_t fileblocks = datafork->ff_blocks;
        off_t filebytes = (off_t)fileblocks * (off_t)blksize;

        /* We killed invalid ranges and loaned blocks before we removed the catalog entry */

        while (filebytes > 0) {
            if (filebytes > HFS_BIGFILE_SIZE) {
                filebytes -= HFS_BIGFILE_SIZE;
            } else {
                filebytes = 0;
            }

            /* Start a transaction, and wipe out as many blocks as we can in this iteration */
            if (hfs_start_transaction(hfsmp) != 0) {
                error = EINVAL;
                break;
            }

            if (datafork->ff_unallocblocks == 0)
            {
                /* Protect extents b-tree and allocation bitmap */
                int lockflags = SFL_BITMAP;
                if (overflow_extents(datafork))
                    lockflags |= SFL_EXTENTS;
                lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

                error = MacToVFSError(TruncateFileC(HFSTOVCB(hfsmp), datafork, filebytes, 1, 0, fileid, false));

                hfs_systemfile_unlock(hfsmp, lockflags);
            }
            (void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);

            /* Finish the transaction and start over if necessary */
            hfs_end_transaction(hfsmp);

            if (error) {
                break;
            }
        }
    }

    /* Resource fork */
    if (error == 0 && rsrcfork)
    {
        rsrcfork->ff_size = 0;

        u_int32_t fileblocks = rsrcfork->ff_blocks;
        off_t filebytes = (off_t)fileblocks * (off_t)blksize;

        /* We killed invalid ranges and loaned blocks before we removed the catalog entry */

        while (filebytes > 0)
        {
            if (filebytes > HFS_BIGFILE_SIZE)
            {
                filebytes -= HFS_BIGFILE_SIZE;
            }
            else
            {
                filebytes = 0;
            }

            /* Start a transaction, and wipe out as many blocks as we can in this iteration */
            if (hfs_start_transaction(hfsmp) != 0)
            {
                error = EINVAL;
                break;
            }

            if (rsrcfork->ff_unallocblocks == 0)
            {
                /* Protect extents b-tree and allocation bitmap */
                int lockflags = SFL_BITMAP;
                if (overflow_extents(rsrcfork))
                    lockflags |= SFL_EXTENTS;
                lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

                error = MacToVFSError(TruncateFileC(HFSTOVCB(hfsmp), rsrcfork, filebytes, 1, 1, fileid, false));

                hfs_systemfile_unlock(hfsmp, lockflags);
            }
            (void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);

            /* Finish the transaction and start over if necessary */
            hfs_end_transaction(hfsmp);

            if (error)
            {
                break;
            }
        }
    }

    return error;
}

/*
 * Truncate a cnode to at most length size, freeing (or adding) the
 * disk blocks.
 */
int
hfs_truncate(struct vnode *vp, off_t length, int flags, int truncateflags)
{
    struct filefork *fp = VTOF(vp);
    off_t filebytes;
    u_int32_t fileblocks;
    int blksize;
    errno_t error = 0;
    struct cnode *cp = VTOC(vp);
    hfsmount_t *hfsmp = VTOHFS(vp);

    /* Cannot truncate an HFS directory! */
    if (vnode_isdir(vp)) {
        return (EISDIR);
    }

    blksize = hfsmp->blockSize;
    fileblocks = fp->ff_blocks;
    filebytes = (off_t)fileblocks * (off_t)blksize;

    bool caller_has_cnode_lock = (cp->c_lockowner == pthread_self());

    if (!caller_has_cnode_lock) {
        error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
        if (error)
            return error;
    }

    if (vnode_islnk(vp) && cp->c_datafork->ff_symlinkptr) {
        hfs_free(cp->c_datafork->ff_symlinkptr);
        cp->c_datafork->ff_symlinkptr = NULL;
    }

    // have to loop truncating or growing files that are
    // really big because otherwise transactions can get
    // enormous and consume too many kernel resources.

    if (length < filebytes) {
        while (filebytes > length) {
            if ((filebytes - length) > HFS_BIGFILE_SIZE) {
                filebytes -= HFS_BIGFILE_SIZE;
            } else {
                filebytes = length;
            }
            error = do_hfs_truncate(vp, filebytes, flags, truncateflags);
            if (error)
                break;
        }
    } else if (length > filebytes) {
        const bool keep_reserve = false; //cred && suser(cred, NULL) != 0;

        if (hfs_freeblks(hfsmp, keep_reserve) < howmany(length - filebytes, blksize))
        {
            error = ENOSPC;
        }
        else
        {
            while (filebytes < length) {
                if ((length - filebytes) > HFS_BIGFILE_SIZE) {
                    filebytes += HFS_BIGFILE_SIZE;
                } else {
                    filebytes = length;
                }
                error = do_hfs_truncate(vp, filebytes, flags, truncateflags);
                if (error)
                    break;
            }
        }
    } else /* Same logical size */ {

        error = do_hfs_truncate(vp, length, flags, truncateflags);
    }

    if (!caller_has_cnode_lock)
        hfs_unlock(cp);

    return error;
}

/*
 * Preallocate file storage space.
 */
int
hfs_vnop_preallocate(struct vnode * vp, LIFilePreallocateArgs_t* psPreAllocReq, LIFilePreallocateArgs_t* psPreAllocRes)
{
    struct cnode *cp = VTOC(vp);
    struct filefork *fp = VTOF(vp);
    struct hfsmount *hfsmp = VTOHFS(vp);
    ExtendedVCB *vcb = VTOVCB(vp);
    int retval = E_NONE, retval2 = E_NONE;

    off_t length = psPreAllocReq->length;
    psPreAllocRes->bytesallocated = 0;

    if (vnode_isdir(vp) || vnode_islnk(vp)) {
        LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_preallocate: Cannot change size of a directory or symlink!");
        return EPERM;
    }

    if (length == 0)
        return (0);

    if (psPreAllocReq->flags & LI_PREALLOCATE_ALLOCATEFROMVOL) {
        LFHFS_LOG(LEVEL_ERROR, "hfs_vnop_preallocate: Not supporting LI_PREALLOCATE_ALLOCATEFROMVOL mode\n");
        return ENOTSUP;
    }

    hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);

    if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
        hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
        return (retval);
    }

    off_t filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize;
    off_t startingPEOF = filebytes;

    /* If no changes are necessary, then we're done */
    if (filebytes == length)
        goto exit;

    u_int32_t extendFlags = kEFNoClumpMask;
    if (psPreAllocReq->flags & LI_PREALLOCATE_ALLOCATECONTIG)
        extendFlags |= kEFContigMask;
    if (psPreAllocReq->flags & LI_PREALLOCATE_ALLOCATEALL)
        extendFlags |= kEFAllMask;

    /*
     * Lengthen the size of the file. We must ensure that the
     * last byte of the file is allocated. Since the smallest
     * value of filebytes is 0, length will be at least 1.
     */
    if (length > filebytes)
    {
        off_t total_bytes_added = 0, orig_request_size, moreBytesRequested, actualBytesAdded;
        orig_request_size = moreBytesRequested = length - filebytes;

        while ((length > filebytes) && (retval == E_NONE))
        {
            off_t bytesRequested;

            if (hfs_start_transaction(hfsmp) != 0)
            {
                retval = EINVAL;
                goto err_exit;
            }

            /* Protect extents b-tree and allocation bitmap */
            int lockflags = SFL_BITMAP;
            if (overflow_extents(fp))
                lockflags |= SFL_EXTENTS;
            lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

            if (moreBytesRequested >= HFS_BIGFILE_SIZE) {
                bytesRequested = HFS_BIGFILE_SIZE;
            } else {
                bytesRequested = moreBytesRequested;
            }

            retval = MacToVFSError(ExtendFileC(vcb,
                                               (FCB*)fp,
                                               bytesRequested,
                                               0,
                                               extendFlags,
                                               &actualBytesAdded));

            if (retval == E_NONE)
            {
                psPreAllocRes->bytesallocated += actualBytesAdded;
                total_bytes_added += actualBytesAdded;
                moreBytesRequested -= actualBytesAdded;
            }

            filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize;
            hfs_systemfile_unlock(hfsmp, lockflags);

            if (hfsmp->jnl) {
                (void) hfs_update(vp, 0);
                (void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
            }

            hfs_end_transaction(hfsmp);
        }

        /*
         * if we get an error and no changes were made then exit
         * otherwise we must do the hfs_update to reflect the changes
         */
        if (retval && (startingPEOF == filebytes))
            goto err_exit;

        /*
         * Adjust actualBytesAdded to be allocation block aligned, not
         * clump size aligned.
         * NOTE: So what we are reporting does not affect reality
         * until the file is closed, when we truncate the file to allocation
         * block size.
         */
        if (total_bytes_added != 0 && orig_request_size < total_bytes_added)
            psPreAllocRes->bytesallocated = roundup(orig_request_size, (off_t)vcb->blockSize);
    } else {
        // No need to touch anything else, just unlock and go out
        goto err_exit;
    }

exit:
    cp->c_flag |= C_MODIFIED;
    cp->c_touch_chgtime = TRUE;
    cp->c_touch_modtime = TRUE;
    retval2 = hfs_update(vp, 0);

    if (retval == 0)
        retval = retval2;

err_exit:
    hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
    hfs_unlock(cp);
    return (retval);
}
897 }