/*
 * Copyright (c) 1995-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
//
// This file implements a simple write-ahead journaling layer.
// In theory any file system can make use of it by calling these
// functions when the fs wants to modify meta-data blocks. See
// vfs_journal.h for a more detailed description of the api and
// data structures.
//
// Dominic Giampaolo (dbg@apple.com)
//
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/file_internal.h>
45 #include <sys/buf_internal.h>
46 #include <sys/proc_internal.h>
47 #include <sys/mount_internal.h>
48 #include <sys/namei.h>
49 #include <sys/vnode_internal.h>
50 #include <sys/ioctl.h>
53 #include <sys/malloc.h>
54 #include <kern/thread.h>
56 #include <miscfs/specfs/specdev.h>
58 extern task_t kernel_task
;
70 #include <sys/types.h>
75 #include "vfs_journal.h"
// number of bytes to checksum in a block_list_header
// NOTE: this should be enough to clear out the header
// fields as well as the first entry of binfo[]
#define BLHDR_CHECKSUM_SIZE 32
85 static int end_transaction(transaction
*tr
, int force_it
);
86 static void abort_transaction(journal
*jnl
, transaction
*tr
);
87 static void dump_journal(journal
*jnl
);
89 static __inline__
void lock_journal(journal
*jnl
);
90 static __inline__
void unlock_journal(journal
*jnl
);
91 static __inline__
void lock_oldstart(journal
*jnl
);
92 static __inline__
void unlock_oldstart(journal
*jnl
);
98 // 3105942 - Coalesce writes to the same block on journal replay
101 typedef struct bucket
{
107 #define STARTING_BUCKETS 256
109 static int add_block(journal
*jnl
, struct bucket
**buf_ptr
, off_t block_num
, size_t size
, size_t offset
, int *num_buckets_ptr
, int *num_full_ptr
);
110 static int grow_table(struct bucket
**buf_ptr
, int num_buckets
, int new_size
);
111 static int lookup_bucket(struct bucket
**buf_ptr
, off_t block_num
, int num_full
);
112 static int do_overlap(journal
*jnl
, struct bucket
**buf_ptr
, int blk_index
, off_t block_num
, size_t size
, size_t offset
, int *num_buckets_ptr
, int *num_full_ptr
);
113 static int insert_block(journal
*jnl
, struct bucket
**buf_ptr
, int blk_index
, off_t num
, size_t size
, size_t offset
, int *num_buckets_ptr
, int *num_full_ptr
, int overwriting
);
// sanity-check a journal pointer and its header fields; panics on any
// corruption. The 1G upper bound is a sanity limit on journal size.
// NOTE(review): the macro's opening null-check and closing braces were
// lost in extraction; reconstructed from the visible panic lines.
#define CHECK_JOURNAL(jnl) \
    do { \
    if (jnl == NULL) {\
	panic("%s:%d: null journal ptr?\n", __FILE__, __LINE__);\
    }\
    if (jnl->jdev == NULL) { \
	panic("%s:%d: jdev is null!\n", __FILE__, __LINE__);\
    } \
    if (jnl->fsdev == NULL) { \
	panic("%s:%d: fsdev is null!\n", __FILE__, __LINE__);\
    } \
    if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC) {\
	panic("%s:%d: jhdr magic corrupted (0x%x != 0x%x)\n",\
	__FILE__, __LINE__, jnl->jhdr->magic, JOURNAL_HEADER_MAGIC);\
    }\
    if (   jnl->jhdr->start <= 0 \
	|| jnl->jhdr->start > jnl->jhdr->size\
	|| jnl->jhdr->start > 1024*1024*1024) {\
	panic("%s:%d: jhdr start looks bad (0x%llx max size 0x%llx)\n", \
	__FILE__, __LINE__, jnl->jhdr->start, jnl->jhdr->size);\
    }\
    if (   jnl->jhdr->end <= 0 \
	|| jnl->jhdr->end > jnl->jhdr->size\
	|| jnl->jhdr->end > 1024*1024*1024) {\
	panic("%s:%d: jhdr end looks bad (0x%llx max size 0x%llx)\n", \
	__FILE__, __LINE__, jnl->jhdr->end, jnl->jhdr->size);\
    }\
    if (jnl->jhdr->size > 1024*1024*1024) {\
	panic("%s:%d: jhdr size looks bad (0x%llx)\n",\
	__FILE__, __LINE__, jnl->jhdr->size);\
    } \
    } while(0)
// sanity-check a transaction pointer and its fields; panics on any
// corruption.
// NOTE(review): the macro's opening null-check and closing braces were
// lost in extraction; reconstructed from the visible panic lines.
#define CHECK_TRANSACTION(tr) \
    do {\
    if (tr == NULL) {\
	panic("%s:%d: null transaction ptr?\n", __FILE__, __LINE__);\
    }\
    if (tr->jnl == NULL) {\
	panic("%s:%d: null tr->jnl ptr?\n", __FILE__, __LINE__);\
    }\
    if (tr->blhdr != (block_list_header *)tr->tbuffer) {\
	panic("%s:%d: blhdr (0x%x) != tbuffer (0x%x)\n", __FILE__, __LINE__, tr->blhdr, tr->tbuffer);\
    }\
    if (tr->total_bytes < 0) {\
	panic("%s:%d: tr total_bytes looks bad: %d\n", __FILE__, __LINE__, tr->total_bytes);\
    }\
    if (tr->journal_start < 0 || tr->journal_start > 1024*1024*1024) {\
	panic("%s:%d: tr journal start looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_start);\
    }\
    if (tr->journal_end < 0 || tr->journal_end > 1024*1024*1024) {\
	panic("%s:%d: tr journal end looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_end);\
    }\
    if (tr->blhdr && (tr->blhdr->max_blocks <= 0 || tr->blhdr->max_blocks > (tr->jnl->jhdr->size/tr->jnl->jhdr->jhdr_size))) {\
	panic("%s:%d: tr blhdr max_blocks looks bad: %d\n", __FILE__, __LINE__, tr->blhdr->max_blocks);\
    }\
    } while(0)
//
// this isn't a great checksum routine but it will do for now.
// we use it to checksum the journal header and the block list
// headers that are at the start of each transaction.
//
// NOTE(review): the declarations and return statement were lost in
// extraction; reconstructed (return of the bitwise complement) --
// verify against the original source.
//
static int
calc_checksum(char *ptr, int len)
{
    int i, cksum=0;

    // this is a lame checksum but for now it'll do
    for(i=0; i < len; i++, ptr++) {
	cksum = (cksum << 8) ^ (cksum + *(unsigned char *)ptr);
    }

    return (~cksum);
}
196 lck_grp_attr_t
* jnl_group_attr
;
197 lck_attr_t
* jnl_lock_attr
;
198 lck_grp_t
* jnl_mutex_group
;
203 jnl_lock_attr
= lck_attr_alloc_init();
204 jnl_group_attr
= lck_grp_attr_alloc_init();
205 jnl_mutex_group
= lck_grp_alloc_init("jnl-mutex", jnl_group_attr
);
208 static __inline__
void
209 lock_journal(journal
*jnl
)
211 lck_mtx_lock(&jnl
->jlock
);
214 static __inline__
void
215 unlock_journal(journal
*jnl
)
217 lck_mtx_unlock(&jnl
->jlock
);
220 static __inline__
void
221 lock_oldstart(journal
*jnl
)
223 lck_mtx_lock(&jnl
->old_start_lock
);
226 static __inline__
void
227 unlock_oldstart(journal
*jnl
)
229 lck_mtx_unlock(&jnl
->old_start_lock
);
// direction/option flags for do_journal_io()
#define JNL_WRITE    0x0001
#define JNL_READ     0x0002
#define JNL_HEADER   0x8000   // i/o is allowed to touch the header block at offset 0
239 // This function sets up a fake buf and passes it directly to the
240 // journal device strategy routine (so that it won't get cached in
243 // It also handles range checking the i/o so that we don't write
244 // outside the journal boundaries and it will wrap the i/o back
245 // to the beginning if necessary (skipping over the journal header)
248 do_journal_io(journal
*jnl
, off_t
*offset
, void *data
, size_t len
, int direction
)
250 int err
, io_sz
=0, curlen
=len
;
252 int max_iosize
= 128 * 1024;
253 struct vfsioattr ioattr
;
255 if (*offset
< 0 || *offset
> jnl
->jhdr
->size
) {
256 panic("jnl: do_jnl_io: bad offset 0x%llx (max 0x%llx)\n", *offset
, jnl
->jhdr
->size
);
258 vfs_ioattr(vnode_mount(jnl
->jdev
), &ioattr
);
260 if (direction
& JNL_WRITE
)
261 max_iosize
= ioattr
.io_maxwritecnt
;
262 else if (direction
& JNL_READ
)
263 max_iosize
= ioattr
.io_maxreadcnt
;
266 bp
= alloc_io_buf(jnl
->jdev
, 1);
268 if (*offset
+ (off_t
)curlen
> jnl
->jhdr
->size
&& *offset
!= 0 && jnl
->jhdr
->size
!= 0) {
269 if (*offset
== jnl
->jhdr
->size
) {
270 *offset
= jnl
->jhdr
->jhdr_size
;
272 curlen
= (off_t
)jnl
->jhdr
->size
- *offset
;
276 if (curlen
> max_iosize
) {
281 panic("jnl: do_jnl_io: curlen == %d, offset 0x%llx len %d\n", curlen
, *offset
, len
);
284 if (*offset
== 0 && (direction
& JNL_HEADER
) == 0) {
285 panic("jnl: request for i/o to jnl-header without JNL_HEADER flag set! (len %d, data %p)\n", curlen
, data
);
288 if (direction
& JNL_READ
)
289 buf_setflags(bp
, B_READ
);
292 * don't have to set any flags
294 vnode_startwrite(jnl
->jdev
);
296 buf_setsize(bp
, curlen
);
297 buf_setcount(bp
, curlen
);
298 buf_setdataptr(bp
, (uintptr_t)data
);
299 buf_setblkno(bp
, (daddr64_t
) ((jnl
->jdev_offset
+ *offset
) / (off_t
)jnl
->jhdr
->jhdr_size
));
300 buf_setlblkno(bp
, (daddr64_t
) ((jnl
->jdev_offset
+ *offset
) / (off_t
)jnl
->jhdr
->jhdr_size
));
302 err
= VNOP_STRATEGY(bp
);
304 err
= (int)buf_biowait(bp
);
309 printf("jnl: do_jnl_io: strategy err 0x%x\n", err
);
316 // handle wrap-around
317 data
= (char *)data
+ curlen
;
318 curlen
= len
- io_sz
;
319 if (*offset
>= jnl
->jhdr
->size
) {
320 *offset
= jnl
->jhdr
->jhdr_size
;
329 read_journal_data(journal
*jnl
, off_t
*offset
, void *data
, size_t len
)
331 return do_journal_io(jnl
, offset
, data
, len
, JNL_READ
);
335 write_journal_data(journal
*jnl
, off_t
*offset
, void *data
, size_t len
)
337 return do_journal_io(jnl
, offset
, data
, len
, JNL_WRITE
);
342 read_journal_header(journal
*jnl
, void *data
, size_t len
)
344 off_t hdr_offset
= 0;
346 return do_journal_io(jnl
, &hdr_offset
, data
, len
, JNL_READ
|JNL_HEADER
);
350 write_journal_header(journal
*jnl
)
352 static int num_err_prints
= 0;
354 off_t jhdr_offset
= 0;
355 struct vfs_context context
;
357 context
.vc_proc
= current_proc();
358 context
.vc_ucred
= NOCRED
;
360 // XXXdbg note: this ioctl doesn't seem to do anything on firewire disks.
362 ret
= VNOP_IOCTL(jnl
->jdev
, DKIOCSYNCHRONIZECACHE
, NULL
, FWRITE
, &context
);
365 // Only print this error if it's a different error than the
366 // previous one, or if it's the first time for this device
367 // or if the total number of printfs is less than 25. We
368 // allow for up to 25 printfs to insure that some make it
369 // into the on-disk syslog. Otherwise if we only printed
370 // one, it's possible it would never make it to the syslog
371 // for the root volume and that makes debugging hard.
373 if ( ret
!= jnl
->last_flush_err
374 || (jnl
->flags
& JOURNAL_FLUSHCACHE_ERR
) == 0
375 || num_err_prints
++ < 25) {
377 printf("jnl: flushing fs disk buffer returned 0x%x\n", ret
);
379 jnl
->flags
|= JOURNAL_FLUSHCACHE_ERR
;
380 jnl
->last_flush_err
= ret
;
385 jnl
->jhdr
->checksum
= 0;
386 jnl
->jhdr
->checksum
= calc_checksum((char *)jnl
->jhdr
, sizeof(struct journal_header
));
387 if (do_journal_io(jnl
, &jhdr_offset
, jnl
->header_buf
, jnl
->jhdr
->jhdr_size
, JNL_WRITE
|JNL_HEADER
) != jnl
->jhdr
->jhdr_size
) {
388 printf("jnl: write_journal_header: error writing the journal header!\n");
389 jnl
->flags
|= JOURNAL_INVALID
;
393 // Have to flush after writing the journal header so that
394 // a future transaction doesn't sneak out to disk before
395 // the header does and thus overwrite data that the old
396 // journal header refers to. Saw this exact case happen
397 // on an IDE bus analyzer with Larry Barras so while it
398 // may seem obscure, it's not.
400 VNOP_IOCTL(jnl
->jdev
, DKIOCSYNCHRONIZECACHE
, NULL
, FWRITE
, &context
);
408 // this is a work function used to free up transactions that
409 // completed. they can't be free'd from buffer_flushed_callback
410 // because it is called from deep with the disk driver stack
411 // and thus can't do something that would potentially cause
412 // paging. it gets called by each of the journal api entry
413 // points so stuff shouldn't hang around for too long.
416 free_old_stuff(journal
*jnl
)
418 transaction
*tr
, *next
;
422 jnl
->tr_freeme
= NULL
;
423 unlock_oldstart(jnl
);
427 FREE_ZONE(tr
, sizeof(transaction
), M_JNL_TR
);
435 // This is our callback that lets us know when a buffer has been
436 // flushed to disk. It's called from deep within the driver stack
437 // and thus is quite limited in what it can do. Notably, it can
438 // not initiate any new i/o's or allocate/free memory.
441 buffer_flushed_callback(struct buf
*bp
, void *arg
)
445 transaction
*ctr
, *prev
=NULL
, *next
;
449 //printf("jnl: buf flush: bp @ 0x%x l/blkno %qd/%qd vp 0x%x tr @ 0x%x\n",
450 // bp, buf_lblkno(bp), buf_blkno(bp), buf_vnode(bp), arg);
452 // snarf out the bits we want
453 bufsize
= buf_size(bp
);
454 tr
= (transaction
*)arg
;
456 // then we've already seen it
461 CHECK_TRANSACTION(tr
);
464 if (jnl
->flags
& JOURNAL_INVALID
) {
470 // update the number of blocks that have been flushed.
471 // this buf may represent more than one block so take
472 // that into account.
473 OSAddAtomic(bufsize
, &tr
->num_flushed
);
476 // if this transaction isn't done yet, just return as
477 // there is nothing to do.
478 if ((tr
->num_flushed
+ tr
->num_killed
) < tr
->total_bytes
) {
482 // this will single thread checking the transaction
485 if (tr
->total_bytes
== 0xfbadc0de) {
486 // then someone beat us to it...
487 unlock_oldstart(jnl
);
491 // mark this so that we're the owner of dealing with the
492 // cleanup for this transaction
493 tr
->total_bytes
= 0xfbadc0de;
495 //printf("jnl: tr 0x%x (0x%llx 0x%llx) in jnl 0x%x completed.\n",
496 // tr, tr->journal_start, tr->journal_end, jnl);
498 // find this entry in the old_start[] index and mark it completed
499 for(i
=0; i
< sizeof(jnl
->old_start
)/sizeof(jnl
->old_start
[0]); i
++) {
501 if ((jnl
->old_start
[i
] & ~(0x8000000000000000LL
)) == tr
->journal_start
) {
502 jnl
->old_start
[i
] &= ~(0x8000000000000000LL
);
506 if (i
>= sizeof(jnl
->old_start
)/sizeof(jnl
->old_start
[0])) {
507 panic("jnl: buffer_flushed: did not find tr w/start @ %lld (tr 0x%x, jnl 0x%x)\n",
508 tr
->journal_start
, tr
, jnl
);
510 unlock_oldstart(jnl
);
513 // if we are here then we need to update the journal header
514 // to reflect that this transaction is complete
515 if (tr
->journal_start
== jnl
->active_start
) {
516 jnl
->active_start
= tr
->journal_end
;
517 tr
->journal_start
= tr
->journal_end
= (off_t
)0;
520 // go through the completed_trs list and try to coalesce
521 // entries, restarting back at the beginning if we have to.
522 for(ctr
=jnl
->completed_trs
; ctr
; prev
=ctr
, ctr
=next
) {
523 if (ctr
->journal_start
== jnl
->active_start
) {
524 jnl
->active_start
= ctr
->journal_end
;
526 prev
->next
= ctr
->next
;
528 if (ctr
== jnl
->completed_trs
) {
529 jnl
->completed_trs
= ctr
->next
;
533 next
= jnl
->completed_trs
; // this starts us over again
534 ctr
->next
= jnl
->tr_freeme
;
535 jnl
->tr_freeme
= ctr
;
537 unlock_oldstart(jnl
);
538 } else if (tr
->journal_end
== ctr
->journal_start
) {
539 ctr
->journal_start
= tr
->journal_start
;
540 next
= jnl
->completed_trs
; // this starts us over again
542 tr
->journal_start
= tr
->journal_end
= (off_t
)0;
543 } else if (tr
->journal_start
== ctr
->journal_end
) {
544 ctr
->journal_end
= tr
->journal_end
;
546 tr
->journal_start
= tr
->journal_end
= (off_t
)0;
552 // if this is true then we didn't merge with anyone
553 // so link ourselves in at the head of the completed
555 if (tr
->journal_start
!= 0) {
556 // put this entry into the correct sorted place
557 // in the list instead of just at the head.
561 for(ctr
=jnl
->completed_trs
; ctr
&& tr
->journal_start
> ctr
->journal_start
; prev
=ctr
, ctr
=ctr
->next
) {
565 if (ctr
== NULL
&& prev
== NULL
) {
566 jnl
->completed_trs
= tr
;
568 } else if (ctr
== jnl
->completed_trs
) {
569 tr
->next
= jnl
->completed_trs
;
570 jnl
->completed_trs
= tr
;
572 tr
->next
= prev
->next
;
576 // if we're here this tr got merged with someone else so
577 // put it on the list to be free'd
579 tr
->next
= jnl
->tr_freeme
;
581 unlock_oldstart(jnl
);
586 #include <libkern/OSByteOrder.h>
588 #define SWAP16(x) OSSwapInt16(x)
589 #define SWAP32(x) OSSwapInt32(x)
590 #define SWAP64(x) OSSwapInt64(x)
594 swap_journal_header(journal
*jnl
)
596 jnl
->jhdr
->magic
= SWAP32(jnl
->jhdr
->magic
);
597 jnl
->jhdr
->endian
= SWAP32(jnl
->jhdr
->endian
);
598 jnl
->jhdr
->start
= SWAP64(jnl
->jhdr
->start
);
599 jnl
->jhdr
->end
= SWAP64(jnl
->jhdr
->end
);
600 jnl
->jhdr
->size
= SWAP64(jnl
->jhdr
->size
);
601 jnl
->jhdr
->blhdr_size
= SWAP32(jnl
->jhdr
->blhdr_size
);
602 jnl
->jhdr
->checksum
= SWAP32(jnl
->jhdr
->checksum
);
603 jnl
->jhdr
->jhdr_size
= SWAP32(jnl
->jhdr
->jhdr_size
);
607 swap_block_list_header(journal
*jnl
, block_list_header
*blhdr
)
611 blhdr
->max_blocks
= SWAP16(blhdr
->max_blocks
);
612 blhdr
->num_blocks
= SWAP16(blhdr
->num_blocks
);
613 blhdr
->bytes_used
= SWAP32(blhdr
->bytes_used
);
614 blhdr
->checksum
= SWAP32(blhdr
->checksum
);
615 blhdr
->pad
= SWAP32(blhdr
->pad
);
617 if (blhdr
->num_blocks
* sizeof(blhdr
->binfo
[0]) > jnl
->jhdr
->blhdr_size
) {
618 printf("jnl: blhdr num blocks looks suspicious (%d). not swapping.\n", blhdr
->num_blocks
);
622 for(i
=0; i
< blhdr
->num_blocks
; i
++) {
623 blhdr
->binfo
[i
].bnum
= SWAP64(blhdr
->binfo
[i
].bnum
);
624 blhdr
->binfo
[i
].bsize
= SWAP32(blhdr
->binfo
[i
].bsize
);
625 blhdr
->binfo
[i
].bp
= (void *)SWAP32((int)blhdr
->binfo
[i
].bp
);
631 update_fs_block(journal
*jnl
, void *block_ptr
, off_t fs_block
, size_t bsize
)
634 struct buf
*oblock_bp
=NULL
;
636 // first read the block we want.
637 ret
= buf_meta_bread(jnl
->fsdev
, (daddr64_t
)fs_block
, bsize
, NOCRED
, &oblock_bp
);
639 printf("jnl: update_fs_block: error reading fs block # %lld! (ret %d)\n", fs_block
, ret
);
642 buf_brelse(oblock_bp
);
646 // let's try to be aggressive here and just re-write the block
647 oblock_bp
= buf_getblk(jnl
->fsdev
, (daddr64_t
)fs_block
, bsize
, 0, 0, BLK_META
);
648 if (oblock_bp
== NULL
) {
649 printf("jnl: update_fs_block: buf_getblk() for %lld failed! failing update.\n", fs_block
);
654 // make sure it's the correct size.
655 if (buf_size(oblock_bp
) != bsize
) {
656 buf_brelse(oblock_bp
);
660 // copy the journal data over top of it
661 memcpy((void *)buf_dataptr(oblock_bp
), block_ptr
, bsize
);
663 if ((ret
= VNOP_BWRITE(oblock_bp
)) != 0) {
664 printf("jnl: update_fs_block: failed to update block %lld (ret %d)\n", fs_block
,ret
);
668 // and now invalidate it so that if someone else wants to read
669 // it in a different size they'll be able to do it.
670 ret
= buf_meta_bread(jnl
->fsdev
, (daddr64_t
)fs_block
, bsize
, NOCRED
, &oblock_bp
);
672 buf_markinvalid(oblock_bp
);
673 buf_brelse(oblock_bp
);
680 grow_table(struct bucket
**buf_ptr
, int num_buckets
, int new_size
)
682 struct bucket
*newBuf
;
683 int current_size
= num_buckets
, i
;
685 // return if newsize is less than the current size
686 if (new_size
< num_buckets
) {
690 if ((MALLOC(newBuf
, struct bucket
*, new_size
*sizeof(struct bucket
), M_TEMP
, M_WAITOK
)) == NULL
) {
691 printf("jnl: grow_table: no memory to expand coalesce buffer!\n");
695 // printf("jnl: lookup_bucket: expanded co_buf to %d elems\n", new_size);
697 // copy existing elements
698 bcopy(*buf_ptr
, newBuf
, num_buckets
*sizeof(struct bucket
));
700 // initialize the new ones
701 for(i
=num_buckets
; i
< new_size
; i
++) {
702 newBuf
[i
].block_num
= (off_t
)-1;
705 // free the old container
706 FREE(*buf_ptr
, M_TEMP
);
715 lookup_bucket(struct bucket
**buf_ptr
, off_t block_num
, int num_full
)
717 int lo
, hi
, index
, matches
, i
;
720 return 0; // table is empty, so insert at index=0
727 // perform binary search for block_num
729 int mid
= (hi
- lo
)/2 + lo
;
730 off_t this_num
= (*buf_ptr
)[mid
].block_num
;
732 if (block_num
== this_num
) {
737 if (block_num
< this_num
) {
742 if (block_num
> this_num
) {
748 // check if lo and hi converged on the match
749 if (block_num
== (*buf_ptr
)[hi
].block_num
) {
753 // if no existing entry found, find index for new one
755 index
= (block_num
< (*buf_ptr
)[hi
].block_num
) ? hi
: hi
+ 1;
757 // make sure that we return the right-most index in the case of multiple matches
760 while(i
< num_full
&& block_num
== (*buf_ptr
)[i
].block_num
) {
772 insert_block(journal
*jnl
, struct bucket
**buf_ptr
, int blk_index
, off_t num
, size_t size
, size_t offset
, int *num_buckets_ptr
, int *num_full_ptr
, int overwriting
)
775 // grow the table if we're out of space
776 if (*num_full_ptr
>= *num_buckets_ptr
) {
777 int new_size
= *num_buckets_ptr
* 2;
778 int grow_size
= grow_table(buf_ptr
, *num_buckets_ptr
, new_size
);
780 if (grow_size
< new_size
) {
781 printf("jnl: add_block: grow_table returned an error!\n");
785 *num_buckets_ptr
= grow_size
; //update num_buckets to reflect the new size
788 // if we're not inserting at the end, we need to bcopy
789 if (blk_index
!= *num_full_ptr
) {
790 bcopy( (*buf_ptr
)+(blk_index
), (*buf_ptr
)+(blk_index
+1), (*num_full_ptr
-blk_index
)*sizeof(struct bucket
) );
793 (*num_full_ptr
)++; // increment only if we're not overwriting
796 // sanity check the values we're about to add
797 if (offset
>= jnl
->jhdr
->size
) {
798 offset
= jnl
->jhdr
->jhdr_size
+ (offset
- jnl
->jhdr
->size
);
801 panic("jnl: insert_block: bad size in insert_block (%d)\n", size
);
804 (*buf_ptr
)[blk_index
].block_num
= num
;
805 (*buf_ptr
)[blk_index
].block_size
= size
;
806 (*buf_ptr
)[blk_index
].jnl_offset
= offset
;
812 do_overlap(journal
*jnl
, struct bucket
**buf_ptr
, int blk_index
, off_t block_num
, size_t size
, size_t offset
, int *num_buckets_ptr
, int *num_full_ptr
)
814 int num_to_remove
, index
, i
, overwrite
, err
;
815 size_t jhdr_size
= jnl
->jhdr
->jhdr_size
, new_offset
;
816 off_t overlap
, block_start
, block_end
;
818 block_start
= block_num
*jhdr_size
;
819 block_end
= block_start
+ size
;
820 overwrite
= (block_num
== (*buf_ptr
)[blk_index
].block_num
&& size
>= (*buf_ptr
)[blk_index
].block_size
);
822 // first, eliminate any overlap with the previous entry
823 if (blk_index
!= 0 && !overwrite
) {
824 off_t prev_block_start
= (*buf_ptr
)[blk_index
-1].block_num
*jhdr_size
;
825 off_t prev_block_end
= prev_block_start
+ (*buf_ptr
)[blk_index
-1].block_size
;
826 overlap
= prev_block_end
- block_start
;
828 if (overlap
% jhdr_size
!= 0) {
829 panic("jnl: do_overlap: overlap with previous entry not a multiple of %d\n", jhdr_size
);
832 // if the previous entry completely overlaps this one, we need to break it into two pieces.
833 if (prev_block_end
> block_end
) {
834 off_t new_num
= block_end
/ jhdr_size
;
835 size_t new_size
= prev_block_end
- block_end
;
837 new_offset
= (*buf_ptr
)[blk_index
-1].jnl_offset
+ (block_end
- prev_block_start
);
839 err
= insert_block(jnl
, buf_ptr
, blk_index
, new_num
, new_size
, new_offset
, num_buckets_ptr
, num_full_ptr
, 0);
841 panic("jnl: do_overlap: error inserting during pre-overlap\n");
845 // Regardless, we need to truncate the previous entry to the beginning of the overlap
846 (*buf_ptr
)[blk_index
-1].block_size
= block_start
- prev_block_start
;
850 // then, bail out fast if there's no overlap with the entries that follow
851 if (!overwrite
&& block_end
<= (*buf_ptr
)[blk_index
].block_num
*jhdr_size
) {
852 return 0; // no overlap, no overwrite
853 } else if (overwrite
&& (blk_index
+ 1 >= *num_full_ptr
|| block_end
<= (*buf_ptr
)[blk_index
+1].block_num
*jhdr_size
)) {
854 return 1; // simple overwrite
857 // Otherwise, find all cases of total and partial overlap. We use the special
858 // block_num of -2 to designate entries that are completely overlapped and must
859 // be eliminated. The block_num, size, and jnl_offset of partially overlapped
860 // entries must be adjusted to keep the array consistent.
863 while(index
< *num_full_ptr
&& block_end
> (*buf_ptr
)[index
].block_num
*jhdr_size
) {
864 if (block_end
>= ((*buf_ptr
)[index
].block_num
*jhdr_size
+ (*buf_ptr
)[index
].block_size
)) {
865 (*buf_ptr
)[index
].block_num
= -2; // mark this for deletion
868 overlap
= block_end
- (*buf_ptr
)[index
].block_num
*jhdr_size
;
870 if (overlap
% jhdr_size
!= 0) {
871 panic("jnl: do_overlap: overlap of %lld is not multiple of %d\n", overlap
, jhdr_size
);
874 // if we partially overlap this entry, adjust its block number, jnl offset, and size
875 (*buf_ptr
)[index
].block_num
+= (overlap
/ jhdr_size
); // make sure overlap is multiple of jhdr_size, or round up
877 new_offset
= (*buf_ptr
)[index
].jnl_offset
+ overlap
; // check for wrap-around
878 if (new_offset
>= jnl
->jhdr
->size
) {
879 new_offset
= jhdr_size
+ (new_offset
- jnl
->jhdr
->size
);
881 (*buf_ptr
)[index
].jnl_offset
= new_offset
;
883 (*buf_ptr
)[index
].block_size
-= overlap
; // sanity check for negative value
884 if ((*buf_ptr
)[index
].block_size
<= 0) {
885 panic("jnl: do_overlap: after overlap, new block size is invalid (%d)\n", (*buf_ptr
)[index
].block_size
);
886 // return -1; // if above panic is removed, return -1 for error
895 // bcopy over any completely overlapped entries, starting at the right (where the above loop broke out)
896 index
--; // start with the last index used within the above loop
897 while(index
>= blk_index
) {
898 if ((*buf_ptr
)[index
].block_num
== -2) {
899 if (index
== *num_full_ptr
-1) {
900 (*buf_ptr
)[index
].block_num
= -1; // it's the last item in the table... just mark as free
902 bcopy( (*buf_ptr
)+(index
+1), (*buf_ptr
)+(index
), (*num_full_ptr
- (index
+ 1)) * sizeof(struct bucket
) );
909 // eliminate any stale entries at the end of the table
910 for(i
=*num_full_ptr
; i
< (*num_full_ptr
+ num_to_remove
); i
++) {
911 (*buf_ptr
)[i
].block_num
= -1;
914 return 0; // if we got this far, we need to insert the entry into the table (rather than overwrite)
917 // PR-3105942: Coalesce writes to the same block in journal replay
918 // We coalesce writes by maintaining a dynamic sorted array of physical disk blocks
919 // to be replayed and the corresponding location in the journal which contains
920 // the most recent data for those blocks. The array is "played" once the all the
921 // blocks in the journal have been coalesced. The code for the case of conflicting/
922 // overlapping writes to a single block is the most dense. Because coalescing can
923 // disrupt the existing time-ordering of blocks in the journal playback, care
924 // is taken to catch any overlaps and keep the array consistent.
926 add_block(journal
*jnl
, struct bucket
**buf_ptr
, off_t block_num
, size_t size
, size_t offset
, int *num_buckets_ptr
, int *num_full_ptr
)
928 int blk_index
, overwriting
;
930 // on return from lookup_bucket(), blk_index is the index into the table where block_num should be
931 // inserted (or the index of the elem to overwrite).
932 blk_index
= lookup_bucket( buf_ptr
, block_num
, *num_full_ptr
);
934 // check if the index is within bounds (if we're adding this block to the end of
935 // the table, blk_index will be equal to num_full)
936 if (blk_index
< 0 || blk_index
> *num_full_ptr
) {
937 //printf("jnl: add_block: trouble adding block to co_buf\n");
939 } // else printf("jnl: add_block: adding block 0x%llx at i=%d\n", block_num, blk_index);
941 // Determine whether we're overwriting an existing entry by checking for overlap
942 overwriting
= do_overlap(jnl
, buf_ptr
, blk_index
, block_num
, size
, offset
, num_buckets_ptr
, num_full_ptr
);
943 if (overwriting
< 0) {
944 return -1; // if we got an error, pass it along
947 // returns the index, or -1 on error
948 blk_index
= insert_block(jnl
, buf_ptr
, blk_index
, block_num
, size
, offset
, num_buckets_ptr
, num_full_ptr
, overwriting
);
954 replay_journal(journal
*jnl
)
956 int i
, ret
, orig_checksum
, checksum
, max_bsize
;
957 block_list_header
*blhdr
;
959 char *buff
, *block_ptr
=NULL
;
960 struct bucket
*co_buf
;
961 int num_buckets
= STARTING_BUCKETS
, num_full
;
963 // wrap the start ptr if it points to the very end of the journal
964 if (jnl
->jhdr
->start
== jnl
->jhdr
->size
) {
965 jnl
->jhdr
->start
= jnl
->jhdr
->jhdr_size
;
967 if (jnl
->jhdr
->end
== jnl
->jhdr
->size
) {
968 jnl
->jhdr
->end
= jnl
->jhdr
->jhdr_size
;
971 if (jnl
->jhdr
->start
== jnl
->jhdr
->end
) {
975 // allocate memory for the header_block. we'll read each blhdr into this
976 if (kmem_alloc(kernel_map
, (vm_offset_t
*)&buff
, jnl
->jhdr
->blhdr_size
)) {
977 printf("jnl: replay_journal: no memory for block buffer! (%d bytes)\n",
978 jnl
->jhdr
->blhdr_size
);
982 // allocate memory for the coalesce buffer
983 if ((MALLOC(co_buf
, struct bucket
*, num_buckets
*sizeof(struct bucket
), M_TEMP
, M_WAITOK
)) == NULL
) {
984 printf("jnl: replay_journal: no memory for coalesce buffer!\n");
988 // initialize entries
989 for(i
=0; i
< num_buckets
; i
++) {
990 co_buf
[i
].block_num
= -1;
992 num_full
= 0; // empty at first
995 printf("jnl: replay_journal: from: %lld to: %lld (joffset 0x%llx)\n",
996 jnl
->jhdr
->start
, jnl
->jhdr
->end
, jnl
->jdev_offset
);
998 while(jnl
->jhdr
->start
!= jnl
->jhdr
->end
) {
999 offset
= jnl
->jhdr
->start
;
1000 ret
= read_journal_data(jnl
, &offset
, buff
, jnl
->jhdr
->blhdr_size
);
1001 if (ret
!= jnl
->jhdr
->blhdr_size
) {
1002 printf("jnl: replay_journal: Could not read block list header block @ 0x%llx!\n", offset
);
1006 blhdr
= (block_list_header
*)buff
;
1008 orig_checksum
= blhdr
->checksum
;
1009 blhdr
->checksum
= 0;
1010 if (jnl
->flags
& JOURNAL_NEED_SWAP
) {
1011 // calculate the checksum based on the unswapped data
1012 // because it is done byte-at-a-time.
1013 orig_checksum
= SWAP32(orig_checksum
);
1014 checksum
= calc_checksum((char *)blhdr
, BLHDR_CHECKSUM_SIZE
);
1015 swap_block_list_header(jnl
, blhdr
);
1017 checksum
= calc_checksum((char *)blhdr
, BLHDR_CHECKSUM_SIZE
);
1019 if (checksum
!= orig_checksum
) {
1020 printf("jnl: replay_journal: bad block list header @ 0x%llx (checksum 0x%x != 0x%x)\n",
1021 offset
, orig_checksum
, checksum
);
1024 if ( blhdr
->max_blocks
<= 0 || blhdr
->max_blocks
> 2048
1025 || blhdr
->num_blocks
<= 0 || blhdr
->num_blocks
> blhdr
->max_blocks
) {
1026 printf("jnl: replay_journal: bad looking journal entry: max: %d num: %d\n",
1027 blhdr
->max_blocks
, blhdr
->num_blocks
);
1031 for(i
=1; i
< blhdr
->num_blocks
; i
++) {
1032 if (blhdr
->binfo
[i
].bnum
< 0 && blhdr
->binfo
[i
].bnum
!= (off_t
)-1) {
1033 printf("jnl: replay_journal: bogus block number 0x%llx\n", blhdr
->binfo
[i
].bnum
);
1038 //printf("jnl: replay_journal: adding %d blocks in journal entry @ 0x%llx to co_buf\n",
1039 // blhdr->num_blocks-1, jnl->jhdr->start);
1040 for(i
=1; i
< blhdr
->num_blocks
; i
++) {
1044 size
= blhdr
->binfo
[i
].bsize
;
1045 number
= blhdr
->binfo
[i
].bnum
;
1047 // don't add "killed" blocks
1048 if (number
== (off_t
)-1) {
1049 //printf("jnl: replay_journal: skipping killed fs block (index %d)\n", i);
1051 // add this bucket to co_buf, coalescing where possible
1052 // printf("jnl: replay_journal: adding block 0x%llx\n", number);
1053 ret_val
= add_block(jnl
, &co_buf
, number
, size
, (size_t) offset
, &num_buckets
, &num_full
);
1055 if (ret_val
== -1) {
1056 printf("jnl: replay_journal: trouble adding block to co_buf\n");
1058 } // else printf("jnl: replay_journal: added block 0x%llx at i=%d\n", number);
1064 // check if the last block added puts us off the end of the jnl.
1065 // if so, we need to wrap to the beginning and take any remainder
1068 if (offset
>= jnl
->jhdr
->size
) {
1069 offset
= jnl
->jhdr
->jhdr_size
+ (offset
- jnl
->jhdr
->size
);
1074 jnl
->jhdr
->start
+= blhdr
->bytes_used
;
1075 if (jnl
->jhdr
->start
>= jnl
->jhdr
->size
) {
1076 // wrap around and skip the journal header block
1077 jnl
->jhdr
->start
= (jnl
->jhdr
->start
% jnl
->jhdr
->size
) + jnl
->jhdr
->jhdr_size
;
1082 //printf("jnl: replay_journal: replaying %d blocks\n", num_full);
1085 * make sure it's at least one page in size, so
1086 * start max_bsize at PAGE_SIZE
1088 for (i
= 0, max_bsize
= PAGE_SIZE
; i
< num_full
; i
++) {
1090 if (co_buf
[i
].block_num
== (off_t
)-1)
1093 if (co_buf
[i
].block_size
> max_bsize
)
1094 max_bsize
= co_buf
[i
].block_size
;
1097 * round max_bsize up to the nearest PAGE_SIZE multiple
1099 if (max_bsize
& (PAGE_SIZE
- 1)) {
1100 max_bsize
= (max_bsize
+ PAGE_SIZE
) & ~(PAGE_SIZE
- 1);
1103 if (kmem_alloc(kernel_map
, (vm_offset_t
*)&block_ptr
, max_bsize
)) {
1107 // Replay the coalesced entries in the co-buf
1108 for(i
=0; i
< num_full
; i
++) {
1109 size_t size
= co_buf
[i
].block_size
;
1110 off_t jnl_offset
= (off_t
) co_buf
[i
].jnl_offset
;
1111 off_t number
= co_buf
[i
].block_num
;
1114 // printf("replaying co_buf[%d]: block 0x%llx, size 0x%x, jnl_offset 0x%llx\n", i, co_buf[i].block_num,
1115 // co_buf[i].block_size, co_buf[i].jnl_offset);
1117 if (number
== (off_t
)-1) {
1118 // printf("jnl: replay_journal: skipping killed fs block\n");
1121 // do journal read, and set the phys. block
1122 ret
= read_journal_data(jnl
, &jnl_offset
, block_ptr
, size
);
1124 printf("jnl: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", offset
);
1128 if (update_fs_block(jnl
, block_ptr
, number
, size
) != 0) {
1135 // done replaying; update jnl header
1136 if (write_journal_header(jnl
) != 0) {
1141 kmem_free(kernel_map
, (vm_offset_t
)block_ptr
, max_bsize
);
1144 // free the coalesce buffer
1145 FREE(co_buf
, M_TEMP
);
1148 kmem_free(kernel_map
, (vm_offset_t
)buff
, jnl
->jhdr
->blhdr_size
);
1153 kmem_free(kernel_map
, (vm_offset_t
)block_ptr
, max_bsize
);
1156 FREE(co_buf
, M_TEMP
);
1158 kmem_free(kernel_map
, (vm_offset_t
)buff
, jnl
->jhdr
->blhdr_size
);
// Transaction buffer sizing constants.  DEFAULT_TRANSACTION_BUFFER_SIZE is
// the base size; size_up_tbuffer() scales it up by installed memory, and the
// result is clamped to MAX_TRANSACTION_BUFFER_SIZE.
#define DEFAULT_TRANSACTION_BUFFER_SIZE  (128*1024)
//#define DEFAULT_TRANSACTION_BUFFER_SIZE  (256*1024)   // better performance but uses more mem
#define MAX_TRANSACTION_BUFFER_SIZE      (512*1024)

// XXXdbg - so I can change it in the debugger
// Global, computed once (from mem_size) in size_up_tbuffer(); 0 == not yet sized.
int def_tbuffer_size = 0;
// This function sets the size of the tbuffer and the
// size of the blhdr.  It assumes that jnl->jhdr->size
// and jnl->jhdr->jhdr_size are already valid.
//
// NOTE(review): the return type line was dropped in this extraction
// (presumably `static void`) -- confirm against the original file.
size_up_tbuffer(journal *jnl, int tbuffer_size, int phys_blksz)
{
	// one-time initialization based on how much memory
	// there is in the machine.
	if (def_tbuffer_size == 0) {
		if (mem_size < (256*1024*1024)) {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE;
		} else if (mem_size < (512*1024*1024)) {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 2;
		} else if (mem_size < (1024*1024*1024)) {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 3;
		} else if (mem_size >= (1024*1024*1024)) {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 4;
		}
	}	// NOTE(review): closing braces reconstructed (lines dropped in extraction)

	// size up the transaction buffer... can't be larger than the number
	// of blocks that can fit in a block_list_header block.
	if (tbuffer_size == 0) {
		jnl->tbuffer_size = def_tbuffer_size;
	} else {	// NOTE(review): `} else {` line dropped in extraction; reconstructed
		// make sure that the specified tbuffer_size isn't too small
		if (tbuffer_size < jnl->jhdr->blhdr_size * 2) {
			tbuffer_size = jnl->jhdr->blhdr_size * 2;
		}
		// and make sure it's an even multiple of the block size
		if ((tbuffer_size % jnl->jhdr->jhdr_size) != 0) {
			tbuffer_size -= (tbuffer_size % jnl->jhdr->jhdr_size);
		}

		jnl->tbuffer_size = tbuffer_size;
	}

	// never let the tbuffer exceed half the journal
	if (jnl->tbuffer_size > (jnl->jhdr->size / 2)) {
		jnl->tbuffer_size = (jnl->jhdr->size / 2);
	}

	if (jnl->tbuffer_size > MAX_TRANSACTION_BUFFER_SIZE) {
		jnl->tbuffer_size = MAX_TRANSACTION_BUFFER_SIZE;
	}

	// one block_info per device block that fits in the tbuffer
	jnl->jhdr->blhdr_size = (jnl->tbuffer_size / jnl->jhdr->jhdr_size) * sizeof(block_info);
	if (jnl->jhdr->blhdr_size < phys_blksz) {
		jnl->jhdr->blhdr_size = phys_blksz;
	} else if ((jnl->jhdr->blhdr_size % phys_blksz) != 0) {
		// have to round up so we're an even multiple of the physical block size
		jnl->jhdr->blhdr_size = (jnl->jhdr->blhdr_size + (phys_blksz - 1)) & ~(phys_blksz - 1);
	}
}
// Create a brand-new journal on the device backing `jvp` and write its
// initial (empty) header to disk.  Returns the new journal, or bails to
// the error labels on allocation/IO failure.
// NOTE(review): the return type and several parameter lines were dropped
// in this extraction (presumably off_t offset, off_t journal_size,
// struct vnode *fsvp, int32_t flags, void *arg) -- confirm against original.
journal_create(struct vnode *jvp,
	       /* NOTE(review): parameter lines dropped in extraction here */
	       size_t min_fs_blksz,
	       /* NOTE(review): parameter line dropped in extraction here */
	       int32_t tbuffer_size,
	       void (*flush)(void *arg)
	       /* NOTE(review): trailing parameter line(s) dropped in extraction */)
{
	// NOTE(review): local declarations (jnl, phys_blksz) dropped in extraction
	struct vfs_context context;

	context.vc_proc = current_proc();
	context.vc_ucred = FSCRED;

	/* Get the real physical block size. */
	if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
		// NOTE(review): error printf/return dropped in extraction here
	}

	if (phys_blksz > min_fs_blksz) {
		printf("jnl: create: error: phys blksize %d bigger than min fs blksize %d\n",
		       phys_blksz, min_fs_blksz);
		// NOTE(review): failure return dropped in extraction here
	}

	if ((journal_size % phys_blksz) != 0) {
		printf("jnl: create: journal size 0x%llx is not an even multiple of block size 0x%x\n",
		       journal_size, phys_blksz);
		// NOTE(review): failure return dropped in extraction here
	}

	MALLOC_ZONE(jnl, struct journal *, sizeof(struct journal), M_JNL_JNL, M_WAITOK);
	memset(jnl, 0, sizeof(*jnl));

	// NOTE(review): jnl->jdev assignment presumably dropped in extraction here
	jnl->jdev_offset = offset;
	// NOTE(review): fsdev/flush assignments presumably dropped in extraction here
	jnl->flush_arg = arg;
	jnl->flags = (flags & JOURNAL_OPTION_FLAGS_MASK);
	lck_mtx_init(&jnl->old_start_lock, jnl_mutex_group, jnl_lock_attr);

	if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl->header_buf, phys_blksz)) {
		printf("jnl: create: could not allocate space for header buffer (%d bytes)\n", phys_blksz);
		goto bad_kmem_alloc;
	}

	memset(jnl->header_buf, 0, phys_blksz);

	jnl->jhdr = (journal_header *)jnl->header_buf;
	jnl->jhdr->magic  = JOURNAL_HEADER_MAGIC;
	jnl->jhdr->endian = ENDIAN_MAGIC;
	jnl->jhdr->start  = phys_blksz;    // start at block #1, block #0 is for the jhdr itself
	jnl->jhdr->end    = phys_blksz;
	jnl->jhdr->size   = journal_size;
	jnl->jhdr->jhdr_size = phys_blksz;
	size_up_tbuffer(jnl, tbuffer_size, phys_blksz);

	jnl->active_start = jnl->jhdr->start;

	// XXXdbg - for testing you can force the journal to wrap around
	// jnl->jhdr->start = jnl->jhdr->size - (phys_blksz*3);
	// jnl->jhdr->end   = jnl->jhdr->size - (phys_blksz*3);

	lck_mtx_init(&jnl->jlock, jnl_mutex_group, jnl_lock_attr);

	if (write_journal_header(jnl) != 0) {
		printf("jnl: journal_create: failed to write journal header.\n");
		// NOTE(review): `goto bad_journal;` presumably dropped in extraction here
	}

	// NOTE(review): the success "return jnl;" and the bad_journal: label were
	// dropped in extraction; everything below is the error-unwind path.
	kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz);
	// NOTE(review): bad_kmem_alloc: label line dropped in extraction here
	FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
	// NOTE(review): "return NULL;" dropped in extraction here
}
// Open a pre-existing journal on the device backing `jvp`: read and
// validate the on-disk journal header (magic, endianness, checksum,
// start/end/size sanity), temporarily switching the device block size if
// the header was written with a different one, then replay any committed
// transactions (or reset the pointers if JOURNAL_RESET was passed).
// NOTE(review): return type and several parameter lines were dropped in
// this extraction (presumably offset, journal_size, fsvp, flags, arg).
journal_open(struct vnode *jvp,
	     /* NOTE(review): parameter lines dropped in extraction here */
	     size_t min_fs_blksz,
	     /* NOTE(review): parameter line dropped in extraction here */
	     int32_t tbuffer_size,
	     void (*flush)(void *arg)
	     /* NOTE(review): trailing parameter line(s) dropped in extraction */)
{
	// NOTE(review): declaration of `jnl` dropped in extraction
	int orig_blksz = 0, phys_blksz;
	int orig_checksum, checksum;
	struct vfs_context context;

	context.vc_proc = current_proc();
	context.vc_ucred = FSCRED;

	/* Get the real physical block size. */
	if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
		// NOTE(review): error printf/return dropped in extraction here
	}

	if (phys_blksz > min_fs_blksz) {
		printf("jnl: create: error: phys blksize %d bigger than min fs blksize %d\n",
		       phys_blksz, min_fs_blksz);
		// NOTE(review): failure return dropped in extraction here
	}

	if ((journal_size % phys_blksz) != 0) {
		printf("jnl: open: journal size 0x%llx is not an even multiple of block size 0x%x\n",
		       journal_size, phys_blksz);
		// NOTE(review): failure return dropped in extraction here
	}

	MALLOC_ZONE(jnl, struct journal *, sizeof(struct journal), M_JNL_JNL, M_WAITOK);
	memset(jnl, 0, sizeof(*jnl));

	// NOTE(review): jnl->jdev assignment presumably dropped here
	jnl->jdev_offset = offset;
	// NOTE(review): fsdev/flush assignments presumably dropped here
	jnl->flush_arg = arg;
	jnl->flags = (flags & JOURNAL_OPTION_FLAGS_MASK);
	lck_mtx_init(&jnl->old_start_lock, jnl_mutex_group, jnl_lock_attr);

	if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl->header_buf, phys_blksz)) {
		printf("jnl: create: could not allocate space for header buffer (%d bytes)\n", phys_blksz);
		goto bad_kmem_alloc;
	}

	jnl->jhdr = (journal_header *)jnl->header_buf;
	memset(jnl->jhdr, 0, sizeof(journal_header)+4);

	// we have to set this up here so that do_journal_io() will work
	jnl->jhdr->jhdr_size = phys_blksz;

	if (read_journal_header(jnl, jnl->jhdr, phys_blksz) != phys_blksz) {
		printf("jnl: open: could not read %d bytes for the journal header.\n"
		       /* NOTE(review): printf argument and error path dropped in extraction */);
	}

	orig_checksum = jnl->jhdr->checksum;
	jnl->jhdr->checksum = 0;

	if (jnl->jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
		// do this before the swap since it's done byte-at-a-time
		orig_checksum = SWAP32(orig_checksum);
		checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header));
		swap_journal_header(jnl);
		jnl->flags |= JOURNAL_NEED_SWAP;
	} else {	// NOTE(review): `} else {` line dropped in extraction; reconstructed
		checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header));
	}

	if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC && jnl->jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
		printf("jnl: open: journal magic is bad (0x%x != 0x%x)\n",
		       jnl->jhdr->magic, JOURNAL_HEADER_MAGIC);
		// NOTE(review): `goto bad_journal;` dropped in extraction here
	}

	// only check if we're the current journal header magic value
	if (jnl->jhdr->magic == JOURNAL_HEADER_MAGIC) {
		if (orig_checksum != checksum) {
			printf("jnl: open: journal checksum is bad (0x%x != 0x%x)\n",
			       orig_checksum, checksum);
			// NOTE(review): error handling dropped in extraction here
		}
	}

	// XXXdbg - convert old style magic numbers to the new one
	if (jnl->jhdr->magic == OLD_JOURNAL_HEADER_MAGIC) {
		jnl->jhdr->magic = JOURNAL_HEADER_MAGIC;
	}

	if (phys_blksz != jnl->jhdr->jhdr_size && jnl->jhdr->jhdr_size != 0) {
		printf("jnl: open: phys_blksz %d does not match journal header size %d\n",
		       phys_blksz, jnl->jhdr->jhdr_size);

		// switch the device to the block size the journal was written with
		orig_blksz = phys_blksz;
		phys_blksz = jnl->jhdr->jhdr_size;
		if (VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&phys_blksz, FWRITE, &context)) {
			printf("jnl: could not set block size to %d bytes.\n", phys_blksz);
			// NOTE(review): error handling dropped in extraction here
		}
		//	goto bad_journal;
	}

	if (   jnl->jhdr->start <= 0
	    || jnl->jhdr->start > jnl->jhdr->size
	    || jnl->jhdr->start > 1024*1024*1024) {
		printf("jnl: open: jhdr start looks bad (0x%llx max size 0x%llx)\n",
		       jnl->jhdr->start, jnl->jhdr->size);
		// NOTE(review): `goto bad_journal;` dropped in extraction here
	}

	if (   jnl->jhdr->end <= 0
	    || jnl->jhdr->end > jnl->jhdr->size
	    || jnl->jhdr->end > 1024*1024*1024) {
		printf("jnl: open: jhdr end looks bad (0x%llx max size 0x%llx)\n",
		       jnl->jhdr->end, jnl->jhdr->size);
		// NOTE(review): `goto bad_journal;` dropped in extraction here
	}

	if (jnl->jhdr->size > 1024*1024*1024) {
		printf("jnl: open: jhdr size looks bad (0x%llx)\n", jnl->jhdr->size);
		// NOTE(review): `goto bad_journal;` dropped in extraction here
	}

	// XXXdbg - can't do these checks because hfs writes all kinds of
	//          non-uniform sized blocks even on devices that have a block size
	//          that is larger than 512 bytes (i.e. optical media w/2k blocks).
	//          therefore these checks will fail and so we just have to punt and
	//          do more relaxed checking...
	// XXXdbg    if ((jnl->jhdr->start % jnl->jhdr->jhdr_size) != 0) {
	if ((jnl->jhdr->start % 512) != 0) {
		printf("jnl: open: journal start (0x%llx) not a multiple of 512?\n"
		       /* NOTE(review): printf argument and goto dropped in extraction */);
	}

	//XXXdbg    if ((jnl->jhdr->end % jnl->jhdr->jhdr_size) != 0) {
	if ((jnl->jhdr->end % 512) != 0) {
		printf("jnl: open: journal end (0x%llx) not a multiple of block size (0x%x)?\n",
		       jnl->jhdr->end, jnl->jhdr->jhdr_size);
		// NOTE(review): `goto bad_journal;` dropped in extraction here
	}

	// take care of replaying the journal if necessary
	if (flags & JOURNAL_RESET) {
		printf("jnl: journal start/end pointers reset! (jnl 0x%x; s 0x%llx e 0x%llx)\n",
		       jnl, jnl->jhdr->start, jnl->jhdr->end);
		jnl->jhdr->start = jnl->jhdr->end;
	} else if (replay_journal(jnl) != 0) {
		printf("jnl: journal_open: Error replaying the journal!\n");
		// NOTE(review): `goto bad_journal;` dropped in extraction here
	}

	if (orig_blksz != 0) {
		// restore the device's original block size
		VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, &context);
		phys_blksz = orig_blksz;
		if (orig_blksz < jnl->jhdr->jhdr_size) {
			printf("jnl: open: jhdr_size is %d but orig phys blk size is %d.  switching.\n",
			       jnl->jhdr->jhdr_size, orig_blksz);

			jnl->jhdr->jhdr_size = orig_blksz;
		}
	}

	// make sure this is in sync!
	jnl->active_start = jnl->jhdr->start;

	// set this now, after we've replayed the journal
	size_up_tbuffer(jnl, tbuffer_size, phys_blksz);

	lck_mtx_init(&jnl->jlock, jnl_mutex_group, jnl_lock_attr);

	// NOTE(review): the success "return jnl;" and the bad_journal: label were
	// dropped in extraction; everything below is the error-unwind path.
	if (orig_blksz != 0) {
		phys_blksz = orig_blksz;
		VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, &context);
	}
	kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz);
	// NOTE(review): bad_kmem_alloc: label line dropped in extraction here
	FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
	// NOTE(review): "return NULL;" dropped in extraction here
}
// Check whether the on-disk journal is clean (start == end) WITHOUT
// replaying it.  Works on a stack-allocated `journal` so nothing persists;
// only the header buffer is (temporarily) allocated.
// NOTE(review): return type and several parameter lines were dropped in
// this extraction (presumably offset and journal_size) -- confirm.
journal_is_clean(struct vnode *jvp,
		 /* NOTE(review): parameter lines dropped in extraction here */
		 size_t min_fs_block_size)
{
	// NOTE(review): local `journal jnl;` declaration dropped in extraction
	int phys_blksz, ret;
	int orig_checksum, checksum;
	struct vfs_context context;

	context.vc_proc = current_proc();
	context.vc_ucred = FSCRED;

	/* Get the real physical block size. */
	if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
		printf("jnl: is_clean: failed to get device block size.\n");
		// NOTE(review): error return dropped in extraction here
	}

	if (phys_blksz > min_fs_block_size) {
		printf("jnl: is_clean: error: phys blksize %d bigger than min fs blksize %d\n",
		       phys_blksz, min_fs_block_size);
		// NOTE(review): error return dropped in extraction here
	}

	if ((journal_size % phys_blksz) != 0) {
		printf("jnl: is_clean: journal size 0x%llx is not an even multiple of block size 0x%x\n",
		       journal_size, phys_blksz);
		// NOTE(review): error return dropped in extraction here
	}

	memset(&jnl, 0, sizeof(jnl));

	if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl.header_buf, phys_blksz)) {
		printf("jnl: is_clean: could not allocate space for header buffer (%d bytes)\n", phys_blksz);
		// NOTE(review): error return dropped in extraction here
	}

	jnl.jhdr = (journal_header *)jnl.header_buf;
	memset(jnl.jhdr, 0, sizeof(journal_header)+4);

	// NOTE(review): jnl.jdev assignment presumably dropped here
	jnl.jdev_offset = offset;
	// NOTE(review): jnl.fsdev assignment presumably dropped here

	// we have to set this up here so that do_journal_io() will work
	jnl.jhdr->jhdr_size = phys_blksz;

	if (read_journal_header(&jnl, jnl.jhdr, phys_blksz) != phys_blksz) {
		printf("jnl: is_clean: could not read %d bytes for the journal header.\n"
		       /* NOTE(review): printf argument and error path dropped in extraction */);
	}

	orig_checksum = jnl.jhdr->checksum;
	jnl.jhdr->checksum = 0;

	if (jnl.jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
		// do this before the swap since it's done byte-at-a-time
		orig_checksum = SWAP32(orig_checksum);
		checksum = calc_checksum((char *)jnl.jhdr, sizeof(struct journal_header));
		swap_journal_header(&jnl);
		jnl.flags |= JOURNAL_NEED_SWAP;
	} else {	// NOTE(review): `} else {` line dropped in extraction; reconstructed
		checksum = calc_checksum((char *)jnl.jhdr, sizeof(struct journal_header));
	}

	if (jnl.jhdr->magic != JOURNAL_HEADER_MAGIC && jnl.jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
		printf("jnl: is_clean: journal magic is bad (0x%x != 0x%x)\n",
		       jnl.jhdr->magic, JOURNAL_HEADER_MAGIC);
		// NOTE(review): error return dropped in extraction here
	}

	if (orig_checksum != checksum) {
		printf("jnl: is_clean: journal checksum is bad (0x%x != 0x%x)\n", orig_checksum, checksum);
		// NOTE(review): error return dropped in extraction here
	}

	//
	// if the start and end are equal then the journal is clean.
	// otherwise it's not clean and therefore an error.
	//
	if (jnl.jhdr->start == jnl.jhdr->end) {
		// NOTE(review): `ret` assignment lines (clean/not-clean) dropped here
	}

	kmem_free(kernel_map, (vm_offset_t)jnl.header_buf, phys_blksz);
	// NOTE(review): "return ret;" dropped in extraction here
}
// Shut down a journal: end/flush any active or buffered transaction, wait
// (bounded) for the buffer cache to drain so start catches up to end,
// write a final clean header, then free all journal memory.  If the
// journal is marked JOURNAL_INVALID, outstanding transactions are aborted
// instead of flushed.
journal_close(journal *jnl)
{
	volatile off_t *start, *end;
	// NOTE(review): locals (transaction *tr, int counter) dropped in extraction

	// set this before doing anything that would block so that
	// we start tearing things down properly.
	//
	jnl->flags |= JOURNAL_CLOSE_PENDING;

	if (jnl->owner != current_thread()) {
		// NOTE(review): journal lock acquisition dropped in extraction here
	}

	//
	// only write stuff to disk if the journal is still valid
	//
	if ((jnl->flags & JOURNAL_INVALID) == 0) {

		if (jnl->active_tr) {
			journal_end_transaction(jnl);
		}

		// flush any buffered transactions
		// NOTE(review): `if (jnl->cur_tr)` guard line dropped in extraction here
		transaction *tr = jnl->cur_tr;
		// NOTE(review): `jnl->cur_tr = NULL;` presumably dropped here

		end_transaction(tr, 1);   // force it to get flushed

		//start = &jnl->jhdr->start;
		start = &jnl->active_start;
		end   = &jnl->jhdr->end;

		// bounded wait for the fs flush callback to push everything out
		while (*start != *end && counter++ < 500) {
			printf("jnl: close: flushing the buffer cache (start 0x%llx end 0x%llx)\n", *start, *end);
			// NOTE(review): `if (jnl->flush)` guard presumably dropped here
			jnl->flush(jnl->flush_arg);
			tsleep((caddr_t)jnl, PRIBIO, "jnl_close", 1);
		}

		if (*start != *end) {
			printf("jnl: close: buffer flushing didn't seem to flush out all the transactions! (0x%llx - 0x%llx)\n"
			       /* NOTE(review): printf arguments dropped in extraction */);
		}

		// make sure this is in sync when we close the journal
		jnl->jhdr->start = jnl->active_start;

		// if this fails there's not much we can do at this point...
		write_journal_header(jnl);
	} else {	// NOTE(review): `} else {` line dropped in extraction; reconstructed
		// if we're here the journal isn't valid any more.
		// so make sure we don't leave any locked blocks lying around
		printf("jnl: close: journal 0x%x, is invalid. aborting outstanding transactions\n", jnl);
		if (jnl->active_tr || jnl->cur_tr) {
			// NOTE(review): `transaction *tr;` decl dropped here
			if (jnl->active_tr) {
				tr = jnl->active_tr;
				jnl->active_tr = NULL;
			}
			// NOTE(review): else-branch taking jnl->cur_tr dropped in extraction here

			abort_transaction(jnl, tr);
			if (jnl->active_tr || jnl->cur_tr) {
				panic("jnl: close: jnl @ 0x%x had both an active and cur tr\n", jnl);
			}
		}
	}

	free_old_stuff(jnl);

	kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, jnl->jhdr->jhdr_size);
	jnl->jhdr = (void *)0xbeefbabe;		// poison pointer to catch use-after-close

	FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
}
// Debug helper: print the journal header fields and the list of
// completed transactions to the console.
// NOTE(review): return type line and local `transaction *ctr;` decl were
// dropped in this extraction -- confirm against original.
dump_journal(journal *jnl)
{
	printf(" jdev_offset %.8llx\n", jnl->jdev_offset);
	printf(" magic: 0x%.8x\n", jnl->jhdr->magic);
	printf(" start: 0x%.8llx\n", jnl->jhdr->start);
	printf(" end: 0x%.8llx\n", jnl->jhdr->end);
	printf(" size: 0x%.8llx\n", jnl->jhdr->size);
	printf(" blhdr size: %d\n", jnl->jhdr->blhdr_size);
	printf(" jhdr size: %d\n", jnl->jhdr->jhdr_size);
	printf(" chksum: 0x%.8x\n", jnl->jhdr->checksum);

	printf(" completed transactions:\n");
	for(ctr = jnl->completed_trs; ctr; ctr = ctr->next) {
		printf(" 0x%.8llx - 0x%.8llx\n", ctr->journal_start, ctr->journal_end);
	}
}
// Compute the number of free bytes in the circular journal.  The header
// block (jhdr_size) is always reserved; when start == end the journal is
// completely empty.
// NOTE(review): return type line and the local `free_space` declaration
// were dropped in this extraction -- confirm against original.
free_space(journal *jnl)
{
	if (jnl->jhdr->start < jnl->jhdr->end) {
		// contiguous used region [start, end): free is everything else minus header
		free_space = jnl->jhdr->size - (jnl->jhdr->end - jnl->jhdr->start) - jnl->jhdr->jhdr_size;
	} else if (jnl->jhdr->start > jnl->jhdr->end) {
		// used region wraps; free is the gap between end and start
		free_space = jnl->jhdr->start - jnl->jhdr->end;
	} else {	// NOTE(review): `} else {` line dropped in extraction; reconstructed
		// journal is completely empty
		free_space = jnl->jhdr->size - jnl->jhdr->jhdr_size;
	}

	// NOTE(review): "return free_space;" dropped in extraction here
}
// The journal must be locked on entry to this function.
// The "desired_size" is in bytes.
//
// Waits (and lazily advances jnl->jhdr->start past completed old
// transactions) until at least desired_size bytes are free, calling the
// fs flush callback and sleeping between attempts.
// NOTE(review): the return type, locals (i, counter) and the enclosing
// retry loop header/footer were dropped in this extraction; the code
// below is the loop body -- confirm against original.
check_free_space(journal *jnl, int desired_size)
{
	//printf("jnl: check free space (desired 0x%x, avail 0x%Lx)\n",
	//	   desired_size, free_space(jnl));

	int old_start_empty;

	if (counter++ == 5000) {
		// NOTE(review): a dump_journal(jnl) call may be dropped here
		panic("jnl: check_free_space: buffer flushing isn't working "
		      "(jnl @ 0x%x s %lld e %lld f %lld [active start %lld]).\n", jnl,
		      jnl->jhdr->start, jnl->jhdr->end, free_space(jnl), jnl->active_start);
	}
	if (counter > 7500) {
		printf("jnl: check_free_space: giving up waiting for free space.\n");
		// NOTE(review): error return dropped in extraction here
	}

	// make sure there's space in the journal to hold this transaction
	if (free_space(jnl) > desired_size) {
		// NOTE(review): loop break dropped in extraction here
	}

	//
	// here's where we lazily bump up jnl->jhdr->start.  we'll consume
	// entries until there is enough space for the next transaction.
	//
	old_start_empty = 1;
	// NOTE(review): lock_oldstart(jnl) acquisition dropped in extraction here
	for(i = 0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {
		// NOTE(review): inner counter reset presumably dropped here
		// high bit set == that old transaction is still in flight
		while (jnl->old_start[i] & 0x8000000000000000LL) {
			if (counter++ > 100) {
				panic("jnl: check_free_space: tr starting @ 0x%llx not flushing (jnl 0x%x).\n",
				      jnl->old_start[i], jnl);
			}

			unlock_oldstart(jnl);
			// NOTE(review): `if (jnl->flush)` guard presumably dropped here
			jnl->flush(jnl->flush_arg);
			tsleep((caddr_t)jnl, PRIBIO, "check_free_space1", 1);
			// NOTE(review): lock_oldstart(jnl) re-acquire dropped here
		}

		if (jnl->old_start[i] == 0) {
			// NOTE(review): `continue;` dropped in extraction here
		}

		old_start_empty = 0;
		jnl->jhdr->start = jnl->old_start[i];
		jnl->old_start[i] = 0;
		if (free_space(jnl) > desired_size) {
			unlock_oldstart(jnl);
			write_journal_header(jnl);
			// NOTE(review): success return/break dropped in extraction here
		}
	}
	unlock_oldstart(jnl);

	// if we bumped the start, loop and try again
	if (i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
		// NOTE(review): `continue;` dropped in extraction here
	} else if (old_start_empty) {
		//
		// if there is nothing in old_start anymore then we can
		// bump the jhdr->start to be the same as active_start
		// since it is possible there was only one very large
		// transaction in the old_start array.  if we didn't do
		// this then jhdr->start would never get updated and we
		// would wind up looping until we hit the panic at the
		// start of the loop.
		//
		jnl->jhdr->start = jnl->active_start;
		write_journal_header(jnl);
		// NOTE(review): `continue;` dropped in extraction here
	}

	// if the file system gave us a flush function, call it to so that
	// it can flush some blocks which hopefully will cause some transactions
	// to complete and thus free up space in the journal.
	// NOTE(review): `if (jnl->flush)` guard presumably dropped here
	jnl->flush(jnl->flush_arg);

	// wait for a while to avoid being cpu-bound (this will
	// put us to sleep for 10 milliseconds)
	tsleep((caddr_t)jnl, PRIBIO, "check_free_space2", 1);
	// NOTE(review): loop close and "return 0;" dropped in extraction here
}
// Begin a transaction.  If the calling thread already owns the journal
// this just nests (bumps nested_count); otherwise it takes ownership,
// ensures journal space, and installs either the buffered cur_tr or a
// freshly allocated transaction as jnl->active_tr.
// NOTE(review): return type, locals (ret, tr) and several statements were
// dropped in this extraction; gaps are flagged below.
journal_start_transaction(journal *jnl)
{
	if (jnl->flags & JOURNAL_INVALID) {
		// NOTE(review): error return dropped in extraction here
	}

	if (jnl->owner == current_thread()) {
		if (jnl->active_tr == NULL) {
			panic("jnl: start_tr: active_tr is NULL (jnl @ 0x%x, owner 0x%x, current_thread 0x%x\n",
			      jnl, jnl->owner, current_thread());
		}
		jnl->nested_count++;
		// NOTE(review): nested-transaction early `return 0;` dropped here
	}

	// NOTE(review): lock_journal(jnl) acquisition dropped in extraction here

	if (jnl->owner != NULL || jnl->nested_count != 0 || jnl->active_tr != NULL) {
		panic("jnl: start_tr: owner 0x%x, nested count 0x%x, active_tr 0x%x jnl @ 0x%x\n",
		      jnl->owner, jnl->nested_count, jnl->active_tr, jnl);
	}

	jnl->owner        = current_thread();
	jnl->nested_count = 1;

	free_old_stuff(jnl);

	// make sure there's room in the journal
	if (check_free_space(jnl, jnl->tbuffer_size) != 0) {
		printf("jnl: start transaction failed: no space\n");
		// NOTE(review): goto bad_start (or similar) dropped in extraction here
	}

	// if there's a buffered transaction, use it.
	// NOTE(review): `if (jnl->cur_tr)` guard line dropped in extraction here
	jnl->active_tr = jnl->cur_tr;
	// NOTE(review): `jnl->cur_tr = NULL;` and early return dropped here

	MALLOC_ZONE(tr, transaction *, sizeof(transaction), M_JNL_TR, M_WAITOK);
	memset(tr, 0, sizeof(transaction));

	tr->tbuffer_size = jnl->tbuffer_size;
	if (kmem_alloc(kernel_map, (vm_offset_t *)&tr->tbuffer, tr->tbuffer_size)) {
		FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
		printf("jnl: start transaction failed: no tbuffer mem\n");
		// NOTE(review): goto bad_start dropped in extraction here
	}

	// journal replay code checksum check depends on this.
	memset(tr->tbuffer, 0, BLHDR_CHECKSUM_SIZE);
	// Fill up the rest of the block with unimportant bytes (0x5a 'Z' chosen for visibility)
	memset(tr->tbuffer + BLHDR_CHECKSUM_SIZE, 0x5a, jnl->jhdr->blhdr_size - BLHDR_CHECKSUM_SIZE);

	tr->blhdr = (block_list_header *)tr->tbuffer;
	tr->blhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
	tr->blhdr->num_blocks = 1;      // accounts for this header block
	tr->blhdr->bytes_used = jnl->jhdr->blhdr_size;

	// NOTE(review): tr->num_blhdrs / tr->jnl assignments dropped in extraction here
	tr->total_bytes = jnl->jhdr->blhdr_size;

	jnl->active_tr = tr;

	// printf("jnl: start_tr: owner 0x%x new tr @ 0x%x\n", jnl->owner, tr);

	// NOTE(review): the success "return 0;" and bad_start: label were dropped
	// in extraction; everything below is the error-unwind path.
	jnl->nested_count = 0;
	unlock_journal(jnl);
	// NOTE(review): "return ret;" dropped in extraction here
}
// Prepare a metadata buffer for modification inside the active
// transaction: validate it belongs here (B_META, size multiple of the
// device block size, transaction fits in the journal), flush it first if
// it carries another transaction's delayed-write data, then mark it
// B_LOCKED so the buffer cache won't write it out from under us.
journal_modify_block_start(journal *jnl, struct buf *bp)
{
	// NOTE(review): local `transaction *tr;` decl and CHECK_JOURNAL dropped

	if (jnl->flags & JOURNAL_INVALID) {
		// NOTE(review): error return dropped in extraction here
	}

	// XXXdbg - for debugging I want this to be true.  later it may
	//          not be necessary.
	if ((buf_flags(bp) & B_META) == 0) {
		panic("jnl: modify_block_start: bp @ 0x%x is not a meta-data block! (jnl 0x%x)\n", bp, jnl);
	}

	tr = jnl->active_tr;
	CHECK_TRANSACTION(tr);

	if (jnl->owner != current_thread()) {
		panic("jnl: modify_block_start: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
		      jnl, jnl->owner, current_thread());
	}

	free_old_stuff(jnl);

	//printf("jnl: mod block start (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d; total bytes %d)\n",
	//	 bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes);

	// can't allow blocks that aren't an even multiple of the
	// underlying block size.
	if ((buf_size(bp) % jnl->jhdr->jhdr_size) != 0) {
		panic("jnl: mod block start: bufsize %d not a multiple of block size %d\n",
		      buf_size(bp), jnl->jhdr->jhdr_size);
		// NOTE(review): error return dropped in extraction here
	}

	// make sure that this transaction isn't bigger than the whole journal
	if (tr->total_bytes+buf_size(bp) >= (jnl->jhdr->size - jnl->jhdr->jhdr_size)) {
		panic("jnl: transaction too big (%d >= %lld bytes, bufsize %d, tr 0x%x bp 0x%x)\n",
		      tr->total_bytes, (tr->jnl->jhdr->size - jnl->jhdr->jhdr_size), buf_size(bp), tr, bp);
		// NOTE(review): error return dropped in extraction here
	}

	// if the block is dirty and not already locked we have to write
	// it out before we muck with it because it has data that belongs
	// (presumably) to another transaction.
	//
	if ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI) {

		if (buf_flags(bp) & B_ASYNC) {
			// NOTE(review): "0x%" below is missing its conversion letter
			// (likely intended "0x%x") -- pre-existing defect in the format
			// string; left unmodified here.
			panic("modify_block_start: bp @ 0x% has async flag set!\n", bp);
		}

		// this will cause it to not be buf_brelse()'d
		buf_setflags(bp, B_NORELSE);
		// NOTE(review): the synchronous write (VNOP_BWRITE or similar) was
		// dropped in extraction here
	}
	buf_setflags(bp, B_LOCKED);
	// NOTE(review): success return dropped in extraction here
}
// Abort a modification started with journal_modify_block_start().  If the
// block never made it into a block_list_header (i.e. modify_block_end was
// not called on it this transaction) it is safe to drop its B_LOCKED bit;
// otherwise it must stay locked in memory.
journal_modify_block_abort(journal *jnl, struct buf *bp)
{
	// NOTE(review): locals (transaction *tr, int i) and CHECK_JOURNAL dropped
	block_list_header *blhdr;

	tr = jnl->active_tr;

	//
	// if there's no active transaction then we just want to
	// call buf_brelse() and return since this is just a block
	// that happened to be modified as part of another tr.
	//
	// NOTE(review): the `if (tr == NULL)` early-out implementing the above
	// comment was dropped in extraction here

	if (jnl->flags & JOURNAL_INVALID) {
		// NOTE(review): error return dropped in extraction here
	}

	CHECK_TRANSACTION(tr);

	if (jnl->owner != current_thread()) {
		panic("jnl: modify_block_abort: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
		      jnl, jnl->owner, current_thread());
	}

	free_old_stuff(jnl);

	// printf("jnl: modify_block_abort: tr 0x%x bp 0x%x\n", jnl->active_tr, bp);

	// first check if it's already part of this transaction
	// (binfo[0].bnum doubles as the link to the next block_list_header)
	for(blhdr = tr->blhdr; blhdr; blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) {
		for(i = 1; i < blhdr->num_blocks; i++) {
			if (bp == blhdr->binfo[i].bp) {
				if (buf_size(bp) != blhdr->binfo[i].bsize) {
					panic("jnl: bp @ 0x%x changed size on me! (%d vs. %d, jnl 0x%x)\n",
					      bp, buf_size(bp), blhdr->binfo[i].bsize, jnl);
				}
				// NOTE(review): `break;` dropped in extraction here
			}
		}

		if (i < blhdr->num_blocks) {
			// NOTE(review): `break;` dropped in extraction here
		}
	}

	//
	// if blhdr is null, then this block has only had modify_block_start
	// called on it as part of the current transaction.  that means that
	// it is ok to clear the LOCKED bit since it hasn't actually been
	// modified.  if blhdr is non-null then modify_block_end was called
	// on it and so we need to keep it locked in memory.
	//
	if (blhdr == NULL) {
		buf_clearflags(bp, B_LOCKED);
	}

	// NOTE(review): buf_brelse(bp) and "return 0;" dropped in extraction here
}
// Record a modified (B_LOCKED) buffer in the active transaction: locate an
// existing binfo slot for it, or append one -- allocating and chaining a
// new block_list_header tbuffer if the current one is full -- then copy the
// buffer's data into the in-memory transaction buffer at tbuffer_offset.
journal_modify_block_end(journal *jnl, struct buf *bp)
{
	int i, j, tbuffer_offset;
	// NOTE(review): locals (char *blkptr, transaction *tr) dropped in extraction
	block_list_header *blhdr, *prev = NULL;

	if (jnl->flags & JOURNAL_INVALID) {
		// NOTE(review): error return dropped in extraction here
	}

	tr = jnl->active_tr;
	CHECK_TRANSACTION(tr);

	if (jnl->owner != current_thread()) {
		panic("jnl: modify_block_end: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
		      jnl, jnl->owner, current_thread());
	}

	free_old_stuff(jnl);

	//printf("jnl: mod block end: (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d, total bytes %d)\n",
	//	 bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes);

	if ((buf_flags(bp) & B_LOCKED) == 0) {
		panic("jnl: modify_block_end: bp 0x%x not locked! jnl @ 0x%x\n", bp, jnl);
	}

	// first check if it's already part of this transaction
	for(blhdr = tr->blhdr; blhdr; prev = blhdr, blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) {
		tbuffer_offset = jnl->jhdr->blhdr_size;

		for(i = 1; i < blhdr->num_blocks; i++) {
			if (bp == blhdr->binfo[i].bp) {
				if (buf_size(bp) != blhdr->binfo[i].bsize) {
					panic("jnl: bp @ 0x%x changed size on me! (%d vs. %d, jnl 0x%x)\n",
					      bp, buf_size(bp), blhdr->binfo[i].bsize, jnl);
				}
				// NOTE(review): `break;` dropped in extraction here
			}
			tbuffer_offset += blhdr->binfo[i].bsize;
		}

		if (i < blhdr->num_blocks) {
			// NOTE(review): `break;` dropped in extraction here
		}
	}

	if (/* NOTE(review): leading operand(s) of this condition were dropped in
	       extraction (presumably `blhdr == NULL && prev`) -- confirm */
	       (prev->num_blocks+1) <= prev->max_blocks
	    && (prev->bytes_used+buf_size(bp)) <= tr->tbuffer_size) {
		// NOTE(review): body reusing `prev` as the target blhdr dropped here
	} else if (blhdr == NULL) {
		block_list_header *nblhdr;

		// NOTE(review): `if (prev == NULL)` guard for this panic dropped here
		panic("jnl: modify block end: no way man, prev == NULL?!?, jnl 0x%x, bp 0x%x\n", jnl, bp);

		// we got to the end of the list, didn't find the block and there's
		// no room in the block_list_header pointed to by prev

		// we allocate another tbuffer and link it in at the end of the list
		// through prev->binfo[0].bnum.  that's a skanky way to do things but
		// avoids having yet another linked list of small data structures to manage.

		if (kmem_alloc(kernel_map, (vm_offset_t *)&nblhdr, tr->tbuffer_size)) {
			panic("jnl: end_tr: no space for new block tr @ 0x%x (total bytes: %d)!\n",
			      tr, tr->total_bytes);
		}

		// journal replay code checksum check depends on this.
		memset(nblhdr, 0, BLHDR_CHECKSUM_SIZE);
		// Fill up the rest of the block with unimportant bytes
		// NOTE(review): `nblhdr + BLHDR_CHECKSUM_SIZE` scales by
		// sizeof(block_list_header), not bytes; the original likely casts
		// through (char *) -- confirm against original source.
		memset(nblhdr + BLHDR_CHECKSUM_SIZE, 0x5a, jnl->jhdr->blhdr_size - BLHDR_CHECKSUM_SIZE);

		// initialize the new guy
		nblhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
		nblhdr->num_blocks = 1;      // accounts for this header block
		nblhdr->bytes_used = jnl->jhdr->blhdr_size;

		// NOTE(review): tr->num_blhdrs++ presumably dropped here
		tr->total_bytes += jnl->jhdr->blhdr_size;

		// then link him in at the end
		prev->binfo[0].bnum = (off_t)((long)nblhdr);

		// and finally switch to using the new guy
		// NOTE(review): `blhdr = nblhdr;` and index reset dropped here
		tbuffer_offset = jnl->jhdr->blhdr_size;
	}

	if ((i+1) > blhdr->max_blocks) {
		panic("jnl: modify_block_end: i = %d, max_blocks %d\n", i, blhdr->max_blocks);
	}

	// copy the data into the in-memory transaction buffer
	blkptr = (char *)&((char *)blhdr)[tbuffer_offset];
	memcpy(blkptr, buf_dataptr(bp), buf_size(bp));

	// if this is true then this is a new block we haven't seen
	if (i >= blhdr->num_blocks) {
		// NOTE(review): vnode_ref()/bsize declaration lines dropped here
		bsize = buf_size(bp);

		blhdr->binfo[i].bnum  = (off_t)(buf_blkno(bp));
		blhdr->binfo[i].bsize = bsize;
		blhdr->binfo[i].bp    = bp;

		blhdr->bytes_used += bsize;
		tr->total_bytes   += bsize;

		blhdr->num_blocks++;
	}
	// NOTE(review): delayed-write of bp and "return 0;" dropped in extraction here
}
// "Kill" a block that was previously added to the active transaction: drop
// its B_LOCKED bit, release the vnode reference taken in
// journal_modify_block_end(), and mark its binfo slot dead (bp = NULL,
// bnum = -1) so replay/flush skip it.
journal_kill_block(journal *jnl, struct buf *bp)
{
	// NOTE(review): locals (int i, bflags; struct vnode *vp; transaction *tr)
	// dropped in extraction
	block_list_header *blhdr;

	if (jnl->flags & JOURNAL_INVALID) {
		// NOTE(review): error return dropped in extraction here
	}

	tr = jnl->active_tr;
	CHECK_TRANSACTION(tr);

	if (jnl->owner != current_thread()) {
		// NOTE(review): this panic text says "modify_block_end" -- appears to
		// be copy/pasted from journal_modify_block_end; pre-existing upstream
		// wording, left unmodified.
		panic("jnl: modify_block_end: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
		      jnl, jnl->owner, current_thread());
	}

	free_old_stuff(jnl);

	bflags = buf_flags(bp);

	if ( !(bflags & B_LOCKED))
		panic("jnl: modify_block_end: called with bp not B_LOCKED");

	/*
	 * bp must be BL_BUSY and B_LOCKED
	 */
	// first check if it's already part of this transaction
	for(blhdr = tr->blhdr; blhdr; blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) {

		for(i = 1; i < blhdr->num_blocks; i++) {
			if (bp == blhdr->binfo[i].bp) {
				// NOTE(review): `vp = buf_vnode(bp);` presumably dropped here
				buf_clearflags(bp, B_LOCKED);

				// this undoes the vnode_ref() in journal_modify_block_end()
				// NOTE(review): buffer release call(s) may be dropped here
				vnode_rele_ext(vp, 0, 1);

				// if the block has the DELWRI and FILTER bits sets, then
				// things are seriously weird.  if it was part of another
				// transaction then journal_modify_block_start() should
				// have force it to be written.
				//
				//if ((bflags & B_DELWRI) && (bflags & B_FILTER)) {
				//	panic("jnl: kill block: this defies all logic! bp 0x%x\n", bp);
				// NOTE(review): the commented-out block's else/close lines
				// were dropped in extraction here
				tr->num_killed += buf_size(bp);

				blhdr->binfo[i].bp   = NULL;
				blhdr->binfo[i].bnum = (off_t)-1;
				// NOTE(review): `break;` dropped in extraction here
			}
		}

		if (i < blhdr->num_blocks) {
			// NOTE(review): `break;` dropped in extraction here
		}
	}
	// NOTE(review): "return 0;" dropped in extraction here
}
// Comparator for sorting block_info entries by physical block number
// (entries whose bp is NULL -- killed blocks -- sort to one end).
// NOTE(review): the return type (presumably `static int`) and the
// declaration of `res` were dropped in this extraction -- confirm.
journal_binfo_cmp(void *a, void *b)
{
	block_info *bi_a = (struct block_info *)a;
	block_info *bi_b = (struct block_info *)b;

	if (bi_a->bp == NULL) {
		// NOTE(review): return statement dropped in extraction (presumably
		// `return 1;`) -- confirm
	}
	if (bi_b->bp == NULL) {
		// NOTE(review): return statement dropped in extraction (presumably
		// `return -1;`) -- confirm
	}

	// don't have to worry about negative block
	// numbers so this is ok to do.
	//
	res = (buf_blkno(bi_a->bp) - buf_blkno(bi_b->bp));

	// NOTE(review): final return of `res` dropped in extraction here
}
// end_transaction():
//   Commits a transaction to the on-disk journal: ensures free space
//   (check_free_space), records the transaction's start at the current
//   journal end, writes each block_list_header (with checksum) plus its
//   metadata via write_journal_data(), updates the journal header, then
//   arms buffer_flushed_callback on every buffer (buf_setfilter), clears
//   B_LOCKED and drops the vnode_ref taken in journal_modify_block_end().
//   Error paths mark the journal JOURNAL_INVALID and call abort_transaction.
// NOTE(review): the text below was mangled by extraction — statements are
//   split across lines, original line numbers are fused in, and some
//   original lines are missing.  Fragments are preserved verbatim;
//   reconcile against the pristine source before editing.
2295 end_transaction(transaction
*tr
, int force_it
)
2300 journal
*jnl
= tr
->jnl
;
2302 block_list_header
*blhdr
=NULL
, *next
=NULL
;
2305 panic("jnl: jnl @ 0x%x already has cur_tr 0x%x, new tr: 0x%x\n",
2306 jnl
, jnl
->cur_tr
, tr
);
2309 // if there weren't any modified blocks in the transaction
2310 // just save off the transaction pointer and return.
2311 if (tr
->total_bytes
== jnl
->jhdr
->blhdr_size
) {
2316 // if our transaction buffer isn't very full, just hang
2317 // on to it and don't actually flush anything. this is
2318 // what is known as "group commit". we will flush the
2319 // transaction buffer if it's full or if we have more than
2320 // one of them so we don't start hogging too much memory.
2323 && (jnl
->flags
& JOURNAL_NO_GROUP_COMMIT
) == 0
2324 && tr
->num_blhdrs
< 3
2325 && (tr
->total_bytes
<= ((tr
->tbuffer_size
*tr
->num_blhdrs
) - tr
->tbuffer_size
/8))) {
2332 // if we're here we're going to flush the transaction buffer to disk.
2333 // make sure there is room in the journal first.
2334 check_free_space(jnl
, tr
->total_bytes
);
2336 // range check the end index
2337 if (jnl
->jhdr
->end
<= 0 || jnl
->jhdr
->end
> jnl
->jhdr
->size
) {
2338 panic("jnl: end_transaction: end is bogus 0x%llx (sz 0x%llx)\n",
2339 jnl
->jhdr
->end
, jnl
->jhdr
->size
);
2342 // this transaction starts where the current journal ends
2343 tr
->journal_start
= jnl
->jhdr
->end
;
2344 end
= jnl
->jhdr
->end
;
2347 // if the first entry in old_start[] isn't free yet, loop calling the
2348 // file system flush routine until it is (or we panic).
// NOTE(review): the high bit of old_start[0] appears to be used as an
// "in flight" flag — confirm against the full source.
2352 while ((jnl
->old_start
[0] & 0x8000000000000000LL
) != 0) {
2354 unlock_oldstart(jnl
);
2357 jnl
->flush(jnl
->flush_arg
);
2360 // yield the cpu so others can get in to clear the lock bit
2361 (void)tsleep((void *)jnl
, PRIBIO
, "jnl-old-start-sleep", 1);
2366 panic("jnl: transaction that started at 0x%llx is not completing! jnl 0x%x\n",
2367 jnl
->old_start
[0] & (~0x8000000000000000LL
), jnl
);
2372 // slide everyone else down and put our latest guy in the last
2373 // entry in the old_start array
2375 memcpy(&jnl
->old_start
[0], &jnl
->old_start
[1], sizeof(jnl
->old_start
)-sizeof(jnl
->old_start
[0]));
2376 jnl
->old_start
[sizeof(jnl
->old_start
)/sizeof(jnl
->old_start
[0]) - 1] = tr
->journal_start
| 0x8000000000000000LL
;
2378 unlock_oldstart(jnl
);
2381 // for each block, make sure that the physical block # is set
2382 for(blhdr
=tr
->blhdr
; blhdr
; blhdr
=next
) {
2384 for(i
=1; i
< blhdr
->num_blocks
; i
++) {
2389 bp
= blhdr
->binfo
[i
].bp
;
2390 if (bp
== NULL
) { // only true if a block was "killed"
2391 if (blhdr
->binfo
[i
].bnum
!= (off_t
)-1) {
2392 panic("jnl: inconsistent binfo (NULL bp w/bnum %lld; jnl @ 0x%x, tr 0x%x)\n",
2393 blhdr
->binfo
[i
].bnum
, jnl
, tr
);
2398 blkno
= buf_blkno(bp
);
2399 lblkno
= buf_lblkno(bp
);
2401 if (vp
== NULL
&& lblkno
== blkno
) {
2402 printf("jnl: end_tr: bad news! bp @ 0x%x w/null vp and l/blkno = %qd/%qd. aborting the transaction (tr 0x%x jnl 0x%x).\n",
2403 bp
, lblkno
, blkno
, tr
, jnl
);
2407 // if the lblkno is the same as blkno and this bp isn't
2408 // associated with the underlying file system device then
2409 // we need to call bmap() to get the actual physical block.
2411 if ((lblkno
== blkno
) && (vp
!= jnl
->fsdev
)) {
2413 size_t contig_bytes
;
2415 if (VNOP_BLKTOOFF(vp
, lblkno
, &f_offset
)) {
2416 printf("jnl: end_tr: vnop_blktooff failed @ 0x%x, jnl 0x%x\n", bp
, jnl
);
2419 if (VNOP_BLOCKMAP(vp
, f_offset
, buf_count(bp
), &blkno
, &contig_bytes
, NULL
, 0, NULL
)) {
2420 printf("jnl: end_tr: can't blockmap the bp @ 0x%x, jnl 0x%x\n", bp
, jnl
);
2423 if ((uint32_t)contig_bytes
< buf_count(bp
)) {
2424 printf("jnl: end_tr: blk not physically contiguous on disk@ 0x%x, jnl 0x%x\n", bp
, jnl
);
2427 buf_setblkno(bp
, blkno
);
2429 // update this so we write out the correct physical block number!
2430 blhdr
->binfo
[i
].bnum
= (off_t
)(blkno
);
2433 next
= (block_list_header
*)((long)blhdr
->binfo
[0].bnum
);
// NOTE(review): binfo[0].bnum doubles as the next-header link — grounded
// by the identical pattern used in the list walk below.
2436 for(blhdr
=tr
->blhdr
; blhdr
; blhdr
=(block_list_header
*)((long)blhdr
->binfo
[0].bnum
)) {
2438 amt
= blhdr
->bytes_used
;
2440 blhdr
->checksum
= 0;
2441 blhdr
->checksum
= calc_checksum((char *)blhdr
, BLHDR_CHECKSUM_SIZE
);
2443 ret
= write_journal_data(jnl
, &end
, blhdr
, amt
);
2445 printf("jnl: end_transaction: only wrote %d of %d bytes to the journal!\n",
2452 jnl
->jhdr
->end
= end
; // update where the journal now ends
2453 tr
->journal_end
= end
; // the transaction ends here too
2454 if (tr
->journal_start
== 0 || tr
->journal_end
== 0) {
2455 panic("jnl: end_transaction: bad tr journal start/end: 0x%llx 0x%llx\n",
2456 tr
->journal_start
, tr
->journal_end
);
2459 if (write_journal_header(jnl
) != 0) {
2464 // setup for looping through all the blhdr's. we null out the
2465 // tbuffer and blhdr fields so that they're not used any more.
2471 // the buffer_flushed_callback will only be called for the
2472 // real blocks that get flushed so we have to account for
2473 // the block_list_headers here.
2475 tr
->num_flushed
= tr
->num_blhdrs
* jnl
->jhdr
->blhdr_size
;
2477 // for each block, set the iodone callback and unlock it
2478 for(; blhdr
; blhdr
=next
) {
2480 // we can re-order the buf ptrs because everything is written out already
2481 qsort(&blhdr
->binfo
[1], blhdr
->num_blocks
-1, sizeof(block_info
), journal_binfo_cmp
);
2483 for(i
=1; i
< blhdr
->num_blocks
; i
++) {
2484 if (blhdr
->binfo
[i
].bp
== NULL
) {
2488 errno
= buf_meta_bread(buf_vnode(blhdr
->binfo
[i
].bp
),
2489 buf_lblkno(blhdr
->binfo
[i
].bp
),
2490 buf_size(blhdr
->binfo
[i
].bp
),
2493 if (errno
== 0 && bp
!= NULL
) {
2494 struct vnode
*save_vp
;
2497 if (bp
!= blhdr
->binfo
[i
].bp
) {
2498 panic("jnl: end_tr: got back a different bp! (bp 0x%x should be 0x%x, jnl 0x%x\n",
2499 bp
, blhdr
->binfo
[i
].bp
, jnl
);
2502 if ((buf_flags(bp
) & (B_LOCKED
|B_DELWRI
)) != (B_LOCKED
|B_DELWRI
)) {
2503 if (jnl
->flags
& JOURNAL_CLOSE_PENDING
) {
2504 buf_clearflags(bp
, B_LOCKED
);
2508 panic("jnl: end_tr: !!!DANGER!!! bp 0x%x flags (0x%x) not LOCKED & DELWRI\n", bp
, buf_flags(bp
));
2511 save_vp
= buf_vnode(bp
);
2513 buf_setfilter(bp
, buffer_flushed_callback
, tr
, &cur_filter
, NULL
);
2516 panic("jnl: bp @ 0x%x (blkno %qd, vp 0x%x) has non-null iodone (0x%x) buffflushcb 0x%x\n",
2517 bp
, buf_blkno(bp
), save_vp
, cur_filter
, buffer_flushed_callback
);
2519 buf_clearflags(bp
, B_LOCKED
);
2521 // kicking off the write here helps performance
2523 // XXXdbg this is good for testing: buf_bdwrite(bp);
2526 // this undoes the vnode_ref() in journal_modify_block_end()
2527 vnode_rele_ext(save_vp
, 0, 1);
2529 printf("jnl: end_transaction: could not find block %Ld vp 0x%x!\n",
2530 blhdr
->binfo
[i
].bnum
, blhdr
->binfo
[i
].bp
);
2532 buf_clearflags(bp
, B_LOCKED
);
2538 next
= (block_list_header
*)((long)blhdr
->binfo
[0].bnum
);
2540 // we can free blhdr here since we won't need it any more
2541 blhdr
->binfo
[0].bnum
= 0xdeadc0de;
2542 kmem_free(kernel_map
, (vm_offset_t
)blhdr
, tr
->tbuffer_size
);
2545 //printf("jnl: end_tr: tr @ 0x%x, jnl-blocks: 0x%llx - 0x%llx. exit!\n",
2546 // tr, tr->journal_start, tr->journal_end);
// Error path: poison the journal, clear the in-flight bit on the last
// old_start slot, and abort the transaction.
2551 jnl
->flags
|= JOURNAL_INVALID
;
2552 jnl
->old_start
[sizeof(jnl
->old_start
)/sizeof(jnl
->old_start
[0]) - 1] &= ~0x8000000000000000LL
;
2553 abort_transaction(jnl
, tr
);
// abort_transaction():
//   Discards an uncommitted transaction.  For each block in each
//   block_list_header it re-reads the buffer (buf_meta_bread), marks it
//   invalid — which, per the original comment, also clears the locked
//   and delayed state — and drops the vnode reference taken in
//   journal_modify_block_end().  Each tbuffer is poisoned (0xdeadc0de)
//   and freed with kmem_free; the transaction struct is poisoned
//   (0xdbadc0de) and freed with FREE_ZONE.
// NOTE(review): the text below was mangled by extraction; fragments are
//   preserved verbatim.  Reconcile against the pristine source.
2558 abort_transaction(journal
*jnl
, transaction
*tr
)
2562 block_list_header
*blhdr
, *next
;
2564 struct vnode
*save_vp
;
2566 // for each block list header, iterate over the blocks then
2567 // free up the memory associated with the block list.
2569 // for each block, clear the lock bit and release it.
2571 for(blhdr
=tr
->blhdr
; blhdr
; blhdr
=next
) {
2573 for(i
=1; i
< blhdr
->num_blocks
; i
++) {
2574 if (blhdr
->binfo
[i
].bp
== NULL
) {
2577 if ( (buf_vnode(blhdr
->binfo
[i
].bp
) == NULL
) ||
2578 !(buf_flags(blhdr
->binfo
[i
].bp
) & B_LOCKED
) ) {
2582 errno
= buf_meta_bread(buf_vnode(blhdr
->binfo
[i
].bp
),
2583 buf_lblkno(blhdr
->binfo
[i
].bp
),
2584 buf_size(blhdr
->binfo
[i
].bp
),
2588 if (bp
!= blhdr
->binfo
[i
].bp
) {
2589 panic("jnl: abort_tr: got back a different bp! (bp 0x%x should be 0x%x, jnl 0x%x\n",
2590 bp
, blhdr
->binfo
[i
].bp
, jnl
);
2593 // releasing a bp marked invalid
2594 // also clears the locked and delayed state
2595 buf_markinvalid(bp
);
2596 save_vp
= buf_vnode(bp
);
// this undoes the vnode_ref() taken when the block joined the transaction
2600 vnode_rele_ext(save_vp
, 0, 1);
2602 printf("jnl: abort_tr: could not find block %Ld vp 0x%x!\n",
2603 blhdr
->binfo
[i
].bnum
, blhdr
->binfo
[i
].bp
);
// binfo[0].bnum doubles as the link to the next block_list_header
2610 next
= (block_list_header
*)((long)blhdr
->binfo
[0].bnum
);
2612 // we can free blhdr here since we won't need it any more
2613 blhdr
->binfo
[0].bnum
= 0xdeadc0de;
2614 kmem_free(kernel_map
, (vm_offset_t
)blhdr
, tr
->tbuffer_size
);
// poison and free the transaction itself
2619 tr
->total_bytes
= 0xdbadc0de;
2620 FREE_ZONE(tr
, sizeof(transaction
), M_JNL_TR
);
// journal_end_transaction():
//   Public API: ends the currently active transaction.  Enforces single
//   ownership (panics if the caller is not jnl->owner), supports nested
//   transactions via jnl->nested_count (only the outermost end does real
//   work), aborts the active transaction if the journal has been marked
//   JOURNAL_INVALID, and otherwise hands jnl->active_tr to
//   end_transaction() for the actual commit.  Drops the journal lock on
//   the way out.
// NOTE(review): mangled by extraction; fragments preserved verbatim.
2625 journal_end_transaction(journal
*jnl
)
2632 if ((jnl
->flags
& JOURNAL_INVALID
) && jnl
->owner
== NULL
) {
2636 if (jnl
->owner
!= current_thread()) {
2637 panic("jnl: end_tr: I'm not the owner! jnl 0x%x, owner 0x%x, curact 0x%x\n",
2638 jnl
, jnl
->owner
, current_thread());
2641 free_old_stuff(jnl
);
2643 jnl
->nested_count
--;
2644 if (jnl
->nested_count
> 0) {
2646 } else if (jnl
->nested_count
< 0) {
2647 panic("jnl: jnl @ 0x%x has negative nested count (%d). bad boy.\n", jnl
, jnl
->nested_count
);
2650 if (jnl
->flags
& JOURNAL_INVALID
) {
2651 if (jnl
->active_tr
) {
2652 if (jnl
->cur_tr
!= NULL
) {
2653 panic("jnl: journal @ 0x%x has active tr (0x%x) and cur tr (0x%x)\n",
2654 jnl
, jnl
->active_tr
, jnl
->cur_tr
);
2657 tr
= jnl
->active_tr
;
2658 jnl
->active_tr
= NULL
;
2659 abort_transaction(jnl
, tr
);
2663 unlock_journal(jnl
);
2668 tr
= jnl
->active_tr
;
2669 CHECK_TRANSACTION(tr
);
2671 // clear this out here so that when check_free_space() calls
2672 // the FS flush function, we don't panic in journal_flush()
2673 // if the FS were to call that. note: check_free_space() is
2674 // called from end_transaction().
2676 jnl
->active_tr
= NULL
;
2677 ret
= end_transaction(tr
, 0);
2680 unlock_journal(jnl
);
// journal_flush():
//   Public API: if no transaction is currently active but a buffered
//   ("group commit") transaction is hanging off jnl->cur_tr, force it
//   out to disk via end_transaction(tr, 1).  Bails out early if the
//   journal is marked JOURNAL_INVALID; releases the journal lock before
//   returning.
// NOTE(review): mangled by extraction (the owner check at 2697 and the
//   locking around it are partly missing here); fragments preserved
//   verbatim — reconcile against the pristine source.
2687 journal_flush(journal
*jnl
)
2689 int need_signal
= 0;
2693 if (jnl
->flags
& JOURNAL_INVALID
) {
2697 if (jnl
->owner
!= current_thread()) {
2704 free_old_stuff(jnl
);
2706 // if we're not active, flush any buffered transactions
2707 if (jnl
->active_tr
== NULL
&& jnl
->cur_tr
) {
2708 transaction
*tr
= jnl
->cur_tr
;
2711 end_transaction(tr
, 1); // force it to get flushed
2715 unlock_journal(jnl
);
// journal_active():
//   Public API: reports whether a transaction is active on this journal
//   (1 = active, 0 = idle, based on jnl->active_tr).  There is an
//   early-out branch when the journal is JOURNAL_INVALID whose return
//   value is not visible in this fragment — presumably an error
//   indicator; confirm against the pristine source.
// NOTE(review): mangled by extraction; fragments preserved verbatim.
2722 journal_active(journal
*jnl
)
2724 if (jnl
->flags
& JOURNAL_INVALID
) {
2728 return (jnl
->active_tr
== NULL
) ? 0 : 1;
2732 journal_owner(journal
*jnl
)