1 /*
2 * Copyright (c) 1995-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 //
23 // This file implements a simple write-ahead journaling layer.
24 // In theory any file system can make use of it by calling these
25 // functions when the fs wants to modify meta-data blocks. See
26 // vfs_journal.h for a more detailed description of the api and
27 // data structures.
28 //
29 // Dominic Giampaolo (dbg@apple.com)
30 //
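//
// Illustrative usage sketch (not compiled into this file): roughly how a
// file system is expected to bracket a meta-data change with this API.
// journal_modify_block_end() and journal_end_transaction() are assumed
// from vfs_journal.h (they are not defined in this excerpt), and the
// caller is assumed to already hold a journal from journal_create() or
// journal_open().
//
#if 0
static int
example_modify_metadata_block(journal *jnl, struct buf *bp)
{
    int ret;

    // all meta-data changes happen inside a transaction
    ret = journal_start_transaction(jnl);
    if (ret) {
        return ret;
    }

    // declare intent to modify the buffer before touching it;
    // the journal keeps the block locked until it has been journaled
    ret = journal_modify_block_start(jnl, bp);
    if (ret == 0) {
        // ... modify the meta-data in bp->b_data here ...
        journal_modify_block_end(jnl, bp);
    }

    // writes the blocks to the journal and later lets them trickle
    // out to their real locations on the fs device
    return journal_end_transaction(jnl);
}
#endif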
31
32 #ifdef KERNEL
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/file.h>
38 #include <sys/stat.h>
39 #include <sys/buf.h>
40 #include <sys/proc.h>
41 #include <sys/mount.h>
42 #include <sys/namei.h>
43 #include <sys/vnode.h>
44 #include <sys/ioctl.h>
45 #include <sys/tty.h>
46 #include <sys/ubc.h>
47 #include <sys/malloc.h>
48 #include <sys/vnode.h>
49 #include <kern/thread_act.h>
50 #include <sys/disk.h>
51 #include <miscfs/specfs/specdev.h>
52
53 extern task_t kernel_task;
54
55 #else
56
57 #include <stdio.h>
58 #include <stdlib.h>
59 #include <string.h>
60 #include <limits.h>
61 #include <errno.h>
62 #include <fcntl.h>
63 #include <unistd.h>
64 #include <stdarg.h>
65 #include <sys/types.h>
66 #include "compat.h"
67
68 #endif /* KERNEL */
69
70 #include "vfs_journal.h"
71
72
73 // number of bytes to checksum in a block_list_header
74 // NOTE: this should be enough to clear out the header
75 // fields as well as the first entry of binfo[]
76 #define BLHDR_CHECKSUM_SIZE 32
77
78
79
80 static int end_transaction(transaction *tr, int force_it);
81 static void abort_transaction(journal *jnl, transaction *tr);
82 static void dump_journal(journal *jnl);
83
84
85 //
86 // 3105942 - Coalesce writes to the same block on journal replay
87 //
88
89 typedef struct bucket {
90 off_t block_num;
91 size_t jnl_offset;
92 size_t block_size;
93 } bucket;
94
95 #define STARTING_BUCKETS 256
96
97 static int add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr);
98 static int grow_table(struct bucket **buf_ptr, int num_buckets, int new_size);
99 static int lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full);
100 static int do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr);
101 static int insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr, int overwriting);
102
103 #define CHECK_JOURNAL(jnl) \
104 do { \
105 if (jnl == NULL) {\
106 panic("%s:%d: null journal ptr?\n", __FILE__, __LINE__);\
107 }\
108 if (jnl->jdev == NULL) { \
109 panic("%s:%d: jdev is null!\n", __FILE__, __LINE__);\
110 } \
111 if (jnl->fsdev == NULL) { \
112 panic("%s:%d: fsdev is null!\n", __FILE__, __LINE__);\
113 } \
114 if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC) {\
115 panic("%s:%d: jhdr magic corrupted (0x%x != 0x%x)\n",\
116 __FILE__, __LINE__, jnl->jhdr->magic, JOURNAL_HEADER_MAGIC);\
117 }\
118 if ( jnl->jhdr->start <= 0 \
119 || jnl->jhdr->start > jnl->jhdr->size\
120 || jnl->jhdr->start > 1024*1024*1024) {\
121 panic("%s:%d: jhdr start looks bad (0x%llx max size 0x%llx)\n", \
122 __FILE__, __LINE__, jnl->jhdr->start, jnl->jhdr->size);\
123 }\
124 if ( jnl->jhdr->end <= 0 \
125 || jnl->jhdr->end > jnl->jhdr->size\
126 || jnl->jhdr->end > 1024*1024*1024) {\
127 panic("%s:%d: jhdr end looks bad (0x%llx max size 0x%llx)\n", \
128 __FILE__, __LINE__, jnl->jhdr->end, jnl->jhdr->size);\
129 }\
130 if (jnl->jhdr->size > 1024*1024*1024) {\
131 panic("%s:%d: jhdr size looks bad (0x%llx)\n",\
132 __FILE__, __LINE__, jnl->jhdr->size);\
133 } \
134 } while(0)
135
136 #define CHECK_TRANSACTION(tr) \
137 do {\
138 if (tr == NULL) {\
139 panic("%s:%d: null transaction ptr?\n", __FILE__, __LINE__);\
140 }\
141 if (tr->jnl == NULL) {\
142 panic("%s:%d: null tr->jnl ptr?\n", __FILE__, __LINE__);\
143 }\
144 if (tr->blhdr != (block_list_header *)tr->tbuffer) {\
145 panic("%s:%d: blhdr (0x%x) != tbuffer (0x%x)\n", __FILE__, __LINE__, tr->blhdr, tr->tbuffer);\
146 }\
147 if (tr->total_bytes < 0) {\
148 panic("%s:%d: tr total_bytes looks bad: %d\n", __FILE__, __LINE__, tr->total_bytes);\
149 }\
150 if (tr->journal_start < 0 || tr->journal_start > 1024*1024*1024) {\
151 panic("%s:%d: tr journal start looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_start);\
152 }\
153 if (tr->journal_end < 0 || tr->journal_end > 1024*1024*1024) {\
154 panic("%s:%d: tr journal end looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_end);\
155 }\
156 if (tr->blhdr && (tr->blhdr->max_blocks <= 0 || tr->blhdr->max_blocks > (tr->jnl->jhdr->size/tr->jnl->jhdr->jhdr_size))) {\
157 panic("%s:%d: tr blhdr max_blocks looks bad: %d\n", __FILE__, __LINE__, tr->blhdr->max_blocks);\
158 }\
159 } while(0)
160
161
162
163 //
164 // this isn't a great checksum routine but it will do for now.
165 // we use it to checksum the journal header and the block list
166 // headers that are at the start of each transaction.
167 //
168 static int
169 calc_checksum(char *ptr, int len)
170 {
171 int i, cksum=0;
172
173 // this is a lame checksum but for now it'll do
174 for(i=0; i < len; i++, ptr++) {
175 cksum = (cksum << 8) ^ (cksum + *(unsigned char *)ptr);
176 }
177
178 return (~cksum);
179 }
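//
// Minimal sketch (not compiled) of the checksum convention used below:
// the checksum field is zeroed before the structure is summed, so a
// later verification can redo the exact same computation.  "hdr" is
// just an example variable; see write_journal_header() and
// replay_journal() for the real uses.
//
#if 0
static void
example_checksum_header(struct journal_header *hdr)
{
    // store: zero the field, then checksum the whole header
    hdr->checksum = 0;
    hdr->checksum = calc_checksum((char *)hdr, sizeof(struct journal_header));

    // verify: save the stored value, zero the field, recompute, compare
}
#endif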
180
181
182 #define JNL_WRITE 0x0001
183 #define JNL_READ 0x0002
184 #define JNL_HEADER 0x8000
185
186 //
187 // This function sets up a fake buf and passes it directly to the
188 // journal device strategy routine (so that it won't get cached in
189 // the block cache).
190 //
191 // It also handles range checking the i/o so that we don't write
192 // outside the journal boundaries and it will wrap the i/o back
193 // to the beginning if necessary (skipping over the journal header)
194 //
195 static size_t
196 do_journal_io(journal *jnl, off_t *offset, void *data, size_t len, int direction)
197 {
198 int err, io_sz=0, curlen=len;
199 struct buf *bp;
200 int max_iosize=0, max_vectors;
201
202 if (*offset < 0 || *offset > jnl->jhdr->size) {
203 panic("jnl: do_jnl_io: bad offset 0x%llx (max 0x%llx)\n", *offset, jnl->jhdr->size);
204 }
205
206 again:
207 bp = alloc_io_buf(jnl->jdev, 1);
208
209 if (direction & JNL_WRITE) {
210 bp->b_flags |= 0; // don't have to set any flags (was: B_WRITEINPROG)
211 jnl->jdev->v_numoutput++;
212 vfs_io_attributes(jnl->jdev, B_WRITE, &max_iosize, &max_vectors);
213 } else if (direction & JNL_READ) {
214 bp->b_flags |= B_READ;
215 vfs_io_attributes(jnl->jdev, B_READ, &max_iosize, &max_vectors);
216 }
217
218 if (max_iosize == 0) {
219 max_iosize = 128 * 1024;
220 }
221
222 if (*offset + (off_t)curlen > jnl->jhdr->size && *offset != 0 && jnl->jhdr->size != 0) {
223 if (*offset == jnl->jhdr->size) {
224 *offset = jnl->jhdr->jhdr_size;
225 } else {
226 curlen = (off_t)jnl->jhdr->size - *offset;
227 }
228 }
229
230 if (curlen > max_iosize) {
231 curlen = max_iosize;
232 }
233
234 if (curlen <= 0) {
235 panic("jnl: do_jnl_io: curlen == %d, offset 0x%llx len %d\n", curlen, *offset, len);
236 }
237
238 if (*offset == 0 && (direction & JNL_HEADER) == 0) {
239 panic("jnl: request for i/o to jnl-header without JNL_HEADER flag set! (len %d, data %p)\n", curlen, data);
240 }
241
242 bp->b_bufsize = curlen;
243 bp->b_bcount = curlen;
244 bp->b_data = data;
245 bp->b_blkno = (daddr_t) ((jnl->jdev_offset + *offset) / (off_t)jnl->jhdr->jhdr_size);
246 bp->b_lblkno = (daddr_t) ((jnl->jdev_offset + *offset) / (off_t)jnl->jhdr->jhdr_size);
247
248 err = VOP_STRATEGY(bp);
249 if (!err) {
250 err = biowait(bp);
251 }
252
253 bp->b_data = NULL;
254 bp->b_bufsize = bp->b_bcount = 0;
255 bp->b_blkno = bp->b_lblkno = -1;
256
257 free_io_buf(bp);
258
259 if (err) {
260 printf("jnl: do_jnl_io: strategy err 0x%x\n", err);
261 return 0;
262 }
263
264 *offset += curlen;
265 io_sz += curlen;
266 if (io_sz != len) {
267 // handle wrap-around
268 data = (char *)data + curlen;
269 curlen = len - io_sz;
270 if (*offset >= jnl->jhdr->size) {
271 *offset = jnl->jhdr->jhdr_size;
272 }
273 goto again;
274 }
275
276 return io_sz;
277 }
278
279 static size_t
280 read_journal_data(journal *jnl, off_t *offset, void *data, size_t len)
281 {
282 return do_journal_io(jnl, offset, data, len, JNL_READ);
283 }
284
285 static size_t
286 write_journal_data(journal *jnl, off_t *offset, void *data, size_t len)
287 {
288 return do_journal_io(jnl, offset, data, len, JNL_WRITE);
289 }
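//
// Usage sketch (not compiled): the helpers above take the journal offset
// by pointer and advance it past the bytes transferred, wrapping back to
// jhdr_size (just past the journal header block) when the i/o crosses
// the end of the journal.  This is how replay_journal() below walks the
// circular log; the chunk size here is arbitrary.
//
#if 0
static size_t
example_read_chunk(journal *jnl, void *buf, size_t chunk)
{
    off_t offset = jnl->jhdr->start;
    size_t got;

    got = read_journal_data(jnl, &offset, buf, chunk);

    // "offset" now points at the first unread byte and has already been
    // wrapped around the end of the journal if necessary.
    return got;
}
#endif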
290
291
292 static int
293 read_journal_header(journal *jnl, void *data, size_t len)
294 {
295 off_t hdr_offset = 0;
296
297 return do_journal_io(jnl, &hdr_offset, data, len, JNL_READ|JNL_HEADER);
298 }
299
300 static int
301 write_journal_header(journal *jnl)
302 {
303 static int num_err_prints = 0;
304 int ret;
305 off_t jhdr_offset = 0;
306
307 //
308 // XXXdbg note: this ioctl doesn't seem to do anything on firewire disks.
309 //
310 ret = VOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NOCRED, current_proc());
311 if (ret != 0) {
312 //
313 // Only print this error if it's a different error than the
314 // previous one, or if it's the first time for this device
315 // or if the total number of printfs is less than 25. We
316 // allow for up to 25 printfs to ensure that some make it
317 // into the on-disk syslog. Otherwise if we only printed
318 // one, it's possible it would never make it to the syslog
319 // for the root volume and that makes debugging hard.
320 //
321 if ( ret != jnl->last_flush_err
322 || (jnl->flags & JOURNAL_FLUSHCACHE_ERR) == 0
323 || num_err_prints++ < 25) {
324
325 printf("jnl: flushing fs disk buffer returned 0x%x\n", ret);
326
327 jnl->flags |= JOURNAL_FLUSHCACHE_ERR;
328 jnl->last_flush_err = ret;
329 }
330 }
331
332
333 jnl->jhdr->checksum = 0;
334 jnl->jhdr->checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header));
335 if (do_journal_io(jnl, &jhdr_offset, jnl->header_buf, jnl->jhdr->jhdr_size, JNL_WRITE|JNL_HEADER) != jnl->jhdr->jhdr_size) {
336 printf("jnl: write_journal_header: error writing the journal header!\n");
337 jnl->flags |= JOURNAL_INVALID;
338 return -1;
339 }
340
341 // Have to flush after writing the journal header so that
342 // a future transaction doesn't sneak out to disk before
343 // the header does and thus overwrite data that the old
344 // journal header refers to. Saw this exact case happen
345 // on an IDE bus analyzer with Larry Barras so while it
346 // may seem obscure, it's not.
347 //
348 VOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NOCRED, current_proc());
349
350 return 0;
351 }
352
353
354
355 //
356 // this is a work function used to free up transactions that
357 // completed. they can't be free'd from buffer_flushed_callback
358 // because it is called from deep within the disk driver stack
359 // and thus can't do something that would potentially cause
360 // paging. it gets called by each of the journal api entry
361 // points so stuff shouldn't hang around for too long.
362 //
363 static void
364 free_old_stuff(journal *jnl)
365 {
366 transaction *tr, *next;
367
368 for(tr=jnl->tr_freeme; tr; tr=next) {
369 next = tr->next;
370 FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
371 }
372
373 jnl->tr_freeme = NULL;
374 }
375
376
377
378 //
379 // This is our callback that lets us know when a buffer has been
380 // flushed to disk. It's called from deep within the driver stack
381 // and thus is quite limited in what it can do. Notably, it can
382 // not initiate any new i/o's or allocate/free memory.
383 //
384 static void
385 buffer_flushed_callback(struct buf *bp)
386 {
387 transaction *tr;
388 journal *jnl;
389 transaction *ctr, *prev=NULL, *next;
390 int i, bufsize;
391
392
393 //printf("jnl: buf flush: bp @ 0x%x l/blkno %d/%d vp 0x%x tr @ 0x%x\n",
394 // bp, bp->b_lblkno, bp->b_blkno, bp->b_vp, bp->b_transaction);
395
396 // snarf out the bits we want
397 bufsize = bp->b_bufsize;
398 tr = bp->b_transaction;
399
400 bp->b_iodone = NULL; // don't call us for this guy again
401 bp->b_transaction = NULL;
402
403 //
404 // This is what biodone() would do if it didn't call us.
405 // NOTE: THIS CODE *HAS* TO BE HERE!
406 //
407 if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release it */
408 brelse(bp);
409 } else { /* or just wakeup the buffer */
410 CLR(bp->b_flags, B_WANTED);
411 wakeup(bp);
412 }
413
414 // NOTE: from here on out we do *NOT* touch bp anymore.
415
416
417 // if there's no transaction pointer then we've already seen this buffer
418 if (tr == NULL) {
419 return;
420 }
421
422 CHECK_TRANSACTION(tr);
423
424 jnl = tr->jnl;
425 if (jnl->flags & JOURNAL_INVALID) {
426 return;
427 }
428
429 CHECK_JOURNAL(jnl);
430
431 // update the number of blocks that have been flushed.
432 // this buf may represent more than one block so take
433 // that into account.
434 tr->num_flushed += bufsize;
435
436
437 // if this transaction isn't done yet, just return as
438 // there is nothing to do.
439 if ((tr->num_flushed + tr->num_killed) < tr->total_bytes) {
440 return;
441 }
442
443 //printf("jnl: tr 0x%x (0x%llx 0x%llx) in jnl 0x%x completed.\n",
444 // tr, tr->journal_start, tr->journal_end, jnl);
445
446 // find this entry in the old_start[] index and mark it completed
447 simple_lock(&jnl->old_start_lock);
448 for(i=0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {
449
450 if ((jnl->old_start[i] & ~(0x8000000000000000LL)) == tr->journal_start) {
451 jnl->old_start[i] &= ~(0x8000000000000000LL);
452 break;
453 }
454 }
455 if (i >= sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
456 panic("jnl: buffer_flushed: did not find tr w/start @ %lld (tr 0x%x, jnl 0x%x)\n",
457 tr->journal_start, tr, jnl);
458 }
459 simple_unlock(&jnl->old_start_lock);
460
461
462 // if we are here then we need to update the journal header
463 // to reflect that this transaction is complete
464 if (tr->journal_start == jnl->active_start) {
465 jnl->active_start = tr->journal_end;
466 tr->journal_start = tr->journal_end = (off_t)0;
467 }
468
469 // go through the completed_trs list and try to coalesce
470 // entries, restarting back at the beginning if we have to.
471 for(ctr=jnl->completed_trs; ctr; prev=ctr, ctr=next) {
472 if (ctr->journal_start == jnl->active_start) {
473 jnl->active_start = ctr->journal_end;
474 if (prev) {
475 prev->next = ctr->next;
476 }
477 if (ctr == jnl->completed_trs) {
478 jnl->completed_trs = ctr->next;
479 }
480
481 next = jnl->completed_trs; // this starts us over again
482 ctr->next = jnl->tr_freeme;
483 jnl->tr_freeme = ctr;
484 ctr = NULL;
485 } else if (tr->journal_end == ctr->journal_start) {
486 ctr->journal_start = tr->journal_start;
487 next = jnl->completed_trs; // this starts us over again
488 ctr = NULL;
489 tr->journal_start = tr->journal_end = (off_t)0;
490 } else if (tr->journal_start == ctr->journal_end) {
491 ctr->journal_end = tr->journal_end;
492 next = ctr->next;
493 tr->journal_start = tr->journal_end = (off_t)0;
494 } else {
495 next = ctr->next;
496 }
497 }
498
499 // at this point no one should be using this guy anymore
500 tr->total_bytes = 0xfbadc0de;
501
502 // if this is true then we didn't merge with anyone
503 // so link ourselves into the completed
504 // transaction list.
505 if (tr->journal_start != 0) {
506 // put this entry into the correct sorted place
507 // in the list instead of just at the head.
508 //
509
510 prev = NULL;
511 for(ctr=jnl->completed_trs; ctr && tr->journal_start > ctr->journal_start; prev=ctr, ctr=ctr->next) {
512 // just keep looping
513 }
514
515 if (ctr == NULL && prev == NULL) {
516 jnl->completed_trs = tr;
517 tr->next = NULL;
518 } else if (ctr == jnl->completed_trs) {
519 tr->next = jnl->completed_trs;
520 jnl->completed_trs = tr;
521 } else {
522 tr->next = prev->next;
523 prev->next = tr;
524 }
525 } else {
526 // if we're here this tr got merged with someone else so
527 // put it on the list to be free'd
528 tr->next = jnl->tr_freeme;
529 jnl->tr_freeme = tr;
530 }
531 }
532
533
534 #include <libkern/OSByteOrder.h>
535
536 #define SWAP16(x) OSSwapInt16(x)
537 #define SWAP32(x) OSSwapInt32(x)
538 #define SWAP64(x) OSSwapInt64(x)
539
540
541 static void
542 swap_journal_header(journal *jnl)
543 {
544 jnl->jhdr->magic = SWAP32(jnl->jhdr->magic);
545 jnl->jhdr->endian = SWAP32(jnl->jhdr->endian);
546 jnl->jhdr->start = SWAP64(jnl->jhdr->start);
547 jnl->jhdr->end = SWAP64(jnl->jhdr->end);
548 jnl->jhdr->size = SWAP64(jnl->jhdr->size);
549 jnl->jhdr->blhdr_size = SWAP32(jnl->jhdr->blhdr_size);
550 jnl->jhdr->checksum = SWAP32(jnl->jhdr->checksum);
551 jnl->jhdr->jhdr_size = SWAP32(jnl->jhdr->jhdr_size);
552 }
553
554 static void
555 swap_block_list_header(journal *jnl, block_list_header *blhdr)
556 {
557 int i;
558
559 blhdr->max_blocks = SWAP16(blhdr->max_blocks);
560 blhdr->num_blocks = SWAP16(blhdr->num_blocks);
561 blhdr->bytes_used = SWAP32(blhdr->bytes_used);
562 blhdr->checksum = SWAP32(blhdr->checksum);
563 blhdr->pad = SWAP32(blhdr->pad);
564
565 if (blhdr->num_blocks * sizeof(blhdr->binfo[0]) > jnl->jhdr->blhdr_size) {
566 printf("jnl: blhdr num blocks looks suspicious (%d). not swapping.\n", blhdr->num_blocks);
567 return;
568 }
569
570 for(i=0; i < blhdr->num_blocks; i++) {
571 blhdr->binfo[i].bnum = SWAP64(blhdr->binfo[i].bnum);
572 blhdr->binfo[i].bsize = SWAP32(blhdr->binfo[i].bsize);
573 blhdr->binfo[i].bp = (void *)SWAP32((int)blhdr->binfo[i].bp);
574 }
575 }
576
577
578 static int
579 update_fs_block(journal *jnl, void *block_ptr, off_t fs_block, size_t bsize)
580 {
581 int ret;
582 struct buf *oblock_bp=NULL;
583
584 // first read the block we want.
585 ret = meta_bread(jnl->fsdev, (daddr_t)fs_block, bsize, NOCRED, &oblock_bp);
586 if (ret != 0) {
587 printf("jnl: update_fs_block: error reading fs block # %lld! (ret %d)\n", fs_block, ret);
588
589 if (oblock_bp) {
590 brelse(oblock_bp);
591 oblock_bp = NULL;
592 }
593
594 // let's try to be aggressive here and just re-write the block
595 oblock_bp = getblk(jnl->fsdev, (daddr_t)fs_block, bsize, 0, 0, BLK_META);
596 if (oblock_bp == NULL) {
597 printf("jnl: update_fs_block: getblk() for %lld failed! failing update.\n", fs_block);
598 return -1;
599 }
600 }
601
602 // make sure it's the correct size.
603 if (oblock_bp->b_bufsize != bsize) {
604 brelse(oblock_bp);
605 return -1;
606 }
607
608 // copy the journal data over top of it
609 memcpy(oblock_bp->b_data, block_ptr, bsize);
610
611 if ((ret = VOP_BWRITE(oblock_bp)) != 0) {
612 printf("jnl: update_fs_block: failed to update block %lld (ret %d)\n", fs_block,ret);
613 return ret;
614 }
615
616 // and now invalidate it so that if someone else wants to read
617 // it in a different size they'll be able to do it.
618 ret = meta_bread(jnl->fsdev, (daddr_t)fs_block, bsize, NOCRED, &oblock_bp);
619 if (oblock_bp) {
620 oblock_bp->b_flags |= B_INVAL;
621 brelse(oblock_bp);
622 }
623
624 return 0;
625 }
626
627 static int
628 grow_table(struct bucket **buf_ptr, int num_buckets, int new_size)
629 {
630 struct bucket *newBuf;
631 int current_size = num_buckets, i;
632
633 // return if newsize is less than the current size
634 if (new_size < num_buckets) {
635 return current_size;
636 }
637
638 if ((MALLOC(newBuf, struct bucket *, new_size*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) {
639 printf("jnl: grow_table: no memory to expand coalesce buffer!\n");
640 return -1;
641 }
642
643 // printf("jnl: grow_table: expanded co_buf to %d elems\n", new_size);
644
645 // copy existing elements
646 bcopy(*buf_ptr, newBuf, num_buckets*sizeof(struct bucket));
647
648 // initialize the new ones
649 for(i=num_buckets; i < new_size; i++) {
650 newBuf[i].block_num = (off_t)-1;
651 }
652
653 // free the old container
654 FREE(*buf_ptr, M_TEMP);
655
656 // reset the buf_ptr
657 *buf_ptr = newBuf;
658
659 return new_size;
660 }
661
662 static int
663 lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full)
664 {
665 int lo, hi, index, matches, i;
666
667 if (num_full == 0) {
668 return 0; // table is empty, so insert at index=0
669 }
670
671 lo = 0;
672 hi = num_full - 1;
673 index = -1;
674
675 // perform binary search for block_num
676 do {
677 int mid = (hi - lo)/2 + lo;
678 off_t this_num = (*buf_ptr)[mid].block_num;
679
680 if (block_num == this_num) {
681 index = mid;
682 break;
683 }
684
685 if (block_num < this_num) {
686 hi = mid;
687 continue;
688 }
689
690 if (block_num > this_num) {
691 lo = mid + 1;
692 continue;
693 }
694 } while(lo < hi);
695
696 // check if lo and hi converged on the match
697 if (block_num == (*buf_ptr)[hi].block_num) {
698 index = hi;
699 }
700
701 // if no existing entry found, find index for new one
702 if (index == -1) {
703 index = (block_num < (*buf_ptr)[hi].block_num) ? hi : hi + 1;
704 } else {
705 // make sure that we return the right-most index in the case of multiple matches
706 matches = 0;
707 i = index + 1;
708 while(i < num_full && block_num == (*buf_ptr)[i].block_num) {
709 matches++;
710 i++;
711 }
712
713 index += matches;
714 }
715
716 return index;
717 }
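//
// Worked example (not compiled; the values are made up) of what
// lookup_bucket() returns for a small sorted table.  Note the
// right-most-match rule for duplicate block numbers.
//
#if 0
static void
example_lookup_bucket(void)
{
    static struct bucket tab[] = {
        { 10,  512, 512 },      // block_num, jnl_offset, block_size
        { 20, 1024, 512 },
        { 20, 1536, 512 },
        { 30, 2048, 512 },
    };
    struct bucket *bp = tab;

    (void)lookup_bucket(&bp, 20, 4);   // returns 2: right-most entry for block 20
    (void)lookup_bucket(&bp, 15, 4);   // returns 1: insertion point before block 20
    (void)lookup_bucket(&bp, 40, 4);   // returns 4: append at the end of the table
}
#endif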
718
719 static int
720 insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr, int overwriting)
721 {
722 if (!overwriting) {
723 // grow the table if we're out of space
724 if (*num_full_ptr >= *num_buckets_ptr) {
725 int new_size = *num_buckets_ptr * 2;
726 int grow_size = grow_table(buf_ptr, *num_buckets_ptr, new_size);
727
728 if (grow_size < new_size) {
729 printf("jnl: add_block: grow_table returned an error!\n");
730 return -1;
731 }
732
733 *num_buckets_ptr = grow_size; //update num_buckets to reflect the new size
734 }
735
736 // if we're not inserting at the end, we need to bcopy
737 if (blk_index != *num_full_ptr) {
738 bcopy( (*buf_ptr)+(blk_index), (*buf_ptr)+(blk_index+1), (*num_full_ptr-blk_index)*sizeof(struct bucket) );
739 }
740
741 (*num_full_ptr)++; // increment only if we're not overwriting
742 }
743
744 // sanity check the values we're about to add
745 if (offset >= jnl->jhdr->size) {
746 offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
747 }
748 if (size <= 0) {
749 panic("jnl: insert_block: bad size in insert_block (%d)\n", size);
750 }
751
752 (*buf_ptr)[blk_index].block_num = num;
753 (*buf_ptr)[blk_index].block_size = size;
754 (*buf_ptr)[blk_index].jnl_offset = offset;
755
756 return blk_index;
757 }
758
759 static int
760 do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr)
761 {
762 int num_to_remove, index, i, overwrite, err;
763 size_t jhdr_size = jnl->jhdr->jhdr_size, new_offset;
764 off_t overlap, block_start, block_end;
765
766 block_start = block_num*jhdr_size;
767 block_end = block_start + size;
768 overwrite = (block_num == (*buf_ptr)[blk_index].block_num && size >= (*buf_ptr)[blk_index].block_size);
769
770 // first, eliminate any overlap with the previous entry
771 if (blk_index != 0 && !overwrite) {
772 off_t prev_block_start = (*buf_ptr)[blk_index-1].block_num*jhdr_size;
773 off_t prev_block_end = prev_block_start + (*buf_ptr)[blk_index-1].block_size;
774 overlap = prev_block_end - block_start;
775 if (overlap > 0) {
776 if (overlap % jhdr_size != 0) {
777 panic("jnl: do_overlap: overlap with previous entry not a multiple of %d\n", jhdr_size);
778 }
779
780 // if the previous entry completely overlaps this one, we need to break it into two pieces.
781 if (prev_block_end > block_end) {
782 off_t new_num = block_end / jhdr_size;
783 size_t new_size = prev_block_end - block_end;
784 size_t new_offset = (*buf_ptr)[blk_index-1].jnl_offset + (block_end - prev_block_start);
785
786 err = insert_block(jnl, buf_ptr, blk_index, new_num, new_size, new_offset, num_buckets_ptr, num_full_ptr, 0);
787 if (err < 0) {
788 panic("jnl: do_overlap: error inserting during pre-overlap\n");
789 }
790 }
791
792 // Regardless, we need to truncate the previous entry to the beginning of the overlap
793 (*buf_ptr)[blk_index-1].block_size = block_start - prev_block_start;
794 }
795 }
796
797 // then, bail out fast if there's no overlap with the entries that follow
798 if (!overwrite && block_end <= (*buf_ptr)[blk_index].block_num*jhdr_size) {
799 return 0; // no overlap, no overwrite
800 } else if (overwrite && (blk_index + 1 >= *num_full_ptr || block_end <= (*buf_ptr)[blk_index+1].block_num*jhdr_size)) {
801 return 1; // simple overwrite
802 }
803
804 // Otherwise, find all cases of total and partial overlap. We use the special
805 // block_num of -2 to designate entries that are completely overlapped and must
806 // be eliminated. The block_num, size, and jnl_offset of partially overlapped
807 // entries must be adjusted to keep the array consistent.
808 index = blk_index;
809 num_to_remove = 0;
810 while(index < *num_full_ptr && block_end > (*buf_ptr)[index].block_num*jhdr_size) {
811 if (block_end >= ((*buf_ptr)[index].block_num*jhdr_size + (*buf_ptr)[index].block_size)) {
812 (*buf_ptr)[index].block_num = -2; // mark this for deletion
813 num_to_remove++;
814 } else {
815 overlap = block_end - (*buf_ptr)[index].block_num*jhdr_size;
816 if (overlap > 0) {
817 if (overlap % jhdr_size != 0) {
818 panic("jnl: do_overlap: overlap of %d is not multiple of %d\n", overlap, jhdr_size);
819 }
820
821 // if we partially overlap this entry, adjust its block number, jnl offset, and size
822 (*buf_ptr)[index].block_num += (overlap / jhdr_size); // make sure overlap is multiple of jhdr_size, or round up
823
824 new_offset = (*buf_ptr)[index].jnl_offset + overlap; // check for wrap-around
825 if (new_offset >= jnl->jhdr->size) {
826 new_offset = jhdr_size + (new_offset - jnl->jhdr->size);
827 }
828 (*buf_ptr)[index].jnl_offset = new_offset;
829
830 (*buf_ptr)[index].block_size -= overlap; // sanity check for negative value
831 if ((*buf_ptr)[index].block_size <= 0) {
832 panic("jnl: do_overlap: after overlap, new block size is invalid (%d)\n", (*buf_ptr)[index].block_size);
833 // return -1; // if above panic is removed, return -1 for error
834 }
835 }
836
837 }
838
839 index++;
840 }
841
842 // bcopy over any completely overlapped entries, starting at the right (where the above loop broke out)
843 index--; // start with the last index used within the above loop
844 while(index >= blk_index) {
845 if ((*buf_ptr)[index].block_num == -2) {
846 if (index == *num_full_ptr-1) {
847 (*buf_ptr)[index].block_num = -1; // it's the last item in the table... just mark as free
848 } else {
849 bcopy( (*buf_ptr)+(index+1), (*buf_ptr)+(index), (*num_full_ptr - (index + 1)) * sizeof(struct bucket) );
850 }
851 (*num_full_ptr)--;
852 }
853 index--;
854 }
855
856 // eliminate any stale entries at the end of the table
857 for(i=*num_full_ptr; i < (*num_full_ptr + num_to_remove); i++) {
858 (*buf_ptr)[i].block_num = -1;
859 }
860
861 return 0; // if we got this far, we need to insert the entry into the table (rather than overwrite)
862 }
863
864 // PR-3105942: Coalesce writes to the same block in journal replay
865 // We coalesce writes by maintaining a dynamic sorted array of physical disk blocks
866 // to be replayed and the corresponding location in the journal which contains
867 // the most recent data for those blocks. The array is "played" once all the
868 // blocks in the journal have been coalesced. The trickiest code handles
869 // conflicting/overlapping writes to a single block. Because coalescing can
870 // disrupt the existing time-ordering of blocks in the journal playback, care
871 // is taken to catch any overlaps and keep the array consistent.
872 static int
873 add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr)
874 {
875 int blk_index, overwriting;
876 size_t jhdr_size = jnl->jhdr->jhdr_size;
877
878 // on return from lookup_bucket(), blk_index is the index into the table where block_num should be
879 // inserted (or the index of the elem to overwrite).
880 blk_index = lookup_bucket( buf_ptr, block_num, *num_full_ptr);
881
882 // check if the index is within bounds (if we're adding this block to the end of
883 // the table, blk_index will be equal to num_full)
884 if (blk_index < 0 || blk_index > *num_full_ptr) {
885 //printf("jnl: add_block: trouble adding block to co_buf\n");
886 return -1;
887 } // else printf("jnl: add_block: adding block 0x%llx at i=%d\n", block_num, blk_index);
888
889 // Determine whether we're overwriting an existing entry by checking for overlap
890 overwriting = do_overlap(jnl, buf_ptr, blk_index, block_num, size, offset, num_buckets_ptr, num_full_ptr);
891 if (overwriting < 0) {
892 return -1; // if we got an error, pass it along
893 }
894
895 // returns the index, or -1 on error
896 blk_index = insert_block(jnl, buf_ptr, blk_index, block_num, size, offset, num_buckets_ptr, num_full_ptr, overwriting);
897
898 return blk_index;
899 }
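//
// Condensed sketch (not compiled) of how add_block() and the co_buf
// table are meant to be driven: every journaled block is funnelled
// through add_block(), which keeps the table sorted by block number and
// resolves overlaps so only the newest copy of each range survives.
// The bnums/sizes/joffs arrays here are hypothetical inputs; see
// replay_journal() below for the real driver with full error handling.
//
#if 0
static int
example_coalesce(journal *jnl, off_t *bnums, size_t *sizes, size_t *joffs, int count)
{
    struct bucket *co_buf;
    int i, num_buckets = STARTING_BUCKETS, num_full = 0;

    if ((MALLOC(co_buf, struct bucket *, num_buckets*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) {
        return -1;
    }
    for(i=0; i < num_buckets; i++) {
        co_buf[i].block_num = -1;       // mark every slot free
    }

    // later entries win over earlier ones for the same disk blocks
    for(i=0; i < count; i++) {
        if (add_block(jnl, &co_buf, bnums[i], sizes[i], joffs[i], &num_buckets, &num_full) == -1) {
            FREE(co_buf, M_TEMP);
            return -1;
        }
    }

    // co_buf[0..num_full-1] now holds sorted, non-overlapping extents
    // ready to be read from the journal and written to the fs device.

    FREE(co_buf, M_TEMP);
    return 0;
}
#endif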
900
901 static int
902 replay_journal(journal *jnl)
903 {
904 int i, ret, orig_checksum, checksum, max_bsize;
905 struct buf *oblock_bp;
906 block_list_header *blhdr;
907 off_t offset;
908 char *buf, *block_ptr=NULL;
909 struct bucket *co_buf;
910 int num_buckets = STARTING_BUCKETS, num_full;
911
912 // wrap the start ptr if it points to the very end of the journal
913 if (jnl->jhdr->start == jnl->jhdr->size) {
914 jnl->jhdr->start = jnl->jhdr->jhdr_size;
915 }
916 if (jnl->jhdr->end == jnl->jhdr->size) {
917 jnl->jhdr->end = jnl->jhdr->jhdr_size;
918 }
919
920 if (jnl->jhdr->start == jnl->jhdr->end) {
921 return 0;
922 }
923
924 // allocate memory for the header_block. we'll read each blhdr into this
925 if (kmem_alloc(kernel_map, (vm_offset_t *)&buf, jnl->jhdr->blhdr_size)) {
926 printf("jnl: replay_journal: no memory for block buffer! (%d bytes)\n",
927 jnl->jhdr->blhdr_size);
928 return -1;
929 }
930
931 // allocate memory for the coalesce buffer
932 if ((MALLOC(co_buf, struct bucket *, num_buckets*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) {
933 printf("jnl: replay_journal: no memory for coalesce buffer!\n");
934 return -1;
935 }
936
937 // initialize entries
938 for(i=0; i < num_buckets; i++) {
939 co_buf[i].block_num = -1;
940 }
941 num_full = 0; // empty at first
942
943
944 printf("jnl: replay_journal: from: %lld to: %lld (joffset 0x%llx)\n",
945 jnl->jhdr->start, jnl->jhdr->end, jnl->jdev_offset);
946
947 while(jnl->jhdr->start != jnl->jhdr->end) {
948 offset = jnl->jhdr->start;
949 ret = read_journal_data(jnl, &offset, buf, jnl->jhdr->blhdr_size);
950 if (ret != jnl->jhdr->blhdr_size) {
951 printf("jnl: replay_journal: Could not read block list header block @ 0x%llx!\n", offset);
952 goto bad_replay;
953 }
954
955 blhdr = (block_list_header *)buf;
956
957 orig_checksum = blhdr->checksum;
958 blhdr->checksum = 0;
959 if (jnl->flags & JOURNAL_NEED_SWAP) {
960 // calculate the checksum based on the unswapped data
961 // because it is done byte-at-a-time.
962 orig_checksum = SWAP32(orig_checksum);
963 checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
964 swap_block_list_header(jnl, blhdr);
965 } else {
966 checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
967 }
968 if (checksum != orig_checksum) {
969 printf("jnl: replay_journal: bad block list header @ 0x%llx (checksum 0x%x != 0x%x)\n",
970 offset, orig_checksum, checksum);
971 goto bad_replay;
972 }
973 if ( blhdr->max_blocks <= 0 || blhdr->max_blocks > 2048
974 || blhdr->num_blocks <= 0 || blhdr->num_blocks > blhdr->max_blocks) {
975 printf("jnl: replay_journal: bad looking journal entry: max: %d num: %d\n",
976 blhdr->max_blocks, blhdr->num_blocks);
977 goto bad_replay;
978 }
979
980 for(i=1,max_bsize=0; i < blhdr->num_blocks; i++) {
981 if (blhdr->binfo[i].bnum < 0 && blhdr->binfo[i].bnum != (off_t)-1) {
982 printf("jnl: replay_journal: bogus block number 0x%llx\n", blhdr->binfo[i].bnum);
983 goto bad_replay;
984 }
985 if (blhdr->binfo[i].bsize > max_bsize) {
986 max_bsize = blhdr->binfo[i].bsize;
987 }
988 }
989
990 // round max_bsize up to a multiple of the page size.
991 if (max_bsize & (PAGE_SIZE - 1)) {
992 max_bsize = (max_bsize + PAGE_SIZE) & ~(PAGE_SIZE - 1);
993 }
994
995
996 //printf("jnl: replay_journal: adding %d blocks in journal entry @ 0x%llx to co_buf\n",
997 // blhdr->num_blocks-1, jnl->jhdr->start);
998 for(i=1; i < blhdr->num_blocks; i++) {
999 int size, ret_val;
1000 off_t number;
1001
1002 size = blhdr->binfo[i].bsize;
1003 number = blhdr->binfo[i].bnum;
1004
1005 // don't add "killed" blocks
1006 if (number == (off_t)-1) {
1007 //printf("jnl: replay_journal: skipping killed fs block (index %d)\n", i);
1008 } else {
1009 // add this bucket to co_buf, coalescing where possible
1010 // printf("jnl: replay_journal: adding block 0x%llx\n", number);
1011 ret_val = add_block(jnl, &co_buf, number, size, (size_t) offset, &num_buckets, &num_full);
1012
1013 if (ret_val == -1) {
1014 printf("jnl: replay_journal: trouble adding block to co_buf\n");
1015 goto bad_replay;
1016 } // else printf("jnl: replay_journal: added block 0x%llx at i=%d\n", number);
1017 }
1018
1019 // increment offset
1020 offset += size;
1021
1022 // check if the last block added puts us off the end of the jnl.
1023 // if so, we need to wrap to the beginning and take any remainder
1024 // into account
1025 //
1026 if (offset >= jnl->jhdr->size) {
1027 offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
1028 }
1029 }
1030
1031
1032 jnl->jhdr->start += blhdr->bytes_used;
1033 if (jnl->jhdr->start >= jnl->jhdr->size) {
1034 // wrap around and skip the journal header block
1035 jnl->jhdr->start = (jnl->jhdr->start % jnl->jhdr->size) + jnl->jhdr->jhdr_size;
1036 }
1037 }
1038
1039
1040 //printf("jnl: replay_journal: replaying %d blocks\n", num_full);
1041
1042 if (kmem_alloc(kernel_map, (vm_offset_t *)&block_ptr, max_bsize)) {
1043 goto bad_replay;
1044 }
1045
1046 // Replay the coalesced entries in the co-buf
1047 for(i=0; i < num_full; i++) {
1048 size_t size = co_buf[i].block_size;
1049 off_t jnl_offset = (off_t) co_buf[i].jnl_offset;
1050 off_t number = co_buf[i].block_num;
1051
1052
1053 // printf("replaying co_buf[%d]: block 0x%llx, size 0x%x, jnl_offset 0x%llx\n", i, co_buf[i].block_num,
1054 // co_buf[i].block_size, co_buf[i].jnl_offset);
1055
1056 if (number == (off_t)-1) {
1057 // printf("jnl: replay_journal: skipping killed fs block\n");
1058 } else {
1059
1060 // do journal read, and set the phys. block
1061 ret = read_journal_data(jnl, &jnl_offset, block_ptr, size);
1062 if (ret != size) {
1063 printf("jnl: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", offset);
1064 goto bad_replay;
1065 }
1066
1067 if (update_fs_block(jnl, block_ptr, number, size) != 0) {
1068 goto bad_replay;
1069 }
1070 }
1071 }
1072
1073
1074 // done replaying; update jnl header
1075 if (write_journal_header(jnl) != 0) {
1076 goto bad_replay;
1077 }
1078
1079 // free block_ptr
1080 kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize);
1081 block_ptr = NULL;
1082
1083 // free the coalesce buffer
1084 FREE(co_buf, M_TEMP);
1085 co_buf = NULL;
1086
1087 kmem_free(kernel_map, (vm_offset_t)buf, jnl->jhdr->blhdr_size);
1088 return 0;
1089
1090 bad_replay:
1091 if (block_ptr) {
1092 kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize);
1093 }
1094 if (co_buf) {
1095 FREE(co_buf, M_TEMP);
1096 }
1097 kmem_free(kernel_map, (vm_offset_t)buf, jnl->jhdr->blhdr_size);
1098
1099 return -1;
1100 }
1101
1102
1103 #define DEFAULT_TRANSACTION_BUFFER_SIZE (128*1024)
1104 //#define DEFAULT_TRANSACTION_BUFFER_SIZE (256*1024) // better performance but uses more mem
1105 #define MAX_TRANSACTION_BUFFER_SIZE (512*1024)
1106
1107 // XXXdbg - so I can change it in the debugger
1108 int def_tbuffer_size = 0;
1109
1110
1111 //
1112 // This function sets the size of the tbuffer and the
1113 // size of the blhdr. It assumes that jnl->jhdr->size
1114 // and jnl->jhdr->jhdr_size are already valid.
1115 //
1116 static void
1117 size_up_tbuffer(journal *jnl, int tbuffer_size, int phys_blksz)
1118 {
1119 //
1120 // one-time initialization based on how much memory
1121 // there is in the machine.
1122 //
1123 if (def_tbuffer_size == 0) {
1124 if (mem_size < (256*1024*1024)) {
1125 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE;
1126 } else if (mem_size < (512*1024*1024)) {
1127 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 2;
1128 } else if (mem_size < (1024*1024*1024)) {
1129 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 3;
1130 } else if (mem_size >= (1024*1024*1024)) {
1131 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 4;
1132 }
1133 }
1134
1135 // size up the transaction buffer... can't be larger than the number
1136 // of blocks that can fit in a block_list_header block.
1137 if (tbuffer_size == 0) {
1138 jnl->tbuffer_size = def_tbuffer_size;
1139 } else {
1140 // make sure that the specified tbuffer_size isn't too small
1141 if (tbuffer_size < jnl->jhdr->blhdr_size * 2) {
1142 tbuffer_size = jnl->jhdr->blhdr_size * 2;
1143 }
1144 // and make sure it's an even multiple of the block size
1145 if ((tbuffer_size % jnl->jhdr->jhdr_size) != 0) {
1146 tbuffer_size -= (tbuffer_size % jnl->jhdr->jhdr_size);
1147 }
1148
1149 jnl->tbuffer_size = tbuffer_size;
1150 }
1151
1152 if (jnl->tbuffer_size > (jnl->jhdr->size / 2)) {
1153 jnl->tbuffer_size = (jnl->jhdr->size / 2);
1154 }
1155
1156 if (jnl->tbuffer_size > MAX_TRANSACTION_BUFFER_SIZE) {
1157 jnl->tbuffer_size = MAX_TRANSACTION_BUFFER_SIZE;
1158 }
1159
1160 jnl->jhdr->blhdr_size = (jnl->tbuffer_size / jnl->jhdr->jhdr_size) * sizeof(block_info);
1161 if (jnl->jhdr->blhdr_size < phys_blksz) {
1162 jnl->jhdr->blhdr_size = phys_blksz;
1163 } else if ((jnl->jhdr->blhdr_size % phys_blksz) != 0) {
1164 // have to round up so we're an even multiple of the physical block size
1165 jnl->jhdr->blhdr_size = (jnl->jhdr->blhdr_size + (phys_blksz - 1)) & ~(phys_blksz - 1);
1166 }
1167 }
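//
// Worked example (illustrative; it assumes sizeof(block_info) is 16
// bytes, i.e. a 64-bit bnum plus 32-bit bsize and bp as suggested by
// swap_block_list_header() above): with the default 128K tbuffer and a
// 512-byte jhdr_size, blhdr_size = (131072 / 512) * 16 = 4096 bytes.
// That is already >= the 512-byte physical block and a multiple of it,
// so it is left alone, and each block_list_header can then describe
// 4096/16 - 1 = 255 blocks (see journal_start_transaction()).
//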
1168
1169
1170
1171 journal *
1172 journal_create(struct vnode *jvp,
1173 off_t offset,
1174 off_t journal_size,
1175 struct vnode *fsvp,
1176 size_t min_fs_blksz,
1177 int32_t flags,
1178 int32_t tbuffer_size,
1179 void (*flush)(void *arg),
1180 void *arg)
1181 {
1182 journal *jnl;
1183 int ret, phys_blksz;
1184
1185 /* Get the real physical block size. */
1186 if (VOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, FSCRED, NULL)) {
1187 return NULL;
1188 }
1189
1190 if (phys_blksz > min_fs_blksz) {
1191 printf("jnl: create: error: phys blksize %d bigger than min fs blksize %d\n",
1192 phys_blksz, min_fs_blksz);
1193 return NULL;
1194 }
1195
1196 if ((journal_size % phys_blksz) != 0) {
1197 printf("jnl: create: journal size 0x%llx is not an even multiple of block size 0x%x\n",
1198 journal_size, phys_blksz);
1199 return NULL;
1200 }
1201
1202 MALLOC_ZONE(jnl, struct journal *, sizeof(struct journal), M_JNL_JNL, M_WAITOK);
1203 memset(jnl, 0, sizeof(*jnl));
1204
1205 jnl->jdev = jvp;
1206 jnl->jdev_offset = offset;
1207 jnl->fsdev = fsvp;
1208 jnl->flush = flush;
1209 jnl->flush_arg = arg;
1210 jnl->flags = (flags & JOURNAL_OPTION_FLAGS_MASK);
1211 simple_lock_init(&jnl->old_start_lock);
1212
1213 if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl->header_buf, phys_blksz)) {
1214 printf("jnl: create: could not allocate space for header buffer (%d bytes)\n", phys_blksz);
1215 goto bad_kmem_alloc;
1216 }
1217
1218 memset(jnl->header_buf, 0, phys_blksz);
1219
1220 jnl->jhdr = (journal_header *)jnl->header_buf;
1221 jnl->jhdr->magic = JOURNAL_HEADER_MAGIC;
1222 jnl->jhdr->endian = ENDIAN_MAGIC;
1223 jnl->jhdr->start = phys_blksz; // start at block #1, block #0 is for the jhdr itself
1224 jnl->jhdr->end = phys_blksz;
1225 jnl->jhdr->size = journal_size;
1226 jnl->jhdr->jhdr_size = phys_blksz;
1227 size_up_tbuffer(jnl, tbuffer_size, phys_blksz);
1228
1229 jnl->active_start = jnl->jhdr->start;
1230
1231 // XXXdbg - for testing you can force the journal to wrap around
1232 // jnl->jhdr->start = jnl->jhdr->size - (phys_blksz*3);
1233 // jnl->jhdr->end = jnl->jhdr->size - (phys_blksz*3);
1234
1235 lockinit(&jnl->jlock, PINOD, "journal", 0, 0);
1236
1237 if (write_journal_header(jnl) != 0) {
1238 printf("jnl: journal_create: failed to write journal header.\n");
1239 goto bad_write;
1240 }
1241
1242 return jnl;
1243
1244
1245 bad_write:
1246 kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz);
1247 bad_kmem_alloc:
1248 jnl->jhdr = NULL;
1249 FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
1250 return NULL;
1251 }
1252
1253
1254 journal *
1255 journal_open(struct vnode *jvp,
1256 off_t offset,
1257 off_t journal_size,
1258 struct vnode *fsvp,
1259 size_t min_fs_blksz,
1260 int32_t flags,
1261 int32_t tbuffer_size,
1262 void (*flush)(void *arg),
1263 void *arg)
1264 {
1265 journal *jnl;
1266 int orig_blksz=0, phys_blksz, blhdr_size;
1267 int orig_checksum, checksum;
1268
1269 /* Get the real physical block size. */
1270 if (VOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, FSCRED, NULL)) {
1271 return NULL;
1272 }
1273
1274 if (phys_blksz > min_fs_blksz) {
1275 printf("jnl: open: error: phys blksize %d bigger than min fs blksize %d\n",
1276 phys_blksz, min_fs_blksz);
1277 return NULL;
1278 }
1279
1280 if ((journal_size % phys_blksz) != 0) {
1281 printf("jnl: open: journal size 0x%llx is not an even multiple of block size 0x%x\n",
1282 journal_size, phys_blksz);
1283 return NULL;
1284 }
1285
1286 MALLOC_ZONE(jnl, struct journal *, sizeof(struct journal), M_JNL_JNL, M_WAITOK);
1287 memset(jnl, 0, sizeof(*jnl));
1288
1289 jnl->jdev = jvp;
1290 jnl->jdev_offset = offset;
1291 jnl->fsdev = fsvp;
1292 jnl->flush = flush;
1293 jnl->flush_arg = arg;
1294 jnl->flags = (flags & JOURNAL_OPTION_FLAGS_MASK);
1295 simple_lock_init(&jnl->old_start_lock);
1296
1297 if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl->header_buf, phys_blksz)) {
1298 printf("jnl: open: could not allocate space for header buffer (%d bytes)\n", phys_blksz);
1299 goto bad_kmem_alloc;
1300 }
1301
1302 jnl->jhdr = (journal_header *)jnl->header_buf;
1303 memset(jnl->jhdr, 0, sizeof(journal_header)+4);
1304
1305 // we have to set this up here so that do_journal_io() will work
1306 jnl->jhdr->jhdr_size = phys_blksz;
1307
1308 if (read_journal_header(jnl, jnl->jhdr, phys_blksz) != phys_blksz) {
1309 printf("jnl: open: could not read %d bytes for the journal header.\n",
1310 phys_blksz);
1311 goto bad_journal;
1312 }
1313
1314 orig_checksum = jnl->jhdr->checksum;
1315 jnl->jhdr->checksum = 0;
1316
1317 if (jnl->jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
1318 // do this before the swap since it's done byte-at-a-time
1319 orig_checksum = SWAP32(orig_checksum);
1320 checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header));
1321 swap_journal_header(jnl);
1322 jnl->flags |= JOURNAL_NEED_SWAP;
1323 } else {
1324 checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header));
1325 }
1326
1327 if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC && jnl->jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
1328 printf("jnl: open: journal magic is bad (0x%x != 0x%x)\n",
1329 jnl->jhdr->magic, JOURNAL_HEADER_MAGIC);
1330 goto bad_journal;
1331 }
1332
1333 // only check the checksum if the magic is the current journal header magic value
1334 if (jnl->jhdr->magic == JOURNAL_HEADER_MAGIC) {
1335
1336 if (orig_checksum != checksum) {
1337 printf("jnl: open: journal checksum is bad (0x%x != 0x%x)\n",
1338 orig_checksum, checksum);
1339
1340 //goto bad_journal;
1341 }
1342 }
1343
1344 // XXXdbg - convert old style magic numbers to the new one
1345 if (jnl->jhdr->magic == OLD_JOURNAL_HEADER_MAGIC) {
1346 jnl->jhdr->magic = JOURNAL_HEADER_MAGIC;
1347 }
1348
1349 if (phys_blksz != jnl->jhdr->jhdr_size && jnl->jhdr->jhdr_size != 0) {
1350 printf("jnl: open: phys_blksz %d does not match journal header size %d\n",
1351 phys_blksz, jnl->jhdr->jhdr_size);
1352
1353 orig_blksz = phys_blksz;
1354 phys_blksz = jnl->jhdr->jhdr_size;
1355 if (VOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&phys_blksz, FWRITE, FSCRED, NULL)) {
1356 printf("jnl: could not set block size to %d bytes.\n", phys_blksz);
1357 goto bad_journal;
1358 }
1359 // goto bad_journal;
1360 }
1361
1362 if ( jnl->jhdr->start <= 0
1363 || jnl->jhdr->start > jnl->jhdr->size
1364 || jnl->jhdr->start > 1024*1024*1024) {
1365 printf("jnl: open: jhdr start looks bad (0x%llx max size 0x%llx)\n",
1366 jnl->jhdr->start, jnl->jhdr->size);
1367 goto bad_journal;
1368 }
1369
1370 if ( jnl->jhdr->end <= 0
1371 || jnl->jhdr->end > jnl->jhdr->size
1372 || jnl->jhdr->end > 1024*1024*1024) {
1373 printf("jnl: open: jhdr end looks bad (0x%llx max size 0x%llx)\n",
1374 jnl->jhdr->end, jnl->jhdr->size);
1375 goto bad_journal;
1376 }
1377
1378 if (jnl->jhdr->size > 1024*1024*1024) {
1379 printf("jnl: open: jhdr size looks bad (0x%llx)\n", jnl->jhdr->size);
1380 goto bad_journal;
1381 }
1382
1383 // XXXdbg - can't do these checks because hfs writes all kinds of
1384 // non-uniform sized blocks even on devices that have a block size
1385 // that is larger than 512 bytes (e.g. optical media w/2k blocks).
1386 // therefore these checks will fail and so we just have to punt and
1387 // do more relaxed checking...
1388 // XXXdbg if ((jnl->jhdr->start % jnl->jhdr->jhdr_size) != 0) {
1389 if ((jnl->jhdr->start % 512) != 0) {
1390 printf("jnl: open: journal start (0x%llx) not a multiple of 512?\n",
1391 jnl->jhdr->start);
1392 goto bad_journal;
1393 }
1394
1395 //XXXdbg if ((jnl->jhdr->end % jnl->jhdr->jhdr_size) != 0) {
1396 if ((jnl->jhdr->end % 512) != 0) {
1397 printf("jnl: open: journal end (0x%llx) not a multiple of block size (0x%x)?\n",
1398 jnl->jhdr->end, jnl->jhdr->jhdr_size);
1399 goto bad_journal;
1400 }
1401
1402 // take care of replaying the journal if necessary
1403 if (flags & JOURNAL_RESET) {
1404 printf("jnl: journal start/end pointers reset! (jnl 0x%x; s 0x%llx e 0x%llx)\n",
1405 jnl, jnl->jhdr->start, jnl->jhdr->end);
1406 jnl->jhdr->start = jnl->jhdr->end;
1407 } else if (replay_journal(jnl) != 0) {
1408 printf("jnl: journal_open: Error replaying the journal!\n");
1409 goto bad_journal;
1410 }
1411
1412 if (orig_blksz != 0) {
1413 VOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, FSCRED, NULL);
1414 phys_blksz = orig_blksz;
1415 if (orig_blksz < jnl->jhdr->jhdr_size) {
1416 printf("jnl: open: jhdr_size is %d but orig phys blk size is %d. switching.\n",
1417 jnl->jhdr->jhdr_size, orig_blksz);
1418
1419 jnl->jhdr->jhdr_size = orig_blksz;
1420 }
1421 }
1422
1423 // make sure this is in sync!
1424 jnl->active_start = jnl->jhdr->start;
1425
1426 // set this now, after we've replayed the journal
1427 size_up_tbuffer(jnl, tbuffer_size, phys_blksz);
1428
1429 lockinit(&jnl->jlock, PINOD, "journal", 0, 0);
1430
1431 return jnl;
1432
1433 bad_journal:
1434 if (orig_blksz != 0) {
1435 phys_blksz = orig_blksz;
1436 VOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, FSCRED, NULL);
1437 }
1438 kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz);
1439 bad_kmem_alloc:
1440 FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
1441 return NULL;
1442 }
1443
1444 void
1445 journal_close(journal *jnl)
1446 {
1447 volatile off_t *start, *end;
1448 int counter=0;
1449
1450 CHECK_JOURNAL(jnl);
1451
1452 // set this before doing anything that would block so that
1453 // we start tearing things down properly.
1454 //
1455 jnl->flags |= JOURNAL_CLOSE_PENDING;
1456
1457 if (jnl->owner != current_act()) {
1458 int ret;
1459
1460 ret = lockmgr(&jnl->jlock, LK_EXCLUSIVE|LK_RETRY, NULL, current_proc());
1461 if (ret != 0) {
1462 printf("jnl: close: locking the journal (0x%x) failed %d.\n", jnl, ret);
1463 return;
1464 }
1465 }
1466
1467 //
1468 // only write stuff to disk if the journal is still valid
1469 //
1470 if ((jnl->flags & JOURNAL_INVALID) == 0) {
1471
1472 if (jnl->active_tr) {
1473 journal_end_transaction(jnl);
1474 }
1475
1476 // flush any buffered transactions
1477 if (jnl->cur_tr) {
1478 transaction *tr = jnl->cur_tr;
1479
1480 jnl->cur_tr = NULL;
1481 end_transaction(tr, 1); // force it to get flushed
1482 }
1483
1484 //start = &jnl->jhdr->start;
1485 start = &jnl->active_start;
1486 end = &jnl->jhdr->end;
1487
1488 while (*start != *end && counter++ < 500) {
1489 printf("jnl: close: flushing the buffer cache (start 0x%llx end 0x%llx)\n", *start, *end);
1490 if (jnl->flush) {
1491 jnl->flush(jnl->flush_arg);
1492 }
1493 tsleep((caddr_t)jnl, PRIBIO, "jnl_close", 1);
1494 }
1495
1496 if (*start != *end) {
1497 printf("jnl: close: buffer flushing didn't seem to flush out all the transactions! (0x%llx - 0x%llx)\n",
1498 *start, *end);
1499 }
1500
1501 // make sure this is in sync when we close the journal
1502 jnl->jhdr->start = jnl->active_start;
1503
1504 // if this fails there's not much we can do at this point...
1505 write_journal_header(jnl);
1506 } else {
1507 // if we're here the journal isn't valid any more.
1508 // so make sure we don't leave any locked blocks lying around
1509 printf("jnl: close: journal 0x%x is invalid. aborting outstanding transactions\n", jnl);
1510 if (jnl->active_tr || jnl->cur_tr) {
1511 transaction *tr;
1512 if (jnl->active_tr) {
1513 tr = jnl->active_tr;
1514 jnl->active_tr = NULL;
1515 } else {
1516 tr = jnl->cur_tr;
1517 jnl->cur_tr = NULL;
1518 }
1519
1520 abort_transaction(jnl, tr);
1521 if (jnl->active_tr || jnl->cur_tr) {
1522 panic("jnl: close: jnl @ 0x%x had both an active and cur tr\n", jnl);
1523 }
1524 }
1525 }
1526
1527 free_old_stuff(jnl);
1528
1529 kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, jnl->jhdr->jhdr_size);
1530 jnl->jhdr = (void *)0xbeefbabe;
1531
1532 FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
1533 }
1534
1535 static void
1536 dump_journal(journal *jnl)
1537 {
1538 transaction *ctr;
1539
1540 printf("journal:");
1541 printf(" jdev_offset %.8llx\n", jnl->jdev_offset);
1542 printf(" magic: 0x%.8x\n", jnl->jhdr->magic);
1543 printf(" start: 0x%.8llx\n", jnl->jhdr->start);
1544 printf(" end: 0x%.8llx\n", jnl->jhdr->end);
1545 printf(" size: 0x%.8llx\n", jnl->jhdr->size);
1546 printf(" blhdr size: %d\n", jnl->jhdr->blhdr_size);
1547 printf(" jhdr size: %d\n", jnl->jhdr->jhdr_size);
1548 printf(" chksum: 0x%.8x\n", jnl->jhdr->checksum);
1549
1550 printf(" completed transactions:\n");
1551 for(ctr=jnl->completed_trs; ctr; ctr=ctr->next) {
1552 printf(" 0x%.8llx - 0x%.8llx\n", ctr->journal_start, ctr->journal_end);
1553 }
1554 }
1555
1556
1557
1558 static off_t
1559 free_space(journal *jnl)
1560 {
1561 off_t free_space;
1562
1563 if (jnl->jhdr->start < jnl->jhdr->end) {
1564 free_space = jnl->jhdr->size - (jnl->jhdr->end - jnl->jhdr->start) - jnl->jhdr->jhdr_size;
1565 } else if (jnl->jhdr->start > jnl->jhdr->end) {
1566 free_space = jnl->jhdr->start - jnl->jhdr->end;
1567 } else {
1568 // journal is completely empty
1569 free_space = jnl->jhdr->size - jnl->jhdr->jhdr_size;
1570 }
1571
1572 return free_space;
1573 }
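//
// Worked example (made-up numbers) of the circular-log arithmetic above:
// with size = 0x8000 and jhdr_size = 0x200, start = 0x1000 and
// end = 0x3000 gives 0x8000 - 0x2000 - 0x200 = 0x5e00 bytes free;
// if the log has wrapped so that start = 0x3000 and end = 0x1000, the
// free space is simply start - end = 0x2000; and start == end means an
// empty journal with size - jhdr_size bytes available.
//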
1574
1575
1576 //
1577 // The journal must be locked on entry to this function.
1578 // The "desired_size" is in bytes.
1579 //
1580 static int
1581 check_free_space(journal *jnl, int desired_size)
1582 {
1583 int i, counter=0;
1584
1585 //printf("jnl: check free space (desired 0x%x, avail 0x%Lx)\n",
1586 // desired_size, free_space(jnl));
1587
1588 while (1) {
1589 int old_start_empty;
1590
1591 if (counter++ == 5000) {
1592 dump_journal(jnl);
1593 panic("jnl: check_free_space: buffer flushing isn't working "
1594 "(jnl @ 0x%x s %lld e %lld f %lld [active start %lld]).\n", jnl,
1595 jnl->jhdr->start, jnl->jhdr->end, free_space(jnl), jnl->active_start);
1596 }
1597 if (counter > 7500) {
1598 printf("jnl: check_free_space: giving up waiting for free space.\n");
1599 return ENOSPC;
1600 }
1601
1602 // make sure there's space in the journal to hold this transaction
1603 if (free_space(jnl) > desired_size) {
1604 break;
1605 }
1606
1607 //
1608 // here's where we lazily bump up jnl->jhdr->start. we'll consume
1609 // entries until there is enough space for the next transaction.
1610 //
1611 old_start_empty = 1;
1612 simple_lock(&jnl->old_start_lock);
1613 for(i=0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {
1614 int counter;
1615
1616 counter = 0;
1617 while (jnl->old_start[i] & 0x8000000000000000LL) {
1618 if (counter++ > 100) {
1619 panic("jnl: check_free_space: tr starting @ 0x%llx not flushing (jnl 0x%x).\n",
1620 jnl->old_start[i], jnl);
1621 }
1622
1623 simple_unlock(&jnl->old_start_lock);
1624 if (jnl->flush) {
1625 jnl->flush(jnl->flush_arg);
1626 }
1627 tsleep((caddr_t)jnl, PRIBIO, "check_free_space1", 1);
1628 simple_lock(&jnl->old_start_lock);
1629 }
1630
1631 if (jnl->old_start[i] == 0) {
1632 continue;
1633 }
1634
1635 old_start_empty = 0;
1636 jnl->jhdr->start = jnl->old_start[i];
1637 jnl->old_start[i] = 0;
1638 if (free_space(jnl) > desired_size) {
1639 write_journal_header(jnl);
1640 break;
1641 }
1642 }
1643 simple_unlock(&jnl->old_start_lock);
1644
1645 // if we bumped the start, loop and try again
1646 if (i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
1647 continue;
1648 } else if (old_start_empty) {
1649 //
1650 // if there is nothing in old_start anymore then we can
1651 // bump the jhdr->start to be the same as active_start
1652 // since it is possible there was only one very large
1653 // transaction in the old_start array. if we didn't do
1654 // this then jhdr->start would never get updated and we
1655 // would wind up looping until we hit the panic at the
1656 // start of the loop.
1657 //
1658 jnl->jhdr->start = jnl->active_start;
1659 write_journal_header(jnl);
1660 continue;
1661 }
1662
1663
1664 // if the file system gave us a flush function, call it so that
1665 // it can flush some blocks which hopefully will cause some transactions
1666 // to complete and thus free up space in the journal.
1667 if (jnl->flush) {
1668 jnl->flush(jnl->flush_arg);
1669 }
1670
1671 // wait for a while to avoid being cpu-bound (this will
1672 // put us to sleep for 10 milliseconds)
1673 tsleep((caddr_t)jnl, PRIBIO, "check_free_space2", 1);
1674 }
1675
1676 return 0;
1677 }
1678
1679 int
1680 journal_start_transaction(journal *jnl)
1681 {
1682 int ret;
1683 transaction *tr;
1684 int prev_priv;
1685
1686 CHECK_JOURNAL(jnl);
1687
1688 if (jnl->flags & JOURNAL_INVALID) {
1689 return EINVAL;
1690 }
1691
1692 if (jnl->owner == current_act()) {
1693 if (jnl->active_tr == NULL) {
1694 panic("jnl: start_tr: active_tr is NULL (jnl @ 0x%x, owner 0x%x, current_act 0x%x)\n",
1695 jnl, jnl->owner, current_act());
1696 }
1697 jnl->nested_count++;
1698 return 0;
1699 }
1700
1701 ret = lockmgr(&jnl->jlock, LK_EXCLUSIVE|LK_RETRY, NULL, current_proc());
1702 if (ret != 0) {
1703 printf("jnl: start_tr: locking the journal (0x%x) failed %d.\n", jnl, ret);
1704 return EINVAL;
1705 }
1706
1707 if (jnl->owner != NULL || jnl->nested_count != 0 || jnl->active_tr != NULL) {
1708 panic("jnl: start_tr: owner 0x%x, nested count 0x%x, active_tr 0x%x jnl @ 0x%x\n",
1709 jnl->owner, jnl->nested_count, jnl->active_tr, jnl);
1710 }
1711
1712 jnl->owner = current_act();
1713 jnl->nested_count = 1;
1714
1715 free_old_stuff(jnl);
1716
1717 // make sure there's room in the journal
1718 if (check_free_space(jnl, jnl->tbuffer_size) != 0) {
1719 printf("jnl: start transaction failed: no space\n");
1720 ret = ENOSPC;
1721 goto bad_start;
1722 }
1723
1724 // if there's a buffered transaction, use it.
1725 if (jnl->cur_tr) {
1726 jnl->active_tr = jnl->cur_tr;
1727 jnl->cur_tr = NULL;
1728
1729 return 0;
1730 }
1731
1732 MALLOC_ZONE(tr, transaction *, sizeof(transaction), M_JNL_TR, M_WAITOK);
1733 memset(tr, 0, sizeof(transaction));
1734
1735 tr->tbuffer_size = jnl->tbuffer_size;
1736 thread_wire_internal(host_priv_self(), current_act(), TRUE, &prev_priv);
1737 if (kmem_alloc(kernel_map, (vm_offset_t *)&tr->tbuffer, tr->tbuffer_size)) {
1738 FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
1739 printf("jnl: start transaction failed: no tbuffer mem\n");
1740 ret = ENOMEM;
1741 thread_wire_internal(host_priv_self(), current_act(), prev_priv, NULL);
1742 goto bad_start;
1743 }
1744 thread_wire_internal(host_priv_self(), current_act(), prev_priv, NULL);
1745
1746 // journal replay code checksum check depends on this.
1747 memset(tr->tbuffer, 0, BLHDR_CHECKSUM_SIZE);
1748
1749 tr->blhdr = (block_list_header *)tr->tbuffer;
1750 tr->blhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
1751 tr->blhdr->num_blocks = 1; // accounts for this header block
1752 tr->blhdr->bytes_used = jnl->jhdr->blhdr_size;
1753
1754 tr->num_blhdrs = 1;
1755 tr->total_bytes = jnl->jhdr->blhdr_size;
1756 tr->jnl = jnl;
1757
1758 jnl->active_tr = tr;
1759
1760 // printf("jnl: start_tr: owner 0x%x new tr @ 0x%x\n", jnl->owner, tr);
1761
1762 return 0;
1763
1764 bad_start:
1765 jnl->owner = NULL;
1766 jnl->nested_count = 0;
1767 lockmgr(&jnl->jlock, LK_RELEASE, NULL, current_proc());
1768 return ret;
1769 }
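// Illustrative sketch (compiled out): nesting.  If the calling thread already
// owns the journal, journal_start_transaction() only bumps nested_count and
// returns, and only the matching outermost journal_end_transaction() really
// ends the transaction.  The my_fs_update_*() helpers are hypothetical
// routines that start and end a transaction of their own.
#if 0
extern void my_fs_update_inode(journal *jnl);       // hypothetical
extern void my_fs_update_bitmap(journal *jnl);      // hypothetical

static int
my_fs_rename(journal *jnl)
{
	int ret;

	ret = journal_start_transaction(jnl);   // outermost call: takes jlock
	if (ret) {
		return ret;
	}

	my_fs_update_inode(jnl);     // any journal_start/_end calls made in
	my_fs_update_bitmap(jnl);    // here simply nest and return

	return journal_end_transaction(jnl);    // nested_count drops to 0 and
	                                        // the transaction really ends
}
#endif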
1770
1771
1772 int
1773 journal_modify_block_start(journal *jnl, struct buf *bp)
1774 {
1775 transaction *tr;
1776
1777 CHECK_JOURNAL(jnl);
1778
1779 if (jnl->flags & JOURNAL_INVALID) {
1780 return EINVAL;
1781 }
1782
1783 // XXXdbg - for debugging I want this to be true. later it may
1784 // not be necessary.
1785 if ((bp->b_flags & B_META) == 0) {
1786 panic("jnl: modify_block_start: bp @ 0x%x is not a meta-data block! (jnl 0x%x)\n", bp, jnl);
1787 }
1788
1789 tr = jnl->active_tr;
1790 CHECK_TRANSACTION(tr);
1791
1792 if (jnl->owner != current_act()) {
1793 panic("jnl: modify_block_start: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
1794 jnl, jnl->owner, current_act());
1795 }
1796
1797 free_old_stuff(jnl);
1798
1799 //printf("jnl: mod block start (bp 0x%x vp 0x%x l/blkno %d/%d bsz %d; total bytes %d)\n",
1800 // bp, bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_bufsize, tr->total_bytes);
1801
1802 // can't allow blocks that aren't an even multiple of the
1803 // underlying block size.
1804 if ((bp->b_bufsize % jnl->jhdr->jhdr_size) != 0) {
1805 panic("jnl: mod block start: bufsize %d not a multiple of block size %d\n",
1806 bp->b_bufsize, jnl->jhdr->jhdr_size);
1807 return -1;
1808 }
1809
1810 // make sure that this transaction isn't bigger than the whole journal
1811 if (tr->total_bytes+bp->b_bufsize >= (jnl->jhdr->size - jnl->jhdr->jhdr_size)) {
1812 panic("jnl: transaction too big (%d >= %lld bytes, bufsize %d, tr 0x%x bp 0x%x)\n",
1813 tr->total_bytes, (tr->jnl->jhdr->size - jnl->jhdr->jhdr_size), bp->b_bufsize, tr, bp);
1814 return -1;
1815 }
1816
1817 // if the block is dirty and not already locked we have to write
1818 // it out before we muck with it because it has data that belongs
1819 // (presumably) to another transaction.
1820 //
1821 if ((bp->b_flags & B_DELWRI) && (bp->b_flags & B_LOCKED) == 0) {
1822
1823 // this will cause it to not be brelse()'d
1824 bp->b_flags |= B_NORELSE;
1825 VOP_BWRITE(bp);
1826 }
1827
1828 bp->b_flags |= B_LOCKED;
1829
1830 return 0;
1831 }
1832
1833 int
1834 journal_modify_block_abort(journal *jnl, struct buf *bp)
1835 {
1836 transaction *tr;
1837 block_list_header *blhdr;
1838 int i;
1839
1840 CHECK_JOURNAL(jnl);
1841
1842 tr = jnl->active_tr;
1843
1844 //
1845 // if there's no active transaction then we just want to
1846 // call brelse() and return since this is just a block
1847 // that happened to be modified as part of another tr.
1848 //
1849 if (tr == NULL) {
1850 brelse(bp);
1851 return 0;
1852 }
1853
1854 if (jnl->flags & JOURNAL_INVALID) {
1855 return EINVAL;
1856 }
1857
1858 CHECK_TRANSACTION(tr);
1859
1860 if (jnl->owner != current_act()) {
1861 panic("jnl: modify_block_abort: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
1862 jnl, jnl->owner, current_act());
1863 }
1864
1865 free_old_stuff(jnl);
1866
1867 // printf("jnl: modify_block_abort: tr 0x%x bp 0x%x\n", jnl->active_tr, bp);
1868
1869 // first check if it's already part of this transaction
1870 for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
1871 for(i=1; i < blhdr->num_blocks; i++) {
1872 if (bp == blhdr->binfo[i].bp) {
1873 if (bp->b_bufsize != blhdr->binfo[i].bsize) {
1874 panic("jnl: bp @ 0x%x changed size on me! (%d vs. %d, jnl 0x%x)\n",
1875 bp, bp->b_bufsize, blhdr->binfo[i].bsize, jnl);
1876 }
1877 break;
1878 }
1879 }
1880
1881 if (i < blhdr->num_blocks) {
1882 break;
1883 }
1884 }
1885
1886 //
1887 // if blhdr is null, then this block has only had modify_block_start
1888 // called on it as part of the current transaction. that means that
1889 // it is ok to clear the LOCKED bit since it hasn't actually been
1890 // modified. if blhdr is non-null then modify_block_end was called
1891 // on it and so we need to keep it locked in memory.
1892 //
1893 if (blhdr == NULL) {
1894 bp->b_flags &= ~(B_LOCKED);
1895 }
1896
1897 brelse(bp);
1898 return 0;
1899 }
1900
1901
1902 int
1903 journal_modify_block_end(journal *jnl, struct buf *bp)
1904 {
1905 int i, tbuffer_offset;
1906 char *blkptr;
1907 block_list_header *blhdr, *prev=NULL;
1908 transaction *tr;
1909
1910 CHECK_JOURNAL(jnl);
1911
1912 if (jnl->flags & JOURNAL_INVALID) {
1913 return EINVAL;
1914 }
1915
1916 tr = jnl->active_tr;
1917 CHECK_TRANSACTION(tr);
1918
1919 if (jnl->owner != current_act()) {
1920 panic("jnl: modify_block_end: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
1921 jnl, jnl->owner, current_act());
1922 }
1923
1924 free_old_stuff(jnl);
1925
1926 //printf("jnl: mod block end: (bp 0x%x vp 0x%x l/blkno %d/%d bsz %d, total bytes %d)\n",
1927 // bp, bp->b_vp, bp->b_lblkno, bp->b_blkno, bp->b_bufsize, tr->total_bytes);
1928
1929 if ((bp->b_flags & B_LOCKED) == 0) {
1930 panic("jnl: modify_block_end: bp 0x%x not locked! jnl @ 0x%x\n", bp, jnl);
1931 bp->b_flags |= B_LOCKED;
1932 }
1933
1934 // first check if it's already part of this transaction
1935 for(blhdr=tr->blhdr; blhdr; prev=blhdr,blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
1936 tbuffer_offset = jnl->jhdr->blhdr_size;
1937
1938 for(i=1; i < blhdr->num_blocks; i++) {
1939 if (bp == blhdr->binfo[i].bp) {
1940 if (bp->b_bufsize != blhdr->binfo[i].bsize) {
1941 panic("jnl: bp @ 0x%x changed size on me! (%d vs. %d, jnl 0x%x)\n",
1942 bp, bp->b_bufsize, blhdr->binfo[i].bsize, jnl);
1943 }
1944 break;
1945 }
1946 tbuffer_offset += blhdr->binfo[i].bsize;
1947 }
1948
1949 if (i < blhdr->num_blocks) {
1950 break;
1951 }
1952 }
1953
1954 if (blhdr == NULL
1955 && prev
1956 && (prev->num_blocks+1) <= prev->max_blocks
1957 && (prev->bytes_used+bp->b_bufsize) <= tr->tbuffer_size) {
1958 blhdr = prev;
1959 } else if (blhdr == NULL) {
1960 block_list_header *nblhdr;
1961 int prev_priv;
1962
1963 if (prev == NULL) {
1964 panic("jnl: modify block end: no way man, prev == NULL?!?, jnl 0x%x, bp 0x%x\n", jnl, bp);
1965 }
1966
1967 // we got to the end of the list, didn't find the block and there's
1968 // no room in the block_list_header pointed to by prev
1969
1970 // we allocate another tbuffer and link it in at the end of the list
1971 // through prev->binfo[0].bnum. that's a skanky way to do things but
1972 // avoids having yet another linked list of small data structures to manage.
1973
1974 thread_wire_internal(host_priv_self(), current_act(), TRUE, &prev_priv);
1975 if (kmem_alloc(kernel_map, (vm_offset_t *)&nblhdr, tr->tbuffer_size)) {
1976 panic("jnl: end_tr: no space for new block tr @ 0x%x (total bytes: %d)!\n",
1977 tr, tr->total_bytes);
1978 }
1979 thread_wire_internal(host_priv_self(), current_act(), prev_priv, NULL);
1980
1981 // journal replay code checksum check depends on this.
1982 memset(nblhdr, 0, BLHDR_CHECKSUM_SIZE);
1983
1984 // initialize the new guy
1985 nblhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
1986 nblhdr->num_blocks = 1; // accounts for this header block
1987 nblhdr->bytes_used = jnl->jhdr->blhdr_size;
1988
1989 tr->num_blhdrs++;
1990 tr->total_bytes += jnl->jhdr->blhdr_size;
1991
1992 // then link him in at the end
1993 prev->binfo[0].bnum = (off_t)((long)nblhdr);
1994
1995 // and finally switch to using the new guy
1996 blhdr = nblhdr;
1997 tbuffer_offset = jnl->jhdr->blhdr_size;
1998 i = 1;
1999 }
2000
2001
2002 if ((i+1) > blhdr->max_blocks) {
2003 panic("jnl: modify_block_end: i = %d, max_blocks %d\n", i, blhdr->max_blocks);
2004 }
2005
2006 // copy the data into the in-memory transaction buffer
2007 blkptr = (char *)&((char *)blhdr)[tbuffer_offset];
2008 memcpy(blkptr, bp->b_data, bp->b_bufsize);
2009
2010 // if i is past the blocks already recorded in this header, this is a new block we haven't seen before
2011 if (i >= blhdr->num_blocks) {
2012 vget(bp->b_vp, 0, current_proc());
2013
2014 blhdr->binfo[i].bnum = (off_t)((unsigned)bp->b_blkno);
2015 blhdr->binfo[i].bsize = bp->b_bufsize;
2016 blhdr->binfo[i].bp = bp;
2017
2018 blhdr->bytes_used += bp->b_bufsize;
2019 tr->total_bytes += bp->b_bufsize;
2020
2021 blhdr->num_blocks++;
2022 }
2023
2024 bdwrite(bp);
2025
2026 return 0;
2027 }
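// Illustrative sketch (compiled out): walking the chain of block_list_headers.
// As noted above, binfo[0].bnum of each header doubles as the pointer to the
// next tbuffer in the transaction, so the chain is walked the same way the
// loops in this file walk it.  This helper only counts the real blocks.
#if 0
static int
sketch_count_blocks(transaction *tr)
{
	block_list_header *blhdr;
	int count = 0;

	for (blhdr = tr->blhdr;
	     blhdr;
	     blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) {
		// entry 0 is the header/link itself; real blocks start at index 1
		count += blhdr->num_blocks - 1;
	}

	return count;
}
#endif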
2028
2029 int
2030 journal_kill_block(journal *jnl, struct buf *bp)
2031 {
2032 int i;
2033 block_list_header *blhdr;
2034 transaction *tr;
2035
2036 CHECK_JOURNAL(jnl);
2037
2038 if (jnl->flags & JOURNAL_INVALID) {
2039 return EINVAL;
2040 }
2041
2042 tr = jnl->active_tr;
2043 CHECK_TRANSACTION(tr);
2044
2045 if (jnl->owner != current_act()) {
2046 panic("jnl: kill_block: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
2047 jnl, jnl->owner, current_act());
2048 }
2049
2050 free_old_stuff(jnl);
2051
2052 if ((bp->b_flags & B_LOCKED) == 0) {
2053 panic("jnl: kill block: bp 0x%x not locked! jnl @ 0x%x\n", bp, jnl);
2054 }
2055
2056 // first check if it's already part of this transaction
2057 for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
2058
2059 for(i=1; i < blhdr->num_blocks; i++) {
2060 if (bp == blhdr->binfo[i].bp) {
2061 bp->b_flags &= ~B_LOCKED;
2062
2063 // this undoes the vget() in journal_modify_block_end()
2064 vrele(bp->b_vp);
2065
2066 // if the block has the DELWRI and CALL bits set, then
2067 // things are seriously weird. if it was part of another
2068 // transaction then journal_modify_block_start() should
2069 // have forced it to be written.
2070 //
2071 if ((bp->b_flags & B_DELWRI) && (bp->b_flags & B_CALL)) {
2072 panic("jnl: kill block: this defies all logic! bp 0x%x\n", bp);
2073 } else {
2074 tr->num_killed += bp->b_bufsize;
2075 }
2076
2077 if (bp->b_flags & B_BUSY) {
2078 brelse(bp);
2079 }
2080
2081 blhdr->binfo[i].bp = NULL;
2082 blhdr->binfo[i].bnum = (off_t)-1;
2083 break;
2084 }
2085 }
2086
2087 if (i < blhdr->num_blocks) {
2088 break;
2089 }
2090 }
2091
2092 return 0;
2093 }
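// Illustrative sketch (compiled out): when journal_kill_block() is used.  A
// block that went through journal_modify_block_start()/_end() earlier in this
// same transaction is later freed by the file system, so its journaled copy
// no longer needs to reach disk.  Killing it drops the reference taken by
// journal_modify_block_end().  my_fs_free_node() is a hypothetical helper.
#if 0
extern void my_fs_free_node(struct buf *bp);        // hypothetical

static void
my_fs_delete_node(journal *jnl, struct buf *bp)
{
	// bp is still B_LOCKED from the earlier modify_block_start/_end calls
	my_fs_free_node(bp);
	journal_kill_block(jnl, bp);
}
#endif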
2094
2095
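// comparator for the qsort() in end_transaction(): orders binfo entries by
// physical block number and pushes killed entries (NULL bp) to the end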
2096 static int
2097 journal_binfo_cmp(void *a, void *b)
2098 {
2099 block_info *bi_a = (struct block_info *)a,
2100 *bi_b = (struct block_info *)b;
2101 daddr_t res;
2102
2103 if (bi_a->bp == NULL) {
2104 return 1;
2105 }
2106 if (bi_b->bp == NULL) {
2107 return -1;
2108 }
2109
2110 // don't have to worry about negative block
2111 // numbers so this is ok to do.
2112 //
2113 res = (bi_a->bp->b_blkno - bi_b->bp->b_blkno);
2114
2115 return (int)res;
2116 }
2117
2118
2119 static int
2120 end_transaction(transaction *tr, int force_it)
2121 {
2122 int i, ret, amt;
2123 off_t end;
2124 journal *jnl = tr->jnl;
2125 struct buf *bp;
2126 block_list_header *blhdr=NULL, *next=NULL;
2127
2128 if (jnl->cur_tr) {
2129 panic("jnl: jnl @ 0x%x already has cur_tr 0x%x, new tr: 0x%x\n",
2130 jnl, jnl->cur_tr, tr);
2131 }
2132
2133 // if there weren't any modified blocks in the transaction
2134 // just save off the transaction pointer and return.
2135 if (tr->total_bytes == jnl->jhdr->blhdr_size) {
2136 jnl->cur_tr = tr;
2137 return 0;
2138 }
2139
2140 // if our transaction buffer isn't very full, just hang
2141 // on to it and don't actually flush anything. this is
2142 // what is known as "group commit". we will flush the
2143 // transaction buffer if it's full or if we have more than
2144 // one of them so we don't start hogging too much memory.
2145 //
2146 if ( force_it == 0
2147 && (jnl->flags & JOURNAL_NO_GROUP_COMMIT) == 0
2148 && tr->num_blhdrs < 3
2149 && (tr->total_bytes <= ((tr->tbuffer_size*tr->num_blhdrs) - tr->tbuffer_size/8))) {
2150
2151 jnl->cur_tr = tr;
2152 return 0;
2153 }
2154
2155
2156 // if we're here we're going to flush the transaction buffer to disk.
2157 // make sure there is room in the journal first.
2158 check_free_space(jnl, tr->total_bytes);
2159
2160 // range check the end index
2161 if (jnl->jhdr->end <= 0 || jnl->jhdr->end > jnl->jhdr->size) {
2162 panic("jnl: end_transaction: end is bogus 0x%llx (sz 0x%llx)\n",
2163 jnl->jhdr->end, jnl->jhdr->size);
2164 }
2165
2166 // this transaction starts where the current journal ends
2167 tr->journal_start = jnl->jhdr->end;
2168 end = jnl->jhdr->end;
2169
2170 //
2171 // if the first entry in old_start[] isn't free yet, loop calling the
2172 // file system flush routine until it is (or we panic).
2173 //
2174 i = 0;
2175 simple_lock(&jnl->old_start_lock);
2176 while ((jnl->old_start[0] & 0x8000000000000000LL) != 0) {
2177 if (jnl->flush) {
2178 simple_unlock(&jnl->old_start_lock);
2179
2180 if (jnl->flush) {
2181 jnl->flush(jnl->flush_arg);
2182 }
2183
2184 // yield the cpu so others can get in to clear the lock bit
2185 (void)tsleep((void *)jnl, PRIBIO, "jnl-old-start-sleep", 1);
2186
2187 simple_lock(&jnl->old_start_lock);
2188 }
2189 if (i++ >= 100) {
2190 panic("jnl: transaction that started at 0x%llx is not completing! jnl 0x%x\n",
2191 jnl->old_start[0] & (~0x8000000000000000LL), jnl);
2192 }
2193 }
2194
2195 //
2196 // slide everyone else down and put our latest guy in the last
2197 // entry in the old_start array
2198 //
2199 memcpy(&jnl->old_start[0], &jnl->old_start[1], sizeof(jnl->old_start)-sizeof(jnl->old_start[0]));
2200 jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] = tr->journal_start | 0x8000000000000000LL;
2201
2202 simple_unlock(&jnl->old_start_lock);
2203
2204
2205 // for each block, make sure that the physical block # is set
2206 for(blhdr=tr->blhdr; blhdr; blhdr=next) {
2207
2208 for(i=1; i < blhdr->num_blocks; i++) {
2209
2210 bp = blhdr->binfo[i].bp;
2211 if (bp == NULL) { // only true if a block was "killed"
2212 if (blhdr->binfo[i].bnum != (off_t)-1) {
2213 panic("jnl: inconsistent binfo (NULL bp w/bnum %lld; jnl @ 0x%x, tr 0x%x)\n",
2214 blhdr->binfo[i].bnum, jnl, tr);
2215 }
2216 continue;
2217 }
2218
2219 if (bp->b_vp == NULL && bp->b_lblkno == bp->b_blkno) {
2220 panic("jnl: end_tr: DANGER! bp @ 0x%x w/null vp and l/blkno = %d/%d\n",
2221 bp, bp->b_lblkno, bp->b_blkno);
2222 }
2223
2224 // if the lblkno is the same as blkno and this bp isn't
2225 // associated with the underlying file system device then
2226 // we need to call bmap() to get the actual physical block.
2227 //
2228 if ((bp->b_lblkno == bp->b_blkno) && (bp->b_vp != jnl->fsdev)) {
2229 if (VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL) != 0) {
2230 printf("jnl: end_tr: can't bmap the bp @ 0x%x, jnl 0x%x\n", bp, jnl);
2231 goto bad_journal;
2232 }
2233 }
2234
2235 // update this so we write out the correct physical block number!
2236 blhdr->binfo[i].bnum = (off_t)((unsigned)bp->b_blkno);
2237 }
2238
2239 next = (block_list_header *)((long)blhdr->binfo[0].bnum);
2240 }
2241
2242 for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
2243
2244 amt = blhdr->bytes_used;
2245
2246 blhdr->checksum = 0;
2247 blhdr->checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
2248
2249 ret = write_journal_data(jnl, &end, blhdr, amt);
2250 if (ret != amt) {
2251 printf("jnl: end_transaction: only wrote %d of %d bytes to the journal!\n",
2252 ret, amt);
2253
2254 goto bad_journal;
2255 }
2256 }
2257
2258 jnl->jhdr->end = end; // update where the journal now ends
2259 tr->journal_end = end; // the transaction ends here too
2260 if (tr->journal_start == 0 || tr->journal_end == 0) {
2261 panic("jnl: end_transaction: bad tr journal start/end: 0x%llx 0x%llx\n",
2262 tr->journal_start, tr->journal_end);
2263 }
2264
2265 if (write_journal_header(jnl) != 0) {
2266 goto bad_journal;
2267 }
2268
2269 //
2270 // set up for looping through all the blhdr's. we null out the
2271 // tbuffer and blhdr fields so that they're not used any more.
2272 //
2273 blhdr = tr->blhdr;
2274 tr->tbuffer = NULL;
2275 tr->blhdr = NULL;
2276
2277 // the buffer_flushed_callback will only be called for the
2278 // real blocks that get flushed so we have to account for
2279 // the block_list_headers here.
2280 //
2281 tr->num_flushed = tr->num_blhdrs * jnl->jhdr->blhdr_size;
2282
2283 // for each block, set the iodone callback and unlock it
2284 for(; blhdr; blhdr=next) {
2285
2286 // we can re-order the buf ptrs because everything is written out already
2287 qsort(&blhdr->binfo[1], blhdr->num_blocks-1, sizeof(block_info), journal_binfo_cmp);
2288
2289 for(i=1; i < blhdr->num_blocks; i++) {
2290 if (blhdr->binfo[i].bp == NULL) {
2291 continue;
2292 }
2293
2294 ret = meta_bread(blhdr->binfo[i].bp->b_vp,
2295 (daddr_t)blhdr->binfo[i].bp->b_lblkno,
2296 blhdr->binfo[i].bp->b_bufsize,
2297 NOCRED,
2298 &bp);
2299 if (ret == 0 && bp != NULL) {
2300 struct vnode *save_vp;
2301
2302 if (bp != blhdr->binfo[i].bp) {
2303 panic("jnl: end_tr: got back a different bp! (bp 0x%x should be 0x%x, jnl 0x%x)\n",
2304 bp, blhdr->binfo[i].bp, jnl);
2305 }
2306
2307 if ((bp->b_flags & (B_LOCKED|B_DELWRI)) != (B_LOCKED|B_DELWRI)) {
2308 if (jnl->flags & JOURNAL_CLOSE_PENDING) {
2309 brelse(bp);
2310 continue;
2311 } else {
2312 panic("jnl: end_tr: !!!DANGER!!! bp 0x%x flags (0x%x) not LOCKED & DELWRI\n", bp, bp->b_flags);
2313 }
2314 }
2315
2316 if (bp->b_iodone != NULL) {
2317 panic("jnl: bp @ 0x%x (blkno %d, vp 0x%x) has non-null iodone (0x%x) buffflushcb 0x%x\n",
2318 bp, bp->b_blkno, bp->b_vp, bp->b_iodone, buffer_flushed_callback);
2319 }
2320
2321 save_vp = bp->b_vp;
2322
2323 bp->b_iodone = buffer_flushed_callback;
2324 bp->b_transaction = tr;
2325 bp->b_flags |= B_CALL;
2326 bp->b_flags &= ~(B_LOCKED);
2327
2328 // kicking off the write here helps performance
2329 bawrite(bp);
2330 // XXXdbg this is good for testing: bdwrite(bp);
2331 //bdwrite(bp);
2332
2333 // this undoes the vget() in journal_modify_block_end()
2334 vrele(save_vp);
2335
2336 } else {
2337 printf("jnl: end_transaction: could not find block %Ld vp 0x%x!\n",
2338 blhdr->binfo[i].bnum, blhdr->binfo[i].bp);
2339 if (bp) {
2340 brelse(bp);
2341 }
2342 }
2343 }
2344
2345 next = (block_list_header *)((long)blhdr->binfo[0].bnum);
2346
2347 // we can free blhdr here since we won't need it any more
2348 blhdr->binfo[0].bnum = 0xdeadc0de;
2349 kmem_free(kernel_map, (vm_offset_t)blhdr, tr->tbuffer_size);
2350 }
2351
2352 //printf("jnl: end_tr: tr @ 0x%x, jnl-blocks: 0x%llx - 0x%llx. exit!\n",
2353 // tr, tr->journal_start, tr->journal_end);
2354 return 0;
2355
2356
2357 bad_journal:
2358 jnl->flags |= JOURNAL_INVALID;
2359 abort_transaction(jnl, tr);
2360 return -1;
2361 }
2362
2363 static void
2364 abort_transaction(journal *jnl, transaction *tr)
2365 {
2366 int i, ret;
2367 block_list_header *blhdr, *next;
2368 struct buf *bp;
2369 struct vnode *save_vp;
2370
2371 // for each block list header, iterate over the blocks then
2372 // free up the memory associated with the block list.
2373 //
2374 // for each block, clear the lock bit and release it.
2375 //
2376 for(blhdr=tr->blhdr; blhdr; blhdr=next) {
2377
2378 for(i=1; i < blhdr->num_blocks; i++) {
2379 if (blhdr->binfo[i].bp == NULL) {
2380 continue;
2381 }
2382
2383 ret = meta_bread(blhdr->binfo[i].bp->b_vp,
2384 (daddr_t)blhdr->binfo[i].bp->b_lblkno,
2385 blhdr->binfo[i].bp->b_bufsize,
2386 NOCRED,
2387 &bp);
2388 if (ret == 0) {
2389 if (bp != blhdr->binfo[i].bp) {
2390 panic("jnl: abort_tr: got back a different bp! (bp 0x%x should be 0x%x, jnl 0x%x)\n",
2391 bp, blhdr->binfo[i].bp, jnl);
2392 }
2393
2394 // clear the locked bit and the delayed-write bit. we
2395 // don't want these blocks going to disk.
2396 bp->b_flags &= ~(B_LOCKED|B_DELWRI);
2397 bp->b_flags |= B_INVAL;
2398 save_vp = bp->b_vp;
2399
2400 brelse(bp);
2401
2402 vrele(save_vp);
2403
2404 } else {
2405 printf("jnl: abort_tr: could not find block %Ld vp 0x%x!\n",
2406 blhdr->binfo[i].bnum, blhdr->binfo[i].bp);
2407 if (bp) {
2408 brelse(bp);
2409 }
2410 }
2411 }
2412
2413 next = (block_list_header *)((long)blhdr->binfo[0].bnum);
2414
2415 // we can free blhdr here since we won't need it any more
2416 blhdr->binfo[0].bnum = 0xdeadc0de;
2417 kmem_free(kernel_map, (vm_offset_t)blhdr, tr->tbuffer_size);
2418 }
2419
2420 tr->tbuffer = NULL;
2421 tr->blhdr = NULL;
2422 tr->total_bytes = 0xdbadc0de;
2423 FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
2424 }
2425
2426
2427 int
2428 journal_end_transaction(journal *jnl)
2429 {
2430 int ret;
2431 transaction *tr;
2432
2433 CHECK_JOURNAL(jnl);
2434
2435 if ((jnl->flags & JOURNAL_INVALID) && jnl->owner == NULL) {
2436 return 0;
2437 }
2438
2439 if (jnl->owner != current_act()) {
2440 panic("jnl: end_tr: I'm not the owner! jnl 0x%x, owner 0x%x, curact 0x%x\n",
2441 jnl, jnl->owner, current_act());
2442 }
2443
2444 free_old_stuff(jnl);
2445
2446 jnl->nested_count--;
2447 if (jnl->nested_count > 0) {
2448 return 0;
2449 } else if (jnl->nested_count < 0) {
2450 panic("jnl: jnl @ 0x%x has negative nested count (%d). bad boy.\n", jnl, jnl->nested_count);
2451 }
2452
2453 if (jnl->flags & JOURNAL_INVALID) {
2454 if (jnl->active_tr) {
2455 transaction *tr;
2456
2457 if (jnl->cur_tr != NULL) {
2458 panic("jnl: journal @ 0x%x has active tr (0x%x) and cur tr (0x%x)\n",
2459 jnl, jnl->active_tr, jnl->cur_tr);
2460 }
2461
2462 tr = jnl->active_tr;
2463 jnl->active_tr = NULL;
2464 abort_transaction(jnl, tr);
2465 }
2466
2467 jnl->owner = NULL;
2468 lockmgr(&jnl->jlock, LK_RELEASE, NULL, current_proc());
2469
2470 return EINVAL;
2471 }
2472
2473 tr = jnl->active_tr;
2474 CHECK_TRANSACTION(tr);
2475
2476 // clear this out here so that when check_free_space() calls
2477 // the FS flush function, we don't panic in journal_flush()
2478 // if the FS were to call that. note: check_free_space() is
2479 // called from end_transaction().
2480 //
2481 jnl->active_tr = NULL;
2482 ret = end_transaction(tr, 0);
2483
2484 jnl->owner = NULL;
2485 lockmgr(&jnl->jlock, LK_RELEASE, NULL, current_proc());
2486
2487 return ret;
2488 }
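// Illustrative sketch (compiled out): the full per-update cycle as a file
// system would drive it: start a transaction, claim each meta-data buffer
// before touching it, hand it back when the modification is done, then end
// the transaction (which may group-commit or flush).  my_fs_read_meta() and
// the modification itself are hypothetical placeholders.
#if 0
extern struct buf *my_fs_read_meta(journal *jnl);   // hypothetical, e.g. a meta_bread()

static int
my_fs_metadata_update(journal *jnl)
{
	struct buf *bp;
	int ret;

	ret = journal_start_transaction(jnl);
	if (ret) {
		return ret;
	}

	bp = my_fs_read_meta(jnl);
	if (journal_modify_block_start(jnl, bp) == 0) {
		// ... modify bp->b_data here ...
		journal_modify_block_end(jnl, bp);   // copies the block into the tbuffer
	} else {
		journal_modify_block_abort(jnl, bp); // never modified; releases bp
	}

	return journal_end_transaction(jnl);
}
#endif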
2489
2490
2491 int
2492 journal_flush(journal *jnl)
2493 {
2494 int need_signal = 0;
2495
2496 CHECK_JOURNAL(jnl);
2497
2498 if (jnl->flags & JOURNAL_INVALID) {
2499 return -1;
2500 }
2501
2502 if (jnl->owner != current_act()) {
2503 int ret;
2504
2505 ret = lockmgr(&jnl->jlock, LK_EXCLUSIVE|LK_RETRY, NULL, current_proc());
2506 if (ret != 0) {
2507 printf("jnl: flush: locking the journal (0x%x) failed %d.\n", jnl, ret);
2508 return -1;
2509 }
2510 need_signal = 1;
2511 }
2512
2513 free_old_stuff(jnl);
2514
2515 // if we're not active, flush any buffered transactions
2516 if (jnl->active_tr == NULL && jnl->cur_tr) {
2517 transaction *tr = jnl->cur_tr;
2518
2519 jnl->cur_tr = NULL;
2520 end_transaction(tr, 1); // force it to get flushed
2521 }
2522
2523 if (need_signal) {
2524 lockmgr(&jnl->jlock, LK_RELEASE, NULL, current_proc());
2525 }
2526
2527 return 0;
2528 }
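// Illustrative sketch (compiled out): because of group commit a completed
// transaction can sit in jnl->cur_tr without having been written yet.  A file
// system's sync path can push it out explicitly.  In practice the journal
// pointer would come out of the mount's private data; it is passed directly
// here to keep the sketch self-contained.
#if 0
static int
my_fs_sync_journal(journal *jnl)
{
	// forces any buffered (group-committed) transaction to disk
	return journal_flush(jnl);
}
#endif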
2529
2530 int
2531 journal_active(journal *jnl)
2532 {
2533 if (jnl->flags & JOURNAL_INVALID) {
2534 return -1;
2535 }
2536
2537 return (jnl->active_tr == NULL) ? 0 : 1;
2538 }