]> git.saurik.com Git - apple/xnu.git/blob - bsd/vfs/vfs_journal.c
7a60648c40dcacc0f87cee05f776d789db81ee06
[apple/xnu.git] / bsd / vfs / vfs_journal.c
1 /*
2 * Copyright (c) 1995-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 //
31 // This file implements a simple write-ahead journaling layer.
32 // In theory any file system can make use of it by calling these
33 // functions when the fs wants to modify meta-data blocks. See
34 // vfs_journal.h for a more detailed description of the api and
35 // data structures.
36 //
37 // Dominic Giampaolo (dbg@apple.com)
38 //
39
40 #ifdef KERNEL
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/file_internal.h>
46 #include <sys/stat.h>
47 #include <sys/buf_internal.h>
48 #include <sys/proc_internal.h>
49 #include <sys/mount_internal.h>
50 #include <sys/namei.h>
51 #include <sys/vnode_internal.h>
52 #include <sys/ioctl.h>
53 #include <sys/tty.h>
54 #include <sys/ubc.h>
55 #include <sys/malloc.h>
56 #include <kern/thread.h>
57 #include <sys/disk.h>
58 #include <miscfs/specfs/specdev.h>
59
60 extern task_t kernel_task;
61
62 #else
63
64 #include <stdio.h>
65 #include <stdlib.h>
66 #include <string.h>
67 #include <limits.h>
68 #include <errno.h>
69 #include <fcntl.h>
70 #include <unistd.h>
71 #include <stdarg.h>
72 #include <sys/types.h>
73 #include "compat.h"
74
75 #endif /* KERNEL */
76
77 #include "vfs_journal.h"
78
79
80 // number of bytes to checksum in a block_list_header
81 // NOTE: this should be enough to clear out the header
82 // fields as well as the first entry of binfo[]
83 #define BLHDR_CHECKSUM_SIZE 32
84
85
86
87 static int end_transaction(transaction *tr, int force_it);
88 static void abort_transaction(journal *jnl, transaction *tr);
89 static void dump_journal(journal *jnl);
90
91 static __inline__ void lock_journal(journal *jnl);
92 static __inline__ void unlock_journal(journal *jnl);
93 static __inline__ void lock_oldstart(journal *jnl);
94 static __inline__ void unlock_oldstart(journal *jnl);
95
96
97
98
99 //
100 // 3105942 - Coalesce writes to the same block on journal replay
101 //
102
// one entry in the replay coalesce table: maps a filesystem block
// to the journal offset holding its most recent data.
typedef struct bucket {
    off_t block_num;    // fs block number; -1 marks a free slot, -2 marks an entry scheduled for deletion
    size_t jnl_offset;  // byte offset within the journal where this block's data lives
    size_t block_size;  // number of bytes of data for this block
} bucket;
108
109 #define STARTING_BUCKETS 256
110
111 static int add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr);
112 static int grow_table(struct bucket **buf_ptr, int num_buckets, int new_size);
113 static int lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full);
114 static int do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr);
115 static int insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr, int overwriting);
116
//
// sanity-check a journal's core invariants and panic on corruption:
// non-null journal/device pointers, intact header magic, and
// start/end/size fields that are positive, within the journal, and
// under a 1GB upper bound (journals are never anywhere near that big,
// so larger values indicate a scrambled header).
//
#define CHECK_JOURNAL(jnl) \
	do { \
	if (jnl == NULL) {\
	panic("%s:%d: null journal ptr?\n", __FILE__, __LINE__);\
	}\
	if (jnl->jdev == NULL) { \
	panic("%s:%d: jdev is null!\n", __FILE__, __LINE__);\
	} \
	if (jnl->fsdev == NULL) { \
	panic("%s:%d: fsdev is null!\n", __FILE__, __LINE__);\
	} \
	if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC) {\
	panic("%s:%d: jhdr magic corrupted (0x%x != 0x%x)\n",\
	__FILE__, __LINE__, jnl->jhdr->magic, JOURNAL_HEADER_MAGIC);\
	}\
	if (   jnl->jhdr->start <= 0 \
	    || jnl->jhdr->start > jnl->jhdr->size\
	    || jnl->jhdr->start > 1024*1024*1024) {\
	panic("%s:%d: jhdr start looks bad (0x%llx max size 0x%llx)\n", \
	__FILE__, __LINE__, jnl->jhdr->start, jnl->jhdr->size);\
	}\
	if (   jnl->jhdr->end <= 0 \
	    || jnl->jhdr->end > jnl->jhdr->size\
	    || jnl->jhdr->end > 1024*1024*1024) {\
	panic("%s:%d: jhdr end looks bad (0x%llx max size 0x%llx)\n", \
	__FILE__, __LINE__, jnl->jhdr->end, jnl->jhdr->size);\
	}\
	if (jnl->jhdr->size > 1024*1024*1024) {\
	panic("%s:%d: jhdr size looks bad (0x%llx)\n",\
	__FILE__, __LINE__, jnl->jhdr->size);\
	} \
	} while(0)
149
//
// sanity-check a transaction and panic on corruption: non-null
// pointers, blhdr anchored at the head of the tbuffer, non-negative
// byte count, journal start/end within the same 1GB sanity bound used
// by CHECK_JOURNAL, and a max_blocks value that fits in the journal.
//
#define CHECK_TRANSACTION(tr) \
	do {\
	if (tr == NULL) {\
	panic("%s:%d: null transaction ptr?\n", __FILE__, __LINE__);\
	}\
	if (tr->jnl == NULL) {\
	panic("%s:%d: null tr->jnl ptr?\n", __FILE__, __LINE__);\
	}\
	if (tr->blhdr != (block_list_header *)tr->tbuffer) {\
	panic("%s:%d: blhdr (0x%x) != tbuffer (0x%x)\n", __FILE__, __LINE__, tr->blhdr, tr->tbuffer);\
	}\
	if (tr->total_bytes < 0) {\
	panic("%s:%d: tr total_bytes looks bad: %d\n", __FILE__, __LINE__, tr->total_bytes);\
	}\
	if (tr->journal_start < 0 || tr->journal_start > 1024*1024*1024) {\
	panic("%s:%d: tr journal start looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_start);\
	}\
	if (tr->journal_end < 0 || tr->journal_end > 1024*1024*1024) {\
	panic("%s:%d: tr journal end looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_end);\
	}\
	if (tr->blhdr && (tr->blhdr->max_blocks <= 0 || tr->blhdr->max_blocks > (tr->jnl->jhdr->size/tr->jnl->jhdr->jhdr_size))) {\
	panic("%s:%d: tr blhdr max_blocks looks bad: %d\n", __FILE__, __LINE__, tr->blhdr->max_blocks);\
	}\
	} while(0)
174
175
176
177 //
178 // this isn't a great checksum routine but it will do for now.
179 // we use it to checksum the journal header and the block list
180 // headers that are at the start of each transaction.
181 //
static int
calc_checksum(char *ptr, int len)
{
	// this is a lame checksum but for now it'll do: fold each byte
	// into the accumulator with a shift/xor/add mix, then return the
	// one's complement so an all-zero buffer doesn't checksum to zero.
	const unsigned char *byte = (const unsigned char *)ptr;
	int sum = 0;
	int idx;

	for (idx = 0; idx < len; idx++) {
		sum = (sum << 8) ^ (sum + byte[idx]);
	}

	return ~sum;
}
194
195 //
196 // Journal Locking
197 //
198 lck_grp_attr_t * jnl_group_attr;
199 lck_attr_t * jnl_lock_attr;
200 lck_grp_t * jnl_mutex_group;
201
//
// one-time setup of the lock group and attributes shared by all
// journal mutexes; expected to run once before any journal locks
// are allocated.
//
void
journal_init()
{
	jnl_lock_attr = lck_attr_alloc_init();
	jnl_group_attr = lck_grp_attr_alloc_init();
	// group alloc must follow the group-attr alloc above
	jnl_mutex_group = lck_grp_alloc_init("jnl-mutex", jnl_group_attr);
}
209
// take the journal's main mutex (serializes transaction state)
static __inline__ void
lock_journal(journal *jnl)
{
	lck_mtx_lock(&jnl->jlock);
}
215
// release the journal's main mutex
static __inline__ void
unlock_journal(journal *jnl)
{
	lck_mtx_unlock(&jnl->jlock);
}
221
// take the lock protecting old_start[], completed_trs and tr_freeme
static __inline__ void
lock_oldstart(journal *jnl)
{
	lck_mtx_lock(&jnl->old_start_lock);
}
227
// release the old-start lock
static __inline__ void
unlock_oldstart(journal *jnl)
{
	lck_mtx_unlock(&jnl->old_start_lock);
}
233
234
235
236 #define JNL_WRITE 0x0001
237 #define JNL_READ 0x0002
238 #define JNL_HEADER 0x8000
239
240 //
241 // This function sets up a fake buf and passes it directly to the
242 // journal device strategy routine (so that it won't get cached in
243 // the block cache.
244 //
245 // It also handles range checking the i/o so that we don't write
246 // outside the journal boundaries and it will wrap the i/o back
247 // to the beginning if necessary (skipping over the journal header)
248 //
static size_t
do_journal_io(journal *jnl, off_t *offset, void *data, size_t len, int direction)
{
	// core i/o engine for the journal: issues direct (uncached) reads
	// or writes against the journal device, clamping each transfer to
	// the device's max i/o size and wrapping past the end of the
	// journal (skipping over the header) as needed.  advances *offset
	// past what was transferred.  returns bytes transferred (== len on
	// success) or 0 on error.
	int err, io_sz=0, curlen=len;
	buf_t bp;
	int max_iosize = 128 * 1024;
	struct vfsioattr ioattr;

	if (*offset < 0 || *offset > jnl->jhdr->size) {
		panic("jnl: do_jnl_io: bad offset 0x%llx (max 0x%llx)\n", *offset, jnl->jhdr->size);
	}
	vfs_ioattr(vnode_mount(jnl->jdev), &ioattr);

	// honor the device's advertised per-direction transfer limits
	if (direction & JNL_WRITE)
		max_iosize = ioattr.io_maxwritecnt;
	else if (direction & JNL_READ)
		max_iosize = ioattr.io_maxreadcnt;

again:
	bp = alloc_io_buf(jnl->jdev, 1);

	// if this chunk would run off the end of the journal, either wrap
	// the offset back to just past the header (when we're exactly at
	// the end) or shorten the chunk to stop at the end; any remainder
	// is handled by the wrap-around pass at the bottom.
	if (*offset + (off_t)curlen > jnl->jhdr->size && *offset != 0 && jnl->jhdr->size != 0) {
		if (*offset == jnl->jhdr->size) {
			*offset = jnl->jhdr->jhdr_size;
		} else {
			curlen = (off_t)jnl->jhdr->size - *offset;
		}
	}

	if (curlen > max_iosize) {
		curlen = max_iosize;
	}

	if (curlen <= 0) {
		panic("jnl: do_jnl_io: curlen == %d, offset 0x%llx len %d\n", curlen, *offset, len);
	}

	// offset 0 is the journal header itself; refuse to touch it unless
	// the caller explicitly passed JNL_HEADER
	if (*offset == 0 && (direction & JNL_HEADER) == 0) {
		panic("jnl: request for i/o to jnl-header without JNL_HEADER flag set! (len %d, data %p)\n", curlen, data);
	}

	if (direction & JNL_READ)
		buf_setflags(bp, B_READ);
	else {
		/*
		 * don't have to set any flags
		 */
		vnode_startwrite(jnl->jdev);
	}
	buf_setsize(bp, curlen);
	buf_setcount(bp, curlen);
	buf_setdataptr(bp, (uintptr_t)data);
	// device block numbers are in units of the journal header size
	buf_setblkno(bp, (daddr64_t) ((jnl->jdev_offset + *offset) / (off_t)jnl->jhdr->jhdr_size));
	buf_setlblkno(bp, (daddr64_t) ((jnl->jdev_offset + *offset) / (off_t)jnl->jhdr->jhdr_size));

	err = VNOP_STRATEGY(bp);
	if (!err) {
		err = (int)buf_biowait(bp);
	}
	free_io_buf(bp);

	if (err) {
		printf("jnl: do_jnl_io: strategy err 0x%x\n", err);
		return 0;
	}

	*offset += curlen;
	io_sz += curlen;
	if (io_sz != len) {
		// handle wrap-around
		data = (char *)data + curlen;
		curlen = len - io_sz;
		if (*offset >= jnl->jhdr->size) {
			*offset = jnl->jhdr->jhdr_size;
		}
		goto again;
	}

	return io_sz;
}
329
// read len bytes from the journal starting at *offset (wrapping as
// needed); advances *offset.  returns bytes read, 0 on error.
static size_t
read_journal_data(journal *jnl, off_t *offset, void *data, size_t len)
{
	return do_journal_io(jnl, offset, data, len, JNL_READ);
}
335
// write len bytes to the journal starting at *offset (wrapping as
// needed); advances *offset.  returns bytes written, 0 on error.
static size_t
write_journal_data(journal *jnl, off_t *offset, void *data, size_t len)
{
	return do_journal_io(jnl, offset, data, len, JNL_WRITE);
}
341
342
// read len bytes of the on-disk journal header (offset 0) into data;
// JNL_HEADER authorizes i/o to the otherwise-protected header block.
static int
read_journal_header(journal *jnl, void *data, size_t len)
{
	off_t hdr_offset = 0;

	return do_journal_io(jnl, &hdr_offset, data, len, JNL_READ|JNL_HEADER);
}
350
//
// checksum and write the in-memory journal header to disk, bracketing
// the write with cache-flush ioctls so the header can never reach the
// platter before the transactions it refers to (or vice versa).
// returns 0 on success; on write failure marks the journal invalid
// and returns -1.
//
static int
write_journal_header(journal *jnl)
{
	static int num_err_prints = 0;
	int ret;
	off_t jhdr_offset = 0;
	struct vfs_context context;

	context.vc_proc = current_proc();
	context.vc_ucred = NOCRED;
	//
	// XXXdbg note: this ioctl doesn't seem to do anything on firewire disks.
	//
	ret = VNOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, &context);
	if (ret != 0) {
		//
		// Only print this error if it's a different error than the
		// previous one, or if it's the first time for this device
		// or if the total number of printfs is less than 25.  We
		// allow for up to 25 printfs to insure that some make it
		// into the on-disk syslog.  Otherwise if we only printed
		// one, it's possible it would never make it to the syslog
		// for the root volume and that makes debugging hard.
		//
		if (   ret != jnl->last_flush_err
		    || (jnl->flags & JOURNAL_FLUSHCACHE_ERR) == 0
		    || num_err_prints++ < 25) {

			printf("jnl: flushing fs disk buffer returned 0x%x\n", ret);

			jnl->flags |= JOURNAL_FLUSHCACHE_ERR;
			jnl->last_flush_err = ret;
		}
	}


	// the checksum field must be zero while the checksum is computed
	jnl->jhdr->checksum = 0;
	jnl->jhdr->checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header));
	if (do_journal_io(jnl, &jhdr_offset, jnl->header_buf, jnl->jhdr->jhdr_size, JNL_WRITE|JNL_HEADER) != jnl->jhdr->jhdr_size) {
		printf("jnl: write_journal_header: error writing the journal header!\n");
		jnl->flags |= JOURNAL_INVALID;
		return -1;
	}

	// Have to flush after writing the journal header so that
	// a future transaction doesn't sneak out to disk before
	// the header does and thus overwrite data that the old
	// journal header refers to.  Saw this exact case happen
	// on an IDE bus analyzer with Larry Barras so while it
	// may seem obscure, it's not.
	//
	VNOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, &context);

	return 0;
}
406
407
408
409 //
410 // this is a work function used to free up transactions that
411 // completed. they can't be free'd from buffer_flushed_callback
412 // because it is called from deep with the disk driver stack
413 // and thus can't do something that would potentially cause
414 // paging. it gets called by each of the journal api entry
415 // points so stuff shouldn't hang around for too long.
416 //
417 static void
418 free_old_stuff(journal *jnl)
419 {
420 transaction *tr, *next;
421
422 lock_oldstart(jnl);
423 tr = jnl->tr_freeme;
424 jnl->tr_freeme = NULL;
425 unlock_oldstart(jnl);
426
427 for(; tr; tr=next) {
428 next = tr->next;
429 FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
430 }
431
432 }
433
434
435
436 //
437 // This is our callback that lets us know when a buffer has been
438 // flushed to disk. It's called from deep within the driver stack
439 // and thus is quite limited in what it can do. Notably, it can
440 // not initiate any new i/o's or allocate/free memory.
441 //
//
// called (from deep in the driver stack) when one of a transaction's
// buffers reaches disk.  when the last buffer of a transaction lands,
// exactly one caller claims the transaction (via the 0xfbadc0de
// sentinel), clears its slot in old_start[], advances active_start if
// possible, and coalesces/retires entries on the completed_trs list.
// freed transactions only go onto tr_freeme here -- actual freeing
// happens later in free_old_stuff() since no allocation is allowed at
// this level.
//
static void
buffer_flushed_callback(struct buf *bp, void *arg)
{
	transaction *tr;
	journal *jnl;
	transaction *ctr, *prev=NULL, *next;
	int i, bufsize;


	//printf("jnl: buf flush: bp @ 0x%x l/blkno %qd/%qd vp 0x%x tr @ 0x%x\n",
	//   bp, buf_lblkno(bp), buf_blkno(bp), buf_vnode(bp), arg);

	// snarf out the bits we want
	bufsize = buf_size(bp);
	tr = (transaction *)arg;

	// then we've already seen it
	if (tr == NULL) {
		return;
	}

	CHECK_TRANSACTION(tr);

	jnl = tr->jnl;
	if (jnl->flags & JOURNAL_INVALID) {
		return;
	}

	CHECK_JOURNAL(jnl);

	// update the number of blocks that have been flushed.
	// this buf may represent more than one block so take
	// that into account.
	OSAddAtomic(bufsize, &tr->num_flushed);


	// if this transaction isn't done yet, just return as
	// there is nothing to do.
	if ((tr->num_flushed + tr->num_killed) < tr->total_bytes) {
		return;
	}

	// this will single thread checking the transaction
	lock_oldstart(jnl);

	if (tr->total_bytes == 0xfbadc0de) {
		// then someone beat us to it...
		unlock_oldstart(jnl);
		return;
	}

	// mark this so that we're the owner of dealing with the
	// cleanup for this transaction
	tr->total_bytes = 0xfbadc0de;

	//printf("jnl: tr 0x%x (0x%llx 0x%llx) in jnl 0x%x completed.\n",
	//   tr, tr->journal_start, tr->journal_end, jnl);

	// find this entry in the old_start[] index and mark it completed
	// (the high bit flags a still-in-flight transaction; clearing it
	// marks the slot done)
	for(i=0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {

		if ((jnl->old_start[i] & ~(0x8000000000000000LL)) == tr->journal_start) {
			jnl->old_start[i] &= ~(0x8000000000000000LL);
			break;
		}
	}
	if (i >= sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
		panic("jnl: buffer_flushed: did not find tr w/start @ %lld (tr 0x%x, jnl 0x%x)\n",
		      tr->journal_start, tr, jnl);
	}
	unlock_oldstart(jnl);


	// if we are here then we need to update the journal header
	// to reflect that this transaction is complete
	if (tr->journal_start == jnl->active_start) {
		jnl->active_start = tr->journal_end;
		tr->journal_start = tr->journal_end = (off_t)0;
	}

	// go through the completed_trs list and try to coalesce
	// entries, restarting back at the beginning if we have to.
	for(ctr=jnl->completed_trs; ctr; prev=ctr, ctr=next) {
		if (ctr->journal_start == jnl->active_start) {
			// ctr now abuts the active region: retire it
			jnl->active_start = ctr->journal_end;
			if (prev) {
				prev->next = ctr->next;
			}
			if (ctr == jnl->completed_trs) {
				jnl->completed_trs = ctr->next;
			}

			lock_oldstart(jnl);
			next = jnl->completed_trs;	// this starts us over again
			ctr->next = jnl->tr_freeme;
			jnl->tr_freeme = ctr;
			ctr = NULL;
			unlock_oldstart(jnl);
		} else if (tr->journal_end == ctr->journal_start) {
			// our range ends where ctr begins: merge into ctr
			ctr->journal_start = tr->journal_start;
			next = jnl->completed_trs;	// this starts us over again
			ctr = NULL;
			tr->journal_start = tr->journal_end = (off_t)0;
		} else if (tr->journal_start == ctr->journal_end) {
			// ctr ends where our range begins: merge into ctr
			ctr->journal_end = tr->journal_end;
			next = ctr->next;
			tr->journal_start = tr->journal_end = (off_t)0;
		} else {
			next = ctr->next;
		}
	}

	// if this is true then we didn't merge with anyone
	// so link ourselves in at the head of the completed
	// transaction list.
	if (tr->journal_start != 0) {
		// put this entry into the correct sorted place
		// in the list instead of just at the head.
		//

		prev = NULL;
		for(ctr=jnl->completed_trs; ctr && tr->journal_start > ctr->journal_start; prev=ctr, ctr=ctr->next) {
			// just keep looping
		}

		if (ctr == NULL && prev == NULL) {
			jnl->completed_trs = tr;
			tr->next = NULL;
		} else if (ctr == jnl->completed_trs) {
			tr->next = jnl->completed_trs;
			jnl->completed_trs = tr;
		} else {
			tr->next = prev->next;
			prev->next = tr;
		}
	} else {
		// if we're here this tr got merged with someone else so
		// put it on the list to be free'd
		lock_oldstart(jnl);
		tr->next = jnl->tr_freeme;
		jnl->tr_freeme = tr;
		unlock_oldstart(jnl);
	}
}
586
587
588 #include <libkern/OSByteOrder.h>
589
590 #define SWAP16(x) OSSwapInt16(x)
591 #define SWAP32(x) OSSwapInt32(x)
592 #define SWAP64(x) OSSwapInt64(x)
593
594
//
// byte-swap every field of the journal header in place; used during
// replay when the journal was written with the opposite endianness.
//
static void
swap_journal_header(journal *jnl)
{
	jnl->jhdr->magic      = SWAP32(jnl->jhdr->magic);
	jnl->jhdr->endian     = SWAP32(jnl->jhdr->endian);
	jnl->jhdr->start      = SWAP64(jnl->jhdr->start);
	jnl->jhdr->end        = SWAP64(jnl->jhdr->end);
	jnl->jhdr->size       = SWAP64(jnl->jhdr->size);
	jnl->jhdr->blhdr_size = SWAP32(jnl->jhdr->blhdr_size);
	jnl->jhdr->checksum   = SWAP32(jnl->jhdr->checksum);
	jnl->jhdr->jhdr_size  = SWAP32(jnl->jhdr->jhdr_size);
}
607
//
// byte-swap a block_list_header and its binfo[] entries in place.
// if the (already swapped) num_blocks looks too large to fit in the
// block-list header, leave the binfo entries alone rather than walk
// past the end of the buffer.
//
static void
swap_block_list_header(journal *jnl, block_list_header *blhdr)
{
	int i;

	blhdr->max_blocks = SWAP16(blhdr->max_blocks);
	blhdr->num_blocks = SWAP16(blhdr->num_blocks);
	blhdr->bytes_used = SWAP32(blhdr->bytes_used);
	blhdr->checksum   = SWAP32(blhdr->checksum);
	blhdr->pad        = SWAP32(blhdr->pad);

	if (blhdr->num_blocks * sizeof(blhdr->binfo[0]) > jnl->jhdr->blhdr_size) {
		printf("jnl: blhdr num blocks looks suspicious (%d).  not swapping.\n", blhdr->num_blocks);
		return;
	}

	for(i=0; i < blhdr->num_blocks; i++) {
		blhdr->binfo[i].bnum  = SWAP64(blhdr->binfo[i].bnum);
		blhdr->binfo[i].bsize = SWAP32(blhdr->binfo[i].bsize);
		// NOTE(review): bp is swapped through a 32-bit int, which
		// truncates pointer-sized values on LP64 -- presumably bp is
		// only a scratch field during replay; confirm before relying
		// on it after a swap.
		blhdr->binfo[i].bp    = (void *)SWAP32((int)blhdr->binfo[i].bp);
	}
}
630
631
//
// during replay: copy bsize bytes of journal data (block_ptr) over
// filesystem block fs_block and write it out synchronously.  if the
// initial read fails we aggressively grab a fresh buffer and overwrite
// the block anyway.  the buffer is invalidated afterwards so later
// readers can re-read it at a different block size.  returns 0 on
// success, -1 or the bwrite error on failure.
//
static int
update_fs_block(journal *jnl, void *block_ptr, off_t fs_block, size_t bsize)
{
	int ret;
	struct buf *oblock_bp=NULL;

	// first read the block we want.
	ret = buf_meta_bread(jnl->fsdev, (daddr64_t)fs_block, bsize, NOCRED, &oblock_bp);
	if (ret != 0) {
		printf("jnl: update_fs_block: error reading fs block # %lld! (ret %d)\n", fs_block, ret);

		if (oblock_bp) {
			buf_brelse(oblock_bp);
			oblock_bp = NULL;
		}

		// let's try to be aggressive here and just re-write the block
		oblock_bp = buf_getblk(jnl->fsdev, (daddr64_t)fs_block, bsize, 0, 0, BLK_META);
		if (oblock_bp == NULL) {
			printf("jnl: update_fs_block: buf_getblk() for %lld failed! failing update.\n", fs_block);
			return -1;
		}
	}

	// make sure it's the correct size.
	if (buf_size(oblock_bp) != bsize) {
		buf_brelse(oblock_bp);
		return -1;
	}

	// copy the journal data over top of it
	memcpy((void *)buf_dataptr(oblock_bp), block_ptr, bsize);

	if ((ret = VNOP_BWRITE(oblock_bp)) != 0) {
		printf("jnl: update_fs_block: failed to update block %lld (ret %d)\n", fs_block,ret);
		return ret;
	}

	// and now invalidate it so that if someone else wants to read
	// it in a different size they'll be able to do it.
	ret = buf_meta_bread(jnl->fsdev, (daddr64_t)fs_block, bsize, NOCRED, &oblock_bp);
	if (oblock_bp) {
		buf_markinvalid(oblock_bp);
		buf_brelse(oblock_bp);
	}

	return 0;
}
680
681 static int
682 grow_table(struct bucket **buf_ptr, int num_buckets, int new_size)
683 {
684 struct bucket *newBuf;
685 int current_size = num_buckets, i;
686
687 // return if newsize is less than the current size
688 if (new_size < num_buckets) {
689 return current_size;
690 }
691
692 if ((MALLOC(newBuf, struct bucket *, new_size*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) {
693 printf("jnl: grow_table: no memory to expand coalesce buffer!\n");
694 return -1;
695 }
696
697 // printf("jnl: lookup_bucket: expanded co_buf to %d elems\n", new_size);
698
699 // copy existing elements
700 bcopy(*buf_ptr, newBuf, num_buckets*sizeof(struct bucket));
701
702 // initialize the new ones
703 for(i=num_buckets; i < new_size; i++) {
704 newBuf[i].block_num = (off_t)-1;
705 }
706
707 // free the old container
708 FREE(*buf_ptr, M_TEMP);
709
710 // reset the buf_ptr
711 *buf_ptr = newBuf;
712
713 return new_size;
714 }
715
//
// binary-search the sorted coalesce table for block_num.  returns the
// index of the right-most entry with that block number if present,
// otherwise the index at which a new entry should be inserted (which
// may be num_full, meaning "append at the end").
//
static int
lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full)
{
	int lo, hi, index, matches, i;

	if (num_full == 0) {
		return 0; // table is empty, so insert at index=0
	}

	lo = 0;
	hi = num_full - 1;
	index = -1;

	// perform binary search for block_num
	do {
		int mid = (hi - lo)/2 + lo;
		off_t this_num = (*buf_ptr)[mid].block_num;

		if (block_num == this_num) {
			index = mid;
			break;
		}

		if (block_num < this_num) {
			hi = mid;	// note: hi stays inclusive here
			continue;
		}

		if (block_num > this_num) {
			lo = mid + 1;
			continue;
		}
	} while(lo < hi);

	// check if lo and hi converged on the match
	if (block_num == (*buf_ptr)[hi].block_num) {
		index = hi;
	}

	// if no existing entry found, find index for new one
	if (index == -1) {
		index = (block_num < (*buf_ptr)[hi].block_num) ? hi : hi + 1;
	} else {
		// make sure that we return the right-most index in the case of multiple matches
		matches = 0;
		i = index + 1;
		while(i < num_full && block_num == (*buf_ptr)[i].block_num) {
			matches++;
			i++;
		}

		index += matches;
	}

	return index;
}
772
//
// place an entry (block num, size, journal offset) at blk_index in
// the coalesce table.  when overwriting an existing entry nothing is
// shifted; otherwise the table is grown if full and the tail entries
// are shifted right to open the slot.  the journal offset is wrapped
// back past the header if it falls beyond the end of the journal.
// returns blk_index on success, -1 if the table could not grow.
//
static int
insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr, int overwriting)
{
	if (!overwriting) {
		// grow the table if we're out of space
		if (*num_full_ptr >= *num_buckets_ptr) {
			int new_size = *num_buckets_ptr * 2;
			int grow_size = grow_table(buf_ptr, *num_buckets_ptr, new_size);

			if (grow_size < new_size) {
				printf("jnl: add_block: grow_table returned an error!\n");
				return -1;
			}

			*num_buckets_ptr = grow_size; //update num_buckets to reflect the new size
		}

		// if we're not inserting at the end, we need to bcopy
		// (the regions overlap; bcopy handles that)
		if (blk_index != *num_full_ptr) {
			bcopy( (*buf_ptr)+(blk_index), (*buf_ptr)+(blk_index+1), (*num_full_ptr-blk_index)*sizeof(struct bucket) );
		}

		(*num_full_ptr)++; // increment only if we're not overwriting
	}

	// sanity check the values we're about to add
	if (offset >= jnl->jhdr->size) {
		// wrap the journal offset around, skipping the header block
		offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
	}
	if (size <= 0) {
		panic("jnl: insert_block: bad size in insert_block (%d)\n", size);
	}

	(*buf_ptr)[blk_index].block_num = num;
	(*buf_ptr)[blk_index].block_size = size;
	(*buf_ptr)[blk_index].jnl_offset = offset;

	return blk_index;
}
812
//
// resolve overlaps between a new entry (block_num/size/offset) about
// to be placed at blk_index and its neighbors in the coalesce table.
// the previous entry may be truncated or split in two; following
// entries may be trimmed or, if completely covered, removed.  returns
// 1 when the new entry should simply overwrite the slot at blk_index,
// 0 when it should be inserted as a new entry, and relies on
// insert_block panicking on internal errors.
//
static int
do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr)
{
	int num_to_remove, index, i, overwrite, err;
	size_t jhdr_size = jnl->jhdr->jhdr_size, new_offset;
	off_t overlap, block_start, block_end;

	// byte ranges on the fs device; block numbers are in units of jhdr_size
	block_start = block_num*jhdr_size;
	block_end = block_start + size;
	overwrite = (block_num == (*buf_ptr)[blk_index].block_num && size >= (*buf_ptr)[blk_index].block_size);

	// first, eliminate any overlap with the previous entry
	if (blk_index != 0 && !overwrite) {
		off_t prev_block_start = (*buf_ptr)[blk_index-1].block_num*jhdr_size;
		off_t prev_block_end = prev_block_start + (*buf_ptr)[blk_index-1].block_size;
		overlap = prev_block_end - block_start;
		if (overlap > 0) {
			if (overlap % jhdr_size != 0) {
				panic("jnl: do_overlap: overlap with previous entry not a multiple of %d\n", jhdr_size);
			}

			// if the previous entry completely overlaps this one, we need to break it into two pieces.
			if (prev_block_end > block_end) {
				off_t new_num = block_end / jhdr_size;
				size_t new_size = prev_block_end - block_end;

				new_offset = (*buf_ptr)[blk_index-1].jnl_offset + (block_end - prev_block_start);

				err = insert_block(jnl, buf_ptr, blk_index, new_num, new_size, new_offset, num_buckets_ptr, num_full_ptr, 0);
				if (err < 0) {
					panic("jnl: do_overlap: error inserting during pre-overlap\n");
				}
			}

			// Regardless, we need to truncate the previous entry to the beginning of the overlap
			(*buf_ptr)[blk_index-1].block_size = block_start - prev_block_start;
		}
	}

	// then, bail out fast if there's no overlap with the entries that follow
	if (!overwrite && block_end <= (*buf_ptr)[blk_index].block_num*jhdr_size) {
		return 0; // no overlap, no overwrite
	} else if (overwrite && (blk_index + 1 >= *num_full_ptr || block_end <= (*buf_ptr)[blk_index+1].block_num*jhdr_size)) {
		return 1; // simple overwrite
	}

	// Otherwise, find all cases of total and partial overlap.  We use the special
	// block_num of -2 to designate entries that are completely overlapped and must
	// be eliminated.  The block_num, size, and jnl_offset of partially overlapped
	// entries must be adjusted to keep the array consistent.
	index = blk_index;
	num_to_remove = 0;
	while(index < *num_full_ptr && block_end > (*buf_ptr)[index].block_num*jhdr_size) {
		if (block_end >= ((*buf_ptr)[index].block_num*jhdr_size + (*buf_ptr)[index].block_size)) {
			(*buf_ptr)[index].block_num = -2; // mark this for deletion
			num_to_remove++;
		} else {
			overlap = block_end - (*buf_ptr)[index].block_num*jhdr_size;
			if (overlap > 0) {
				if (overlap % jhdr_size != 0) {
					panic("jnl: do_overlap: overlap of %lld is not multiple of %d\n", overlap, jhdr_size);
				}

				// if we partially overlap this entry, adjust its block number, jnl offset, and size
				(*buf_ptr)[index].block_num += (overlap / jhdr_size); // make sure overlap is multiple of jhdr_size, or round up

				new_offset = (*buf_ptr)[index].jnl_offset + overlap; // check for wrap-around
				if (new_offset >= jnl->jhdr->size) {
					new_offset = jhdr_size + (new_offset - jnl->jhdr->size);
				}
				(*buf_ptr)[index].jnl_offset = new_offset;

				(*buf_ptr)[index].block_size -= overlap; // sanity check for negative value
				if ((*buf_ptr)[index].block_size <= 0) {
					panic("jnl: do_overlap: after overlap, new block size is invalid (%d)\n", (*buf_ptr)[index].block_size);
					// return -1; // if above panic is removed, return -1 for error
				}
			}

		}

		index++;
	}

	// bcopy over any completely overlapped entries, starting at the right (where the above loop broke out)
	index--; // start with the last index used within the above loop
	while(index >= blk_index) {
		if ((*buf_ptr)[index].block_num == -2) {
			if (index == *num_full_ptr-1) {
				(*buf_ptr)[index].block_num = -1; // it's the last item in the table... just mark as free
			} else {
				bcopy( (*buf_ptr)+(index+1), (*buf_ptr)+(index), (*num_full_ptr - (index + 1)) * sizeof(struct bucket) );
			}
			(*num_full_ptr)--;
		}
		index--;
	}

	// eliminate any stale entries at the end of the table
	for(i=*num_full_ptr; i < (*num_full_ptr + num_to_remove); i++) {
		(*buf_ptr)[i].block_num = -1;
	}

	return 0; // if we got this far, we need to insert the entry into the table (rather than overwrite)
}
918
919 // PR-3105942: Coalesce writes to the same block in journal replay
920 // We coalesce writes by maintaining a dynamic sorted array of physical disk blocks
921 // to be replayed and the corresponding location in the journal which contains
922 // the most recent data for those blocks. The array is "played" once the all the
923 // blocks in the journal have been coalesced. The code for the case of conflicting/
924 // overlapping writes to a single block is the most dense. Because coalescing can
925 // disrupt the existing time-ordering of blocks in the journal playback, care
926 // is taken to catch any overlaps and keep the array consistent.
927 static int
928 add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr)
929 {
930 int blk_index, overwriting;
931
932 // on return from lookup_bucket(), blk_index is the index into the table where block_num should be
933 // inserted (or the index of the elem to overwrite).
934 blk_index = lookup_bucket( buf_ptr, block_num, *num_full_ptr);
935
936 // check if the index is within bounds (if we're adding this block to the end of
937 // the table, blk_index will be equal to num_full)
938 if (blk_index < 0 || blk_index > *num_full_ptr) {
939 //printf("jnl: add_block: trouble adding block to co_buf\n");
940 return -1;
941 } // else printf("jnl: add_block: adding block 0x%llx at i=%d\n", block_num, blk_index);
942
943 // Determine whether we're overwriting an existing entry by checking for overlap
944 overwriting = do_overlap(jnl, buf_ptr, blk_index, block_num, size, offset, num_buckets_ptr, num_full_ptr);
945 if (overwriting < 0) {
946 return -1; // if we got an error, pass it along
947 }
948
949 // returns the index, or -1 on error
950 blk_index = insert_block(jnl, buf_ptr, blk_index, block_num, size, offset, num_buckets_ptr, num_full_ptr, overwriting);
951
952 return blk_index;
953 }
954
955 static int
956 replay_journal(journal *jnl)
957 {
958 int i, ret, orig_checksum, checksum, max_bsize;
959 block_list_header *blhdr;
960 off_t offset;
961 char *buff, *block_ptr=NULL;
962 struct bucket *co_buf;
963 int num_buckets = STARTING_BUCKETS, num_full;
964
965 // wrap the start ptr if it points to the very end of the journal
966 if (jnl->jhdr->start == jnl->jhdr->size) {
967 jnl->jhdr->start = jnl->jhdr->jhdr_size;
968 }
969 if (jnl->jhdr->end == jnl->jhdr->size) {
970 jnl->jhdr->end = jnl->jhdr->jhdr_size;
971 }
972
973 if (jnl->jhdr->start == jnl->jhdr->end) {
974 return 0;
975 }
976
977 // allocate memory for the header_block. we'll read each blhdr into this
978 if (kmem_alloc(kernel_map, (vm_offset_t *)&buff, jnl->jhdr->blhdr_size)) {
979 printf("jnl: replay_journal: no memory for block buffer! (%d bytes)\n",
980 jnl->jhdr->blhdr_size);
981 return -1;
982 }
983
984 // allocate memory for the coalesce buffer
985 if ((MALLOC(co_buf, struct bucket *, num_buckets*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) {
986 printf("jnl: replay_journal: no memory for coalesce buffer!\n");
987 return -1;
988 }
989
990 // initialize entries
991 for(i=0; i < num_buckets; i++) {
992 co_buf[i].block_num = -1;
993 }
994 num_full = 0; // empty at first
995
996
997 printf("jnl: replay_journal: from: %lld to: %lld (joffset 0x%llx)\n",
998 jnl->jhdr->start, jnl->jhdr->end, jnl->jdev_offset);
999
1000 while(jnl->jhdr->start != jnl->jhdr->end) {
1001 offset = jnl->jhdr->start;
1002 ret = read_journal_data(jnl, &offset, buff, jnl->jhdr->blhdr_size);
1003 if (ret != jnl->jhdr->blhdr_size) {
1004 printf("jnl: replay_journal: Could not read block list header block @ 0x%llx!\n", offset);
1005 goto bad_replay;
1006 }
1007
1008 blhdr = (block_list_header *)buff;
1009
1010 orig_checksum = blhdr->checksum;
1011 blhdr->checksum = 0;
1012 if (jnl->flags & JOURNAL_NEED_SWAP) {
1013 // calculate the checksum based on the unswapped data
1014 // because it is done byte-at-a-time.
1015 orig_checksum = SWAP32(orig_checksum);
1016 checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
1017 swap_block_list_header(jnl, blhdr);
1018 } else {
1019 checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
1020 }
1021 if (checksum != orig_checksum) {
1022 printf("jnl: replay_journal: bad block list header @ 0x%llx (checksum 0x%x != 0x%x)\n",
1023 offset, orig_checksum, checksum);
1024 goto bad_replay;
1025 }
1026 if ( blhdr->max_blocks <= 0 || blhdr->max_blocks > 2048
1027 || blhdr->num_blocks <= 0 || blhdr->num_blocks > blhdr->max_blocks) {
1028 printf("jnl: replay_journal: bad looking journal entry: max: %d num: %d\n",
1029 blhdr->max_blocks, blhdr->num_blocks);
1030 goto bad_replay;
1031 }
1032
1033 for(i=1; i < blhdr->num_blocks; i++) {
1034 if (blhdr->binfo[i].bnum < 0 && blhdr->binfo[i].bnum != (off_t)-1) {
1035 printf("jnl: replay_journal: bogus block number 0x%llx\n", blhdr->binfo[i].bnum);
1036 goto bad_replay;
1037 }
1038 }
1039
1040 //printf("jnl: replay_journal: adding %d blocks in journal entry @ 0x%llx to co_buf\n",
1041 // blhdr->num_blocks-1, jnl->jhdr->start);
1042 for(i=1; i < blhdr->num_blocks; i++) {
1043 int size, ret_val;
1044 off_t number;
1045
1046 size = blhdr->binfo[i].bsize;
1047 number = blhdr->binfo[i].bnum;
1048
1049 // don't add "killed" blocks
1050 if (number == (off_t)-1) {
1051 //printf("jnl: replay_journal: skipping killed fs block (index %d)\n", i);
1052 } else {
1053 // add this bucket to co_buf, coalescing where possible
1054 // printf("jnl: replay_journal: adding block 0x%llx\n", number);
1055 ret_val = add_block(jnl, &co_buf, number, size, (size_t) offset, &num_buckets, &num_full);
1056
1057 if (ret_val == -1) {
1058 printf("jnl: replay_journal: trouble adding block to co_buf\n");
1059 goto bad_replay;
1060 } // else printf("jnl: replay_journal: added block 0x%llx at i=%d\n", number);
1061 }
1062
1063 // increment offset
1064 offset += size;
1065
1066 // check if the last block added puts us off the end of the jnl.
1067 // if so, we need to wrap to the beginning and take any remainder
1068 // into account
1069 //
1070 if (offset >= jnl->jhdr->size) {
1071 offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
1072 }
1073 }
1074
1075
1076 jnl->jhdr->start += blhdr->bytes_used;
1077 if (jnl->jhdr->start >= jnl->jhdr->size) {
1078 // wrap around and skip the journal header block
1079 jnl->jhdr->start = (jnl->jhdr->start % jnl->jhdr->size) + jnl->jhdr->jhdr_size;
1080 }
1081 }
1082
1083
1084 //printf("jnl: replay_journal: replaying %d blocks\n", num_full);
1085
1086 /*
1087 * make sure it's at least one page in size, so
1088 * start max_bsize at PAGE_SIZE
1089 */
1090 for (i = 0, max_bsize = PAGE_SIZE; i < num_full; i++) {
1091
1092 if (co_buf[i].block_num == (off_t)-1)
1093 continue;
1094
1095 if (co_buf[i].block_size > max_bsize)
1096 max_bsize = co_buf[i].block_size;
1097 }
1098 /*
1099 * round max_bsize up to the nearest PAGE_SIZE multiple
1100 */
1101 if (max_bsize & (PAGE_SIZE - 1)) {
1102 max_bsize = (max_bsize + PAGE_SIZE) & ~(PAGE_SIZE - 1);
1103 }
1104
1105 if (kmem_alloc(kernel_map, (vm_offset_t *)&block_ptr, max_bsize)) {
1106 goto bad_replay;
1107 }
1108
1109 // Replay the coalesced entries in the co-buf
1110 for(i=0; i < num_full; i++) {
1111 size_t size = co_buf[i].block_size;
1112 off_t jnl_offset = (off_t) co_buf[i].jnl_offset;
1113 off_t number = co_buf[i].block_num;
1114
1115
1116 // printf("replaying co_buf[%d]: block 0x%llx, size 0x%x, jnl_offset 0x%llx\n", i, co_buf[i].block_num,
1117 // co_buf[i].block_size, co_buf[i].jnl_offset);
1118
1119 if (number == (off_t)-1) {
1120 // printf("jnl: replay_journal: skipping killed fs block\n");
1121 } else {
1122
1123 // do journal read, and set the phys. block
1124 ret = read_journal_data(jnl, &jnl_offset, block_ptr, size);
1125 if (ret != size) {
1126 printf("jnl: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", offset);
1127 goto bad_replay;
1128 }
1129
1130 if (update_fs_block(jnl, block_ptr, number, size) != 0) {
1131 goto bad_replay;
1132 }
1133 }
1134 }
1135
1136
1137 // done replaying; update jnl header
1138 if (write_journal_header(jnl) != 0) {
1139 goto bad_replay;
1140 }
1141
1142 // free block_ptr
1143 kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize);
1144 block_ptr = NULL;
1145
1146 // free the coalesce buffer
1147 FREE(co_buf, M_TEMP);
1148 co_buf = NULL;
1149
1150 kmem_free(kernel_map, (vm_offset_t)buff, jnl->jhdr->blhdr_size);
1151 return 0;
1152
1153 bad_replay:
1154 if (block_ptr) {
1155 kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize);
1156 }
1157 if (co_buf) {
1158 FREE(co_buf, M_TEMP);
1159 }
1160 kmem_free(kernel_map, (vm_offset_t)buff, jnl->jhdr->blhdr_size);
1161
1162 return -1;
1163 }
1164
1165
1166 #define DEFAULT_TRANSACTION_BUFFER_SIZE (128*1024)
1167 //#define DEFAULT_TRANSACTION_BUFFER_SIZE (256*1024) // better performance but uses more mem
1168 #define MAX_TRANSACTION_BUFFER_SIZE (512*1024)
1169
1170 // XXXdbg - so I can change it in the debugger
1171 int def_tbuffer_size = 0;
1172
1173
1174 //
1175 // This function sets the size of the tbuffer and the
1176 // size of the blhdr. It assumes that jnl->jhdr->size
1177 // and jnl->jhdr->jhdr_size are already valid.
1178 //
static void
size_up_tbuffer(journal *jnl, int tbuffer_size, int phys_blksz)
{
	//
	// one-time initialization based on how much memory
	// there is in the machine.
	//
	if (def_tbuffer_size == 0) {
		if (mem_size < (256*1024*1024)) {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE;
		} else if (mem_size < (512*1024*1024)) {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 2;
		} else if (mem_size < (1024*1024*1024)) {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 3;
		} else if (mem_size >= (1024*1024*1024)) {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 4;
		}
	}

	// size up the transaction buffer... can't be larger than the number
	// of blocks that can fit in a block_list_header block.
	if (tbuffer_size == 0) {
		// caller didn't request a size: use the machine-wide default
		jnl->tbuffer_size = def_tbuffer_size;
	} else {
		// make sure that the specified tbuffer_size isn't too small
		if (tbuffer_size < jnl->jhdr->blhdr_size * 2) {
			tbuffer_size = jnl->jhdr->blhdr_size * 2;
		}
		// and make sure it's an even multiple of the block size
		// (rounds DOWN, so this may undercut the *2 minimum above --
		// NOTE(review): order-sensitive; preserved as-is)
		if ((tbuffer_size % jnl->jhdr->jhdr_size) != 0) {
			tbuffer_size -= (tbuffer_size % jnl->jhdr->jhdr_size);
		}

		jnl->tbuffer_size = tbuffer_size;
	}

	// a single transaction buffer can never be more than half the journal
	if (jnl->tbuffer_size > (jnl->jhdr->size / 2)) {
		jnl->tbuffer_size = (jnl->jhdr->size / 2);
	}

	// and never more than the absolute cap
	if (jnl->tbuffer_size > MAX_TRANSACTION_BUFFER_SIZE) {
		jnl->tbuffer_size = MAX_TRANSACTION_BUFFER_SIZE;
	}

	// derive blhdr_size: one block_info per device block that fits in the
	// tbuffer, then pad up to at least one physical block
	jnl->jhdr->blhdr_size = (jnl->tbuffer_size / jnl->jhdr->jhdr_size) * sizeof(block_info);
	if (jnl->jhdr->blhdr_size < phys_blksz) {
		jnl->jhdr->blhdr_size = phys_blksz;
	} else if ((jnl->jhdr->blhdr_size % phys_blksz) != 0) {
		// have to round up so we're an even multiple of the physical block size
		jnl->jhdr->blhdr_size = (jnl->jhdr->blhdr_size + (phys_blksz - 1)) & ~(phys_blksz - 1);
	}
}
1231
1232
1233
1234 journal *
1235 journal_create(struct vnode *jvp,
1236 off_t offset,
1237 off_t journal_size,
1238 struct vnode *fsvp,
1239 size_t min_fs_blksz,
1240 int32_t flags,
1241 int32_t tbuffer_size,
1242 void (*flush)(void *arg),
1243 void *arg)
1244 {
1245 journal *jnl;
1246 int phys_blksz;
1247 struct vfs_context context;
1248
1249 context.vc_proc = current_proc();
1250 context.vc_ucred = FSCRED;
1251
1252 /* Get the real physical block size. */
1253 if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
1254 return NULL;
1255 }
1256
1257 if (phys_blksz > min_fs_blksz) {
1258 printf("jnl: create: error: phys blksize %d bigger than min fs blksize %d\n",
1259 phys_blksz, min_fs_blksz);
1260 return NULL;
1261 }
1262
1263 if ((journal_size % phys_blksz) != 0) {
1264 printf("jnl: create: journal size 0x%llx is not an even multiple of block size 0x%x\n",
1265 journal_size, phys_blksz);
1266 return NULL;
1267 }
1268
1269 MALLOC_ZONE(jnl, struct journal *, sizeof(struct journal), M_JNL_JNL, M_WAITOK);
1270 memset(jnl, 0, sizeof(*jnl));
1271
1272 jnl->jdev = jvp;
1273 jnl->jdev_offset = offset;
1274 jnl->fsdev = fsvp;
1275 jnl->flush = flush;
1276 jnl->flush_arg = arg;
1277 jnl->flags = (flags & JOURNAL_OPTION_FLAGS_MASK);
1278 lck_mtx_init(&jnl->old_start_lock, jnl_mutex_group, jnl_lock_attr);
1279
1280 if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl->header_buf, phys_blksz)) {
1281 printf("jnl: create: could not allocate space for header buffer (%d bytes)\n", phys_blksz);
1282 goto bad_kmem_alloc;
1283 }
1284
1285 memset(jnl->header_buf, 0, phys_blksz);
1286
1287 jnl->jhdr = (journal_header *)jnl->header_buf;
1288 jnl->jhdr->magic = JOURNAL_HEADER_MAGIC;
1289 jnl->jhdr->endian = ENDIAN_MAGIC;
1290 jnl->jhdr->start = phys_blksz; // start at block #1, block #0 is for the jhdr itself
1291 jnl->jhdr->end = phys_blksz;
1292 jnl->jhdr->size = journal_size;
1293 jnl->jhdr->jhdr_size = phys_blksz;
1294 size_up_tbuffer(jnl, tbuffer_size, phys_blksz);
1295
1296 jnl->active_start = jnl->jhdr->start;
1297
1298 // XXXdbg - for testing you can force the journal to wrap around
1299 // jnl->jhdr->start = jnl->jhdr->size - (phys_blksz*3);
1300 // jnl->jhdr->end = jnl->jhdr->size - (phys_blksz*3);
1301
1302 lck_mtx_init(&jnl->jlock, jnl_mutex_group, jnl_lock_attr);
1303
1304 if (write_journal_header(jnl) != 0) {
1305 printf("jnl: journal_create: failed to write journal header.\n");
1306 goto bad_write;
1307 }
1308
1309 return jnl;
1310
1311
1312 bad_write:
1313 kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz);
1314 bad_kmem_alloc:
1315 jnl->jhdr = NULL;
1316 FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
1317 return NULL;
1318 }
1319
1320
1321 journal *
1322 journal_open(struct vnode *jvp,
1323 off_t offset,
1324 off_t journal_size,
1325 struct vnode *fsvp,
1326 size_t min_fs_blksz,
1327 int32_t flags,
1328 int32_t tbuffer_size,
1329 void (*flush)(void *arg),
1330 void *arg)
1331 {
1332 journal *jnl;
1333 int orig_blksz=0, phys_blksz;
1334 int orig_checksum, checksum;
1335 struct vfs_context context;
1336
1337 context.vc_proc = current_proc();
1338 context.vc_ucred = FSCRED;
1339
1340 /* Get the real physical block size. */
1341 if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
1342 return NULL;
1343 }
1344
1345 if (phys_blksz > min_fs_blksz) {
1346 printf("jnl: create: error: phys blksize %d bigger than min fs blksize %d\n",
1347 phys_blksz, min_fs_blksz);
1348 return NULL;
1349 }
1350
1351 if ((journal_size % phys_blksz) != 0) {
1352 printf("jnl: open: journal size 0x%llx is not an even multiple of block size 0x%x\n",
1353 journal_size, phys_blksz);
1354 return NULL;
1355 }
1356
1357 MALLOC_ZONE(jnl, struct journal *, sizeof(struct journal), M_JNL_JNL, M_WAITOK);
1358 memset(jnl, 0, sizeof(*jnl));
1359
1360 jnl->jdev = jvp;
1361 jnl->jdev_offset = offset;
1362 jnl->fsdev = fsvp;
1363 jnl->flush = flush;
1364 jnl->flush_arg = arg;
1365 jnl->flags = (flags & JOURNAL_OPTION_FLAGS_MASK);
1366 lck_mtx_init(&jnl->old_start_lock, jnl_mutex_group, jnl_lock_attr);
1367
1368 if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl->header_buf, phys_blksz)) {
1369 printf("jnl: create: could not allocate space for header buffer (%d bytes)\n", phys_blksz);
1370 goto bad_kmem_alloc;
1371 }
1372
1373 jnl->jhdr = (journal_header *)jnl->header_buf;
1374 memset(jnl->jhdr, 0, sizeof(journal_header)+4);
1375
1376 // we have to set this up here so that do_journal_io() will work
1377 jnl->jhdr->jhdr_size = phys_blksz;
1378
1379 if (read_journal_header(jnl, jnl->jhdr, phys_blksz) != phys_blksz) {
1380 printf("jnl: open: could not read %d bytes for the journal header.\n",
1381 phys_blksz);
1382 goto bad_journal;
1383 }
1384
1385 orig_checksum = jnl->jhdr->checksum;
1386 jnl->jhdr->checksum = 0;
1387
1388 if (jnl->jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
1389 // do this before the swap since it's done byte-at-a-time
1390 orig_checksum = SWAP32(orig_checksum);
1391 checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header));
1392 swap_journal_header(jnl);
1393 jnl->flags |= JOURNAL_NEED_SWAP;
1394 } else {
1395 checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header));
1396 }
1397
1398 if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC && jnl->jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
1399 printf("jnl: open: journal magic is bad (0x%x != 0x%x)\n",
1400 jnl->jhdr->magic, JOURNAL_HEADER_MAGIC);
1401 goto bad_journal;
1402 }
1403
1404 // only check if we're the current journal header magic value
1405 if (jnl->jhdr->magic == JOURNAL_HEADER_MAGIC) {
1406
1407 if (orig_checksum != checksum) {
1408 printf("jnl: open: journal checksum is bad (0x%x != 0x%x)\n",
1409 orig_checksum, checksum);
1410
1411 //goto bad_journal;
1412 }
1413 }
1414
1415 // XXXdbg - convert old style magic numbers to the new one
1416 if (jnl->jhdr->magic == OLD_JOURNAL_HEADER_MAGIC) {
1417 jnl->jhdr->magic = JOURNAL_HEADER_MAGIC;
1418 }
1419
1420 if (phys_blksz != jnl->jhdr->jhdr_size && jnl->jhdr->jhdr_size != 0) {
1421 printf("jnl: open: phys_blksz %d does not match journal header size %d\n",
1422 phys_blksz, jnl->jhdr->jhdr_size);
1423
1424 orig_blksz = phys_blksz;
1425 phys_blksz = jnl->jhdr->jhdr_size;
1426 if (VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&phys_blksz, FWRITE, &context)) {
1427 printf("jnl: could not set block size to %d bytes.\n", phys_blksz);
1428 goto bad_journal;
1429 }
1430 // goto bad_journal;
1431 }
1432
1433 if ( jnl->jhdr->start <= 0
1434 || jnl->jhdr->start > jnl->jhdr->size
1435 || jnl->jhdr->start > 1024*1024*1024) {
1436 printf("jnl: open: jhdr start looks bad (0x%llx max size 0x%llx)\n",
1437 jnl->jhdr->start, jnl->jhdr->size);
1438 goto bad_journal;
1439 }
1440
1441 if ( jnl->jhdr->end <= 0
1442 || jnl->jhdr->end > jnl->jhdr->size
1443 || jnl->jhdr->end > 1024*1024*1024) {
1444 printf("jnl: open: jhdr end looks bad (0x%llx max size 0x%llx)\n",
1445 jnl->jhdr->end, jnl->jhdr->size);
1446 goto bad_journal;
1447 }
1448
1449 if (jnl->jhdr->size > 1024*1024*1024) {
1450 printf("jnl: open: jhdr size looks bad (0x%llx)\n", jnl->jhdr->size);
1451 goto bad_journal;
1452 }
1453
1454 // XXXdbg - can't do these checks because hfs writes all kinds of
1455 // non-uniform sized blocks even on devices that have a block size
1456 // that is larger than 512 bytes (i.e. optical media w/2k blocks).
1457 // therefore these checks will fail and so we just have to punt and
1458 // do more relaxed checking...
1459 // XXXdbg if ((jnl->jhdr->start % jnl->jhdr->jhdr_size) != 0) {
1460 if ((jnl->jhdr->start % 512) != 0) {
1461 printf("jnl: open: journal start (0x%llx) not a multiple of 512?\n",
1462 jnl->jhdr->start);
1463 goto bad_journal;
1464 }
1465
1466 //XXXdbg if ((jnl->jhdr->end % jnl->jhdr->jhdr_size) != 0) {
1467 if ((jnl->jhdr->end % 512) != 0) {
1468 printf("jnl: open: journal end (0x%llx) not a multiple of block size (0x%x)?\n",
1469 jnl->jhdr->end, jnl->jhdr->jhdr_size);
1470 goto bad_journal;
1471 }
1472
1473 // take care of replaying the journal if necessary
1474 if (flags & JOURNAL_RESET) {
1475 printf("jnl: journal start/end pointers reset! (jnl 0x%x; s 0x%llx e 0x%llx)\n",
1476 jnl, jnl->jhdr->start, jnl->jhdr->end);
1477 jnl->jhdr->start = jnl->jhdr->end;
1478 } else if (replay_journal(jnl) != 0) {
1479 printf("jnl: journal_open: Error replaying the journal!\n");
1480 goto bad_journal;
1481 }
1482
1483 if (orig_blksz != 0) {
1484 VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, &context);
1485 phys_blksz = orig_blksz;
1486 if (orig_blksz < jnl->jhdr->jhdr_size) {
1487 printf("jnl: open: jhdr_size is %d but orig phys blk size is %d. switching.\n",
1488 jnl->jhdr->jhdr_size, orig_blksz);
1489
1490 jnl->jhdr->jhdr_size = orig_blksz;
1491 }
1492 }
1493
1494 // make sure this is in sync!
1495 jnl->active_start = jnl->jhdr->start;
1496
1497 // set this now, after we've replayed the journal
1498 size_up_tbuffer(jnl, tbuffer_size, phys_blksz);
1499
1500 lck_mtx_init(&jnl->jlock, jnl_mutex_group, jnl_lock_attr);
1501
1502 return jnl;
1503
1504 bad_journal:
1505 if (orig_blksz != 0) {
1506 phys_blksz = orig_blksz;
1507 VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, &context);
1508 }
1509 kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz);
1510 bad_kmem_alloc:
1511 FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
1512 return NULL;
1513 }
1514
1515
1516 int
1517 journal_is_clean(struct vnode *jvp,
1518 off_t offset,
1519 off_t journal_size,
1520 struct vnode *fsvp,
1521 size_t min_fs_block_size)
1522 {
1523 journal jnl;
1524 int phys_blksz, ret;
1525 int orig_checksum, checksum;
1526 struct vfs_context context;
1527
1528 context.vc_proc = current_proc();
1529 context.vc_ucred = FSCRED;
1530
1531 /* Get the real physical block size. */
1532 if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
1533 printf("jnl: is_clean: failed to get device block size.\n");
1534 return EINVAL;
1535 }
1536
1537 if (phys_blksz > min_fs_block_size) {
1538 printf("jnl: is_clean: error: phys blksize %d bigger than min fs blksize %d\n",
1539 phys_blksz, min_fs_block_size);
1540 return EINVAL;
1541 }
1542
1543 if ((journal_size % phys_blksz) != 0) {
1544 printf("jnl: is_clean: journal size 0x%llx is not an even multiple of block size 0x%x\n",
1545 journal_size, phys_blksz);
1546 return EINVAL;
1547 }
1548
1549 memset(&jnl, 0, sizeof(jnl));
1550
1551 if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl.header_buf, phys_blksz)) {
1552 printf("jnl: is_clean: could not allocate space for header buffer (%d bytes)\n", phys_blksz);
1553 return ENOMEM;
1554 }
1555
1556 jnl.jhdr = (journal_header *)jnl.header_buf;
1557 memset(jnl.jhdr, 0, sizeof(journal_header)+4);
1558
1559 jnl.jdev = jvp;
1560 jnl.jdev_offset = offset;
1561 jnl.fsdev = fsvp;
1562
1563 // we have to set this up here so that do_journal_io() will work
1564 jnl.jhdr->jhdr_size = phys_blksz;
1565
1566 if (read_journal_header(&jnl, jnl.jhdr, phys_blksz) != phys_blksz) {
1567 printf("jnl: is_clean: could not read %d bytes for the journal header.\n",
1568 phys_blksz);
1569 ret = EINVAL;
1570 goto get_out;
1571 }
1572
1573 orig_checksum = jnl.jhdr->checksum;
1574 jnl.jhdr->checksum = 0;
1575
1576 if (jnl.jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
1577 // do this before the swap since it's done byte-at-a-time
1578 orig_checksum = SWAP32(orig_checksum);
1579 checksum = calc_checksum((char *)jnl.jhdr, sizeof(struct journal_header));
1580 swap_journal_header(&jnl);
1581 jnl.flags |= JOURNAL_NEED_SWAP;
1582 } else {
1583 checksum = calc_checksum((char *)jnl.jhdr, sizeof(struct journal_header));
1584 }
1585
1586 if (jnl.jhdr->magic != JOURNAL_HEADER_MAGIC && jnl.jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
1587 printf("jnl: is_clean: journal magic is bad (0x%x != 0x%x)\n",
1588 jnl.jhdr->magic, JOURNAL_HEADER_MAGIC);
1589 ret = EINVAL;
1590 goto get_out;
1591 }
1592
1593 if (orig_checksum != checksum) {
1594 printf("jnl: is_clean: journal checksum is bad (0x%x != 0x%x)\n", orig_checksum, checksum);
1595 ret = EINVAL;
1596 goto get_out;
1597 }
1598
1599 //
1600 // if the start and end are equal then the journal is clean.
1601 // otherwise it's not clean and therefore an error.
1602 //
1603 if (jnl.jhdr->start == jnl.jhdr->end) {
1604 ret = 0;
1605 } else {
1606 ret = EINVAL;
1607 }
1608
1609 get_out:
1610 kmem_free(kernel_map, (vm_offset_t)jnl.header_buf, phys_blksz);
1611
1612 return ret;
1613
1614
1615 }
1616
1617
1618
//
// Shut down a journal and free its in-memory state.  For a valid journal
// this ends/flushes any in-flight transactions, waits (bounded) for the
// buffer cache to drain so start catches up to end, syncs the on-disk
// header, and frees everything.  For an invalidated journal it aborts
// outstanding transactions instead of writing anything.
//
void
journal_close(journal *jnl)
{
	volatile off_t *start, *end;
	int             counter=0;

	CHECK_JOURNAL(jnl);

	// set this before doing anything that would block so that
	// we start tearing things down properly.
	//
	jnl->flags |= JOURNAL_CLOSE_PENDING;

	// if we already own the journal (nested call) don't deadlock
	// by re-locking it
	if (jnl->owner != current_thread()) {
		lock_journal(jnl);
	}

	//
	// only write stuff to disk if the journal is still valid
	//
	if ((jnl->flags & JOURNAL_INVALID) == 0) {

		if (jnl->active_tr) {
			journal_end_transaction(jnl);
		}

		// flush any buffered transactions
		if (jnl->cur_tr) {
			transaction *tr = jnl->cur_tr;

			jnl->cur_tr = NULL;
			end_transaction(tr, 1);   // force it to get flushed
		}

		// volatile: both values are advanced by other threads while
		// we poll them below
		//start = &jnl->jhdr->start;
		start = &jnl->active_start;
		end = &jnl->jhdr->end;

		// poll (up to 500 ticks) for the fs flush callback to push
		// enough buffers that all journaled transactions complete
		while (*start != *end && counter++ < 500) {
			printf("jnl: close: flushing the buffer cache (start 0x%llx end 0x%llx)\n", *start, *end);
			if (jnl->flush) {
				jnl->flush(jnl->flush_arg);
			}
			tsleep((caddr_t)jnl, PRIBIO, "jnl_close", 1);
		}

		if (*start != *end) {
			printf("jnl: close: buffer flushing didn't seem to flush out all the transactions! (0x%llx - 0x%llx)\n",
				*start, *end);
		}

		// make sure this is in sync when we close the journal
		jnl->jhdr->start = jnl->active_start;

		// if this fails there's not much we can do at this point...
		write_journal_header(jnl);
	} else {
		// if we're here the journal isn't valid any more.
		// so make sure we don't leave any locked blocks lying around
		printf("jnl: close: journal 0x%x, is invalid.  aborting outstanding transactions\n", jnl);
		if (jnl->active_tr || jnl->cur_tr) {
			transaction *tr;
			// at most one of active_tr/cur_tr should be set; the
			// panic below enforces that invariant
			if (jnl->active_tr) {
				tr = jnl->active_tr;
				jnl->active_tr = NULL;
			} else {
				tr = jnl->cur_tr;
				jnl->cur_tr = NULL;
			}

			abort_transaction(jnl, tr);
			if (jnl->active_tr || jnl->cur_tr) {
				panic("jnl: close: jnl @ 0x%x had both an active and cur tr\n", jnl);
			}
		}
	}

	free_old_stuff(jnl);

	kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, jnl->jhdr->jhdr_size);
	// poison the pointer so a use-after-close is obvious in a panic log
	jnl->jhdr = (void *)0xbeefbabe;

	FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
}
1703
1704 static void
1705 dump_journal(journal *jnl)
1706 {
1707 transaction *ctr;
1708
1709 printf("journal:");
1710 printf(" jdev_offset %.8llx\n", jnl->jdev_offset);
1711 printf(" magic: 0x%.8x\n", jnl->jhdr->magic);
1712 printf(" start: 0x%.8llx\n", jnl->jhdr->start);
1713 printf(" end: 0x%.8llx\n", jnl->jhdr->end);
1714 printf(" size: 0x%.8llx\n", jnl->jhdr->size);
1715 printf(" blhdr size: %d\n", jnl->jhdr->blhdr_size);
1716 printf(" jhdr size: %d\n", jnl->jhdr->jhdr_size);
1717 printf(" chksum: 0x%.8x\n", jnl->jhdr->checksum);
1718
1719 printf(" completed transactions:\n");
1720 for(ctr=jnl->completed_trs; ctr; ctr=ctr->next) {
1721 printf(" 0x%.8llx - 0x%.8llx\n", ctr->journal_start, ctr->journal_end);
1722 }
1723 }
1724
1725
1726
1727 static off_t
1728 free_space(journal *jnl)
1729 {
1730 off_t free_space;
1731
1732 if (jnl->jhdr->start < jnl->jhdr->end) {
1733 free_space = jnl->jhdr->size - (jnl->jhdr->end - jnl->jhdr->start) - jnl->jhdr->jhdr_size;
1734 } else if (jnl->jhdr->start > jnl->jhdr->end) {
1735 free_space = jnl->jhdr->start - jnl->jhdr->end;
1736 } else {
1737 // journal is completely empty
1738 free_space = jnl->jhdr->size - jnl->jhdr->jhdr_size;
1739 }
1740
1741 return free_space;
1742 }
1743
1744
//
// The journal must be locked on entry to this function.
// The "desired_size" is in bytes.
//
// Blocks until the journal has more than desired_size bytes free,
// reclaiming space by retiring entries from jnl->old_start[] (transactions
// whose buffers have been flushed) and nudging the fs flush callback.
// Returns 0 once space is available, or ENOSPC after giving up.
//
static int
check_free_space(journal *jnl, int desired_size)
{
	int i, counter=0;

	while (1) {
		int old_start_empty;

		// 5000 iterations without progress means buffer flushing is
		// wedged -- dump state and panic so the hang is diagnosable
		if (counter++ == 5000) {
			dump_journal(jnl);
			panic("jnl: check_free_space: buffer flushing isn't working "
				"(jnl @ 0x%x s %lld e %lld f %lld [active start %lld]).\n", jnl,
				jnl->jhdr->start, jnl->jhdr->end, free_space(jnl), jnl->active_start);
		}
		if (counter > 7500) {
			printf("jnl: check_free_space: giving up waiting for free space.\n");
			return ENOSPC;
		}

		// make sure there's space in the journal to hold this transaction
		if (free_space(jnl) > desired_size) {
			break;
		}

		//
		// here's where we lazily bump up jnl->jhdr->start.  we'll consume
		// entries until there is enough space for the next transaction.
		//
		old_start_empty = 1;
		lock_oldstart(jnl);
		for(i=0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {
			int counter;   // NOTE: intentionally shadows the outer counter

			counter = 0;
			// high bit set == transaction still flushing; wait for the
			// flusher to clear it.  the oldstart lock is dropped around
			// the sleep so the flusher can make progress.
			while (jnl->old_start[i] & 0x8000000000000000LL) {
				if (counter++ > 100) {
					panic("jnl: check_free_space: tr starting @ 0x%llx not flushing (jnl 0x%x).\n",
						jnl->old_start[i], jnl);
				}

				unlock_oldstart(jnl);
				if (jnl->flush) {
					jnl->flush(jnl->flush_arg);
				}
				tsleep((caddr_t)jnl, PRIBIO, "check_free_space1", 1);
				lock_oldstart(jnl);
			}

			if (jnl->old_start[i] == 0) {
				continue;
			}

			// retire this entry: its transaction is fully flushed, so
			// the journal's start can safely advance past it
			old_start_empty = 0;
			jnl->jhdr->start = jnl->old_start[i];
			jnl->old_start[i] = 0;
			if (free_space(jnl) > desired_size) {
				// persist the new start; drop the spinlock around the
				// header write since it can block
				unlock_oldstart(jnl);
				write_journal_header(jnl);
				lock_oldstart(jnl);
				break;
			}
		}
		unlock_oldstart(jnl);

		// if we bumped the start, loop and try again
		if (i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
			continue;
		} else if (old_start_empty) {
			//
			// if there is nothing in old_start anymore then we can
			// bump the jhdr->start to be the same as active_start
			// since it is possible there was only one very large
			// transaction in the old_start array.  if we didn't do
			// this then jhdr->start would never get updated and we
			// would wind up looping until we hit the panic at the
			// start of the loop.
			//
			jnl->jhdr->start = jnl->active_start;
			write_journal_header(jnl);
			continue;
		}


		// if the file system gave us a flush function, call it to so that
		// it can flush some blocks which hopefully will cause some transactions
		// to complete and thus free up space in the journal.
		if (jnl->flush) {
			jnl->flush(jnl->flush_arg);
		}

		// wait for a while to avoid being cpu-bound (this will
		// put us to sleep for 10 milliseconds)
		tsleep((caddr_t)jnl, PRIBIO, "check_free_space2", 1);
	}

	return 0;
}
1849
//
// Begin a transaction.  Supports nesting: if the calling thread already
// owns the journal this just bumps nested_count.  Otherwise it acquires
// the journal lock, reserves tbuffer_size bytes of journal space, and
// installs an active transaction (reusing a buffered one if present).
//
// LOCKING: on success (return 0) the journal lock is HELD by the caller
// until journal_end_transaction().  On failure the lock is released.
// Returns 0, EINVAL (journal invalidated), ENOSPC, or ENOMEM.
//
int
journal_start_transaction(journal *jnl)
{
	int ret;
	transaction *tr;

	CHECK_JOURNAL(jnl);

	if (jnl->flags & JOURNAL_INVALID) {
		return EINVAL;
	}

	// nested call from the owning thread: there must already be an
	// active transaction to nest into
	if (jnl->owner == current_thread()) {
		if (jnl->active_tr == NULL) {
			panic("jnl: start_tr: active_tr is NULL (jnl @ 0x%x, owner 0x%x, current_thread 0x%x\n",
				jnl, jnl->owner, current_thread());
		}
		jnl->nested_count++;
		return 0;
	}

	lock_journal(jnl);

	// with the lock held no other transaction state may exist
	if (jnl->owner != NULL || jnl->nested_count != 0 || jnl->active_tr != NULL) {
		panic("jnl: start_tr: owner 0x%x, nested count 0x%x, active_tr 0x%x jnl @ 0x%x\n",
			jnl->owner, jnl->nested_count, jnl->active_tr, jnl);
	}

	jnl->owner = current_thread();
	jnl->nested_count = 1;

	free_old_stuff(jnl);

	// make sure there's room in the journal
	if (check_free_space(jnl, jnl->tbuffer_size) != 0) {
		printf("jnl: start transaction failed: no space\n");
		ret = ENOSPC;
		goto bad_start;
	}

	// if there's a buffered transaction, use it.
	if (jnl->cur_tr) {
		jnl->active_tr = jnl->cur_tr;
		jnl->cur_tr = NULL;

		return 0;
	}

	MALLOC_ZONE(tr, transaction *, sizeof(transaction), M_JNL_TR, M_WAITOK);
	memset(tr, 0, sizeof(transaction));

	tr->tbuffer_size = jnl->tbuffer_size;

	if (kmem_alloc(kernel_map, (vm_offset_t *)&tr->tbuffer, tr->tbuffer_size)) {
		FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
		printf("jnl: start transaction failed: no tbuffer mem\n");
		ret = ENOMEM;
		goto bad_start;
	}

	// journal replay code checksum check depends on this.
	memset(tr->tbuffer, 0, BLHDR_CHECKSUM_SIZE);

	// the first block-list header lives at the front of the tbuffer;
	// binfo[0] is reserved for the header itself, hence num_blocks = 1
	tr->blhdr = (block_list_header *)tr->tbuffer;
	tr->blhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
	tr->blhdr->num_blocks = 1;      // accounts for this header block
	tr->blhdr->bytes_used = jnl->jhdr->blhdr_size;

	tr->num_blhdrs = 1;
	tr->total_bytes = jnl->jhdr->blhdr_size;
	tr->jnl = jnl;

	jnl->active_tr = tr;

	return 0;

  bad_start:
	// undo the ownership claim and release the lock on failure
	jnl->owner = NULL;
	jnl->nested_count = 0;
	unlock_journal(jnl);
	return ret;
}
1934
1935
1936 int
1937 journal_modify_block_start(journal *jnl, struct buf *bp)
1938 {
1939 transaction *tr;
1940
1941 CHECK_JOURNAL(jnl);
1942
1943 if (jnl->flags & JOURNAL_INVALID) {
1944 return EINVAL;
1945 }
1946
1947 // XXXdbg - for debugging I want this to be true. later it may
1948 // not be necessary.
1949 if ((buf_flags(bp) & B_META) == 0) {
1950 panic("jnl: modify_block_start: bp @ 0x%x is not a meta-data block! (jnl 0x%x)\n", bp, jnl);
1951 }
1952
1953 tr = jnl->active_tr;
1954 CHECK_TRANSACTION(tr);
1955
1956 if (jnl->owner != current_thread()) {
1957 panic("jnl: modify_block_start: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
1958 jnl, jnl->owner, current_thread());
1959 }
1960
1961 free_old_stuff(jnl);
1962
1963 //printf("jnl: mod block start (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d; total bytes %d)\n",
1964 // bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes);
1965
1966 // can't allow blocks that aren't an even multiple of the
1967 // underlying block size.
1968 if ((buf_size(bp) % jnl->jhdr->jhdr_size) != 0) {
1969 panic("jnl: mod block start: bufsize %d not a multiple of block size %d\n",
1970 buf_size(bp), jnl->jhdr->jhdr_size);
1971 return -1;
1972 }
1973
1974 // make sure that this transaction isn't bigger than the whole journal
1975 if (tr->total_bytes+buf_size(bp) >= (jnl->jhdr->size - jnl->jhdr->jhdr_size)) {
1976 panic("jnl: transaction too big (%d >= %lld bytes, bufsize %d, tr 0x%x bp 0x%x)\n",
1977 tr->total_bytes, (tr->jnl->jhdr->size - jnl->jhdr->jhdr_size), buf_size(bp), tr, bp);
1978 return -1;
1979 }
1980
1981 // if the block is dirty and not already locked we have to write
1982 // it out before we muck with it because it has data that belongs
1983 // (presumably) to another transaction.
1984 //
1985 if ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI) {
1986
1987 if (buf_flags(bp) & B_ASYNC) {
1988 panic("modify_block_start: bp @ 0x% has async flag set!\n", bp);
1989 }
1990
1991 // this will cause it to not be buf_brelse()'d
1992 buf_setflags(bp, B_NORELSE);
1993 VNOP_BWRITE(bp);
1994 }
1995 buf_setflags(bp, B_LOCKED);
1996
1997 return 0;
1998 }
1999
2000 int
2001 journal_modify_block_abort(journal *jnl, struct buf *bp)
2002 {
2003 transaction *tr;
2004 block_list_header *blhdr;
2005 int i, j;
2006
2007 CHECK_JOURNAL(jnl);
2008
2009 tr = jnl->active_tr;
2010
2011 //
2012 // if there's no active transaction then we just want to
2013 // call buf_brelse() and return since this is just a block
2014 // that happened to be modified as part of another tr.
2015 //
2016 if (tr == NULL) {
2017 buf_brelse(bp);
2018 return 0;
2019 }
2020
2021 if (jnl->flags & JOURNAL_INVALID) {
2022 return EINVAL;
2023 }
2024
2025 CHECK_TRANSACTION(tr);
2026
2027 if (jnl->owner != current_thread()) {
2028 panic("jnl: modify_block_abort: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
2029 jnl, jnl->owner, current_thread());
2030 }
2031
2032 free_old_stuff(jnl);
2033
2034 // printf("jnl: modify_block_abort: tr 0x%x bp 0x%x\n", jnl->active_tr, bp);
2035
2036 // first check if it's already part of this transaction
2037 for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
2038 for(i=1; i < blhdr->num_blocks; i++) {
2039 if (bp == blhdr->binfo[i].bp) {
2040 if (buf_size(bp) != blhdr->binfo[i].bsize) {
2041 panic("jnl: bp @ 0x%x changed size on me! (%d vs. %d, jnl 0x%x)\n",
2042 bp, buf_size(bp), blhdr->binfo[i].bsize, jnl);
2043 }
2044 break;
2045 }
2046 }
2047
2048 if (i < blhdr->num_blocks) {
2049 break;
2050 }
2051 }
2052
2053 //
2054 // if blhdr is null, then this block has only had modify_block_start
2055 // called on it as part of the current transaction. that means that
2056 // it is ok to clear the LOCKED bit since it hasn't actually been
2057 // modified. if blhdr is non-null then modify_block_end was called
2058 // on it and so we need to keep it locked in memory.
2059 //
2060 if (blhdr == NULL) {
2061 buf_clearflags(bp, B_LOCKED);
2062 }
2063
2064 buf_brelse(bp);
2065 return 0;
2066 }
2067
2068
2069 int
2070 journal_modify_block_end(journal *jnl, struct buf *bp)
2071 {
2072 int i, j, tbuffer_offset;
2073 char *blkptr;
2074 block_list_header *blhdr, *prev=NULL;
2075 transaction *tr;
2076
2077 CHECK_JOURNAL(jnl);
2078
2079 if (jnl->flags & JOURNAL_INVALID) {
2080 return EINVAL;
2081 }
2082
2083 tr = jnl->active_tr;
2084 CHECK_TRANSACTION(tr);
2085
2086 if (jnl->owner != current_thread()) {
2087 panic("jnl: modify_block_end: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
2088 jnl, jnl->owner, current_thread());
2089 }
2090
2091 free_old_stuff(jnl);
2092
2093 //printf("jnl: mod block end: (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d, total bytes %d)\n",
2094 // bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes);
2095
2096 if ((buf_flags(bp) & B_LOCKED) == 0) {
2097 panic("jnl: modify_block_end: bp 0x%x not locked! jnl @ 0x%x\n", bp, jnl);
2098 }
2099
2100 // first check if it's already part of this transaction
2101 for(blhdr=tr->blhdr; blhdr; prev=blhdr,blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
2102 tbuffer_offset = jnl->jhdr->blhdr_size;
2103
2104 for(i=1; i < blhdr->num_blocks; i++) {
2105 if (bp == blhdr->binfo[i].bp) {
2106 if (buf_size(bp) != blhdr->binfo[i].bsize) {
2107 panic("jnl: bp @ 0x%x changed size on me! (%d vs. %d, jnl 0x%x)\n",
2108 bp, buf_size(bp), blhdr->binfo[i].bsize, jnl);
2109 }
2110 break;
2111 }
2112 tbuffer_offset += blhdr->binfo[i].bsize;
2113 }
2114
2115 if (i < blhdr->num_blocks) {
2116 break;
2117 }
2118 }
2119
2120 if (blhdr == NULL
2121 && prev
2122 && (prev->num_blocks+1) <= prev->max_blocks
2123 && (prev->bytes_used+buf_size(bp)) <= tr->tbuffer_size) {
2124 blhdr = prev;
2125 } else if (blhdr == NULL) {
2126 block_list_header *nblhdr;
2127
2128 if (prev == NULL) {
2129 panic("jnl: modify block end: no way man, prev == NULL?!?, jnl 0x%x, bp 0x%x\n", jnl, bp);
2130 }
2131
2132 // we got to the end of the list, didn't find the block and there's
2133 // no room in the block_list_header pointed to by prev
2134
2135 // we allocate another tbuffer and link it in at the end of the list
2136 // through prev->binfo[0].bnum. that's a skanky way to do things but
2137 // avoids having yet another linked list of small data structures to manage.
2138
2139 if (kmem_alloc(kernel_map, (vm_offset_t *)&nblhdr, tr->tbuffer_size)) {
2140 panic("jnl: end_tr: no space for new block tr @ 0x%x (total bytes: %d)!\n",
2141 tr, tr->total_bytes);
2142 }
2143
2144 // journal replay code checksum check depends on this.
2145 memset(nblhdr, 0, BLHDR_CHECKSUM_SIZE);
2146
2147 // initialize the new guy
2148 nblhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
2149 nblhdr->num_blocks = 1; // accounts for this header block
2150 nblhdr->bytes_used = jnl->jhdr->blhdr_size;
2151
2152 tr->num_blhdrs++;
2153 tr->total_bytes += jnl->jhdr->blhdr_size;
2154
2155 // then link him in at the end
2156 prev->binfo[0].bnum = (off_t)((long)nblhdr);
2157
2158 // and finally switch to using the new guy
2159 blhdr = nblhdr;
2160 tbuffer_offset = jnl->jhdr->blhdr_size;
2161 i = 1;
2162 }
2163
2164
2165 if ((i+1) > blhdr->max_blocks) {
2166 panic("jnl: modify_block_end: i = %d, max_blocks %d\n", i, blhdr->max_blocks);
2167 }
2168
2169 // copy the data into the in-memory transaction buffer
2170 blkptr = (char *)&((char *)blhdr)[tbuffer_offset];
2171 memcpy(blkptr, buf_dataptr(bp), buf_size(bp));
2172
2173 // if this is true then this is a new block we haven't seen
2174 if (i >= blhdr->num_blocks) {
2175 int bsize;
2176 vnode_t vp;
2177
2178 vp = buf_vnode(bp);
2179 vnode_ref(vp);
2180 bsize = buf_size(bp);
2181
2182 blhdr->binfo[i].bnum = (off_t)(buf_blkno(bp));
2183 blhdr->binfo[i].bsize = bsize;
2184 blhdr->binfo[i].bp = bp;
2185
2186 blhdr->bytes_used += bsize;
2187 tr->total_bytes += bsize;
2188
2189 blhdr->num_blocks++;
2190 }
2191 buf_bdwrite(bp);
2192
2193 return 0;
2194 }
2195
2196 int
2197 journal_kill_block(journal *jnl, struct buf *bp)
2198 {
2199 int i;
2200 int bflags;
2201 block_list_header *blhdr;
2202 transaction *tr;
2203
2204 CHECK_JOURNAL(jnl);
2205
2206 if (jnl->flags & JOURNAL_INVALID) {
2207 return EINVAL;
2208 }
2209
2210 tr = jnl->active_tr;
2211 CHECK_TRANSACTION(tr);
2212
2213 if (jnl->owner != current_thread()) {
2214 panic("jnl: modify_block_end: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
2215 jnl, jnl->owner, current_thread());
2216 }
2217
2218 free_old_stuff(jnl);
2219
2220 bflags = buf_flags(bp);
2221
2222 if ( !(bflags & B_LOCKED))
2223 panic("jnl: modify_block_end: called with bp not B_LOCKED");
2224
2225 /*
2226 * bp must be BL_BUSY and B_LOCKED
2227 */
2228 // first check if it's already part of this transaction
2229 for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
2230
2231 for(i=1; i < blhdr->num_blocks; i++) {
2232 if (bp == blhdr->binfo[i].bp) {
2233 vnode_t vp;
2234
2235 buf_clearflags(bp, B_LOCKED);
2236
2237 // this undoes the vnode_ref() in journal_modify_block_end()
2238 vp = buf_vnode(bp);
2239 vnode_rele_ext(vp, 0, 1);
2240
2241 // if the block has the DELWRI and FILTER bits sets, then
2242 // things are seriously weird. if it was part of another
2243 // transaction then journal_modify_block_start() should
2244 // have force it to be written.
2245 //
2246 //if ((bflags & B_DELWRI) && (bflags & B_FILTER)) {
2247 // panic("jnl: kill block: this defies all logic! bp 0x%x\n", bp);
2248 //} else {
2249 tr->num_killed += buf_size(bp);
2250 //}
2251 blhdr->binfo[i].bp = NULL;
2252 blhdr->binfo[i].bnum = (off_t)-1;
2253
2254 buf_brelse(bp);
2255
2256 break;
2257 }
2258 }
2259
2260 if (i < blhdr->num_blocks) {
2261 break;
2262 }
2263 }
2264
2265 return 0;
2266 }
2267
2268
2269 static int
2270 journal_binfo_cmp(void *a, void *b)
2271 {
2272 block_info *bi_a = (struct block_info *)a;
2273 block_info *bi_b = (struct block_info *)b;
2274 daddr64_t res;
2275
2276 if (bi_a->bp == NULL) {
2277 return 1;
2278 }
2279 if (bi_b->bp == NULL) {
2280 return -1;
2281 }
2282
2283 // don't have to worry about negative block
2284 // numbers so this is ok to do.
2285 //
2286 res = (buf_blkno(bi_a->bp) - buf_blkno(bi_b->bp));
2287
2288 return (int)res;
2289 }
2290
2291
/*
 * end_transaction - flush (or buffer, for group commit) a finished
 * transaction to the on-disk journal.
 *
 * Side effects on success: the transaction's blocks are written to the
 * journal, the journal header is updated, every buffer gets
 * buffer_flushed_callback installed as its iodone filter and is kicked
 * off with buf_bawrite(), and the block_list_header tbuffers are freed.
 * On failure the journal is marked JOURNAL_INVALID and the transaction
 * is aborted.
 *
 * force_it != 0 disables the group-commit shortcut.
 * Returns 0 on success, -1 on failure.
 */
static int
end_transaction(transaction *tr, int force_it)
{
	int i, j, ret, amt;
	errno_t errno;
	off_t end;
	journal *jnl = tr->jnl;
	struct buf *bp;
	block_list_header *blhdr=NULL, *next=NULL;

	if (jnl->cur_tr) {
		panic("jnl: jnl @ 0x%x already has cur_tr 0x%x, new tr: 0x%x\n",
		      jnl, jnl->cur_tr, tr);
	}

	// if there weren't any modified blocks in the transaction
	// just save off the transaction pointer and return.
	// (total_bytes == blhdr_size means only the header was accounted.)
	if (tr->total_bytes == jnl->jhdr->blhdr_size) {
		jnl->cur_tr = tr;
		return 0;
	}

	// if our transaction buffer isn't very full, just hang
	// on to it and don't actually flush anything.  this is
	// what is known as "group commit".  we will flush the
	// transaction buffer if it's full or if we have more than
	// one of them so we don't start hogging too much memory.
	//
	if (   force_it == 0
	    && (jnl->flags & JOURNAL_NO_GROUP_COMMIT) == 0
	    && tr->num_blhdrs < 3
	    && (tr->total_bytes <= ((tr->tbuffer_size*tr->num_blhdrs) - tr->tbuffer_size/8))) {

		jnl->cur_tr = tr;
		return 0;
	}


	// if we're here we're going to flush the transaction buffer to disk.
	// make sure there is room in the journal first.
	check_free_space(jnl, tr->total_bytes);

	// range check the end index
	if (jnl->jhdr->end <= 0 || jnl->jhdr->end > jnl->jhdr->size) {
		panic("jnl: end_transaction: end is bogus 0x%llx (sz 0x%llx)\n",
		      jnl->jhdr->end, jnl->jhdr->size);
	}

	// this transaction starts where the current journal ends
	tr->journal_start = jnl->jhdr->end;
	end               = jnl->jhdr->end;

	//
	// if the first entry in old_start[] isn't free yet, loop calling the
	// file system flush routine until it is (or we panic).
	// the high bit of an old_start entry marks a still-in-flight
	// transaction; buffer_flushed_callback clears it when done.
	//
	i = 0;
	lock_oldstart(jnl);
	while ((jnl->old_start[0] & 0x8000000000000000LL) != 0) {
		if (jnl->flush) {
			unlock_oldstart(jnl);

			if (jnl->flush) {
				jnl->flush(jnl->flush_arg);
			}

			// yield the cpu so others can get in to clear the lock bit
			(void)tsleep((void *)jnl, PRIBIO, "jnl-old-start-sleep", 1);

			lock_oldstart(jnl);
		}
		if (i++ >= 500) {
			panic("jnl: transaction that started at 0x%llx is not completing! jnl 0x%x\n",
			      jnl->old_start[0] & (~0x8000000000000000LL), jnl);
		}
	}

	//
	// slide everyone else down and put our latest guy in the last
	// entry in the old_start array
	//
	memcpy(&jnl->old_start[0], &jnl->old_start[1], sizeof(jnl->old_start)-sizeof(jnl->old_start[0]));
	jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] = tr->journal_start | 0x8000000000000000LL;

	unlock_oldstart(jnl);


	// for each block, make sure that the physical block # is set
	for(blhdr=tr->blhdr; blhdr; blhdr=next) {

		for(i=1; i < blhdr->num_blocks; i++) {
			daddr64_t blkno;
			daddr64_t lblkno;
			struct vnode *vp;

			bp = blhdr->binfo[i].bp;
			if (bp == NULL) {   // only true if a block was "killed"
				if (blhdr->binfo[i].bnum != (off_t)-1) {
					panic("jnl: inconsistent binfo (NULL bp w/bnum %lld; jnl @ 0x%x, tr 0x%x)\n",
					      blhdr->binfo[i].bnum, jnl, tr);
				}
				continue;
			}
			vp = buf_vnode(bp);
			blkno = buf_blkno(bp);
			lblkno = buf_lblkno(bp);

			if (vp == NULL && lblkno == blkno) {
				printf("jnl: end_tr: bad news! bp @ 0x%x w/null vp and l/blkno = %qd/%qd.  aborting the transaction (tr 0x%x jnl 0x%x).\n",
				       bp, lblkno, blkno, tr, jnl);
				goto bad_journal;
			}

			// if the lblkno is the same as blkno and this bp isn't
			// associated with the underlying file system device then
			// we need to call bmap() to get the actual physical block.
			//
			if ((lblkno == blkno) && (vp != jnl->fsdev)) {
				off_t	f_offset;
				size_t 	contig_bytes;

				if (VNOP_BLKTOOFF(vp, lblkno, &f_offset)) {
					printf("jnl: end_tr: vnop_blktooff failed @ 0x%x, jnl 0x%x\n", bp, jnl);
					goto bad_journal;
				}
				if (VNOP_BLOCKMAP(vp, f_offset, buf_count(bp), &blkno, &contig_bytes, NULL, 0, NULL)) {
					printf("jnl: end_tr: can't blockmap the bp @ 0x%x, jnl 0x%x\n", bp, jnl);
					goto bad_journal;
				}
				// journaled blocks must map to one contiguous disk extent
				if ((uint32_t)contig_bytes < buf_count(bp)) {
					printf("jnl: end_tr: blk not physically contiguous on disk@ 0x%x, jnl 0x%x\n", bp, jnl);
					goto bad_journal;
				}
				buf_setblkno(bp, blkno);
			}
			// update this so we write out the correct physical block number!
			blhdr->binfo[i].bnum = (off_t)(blkno);
		}

		next = (block_list_header *)((long)blhdr->binfo[0].bnum);
	}

	// write each block_list_header (with its trailing data) to the journal
	for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {

		amt = blhdr->bytes_used;

		// checksum covers the header with the checksum field zeroed
		blhdr->checksum = 0;
		blhdr->checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);

		ret = write_journal_data(jnl, &end, blhdr, amt);
		if (ret != amt) {
			printf("jnl: end_transaction: only wrote %d of %d bytes to the journal!\n",
			       ret, amt);

			goto bad_journal;
		}
	}

	jnl->jhdr->end  = end;    // update where the journal now ends
	tr->journal_end = end;    // the transaction ends here too
	if (tr->journal_start == 0 || tr->journal_end == 0) {
		panic("jnl: end_transaction: bad tr journal start/end: 0x%llx 0x%llx\n",
		      tr->journal_start, tr->journal_end);
	}

	if (write_journal_header(jnl) != 0) {
		goto bad_journal;
	}

	//
	// setup for looping through all the blhdr's.  we null out the
	// tbuffer and blhdr fields so that they're not used any more.
	//
	blhdr       = tr->blhdr;
	tr->tbuffer = NULL;
	tr->blhdr   = NULL;

	// the buffer_flushed_callback will only be called for the
	// real blocks that get flushed so we have to account for
	// the block_list_headers here.
	//
	tr->num_flushed = tr->num_blhdrs * jnl->jhdr->blhdr_size;

	// for each block, set the iodone callback and unlock it
	for(; blhdr; blhdr=next) {

		// we can re-order the buf ptrs because everything is written out already
		qsort(&blhdr->binfo[1], blhdr->num_blocks-1, sizeof(block_info), journal_binfo_cmp);

		for(i=1; i < blhdr->num_blocks; i++) {
			if (blhdr->binfo[i].bp == NULL) {
				continue;
			}

			// re-acquire the buffer from the cache; it should come
			// back as the exact same bp we stashed (it is B_LOCKED)
			errno = buf_meta_bread(buf_vnode(blhdr->binfo[i].bp),
					       buf_lblkno(blhdr->binfo[i].bp),
					       buf_size(blhdr->binfo[i].bp),
					       NOCRED,
					       &bp);
			if (errno == 0 && bp != NULL) {
				struct vnode *save_vp;
				void *cur_filter;

				if (bp != blhdr->binfo[i].bp) {
					panic("jnl: end_tr: got back a different bp! (bp 0x%x should be 0x%x, jnl 0x%x\n",
					      bp, blhdr->binfo[i].bp, jnl);
				}

				if ((buf_flags(bp) & (B_LOCKED|B_DELWRI)) != (B_LOCKED|B_DELWRI)) {
					if (jnl->flags & JOURNAL_CLOSE_PENDING) {
						buf_clearflags(bp, B_LOCKED);
						buf_brelse(bp);
						continue;
					} else {
						panic("jnl: end_tr: !!!DANGER!!! bp 0x%x flags (0x%x) not LOCKED & DELWRI\n", bp, buf_flags(bp));
					}
				}
				save_vp = buf_vnode(bp);

				buf_setfilter(bp, buffer_flushed_callback, tr, &cur_filter, NULL);

				if (cur_filter) {
					panic("jnl: bp @ 0x%x (blkno %qd, vp 0x%x) has non-null iodone (0x%x) buffflushcb 0x%x\n",
					      bp, buf_blkno(bp), save_vp, cur_filter, buffer_flushed_callback);
				}
				buf_clearflags(bp, B_LOCKED);

				// kicking off the write here helps performance
				buf_bawrite(bp);
				// XXXdbg this is good for testing: buf_bdwrite(bp);
				//buf_bdwrite(bp);

				// this undoes the vnode_ref() in journal_modify_block_end()
				vnode_rele_ext(save_vp, 0, 1);
			} else {
				printf("jnl: end_transaction: could not find block %Ld vp 0x%x!\n",
				       blhdr->binfo[i].bnum, blhdr->binfo[i].bp);
				if (bp) {
					buf_clearflags(bp, B_LOCKED);
					buf_brelse(bp);
				}
			}
		}

		next = (block_list_header *)((long)blhdr->binfo[0].bnum);

		// we can free blhdr here since we won't need it any more
		blhdr->binfo[0].bnum = 0xdeadc0de;
		kmem_free(kernel_map, (vm_offset_t)blhdr, tr->tbuffer_size);
	}

	return 0;


    bad_journal:
	jnl->flags |= JOURNAL_INVALID;
	// clear the in-flight bit we set earlier so free_old_stuff /
	// later transactions don't wait on a transaction that never landed
	jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] &= ~0x8000000000000000LL;
	abort_transaction(jnl, tr);
	return -1;
}
2554
/*
 * abort_transaction - discard a transaction without writing it to disk.
 *
 * For every block still referenced by the transaction: re-acquire it,
 * mark it invalid (which also clears its locked/delayed state on
 * release), release it, and drop the vnode_ref() taken by
 * journal_modify_block_end().  Then free each block_list_header tbuffer
 * and finally the transaction structure itself.  tr is not usable after
 * this returns.
 */
static void
abort_transaction(journal *jnl, transaction *tr)
{
	int i;
	errno_t errno;
	block_list_header *blhdr, *next;
	struct buf *bp;
	struct vnode *save_vp;

	// for each block list header, iterate over the blocks then
	// free up the memory associated with the block list.
	//
	// for each block, clear the lock bit and release it.
	//
	for(blhdr=tr->blhdr; blhdr; blhdr=next) {

		for(i=1; i < blhdr->num_blocks; i++) {
			if (blhdr->binfo[i].bp == NULL) {
				// killed block; nothing to release
				continue;
			}
			if ( (buf_vnode(blhdr->binfo[i].bp) == NULL) ||
			     !(buf_flags(blhdr->binfo[i].bp) & B_LOCKED) ) {
				// no vnode or already unlocked; skip it
				continue;
			}

			errno = buf_meta_bread(buf_vnode(blhdr->binfo[i].bp),
					       buf_lblkno(blhdr->binfo[i].bp),
					       buf_size(blhdr->binfo[i].bp),
					       NOCRED,
					       &bp);
			if (errno == 0) {
				if (bp != blhdr->binfo[i].bp) {
					panic("jnl: abort_tr: got back a different bp! (bp 0x%x should be 0x%x, jnl 0x%x\n",
					      bp, blhdr->binfo[i].bp, jnl);
				}

				// releasing a bp marked invalid
				// also clears the locked and delayed state
				buf_markinvalid(bp);
				// grab the vnode before brelse; bp may not be
				// safe to touch afterwards
				save_vp = buf_vnode(bp);

				buf_brelse(bp);

				// undoes the vnode_ref() from journal_modify_block_end()
				vnode_rele_ext(save_vp, 0, 1);
			} else {
				printf("jnl: abort_tr: could not find block %Ld vp 0x%x!\n",
				       blhdr->binfo[i].bnum, blhdr->binfo[i].bp);
				if (bp) {
					buf_brelse(bp);
				}
			}
		}

		next = (block_list_header *)((long)blhdr->binfo[0].bnum);

		// we can free blhdr here since we won't need it any more
		blhdr->binfo[0].bnum = 0xdeadc0de;
		kmem_free(kernel_map, (vm_offset_t)blhdr, tr->tbuffer_size);
	}

	// poison the transaction before freeing it to catch stale use
	tr->tbuffer     = NULL;
	tr->blhdr       = NULL;
	tr->total_bytes = 0xdbadc0de;
	FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
}
2620
2621
/*
 * journal_end_transaction - close the transaction opened by
 * journal_start_transaction().
 *
 * Nested calls just decrement the nesting count; only the outermost
 * call actually ends the transaction (via end_transaction(), which may
 * group-commit or flush it).  On an invalidated journal any active
 * transaction is aborted and EINVAL is returned.  In all terminating
 * paths the journal lock is dropped and ownership cleared.
 *
 * Returns 0 on success, EINVAL if the journal is invalid, or the result
 * of end_transaction().
 */
int
journal_end_transaction(journal *jnl)
{
	int ret;
	transaction *tr;

	CHECK_JOURNAL(jnl);

	// invalid journal with no owner: nothing was ever started; succeed
	if ((jnl->flags & JOURNAL_INVALID) && jnl->owner == NULL) {
		return 0;
	}

	if (jnl->owner != current_thread()) {
		panic("jnl: end_tr: I'm not the owner! jnl 0x%x, owner 0x%x, curact 0x%x\n",
		      jnl, jnl->owner, current_thread());
	}

	free_old_stuff(jnl);

	jnl->nested_count--;
	if (jnl->nested_count > 0) {
		// still inside a nested transaction; outermost caller will finish
		return 0;
	} else if (jnl->nested_count < 0) {
		panic("jnl: jnl @ 0x%x has negative nested count (%d). bad boy.\n", jnl, jnl->nested_count);
	}

	if (jnl->flags & JOURNAL_INVALID) {
		if (jnl->active_tr) {
			if (jnl->cur_tr != NULL) {
				panic("jnl: journal @ 0x%x has active tr (0x%x) and cur tr (0x%x)\n",
				      jnl, jnl->active_tr, jnl->cur_tr);
			}

			tr             = jnl->active_tr;
			jnl->active_tr = NULL;
			abort_transaction(jnl, tr);
		}

		jnl->owner = NULL;
		unlock_journal(jnl);

		return EINVAL;
	}

	tr = jnl->active_tr;
	CHECK_TRANSACTION(tr);

	// clear this out here so that when check_free_space() calls
	// the FS flush function, we don't panic in journal_flush()
	// if the FS were to call that.  note: check_free_space() is
	// called from end_transaction().
	//
	jnl->active_tr = NULL;
	ret = end_transaction(tr, 0);

	jnl->owner = NULL;
	unlock_journal(jnl);

	return ret;
}
2682
2683
2684 int
2685 journal_flush(journal *jnl)
2686 {
2687 int need_signal = 0;
2688
2689 CHECK_JOURNAL(jnl);
2690
2691 if (jnl->flags & JOURNAL_INVALID) {
2692 return -1;
2693 }
2694
2695 if (jnl->owner != current_thread()) {
2696 int ret;
2697
2698 lock_journal(jnl);
2699 need_signal = 1;
2700 }
2701
2702 free_old_stuff(jnl);
2703
2704 // if we're not active, flush any buffered transactions
2705 if (jnl->active_tr == NULL && jnl->cur_tr) {
2706 transaction *tr = jnl->cur_tr;
2707
2708 jnl->cur_tr = NULL;
2709 end_transaction(tr, 1); // force it to get flushed
2710 }
2711
2712 if (need_signal) {
2713 unlock_journal(jnl);
2714 }
2715
2716 return 0;
2717 }
2718
2719 int
2720 journal_active(journal *jnl)
2721 {
2722 if (jnl->flags & JOURNAL_INVALID) {
2723 return -1;
2724 }
2725
2726 return (jnl->active_tr == NULL) ? 0 : 1;
2727 }
2728
2729 void *
2730 journal_owner(journal *jnl)
2731 {
2732 return jnl->owner;
2733 }