]> git.saurik.com Git - apple/xnu.git/blob - bsd/vfs/vfs_journal.c
15103be1a70a7d368ca3e56ace3eb6a96ce35468
[apple/xnu.git] / bsd / vfs / vfs_journal.c
1 /*
2 * Copyright (c) 1995-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 //
31 // This file implements a simple write-ahead journaling layer.
32 // In theory any file system can make use of it by calling these
33 // functions when the fs wants to modify meta-data blocks. See
34 // vfs_journal.h for a more detailed description of the api and
35 // data structures.
36 //
37 // Dominic Giampaolo (dbg@apple.com)
38 //
39
40 #ifdef KERNEL
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/file_internal.h>
46 #include <sys/stat.h>
47 #include <sys/buf_internal.h>
48 #include <sys/proc_internal.h>
49 #include <sys/mount_internal.h>
50 #include <sys/namei.h>
51 #include <sys/vnode_internal.h>
52 #include <sys/ioctl.h>
53 #include <sys/tty.h>
54 #include <sys/ubc.h>
55 #include <sys/malloc.h>
56 #include <kern/thread.h>
57 #include <sys/disk.h>
58 #include <miscfs/specfs/specdev.h>
59
60 extern task_t kernel_task;
61
62 #else
63
64 #include <stdio.h>
65 #include <stdlib.h>
66 #include <string.h>
67 #include <limits.h>
68 #include <errno.h>
69 #include <fcntl.h>
70 #include <unistd.h>
71 #include <stdarg.h>
72 #include <sys/types.h>
73 #include "compat.h"
74
75 #endif /* KERNEL */
76
77 #include "vfs_journal.h"
78
79
80 // number of bytes to checksum in a block_list_header
81 // NOTE: this should be enough to clear out the header
82 // fields as well as the first entry of binfo[]
83 #define BLHDR_CHECKSUM_SIZE 32
84
85
86
87 static int end_transaction(transaction *tr, int force_it);
88 static void abort_transaction(journal *jnl, transaction *tr);
89 static void dump_journal(journal *jnl);
90
91 static __inline__ void lock_journal(journal *jnl);
92 static __inline__ void unlock_journal(journal *jnl);
93 static __inline__ void lock_oldstart(journal *jnl);
94 static __inline__ void unlock_oldstart(journal *jnl);
95
96
97
98
99 //
100 // 3105942 - Coalesce writes to the same block on journal replay
101 //
102
typedef struct bucket {
	off_t block_num;    // fs-device block number; -1 == free slot, -2 == marked for removal during overlap cleanup
	size_t jnl_offset;  // byte offset in the journal of the most recent data for this block
	size_t block_size;  // number of bytes of journaled data for this block
} bucket;
108
109 #define STARTING_BUCKETS 256
110
111 static int add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr);
112 static int grow_table(struct bucket **buf_ptr, int num_buckets, int new_size);
113 static int lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full);
114 static int do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr);
115 static int insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr, int overwriting);
116
// Sanity-check a journal: panic if the journal pointer, its devices,
// or the header fields (magic, start, end, size) look corrupt.
// The 1GB comparisons are upper-bound sanity limits on the header values.
#define CHECK_JOURNAL(jnl) \
	do { \
	if (jnl == NULL) {\
	panic("%s:%d: null journal ptr?\n", __FILE__, __LINE__);\
	}\
	if (jnl->jdev == NULL) { \
	panic("%s:%d: jdev is null!\n", __FILE__, __LINE__);\
	} \
	if (jnl->fsdev == NULL) { \
	panic("%s:%d: fsdev is null!\n", __FILE__, __LINE__);\
	} \
	if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC) {\
	panic("%s:%d: jhdr magic corrupted (0x%x != 0x%x)\n",\
	__FILE__, __LINE__, jnl->jhdr->magic, JOURNAL_HEADER_MAGIC);\
	}\
	if ( jnl->jhdr->start <= 0 \
	|| jnl->jhdr->start > jnl->jhdr->size\
	|| jnl->jhdr->start > 1024*1024*1024) {\
	panic("%s:%d: jhdr start looks bad (0x%llx max size 0x%llx)\n", \
	__FILE__, __LINE__, jnl->jhdr->start, jnl->jhdr->size);\
	}\
	if ( jnl->jhdr->end <= 0 \
	|| jnl->jhdr->end > jnl->jhdr->size\
	|| jnl->jhdr->end > 1024*1024*1024) {\
	panic("%s:%d: jhdr end looks bad (0x%llx max size 0x%llx)\n", \
	__FILE__, __LINE__, jnl->jhdr->end, jnl->jhdr->size);\
	}\
	if (jnl->jhdr->size > 1024*1024*1024) {\
	panic("%s:%d: jhdr size looks bad (0x%llx)\n",\
	__FILE__, __LINE__, jnl->jhdr->size);\
	} \
	} while(0)
149
// Sanity-check a transaction: panic on a NULL transaction or journal
// back-pointer, a block-list header that does not sit at the start of
// the transaction buffer, or byte counts / journal offsets that are
// negative or beyond the 1GB sanity limit.
#define CHECK_TRANSACTION(tr) \
	do {\
	if (tr == NULL) {\
	panic("%s:%d: null transaction ptr?\n", __FILE__, __LINE__);\
	}\
	if (tr->jnl == NULL) {\
	panic("%s:%d: null tr->jnl ptr?\n", __FILE__, __LINE__);\
	}\
	if (tr->blhdr != (block_list_header *)tr->tbuffer) {\
	panic("%s:%d: blhdr (0x%x) != tbuffer (0x%x)\n", __FILE__, __LINE__, tr->blhdr, tr->tbuffer);\
	}\
	if (tr->total_bytes < 0) {\
	panic("%s:%d: tr total_bytes looks bad: %d\n", __FILE__, __LINE__, tr->total_bytes);\
	}\
	if (tr->journal_start < 0 || tr->journal_start > 1024*1024*1024) {\
	panic("%s:%d: tr journal start looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_start);\
	}\
	if (tr->journal_end < 0 || tr->journal_end > 1024*1024*1024) {\
	panic("%s:%d: tr journal end looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_end);\
	}\
	if (tr->blhdr && (tr->blhdr->max_blocks <= 0 || tr->blhdr->max_blocks > (tr->jnl->jhdr->size/tr->jnl->jhdr->jhdr_size))) {\
	panic("%s:%d: tr blhdr max_blocks looks bad: %d\n", __FILE__, __LINE__, tr->blhdr->max_blocks);\
	}\
	} while(0)
174
175
176
177 //
178 // this isn't a great checksum routine but it will do for now.
179 // we use it to checksum the journal header and the block list
180 // headers that are at the start of each transaction.
181 //
//
// Fold each byte of the buffer into a rolling accumulator and return
// the bitwise complement. Not a strong checksum (as the surrounding
// comments note) but sufficient to catch torn/garbage headers.
//
static int
calc_checksum(char *ptr, int len)
{
	int chk = 0;
	int idx;

	// simple byte-at-a-time mix: shift the accumulator and xor in
	// the running sum including the next byte
	for (idx = 0; idx < len; idx++) {
		chk = (chk << 8) ^ (chk + *(unsigned char *)(ptr + idx));
	}

	return ~chk;
}
194
195 //
196 // Journal Locking
197 //
198 lck_grp_attr_t * jnl_group_attr;
199 lck_attr_t * jnl_lock_attr;
200 lck_grp_t * jnl_mutex_group;
201
202 void
203 journal_init()
204 {
205 jnl_lock_attr = lck_attr_alloc_init();
206 jnl_group_attr = lck_grp_attr_alloc_init();
207 jnl_mutex_group = lck_grp_alloc_init("jnl-mutex", jnl_group_attr);
208
209 /* Turn on lock debugging */
210 //lck_attr_setdebug(jnl_lock_attr);
211 }
212
// Acquire the main journal mutex (serializes transaction start/end).
static __inline__ void
lock_journal(journal *jnl)
{
	lck_mtx_lock(&jnl->jlock);
}

// Release the main journal mutex.
static __inline__ void
unlock_journal(journal *jnl)
{
	lck_mtx_unlock(&jnl->jlock);
}

// Acquire the old_start/tr_freeme mutex (taken from the buffer
// completion callback, so hold times must stay short).
static __inline__ void
lock_oldstart(journal *jnl)
{
	lck_mtx_lock(&jnl->old_start_lock);
}

// Release the old_start/tr_freeme mutex.
static __inline__ void
unlock_oldstart(journal *jnl)
{
	lck_mtx_unlock(&jnl->old_start_lock);
}
236
237
238
#define JNL_WRITE 0x0001    // i/o direction: write to the journal device
#define JNL_READ 0x0002     // i/o direction: read from the journal device
#define JNL_HEADER 0x8000   // i/o is explicitly aimed at the journal header (offset 0)

//
// This function sets up a fake buf and passes it directly to the
// journal device strategy routine (so that it won't get cached in
// the block cache).
//
// It also handles range checking the i/o so that we don't write
// outside the journal boundaries and it will wrap the i/o back
// to the beginning if necessary (skipping over the journal header)
//
// On success returns the number of bytes transferred (== len) and
// leaves *offset advanced past the data (wrapped if necessary);
// returns 0 if the strategy call or biowait reported an error.
//
static size_t
do_journal_io(journal *jnl, off_t *offset, void *data, size_t len, int direction)
{
	int err, io_sz=0, curlen=len;   // NOTE(review): len (size_t) is narrowed to int here; assumes i/o sizes fit in an int
	buf_t bp;
	int max_iosize = 128 * 1024;    // fallback cap if the mount's ioattr isn't consulted
	struct vfsioattr ioattr;

	if (*offset < 0 || *offset > jnl->jhdr->size) {
		panic("jnl: do_jnl_io: bad offset 0x%llx (max 0x%llx)\n", *offset, jnl->jhdr->size);
	}
	// cap each chunk at what the device accepts in a single request
	vfs_ioattr(vnode_mount(jnl->jdev), &ioattr);

	if (direction & JNL_WRITE)
		max_iosize = ioattr.io_maxwritecnt;
	else if (direction & JNL_READ)
		max_iosize = ioattr.io_maxreadcnt;

again:
	bp = alloc_io_buf(jnl->jdev, 1);

	// wrap at the end of the journal: either jump straight back to
	// just past the header, or trim this chunk to end exactly at
	// the journal boundary (the remainder is handled on the next pass)
	if (*offset + (off_t)curlen > jnl->jhdr->size && *offset != 0 && jnl->jhdr->size != 0) {
		if (*offset == jnl->jhdr->size) {
			*offset = jnl->jhdr->jhdr_size;
		} else {
			curlen = (off_t)jnl->jhdr->size - *offset;
		}
	}

	if (curlen > max_iosize) {
		curlen = max_iosize;
	}

	if (curlen <= 0) {
		panic("jnl: do_jnl_io: curlen == %d, offset 0x%llx len %d\n", curlen, *offset, len);
	}

	// offset 0 is the journal header; refuse to touch it unless the
	// caller explicitly asked for header i/o
	if (*offset == 0 && (direction & JNL_HEADER) == 0) {
		panic("jnl: request for i/o to jnl-header without JNL_HEADER flag set! (len %d, data %p)\n", curlen, data);
	}

	if (direction & JNL_READ)
		buf_setflags(bp, B_READ);
	else {
		/*
		 * don't have to set any flags
		 */
		vnode_startwrite(jnl->jdev);
	}
	buf_setsize(bp, curlen);
	buf_setcount(bp, curlen);
	buf_setdataptr(bp, (uintptr_t)data);
	// translate (journal base + byte offset) into a device block number
	buf_setblkno(bp, (daddr64_t) ((jnl->jdev_offset + *offset) / (off_t)jnl->jhdr->jhdr_size));
	buf_setlblkno(bp, (daddr64_t) ((jnl->jdev_offset + *offset) / (off_t)jnl->jhdr->jhdr_size));

	err = VNOP_STRATEGY(bp);
	if (!err) {
		err = (int)buf_biowait(bp);
	}
	free_io_buf(bp);

	if (err) {
		printf("jnl: do_jnl_io: strategy err 0x%x\n", err);
		return 0;
	}

	*offset += curlen;
	io_sz += curlen;
	if (io_sz != len) {
		// handle wrap-around
		data = (char *)data + curlen;
		curlen = len - io_sz;
		if (*offset >= jnl->jhdr->size) {
			*offset = jnl->jhdr->jhdr_size;
		}
		goto again;
	}

	return io_sz;
}
332
// Read len bytes of journal data at *offset (wrapping past the end if
// needed). Advances *offset; returns bytes read, 0 on error.
static size_t
read_journal_data(journal *jnl, off_t *offset, void *data, size_t len)
{
	return do_journal_io(jnl, offset, data, len, JNL_READ);
}
338
// Write len bytes of journal data at *offset (wrapping past the end if
// needed). Advances *offset; returns bytes written, 0 on error.
static size_t
write_journal_data(journal *jnl, off_t *offset, void *data, size_t len)
{
	return do_journal_io(jnl, offset, data, len, JNL_WRITE);
}
344
345
// Read the journal header (the block at journal offset 0) into "data".
// NOTE(review): do_journal_io returns size_t but this returns int --
// assumes the header length fits in an int.
static int
read_journal_header(journal *jnl, void *data, size_t len)
{
	off_t hdr_offset = 0;

	return do_journal_io(jnl, &hdr_offset, data, len, JNL_READ|JNL_HEADER);
}
353
// Checksum and write the journal header to disk, bracketed by device
// cache flushes so the header can't be reordered against the
// transaction data it describes. Returns 0 on success; on a header
// write failure marks the journal JOURNAL_INVALID and returns -1.
// Cache-flush ioctl failures are logged (rate limited to ~25 prints)
// but do not fail the call.
static int
write_journal_header(journal *jnl)
{
	static int num_err_prints = 0;
	int ret;
	off_t jhdr_offset = 0;
	struct vfs_context context;

	context.vc_proc = current_proc();
	context.vc_ucred = NOCRED;
	//
	// XXXdbg note: this ioctl doesn't seem to do anything on firewire disks.
	//
	ret = VNOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, &context);
	if (ret != 0) {
		//
		// Only print this error if it's a different error than the
		// previous one, or if it's the first time for this device
		// or if the total number of printfs is less than 25. We
		// allow for up to 25 printfs to insure that some make it
		// into the on-disk syslog. Otherwise if we only printed
		// one, it's possible it would never make it to the syslog
		// for the root volume and that makes debugging hard.
		//
		if ( ret != jnl->last_flush_err
		|| (jnl->flags & JOURNAL_FLUSHCACHE_ERR) == 0
		|| num_err_prints++ < 25) {

			printf("jnl: flushing fs disk buffer returned 0x%x\n", ret);

			jnl->flags |= JOURNAL_FLUSHCACHE_ERR;
			jnl->last_flush_err = ret;
		}
	}


	// checksum is computed with the checksum field itself zeroed
	jnl->jhdr->checksum = 0;
	jnl->jhdr->checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header));
	if (do_journal_io(jnl, &jhdr_offset, jnl->header_buf, jnl->jhdr->jhdr_size, JNL_WRITE|JNL_HEADER) != jnl->jhdr->jhdr_size) {
		printf("jnl: write_journal_header: error writing the journal header!\n");
		jnl->flags |= JOURNAL_INVALID;
		return -1;
	}

	// Have to flush after writing the journal header so that
	// a future transaction doesn't sneak out to disk before
	// the header does and thus overwrite data that the old
	// journal header refers to. Saw this exact case happen
	// on an IDE bus analyzer with Larry Barras so while it
	// may seem obscure, it's not.
	//
	VNOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, &context);

	return 0;
}
409
410
411
412 //
413 // this is a work function used to free up transactions that
414 // completed. they can't be free'd from buffer_flushed_callback
415 // because it is called from deep with the disk driver stack
416 // and thus can't do something that would potentially cause
417 // paging. it gets called by each of the journal api entry
418 // points so stuff shouldn't hang around for too long.
419 //
420 static void
421 free_old_stuff(journal *jnl)
422 {
423 transaction *tr, *next;
424
425 lock_oldstart(jnl);
426 tr = jnl->tr_freeme;
427 jnl->tr_freeme = NULL;
428 unlock_oldstart(jnl);
429
430 for(; tr; tr=next) {
431 next = tr->next;
432 FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
433 }
434
435 }
436
437
438
439 //
440 // This is our callback that lets us know when a buffer has been
441 // flushed to disk. It's called from deep within the driver stack
442 // and thus is quite limited in what it can do. Notably, it can
443 // not initiate any new i/o's or allocate/free memory.
444 //
// Completion callback invoked when one of a transaction's metadata
// buffers has been written to its final fs location. When the last
// buffer of a transaction completes, this routine claims the
// transaction (via the 0xfbadc0de sentinel in total_bytes), clears its
// entry in old_start[], advances jnl->active_start when possible, and
// merges the completed range into the sorted completed_trs list.
// Fully merged transactions are chained onto tr_freeme to be released
// later by free_old_stuff() -- no memory can be freed at this level.
static void
buffer_flushed_callback(struct buf *bp, void *arg)
{
	transaction *tr;
	journal *jnl;
	transaction *ctr, *prev=NULL, *next;
	int i, bufsize;


	//printf("jnl: buf flush: bp @ 0x%x l/blkno %qd/%qd vp 0x%x tr @ 0x%x\n",
	// bp, buf_lblkno(bp), buf_blkno(bp), buf_vnode(bp), arg);

	// snarf out the bits we want
	bufsize = buf_size(bp);
	tr = (transaction *)arg;

	// then we've already seen it
	if (tr == NULL) {
		return;
	}

	CHECK_TRANSACTION(tr);

	jnl = tr->jnl;
	if (jnl->flags & JOURNAL_INVALID) {
		return;
	}

	CHECK_JOURNAL(jnl);

	// update the number of blocks that have been flushed.
	// this buf may represent more than one block so take
	// that into account.
	OSAddAtomic(bufsize, &tr->num_flushed);


	// if this transaction isn't done yet, just return as
	// there is nothing to do.
	if ((tr->num_flushed + tr->num_killed) < tr->total_bytes) {
		return;
	}

	// this will single thread checking the transaction
	lock_oldstart(jnl);

	if (tr->total_bytes == 0xfbadc0de) {
		// then someone beat us to it...
		unlock_oldstart(jnl);
		return;
	}

	// mark this so that we're the owner of dealing with the
	// cleanup for this transaction
	tr->total_bytes = 0xfbadc0de;

	//printf("jnl: tr 0x%x (0x%llx 0x%llx) in jnl 0x%x completed.\n",
	// tr, tr->journal_start, tr->journal_end, jnl);

	// find this entry in the old_start[] index and mark it completed
	// NOTE(review): entries appear to carry a tag in the top bit while
	// still in flight -- it is cleared here on completion; verify
	// against the code that populates old_start[]
	for(i=0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {

		if ((jnl->old_start[i] & ~(0x8000000000000000LL)) == tr->journal_start) {
			jnl->old_start[i] &= ~(0x8000000000000000LL);
			break;
		}
	}
	if (i >= sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
		panic("jnl: buffer_flushed: did not find tr w/start @ %lld (tr 0x%x, jnl 0x%x)\n",
		tr->journal_start, tr, jnl);
	}
	unlock_oldstart(jnl);


	// if we are here then we need to update the journal header
	// to reflect that this transaction is complete
	if (tr->journal_start == jnl->active_start) {
		jnl->active_start = tr->journal_end;
		tr->journal_start = tr->journal_end = (off_t)0;
	}

	// go through the completed_trs list and try to coalesce
	// entries, restarting back at the beginning if we have to.
	for(ctr=jnl->completed_trs; ctr; prev=ctr, ctr=next) {
		if (ctr->journal_start == jnl->active_start) {
			// ctr's range immediately follows active_start: consume it
			// and put ctr on the freeme list
			jnl->active_start = ctr->journal_end;
			if (prev) {
				prev->next = ctr->next;
			}
			if (ctr == jnl->completed_trs) {
				jnl->completed_trs = ctr->next;
			}

			lock_oldstart(jnl);
			next = jnl->completed_trs; // this starts us over again
			ctr->next = jnl->tr_freeme;
			jnl->tr_freeme = ctr;
			ctr = NULL;
			unlock_oldstart(jnl);
		} else if (tr->journal_end == ctr->journal_start) {
			// our range abuts ctr from the left: extend ctr downward
			ctr->journal_start = tr->journal_start;
			next = jnl->completed_trs; // this starts us over again
			ctr = NULL;
			tr->journal_start = tr->journal_end = (off_t)0;
		} else if (tr->journal_start == ctr->journal_end) {
			// our range abuts ctr from the right: extend ctr upward
			ctr->journal_end = tr->journal_end;
			next = ctr->next;
			tr->journal_start = tr->journal_end = (off_t)0;
		} else {
			next = ctr->next;
		}
	}

	// if this is true then we didn't merge with anyone
	// so link ourselves in at the head of the completed
	// transaction list.
	if (tr->journal_start != 0) {
		// put this entry into the correct sorted place
		// in the list instead of just at the head.
		//

		prev = NULL;
		for(ctr=jnl->completed_trs; ctr && tr->journal_start > ctr->journal_start; prev=ctr, ctr=ctr->next) {
			// just keep looping
		}

		if (ctr == NULL && prev == NULL) {
			jnl->completed_trs = tr;
			tr->next = NULL;
		} else if (ctr == jnl->completed_trs) {
			tr->next = jnl->completed_trs;
			jnl->completed_trs = tr;
		} else {
			tr->next = prev->next;
			prev->next = tr;
		}
	} else {
		// if we're here this tr got merged with someone else so
		// put it on the list to be free'd
		lock_oldstart(jnl);
		tr->next = jnl->tr_freeme;
		jnl->tr_freeme = tr;
		unlock_oldstart(jnl);
	}
}
589
590
591 #include <libkern/OSByteOrder.h>
592
593 #define SWAP16(x) OSSwapInt16(x)
594 #define SWAP32(x) OSSwapInt32(x)
595 #define SWAP64(x) OSSwapInt64(x)
596
597
// Byte-swap every field of the in-memory journal header in place,
// used when the journal was written with the opposite endianness.
static void
swap_journal_header(journal *jnl)
{
	jnl->jhdr->magic = SWAP32(jnl->jhdr->magic);
	jnl->jhdr->endian = SWAP32(jnl->jhdr->endian);
	jnl->jhdr->start = SWAP64(jnl->jhdr->start);
	jnl->jhdr->end = SWAP64(jnl->jhdr->end);
	jnl->jhdr->size = SWAP64(jnl->jhdr->size);
	jnl->jhdr->blhdr_size = SWAP32(jnl->jhdr->blhdr_size);
	jnl->jhdr->checksum = SWAP32(jnl->jhdr->checksum);
	jnl->jhdr->jhdr_size = SWAP32(jnl->jhdr->jhdr_size);
}
610
// Byte-swap a block_list_header and its binfo[] entries in place.
// If the (already swapped) num_blocks implies more binfo entries than
// fit in blhdr_size, the binfo swap is skipped with a warning so we
// don't scribble past the buffer.
// NOTE(review): binfo[i].bp is swapped through a 32-bit int cast --
// assumes pointer-sized 32-bit storage in this format; verify on LP64.
static void
swap_block_list_header(journal *jnl, block_list_header *blhdr)
{
	int i;

	blhdr->max_blocks = SWAP16(blhdr->max_blocks);
	blhdr->num_blocks = SWAP16(blhdr->num_blocks);
	blhdr->bytes_used = SWAP32(blhdr->bytes_used);
	blhdr->checksum = SWAP32(blhdr->checksum);
	blhdr->pad = SWAP32(blhdr->pad);

	if (blhdr->num_blocks * sizeof(blhdr->binfo[0]) > jnl->jhdr->blhdr_size) {
		printf("jnl: blhdr num blocks looks suspicious (%d). not swapping.\n", blhdr->num_blocks);
		return;
	}

	for(i=0; i < blhdr->num_blocks; i++) {
		blhdr->binfo[i].bnum = SWAP64(blhdr->binfo[i].bnum);
		blhdr->binfo[i].bsize = SWAP32(blhdr->binfo[i].bsize);
		blhdr->binfo[i].bp = (void *)SWAP32((int)blhdr->binfo[i].bp);
	}
}
633
634
// Replay helper: copy bsize bytes of journaled data (block_ptr) over
// fs block fs_block on the fs device and write it out synchronously.
// If the initial read fails we aggressively grab a fresh buf with
// buf_getblk and overwrite it anyway. After the write the block is
// re-read and invalidated so a later reader can bring it in at a
// different block size. Returns 0 on success, non-zero on failure.
static int
update_fs_block(journal *jnl, void *block_ptr, off_t fs_block, size_t bsize)
{
	int ret;
	struct buf *oblock_bp=NULL;

	// first read the block we want.
	ret = buf_meta_bread(jnl->fsdev, (daddr64_t)fs_block, bsize, NOCRED, &oblock_bp);
	if (ret != 0) {
		printf("jnl: update_fs_block: error reading fs block # %lld! (ret %d)\n", fs_block, ret);

		if (oblock_bp) {
			buf_brelse(oblock_bp);
			oblock_bp = NULL;
		}

		// let's try to be aggressive here and just re-write the block
		oblock_bp = buf_getblk(jnl->fsdev, (daddr64_t)fs_block, bsize, 0, 0, BLK_META);
		if (oblock_bp == NULL) {
			printf("jnl: update_fs_block: buf_getblk() for %lld failed! failing update.\n", fs_block);
			return -1;
		}
	}

	// make sure it's the correct size.
	if (buf_size(oblock_bp) != bsize) {
		buf_brelse(oblock_bp);
		return -1;
	}

	// copy the journal data over top of it
	memcpy((void *)buf_dataptr(oblock_bp), block_ptr, bsize);

	// VNOP_BWRITE writes synchronously and releases the buf
	if ((ret = VNOP_BWRITE(oblock_bp)) != 0) {
		printf("jnl: update_fs_block: failed to update block %lld (ret %d)\n", fs_block,ret);
		return ret;
	}

	// and now invalidate it so that if someone else wants to read
	// it in a different size they'll be able to do it.
	// (the read result is deliberately ignored; this is best-effort)
	ret = buf_meta_bread(jnl->fsdev, (daddr64_t)fs_block, bsize, NOCRED, &oblock_bp);
	if (oblock_bp) {
		buf_markinvalid(oblock_bp);
		buf_brelse(oblock_bp);
	}

	return 0;
}
683
684 static int
685 grow_table(struct bucket **buf_ptr, int num_buckets, int new_size)
686 {
687 struct bucket *newBuf;
688 int current_size = num_buckets, i;
689
690 // return if newsize is less than the current size
691 if (new_size < num_buckets) {
692 return current_size;
693 }
694
695 if ((MALLOC(newBuf, struct bucket *, new_size*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) {
696 printf("jnl: grow_table: no memory to expand coalesce buffer!\n");
697 return -1;
698 }
699
700 // printf("jnl: lookup_bucket: expanded co_buf to %d elems\n", new_size);
701
702 // copy existing elements
703 bcopy(*buf_ptr, newBuf, num_buckets*sizeof(struct bucket));
704
705 // initialize the new ones
706 for(i=num_buckets; i < new_size; i++) {
707 newBuf[i].block_num = (off_t)-1;
708 }
709
710 // free the old container
711 FREE(*buf_ptr, M_TEMP);
712
713 // reset the buf_ptr
714 *buf_ptr = newBuf;
715
716 return new_size;
717 }
718
// Binary-search the sorted bucket table for block_num over the first
// num_full entries. Returns the index of the matching entry (the
// right-most one if there are duplicates), or the index where a new
// entry for block_num should be inserted -- which may equal num_full,
// i.e. insertion at the end.
static int
lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full)
{
	int lo, hi, index, matches, i;

	if (num_full == 0) {
		return 0; // table is empty, so insert at index=0
	}

	lo = 0;
	hi = num_full - 1;
	index = -1;

	// perform binary search for block_num
	// (note: hi is set to mid, not mid-1, so the loop can exit with
	// lo == hi without having compared that slot -- checked below)
	do {
		int mid = (hi - lo)/2 + lo;
		off_t this_num = (*buf_ptr)[mid].block_num;

		if (block_num == this_num) {
			index = mid;
			break;
		}

		if (block_num < this_num) {
			hi = mid;
			continue;
		}

		if (block_num > this_num) {
			lo = mid + 1;
			continue;
		}
	} while(lo < hi);

	// check if lo and hi converged on the match
	if (block_num == (*buf_ptr)[hi].block_num) {
		index = hi;
	}

	// if no existing entry found, find index for new one
	if (index == -1) {
		index = (block_num < (*buf_ptr)[hi].block_num) ? hi : hi + 1;
	} else {
		// make sure that we return the right-most index in the case of multiple matches
		matches = 0;
		i = index + 1;
		while(i < num_full && block_num == (*buf_ptr)[i].block_num) {
			matches++;
			i++;
		}

		index += matches;
	}

	return index;
}
775
// Store (num, size, offset) at blk_index in the bucket table. When
// overwriting is false, the entries at and after blk_index are shifted
// right (growing the table first if it is full) and *num_full_ptr is
// bumped; when true the slot is simply overwritten. Journal offsets
// past the end of the journal are wrapped back past the header.
// Returns blk_index, or -1 if the table could not be grown.
static int
insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr, int overwriting)
{
	if (!overwriting) {
		// grow the table if we're out of space
		if (*num_full_ptr >= *num_buckets_ptr) {
			int new_size = *num_buckets_ptr * 2;
			int grow_size = grow_table(buf_ptr, *num_buckets_ptr, new_size);

			if (grow_size < new_size) {
				printf("jnl: add_block: grow_table returned an error!\n");
				return -1;
			}

			*num_buckets_ptr = grow_size; //update num_buckets to reflect the new size
		}

		// if we're not inserting at the end, we need to bcopy
		if (blk_index != *num_full_ptr) {
			bcopy( (*buf_ptr)+(blk_index), (*buf_ptr)+(blk_index+1), (*num_full_ptr-blk_index)*sizeof(struct bucket) );
		}

		(*num_full_ptr)++; // increment only if we're not overwriting
	}

	// sanity check the values we're about to add
	if (offset >= jnl->jhdr->size) {
		offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
	}
	// NOTE(review): size is a size_t, so only size == 0 can trip this
	if (size <= 0) {
		panic("jnl: insert_block: bad size in insert_block (%d)\n", size);
	}

	(*buf_ptr)[blk_index].block_num = num;
	(*buf_ptr)[blk_index].block_size = size;
	(*buf_ptr)[blk_index].jnl_offset = offset;

	return blk_index;
}
815
// Resolve overlap between a new journal entry (block_num/size/offset)
// and the existing table entries around blk_index:
//   - the previous entry is truncated (and split into two pieces if it
//     completely surrounds the new range)
//   - entries completely covered by the new range are tagged with
//     block_num == -2 and compacted out of the table
//   - partially covered following entries have their block_num,
//     jnl_offset, and block_size adjusted past the overlap
// Returns 1 if the new entry can simply overwrite the slot at
// blk_index, 0 if it must be inserted as a new entry.
static int
do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr)
{
	int num_to_remove, index, i, overwrite, err;
	size_t jhdr_size = jnl->jhdr->jhdr_size, new_offset;
	off_t overlap, block_start, block_end;

	// ranges are compared in bytes (block numbers * jhdr_size)
	block_start = block_num*jhdr_size;
	block_end = block_start + size;
	overwrite = (block_num == (*buf_ptr)[blk_index].block_num && size >= (*buf_ptr)[blk_index].block_size);

	// first, eliminate any overlap with the previous entry
	if (blk_index != 0 && !overwrite) {
		off_t prev_block_start = (*buf_ptr)[blk_index-1].block_num*jhdr_size;
		off_t prev_block_end = prev_block_start + (*buf_ptr)[blk_index-1].block_size;
		overlap = prev_block_end - block_start;
		if (overlap > 0) {
			if (overlap % jhdr_size != 0) {
				panic("jnl: do_overlap: overlap with previous entry not a multiple of %d\n", jhdr_size);
			}

			// if the previous entry completely overlaps this one, we need to break it into two pieces.
			if (prev_block_end > block_end) {
				off_t new_num = block_end / jhdr_size;
				size_t new_size = prev_block_end - block_end;

				new_offset = (*buf_ptr)[blk_index-1].jnl_offset + (block_end - prev_block_start);

				err = insert_block(jnl, buf_ptr, blk_index, new_num, new_size, new_offset, num_buckets_ptr, num_full_ptr, 0);
				if (err < 0) {
					panic("jnl: do_overlap: error inserting during pre-overlap\n");
				}
			}

			// Regardless, we need to truncate the previous entry to the beginning of the overlap
			(*buf_ptr)[blk_index-1].block_size = block_start - prev_block_start;
		}
	}

	// then, bail out fast if there's no overlap with the entries that follow
	if (!overwrite && block_end <= (*buf_ptr)[blk_index].block_num*jhdr_size) {
		return 0; // no overlap, no overwrite
	} else if (overwrite && (blk_index + 1 >= *num_full_ptr || block_end <= (*buf_ptr)[blk_index+1].block_num*jhdr_size)) {
		return 1; // simple overwrite
	}

	// Otherwise, find all cases of total and partial overlap. We use the special
	// block_num of -2 to designate entries that are completely overlapped and must
	// be eliminated. The block_num, size, and jnl_offset of partially overlapped
	// entries must be adjusted to keep the array consistent.
	index = blk_index;
	num_to_remove = 0;
	while(index < *num_full_ptr && block_end > (*buf_ptr)[index].block_num*jhdr_size) {
		if (block_end >= ((*buf_ptr)[index].block_num*jhdr_size + (*buf_ptr)[index].block_size)) {
			(*buf_ptr)[index].block_num = -2; // mark this for deletion
			num_to_remove++;
		} else {
			overlap = block_end - (*buf_ptr)[index].block_num*jhdr_size;
			if (overlap > 0) {
				if (overlap % jhdr_size != 0) {
					panic("jnl: do_overlap: overlap of %lld is not multiple of %d\n", overlap, jhdr_size);
				}

				// if we partially overlap this entry, adjust its block number, jnl offset, and size
				(*buf_ptr)[index].block_num += (overlap / jhdr_size); // make sure overlap is multiple of jhdr_size, or round up

				new_offset = (*buf_ptr)[index].jnl_offset + overlap; // check for wrap-around
				if (new_offset >= jnl->jhdr->size) {
					new_offset = jhdr_size + (new_offset - jnl->jhdr->size);
				}
				(*buf_ptr)[index].jnl_offset = new_offset;

				(*buf_ptr)[index].block_size -= overlap; // sanity check for negative value
				if ((*buf_ptr)[index].block_size <= 0) {
					panic("jnl: do_overlap: after overlap, new block size is invalid (%d)\n", (*buf_ptr)[index].block_size);
					// return -1; // if above panic is removed, return -1 for error
				}
			}

		}

		index++;
	}

	// bcopy over any completely overlapped entries, starting at the right (where the above loop broke out)
	index--; // start with the last index used within the above loop
	while(index >= blk_index) {
		if ((*buf_ptr)[index].block_num == -2) {
			if (index == *num_full_ptr-1) {
				(*buf_ptr)[index].block_num = -1; // it's the last item in the table... just mark as free
			} else {
				bcopy( (*buf_ptr)+(index+1), (*buf_ptr)+(index), (*num_full_ptr - (index + 1)) * sizeof(struct bucket) );
			}
			(*num_full_ptr)--;
		}
		index--;
	}

	// eliminate any stale entries at the end of the table
	for(i=*num_full_ptr; i < (*num_full_ptr + num_to_remove); i++) {
		(*buf_ptr)[i].block_num = -1;
	}

	return 0; // if we got this far, we need to insert the entry into the table (rather than overwrite)
}
921
922 // PR-3105942: Coalesce writes to the same block in journal replay
923 // We coalesce writes by maintaining a dynamic sorted array of physical disk blocks
924 // to be replayed and the corresponding location in the journal which contains
925 // the most recent data for those blocks. The array is "played" once the all the
926 // blocks in the journal have been coalesced. The code for the case of conflicting/
927 // overlapping writes to a single block is the most dense. Because coalescing can
928 // disrupt the existing time-ordering of blocks in the journal playback, care
929 // is taken to catch any overlaps and keep the array consistent.
930 static int
931 add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr)
932 {
933 int blk_index, overwriting;
934
935 // on return from lookup_bucket(), blk_index is the index into the table where block_num should be
936 // inserted (or the index of the elem to overwrite).
937 blk_index = lookup_bucket( buf_ptr, block_num, *num_full_ptr);
938
939 // check if the index is within bounds (if we're adding this block to the end of
940 // the table, blk_index will be equal to num_full)
941 if (blk_index < 0 || blk_index > *num_full_ptr) {
942 //printf("jnl: add_block: trouble adding block to co_buf\n");
943 return -1;
944 } // else printf("jnl: add_block: adding block 0x%llx at i=%d\n", block_num, blk_index);
945
946 // Determine whether we're overwriting an existing entry by checking for overlap
947 overwriting = do_overlap(jnl, buf_ptr, blk_index, block_num, size, offset, num_buckets_ptr, num_full_ptr);
948 if (overwriting < 0) {
949 return -1; // if we got an error, pass it along
950 }
951
952 // returns the index, or -1 on error
953 blk_index = insert_block(jnl, buf_ptr, blk_index, block_num, size, offset, num_buckets_ptr, num_full_ptr, overwriting);
954
955 return blk_index;
956 }
957
958 static int
959 replay_journal(journal *jnl)
960 {
961 int i, ret, orig_checksum, checksum, max_bsize;
962 block_list_header *blhdr;
963 off_t offset;
964 char *buff, *block_ptr=NULL;
965 struct bucket *co_buf;
966 int num_buckets = STARTING_BUCKETS, num_full;
967
968 // wrap the start ptr if it points to the very end of the journal
969 if (jnl->jhdr->start == jnl->jhdr->size) {
970 jnl->jhdr->start = jnl->jhdr->jhdr_size;
971 }
972 if (jnl->jhdr->end == jnl->jhdr->size) {
973 jnl->jhdr->end = jnl->jhdr->jhdr_size;
974 }
975
976 if (jnl->jhdr->start == jnl->jhdr->end) {
977 return 0;
978 }
979
980 // allocate memory for the header_block. we'll read each blhdr into this
981 if (kmem_alloc(kernel_map, (vm_offset_t *)&buff, jnl->jhdr->blhdr_size)) {
982 printf("jnl: replay_journal: no memory for block buffer! (%d bytes)\n",
983 jnl->jhdr->blhdr_size);
984 return -1;
985 }
986
987 // allocate memory for the coalesce buffer
988 if ((MALLOC(co_buf, struct bucket *, num_buckets*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) {
989 printf("jnl: replay_journal: no memory for coalesce buffer!\n");
990 return -1;
991 }
992
993 // initialize entries
994 for(i=0; i < num_buckets; i++) {
995 co_buf[i].block_num = -1;
996 }
997 num_full = 0; // empty at first
998
999
1000 printf("jnl: replay_journal: from: %lld to: %lld (joffset 0x%llx)\n",
1001 jnl->jhdr->start, jnl->jhdr->end, jnl->jdev_offset);
1002
1003 while(jnl->jhdr->start != jnl->jhdr->end) {
1004 offset = jnl->jhdr->start;
1005 ret = read_journal_data(jnl, &offset, buff, jnl->jhdr->blhdr_size);
1006 if (ret != jnl->jhdr->blhdr_size) {
1007 printf("jnl: replay_journal: Could not read block list header block @ 0x%llx!\n", offset);
1008 goto bad_replay;
1009 }
1010
1011 blhdr = (block_list_header *)buff;
1012
1013 orig_checksum = blhdr->checksum;
1014 blhdr->checksum = 0;
1015 if (jnl->flags & JOURNAL_NEED_SWAP) {
1016 // calculate the checksum based on the unswapped data
1017 // because it is done byte-at-a-time.
1018 orig_checksum = SWAP32(orig_checksum);
1019 checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
1020 swap_block_list_header(jnl, blhdr);
1021 } else {
1022 checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
1023 }
1024 if (checksum != orig_checksum) {
1025 printf("jnl: replay_journal: bad block list header @ 0x%llx (checksum 0x%x != 0x%x)\n",
1026 offset, orig_checksum, checksum);
1027 goto bad_replay;
1028 }
1029 if ( blhdr->max_blocks <= 0 || blhdr->max_blocks > 2048
1030 || blhdr->num_blocks <= 0 || blhdr->num_blocks > blhdr->max_blocks) {
1031 printf("jnl: replay_journal: bad looking journal entry: max: %d num: %d\n",
1032 blhdr->max_blocks, blhdr->num_blocks);
1033 goto bad_replay;
1034 }
1035
1036 for(i=1; i < blhdr->num_blocks; i++) {
1037 if (blhdr->binfo[i].bnum < 0 && blhdr->binfo[i].bnum != (off_t)-1) {
1038 printf("jnl: replay_journal: bogus block number 0x%llx\n", blhdr->binfo[i].bnum);
1039 goto bad_replay;
1040 }
1041 }
1042
1043 //printf("jnl: replay_journal: adding %d blocks in journal entry @ 0x%llx to co_buf\n",
1044 // blhdr->num_blocks-1, jnl->jhdr->start);
1045 for(i=1; i < blhdr->num_blocks; i++) {
1046 int size, ret_val;
1047 off_t number;
1048
1049 size = blhdr->binfo[i].bsize;
1050 number = blhdr->binfo[i].bnum;
1051
1052 // don't add "killed" blocks
1053 if (number == (off_t)-1) {
1054 //printf("jnl: replay_journal: skipping killed fs block (index %d)\n", i);
1055 } else {
1056 // add this bucket to co_buf, coalescing where possible
1057 // printf("jnl: replay_journal: adding block 0x%llx\n", number);
1058 ret_val = add_block(jnl, &co_buf, number, size, (size_t) offset, &num_buckets, &num_full);
1059
1060 if (ret_val == -1) {
1061 printf("jnl: replay_journal: trouble adding block to co_buf\n");
1062 goto bad_replay;
1063 } // else printf("jnl: replay_journal: added block 0x%llx at i=%d\n", number);
1064 }
1065
1066 // increment offset
1067 offset += size;
1068
1069 // check if the last block added puts us off the end of the jnl.
1070 // if so, we need to wrap to the beginning and take any remainder
1071 // into account
1072 //
1073 if (offset >= jnl->jhdr->size) {
1074 offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
1075 }
1076 }
1077
1078
1079 jnl->jhdr->start += blhdr->bytes_used;
1080 if (jnl->jhdr->start >= jnl->jhdr->size) {
1081 // wrap around and skip the journal header block
1082 jnl->jhdr->start = (jnl->jhdr->start % jnl->jhdr->size) + jnl->jhdr->jhdr_size;
1083 }
1084 }
1085
1086
1087 //printf("jnl: replay_journal: replaying %d blocks\n", num_full);
1088
1089 /*
1090 * make sure it's at least one page in size, so
1091 * start max_bsize at PAGE_SIZE
1092 */
1093 for (i = 0, max_bsize = PAGE_SIZE; i < num_full; i++) {
1094
1095 if (co_buf[i].block_num == (off_t)-1)
1096 continue;
1097
1098 if (co_buf[i].block_size > max_bsize)
1099 max_bsize = co_buf[i].block_size;
1100 }
1101 /*
1102 * round max_bsize up to the nearest PAGE_SIZE multiple
1103 */
1104 if (max_bsize & (PAGE_SIZE - 1)) {
1105 max_bsize = (max_bsize + PAGE_SIZE) & ~(PAGE_SIZE - 1);
1106 }
1107
1108 if (kmem_alloc(kernel_map, (vm_offset_t *)&block_ptr, max_bsize)) {
1109 goto bad_replay;
1110 }
1111
1112 // Replay the coalesced entries in the co-buf
1113 for(i=0; i < num_full; i++) {
1114 size_t size = co_buf[i].block_size;
1115 off_t jnl_offset = (off_t) co_buf[i].jnl_offset;
1116 off_t number = co_buf[i].block_num;
1117
1118
1119 // printf("replaying co_buf[%d]: block 0x%llx, size 0x%x, jnl_offset 0x%llx\n", i, co_buf[i].block_num,
1120 // co_buf[i].block_size, co_buf[i].jnl_offset);
1121
1122 if (number == (off_t)-1) {
1123 // printf("jnl: replay_journal: skipping killed fs block\n");
1124 } else {
1125
1126 // do journal read, and set the phys. block
1127 ret = read_journal_data(jnl, &jnl_offset, block_ptr, size);
1128 if (ret != size) {
1129 printf("jnl: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", offset);
1130 goto bad_replay;
1131 }
1132
1133 if (update_fs_block(jnl, block_ptr, number, size) != 0) {
1134 goto bad_replay;
1135 }
1136 }
1137 }
1138
1139
1140 // done replaying; update jnl header
1141 if (write_journal_header(jnl) != 0) {
1142 goto bad_replay;
1143 }
1144
1145 // free block_ptr
1146 kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize);
1147 block_ptr = NULL;
1148
1149 // free the coalesce buffer
1150 FREE(co_buf, M_TEMP);
1151 co_buf = NULL;
1152
1153 kmem_free(kernel_map, (vm_offset_t)buff, jnl->jhdr->blhdr_size);
1154 return 0;
1155
1156 bad_replay:
1157 if (block_ptr) {
1158 kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize);
1159 }
1160 if (co_buf) {
1161 FREE(co_buf, M_TEMP);
1162 }
1163 kmem_free(kernel_map, (vm_offset_t)buff, jnl->jhdr->blhdr_size);
1164
1165 return -1;
1166 }
1167
1168
1169 #define DEFAULT_TRANSACTION_BUFFER_SIZE (128*1024)
1170 //#define DEFAULT_TRANSACTION_BUFFER_SIZE (256*1024) // better performance but uses more mem
1171 #define MAX_TRANSACTION_BUFFER_SIZE (512*1024)
1172
1173 // XXXdbg - so I can change it in the debugger
1174 int def_tbuffer_size = 0;
1175
1176
1177 //
1178 // This function sets the size of the tbuffer and the
1179 // size of the blhdr. It assumes that jnl->jhdr->size
1180 // and jnl->jhdr->jhdr_size are already valid.
1181 //
1182 static void
1183 size_up_tbuffer(journal *jnl, int tbuffer_size, int phys_blksz)
1184 {
1185 //
1186 // one-time initialization based on how much memory
1187 // there is in the machine.
1188 //
1189 if (def_tbuffer_size == 0) {
1190 if (mem_size < (256*1024*1024)) {
1191 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE;
1192 } else if (mem_size < (512*1024*1024)) {
1193 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 2;
1194 } else if (mem_size < (1024*1024*1024)) {
1195 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 3;
1196 } else if (mem_size >= (1024*1024*1024)) {
1197 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 4;
1198 }
1199 }
1200
1201 // size up the transaction buffer... can't be larger than the number
1202 // of blocks that can fit in a block_list_header block.
1203 if (tbuffer_size == 0) {
1204 jnl->tbuffer_size = def_tbuffer_size;
1205 } else {
1206 // make sure that the specified tbuffer_size isn't too small
1207 if (tbuffer_size < jnl->jhdr->blhdr_size * 2) {
1208 tbuffer_size = jnl->jhdr->blhdr_size * 2;
1209 }
1210 // and make sure it's an even multiple of the block size
1211 if ((tbuffer_size % jnl->jhdr->jhdr_size) != 0) {
1212 tbuffer_size -= (tbuffer_size % jnl->jhdr->jhdr_size);
1213 }
1214
1215 jnl->tbuffer_size = tbuffer_size;
1216 }
1217
1218 if (jnl->tbuffer_size > (jnl->jhdr->size / 2)) {
1219 jnl->tbuffer_size = (jnl->jhdr->size / 2);
1220 }
1221
1222 if (jnl->tbuffer_size > MAX_TRANSACTION_BUFFER_SIZE) {
1223 jnl->tbuffer_size = MAX_TRANSACTION_BUFFER_SIZE;
1224 }
1225
1226 jnl->jhdr->blhdr_size = (jnl->tbuffer_size / jnl->jhdr->jhdr_size) * sizeof(block_info);
1227 if (jnl->jhdr->blhdr_size < phys_blksz) {
1228 jnl->jhdr->blhdr_size = phys_blksz;
1229 } else if ((jnl->jhdr->blhdr_size % phys_blksz) != 0) {
1230 // have to round up so we're an even multiple of the physical block size
1231 jnl->jhdr->blhdr_size = (jnl->jhdr->blhdr_size + (phys_blksz - 1)) & ~(phys_blksz - 1);
1232 }
1233 }
1234
1235
1236
//
// Create a brand-new (empty) journal on the device backed by "jvp",
// starting "offset" bytes in and spanning "journal_size" bytes.  The
// header is initialized with start == end (clean) and written to disk.
// "flush"/"arg" are an optional fs-supplied callback used later to push
// dirty buffers when the journal needs space.  Returns the new journal,
// or NULL on any failure.
//
journal *
journal_create(struct vnode *jvp,
	       off_t         offset,
	       off_t         journal_size,
	       struct vnode *fsvp,
	       size_t        min_fs_blksz,
	       int32_t       flags,
	       int32_t       tbuffer_size,
	       void        (*flush)(void *arg),
	       void         *arg)
{
    journal *jnl;
    int      phys_blksz;
    struct vfs_context context;

    context.vc_proc = current_proc();
    context.vc_ucred = FSCRED;

    /* Get the real physical block size. */
    if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
	return NULL;
    }

    // the device block size must not exceed the fs block size, since
    // journal i/o is done in units of the device block size
    if (phys_blksz > min_fs_blksz) {
	printf("jnl: create: error: phys blksize %d bigger than min fs blksize %d\n",
	       phys_blksz, min_fs_blksz);
	return NULL;
    }

    if ((journal_size % phys_blksz) != 0) {
	printf("jnl: create: journal size 0x%llx is not an even multiple of block size 0x%x\n",
	       journal_size, phys_blksz);
	return NULL;
    }

    // M_WAITOK: allocation blocks rather than failing
    MALLOC_ZONE(jnl, struct journal *, sizeof(struct journal), M_JNL_JNL, M_WAITOK);
    memset(jnl, 0, sizeof(*jnl));

    jnl->jdev         = jvp;
    jnl->jdev_offset  = offset;
    jnl->fsdev        = fsvp;
    jnl->flush        = flush;
    jnl->flush_arg    = arg;
    jnl->flags        = (flags & JOURNAL_OPTION_FLAGS_MASK);
    lck_mtx_init(&jnl->old_start_lock, jnl_mutex_group, jnl_lock_attr);

    if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl->header_buf, phys_blksz)) {
	printf("jnl: create: could not allocate space for header buffer (%d bytes)\n", phys_blksz);
	goto bad_kmem_alloc;
    }

    memset(jnl->header_buf, 0, phys_blksz);

    // the journal header lives in the first physical block; start/end
    // begin just past it, so the journal starts out clean (start == end)
    jnl->jhdr = (journal_header *)jnl->header_buf;
    jnl->jhdr->magic      = JOURNAL_HEADER_MAGIC;
    jnl->jhdr->endian     = ENDIAN_MAGIC;
    jnl->jhdr->start      = phys_blksz;    // start at block #1, block #0 is for the jhdr itself
    jnl->jhdr->end        = phys_blksz;
    jnl->jhdr->size       = journal_size;
    jnl->jhdr->jhdr_size  = phys_blksz;
    size_up_tbuffer(jnl, tbuffer_size, phys_blksz);

    jnl->active_start     = jnl->jhdr->start;

    // XXXdbg  - for testing you can force the journal to wrap around
    // jnl->jhdr->start = jnl->jhdr->size - (phys_blksz*3);
    // jnl->jhdr->end   = jnl->jhdr->size - (phys_blksz*3);

    lck_mtx_init(&jnl->jlock, jnl_mutex_group, jnl_lock_attr);

    // persist the freshly initialized header
    if (write_journal_header(jnl) != 0) {
	printf("jnl: journal_create: failed to write journal header.\n");
	goto bad_write;
    }

    return jnl;


  bad_write:
    kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz);
  bad_kmem_alloc:
    jnl->jhdr = NULL;
    FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
    return NULL;
}
1322
1323
//
// Open an existing journal located on "jvp" at byte "offset" and replay
// any committed transactions it contains.  If JOURNAL_RESET is set in
// "flags" the journal contents are discarded instead of replayed.
// Handles byte-swapped (opposite-endian) journals and journals written
// with a different device block size.  Returns the journal, or NULL if
// the on-disk header is unusable or replay fails.
//
journal *
journal_open(struct vnode *jvp,
	     off_t         offset,
	     off_t         journal_size,
	     struct vnode *fsvp,
	     size_t        min_fs_blksz,
	     int32_t       flags,
	     int32_t       tbuffer_size,
	     void        (*flush)(void *arg),
	     void         *arg)
{
    journal *jnl;
    int      orig_blksz=0, phys_blksz;
    int      orig_checksum, checksum;
    struct vfs_context context;

    context.vc_proc = current_proc();
    context.vc_ucred = FSCRED;

    /* Get the real physical block size. */
    if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
	return NULL;
    }

    if (phys_blksz > min_fs_blksz) {
	printf("jnl: create: error: phys blksize %d bigger than min fs blksize %d\n",
	       phys_blksz, min_fs_blksz);
	return NULL;
    }

    if ((journal_size % phys_blksz) != 0) {
	printf("jnl: open: journal size 0x%llx is not an even multiple of block size 0x%x\n",
	       journal_size, phys_blksz);
	return NULL;
    }

    MALLOC_ZONE(jnl, struct journal *, sizeof(struct journal), M_JNL_JNL, M_WAITOK);
    memset(jnl, 0, sizeof(*jnl));

    jnl->jdev         = jvp;
    jnl->jdev_offset  = offset;
    jnl->fsdev        = fsvp;
    jnl->flush        = flush;
    jnl->flush_arg    = arg;
    jnl->flags        = (flags & JOURNAL_OPTION_FLAGS_MASK);
    lck_mtx_init(&jnl->old_start_lock, jnl_mutex_group, jnl_lock_attr);

    if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl->header_buf, phys_blksz)) {
	printf("jnl: create: could not allocate space for header buffer (%d bytes)\n", phys_blksz);
	goto bad_kmem_alloc;
    }

    jnl->jhdr = (journal_header *)jnl->header_buf;
    memset(jnl->jhdr, 0, sizeof(journal_header)+4);

    // we have to set this up here so that do_journal_io() will work
    jnl->jhdr->jhdr_size = phys_blksz;

    if (read_journal_header(jnl, jnl->jhdr, phys_blksz) != phys_blksz) {
	printf("jnl: open: could not read %d bytes for the journal header.\n",
	       phys_blksz);
	goto bad_journal;
    }

    orig_checksum = jnl->jhdr->checksum;
    jnl->jhdr->checksum = 0;

    if (jnl->jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
	// do this before the swap since it's done byte-at-a-time
	orig_checksum = SWAP32(orig_checksum);
	checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header));
	swap_journal_header(jnl);
	jnl->flags |= JOURNAL_NEED_SWAP;
    } else {
	checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header));
    }

    if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC && jnl->jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
	printf("jnl: open: journal magic is bad (0x%x != 0x%x)\n",
	       jnl->jhdr->magic, JOURNAL_HEADER_MAGIC);
	goto bad_journal;
    }

    // only check if we're the current journal header magic value
    if (jnl->jhdr->magic == JOURNAL_HEADER_MAGIC) {

	if (orig_checksum != checksum) {
	    printf("jnl: open: journal checksum is bad (0x%x != 0x%x)\n",
		   orig_checksum, checksum);

	    // NOTE(review): a bad checksum is logged but deliberately NOT
	    // treated as fatal here (the goto below is commented out)
	    //goto bad_journal;
	}
    }

    // XXXdbg - convert old style magic numbers to the new one
    if (jnl->jhdr->magic == OLD_JOURNAL_HEADER_MAGIC) {
	jnl->jhdr->magic = JOURNAL_HEADER_MAGIC;
    }

    // if the journal was written with a different device block size,
    // temporarily switch the device to that size for the replay; the
    // original size is restored after replay (see orig_blksz below)
    if (phys_blksz != jnl->jhdr->jhdr_size && jnl->jhdr->jhdr_size != 0) {
	printf("jnl: open: phys_blksz %d does not match journal header size %d\n",
	       phys_blksz, jnl->jhdr->jhdr_size);

	orig_blksz = phys_blksz;
	phys_blksz = jnl->jhdr->jhdr_size;
	if (VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&phys_blksz, FWRITE, &context)) {
	    printf("jnl: could not set block size to %d bytes.\n", phys_blksz);
	    goto bad_journal;
	}
	// goto bad_journal;
    }

    // sanity-check start/end/size before trusting them for replay;
    // 1GB is used as an upper bound on plausible journal sizes
    if (   jnl->jhdr->start <= 0
	|| jnl->jhdr->start > jnl->jhdr->size
	|| jnl->jhdr->start > 1024*1024*1024) {
	printf("jnl: open: jhdr start looks bad (0x%llx max size 0x%llx)\n",
	       jnl->jhdr->start, jnl->jhdr->size);
	goto bad_journal;
    }

    if (   jnl->jhdr->end <= 0
	|| jnl->jhdr->end > jnl->jhdr->size
	|| jnl->jhdr->end > 1024*1024*1024) {
	printf("jnl: open: jhdr end looks bad (0x%llx max size 0x%llx)\n",
	       jnl->jhdr->end, jnl->jhdr->size);
	goto bad_journal;
    }

    if (jnl->jhdr->size > 1024*1024*1024) {
	printf("jnl: open: jhdr size looks bad (0x%llx)\n", jnl->jhdr->size);
	goto bad_journal;
    }

// XXXdbg - can't do these checks because hfs writes all kinds of
//          non-uniform sized blocks even on devices that have a block size
//          that is larger than 512 bytes (i.e. optical media w/2k blocks).
//          therefore these checks will fail and so we just have to punt and
//          do more relaxed checking...
// XXXdbg    if ((jnl->jhdr->start % jnl->jhdr->jhdr_size) != 0) {
    if ((jnl->jhdr->start % 512) != 0) {
	printf("jnl: open: journal start (0x%llx) not a multiple of 512?\n",
	       jnl->jhdr->start);
	goto bad_journal;
    }

//XXXdbg    if ((jnl->jhdr->end % jnl->jhdr->jhdr_size) != 0) {
    if ((jnl->jhdr->end % 512) != 0) {
	printf("jnl: open: journal end (0x%llx) not a multiple of block size (0x%x)?\n",
	       jnl->jhdr->end, jnl->jhdr->jhdr_size);
	goto bad_journal;
    }

    // take care of replaying the journal if necessary
    if (flags & JOURNAL_RESET) {
	printf("jnl: journal start/end pointers reset! (jnl 0x%x; s 0x%llx e 0x%llx)\n",
	       jnl, jnl->jhdr->start, jnl->jhdr->end);
	jnl->jhdr->start = jnl->jhdr->end;
    } else if (replay_journal(jnl) != 0) {
	printf("jnl: journal_open: Error replaying the journal!\n");
	goto bad_journal;
    }

    // restore the device's original block size if we changed it above
    if (orig_blksz != 0) {
	VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, &context);
	phys_blksz = orig_blksz;
	if (orig_blksz < jnl->jhdr->jhdr_size) {
	    printf("jnl: open: jhdr_size is %d but orig phys blk size is %d. switching.\n",
		   jnl->jhdr->jhdr_size, orig_blksz);

	    jnl->jhdr->jhdr_size = orig_blksz;
	}
    }

    // make sure this is in sync!
    jnl->active_start = jnl->jhdr->start;

    // set this now, after we've replayed the journal
    size_up_tbuffer(jnl, tbuffer_size, phys_blksz);

    lck_mtx_init(&jnl->jlock, jnl_mutex_group, jnl_lock_attr);

    return jnl;

  bad_journal:
    // restore the device block size before bailing, and make sure
    // phys_blksz matches the size of the buffer we kmem_free below
    if (orig_blksz != 0) {
	phys_blksz = orig_blksz;
	VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, &context);
    }
    kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz);
  bad_kmem_alloc:
    FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
    return NULL;
}
1517
1518
//
// Check whether the journal at "jvp"/"offset" is clean (i.e. has no
// committed transactions waiting to be replayed) without opening it.
// Uses a stack-local journal struct so nothing is left allocated.
// Returns 0 if clean, EINVAL if dirty or the header is bad, ENOMEM if
// the header buffer can't be allocated.
//
int
journal_is_clean(struct vnode *jvp,
		 off_t         offset,
		 off_t         journal_size,
		 struct vnode *fsvp,
		 size_t        min_fs_block_size)
{
    journal jnl;
    int     phys_blksz, ret;
    int     orig_checksum, checksum;
    struct vfs_context context;

    context.vc_proc = current_proc();
    context.vc_ucred = FSCRED;

    /* Get the real physical block size. */
    if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
	printf("jnl: is_clean: failed to get device block size.\n");
	return EINVAL;
    }

    if (phys_blksz > min_fs_block_size) {
	printf("jnl: is_clean: error: phys blksize %d bigger than min fs blksize %d\n",
	       phys_blksz, min_fs_block_size);
	return EINVAL;
    }

    if ((journal_size % phys_blksz) != 0) {
	printf("jnl: is_clean: journal size 0x%llx is not an even multiple of block size 0x%x\n",
	       journal_size, phys_blksz);
	return EINVAL;
    }

    memset(&jnl, 0, sizeof(jnl));

    if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl.header_buf, phys_blksz)) {
	printf("jnl: is_clean: could not allocate space for header buffer (%d bytes)\n", phys_blksz);
	return ENOMEM;
    }

    jnl.jhdr = (journal_header *)jnl.header_buf;
    memset(jnl.jhdr, 0, sizeof(journal_header)+4);

    jnl.jdev        = jvp;
    jnl.jdev_offset = offset;
    jnl.fsdev       = fsvp;

    // we have to set this up here so that do_journal_io() will work
    jnl.jhdr->jhdr_size = phys_blksz;

    if (read_journal_header(&jnl, jnl.jhdr, phys_blksz) != phys_blksz) {
	printf("jnl: is_clean: could not read %d bytes for the journal header.\n",
	       phys_blksz);
	ret = EINVAL;
	goto get_out;
    }

    orig_checksum = jnl.jhdr->checksum;
    jnl.jhdr->checksum = 0;

    if (jnl.jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
	// do this before the swap since it's done byte-at-a-time
	orig_checksum = SWAP32(orig_checksum);
	checksum = calc_checksum((char *)jnl.jhdr, sizeof(struct journal_header));
	swap_journal_header(&jnl);
	jnl.flags |= JOURNAL_NEED_SWAP;
    } else {
	checksum = calc_checksum((char *)jnl.jhdr, sizeof(struct journal_header));
    }

    if (jnl.jhdr->magic != JOURNAL_HEADER_MAGIC && jnl.jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
	printf("jnl: is_clean: journal magic is bad (0x%x != 0x%x)\n",
	       jnl.jhdr->magic, JOURNAL_HEADER_MAGIC);
	ret = EINVAL;
	goto get_out;
    }

    // unlike journal_open(), a bad checksum IS fatal here
    if (orig_checksum != checksum) {
	printf("jnl: is_clean: journal checksum is bad (0x%x != 0x%x)\n", orig_checksum, checksum);
	ret = EINVAL;
	goto get_out;
    }

    //
    // if the start and end are equal then the journal is clean.
    // otherwise it's not clean and therefore an error.
    //
    if (jnl.jhdr->start == jnl.jhdr->end) {
	ret = 0;
    } else {
	ret = EINVAL;
    }

  get_out:
    kmem_free(kernel_map, (vm_offset_t)jnl.header_buf, phys_blksz);

    return ret;


}
1619
1620
1621
//
// Close the journal: flush or abort any outstanding transactions, wait
// (bounded) for the buffer cache to drain so start catches up to end,
// write the final header, and free everything.  The journal struct is
// freed on return and must not be used again.
//
void
journal_close(journal *jnl)
{
    volatile off_t *start, *end;
    int             counter=0;

    CHECK_JOURNAL(jnl);

	// set this before doing anything that would block so that
	// we start tearing things down properly.
	//
	jnl->flags |= JOURNAL_CLOSE_PENDING;

    if (jnl->owner != current_thread()) {
		lock_journal(jnl);
    }

    //
    // only write stuff to disk if the journal is still valid
    //
    if ((jnl->flags & JOURNAL_INVALID) == 0) {

		if (jnl->active_tr) {
			journal_end_transaction(jnl);
		}

		// flush any buffered transactions
		if (jnl->cur_tr) {
			transaction *tr = jnl->cur_tr;

			jnl->cur_tr = NULL;
			end_transaction(tr, 1);   // force it to get flushed
		}

		//start = &jnl->jhdr->start;
		start = &jnl->active_start;
		end   = &jnl->jhdr->end;

		// poll (up to 500 ticks) for the fs flush callback to push
		// everything out so the journal drains (start == end)
		while (*start != *end && counter++ < 500) {
			printf("jnl: close: flushing the buffer cache (start 0x%llx end 0x%llx)\n", *start, *end);
			if (jnl->flush) {
				jnl->flush(jnl->flush_arg);
			}
			tsleep((caddr_t)jnl, PRIBIO, "jnl_close", 1);
		}

		if (*start != *end) {
			printf("jnl: close: buffer flushing didn't seem to flush out all the transactions! (0x%llx - 0x%llx)\n",
				   *start, *end);
		}

		// make sure this is in sync when we close the journal
		jnl->jhdr->start = jnl->active_start;

		// if this fails there's not much we can do at this point...
		write_journal_header(jnl);
    } else {
		// if we're here the journal isn't valid any more.
		// so make sure we don't leave any locked blocks lying around
		printf("jnl: close: journal 0x%x, is invalid.  aborting outstanding transactions\n", jnl);
		if (jnl->active_tr || jnl->cur_tr) {
			transaction *tr;
			if (jnl->active_tr) {
				tr = jnl->active_tr;
				jnl->active_tr = NULL;
			} else {
				tr = jnl->cur_tr;
				jnl->cur_tr = NULL;
			}

			abort_transaction(jnl, tr);
			if (jnl->active_tr || jnl->cur_tr) {
				panic("jnl: close: jnl @ 0x%x had both an active and cur tr\n", jnl);
			}
		}
    }

    free_old_stuff(jnl);

    // poison the jhdr pointer so stale uses are obvious in a crash
    kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, jnl->jhdr->jhdr_size);
    jnl->jhdr = (void *)0xbeefbabe;

    FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
}
1706
1707 static void
1708 dump_journal(journal *jnl)
1709 {
1710 transaction *ctr;
1711
1712 printf("journal:");
1713 printf(" jdev_offset %.8llx\n", jnl->jdev_offset);
1714 printf(" magic: 0x%.8x\n", jnl->jhdr->magic);
1715 printf(" start: 0x%.8llx\n", jnl->jhdr->start);
1716 printf(" end: 0x%.8llx\n", jnl->jhdr->end);
1717 printf(" size: 0x%.8llx\n", jnl->jhdr->size);
1718 printf(" blhdr size: %d\n", jnl->jhdr->blhdr_size);
1719 printf(" jhdr size: %d\n", jnl->jhdr->jhdr_size);
1720 printf(" chksum: 0x%.8x\n", jnl->jhdr->checksum);
1721
1722 printf(" completed transactions:\n");
1723 for(ctr=jnl->completed_trs; ctr; ctr=ctr->next) {
1724 printf(" 0x%.8llx - 0x%.8llx\n", ctr->journal_start, ctr->journal_end);
1725 }
1726 }
1727
1728
1729
1730 static off_t
1731 free_space(journal *jnl)
1732 {
1733 off_t free_space;
1734
1735 if (jnl->jhdr->start < jnl->jhdr->end) {
1736 free_space = jnl->jhdr->size - (jnl->jhdr->end - jnl->jhdr->start) - jnl->jhdr->jhdr_size;
1737 } else if (jnl->jhdr->start > jnl->jhdr->end) {
1738 free_space = jnl->jhdr->start - jnl->jhdr->end;
1739 } else {
1740 // journal is completely empty
1741 free_space = jnl->jhdr->size - jnl->jhdr->jhdr_size;
1742 }
1743
1744 return free_space;
1745 }
1746
1747
1748 //
1749 // The journal must be locked on entry to this function.
1750 // The "desired_size" is in bytes.
1751 //
//
// Wait until the journal has at least "desired_size" bytes free,
// lazily advancing jhdr->start past completed transactions (recorded
// in jnl->old_start[]) to reclaim space.  The journal must be locked
// on entry.  Returns 0 once there is room, ENOSPC if we give up after
// too many retries (panics earlier at 5000 iterations as a debugging
// aid).  The "desired_size" is in bytes.
//
static int
check_free_space(journal *jnl, int desired_size)
{
    int i, counter=0;

    //printf("jnl: check free space (desired 0x%x, avail 0x%Lx)\n",
	//	   desired_size, free_space(jnl));
    
    while (1) {
		int old_start_empty;
		
		if (counter++ == 5000) {
			dump_journal(jnl);
			panic("jnl: check_free_space: buffer flushing isn't working "
				  "(jnl @ 0x%x s %lld e %lld f %lld [active start %lld]).\n", jnl,
				  jnl->jhdr->start, jnl->jhdr->end, free_space(jnl), jnl->active_start);
		}
		if (counter > 7500) {
			printf("jnl: check_free_space: giving up waiting for free space.\n");
			return ENOSPC;
		}

		// make sure there's space in the journal to hold this transaction
		if (free_space(jnl) > desired_size) {
			break;
		}

		//
		// here's where we lazily bump up jnl->jhdr->start.  we'll consume
		// entries until there is enough space for the next transaction.
		//
		old_start_empty = 1;
		lock_oldstart(jnl);
		for(i=0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {
			int counter;    // NOTE: intentionally shadows the outer retry counter

			counter = 0;
			// the high bit marks a transaction that is still being
			// flushed to disk; wait for it to clear
			while (jnl->old_start[i] & 0x8000000000000000LL) {
				if (counter++ > 100) {
					panic("jnl: check_free_space: tr starting @ 0x%llx not flushing (jnl 0x%x).\n",
						  jnl->old_start[i], jnl);
				}
				
				// drop the lock while we block so the flusher can make
				// progress, then re-take it and re-check
				unlock_oldstart(jnl);
				if (jnl->flush) {
					jnl->flush(jnl->flush_arg);
				}
				tsleep((caddr_t)jnl, PRIBIO, "check_free_space1", 1);
				lock_oldstart(jnl);
			}

			if (jnl->old_start[i] == 0) {
				continue;
			}

			// consume this completed transaction: advance start past it
			old_start_empty = 0;
			jnl->jhdr->start = jnl->old_start[i];
			jnl->old_start[i] = 0;
			if (free_space(jnl) > desired_size) {
				unlock_oldstart(jnl);
				write_journal_header(jnl);
				lock_oldstart(jnl);
				break;
			}
		}
		unlock_oldstart(jnl);
		
		// if we bumped the start, loop and try again
		if (i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
			continue;
		} else if (old_start_empty) {
			//
			// if there is nothing in old_start anymore then we can
			// bump the jhdr->start to be the same as active_start
			// since it is possible there was only one very large
			// transaction in the old_start array.  if we didn't do
			// this then jhdr->start would never get updated and we
			// would wind up looping until we hit the panic at the
			// start of the loop.
			//
			jnl->jhdr->start = jnl->active_start;
			write_journal_header(jnl);
			continue;
		}


		// if the file system gave us a flush function, call it to so that
		// it can flush some blocks which hopefully will cause some transactions
		// to complete and thus free up space in the journal.
		if (jnl->flush) {
			jnl->flush(jnl->flush_arg);
		}

		// wait for a while to avoid being cpu-bound (this will
		// put us to sleep for 10 milliseconds)
		tsleep((caddr_t)jnl, PRIBIO, "check_free_space2", 1);
    }

    return 0;
}
1852
//
// Begin a transaction.  Nested calls from the thread that already owns
// the journal just bump a count.  Otherwise this takes the journal
// lock, reclaims old transactions, verifies there is space, and sets
// up jnl->active_tr (reusing a buffered transaction if one exists).
// On success (return 0) the journal lock is HELD by the caller until
// journal_end_transaction(); on failure the lock is released.
// Returns 0, EINVAL (journal invalid), ENOSPC, or ENOMEM.
//
int
journal_start_transaction(journal *jnl)
{
    int ret;
    transaction *tr;

    CHECK_JOURNAL(jnl);
    
    if (jnl->flags & JOURNAL_INVALID) {
		return EINVAL;
    }

    if (jnl->owner == current_thread()) {
		if (jnl->active_tr == NULL) {
			panic("jnl: start_tr: active_tr is NULL (jnl @ 0x%x, owner 0x%x, current_thread 0x%x\n",
				  jnl, jnl->owner, current_thread());
		}
		// nested transaction: just count it
		jnl->nested_count++;
		return 0;
    }

    lock_journal(jnl);

    if (jnl->owner != NULL || jnl->nested_count != 0 || jnl->active_tr != NULL) {
		panic("jnl: start_tr: owner 0x%x, nested count 0x%x, active_tr 0x%x jnl @ 0x%x\n",
			  jnl->owner, jnl->nested_count, jnl->active_tr, jnl);
    }

    jnl->owner        = current_thread();
    jnl->nested_count = 1;

    free_old_stuff(jnl);

    // make sure there's room in the journal
    if (check_free_space(jnl, jnl->tbuffer_size) != 0) {
		printf("jnl: start transaction failed: no space\n");
		ret = ENOSPC;
		goto bad_start;
    }

    // if there's a buffered transaction, use it.
    if (jnl->cur_tr) {
		jnl->active_tr = jnl->cur_tr;
		jnl->cur_tr    = NULL;

		// note: returns with the journal lock still held
		return 0;
    }

    MALLOC_ZONE(tr, transaction *, sizeof(transaction), M_JNL_TR, M_WAITOK);
    memset(tr, 0, sizeof(transaction));

    tr->tbuffer_size = jnl->tbuffer_size;

    if (kmem_alloc(kernel_map, (vm_offset_t *)&tr->tbuffer, tr->tbuffer_size)) {
		FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
		printf("jnl: start transaction failed: no tbuffer mem\n");
		ret = ENOMEM;
		goto bad_start;
    }

    // journal replay code checksum check depends on this.
    memset(tr->tbuffer, 0, BLHDR_CHECKSUM_SIZE);

    // the first block of the tbuffer is the block-list header;
    // num_blocks starts at 1 to account for the header itself
    tr->blhdr = (block_list_header *)tr->tbuffer;
    tr->blhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
    tr->blhdr->num_blocks = 1;      // accounts for this header block
    tr->blhdr->bytes_used = jnl->jhdr->blhdr_size;

    tr->num_blhdrs  = 1;
    tr->total_bytes = jnl->jhdr->blhdr_size;
    tr->jnl         = jnl;

    jnl->active_tr  = tr;

    // printf("jnl: start_tr: owner 0x%x new tr @ 0x%x\n", jnl->owner, tr);

    return 0;

  bad_start:
	jnl->owner        = NULL;
	jnl->nested_count = 0;
	unlock_journal(jnl);
	return ret;
}
1937
1938
1939 int
1940 journal_modify_block_start(journal *jnl, struct buf *bp)
1941 {
1942 transaction *tr;
1943
1944 CHECK_JOURNAL(jnl);
1945
1946 if (jnl->flags & JOURNAL_INVALID) {
1947 return EINVAL;
1948 }
1949
1950 // XXXdbg - for debugging I want this to be true. later it may
1951 // not be necessary.
1952 if ((buf_flags(bp) & B_META) == 0) {
1953 panic("jnl: modify_block_start: bp @ 0x%x is not a meta-data block! (jnl 0x%x)\n", bp, jnl);
1954 }
1955
1956 tr = jnl->active_tr;
1957 CHECK_TRANSACTION(tr);
1958
1959 if (jnl->owner != current_thread()) {
1960 panic("jnl: modify_block_start: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
1961 jnl, jnl->owner, current_thread());
1962 }
1963
1964 free_old_stuff(jnl);
1965
1966 //printf("jnl: mod block start (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d; total bytes %d)\n",
1967 // bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes);
1968
1969 // can't allow blocks that aren't an even multiple of the
1970 // underlying block size.
1971 if ((buf_size(bp) % jnl->jhdr->jhdr_size) != 0) {
1972 panic("jnl: mod block start: bufsize %d not a multiple of block size %d\n",
1973 buf_size(bp), jnl->jhdr->jhdr_size);
1974 return -1;
1975 }
1976
1977 // make sure that this transaction isn't bigger than the whole journal
1978 if (tr->total_bytes+buf_size(bp) >= (jnl->jhdr->size - jnl->jhdr->jhdr_size)) {
1979 panic("jnl: transaction too big (%d >= %lld bytes, bufsize %d, tr 0x%x bp 0x%x)\n",
1980 tr->total_bytes, (tr->jnl->jhdr->size - jnl->jhdr->jhdr_size), buf_size(bp), tr, bp);
1981 return -1;
1982 }
1983
1984 // if the block is dirty and not already locked we have to write
1985 // it out before we muck with it because it has data that belongs
1986 // (presumably) to another transaction.
1987 //
1988 if ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI) {
1989
1990 if (buf_flags(bp) & B_ASYNC) {
1991 panic("modify_block_start: bp @ 0x% has async flag set!\n", bp);
1992 }
1993
1994 // this will cause it to not be buf_brelse()'d
1995 buf_setflags(bp, B_NORELSE);
1996 VNOP_BWRITE(bp);
1997 }
1998 buf_setflags(bp, B_LOCKED);
1999
2000 return 0;
2001 }
2002
2003 int
2004 journal_modify_block_abort(journal *jnl, struct buf *bp)
2005 {
2006 transaction *tr;
2007 block_list_header *blhdr;
2008 int i, j;
2009
2010 CHECK_JOURNAL(jnl);
2011
2012 tr = jnl->active_tr;
2013
2014 //
2015 // if there's no active transaction then we just want to
2016 // call buf_brelse() and return since this is just a block
2017 // that happened to be modified as part of another tr.
2018 //
2019 if (tr == NULL) {
2020 buf_brelse(bp);
2021 return 0;
2022 }
2023
2024 if (jnl->flags & JOURNAL_INVALID) {
2025 return EINVAL;
2026 }
2027
2028 CHECK_TRANSACTION(tr);
2029
2030 if (jnl->owner != current_thread()) {
2031 panic("jnl: modify_block_abort: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
2032 jnl, jnl->owner, current_thread());
2033 }
2034
2035 free_old_stuff(jnl);
2036
2037 // printf("jnl: modify_block_abort: tr 0x%x bp 0x%x\n", jnl->active_tr, bp);
2038
2039 // first check if it's already part of this transaction
2040 for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
2041 for(i=1; i < blhdr->num_blocks; i++) {
2042 if (bp == blhdr->binfo[i].bp) {
2043 if (buf_size(bp) != blhdr->binfo[i].bsize) {
2044 panic("jnl: bp @ 0x%x changed size on me! (%d vs. %d, jnl 0x%x)\n",
2045 bp, buf_size(bp), blhdr->binfo[i].bsize, jnl);
2046 }
2047 break;
2048 }
2049 }
2050
2051 if (i < blhdr->num_blocks) {
2052 break;
2053 }
2054 }
2055
2056 //
2057 // if blhdr is null, then this block has only had modify_block_start
2058 // called on it as part of the current transaction. that means that
2059 // it is ok to clear the LOCKED bit since it hasn't actually been
2060 // modified. if blhdr is non-null then modify_block_end was called
2061 // on it and so we need to keep it locked in memory.
2062 //
2063 if (blhdr == NULL) {
2064 buf_clearflags(bp, B_LOCKED);
2065 }
2066
2067 buf_brelse(bp);
2068 return 0;
2069 }
2070
2071
2072 int
2073 journal_modify_block_end(journal *jnl, struct buf *bp)
2074 {
2075 int i, j, tbuffer_offset;
2076 char *blkptr;
2077 block_list_header *blhdr, *prev=NULL;
2078 transaction *tr;
2079
2080 CHECK_JOURNAL(jnl);
2081
2082 if (jnl->flags & JOURNAL_INVALID) {
2083 return EINVAL;
2084 }
2085
2086 tr = jnl->active_tr;
2087 CHECK_TRANSACTION(tr);
2088
2089 if (jnl->owner != current_thread()) {
2090 panic("jnl: modify_block_end: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
2091 jnl, jnl->owner, current_thread());
2092 }
2093
2094 free_old_stuff(jnl);
2095
2096 //printf("jnl: mod block end: (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d, total bytes %d)\n",
2097 // bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes);
2098
2099 if ((buf_flags(bp) & B_LOCKED) == 0) {
2100 panic("jnl: modify_block_end: bp 0x%x not locked! jnl @ 0x%x\n", bp, jnl);
2101 }
2102
2103 // first check if it's already part of this transaction
2104 for(blhdr=tr->blhdr; blhdr; prev=blhdr,blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
2105 tbuffer_offset = jnl->jhdr->blhdr_size;
2106
2107 for(i=1; i < blhdr->num_blocks; i++) {
2108 if (bp == blhdr->binfo[i].bp) {
2109 if (buf_size(bp) != blhdr->binfo[i].bsize) {
2110 panic("jnl: bp @ 0x%x changed size on me! (%d vs. %d, jnl 0x%x)\n",
2111 bp, buf_size(bp), blhdr->binfo[i].bsize, jnl);
2112 }
2113 break;
2114 }
2115 tbuffer_offset += blhdr->binfo[i].bsize;
2116 }
2117
2118 if (i < blhdr->num_blocks) {
2119 break;
2120 }
2121 }
2122
2123 if (blhdr == NULL
2124 && prev
2125 && (prev->num_blocks+1) <= prev->max_blocks
2126 && (prev->bytes_used+buf_size(bp)) <= tr->tbuffer_size) {
2127 blhdr = prev;
2128 } else if (blhdr == NULL) {
2129 block_list_header *nblhdr;
2130
2131 if (prev == NULL) {
2132 panic("jnl: modify block end: no way man, prev == NULL?!?, jnl 0x%x, bp 0x%x\n", jnl, bp);
2133 }
2134
2135 // we got to the end of the list, didn't find the block and there's
2136 // no room in the block_list_header pointed to by prev
2137
2138 // we allocate another tbuffer and link it in at the end of the list
2139 // through prev->binfo[0].bnum. that's a skanky way to do things but
2140 // avoids having yet another linked list of small data structures to manage.
2141
2142 if (kmem_alloc(kernel_map, (vm_offset_t *)&nblhdr, tr->tbuffer_size)) {
2143 panic("jnl: end_tr: no space for new block tr @ 0x%x (total bytes: %d)!\n",
2144 tr, tr->total_bytes);
2145 }
2146
2147 // journal replay code checksum check depends on this.
2148 memset(nblhdr, 0, BLHDR_CHECKSUM_SIZE);
2149
2150 // initialize the new guy
2151 nblhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
2152 nblhdr->num_blocks = 1; // accounts for this header block
2153 nblhdr->bytes_used = jnl->jhdr->blhdr_size;
2154
2155 tr->num_blhdrs++;
2156 tr->total_bytes += jnl->jhdr->blhdr_size;
2157
2158 // then link him in at the end
2159 prev->binfo[0].bnum = (off_t)((long)nblhdr);
2160
2161 // and finally switch to using the new guy
2162 blhdr = nblhdr;
2163 tbuffer_offset = jnl->jhdr->blhdr_size;
2164 i = 1;
2165 }
2166
2167
2168 if ((i+1) > blhdr->max_blocks) {
2169 panic("jnl: modify_block_end: i = %d, max_blocks %d\n", i, blhdr->max_blocks);
2170 }
2171
2172 // copy the data into the in-memory transaction buffer
2173 blkptr = (char *)&((char *)blhdr)[tbuffer_offset];
2174 memcpy(blkptr, buf_dataptr(bp), buf_size(bp));
2175
2176 // if this is true then this is a new block we haven't seen
2177 if (i >= blhdr->num_blocks) {
2178 int bsize;
2179 vnode_t vp;
2180
2181 vp = buf_vnode(bp);
2182 vnode_ref(vp);
2183 bsize = buf_size(bp);
2184
2185 blhdr->binfo[i].bnum = (off_t)(buf_blkno(bp));
2186 blhdr->binfo[i].bsize = bsize;
2187 blhdr->binfo[i].bp = bp;
2188
2189 blhdr->bytes_used += bsize;
2190 tr->total_bytes += bsize;
2191
2192 blhdr->num_blocks++;
2193 }
2194 buf_bdwrite(bp);
2195
2196 return 0;
2197 }
2198
2199 int
2200 journal_kill_block(journal *jnl, struct buf *bp)
2201 {
2202 int i;
2203 int bflags;
2204 block_list_header *blhdr;
2205 transaction *tr;
2206
2207 CHECK_JOURNAL(jnl);
2208
2209 if (jnl->flags & JOURNAL_INVALID) {
2210 return EINVAL;
2211 }
2212
2213 tr = jnl->active_tr;
2214 CHECK_TRANSACTION(tr);
2215
2216 if (jnl->owner != current_thread()) {
2217 panic("jnl: modify_block_end: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
2218 jnl, jnl->owner, current_thread());
2219 }
2220
2221 free_old_stuff(jnl);
2222
2223 bflags = buf_flags(bp);
2224
2225 if ( !(bflags & B_LOCKED))
2226 panic("jnl: modify_block_end: called with bp not B_LOCKED");
2227
2228 /*
2229 * bp must be BL_BUSY and B_LOCKED
2230 */
2231 // first check if it's already part of this transaction
2232 for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
2233
2234 for(i=1; i < blhdr->num_blocks; i++) {
2235 if (bp == blhdr->binfo[i].bp) {
2236 vnode_t vp;
2237
2238 buf_clearflags(bp, B_LOCKED);
2239
2240 // this undoes the vnode_ref() in journal_modify_block_end()
2241 vp = buf_vnode(bp);
2242 vnode_rele_ext(vp, 0, 1);
2243
2244 // if the block has the DELWRI and FILTER bits sets, then
2245 // things are seriously weird. if it was part of another
2246 // transaction then journal_modify_block_start() should
2247 // have force it to be written.
2248 //
2249 //if ((bflags & B_DELWRI) && (bflags & B_FILTER)) {
2250 // panic("jnl: kill block: this defies all logic! bp 0x%x\n", bp);
2251 //} else {
2252 tr->num_killed += buf_size(bp);
2253 //}
2254 blhdr->binfo[i].bp = NULL;
2255 blhdr->binfo[i].bnum = (off_t)-1;
2256
2257 buf_brelse(bp);
2258
2259 break;
2260 }
2261 }
2262
2263 if (i < blhdr->num_blocks) {
2264 break;
2265 }
2266 }
2267
2268 return 0;
2269 }
2270
2271
2272 static int
2273 journal_binfo_cmp(void *a, void *b)
2274 {
2275 block_info *bi_a = (struct block_info *)a;
2276 block_info *bi_b = (struct block_info *)b;
2277 daddr64_t res;
2278
2279 if (bi_a->bp == NULL) {
2280 return 1;
2281 }
2282 if (bi_b->bp == NULL) {
2283 return -1;
2284 }
2285
2286 // don't have to worry about negative block
2287 // numbers so this is ok to do.
2288 //
2289 res = (buf_blkno(bi_a->bp) - buf_blkno(bi_b->bp));
2290
2291 return (int)res;
2292 }
2293
2294
//
// end_transaction: commit the transaction "tr".  Unless force_it is set,
// small transactions are held back in jnl->cur_tr ("group commit").
// Otherwise the block list is written to the on-disk journal, the
// journal header is updated, and every buffered block gets an iodone
// filter (buffer_flushed_callback) before being pushed to its real
// location with buf_bawrite().  On any journal-write failure the
// journal is marked JOURNAL_INVALID and the transaction is aborted.
// Returns 0 on success, -1 on failure.
//
static int
end_transaction(transaction *tr, int force_it)
{
	int i, j, ret, amt;
	errno_t errno;
	off_t end;
	journal *jnl = tr->jnl;
	struct buf *bp;
	block_list_header *blhdr=NULL, *next=NULL;

	// a buffered (group-commit) transaction must already have been flushed
	if (jnl->cur_tr) {
		panic("jnl: jnl @ 0x%x already has cur_tr 0x%x, new tr: 0x%x\n",
			  jnl, jnl->cur_tr, tr);
	}

	// if there weren't any modified blocks in the transaction
	// just save off the transaction pointer and return.
	if (tr->total_bytes == jnl->jhdr->blhdr_size) {
		jnl->cur_tr = tr;
		return 0;
	}

	// if our transaction buffer isn't very full, just hang
	// on to it and don't actually flush anything.  this is
	// what is known as "group commit".  we will flush the
	// transaction buffer if it's full or if we have more than
	// one of them so we don't start hogging too much memory.
	//
	if (   force_it == 0
		&& (jnl->flags & JOURNAL_NO_GROUP_COMMIT) == 0
		&& tr->num_blhdrs < 3
		&& (tr->total_bytes <= ((tr->tbuffer_size*tr->num_blhdrs) - tr->tbuffer_size/8))) {

		jnl->cur_tr = tr;
		return 0;
	}


	// if we're here we're going to flush the transaction buffer to disk.
	// make sure there is room in the journal first.
	check_free_space(jnl, tr->total_bytes);

	// range check the end index
	if (jnl->jhdr->end <= 0 || jnl->jhdr->end > jnl->jhdr->size) {
		panic("jnl: end_transaction: end is bogus 0x%llx (sz 0x%llx)\n",
			  jnl->jhdr->end, jnl->jhdr->size);
	}

	// this transaction starts where the current journal ends
	tr->journal_start = jnl->jhdr->end;
	end               = jnl->jhdr->end;

	//
	// if the first entry in old_start[] isn't free yet, loop calling the
	// file system flush routine until it is (or we panic).
	//
	// (the high bit of an old_start[] entry marks a transaction that has
	//  not yet been fully flushed; buffer_flushed_callback clears it.)
	//
	i = 0;
	lock_oldstart(jnl);
	while ((jnl->old_start[0] & 0x8000000000000000LL) != 0) {
		if (jnl->flush) {
			unlock_oldstart(jnl);

			// re-check under no lock: jnl->flush may have been cleared
			if (jnl->flush) {
				jnl->flush(jnl->flush_arg);
			}

			// yield the cpu so others can get in to clear the lock bit
			(void)tsleep((void *)jnl, PRIBIO, "jnl-old-start-sleep", 1);

			lock_oldstart(jnl);
		}
		if (i++ >= 500) {
			panic("jnl: transaction that started at 0x%llx is not completing! jnl 0x%x\n",
				  jnl->old_start[0] & (~0x8000000000000000LL), jnl);
		}
	}

	//
	// slide everyone else down and put our latest guy in the last
	// entry in the old_start array
	//
	memcpy(&jnl->old_start[0], &jnl->old_start[1], sizeof(jnl->old_start)-sizeof(jnl->old_start[0]));
	jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] = tr->journal_start | 0x8000000000000000LL;

	unlock_oldstart(jnl);


	// for each block, make sure that the physical block # is set
	for(blhdr=tr->blhdr; blhdr; blhdr=next) {

		for(i=1; i < blhdr->num_blocks; i++) {
			daddr64_t blkno;
			daddr64_t lblkno;
			struct vnode *vp;

			bp = blhdr->binfo[i].bp;
			if (bp == NULL) {   // only true if a block was "killed"
				if (blhdr->binfo[i].bnum != (off_t)-1) {
					panic("jnl: inconsistent binfo (NULL bp w/bnum %lld; jnl @ 0x%x, tr 0x%x)\n",
						  blhdr->binfo[i].bnum, jnl, tr);
				}
				continue;
			}
			vp = buf_vnode(bp);
			blkno = buf_blkno(bp);
			lblkno = buf_lblkno(bp);

			if (vp == NULL && lblkno == blkno) {
				printf("jnl: end_tr: bad news! bp @ 0x%x w/null vp and l/blkno = %qd/%qd.  aborting the transaction (tr 0x%x jnl 0x%x).\n",
					bp, lblkno, blkno, tr, jnl);
				goto bad_journal;
			}

			// if the lblkno is the same as blkno and this bp isn't
			// associated with the underlying file system device then
			// we need to call bmap() to get the actual physical block.
			//
			if ((lblkno == blkno) && (vp != jnl->fsdev)) {
				off_t	f_offset;
				size_t 	contig_bytes;

				if (VNOP_BLKTOOFF(vp, lblkno, &f_offset)) {
					printf("jnl: end_tr: vnop_blktooff failed @ 0x%x, jnl 0x%x\n", bp, jnl);
					goto bad_journal;
				}
				if (VNOP_BLOCKMAP(vp, f_offset, buf_count(bp), &blkno, &contig_bytes, NULL, 0, NULL)) {
					printf("jnl: end_tr: can't blockmap the bp @ 0x%x, jnl 0x%x\n", bp, jnl);
					goto bad_journal;
				}
				// the journal replays whole blocks; a partial mapping
				// would corrupt the block on replay
				if ((uint32_t)contig_bytes < buf_count(bp)) {
					printf("jnl: end_tr: blk not physically contiguous on disk@ 0x%x, jnl 0x%x\n", bp, jnl);
					goto bad_journal;
				}
				buf_setblkno(bp, blkno);
			}
			// update this so we write out the correct physical block number!
			blhdr->binfo[i].bnum = (off_t)(blkno);
		}

		next = (block_list_header *)((long)blhdr->binfo[0].bnum);
	}

	// write each block-list header (followed by its block data, which
	// lives in the same tbuffer) out to the on-disk journal
	for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {

		amt = blhdr->bytes_used;

		// checksum must be computed with the checksum field itself zeroed
		blhdr->checksum = 0;
		blhdr->checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);

		ret = write_journal_data(jnl, &end, blhdr, amt);
		if (ret != amt) {
			printf("jnl: end_transaction: only wrote %d of %d bytes to the journal!\n",
				  ret, amt);

			goto bad_journal;
		}
	}

	jnl->jhdr->end  = end;    // update where the journal now ends
	tr->journal_end = end;    // the transaction ends here too
	if (tr->journal_start == 0 || tr->journal_end == 0) {
		panic("jnl: end_transaction: bad tr journal start/end: 0x%llx 0x%llx\n",
			  tr->journal_start, tr->journal_end);
	}

	if (write_journal_header(jnl) != 0) {
		goto bad_journal;
	}

	//
	// setup for looping through all the blhdr's.  we null out the
	// tbuffer and blhdr fields so that they're not used any more.
	//
	blhdr       = tr->blhdr;
	tr->tbuffer = NULL;
	tr->blhdr   = NULL;

	// the buffer_flushed_callback will only be called for the
	// real blocks that get flushed so we have to account for
	// the block_list_headers here.
	//
	tr->num_flushed = tr->num_blhdrs * jnl->jhdr->blhdr_size;

	// for each block, set the iodone callback and unlock it
	for(; blhdr; blhdr=next) {

		// we can re-order the buf ptrs because everything is written out already
		qsort(&blhdr->binfo[1], blhdr->num_blocks-1, sizeof(block_info), journal_binfo_cmp);

		for(i=1; i < blhdr->num_blocks; i++) {
			if (blhdr->binfo[i].bp == NULL) {
				continue;
			}

			// re-acquire the buffer from the cache; it should come back
			// as the very same bp we stashed in binfo (it's B_LOCKED)
			errno = buf_meta_bread(buf_vnode(blhdr->binfo[i].bp),
					               buf_lblkno(blhdr->binfo[i].bp),
					               buf_size(blhdr->binfo[i].bp),
					               NOCRED,
					               &bp);
			if (errno == 0 && bp != NULL) {
				struct vnode *save_vp;
				void *cur_filter;

				if (bp != blhdr->binfo[i].bp) {
					panic("jnl: end_tr: got back a different bp! (bp 0x%x should be 0x%x, jnl 0x%x\n",
						  bp, blhdr->binfo[i].bp, jnl);
				}

				if ((buf_flags(bp) & (B_LOCKED|B_DELWRI)) != (B_LOCKED|B_DELWRI)) {
					if (jnl->flags & JOURNAL_CLOSE_PENDING) {
						buf_clearflags(bp, B_LOCKED);
						buf_brelse(bp);
						continue;
					} else {
						panic("jnl: end_tr: !!!DANGER!!! bp 0x%x flags (0x%x) not LOCKED & DELWRI\n", bp, buf_flags(bp));
					}
				}
				save_vp = buf_vnode(bp);

				// arrange for buffer_flushed_callback(tr) to run when this
				// buffer's write to its real location completes
				buf_setfilter(bp, buffer_flushed_callback, tr, &cur_filter, NULL);

				if (cur_filter) {
					panic("jnl: bp @ 0x%x (blkno %qd, vp 0x%x) has non-null iodone (0x%x) buffflushcb 0x%x\n",
						  bp, buf_blkno(bp), save_vp, cur_filter, buffer_flushed_callback);
				}
				buf_clearflags(bp, B_LOCKED);

				// kicking off the write here helps performance
				buf_bawrite(bp);
				// XXXdbg this is good for testing: buf_bdwrite(bp);
				//buf_bdwrite(bp);

				// this undoes the vnode_ref() in journal_modify_block_end()
				vnode_rele_ext(save_vp, 0, 1);
			} else {
				printf("jnl: end_transaction: could not find block %Ld vp 0x%x!\n",
					blhdr->binfo[i].bnum, blhdr->binfo[i].bp);
				if (bp) {
					buf_clearflags(bp, B_LOCKED);
					buf_brelse(bp);
				}
			}
		}

		next = (block_list_header *)((long)blhdr->binfo[0].bnum);

		// we can free blhdr here since we won't need it any more
		blhdr->binfo[0].bnum = 0xdeadc0de;
		kmem_free(kernel_map, (vm_offset_t)blhdr, tr->tbuffer_size);
	}

	//printf("jnl: end_tr: tr @ 0x%x, jnl-blocks: 0x%llx - 0x%llx. exit!\n",
	//   tr, tr->journal_start, tr->journal_end);
	return 0;


  bad_journal:
	// mark the journal dead, clear our "in flight" bit in old_start,
	// and tear down the transaction
	jnl->flags |= JOURNAL_INVALID;
	jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] &= ~0x8000000000000000LL;
	abort_transaction(jnl, tr);
	return -1;
}
2557
//
// abort_transaction: discard the transaction "tr" without writing it
// to the journal.  Each buffered block is re-acquired from the cache,
// marked invalid (which also clears its locked/delayed state), and
// released; the vnode_ref() from journal_modify_block_end() is dropped.
// All tbuffers and the transaction struct itself are freed.
//
static void
abort_transaction(journal *jnl, transaction *tr)
{
	int i;
	errno_t errno;
	block_list_header *blhdr, *next;
	struct buf *bp;
	struct vnode *save_vp;

	// for each block list header, iterate over the blocks then
	// free up the memory associated with the block list.
	//
	// for each block, clear the lock bit and release it.
	//
	for(blhdr=tr->blhdr; blhdr; blhdr=next) {

		for(i=1; i < blhdr->num_blocks; i++) {
			if (blhdr->binfo[i].bp == NULL) {
				continue;
			}
			// skip blocks that are no longer pinned for us
			// (no vnode, or the B_LOCKED bit is already gone)
			if ( (buf_vnode(blhdr->binfo[i].bp) == NULL) ||
			     !(buf_flags(blhdr->binfo[i].bp) & B_LOCKED) ) {
				continue;
			}

			errno = buf_meta_bread(buf_vnode(blhdr->binfo[i].bp),
					               buf_lblkno(blhdr->binfo[i].bp),
					               buf_size(blhdr->binfo[i].bp),
					               NOCRED,
					               &bp);
			if (errno == 0) {
				if (bp != blhdr->binfo[i].bp) {
					panic("jnl: abort_tr: got back a different bp! (bp 0x%x should be 0x%x, jnl 0x%x\n",
						  bp, blhdr->binfo[i].bp, jnl);
				}

				// releasing a bp marked invalid
				// also clears the locked and delayed state
				buf_markinvalid(bp);
				save_vp = buf_vnode(bp);

				buf_brelse(bp);

				// this undoes the vnode_ref() in journal_modify_block_end()
				vnode_rele_ext(save_vp, 0, 1);
			} else {
				printf("jnl: abort_tr: could not find block %Ld vp 0x%x!\n",
					blhdr->binfo[i].bnum, blhdr->binfo[i].bp);
				if (bp) {
					buf_brelse(bp);
				}
			}
		}

		next = (block_list_header *)((long)blhdr->binfo[0].bnum);

		// we can free blhdr here since we won't need it any more
		blhdr->binfo[0].bnum = 0xdeadc0de;
		kmem_free(kernel_map, (vm_offset_t)blhdr, tr->tbuffer_size);
	}

	// poison and free the transaction itself
	tr->tbuffer     = NULL;
	tr->blhdr       = NULL;
	tr->total_bytes = 0xdbadc0de;
	FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
}
2623
2624
2625 int
2626 journal_end_transaction(journal *jnl)
2627 {
2628 int ret;
2629 transaction *tr;
2630
2631 CHECK_JOURNAL(jnl);
2632
2633 if ((jnl->flags & JOURNAL_INVALID) && jnl->owner == NULL) {
2634 return 0;
2635 }
2636
2637 if (jnl->owner != current_thread()) {
2638 panic("jnl: end_tr: I'm not the owner! jnl 0x%x, owner 0x%x, curact 0x%x\n",
2639 jnl, jnl->owner, current_thread());
2640 }
2641
2642 free_old_stuff(jnl);
2643
2644 jnl->nested_count--;
2645 if (jnl->nested_count > 0) {
2646 return 0;
2647 } else if (jnl->nested_count < 0) {
2648 panic("jnl: jnl @ 0x%x has negative nested count (%d). bad boy.\n", jnl, jnl->nested_count);
2649 }
2650
2651 if (jnl->flags & JOURNAL_INVALID) {
2652 if (jnl->active_tr) {
2653 if (jnl->cur_tr != NULL) {
2654 panic("jnl: journal @ 0x%x has active tr (0x%x) and cur tr (0x%x)\n",
2655 jnl, jnl->active_tr, jnl->cur_tr);
2656 }
2657
2658 tr = jnl->active_tr;
2659 jnl->active_tr = NULL;
2660 abort_transaction(jnl, tr);
2661 }
2662
2663 jnl->owner = NULL;
2664 unlock_journal(jnl);
2665
2666 return EINVAL;
2667 }
2668
2669 tr = jnl->active_tr;
2670 CHECK_TRANSACTION(tr);
2671
2672 // clear this out here so that when check_free_space() calls
2673 // the FS flush function, we don't panic in journal_flush()
2674 // if the FS were to call that. note: check_free_space() is
2675 // called from end_transaction().
2676 //
2677 jnl->active_tr = NULL;
2678 ret = end_transaction(tr, 0);
2679
2680 jnl->owner = NULL;
2681 unlock_journal(jnl);
2682
2683 return ret;
2684 }
2685
2686
2687 int
2688 journal_flush(journal *jnl)
2689 {
2690 int need_signal = 0;
2691
2692 CHECK_JOURNAL(jnl);
2693
2694 if (jnl->flags & JOURNAL_INVALID) {
2695 return -1;
2696 }
2697
2698 if (jnl->owner != current_thread()) {
2699 int ret;
2700
2701 lock_journal(jnl);
2702 need_signal = 1;
2703 }
2704
2705 free_old_stuff(jnl);
2706
2707 // if we're not active, flush any buffered transactions
2708 if (jnl->active_tr == NULL && jnl->cur_tr) {
2709 transaction *tr = jnl->cur_tr;
2710
2711 jnl->cur_tr = NULL;
2712 end_transaction(tr, 1); // force it to get flushed
2713 }
2714
2715 if (need_signal) {
2716 unlock_journal(jnl);
2717 }
2718
2719 return 0;
2720 }
2721
2722 int
2723 journal_active(journal *jnl)
2724 {
2725 if (jnl->flags & JOURNAL_INVALID) {
2726 return -1;
2727 }
2728
2729 return (jnl->active_tr == NULL) ? 0 : 1;
2730 }
2731
2732 void *
2733 journal_owner(journal *jnl)
2734 {
2735 return jnl->owner;
2736 }