]> git.saurik.com Git - apple/xnu.git/blob - bsd/vfs/vfs_journal.c
6d82743a1e092d3b04b6774793693b32fd0ea06e
[apple/xnu.git] / bsd / vfs / vfs_journal.c
1 /*
2 * Copyright (c) 1995-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 //
29 // This file implements a simple write-ahead journaling layer.
30 // In theory any file system can make use of it by calling these
31 // functions when the fs wants to modify meta-data blocks. See
32 // vfs_journal.h for a more detailed description of the api and
33 // data structures.
34 //
35 // Dominic Giampaolo (dbg@apple.com)
36 //
37
38 #ifdef KERNEL
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/file_internal.h>
44 #include <sys/stat.h>
45 #include <sys/buf_internal.h>
46 #include <sys/proc_internal.h>
47 #include <sys/mount_internal.h>
48 #include <sys/namei.h>
49 #include <sys/vnode_internal.h>
50 #include <sys/ioctl.h>
51 #include <sys/tty.h>
52 #include <sys/ubc.h>
53 #include <sys/malloc.h>
54 #include <kern/thread.h>
55 #include <sys/disk.h>
56 #include <miscfs/specfs/specdev.h>
57
58 extern task_t kernel_task;
59
60 #else
61
62 #include <stdio.h>
63 #include <stdlib.h>
64 #include <string.h>
65 #include <limits.h>
66 #include <errno.h>
67 #include <fcntl.h>
68 #include <unistd.h>
69 #include <stdarg.h>
70 #include <sys/types.h>
71 #include "compat.h"
72
73 #endif /* KERNEL */
74
75 #include "vfs_journal.h"
76
77
78 // number of bytes to checksum in a block_list_header
79 // NOTE: this should be enough to clear out the header
80 // fields as well as the first entry of binfo[]
81 #define BLHDR_CHECKSUM_SIZE 32
82
83
84
85 static int end_transaction(transaction *tr, int force_it);
86 static void abort_transaction(journal *jnl, transaction *tr);
87 static void dump_journal(journal *jnl);
88
89 static __inline__ void lock_journal(journal *jnl);
90 static __inline__ void unlock_journal(journal *jnl);
91 static __inline__ void lock_oldstart(journal *jnl);
92 static __inline__ void unlock_oldstart(journal *jnl);
93
94
95
96
97 //
98 // 3105942 - Coalesce writes to the same block on journal replay
99 //
100
101 typedef struct bucket {
102 off_t block_num;
103 size_t jnl_offset;
104 size_t block_size;
105 } bucket;
106
107 #define STARTING_BUCKETS 256
108
109 static int add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr);
110 static int grow_table(struct bucket **buf_ptr, int num_buckets, int new_size);
111 static int lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full);
112 static int do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr);
113 static int insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr, int overwriting);
114
//
// Consistency check on a journal struct: panics if the journal pointer,
// its devices, or its in-memory header are NULL/corrupted, or if the
// header's start/end/size offsets are outside sane bounds.
// NOTE(review): the 1024*1024*1024 comparisons look like a 1GB sanity
// ceiling on journal offsets rather than an on-disk format limit — confirm.
//
#define CHECK_JOURNAL(jnl) \
    do { \
    if (jnl == NULL) {\
	panic("%s:%d: null journal ptr?\n", __FILE__, __LINE__);\
    }\
    if (jnl->jdev == NULL) { \
	panic("%s:%d: jdev is null!\n", __FILE__, __LINE__);\
    } \
    if (jnl->fsdev == NULL) { \
	panic("%s:%d: fsdev is null!\n", __FILE__, __LINE__);\
    } \
    if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC) {\
	panic("%s:%d: jhdr magic corrupted (0x%x != 0x%x)\n",\
	__FILE__, __LINE__, jnl->jhdr->magic, JOURNAL_HEADER_MAGIC);\
    }\
    if (   jnl->jhdr->start <= 0 \
	|| jnl->jhdr->start > jnl->jhdr->size\
	|| jnl->jhdr->start > 1024*1024*1024) {\
	panic("%s:%d: jhdr start looks bad (0x%llx max size 0x%llx)\n", \
	__FILE__, __LINE__, jnl->jhdr->start, jnl->jhdr->size);\
    }\
    if (   jnl->jhdr->end <= 0 \
	|| jnl->jhdr->end > jnl->jhdr->size\
	|| jnl->jhdr->end > 1024*1024*1024) {\
	panic("%s:%d: jhdr end looks bad (0x%llx max size 0x%llx)\n", \
	__FILE__, __LINE__, jnl->jhdr->end, jnl->jhdr->size);\
    }\
    if (jnl->jhdr->size > 1024*1024*1024) {\
	panic("%s:%d: jhdr size looks bad (0x%llx)\n",\
	__FILE__, __LINE__, jnl->jhdr->size);\
    } \
    } while(0)
147
//
// Consistency check on a transaction struct: panics if the transaction,
// its back-pointer to the journal, or its block-list header look wrong,
// or if its journal start/end offsets or max_blocks are out of range.
//
#define CHECK_TRANSACTION(tr) \
    do {\
    if (tr == NULL) {\
	panic("%s:%d: null transaction ptr?\n", __FILE__, __LINE__);\
    }\
    if (tr->jnl == NULL) {\
	panic("%s:%d: null tr->jnl ptr?\n", __FILE__, __LINE__);\
    }\
    if (tr->blhdr != (block_list_header *)tr->tbuffer) {\
	panic("%s:%d: blhdr (0x%x) != tbuffer (0x%x)\n", __FILE__, __LINE__, tr->blhdr, tr->tbuffer);\
    }\
    if (tr->total_bytes < 0) {\
	panic("%s:%d: tr total_bytes looks bad: %d\n", __FILE__, __LINE__, tr->total_bytes);\
    }\
    if (tr->journal_start < 0 || tr->journal_start > 1024*1024*1024) {\
	panic("%s:%d: tr journal start looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_start);\
    }\
    if (tr->journal_end < 0 || tr->journal_end > 1024*1024*1024) {\
	panic("%s:%d: tr journal end looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_end);\
    }\
    if (tr->blhdr && (tr->blhdr->max_blocks <= 0 || tr->blhdr->max_blocks > (tr->jnl->jhdr->size/tr->jnl->jhdr->jhdr_size))) {\
	panic("%s:%d: tr blhdr max_blocks looks bad: %d\n", __FILE__, __LINE__, tr->blhdr->max_blocks);\
    }\
    } while(0)
172
173
174
175 //
176 // this isn't a great checksum routine but it will do for now.
177 // we use it to checksum the journal header and the block list
178 // headers that are at the start of each transaction.
179 //
static int
calc_checksum(char *ptr, int len)
{
	int checksum = 0;
	unsigned char *p = (unsigned char *)ptr;
	unsigned char *end = p + len;

	// Byte-at-a-time mix: shift the accumulator left one byte and
	// fold the next input byte in.  Deliberately simple — this is
	// the stable on-disk checksum used by the journal header and the
	// block list headers, so the algorithm must not change.
	while (p < end) {
		checksum = (checksum << 8) ^ (checksum + *p++);
	}

	// final complement, so an all-zero buffer doesn't checksum to 0
	return (~checksum);
}
192
//
// Journal Locking
//
// Lock group and attributes shared by every journal's mutexes
// (jlock and old_start_lock); allocated once by journal_init().
lck_grp_attr_t * jnl_group_attr;
lck_attr_t * jnl_lock_attr;
lck_grp_t * jnl_mutex_group;

// One-time setup of the lock group/attributes above.  Must run before
// any journal mutex is initialized.
void
journal_init()
{
	jnl_lock_attr = lck_attr_alloc_init();
	jnl_group_attr = lck_grp_attr_alloc_init();
	jnl_mutex_group = lck_grp_alloc_init("jnl-mutex", jnl_group_attr);
}
207
// Take the journal's main mutex (serializes transaction start/end).
static __inline__ void
lock_journal(journal *jnl)
{
	lck_mtx_lock(&jnl->jlock);
}
213
// Release the journal's main mutex.
static __inline__ void
unlock_journal(journal *jnl)
{
	lck_mtx_unlock(&jnl->jlock);
}
219
// Take the mutex protecting old_start[], completed_trs and tr_freeme.
static __inline__ void
lock_oldstart(journal *jnl)
{
	lck_mtx_lock(&jnl->old_start_lock);
}
225
// Release the old_start_lock mutex.
static __inline__ void
unlock_oldstart(journal *jnl)
{
	lck_mtx_unlock(&jnl->old_start_lock);
}
231
232
233
234 #define JNL_WRITE 0x0001
235 #define JNL_READ 0x0002
236 #define JNL_HEADER 0x8000
237
238 //
239 // This function sets up a fake buf and passes it directly to the
240 // journal device strategy routine (so that it won't get cached in
241 // the block cache.)
242 //
243 // It also handles range checking the i/o so that we don't write
244 // outside the journal boundaries and it will wrap the i/o back
245 // to the beginning if necessary (skipping over the journal header)
246 //
//
// Perform raw i/o against the journal device, bypassing the block cache
// by building a throw-away buf and handing it straight to the device's
// strategy routine.  Range-checks *offset against the journal size,
// clamps each chunk to the device's max transfer size, and wraps i/o
// that runs off the end of the journal back to just past the header
// (jhdr_size).  On success returns the number of bytes transferred and
// advances *offset; on a strategy/biowait error returns 0 (*offset may
// already have been partially advanced on a multi-chunk i/o).
//
static size_t
do_journal_io(journal *jnl, off_t *offset, void *data, size_t len, int direction)
{
	int err, io_sz=0, curlen=len;
	buf_t bp;
	int max_iosize = 128 * 1024;	// fallback if the mount reports neither read nor write max
	struct vfsioattr ioattr;

	if (*offset < 0 || *offset > jnl->jhdr->size) {
		panic("jnl: do_jnl_io: bad offset 0x%llx (max 0x%llx)\n", *offset, jnl->jhdr->size);
	}
	// pick up the device's i/o limits so we never exceed its max transfer
	vfs_ioattr(vnode_mount(jnl->jdev), &ioattr);

	if (direction & JNL_WRITE)
		max_iosize = ioattr.io_maxwritecnt;
	else if (direction & JNL_READ)
		max_iosize = ioattr.io_maxreadcnt;

again:
	bp = alloc_io_buf(jnl->jdev, 1);

	// if this chunk would run past the end of the journal, either wrap
	// the offset to just past the header, or shorten the chunk so it
	// stops exactly at the end (the remainder is done on the next pass)
	if (*offset + (off_t)curlen > jnl->jhdr->size && *offset != 0 && jnl->jhdr->size != 0) {
		if (*offset == jnl->jhdr->size) {
			*offset = jnl->jhdr->jhdr_size;
		} else {
			curlen = (off_t)jnl->jhdr->size - *offset;
		}
	}

	if (curlen > max_iosize) {
		curlen = max_iosize;
	}

	if (curlen <= 0) {
		panic("jnl: do_jnl_io: curlen == %d, offset 0x%llx len %d\n", curlen, *offset, len);
	}

	// offset 0 is the journal header; only callers that pass JNL_HEADER
	// are allowed to touch it
	if (*offset == 0 && (direction & JNL_HEADER) == 0) {
		panic("jnl: request for i/o to jnl-header without JNL_HEADER flag set! (len %d, data %p)\n", curlen, data);
	}

	if (direction & JNL_READ)
		buf_setflags(bp, B_READ);
	else {
		/*
		 * don't have to set any flags
		 */
		vnode_startwrite(jnl->jdev);
	}
	buf_setsize(bp, curlen);
	buf_setcount(bp, curlen);
	buf_setdataptr(bp, (uintptr_t)data);
	// block numbers are in units of jhdr_size, offset by where the
	// journal lives on the device
	buf_setblkno(bp, (daddr64_t) ((jnl->jdev_offset + *offset) / (off_t)jnl->jhdr->jhdr_size));
	buf_setlblkno(bp, (daddr64_t) ((jnl->jdev_offset + *offset) / (off_t)jnl->jhdr->jhdr_size));

	err = VNOP_STRATEGY(bp);
	if (!err) {
		err = (int)buf_biowait(bp);
	}
	free_io_buf(bp);

	if (err) {
		printf("jnl: do_jnl_io: strategy err 0x%x\n", err);
		return 0;
	}

	*offset += curlen;
	io_sz += curlen;
	if (io_sz != len) {
		// handle wrap-around: advance the data pointer past what we
		// just did and go around for the rest
		data = (char *)data + curlen;
		curlen = len - io_sz;
		if (*offset >= jnl->jhdr->size) {
			*offset = jnl->jhdr->jhdr_size;
		}
		goto again;
	}

	return io_sz;
}
327
328 static size_t
329 read_journal_data(journal *jnl, off_t *offset, void *data, size_t len)
330 {
331 return do_journal_io(jnl, offset, data, len, JNL_READ);
332 }
333
334 static size_t
335 write_journal_data(journal *jnl, off_t *offset, void *data, size_t len)
336 {
337 return do_journal_io(jnl, offset, data, len, JNL_WRITE);
338 }
339
340
341 static int
342 read_journal_header(journal *jnl, void *data, size_t len)
343 {
344 off_t hdr_offset = 0;
345
346 return do_journal_io(jnl, &hdr_offset, data, len, JNL_READ|JNL_HEADER);
347 }
348
//
// Checksum and write the in-memory journal header to disk.  The device
// cache is flushed both before (so all journaled data the header points
// at is stable) and after (so the new header can't be passed by later
// transaction writes).  Returns 0 on success; on a failed header write
// marks the journal JOURNAL_INVALID and returns -1.
//
static int
write_journal_header(journal *jnl)
{
	static int num_err_prints = 0;	// process-lifetime cap on flush-error spam
	int ret;
	off_t jhdr_offset = 0;
	struct vfs_context context;

	context.vc_proc = current_proc();
	context.vc_ucred = NOCRED;
	//
	// XXXdbg note: this ioctl doesn't seem to do anything on firewire disks.
	//
	ret = VNOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, &context);
	if (ret != 0) {
		//
		// Only print this error if it's a different error than the
		// previous one, or if it's the first time for this device
		// or if the total number of printfs is less than 25.  We
		// allow for up to 25 printfs to insure that some make it
		// into the on-disk syslog.  Otherwise if we only printed
		// one, it's possible it would never make it to the syslog
		// for the root volume and that makes debugging hard.
		//
		if (   ret != jnl->last_flush_err
		    || (jnl->flags & JOURNAL_FLUSHCACHE_ERR) == 0
		    || num_err_prints++ < 25) {

			printf("jnl: flushing fs disk buffer returned 0x%x\n", ret);

			jnl->flags |= JOURNAL_FLUSHCACHE_ERR;
			jnl->last_flush_err = ret;
		}
	}

	// checksum is computed with the checksum field itself zeroed
	jnl->jhdr->checksum = 0;
	jnl->jhdr->checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header));
	if (do_journal_io(jnl, &jhdr_offset, jnl->header_buf, jnl->jhdr->jhdr_size, JNL_WRITE|JNL_HEADER) != jnl->jhdr->jhdr_size) {
		printf("jnl: write_journal_header: error writing the journal header!\n");
		jnl->flags |= JOURNAL_INVALID;
		return -1;
	}

	// Have to flush after writing the journal header so that
	// a future transaction doesn't sneak out to disk before
	// the header does and thus overwrite data that the old
	// journal header refers to.  Saw this exact case happen
	// on an IDE bus analyzer with Larry Barras so while it
	// may seem obscure, it's not.
	//
	VNOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, &context);

	return 0;
}
404
405
406
407 //
408 // this is a work function used to free up transactions that
409 // completed. they can't be free'd from buffer_flushed_callback
410 // because it is called from deep within the disk driver stack
411 // and thus can't do something that would potentially cause
412 // paging. it gets called by each of the journal api entry
413 // points so stuff shouldn't hang around for too long.
414 //
415 static void
416 free_old_stuff(journal *jnl)
417 {
418 transaction *tr, *next;
419
420 lock_oldstart(jnl);
421 tr = jnl->tr_freeme;
422 jnl->tr_freeme = NULL;
423 unlock_oldstart(jnl);
424
425 for(; tr; tr=next) {
426 next = tr->next;
427 FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
428 }
429
430 }
431
432
433
434 //
435 // This is our callback that lets us know when a buffer has been
436 // flushed to disk. It's called from deep within the driver stack
437 // and thus is quite limited in what it can do. Notably, it can
438 // not initiate any new i/o's or allocate/free memory.
439 //
//
// I/O-completion callback for journaled buffers.  Runs deep in the
// driver stack, so it may not allocate/free memory or start new i/o.
// It accumulates flushed byte counts on the transaction; when the last
// buffer of a transaction completes, exactly one caller (elected under
// old_start_lock via the 0xfbadc0de sentinel) advances active_start,
// coalesces adjacent completed transactions, and queues fully-merged
// transactions on tr_freeme for free_old_stuff() to reclaim later.
//
static void
buffer_flushed_callback(struct buf *bp, void *arg)
{
	transaction *tr;
	journal *jnl;
	transaction *ctr, *prev=NULL, *next;
	int i, bufsize;


	//printf("jnl: buf flush: bp @ 0x%x l/blkno %qd/%qd vp 0x%x tr @ 0x%x\n",
	//   bp, buf_lblkno(bp), buf_blkno(bp), buf_vnode(bp), arg);

	// snarf out the bits we want
	bufsize = buf_size(bp);
	tr = (transaction *)arg;

	// then we've already seen it
	if (tr == NULL) {
		return;
	}

	CHECK_TRANSACTION(tr);

	jnl = tr->jnl;
	if (jnl->flags & JOURNAL_INVALID) {
		return;
	}

	CHECK_JOURNAL(jnl);

	// update the number of blocks that have been flushed.
	// this buf may represent more than one block so take
	// that into account.
	OSAddAtomic(bufsize, &tr->num_flushed);


	// if this transaction isn't done yet, just return as
	// there is nothing to do.
	if ((tr->num_flushed + tr->num_killed) < tr->total_bytes) {
		return;
	}

	// this will single thread checking the transaction
	lock_oldstart(jnl);

	if (tr->total_bytes == 0xfbadc0de) {
		// then someone beat us to it...
		unlock_oldstart(jnl);
		return;
	}

	// mark this so that we're the owner of dealing with the
	// cleanup for this transaction
	tr->total_bytes = 0xfbadc0de;

	//printf("jnl: tr 0x%x (0x%llx 0x%llx) in jnl 0x%x completed.\n",
	//   tr, tr->journal_start, tr->journal_end, jnl);

	// find this entry in the old_start[] index and mark it completed
	// (the top bit of an old_start[] entry flags it as still in flight)
	for(i=0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {

		if ((jnl->old_start[i] & ~(0x8000000000000000LL)) == tr->journal_start) {
			jnl->old_start[i] &= ~(0x8000000000000000LL);
			break;
		}
	}
	if (i >= sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
		panic("jnl: buffer_flushed: did not find tr w/start @ %lld (tr 0x%x, jnl 0x%x)\n",
		tr->journal_start, tr, jnl);
	}
	unlock_oldstart(jnl);


	// if we are here then we need to update the journal header
	// to reflect that this transaction is complete
	if (tr->journal_start == jnl->active_start) {
		jnl->active_start = tr->journal_end;
		tr->journal_start = tr->journal_end = (off_t)0;
	}

	// go through the completed_trs list and try to coalesce
	// entries, restarting back at the beginning if we have to.
	// NOTE(review): only the tr_freeme manipulation below takes
	// old_start_lock; completed_trs itself is walked unlocked here —
	// presumably serialized by the 0xfbadc0de ownership above. Confirm.
	for(ctr=jnl->completed_trs; ctr; prev=ctr, ctr=next) {
		if (ctr->journal_start == jnl->active_start) {
			// this completed tr is contiguous with the active start:
			// consume it, unlink it, and queue it for freeing
			jnl->active_start = ctr->journal_end;
			if (prev) {
				prev->next = ctr->next;
			}
			if (ctr == jnl->completed_trs) {
				jnl->completed_trs = ctr->next;
			}

			lock_oldstart(jnl);
			next = jnl->completed_trs;	// this starts us over again
			ctr->next = jnl->tr_freeme;
			jnl->tr_freeme = ctr;
			ctr = NULL;
			unlock_oldstart(jnl);
		} else if (tr->journal_end == ctr->journal_start) {
			// our range abuts ctr's front: grow ctr backwards
			ctr->journal_start = tr->journal_start;
			next = jnl->completed_trs;	// this starts us over again
			ctr = NULL;
			tr->journal_start = tr->journal_end = (off_t)0;
		} else if (tr->journal_start == ctr->journal_end) {
			// our range abuts ctr's back: grow ctr forwards
			ctr->journal_end = tr->journal_end;
			next = ctr->next;
			tr->journal_start = tr->journal_end = (off_t)0;
		} else {
			next = ctr->next;
		}
	}

	// if this is true then we didn't merge with anyone
	// so link ourselves in at the head of the completed
	// transaction list.
	if (tr->journal_start != 0) {
		// put this entry into the correct sorted place
		// in the list instead of just at the head.
		//

		prev = NULL;
		for(ctr=jnl->completed_trs; ctr && tr->journal_start > ctr->journal_start; prev=ctr, ctr=ctr->next) {
			// just keep looping
		}

		if (ctr == NULL && prev == NULL) {
			jnl->completed_trs = tr;
			tr->next = NULL;
		} else if (ctr == jnl->completed_trs) {
			tr->next = jnl->completed_trs;
			jnl->completed_trs = tr;
		} else {
			tr->next = prev->next;
			prev->next = tr;
		}
	} else {
		// if we're here this tr got merged with someone else so
		// put it on the list to be free'd
		lock_oldstart(jnl);
		tr->next = jnl->tr_freeme;
		jnl->tr_freeme = tr;
		unlock_oldstart(jnl);
	}
}
584
585
586 #include <libkern/OSByteOrder.h>
587
588 #define SWAP16(x) OSSwapInt16(x)
589 #define SWAP32(x) OSSwapInt32(x)
590 #define SWAP64(x) OSSwapInt64(x)
591
592
593 static void
594 swap_journal_header(journal *jnl)
595 {
596 jnl->jhdr->magic = SWAP32(jnl->jhdr->magic);
597 jnl->jhdr->endian = SWAP32(jnl->jhdr->endian);
598 jnl->jhdr->start = SWAP64(jnl->jhdr->start);
599 jnl->jhdr->end = SWAP64(jnl->jhdr->end);
600 jnl->jhdr->size = SWAP64(jnl->jhdr->size);
601 jnl->jhdr->blhdr_size = SWAP32(jnl->jhdr->blhdr_size);
602 jnl->jhdr->checksum = SWAP32(jnl->jhdr->checksum);
603 jnl->jhdr->jhdr_size = SWAP32(jnl->jhdr->jhdr_size);
604 }
605
//
// Byte-swap a block list header and its binfo[] entries in place.
// The fixed fields are swapped first; the binfo[] loop is skipped (with
// a warning) if the now-native num_blocks would index past blhdr_size,
// since a corrupted count could otherwise walk off the buffer.
//
static void
swap_block_list_header(journal *jnl, block_list_header *blhdr)
{
	int i;

	blhdr->max_blocks = SWAP16(blhdr->max_blocks);
	blhdr->num_blocks = SWAP16(blhdr->num_blocks);
	blhdr->bytes_used = SWAP32(blhdr->bytes_used);
	blhdr->checksum = SWAP32(blhdr->checksum);
	blhdr->pad = SWAP32(blhdr->pad);

	// sanity: num_blocks has already been swapped to native order here
	if (blhdr->num_blocks * sizeof(blhdr->binfo[0]) > jnl->jhdr->blhdr_size) {
		printf("jnl: blhdr num blocks looks suspicious (%d). not swapping.\n", blhdr->num_blocks);
		return;
	}

	for(i=0; i < blhdr->num_blocks; i++) {
		blhdr->binfo[i].bnum = SWAP64(blhdr->binfo[i].bnum);
		blhdr->binfo[i].bsize = SWAP32(blhdr->binfo[i].bsize);
		// NOTE(review): bp is a pointer swapped via a 32-bit cast —
		// presumably only meaningful on 32-bit kernels; confirm.
		blhdr->binfo[i].bp = (void *)SWAP32((int)blhdr->binfo[i].bp);
	}
}
628
629
//
// Write `bsize` bytes of replayed journal data (`block_ptr`) over fs
// block `fs_block` on the file system device.  Reads (or, on read
// failure, fabricates) the buffer, overwrites it, writes it through,
// then re-reads and invalidates it so later readers can use a different
// block size.  Returns 0 on success, -1 or the bwrite error on failure.
//
static int
update_fs_block(journal *jnl, void *block_ptr, off_t fs_block, size_t bsize)
{
	int ret;
	struct buf *oblock_bp=NULL;

	// first read the block we want.
	ret = buf_meta_bread(jnl->fsdev, (daddr64_t)fs_block, bsize, NOCRED, &oblock_bp);
	if (ret != 0) {
		printf("jnl: update_fs_block: error reading fs block # %lld! (ret %d)\n", fs_block, ret);

		if (oblock_bp) {
			buf_brelse(oblock_bp);
			oblock_bp = NULL;
		}

		// let's try to be aggressive here and just re-write the block
		oblock_bp = buf_getblk(jnl->fsdev, (daddr64_t)fs_block, bsize, 0, 0, BLK_META);
		if (oblock_bp == NULL) {
			printf("jnl: update_fs_block: buf_getblk() for %lld failed! failing update.\n", fs_block);
			return -1;
		}
	}

	// make sure it's the correct size.
	if (buf_size(oblock_bp) != bsize) {
		buf_brelse(oblock_bp);
		return -1;
	}

	// copy the journal data over top of it
	memcpy((void *)buf_dataptr(oblock_bp), block_ptr, bsize);

	// VNOP_BWRITE releases the buf regardless of outcome
	if ((ret = VNOP_BWRITE(oblock_bp)) != 0) {
		printf("jnl: update_fs_block: failed to update block %lld (ret %d)\n", fs_block,ret);
		return ret;
	}

	// and now invalidate it so that if someone else wants to read
	// it in a different size they'll be able to do it.
	ret = buf_meta_bread(jnl->fsdev, (daddr64_t)fs_block, bsize, NOCRED, &oblock_bp);
	if (oblock_bp) {
		buf_markinvalid(oblock_bp);
		buf_brelse(oblock_bp);
	}

	return 0;
}
678
//
// Grow the replay coalescing table (*buf_ptr) to new_size buckets.
// Allocates a new array, copies the num_buckets existing entries,
// marks the new tail entries free (block_num == -1), frees the old
// array, and points *buf_ptr at the new one.  Returns the new capacity,
// the unchanged capacity if new_size is not larger, or -1 on alloc
// failure (in which case *buf_ptr is untouched).
//
static int
grow_table(struct bucket **buf_ptr, int num_buckets, int new_size)
{
	struct bucket *newBuf;
	int current_size = num_buckets, i;

	// return if newsize is less than the current size
	if (new_size < num_buckets) {
		return current_size;
	}

	if ((MALLOC(newBuf, struct bucket *, new_size*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) {
		printf("jnl: grow_table: no memory to expand coalesce buffer!\n");
		return -1;
	}

	//  printf("jnl: lookup_bucket: expanded co_buf to %d elems\n", new_size);

	// copy existing elements
	bcopy(*buf_ptr, newBuf, num_buckets*sizeof(struct bucket));

	// initialize the new ones (-1 block_num marks a free slot)
	for(i=num_buckets; i < new_size; i++) {
		newBuf[i].block_num = (off_t)-1;
	}

	// free the old container
	FREE(*buf_ptr, M_TEMP);

	// reset the buf_ptr
	*buf_ptr = newBuf;

	return new_size;
}
713
//
// Binary-search the sorted coalescing table for block_num.  Returns the
// index of the matching entry (the right-most one when duplicates
// exist), or the index where a new entry for block_num should be
// inserted; num_full is the count of occupied slots.
//
static int
lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full)
{
	int lo, hi, index, matches, i;

	if (num_full == 0) {
		return 0; // table is empty, so insert at index=0
	}

	lo = 0;
	hi = num_full - 1;
	index = -1;

	// perform binary search for block_num
	// (note: hi is set to mid, not mid-1, on a low miss; the
	// post-loop check below catches the lo==hi convergence case)
	do {
		int mid = (hi - lo)/2 + lo;
		off_t this_num = (*buf_ptr)[mid].block_num;

		if (block_num == this_num) {
			index = mid;
			break;
		}

		if (block_num < this_num) {
			hi = mid;
			continue;
		}

		if (block_num > this_num) {
			lo = mid + 1;
			continue;
		}
	} while(lo < hi);

	// check if lo and hi converged on the match
	if (block_num == (*buf_ptr)[hi].block_num) {
		index = hi;
	}

	// if no existing entry found, find index for new one
	if (index == -1) {
		index = (block_num < (*buf_ptr)[hi].block_num) ? hi : hi + 1;
	} else {
		// make sure that we return the right-most index in the case of multiple matches
		matches = 0;
		i = index + 1;
		while(i < num_full && block_num == (*buf_ptr)[i].block_num) {
			matches++;
			i++;
		}

		index += matches;
	}

	return index;
}
770
//
// Place an entry (block num/size/journal offset) into the coalescing
// table at blk_index.  When not overwriting an existing slot, grows the
// table if full and shifts later entries right to make room.  The
// journal offset is wrapped past the header if it runs off the end of
// the journal.  Returns blk_index, or -1 if the table could not grow.
//
static int
insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr, int overwriting)
{
	if (!overwriting) {
		// grow the table if we're out of space
		if (*num_full_ptr >= *num_buckets_ptr) {
			int new_size = *num_buckets_ptr * 2;
			int grow_size = grow_table(buf_ptr, *num_buckets_ptr, new_size);

			if (grow_size < new_size) {
				printf("jnl: add_block: grow_table returned an error!\n");
				return -1;
			}

			*num_buckets_ptr = grow_size; //update num_buckets to reflect the new size
		}

		// if we're not inserting at the end, we need to bcopy
		if (blk_index != *num_full_ptr) {
			bcopy( (*buf_ptr)+(blk_index), (*buf_ptr)+(blk_index+1), (*num_full_ptr-blk_index)*sizeof(struct bucket) );
		}

		(*num_full_ptr)++; // increment only if we're not overwriting
	}

	// sanity check the values we're about to add
	if (offset >= jnl->jhdr->size) {
		offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
	}
	// NOTE(review): size is size_t (unsigned), so "<= 0" can only catch
	// size == 0 — presumably that was the intent; confirm.
	if (size <= 0) {
		panic("jnl: insert_block: bad size in insert_block (%d)\n", size);
	}

	(*buf_ptr)[blk_index].block_num = num;
	(*buf_ptr)[blk_index].block_size = size;
	(*buf_ptr)[blk_index].jnl_offset = offset;

	return blk_index;
}
810
//
// Resolve overlaps between a new entry (block_num/size/offset, slotted
// at blk_index) and the existing sorted coalescing table.  Trims or
// splits the previous entry, truncates/eliminates following entries the
// new range covers, and compacts the table.  All byte positions are
// computed as block_num * jhdr_size.  Returns 1 if the new entry should
// simply overwrite slot blk_index, 0 if it should be inserted as a new
// slot.  (Errors inside panic; the commented return -1 documents the
// non-panic alternative.)
//
static int
do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr)
{
	int num_to_remove, index, i, overwrite, err;
	size_t jhdr_size = jnl->jhdr->jhdr_size, new_offset;
	off_t overlap, block_start, block_end;

	block_start = block_num*jhdr_size;
	block_end = block_start + size;
	overwrite = (block_num == (*buf_ptr)[blk_index].block_num && size >= (*buf_ptr)[blk_index].block_size);

	// first, eliminate any overlap with the previous entry
	if (blk_index != 0 && !overwrite) {
		off_t prev_block_start = (*buf_ptr)[blk_index-1].block_num*jhdr_size;
		off_t prev_block_end = prev_block_start + (*buf_ptr)[blk_index-1].block_size;
		overlap = prev_block_end - block_start;
		if (overlap > 0) {
			if (overlap % jhdr_size != 0) {
				panic("jnl: do_overlap: overlap with previous entry not a multiple of %d\n", jhdr_size);
			}

			// if the previous entry completely overlaps this one, we need to break it into two pieces.
			if (prev_block_end > block_end) {
				off_t new_num = block_end / jhdr_size;
				size_t new_size = prev_block_end - block_end;

				new_offset = (*buf_ptr)[blk_index-1].jnl_offset + (block_end - prev_block_start);

				err = insert_block(jnl, buf_ptr, blk_index, new_num, new_size, new_offset, num_buckets_ptr, num_full_ptr, 0);
				if (err < 0) {
					panic("jnl: do_overlap: error inserting during pre-overlap\n");
				}
			}

			// Regardless, we need to truncate the previous entry to the beginning of the overlap
			(*buf_ptr)[blk_index-1].block_size = block_start - prev_block_start;
		}
	}

	// then, bail out fast if there's no overlap with the entries that follow
	if (!overwrite && block_end <= (*buf_ptr)[blk_index].block_num*jhdr_size) {
		return 0; // no overlap, no overwrite
	} else if (overwrite && (blk_index + 1 >= *num_full_ptr || block_end <= (*buf_ptr)[blk_index+1].block_num*jhdr_size)) {
		return 1; // simple overwrite
	}

	// Otherwise, find all cases of total and partial overlap. We use the special
	// block_num of -2 to designate entries that are completely overlapped and must
	// be eliminated. The block_num, size, and jnl_offset of partially overlapped
	// entries must be adjusted to keep the array consistent.
	index = blk_index;
	num_to_remove = 0;
	while(index < *num_full_ptr && block_end > (*buf_ptr)[index].block_num*jhdr_size) {
		if (block_end >= ((*buf_ptr)[index].block_num*jhdr_size + (*buf_ptr)[index].block_size)) {
			(*buf_ptr)[index].block_num = -2; // mark this for deletion
			num_to_remove++;
		} else {
			overlap = block_end - (*buf_ptr)[index].block_num*jhdr_size;
			if (overlap > 0) {
				if (overlap % jhdr_size != 0) {
					panic("jnl: do_overlap: overlap of %lld is not multiple of %d\n", overlap, jhdr_size);
				}

				// if we partially overlap this entry, adjust its block number, jnl offset, and size
				(*buf_ptr)[index].block_num += (overlap / jhdr_size); // make sure overlap is multiple of jhdr_size, or round up

				new_offset = (*buf_ptr)[index].jnl_offset + overlap; // check for wrap-around
				if (new_offset >= jnl->jhdr->size) {
					new_offset = jhdr_size + (new_offset - jnl->jhdr->size);
				}
				(*buf_ptr)[index].jnl_offset = new_offset;

				(*buf_ptr)[index].block_size -= overlap; // sanity check for negative value
				if ((*buf_ptr)[index].block_size <= 0) {
					panic("jnl: do_overlap: after overlap, new block size is invalid (%d)\n", (*buf_ptr)[index].block_size);
					// return -1; // if above panic is removed, return -1 for error
				}
			}

		}

		index++;
	}

	// bcopy over any completely overlapped entries, starting at the right (where the above loop broke out)
	index--; // start with the last index used within the above loop
	while(index >= blk_index) {
		if ((*buf_ptr)[index].block_num == -2) {
			if (index == *num_full_ptr-1) {
				(*buf_ptr)[index].block_num = -1; // it's the last item in the table... just mark as free
			} else {
				bcopy( (*buf_ptr)+(index+1), (*buf_ptr)+(index), (*num_full_ptr - (index + 1)) * sizeof(struct bucket) );
			}
			(*num_full_ptr)--;
		}
		index--;
	}

	// eliminate any stale entries at the end of the table
	for(i=*num_full_ptr; i < (*num_full_ptr + num_to_remove); i++) {
		(*buf_ptr)[i].block_num = -1;
	}

	return 0; // if we got this far, we need to insert the entry into the table (rather than overwrite)
}
916
917 // PR-3105942: Coalesce writes to the same block in journal replay
918 // We coalesce writes by maintaining a dynamic sorted array of physical disk blocks
919 // to be replayed and the corresponding location in the journal which contains
920 // the most recent data for those blocks. The array is "played" once the all the
921 // blocks in the journal have been coalesced. The code for the case of conflicting/
922 // overlapping writes to a single block is the most dense. Because coalescing can
923 // disrupt the existing time-ordering of blocks in the journal playback, care
924 // is taken to catch any overlaps and keep the array consistent.
//
// Add one journaled block (block_num, size, journal offset) to the
// replay coalescing table: find its sorted position, resolve any
// overlap with existing entries, then insert or overwrite.  Returns the
// final table index, or -1 on error.
//
static int
add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int *num_buckets_ptr, int *num_full_ptr)
{
	int blk_index, overwriting;

	// on return from lookup_bucket(), blk_index is the index into the table where block_num should be
	// inserted (or the index of the elem to overwrite).
	blk_index = lookup_bucket( buf_ptr, block_num, *num_full_ptr);

	// check if the index is within bounds (if we're adding this block to the end of
	// the table, blk_index will be equal to num_full)
	if (blk_index < 0 || blk_index > *num_full_ptr) {
		//printf("jnl: add_block: trouble adding block to co_buf\n");
		return -1;
	} // else printf("jnl: add_block: adding block 0x%llx at i=%d\n", block_num, blk_index);

	// Determine whether we're overwriting an existing entry by checking for overlap
	overwriting = do_overlap(jnl, buf_ptr, blk_index, block_num, size, offset, num_buckets_ptr, num_full_ptr);
	if (overwriting < 0) {
		return -1; // if we got an error, pass it along
	}

	// returns the index, or -1 on error
	blk_index = insert_block(jnl, buf_ptr, blk_index, block_num, size, offset, num_buckets_ptr, num_full_ptr, overwriting);

	return blk_index;
}
952
953 static int
954 replay_journal(journal *jnl)
955 {
956 int i, ret, orig_checksum, checksum, max_bsize;
957 block_list_header *blhdr;
958 off_t offset;
959 char *buff, *block_ptr=NULL;
960 struct bucket *co_buf;
961 int num_buckets = STARTING_BUCKETS, num_full;
962
963 // wrap the start ptr if it points to the very end of the journal
964 if (jnl->jhdr->start == jnl->jhdr->size) {
965 jnl->jhdr->start = jnl->jhdr->jhdr_size;
966 }
967 if (jnl->jhdr->end == jnl->jhdr->size) {
968 jnl->jhdr->end = jnl->jhdr->jhdr_size;
969 }
970
971 if (jnl->jhdr->start == jnl->jhdr->end) {
972 return 0;
973 }
974
975 // allocate memory for the header_block. we'll read each blhdr into this
976 if (kmem_alloc(kernel_map, (vm_offset_t *)&buff, jnl->jhdr->blhdr_size)) {
977 printf("jnl: replay_journal: no memory for block buffer! (%d bytes)\n",
978 jnl->jhdr->blhdr_size);
979 return -1;
980 }
981
982 // allocate memory for the coalesce buffer
983 if ((MALLOC(co_buf, struct bucket *, num_buckets*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) {
984 printf("jnl: replay_journal: no memory for coalesce buffer!\n");
985 return -1;
986 }
987
988 // initialize entries
989 for(i=0; i < num_buckets; i++) {
990 co_buf[i].block_num = -1;
991 }
992 num_full = 0; // empty at first
993
994
995 printf("jnl: replay_journal: from: %lld to: %lld (joffset 0x%llx)\n",
996 jnl->jhdr->start, jnl->jhdr->end, jnl->jdev_offset);
997
998 while(jnl->jhdr->start != jnl->jhdr->end) {
999 offset = jnl->jhdr->start;
1000 ret = read_journal_data(jnl, &offset, buff, jnl->jhdr->blhdr_size);
1001 if (ret != jnl->jhdr->blhdr_size) {
1002 printf("jnl: replay_journal: Could not read block list header block @ 0x%llx!\n", offset);
1003 goto bad_replay;
1004 }
1005
1006 blhdr = (block_list_header *)buff;
1007
1008 orig_checksum = blhdr->checksum;
1009 blhdr->checksum = 0;
1010 if (jnl->flags & JOURNAL_NEED_SWAP) {
1011 // calculate the checksum based on the unswapped data
1012 // because it is done byte-at-a-time.
1013 orig_checksum = SWAP32(orig_checksum);
1014 checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
1015 swap_block_list_header(jnl, blhdr);
1016 } else {
1017 checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
1018 }
1019 if (checksum != orig_checksum) {
1020 printf("jnl: replay_journal: bad block list header @ 0x%llx (checksum 0x%x != 0x%x)\n",
1021 offset, orig_checksum, checksum);
1022 goto bad_replay;
1023 }
1024 if ( blhdr->max_blocks <= 0 || blhdr->max_blocks > 2048
1025 || blhdr->num_blocks <= 0 || blhdr->num_blocks > blhdr->max_blocks) {
1026 printf("jnl: replay_journal: bad looking journal entry: max: %d num: %d\n",
1027 blhdr->max_blocks, blhdr->num_blocks);
1028 goto bad_replay;
1029 }
1030
1031 for(i=1; i < blhdr->num_blocks; i++) {
1032 if (blhdr->binfo[i].bnum < 0 && blhdr->binfo[i].bnum != (off_t)-1) {
1033 printf("jnl: replay_journal: bogus block number 0x%llx\n", blhdr->binfo[i].bnum);
1034 goto bad_replay;
1035 }
1036 }
1037
1038 //printf("jnl: replay_journal: adding %d blocks in journal entry @ 0x%llx to co_buf\n",
1039 // blhdr->num_blocks-1, jnl->jhdr->start);
1040 for(i=1; i < blhdr->num_blocks; i++) {
1041 int size, ret_val;
1042 off_t number;
1043
1044 size = blhdr->binfo[i].bsize;
1045 number = blhdr->binfo[i].bnum;
1046
1047 // don't add "killed" blocks
1048 if (number == (off_t)-1) {
1049 //printf("jnl: replay_journal: skipping killed fs block (index %d)\n", i);
1050 } else {
1051 // add this bucket to co_buf, coalescing where possible
1052 // printf("jnl: replay_journal: adding block 0x%llx\n", number);
1053 ret_val = add_block(jnl, &co_buf, number, size, (size_t) offset, &num_buckets, &num_full);
1054
1055 if (ret_val == -1) {
1056 printf("jnl: replay_journal: trouble adding block to co_buf\n");
1057 goto bad_replay;
1058 } // else printf("jnl: replay_journal: added block 0x%llx at i=%d\n", number);
1059 }
1060
1061 // increment offset
1062 offset += size;
1063
1064 // check if the last block added puts us off the end of the jnl.
1065 // if so, we need to wrap to the beginning and take any remainder
1066 // into account
1067 //
1068 if (offset >= jnl->jhdr->size) {
1069 offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
1070 }
1071 }
1072
1073
1074 jnl->jhdr->start += blhdr->bytes_used;
1075 if (jnl->jhdr->start >= jnl->jhdr->size) {
1076 // wrap around and skip the journal header block
1077 jnl->jhdr->start = (jnl->jhdr->start % jnl->jhdr->size) + jnl->jhdr->jhdr_size;
1078 }
1079 }
1080
1081
1082 //printf("jnl: replay_journal: replaying %d blocks\n", num_full);
1083
1084 /*
1085 * make sure it's at least one page in size, so
1086 * start max_bsize at PAGE_SIZE
1087 */
1088 for (i = 0, max_bsize = PAGE_SIZE; i < num_full; i++) {
1089
1090 if (co_buf[i].block_num == (off_t)-1)
1091 continue;
1092
1093 if (co_buf[i].block_size > max_bsize)
1094 max_bsize = co_buf[i].block_size;
1095 }
1096 /*
1097 * round max_bsize up to the nearest PAGE_SIZE multiple
1098 */
1099 if (max_bsize & (PAGE_SIZE - 1)) {
1100 max_bsize = (max_bsize + PAGE_SIZE) & ~(PAGE_SIZE - 1);
1101 }
1102
1103 if (kmem_alloc(kernel_map, (vm_offset_t *)&block_ptr, max_bsize)) {
1104 goto bad_replay;
1105 }
1106
1107 // Replay the coalesced entries in the co-buf
1108 for(i=0; i < num_full; i++) {
1109 size_t size = co_buf[i].block_size;
1110 off_t jnl_offset = (off_t) co_buf[i].jnl_offset;
1111 off_t number = co_buf[i].block_num;
1112
1113
1114 // printf("replaying co_buf[%d]: block 0x%llx, size 0x%x, jnl_offset 0x%llx\n", i, co_buf[i].block_num,
1115 // co_buf[i].block_size, co_buf[i].jnl_offset);
1116
1117 if (number == (off_t)-1) {
1118 // printf("jnl: replay_journal: skipping killed fs block\n");
1119 } else {
1120
1121 // do journal read, and set the phys. block
1122 ret = read_journal_data(jnl, &jnl_offset, block_ptr, size);
1123 if (ret != size) {
1124 printf("jnl: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", offset);
1125 goto bad_replay;
1126 }
1127
1128 if (update_fs_block(jnl, block_ptr, number, size) != 0) {
1129 goto bad_replay;
1130 }
1131 }
1132 }
1133
1134
1135 // done replaying; update jnl header
1136 if (write_journal_header(jnl) != 0) {
1137 goto bad_replay;
1138 }
1139
1140 // free block_ptr
1141 kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize);
1142 block_ptr = NULL;
1143
1144 // free the coalesce buffer
1145 FREE(co_buf, M_TEMP);
1146 co_buf = NULL;
1147
1148 kmem_free(kernel_map, (vm_offset_t)buff, jnl->jhdr->blhdr_size);
1149 return 0;
1150
1151 bad_replay:
1152 if (block_ptr) {
1153 kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize);
1154 }
1155 if (co_buf) {
1156 FREE(co_buf, M_TEMP);
1157 }
1158 kmem_free(kernel_map, (vm_offset_t)buff, jnl->jhdr->blhdr_size);
1159
1160 return -1;
1161 }
1162
1163
1164 #define DEFAULT_TRANSACTION_BUFFER_SIZE (128*1024)
1165 //#define DEFAULT_TRANSACTION_BUFFER_SIZE (256*1024) // better performance but uses more mem
1166 #define MAX_TRANSACTION_BUFFER_SIZE (512*1024)
1167
1168 // XXXdbg - so I can change it in the debugger
1169 int def_tbuffer_size = 0;
1170
1171
1172 //
1173 // This function sets the size of the tbuffer and the
1174 // size of the blhdr. It assumes that jnl->jhdr->size
1175 // and jnl->jhdr->jhdr_size are already valid.
1176 //
1177 static void
1178 size_up_tbuffer(journal *jnl, int tbuffer_size, int phys_blksz)
1179 {
1180 //
1181 // one-time initialization based on how much memory
1182 // there is in the machine.
1183 //
1184 if (def_tbuffer_size == 0) {
1185 if (mem_size < (256*1024*1024)) {
1186 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE;
1187 } else if (mem_size < (512*1024*1024)) {
1188 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 2;
1189 } else if (mem_size < (1024*1024*1024)) {
1190 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 3;
1191 } else if (mem_size >= (1024*1024*1024)) {
1192 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 4;
1193 }
1194 }
1195
1196 // size up the transaction buffer... can't be larger than the number
1197 // of blocks that can fit in a block_list_header block.
1198 if (tbuffer_size == 0) {
1199 jnl->tbuffer_size = def_tbuffer_size;
1200 } else {
1201 // make sure that the specified tbuffer_size isn't too small
1202 if (tbuffer_size < jnl->jhdr->blhdr_size * 2) {
1203 tbuffer_size = jnl->jhdr->blhdr_size * 2;
1204 }
1205 // and make sure it's an even multiple of the block size
1206 if ((tbuffer_size % jnl->jhdr->jhdr_size) != 0) {
1207 tbuffer_size -= (tbuffer_size % jnl->jhdr->jhdr_size);
1208 }
1209
1210 jnl->tbuffer_size = tbuffer_size;
1211 }
1212
1213 if (jnl->tbuffer_size > (jnl->jhdr->size / 2)) {
1214 jnl->tbuffer_size = (jnl->jhdr->size / 2);
1215 }
1216
1217 if (jnl->tbuffer_size > MAX_TRANSACTION_BUFFER_SIZE) {
1218 jnl->tbuffer_size = MAX_TRANSACTION_BUFFER_SIZE;
1219 }
1220
1221 jnl->jhdr->blhdr_size = (jnl->tbuffer_size / jnl->jhdr->jhdr_size) * sizeof(block_info);
1222 if (jnl->jhdr->blhdr_size < phys_blksz) {
1223 jnl->jhdr->blhdr_size = phys_blksz;
1224 } else if ((jnl->jhdr->blhdr_size % phys_blksz) != 0) {
1225 // have to round up so we're an even multiple of the physical block size
1226 jnl->jhdr->blhdr_size = (jnl->jhdr->blhdr_size + (phys_blksz - 1)) & ~(phys_blksz - 1);
1227 }
1228 }
1229
1230
1231
//
// journal_create:
//   Create a brand-new, empty journal occupying [offset, offset +
//   journal_size) on the device "jvp".  "fsvp" is the device of the file
//   system the journal protects, "min_fs_blksz" is the smallest block
//   size the fs will write, and "flush"/"arg" form an optional callback
//   used to push buffered fs blocks to disk.  Returns an initialized
//   journal handle, or NULL on failure.
//
journal *
journal_create(struct vnode *jvp,
	       off_t offset,
	       off_t journal_size,
	       struct vnode *fsvp,
	       size_t min_fs_blksz,
	       int32_t flags,
	       int32_t tbuffer_size,
	       void (*flush)(void *arg),
	       void *arg)
{
	journal *jnl;
	int phys_blksz;
	struct vfs_context context;

	context.vc_proc = current_proc();
	context.vc_ucred = FSCRED;

	/* Get the real physical block size. */
	if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
		return NULL;
	}

	// the journal's block size must not exceed the fs's minimum block size
	if (phys_blksz > min_fs_blksz) {
		printf("jnl: create: error: phys blksize %d bigger than min fs blksize %d\n",
		       phys_blksz, min_fs_blksz);
		return NULL;
	}

	// the journal must be a whole number of device blocks
	if ((journal_size % phys_blksz) != 0) {
		printf("jnl: create: journal size 0x%llx is not an even multiple of block size 0x%x\n",
		       journal_size, phys_blksz);
		return NULL;
	}

	// NOTE(review): M_WAITOK, so the allocation is presumed not to return
	// NULL before the memset -- confirm against the zone allocator contract.
	MALLOC_ZONE(jnl, struct journal *, sizeof(struct journal), M_JNL_JNL, M_WAITOK);
	memset(jnl, 0, sizeof(*jnl));

	jnl->jdev = jvp;
	jnl->jdev_offset = offset;
	jnl->fsdev = fsvp;
	jnl->flush = flush;
	jnl->flush_arg = arg;
	jnl->flags = (flags & JOURNAL_OPTION_FLAGS_MASK);
	lck_mtx_init(&jnl->old_start_lock, jnl_mutex_group, jnl_lock_attr);

	// one physical block for the on-disk journal header
	if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl->header_buf, phys_blksz)) {
		printf("jnl: create: could not allocate space for header buffer (%d bytes)\n", phys_blksz);
		goto bad_kmem_alloc;
	}

	memset(jnl->header_buf, 0, phys_blksz);

	// initialize an empty journal: start == end means "clean"
	jnl->jhdr = (journal_header *)jnl->header_buf;
	jnl->jhdr->magic = JOURNAL_HEADER_MAGIC;
	jnl->jhdr->endian = ENDIAN_MAGIC;
	jnl->jhdr->start = phys_blksz; // start at block #1, block #0 is for the jhdr itself
	jnl->jhdr->end = phys_blksz;
	jnl->jhdr->size = journal_size;
	jnl->jhdr->jhdr_size = phys_blksz;
	// jhdr->size and jhdr->jhdr_size must be valid before this call
	size_up_tbuffer(jnl, tbuffer_size, phys_blksz);

	jnl->active_start = jnl->jhdr->start;

	// XXXdbg - for testing you can force the journal to wrap around
	// jnl->jhdr->start = jnl->jhdr->size - (phys_blksz*3);
	// jnl->jhdr->end = jnl->jhdr->size - (phys_blksz*3);

	lck_mtx_init(&jnl->jlock, jnl_mutex_group, jnl_lock_attr);

	// persist the empty header so the on-disk journal is valid
	if (write_journal_header(jnl) != 0) {
		printf("jnl: journal_create: failed to write journal header.\n");
		goto bad_write;
	}

	return jnl;


  bad_write:
	kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz);
  bad_kmem_alloc:
	jnl->jhdr = NULL;
	FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
	return NULL;
}
1317
1318
1319 journal *
1320 journal_open(struct vnode *jvp,
1321 off_t offset,
1322 off_t journal_size,
1323 struct vnode *fsvp,
1324 size_t min_fs_blksz,
1325 int32_t flags,
1326 int32_t tbuffer_size,
1327 void (*flush)(void *arg),
1328 void *arg)
1329 {
1330 journal *jnl;
1331 int orig_blksz=0, phys_blksz;
1332 int orig_checksum, checksum;
1333 struct vfs_context context;
1334
1335 context.vc_proc = current_proc();
1336 context.vc_ucred = FSCRED;
1337
1338 /* Get the real physical block size. */
1339 if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
1340 return NULL;
1341 }
1342
1343 if (phys_blksz > min_fs_blksz) {
1344 printf("jnl: create: error: phys blksize %d bigger than min fs blksize %d\n",
1345 phys_blksz, min_fs_blksz);
1346 return NULL;
1347 }
1348
1349 if ((journal_size % phys_blksz) != 0) {
1350 printf("jnl: open: journal size 0x%llx is not an even multiple of block size 0x%x\n",
1351 journal_size, phys_blksz);
1352 return NULL;
1353 }
1354
1355 MALLOC_ZONE(jnl, struct journal *, sizeof(struct journal), M_JNL_JNL, M_WAITOK);
1356 memset(jnl, 0, sizeof(*jnl));
1357
1358 jnl->jdev = jvp;
1359 jnl->jdev_offset = offset;
1360 jnl->fsdev = fsvp;
1361 jnl->flush = flush;
1362 jnl->flush_arg = arg;
1363 jnl->flags = (flags & JOURNAL_OPTION_FLAGS_MASK);
1364 lck_mtx_init(&jnl->old_start_lock, jnl_mutex_group, jnl_lock_attr);
1365
1366 if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl->header_buf, phys_blksz)) {
1367 printf("jnl: create: could not allocate space for header buffer (%d bytes)\n", phys_blksz);
1368 goto bad_kmem_alloc;
1369 }
1370
1371 jnl->jhdr = (journal_header *)jnl->header_buf;
1372 memset(jnl->jhdr, 0, sizeof(journal_header)+4);
1373
1374 // we have to set this up here so that do_journal_io() will work
1375 jnl->jhdr->jhdr_size = phys_blksz;
1376
1377 if (read_journal_header(jnl, jnl->jhdr, phys_blksz) != phys_blksz) {
1378 printf("jnl: open: could not read %d bytes for the journal header.\n",
1379 phys_blksz);
1380 goto bad_journal;
1381 }
1382
1383 orig_checksum = jnl->jhdr->checksum;
1384 jnl->jhdr->checksum = 0;
1385
1386 if (jnl->jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
1387 // do this before the swap since it's done byte-at-a-time
1388 orig_checksum = SWAP32(orig_checksum);
1389 checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header));
1390 swap_journal_header(jnl);
1391 jnl->flags |= JOURNAL_NEED_SWAP;
1392 } else {
1393 checksum = calc_checksum((char *)jnl->jhdr, sizeof(struct journal_header));
1394 }
1395
1396 if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC && jnl->jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
1397 printf("jnl: open: journal magic is bad (0x%x != 0x%x)\n",
1398 jnl->jhdr->magic, JOURNAL_HEADER_MAGIC);
1399 goto bad_journal;
1400 }
1401
1402 // only check if we're the current journal header magic value
1403 if (jnl->jhdr->magic == JOURNAL_HEADER_MAGIC) {
1404
1405 if (orig_checksum != checksum) {
1406 printf("jnl: open: journal checksum is bad (0x%x != 0x%x)\n",
1407 orig_checksum, checksum);
1408
1409 //goto bad_journal;
1410 }
1411 }
1412
1413 // XXXdbg - convert old style magic numbers to the new one
1414 if (jnl->jhdr->magic == OLD_JOURNAL_HEADER_MAGIC) {
1415 jnl->jhdr->magic = JOURNAL_HEADER_MAGIC;
1416 }
1417
1418 if (phys_blksz != jnl->jhdr->jhdr_size && jnl->jhdr->jhdr_size != 0) {
1419 printf("jnl: open: phys_blksz %d does not match journal header size %d\n",
1420 phys_blksz, jnl->jhdr->jhdr_size);
1421
1422 orig_blksz = phys_blksz;
1423 phys_blksz = jnl->jhdr->jhdr_size;
1424 if (VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&phys_blksz, FWRITE, &context)) {
1425 printf("jnl: could not set block size to %d bytes.\n", phys_blksz);
1426 goto bad_journal;
1427 }
1428 // goto bad_journal;
1429 }
1430
1431 if ( jnl->jhdr->start <= 0
1432 || jnl->jhdr->start > jnl->jhdr->size
1433 || jnl->jhdr->start > 1024*1024*1024) {
1434 printf("jnl: open: jhdr start looks bad (0x%llx max size 0x%llx)\n",
1435 jnl->jhdr->start, jnl->jhdr->size);
1436 goto bad_journal;
1437 }
1438
1439 if ( jnl->jhdr->end <= 0
1440 || jnl->jhdr->end > jnl->jhdr->size
1441 || jnl->jhdr->end > 1024*1024*1024) {
1442 printf("jnl: open: jhdr end looks bad (0x%llx max size 0x%llx)\n",
1443 jnl->jhdr->end, jnl->jhdr->size);
1444 goto bad_journal;
1445 }
1446
1447 if (jnl->jhdr->size > 1024*1024*1024) {
1448 printf("jnl: open: jhdr size looks bad (0x%llx)\n", jnl->jhdr->size);
1449 goto bad_journal;
1450 }
1451
1452 // XXXdbg - can't do these checks because hfs writes all kinds of
1453 // non-uniform sized blocks even on devices that have a block size
1454 // that is larger than 512 bytes (i.e. optical media w/2k blocks).
1455 // therefore these checks will fail and so we just have to punt and
1456 // do more relaxed checking...
1457 // XXXdbg if ((jnl->jhdr->start % jnl->jhdr->jhdr_size) != 0) {
1458 if ((jnl->jhdr->start % 512) != 0) {
1459 printf("jnl: open: journal start (0x%llx) not a multiple of 512?\n",
1460 jnl->jhdr->start);
1461 goto bad_journal;
1462 }
1463
1464 //XXXdbg if ((jnl->jhdr->end % jnl->jhdr->jhdr_size) != 0) {
1465 if ((jnl->jhdr->end % 512) != 0) {
1466 printf("jnl: open: journal end (0x%llx) not a multiple of block size (0x%x)?\n",
1467 jnl->jhdr->end, jnl->jhdr->jhdr_size);
1468 goto bad_journal;
1469 }
1470
1471 // take care of replaying the journal if necessary
1472 if (flags & JOURNAL_RESET) {
1473 printf("jnl: journal start/end pointers reset! (jnl 0x%x; s 0x%llx e 0x%llx)\n",
1474 jnl, jnl->jhdr->start, jnl->jhdr->end);
1475 jnl->jhdr->start = jnl->jhdr->end;
1476 } else if (replay_journal(jnl) != 0) {
1477 printf("jnl: journal_open: Error replaying the journal!\n");
1478 goto bad_journal;
1479 }
1480
1481 if (orig_blksz != 0) {
1482 VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, &context);
1483 phys_blksz = orig_blksz;
1484 if (orig_blksz < jnl->jhdr->jhdr_size) {
1485 printf("jnl: open: jhdr_size is %d but orig phys blk size is %d. switching.\n",
1486 jnl->jhdr->jhdr_size, orig_blksz);
1487
1488 jnl->jhdr->jhdr_size = orig_blksz;
1489 }
1490 }
1491
1492 // make sure this is in sync!
1493 jnl->active_start = jnl->jhdr->start;
1494
1495 // set this now, after we've replayed the journal
1496 size_up_tbuffer(jnl, tbuffer_size, phys_blksz);
1497
1498 lck_mtx_init(&jnl->jlock, jnl_mutex_group, jnl_lock_attr);
1499
1500 return jnl;
1501
1502 bad_journal:
1503 if (orig_blksz != 0) {
1504 phys_blksz = orig_blksz;
1505 VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, &context);
1506 }
1507 kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz);
1508 bad_kmem_alloc:
1509 FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
1510 return NULL;
1511 }
1512
1513
//
// journal_is_clean:
//   Non-destructively check whether the journal at [offset, offset +
//   journal_size) on device "jvp" is clean, i.e. its start and end
//   pointers are equal so no replay would be needed.  Uses a journal
//   struct on the stack; nothing on disk is modified.  Returns 0 if the
//   journal is clean, or a non-zero errno value otherwise.
//
int
journal_is_clean(struct vnode *jvp,
		 off_t offset,
		 off_t journal_size,
		 struct vnode *fsvp,
		 size_t min_fs_block_size)
{
	journal jnl;
	int phys_blksz, ret;
	int orig_checksum, checksum;
	struct vfs_context context;

	context.vc_proc = current_proc();
	context.vc_ucred = FSCRED;

	/* Get the real physical block size. */
	if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
		printf("jnl: is_clean: failed to get device block size.\n");
		return EINVAL;
	}

	// same sanity checks as journal_open()
	if (phys_blksz > min_fs_block_size) {
		printf("jnl: is_clean: error: phys blksize %d bigger than min fs blksize %d\n",
		       phys_blksz, min_fs_block_size);
		return EINVAL;
	}

	if ((journal_size % phys_blksz) != 0) {
		printf("jnl: is_clean: journal size 0x%llx is not an even multiple of block size 0x%x\n",
		       journal_size, phys_blksz);
		return EINVAL;
	}

	memset(&jnl, 0, sizeof(jnl));

	if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl.header_buf, phys_blksz)) {
		printf("jnl: is_clean: could not allocate space for header buffer (%d bytes)\n", phys_blksz);
		return ENOMEM;
	}

	jnl.jhdr = (journal_header *)jnl.header_buf;
	// NOTE(review): the "+4" clears slightly past the struct -- matches
	// journal_open(); confirm what the extra bytes cover
	memset(jnl.jhdr, 0, sizeof(journal_header)+4);

	jnl.jdev = jvp;
	jnl.jdev_offset = offset;
	jnl.fsdev = fsvp;

	// we have to set this up here so that do_journal_io() will work
	jnl.jhdr->jhdr_size = phys_blksz;

	if (read_journal_header(&jnl, jnl.jhdr, phys_blksz) != phys_blksz) {
		printf("jnl: is_clean: could not read %d bytes for the journal header.\n",
		       phys_blksz);
		ret = EINVAL;
		goto get_out;
	}

	// verify the header checksum; the field must be zero while computing it
	orig_checksum = jnl.jhdr->checksum;
	jnl.jhdr->checksum = 0;

	if (jnl.jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
		// do this before the swap since it's done byte-at-a-time
		orig_checksum = SWAP32(orig_checksum);
		checksum = calc_checksum((char *)jnl.jhdr, sizeof(struct journal_header));
		swap_journal_header(&jnl);
		jnl.flags |= JOURNAL_NEED_SWAP;
	} else {
		checksum = calc_checksum((char *)jnl.jhdr, sizeof(struct journal_header));
	}

	if (jnl.jhdr->magic != JOURNAL_HEADER_MAGIC && jnl.jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
		printf("jnl: is_clean: journal magic is bad (0x%x != 0x%x)\n",
		       jnl.jhdr->magic, JOURNAL_HEADER_MAGIC);
		ret = EINVAL;
		goto get_out;
	}

	if (orig_checksum != checksum) {
		printf("jnl: is_clean: journal checksum is bad (0x%x != 0x%x)\n", orig_checksum, checksum);
		ret = EINVAL;
		goto get_out;
	}

	//
	// if the start and end are equal then the journal is clean.
	// otherwise it's not clean and therefore an error.
	//
	if (jnl.jhdr->start == jnl.jhdr->end) {
		ret = 0;
	} else {
		ret = EINVAL;
	}

  get_out:
	kmem_free(kernel_map, (vm_offset_t)jnl.header_buf, phys_blksz);

	return ret;


}
1614
1615
1616
//
// journal_close:
//   Shut down and free a journal.  If the journal is still valid, any
//   active/buffered transaction is flushed and we wait (bounded) for the
//   buffer cache to drain so that start catches up with end before the
//   final header write.  If the journal has gone invalid, outstanding
//   transactions are aborted instead.  The journal struct is freed in
//   either case; jnl must not be used afterward.
//
void
journal_close(journal *jnl)
{
	volatile off_t *start, *end;
	int             counter=0;

	CHECK_JOURNAL(jnl);

	// set this before doing anything that would block so that
	// we start tearing things down properly.
	//
	jnl->flags |= JOURNAL_CLOSE_PENDING;

	// take the journal lock unless this thread already owns it
	if (jnl->owner != current_thread()) {
		lock_journal(jnl);
	}

	//
	// only write stuff to disk if the journal is still valid
	//
	if ((jnl->flags & JOURNAL_INVALID) == 0) {

		if (jnl->active_tr) {
			journal_end_transaction(jnl);
		}

		// flush any buffered transactions
		if (jnl->cur_tr) {
			transaction *tr = jnl->cur_tr;

			jnl->cur_tr = NULL;
			end_transaction(tr, 1);   // force it to get flushed
		}

		//start = &jnl->jhdr->start;
		start = &jnl->active_start;
		end   = &jnl->jhdr->end;

		// wait (at most 500 iterations) for the buffer cache to drain
		// so that all journaled transactions reach their home blocks
		while (*start != *end && counter++ < 500) {
			printf("jnl: close: flushing the buffer cache (start 0x%llx end 0x%llx)\n", *start, *end);
			if (jnl->flush) {
				jnl->flush(jnl->flush_arg);
			}
			tsleep((caddr_t)jnl, PRIBIO, "jnl_close", 1);
		}

		if (*start != *end) {
			printf("jnl: close: buffer flushing didn't seem to flush out all the transactions! (0x%llx - 0x%llx)\n",
			       *start, *end);
		}

		// make sure this is in sync when we close the journal
		jnl->jhdr->start = jnl->active_start;

		// if this fails there's not much we can do at this point...
		write_journal_header(jnl);
	} else {
		// if we're here the journal isn't valid any more.
		// so make sure we don't leave any locked blocks lying around
		printf("jnl: close: journal 0x%x, is invalid.  aborting outstanding transactions\n", jnl);
		if (jnl->active_tr || jnl->cur_tr) {
			transaction *tr;
			if (jnl->active_tr) {
				tr = jnl->active_tr;
				jnl->active_tr = NULL;
			} else {
				tr = jnl->cur_tr;
				jnl->cur_tr = NULL;
			}

			abort_transaction(jnl, tr);
			if (jnl->active_tr || jnl->cur_tr) {
				panic("jnl: close: jnl @ 0x%x had both an active and cur tr\n", jnl);
			}
		}
	}

	free_old_stuff(jnl);

	kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, jnl->jhdr->jhdr_size);
	// poison the pointer so any use-after-close faults recognizably
	jnl->jhdr = (void *)0xbeefbabe;

	FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
}
1701
1702 static void
1703 dump_journal(journal *jnl)
1704 {
1705 transaction *ctr;
1706
1707 printf("journal:");
1708 printf(" jdev_offset %.8llx\n", jnl->jdev_offset);
1709 printf(" magic: 0x%.8x\n", jnl->jhdr->magic);
1710 printf(" start: 0x%.8llx\n", jnl->jhdr->start);
1711 printf(" end: 0x%.8llx\n", jnl->jhdr->end);
1712 printf(" size: 0x%.8llx\n", jnl->jhdr->size);
1713 printf(" blhdr size: %d\n", jnl->jhdr->blhdr_size);
1714 printf(" jhdr size: %d\n", jnl->jhdr->jhdr_size);
1715 printf(" chksum: 0x%.8x\n", jnl->jhdr->checksum);
1716
1717 printf(" completed transactions:\n");
1718 for(ctr=jnl->completed_trs; ctr; ctr=ctr->next) {
1719 printf(" 0x%.8llx - 0x%.8llx\n", ctr->journal_start, ctr->journal_end);
1720 }
1721 }
1722
1723
1724
1725 static off_t
1726 free_space(journal *jnl)
1727 {
1728 off_t free_space;
1729
1730 if (jnl->jhdr->start < jnl->jhdr->end) {
1731 free_space = jnl->jhdr->size - (jnl->jhdr->end - jnl->jhdr->start) - jnl->jhdr->jhdr_size;
1732 } else if (jnl->jhdr->start > jnl->jhdr->end) {
1733 free_space = jnl->jhdr->start - jnl->jhdr->end;
1734 } else {
1735 // journal is completely empty
1736 free_space = jnl->jhdr->size - jnl->jhdr->jhdr_size;
1737 }
1738
1739 return free_space;
1740 }
1741
1742
1743 //
1744 // The journal must be locked on entry to this function.
1745 // The "desired_size" is in bytes.
1746 //
//
// check_free_space:
//   Block until the journal has more than "desired_size" bytes of free
//   space, lazily advancing jnl->jhdr->start past completed old
//   transactions (jnl->old_start[]) to reclaim space.  The journal must
//   be locked on entry.  Returns 0 on success, ENOSPC if we give up
//   after ~7500 iterations; panics if flushing appears wedged.
//
static int
check_free_space(journal *jnl, int desired_size)
{
	int i, counter=0;

	while (1) {
		int old_start_empty;

		// after 5000 tries, assume buffer flushing is broken
		if (counter++ == 5000) {
			dump_journal(jnl);
			panic("jnl: check_free_space: buffer flushing isn't working "
			      "(jnl @ 0x%x s %lld e %lld f %lld [active start %lld]).\n", jnl,
			      jnl->jhdr->start, jnl->jhdr->end, free_space(jnl), jnl->active_start);
		}
		if (counter > 7500) {
			printf("jnl: check_free_space: giving up waiting for free space.\n");
			return ENOSPC;
		}

		// make sure there's space in the journal to hold this transaction
		if (free_space(jnl) > desired_size) {
			break;
		}

		//
		// here's where we lazily bump up jnl->jhdr->start.  we'll consume
		// entries until there is enough space for the next transaction.
		//
		old_start_empty = 1;
		lock_oldstart(jnl);
		for(i=0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {
			// NOTE(review): this inner "counter" intentionally shadows
			// the outer retry counter -- it bounds the per-entry wait below
			int counter;

			counter = 0;
			// the high bit marks a transaction still being flushed;
			// wait for it, dropping the oldstart lock while we sleep
			while (jnl->old_start[i] & 0x8000000000000000LL) {
				if (counter++ > 100) {
					panic("jnl: check_free_space: tr starting @ 0x%llx not flushing (jnl 0x%x).\n",
					      jnl->old_start[i], jnl);
				}

				unlock_oldstart(jnl);
				if (jnl->flush) {
					jnl->flush(jnl->flush_arg);
				}
				tsleep((caddr_t)jnl, PRIBIO, "check_free_space1", 1);
				lock_oldstart(jnl);
			}

			if (jnl->old_start[i] == 0) {
				continue;
			}

			// reclaim this old transaction's space by moving start up to it
			old_start_empty = 0;
			jnl->jhdr->start = jnl->old_start[i];
			jnl->old_start[i] = 0;
			if (free_space(jnl) > desired_size) {
				// drop the lock around the header write, then stop scanning
				unlock_oldstart(jnl);
				write_journal_header(jnl);
				lock_oldstart(jnl);
				break;
			}
		}
		unlock_oldstart(jnl);

		// if we bumped the start, loop and try again
		if (i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
			continue;
		} else if (old_start_empty) {
			//
			// if there is nothing in old_start anymore then we can
			// bump the jhdr->start to be the same as active_start
			// since it is possible there was only one very large
			// transaction in the old_start array.  if we didn't do
			// this then jhdr->start would never get updated and we
			// would wind up looping until we hit the panic at the
			// start of the loop.
			//
			jnl->jhdr->start = jnl->active_start;
			write_journal_header(jnl);
			continue;
		}


		// if the file system gave us a flush function, call it to so that
		// it can flush some blocks which hopefully will cause some transactions
		// to complete and thus free up space in the journal.
		if (jnl->flush) {
			jnl->flush(jnl->flush_arg);
		}

		// wait for a while to avoid being cpu-bound (this will
		// put us to sleep for 10 milliseconds)
		tsleep((caddr_t)jnl, PRIBIO, "check_free_space2", 1);
	}

	return 0;
}
1847
//
// journal_start_transaction:
//   Begin (or nest into) a transaction on "jnl".  If the calling thread
//   already owns the journal, the nesting count is bumped and 0 is
//   returned.  Otherwise the journal lock is acquired and held until the
//   matching journal_end_transaction() (or until the error path here
//   releases it).  Returns 0 on success, EINVAL if the journal is
//   invalid, ENOSPC/ENOMEM on failure.
//
int
journal_start_transaction(journal *jnl)
{
	int ret;
	transaction *tr;

	CHECK_JOURNAL(jnl);

	if (jnl->flags & JOURNAL_INVALID) {
		return EINVAL;
	}

	// nested transaction: same thread, just bump the count
	if (jnl->owner == current_thread()) {
		if (jnl->active_tr == NULL) {
			panic("jnl: start_tr: active_tr is NULL (jnl @ 0x%x, owner 0x%x, current_thread 0x%x\n",
			      jnl, jnl->owner, current_thread());
		}
		jnl->nested_count++;
		return 0;
	}

	// NOTE: on success this lock is held until journal_end_transaction()
	lock_journal(jnl);

	if (jnl->owner != NULL || jnl->nested_count != 0 || jnl->active_tr != NULL) {
		panic("jnl: start_tr: owner 0x%x, nested count 0x%x, active_tr 0x%x jnl @ 0x%x\n",
		      jnl->owner, jnl->nested_count, jnl->active_tr, jnl);
	}

	jnl->owner        = current_thread();
	jnl->nested_count = 1;

	free_old_stuff(jnl);

	// make sure there's room in the journal
	if (check_free_space(jnl, jnl->tbuffer_size) != 0) {
		printf("jnl: start transaction failed: no space\n");
		ret = ENOSPC;
		goto bad_start;
	}

	// if there's a buffered transaction, use it.
	if (jnl->cur_tr) {
		jnl->active_tr = jnl->cur_tr;
		jnl->cur_tr    = NULL;

		return 0;
	}

	// NOTE(review): M_WAITOK, so the allocation is presumed not to
	// return NULL before the memset -- confirm
	MALLOC_ZONE(tr, transaction *, sizeof(transaction), M_JNL_TR, M_WAITOK);
	memset(tr, 0, sizeof(transaction));

	tr->tbuffer_size = jnl->tbuffer_size;

	if (kmem_alloc(kernel_map, (vm_offset_t *)&tr->tbuffer, tr->tbuffer_size)) {
		FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
		printf("jnl: start transaction failed: no tbuffer mem\n");
		ret = ENOMEM;
		goto bad_start;
	}

	// journal replay code checksum check depends on this.
	memset(tr->tbuffer, 0, BLHDR_CHECKSUM_SIZE);
	// Fill up the rest of the block with unimportant bytes (0x5a 'Z' chosen for visibility)
	memset(tr->tbuffer + BLHDR_CHECKSUM_SIZE, 0x5a, jnl->jhdr->blhdr_size - BLHDR_CHECKSUM_SIZE);

	// the first block_info slot accounts for the header itself
	tr->blhdr = (block_list_header *)tr->tbuffer;
	tr->blhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
	tr->blhdr->num_blocks = 1;      // accounts for this header block
	tr->blhdr->bytes_used = jnl->jhdr->blhdr_size;

	tr->num_blhdrs  = 1;
	tr->total_bytes = jnl->jhdr->blhdr_size;
	tr->jnl         = jnl;

	jnl->active_tr  = tr;

	return 0;

  bad_start:
	// release ownership and the journal lock on failure
	jnl->owner        = NULL;
	jnl->nested_count = 0;
	unlock_journal(jnl);
	return ret;
}
1934
1935
1936 int
1937 journal_modify_block_start(journal *jnl, struct buf *bp)
1938 {
1939 transaction *tr;
1940
1941 CHECK_JOURNAL(jnl);
1942
1943 if (jnl->flags & JOURNAL_INVALID) {
1944 return EINVAL;
1945 }
1946
1947 // XXXdbg - for debugging I want this to be true. later it may
1948 // not be necessary.
1949 if ((buf_flags(bp) & B_META) == 0) {
1950 panic("jnl: modify_block_start: bp @ 0x%x is not a meta-data block! (jnl 0x%x)\n", bp, jnl);
1951 }
1952
1953 tr = jnl->active_tr;
1954 CHECK_TRANSACTION(tr);
1955
1956 if (jnl->owner != current_thread()) {
1957 panic("jnl: modify_block_start: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
1958 jnl, jnl->owner, current_thread());
1959 }
1960
1961 free_old_stuff(jnl);
1962
1963 //printf("jnl: mod block start (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d; total bytes %d)\n",
1964 // bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes);
1965
1966 // can't allow blocks that aren't an even multiple of the
1967 // underlying block size.
1968 if ((buf_size(bp) % jnl->jhdr->jhdr_size) != 0) {
1969 panic("jnl: mod block start: bufsize %d not a multiple of block size %d\n",
1970 buf_size(bp), jnl->jhdr->jhdr_size);
1971 return -1;
1972 }
1973
1974 // make sure that this transaction isn't bigger than the whole journal
1975 if (tr->total_bytes+buf_size(bp) >= (jnl->jhdr->size - jnl->jhdr->jhdr_size)) {
1976 panic("jnl: transaction too big (%d >= %lld bytes, bufsize %d, tr 0x%x bp 0x%x)\n",
1977 tr->total_bytes, (tr->jnl->jhdr->size - jnl->jhdr->jhdr_size), buf_size(bp), tr, bp);
1978 return -1;
1979 }
1980
1981 // if the block is dirty and not already locked we have to write
1982 // it out before we muck with it because it has data that belongs
1983 // (presumably) to another transaction.
1984 //
1985 if ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI) {
1986
1987 if (buf_flags(bp) & B_ASYNC) {
1988 panic("modify_block_start: bp @ 0x% has async flag set!\n", bp);
1989 }
1990
1991 // this will cause it to not be buf_brelse()'d
1992 buf_setflags(bp, B_NORELSE);
1993 VNOP_BWRITE(bp);
1994 }
1995 buf_setflags(bp, B_LOCKED);
1996
1997 return 0;
1998 }
1999
2000 int
2001 journal_modify_block_abort(journal *jnl, struct buf *bp)
2002 {
2003 transaction *tr;
2004 block_list_header *blhdr;
2005 int i, j;
2006
2007 CHECK_JOURNAL(jnl);
2008
2009 tr = jnl->active_tr;
2010
2011 //
2012 // if there's no active transaction then we just want to
2013 // call buf_brelse() and return since this is just a block
2014 // that happened to be modified as part of another tr.
2015 //
2016 if (tr == NULL) {
2017 buf_brelse(bp);
2018 return 0;
2019 }
2020
2021 if (jnl->flags & JOURNAL_INVALID) {
2022 return EINVAL;
2023 }
2024
2025 CHECK_TRANSACTION(tr);
2026
2027 if (jnl->owner != current_thread()) {
2028 panic("jnl: modify_block_abort: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
2029 jnl, jnl->owner, current_thread());
2030 }
2031
2032 free_old_stuff(jnl);
2033
2034 // printf("jnl: modify_block_abort: tr 0x%x bp 0x%x\n", jnl->active_tr, bp);
2035
2036 // first check if it's already part of this transaction
2037 for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
2038 for(i=1; i < blhdr->num_blocks; i++) {
2039 if (bp == blhdr->binfo[i].bp) {
2040 if (buf_size(bp) != blhdr->binfo[i].bsize) {
2041 panic("jnl: bp @ 0x%x changed size on me! (%d vs. %d, jnl 0x%x)\n",
2042 bp, buf_size(bp), blhdr->binfo[i].bsize, jnl);
2043 }
2044 break;
2045 }
2046 }
2047
2048 if (i < blhdr->num_blocks) {
2049 break;
2050 }
2051 }
2052
2053 //
2054 // if blhdr is null, then this block has only had modify_block_start
2055 // called on it as part of the current transaction. that means that
2056 // it is ok to clear the LOCKED bit since it hasn't actually been
2057 // modified. if blhdr is non-null then modify_block_end was called
2058 // on it and so we need to keep it locked in memory.
2059 //
2060 if (blhdr == NULL) {
2061 buf_clearflags(bp, B_LOCKED);
2062 }
2063
2064 buf_brelse(bp);
2065 return 0;
2066 }
2067
2068
2069 int
2070 journal_modify_block_end(journal *jnl, struct buf *bp)
2071 {
2072 int i, j, tbuffer_offset;
2073 char *blkptr;
2074 block_list_header *blhdr, *prev=NULL;
2075 transaction *tr;
2076
2077 CHECK_JOURNAL(jnl);
2078
2079 if (jnl->flags & JOURNAL_INVALID) {
2080 return EINVAL;
2081 }
2082
2083 tr = jnl->active_tr;
2084 CHECK_TRANSACTION(tr);
2085
2086 if (jnl->owner != current_thread()) {
2087 panic("jnl: modify_block_end: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
2088 jnl, jnl->owner, current_thread());
2089 }
2090
2091 free_old_stuff(jnl);
2092
2093 //printf("jnl: mod block end: (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d, total bytes %d)\n",
2094 // bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes);
2095
2096 if ((buf_flags(bp) & B_LOCKED) == 0) {
2097 panic("jnl: modify_block_end: bp 0x%x not locked! jnl @ 0x%x\n", bp, jnl);
2098 }
2099
2100 // first check if it's already part of this transaction
2101 for(blhdr=tr->blhdr; blhdr; prev=blhdr,blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
2102 tbuffer_offset = jnl->jhdr->blhdr_size;
2103
2104 for(i=1; i < blhdr->num_blocks; i++) {
2105 if (bp == blhdr->binfo[i].bp) {
2106 if (buf_size(bp) != blhdr->binfo[i].bsize) {
2107 panic("jnl: bp @ 0x%x changed size on me! (%d vs. %d, jnl 0x%x)\n",
2108 bp, buf_size(bp), blhdr->binfo[i].bsize, jnl);
2109 }
2110 break;
2111 }
2112 tbuffer_offset += blhdr->binfo[i].bsize;
2113 }
2114
2115 if (i < blhdr->num_blocks) {
2116 break;
2117 }
2118 }
2119
2120 if (blhdr == NULL
2121 && prev
2122 && (prev->num_blocks+1) <= prev->max_blocks
2123 && (prev->bytes_used+buf_size(bp)) <= tr->tbuffer_size) {
2124 blhdr = prev;
2125 } else if (blhdr == NULL) {
2126 block_list_header *nblhdr;
2127
2128 if (prev == NULL) {
2129 panic("jnl: modify block end: no way man, prev == NULL?!?, jnl 0x%x, bp 0x%x\n", jnl, bp);
2130 }
2131
2132 // we got to the end of the list, didn't find the block and there's
2133 // no room in the block_list_header pointed to by prev
2134
2135 // we allocate another tbuffer and link it in at the end of the list
2136 // through prev->binfo[0].bnum. that's a skanky way to do things but
2137 // avoids having yet another linked list of small data structures to manage.
2138
2139 if (kmem_alloc(kernel_map, (vm_offset_t *)&nblhdr, tr->tbuffer_size)) {
2140 panic("jnl: end_tr: no space for new block tr @ 0x%x (total bytes: %d)!\n",
2141 tr, tr->total_bytes);
2142 }
2143
2144 // journal replay code checksum check depends on this.
2145 memset(nblhdr, 0, BLHDR_CHECKSUM_SIZE);
2146 // Fill up the rest of the block with unimportant bytes
2147 memset(nblhdr + BLHDR_CHECKSUM_SIZE, 0x5a, jnl->jhdr->blhdr_size - BLHDR_CHECKSUM_SIZE);
2148
2149 // initialize the new guy
2150 nblhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
2151 nblhdr->num_blocks = 1; // accounts for this header block
2152 nblhdr->bytes_used = jnl->jhdr->blhdr_size;
2153
2154 tr->num_blhdrs++;
2155 tr->total_bytes += jnl->jhdr->blhdr_size;
2156
2157 // then link him in at the end
2158 prev->binfo[0].bnum = (off_t)((long)nblhdr);
2159
2160 // and finally switch to using the new guy
2161 blhdr = nblhdr;
2162 tbuffer_offset = jnl->jhdr->blhdr_size;
2163 i = 1;
2164 }
2165
2166
2167 if ((i+1) > blhdr->max_blocks) {
2168 panic("jnl: modify_block_end: i = %d, max_blocks %d\n", i, blhdr->max_blocks);
2169 }
2170
2171 // copy the data into the in-memory transaction buffer
2172 blkptr = (char *)&((char *)blhdr)[tbuffer_offset];
2173 memcpy(blkptr, buf_dataptr(bp), buf_size(bp));
2174
2175 // if this is true then this is a new block we haven't seen
2176 if (i >= blhdr->num_blocks) {
2177 int bsize;
2178 vnode_t vp;
2179
2180 vp = buf_vnode(bp);
2181 vnode_ref(vp);
2182 bsize = buf_size(bp);
2183
2184 blhdr->binfo[i].bnum = (off_t)(buf_blkno(bp));
2185 blhdr->binfo[i].bsize = bsize;
2186 blhdr->binfo[i].bp = bp;
2187
2188 blhdr->bytes_used += bsize;
2189 tr->total_bytes += bsize;
2190
2191 blhdr->num_blocks++;
2192 }
2193 buf_bdwrite(bp);
2194
2195 return 0;
2196 }
2197
2198 int
2199 journal_kill_block(journal *jnl, struct buf *bp)
2200 {
2201 int i;
2202 int bflags;
2203 block_list_header *blhdr;
2204 transaction *tr;
2205
2206 CHECK_JOURNAL(jnl);
2207
2208 if (jnl->flags & JOURNAL_INVALID) {
2209 return EINVAL;
2210 }
2211
2212 tr = jnl->active_tr;
2213 CHECK_TRANSACTION(tr);
2214
2215 if (jnl->owner != current_thread()) {
2216 panic("jnl: modify_block_end: called w/out a transaction! jnl 0x%x, owner 0x%x, curact 0x%x\n",
2217 jnl, jnl->owner, current_thread());
2218 }
2219
2220 free_old_stuff(jnl);
2221
2222 bflags = buf_flags(bp);
2223
2224 if ( !(bflags & B_LOCKED))
2225 panic("jnl: modify_block_end: called with bp not B_LOCKED");
2226
2227 /*
2228 * bp must be BL_BUSY and B_LOCKED
2229 */
2230 // first check if it's already part of this transaction
2231 for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
2232
2233 for(i=1; i < blhdr->num_blocks; i++) {
2234 if (bp == blhdr->binfo[i].bp) {
2235 vnode_t vp;
2236
2237 buf_clearflags(bp, B_LOCKED);
2238
2239 // this undoes the vnode_ref() in journal_modify_block_end()
2240 vp = buf_vnode(bp);
2241 vnode_rele_ext(vp, 0, 1);
2242
2243 // if the block has the DELWRI and FILTER bits sets, then
2244 // things are seriously weird. if it was part of another
2245 // transaction then journal_modify_block_start() should
2246 // have force it to be written.
2247 //
2248 //if ((bflags & B_DELWRI) && (bflags & B_FILTER)) {
2249 // panic("jnl: kill block: this defies all logic! bp 0x%x\n", bp);
2250 //} else {
2251 tr->num_killed += buf_size(bp);
2252 //}
2253 blhdr->binfo[i].bp = NULL;
2254 blhdr->binfo[i].bnum = (off_t)-1;
2255
2256 buf_brelse(bp);
2257
2258 break;
2259 }
2260 }
2261
2262 if (i < blhdr->num_blocks) {
2263 break;
2264 }
2265 }
2266
2267 return 0;
2268 }
2269
2270
2271 static int
2272 journal_binfo_cmp(void *a, void *b)
2273 {
2274 block_info *bi_a = (struct block_info *)a;
2275 block_info *bi_b = (struct block_info *)b;
2276 daddr64_t res;
2277
2278 if (bi_a->bp == NULL) {
2279 return 1;
2280 }
2281 if (bi_b->bp == NULL) {
2282 return -1;
2283 }
2284
2285 // don't have to worry about negative block
2286 // numbers so this is ok to do.
2287 //
2288 res = (buf_blkno(bi_a->bp) - buf_blkno(bi_b->bp));
2289
2290 return (int)res;
2291 }
2292
2293
/*
 * Commit a transaction: write the transaction buffer(s) to the on-disk
 * journal, update the journal header, and then kick off async writes of
 * the journaled blocks to their real on-disk locations.
 *
 * If force_it is zero and the transaction buffer is not very full, the
 * transaction is instead parked in jnl->cur_tr ("group commit") and
 * nothing is written yet.
 *
 * Returns 0 on success.  On any failure the journal is marked
 * JOURNAL_INVALID, the transaction is aborted, and -1 is returned.
 */
static int
end_transaction(transaction *tr, int force_it)
{
    int i, j, ret, amt;
    errno_t errno;
    off_t end;
    journal *jnl = tr->jnl;
    struct buf *bp;
    block_list_header *blhdr=NULL, *next=NULL;

    if (jnl->cur_tr) {
	panic("jnl: jnl @ 0x%x already has cur_tr 0x%x, new tr: 0x%x\n",
	      jnl, jnl->cur_tr, tr);
    }

    // if there weren't any modified blocks in the transaction
    // just save off the transaction pointer and return.
    // (total_bytes == blhdr_size means only the header was accounted.)
    if (tr->total_bytes == jnl->jhdr->blhdr_size) {
	jnl->cur_tr = tr;
	return 0;
    }

    // if our transaction buffer isn't very full, just hang
    // on to it and don't actually flush anything.  this is
    // what is known as "group commit".  we will flush the
    // transaction buffer if it's full or if we have more than
    // one of them so we don't start hogging too much memory.
    //
    if (   force_it == 0
	&& (jnl->flags & JOURNAL_NO_GROUP_COMMIT) == 0
	&& tr->num_blhdrs < 3
	&& (tr->total_bytes <= ((tr->tbuffer_size*tr->num_blhdrs) - tr->tbuffer_size/8))) {

	jnl->cur_tr = tr;
	return 0;
    }


    // if we're here we're going to flush the transaction buffer to disk.
    // make sure there is room in the journal first.
    check_free_space(jnl, tr->total_bytes);

    // range check the end index
    if (jnl->jhdr->end <= 0 || jnl->jhdr->end > jnl->jhdr->size) {
	panic("jnl: end_transaction: end is bogus 0x%llx (sz 0x%llx)\n",
	      jnl->jhdr->end, jnl->jhdr->size);
    }

    // this transaction starts where the current journal ends
    tr->journal_start = jnl->jhdr->end;
    end               = jnl->jhdr->end;

    //
    // if the first entry in old_start[] isn't free yet, loop calling the
    // file system flush routine until it is (or we panic).
    //
    // the high bit of an old_start[] entry marks a transaction whose
    // blocks are still in flight; buffer_flushed_callback clears it.
    //
    i = 0;
    lock_oldstart(jnl);
    while ((jnl->old_start[0] & 0x8000000000000000LL) != 0) {
	if (jnl->flush) {
	    unlock_oldstart(jnl);

	    if (jnl->flush) {
		jnl->flush(jnl->flush_arg);
	    }

	    // yield the cpu so others can get in to clear the lock bit
	    (void)tsleep((void *)jnl, PRIBIO, "jnl-old-start-sleep", 1);

	    lock_oldstart(jnl);
	}
	if (i++ >= 500) {
	    panic("jnl: transaction that started at 0x%llx is not completing! jnl 0x%x\n",
		  jnl->old_start[0] & (~0x8000000000000000LL), jnl);
	}
    }

    //
    // slide everyone else down and put our latest guy in the last
    // entry in the old_start array
    //
    memcpy(&jnl->old_start[0], &jnl->old_start[1], sizeof(jnl->old_start)-sizeof(jnl->old_start[0]));
    jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] = tr->journal_start | 0x8000000000000000LL;

    unlock_oldstart(jnl);


    // pass 1: for each block, make sure that the physical block # is set
    for(blhdr=tr->blhdr; blhdr; blhdr=next) {

	for(i=1; i < blhdr->num_blocks; i++) {
	    daddr64_t blkno;
	    daddr64_t lblkno;
	    struct vnode *vp;

	    bp = blhdr->binfo[i].bp;
	    if (bp == NULL) {   // only true if a block was "killed"
		if (blhdr->binfo[i].bnum != (off_t)-1) {
		    panic("jnl: inconsistent binfo (NULL bp w/bnum %lld; jnl @ 0x%x, tr 0x%x)\n",
			  blhdr->binfo[i].bnum, jnl, tr);
		}
		continue;
	    }
	    vp = buf_vnode(bp);
	    blkno = buf_blkno(bp);
	    lblkno = buf_lblkno(bp);

	    if (vp == NULL && lblkno == blkno) {
		printf("jnl: end_tr: bad news! bp @ 0x%x w/null vp and l/blkno = %qd/%qd.  aborting the transaction (tr 0x%x jnl 0x%x).\n",
		       bp, lblkno, blkno, tr, jnl);
		goto bad_journal;
	    }

	    // if the lblkno is the same as blkno and this bp isn't
	    // associated with the underlying file system device then
	    // we need to call bmap() to get the actual physical block.
	    //
	    if ((lblkno == blkno) && (vp != jnl->fsdev)) {
		off_t f_offset;
		size_t contig_bytes;

		if (VNOP_BLKTOOFF(vp, lblkno, &f_offset)) {
		    printf("jnl: end_tr: vnop_blktooff failed @ 0x%x, jnl 0x%x\n", bp, jnl);
		    goto bad_journal;
		}
		if (VNOP_BLOCKMAP(vp, f_offset, buf_count(bp), &blkno, &contig_bytes, NULL, 0, NULL)) {
		    printf("jnl: end_tr: can't blockmap the bp @ 0x%x, jnl 0x%x\n", bp, jnl);
		    goto bad_journal;
		}
		if ((uint32_t)contig_bytes < buf_count(bp)) {
		    printf("jnl: end_tr: blk not physically contiguous on disk@ 0x%x, jnl 0x%x\n", bp, jnl);
		    goto bad_journal;
		}
		buf_setblkno(bp, blkno);
	    }
	    // update this so we write out the correct physical block number!
	    blhdr->binfo[i].bnum = (off_t)(blkno);
	}

	next = (block_list_header *)((long)blhdr->binfo[0].bnum);
    }

    // pass 2: checksum each block_list_header and write it (plus its
    // block data) into the journal at "end"
    for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {

	amt = blhdr->bytes_used;

	blhdr->checksum = 0;
	blhdr->checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);

	ret = write_journal_data(jnl, &end, blhdr, amt);
	if (ret != amt) {
	    printf("jnl: end_transaction: only wrote %d of %d bytes to the journal!\n",
		   ret, amt);

	    goto bad_journal;
	}
    }

    jnl->jhdr->end = end;    // update where the journal now ends
    tr->journal_end = end;    // the transaction ends here too
    if (tr->journal_start == 0 || tr->journal_end == 0) {
	panic("jnl: end_transaction: bad tr journal start/end: 0x%llx 0x%llx\n",
	      tr->journal_start, tr->journal_end);
    }

    // persist the new start/end; if this fails the on-disk journal
    // may be inconsistent, so bail out
    if (write_journal_header(jnl) != 0) {
	goto bad_journal;
    }

    //
    // setup for looping through all the blhdr's.  we null out the
    // tbuffer and blhdr fields so that they're not used any more.
    //
    blhdr = tr->blhdr;
    tr->tbuffer = NULL;
    tr->blhdr = NULL;

    // the buffer_flushed_callback will only be called for the
    // real blocks that get flushed so we have to account for
    // the block_list_headers here.
    //
    tr->num_flushed = tr->num_blhdrs * jnl->jhdr->blhdr_size;

    // pass 3: for each block, set the iodone callback and unlock it
    for(; blhdr; blhdr=next) {

	// we can re-order the buf ptrs because everything is written out already
	qsort(&blhdr->binfo[1], blhdr->num_blocks-1, sizeof(block_info), journal_binfo_cmp);

	for(i=1; i < blhdr->num_blocks; i++) {
	    if (blhdr->binfo[i].bp == NULL) {
		continue;
	    }

	    // re-acquire the buffer; it should come back locked and
	    // delayed-write since modify_block_end did buf_bdwrite()
	    errno = buf_meta_bread(buf_vnode(blhdr->binfo[i].bp),
				   buf_lblkno(blhdr->binfo[i].bp),
				   buf_size(blhdr->binfo[i].bp),
				   NOCRED,
				   &bp);
	    if (errno == 0 && bp != NULL) {
		struct vnode *save_vp;
		void *cur_filter;

		if (bp != blhdr->binfo[i].bp) {
		    panic("jnl: end_tr: got back a different bp! (bp 0x%x should be 0x%x, jnl 0x%x\n",
			  bp, blhdr->binfo[i].bp, jnl);
		}

		if ((buf_flags(bp) & (B_LOCKED|B_DELWRI)) != (B_LOCKED|B_DELWRI)) {
		    if (jnl->flags & JOURNAL_CLOSE_PENDING) {
			buf_clearflags(bp, B_LOCKED);
			buf_brelse(bp);
			continue;
		    } else {
			panic("jnl: end_tr: !!!DANGER!!! bp 0x%x flags (0x%x) not LOCKED & DELWRI\n", bp, buf_flags(bp));
		    }
		}
		save_vp = buf_vnode(bp);

		// buffer_flushed_callback will credit this tr when the
		// block hits its real location on disk
		buf_setfilter(bp, buffer_flushed_callback, tr, &cur_filter, NULL);

		if (cur_filter) {
		    panic("jnl: bp @ 0x%x (blkno %qd, vp 0x%x) has non-null iodone (0x%x) buffflushcb 0x%x\n",
			  bp, buf_blkno(bp), save_vp, cur_filter, buffer_flushed_callback);
		}
		buf_clearflags(bp, B_LOCKED);

		// kicking off the write here helps performance
		buf_bawrite(bp);
		// XXXdbg this is good for testing: buf_bdwrite(bp);
		//buf_bdwrite(bp);

		// this undoes the vnode_ref() in journal_modify_block_end()
		vnode_rele_ext(save_vp, 0, 1);
	    } else {
		printf("jnl: end_transaction: could not find block %Ld vp 0x%x!\n",
		       blhdr->binfo[i].bnum, blhdr->binfo[i].bp);
		if (bp) {
		    buf_clearflags(bp, B_LOCKED);
		    buf_brelse(bp);
		}
	    }
	}

	// grab the next-header link before we poison binfo[0]
	next = (block_list_header *)((long)blhdr->binfo[0].bnum);

	// we can free blhdr here since we won't need it any more
	blhdr->binfo[0].bnum = 0xdeadc0de;
	kmem_free(kernel_map, (vm_offset_t)blhdr, tr->tbuffer_size);
    }

    //printf("jnl: end_tr: tr @ 0x%x, jnl-blocks: 0x%llx - 0x%llx. exit!\n",
    //   tr, tr->journal_start, tr->journal_end);
    return 0;


  bad_journal:
    // mark the journal dead, clear the "in flight" bit we set in
    // old_start above, and tear down the transaction
    jnl->flags |= JOURNAL_INVALID;
    jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] &= ~0x8000000000000000LL;
    abort_transaction(jnl, tr);
    return -1;
}
2556
/*
 * Tear down a transaction that will never be committed (the journal is
 * being invalidated).  Each still-locked block is re-acquired, marked
 * invalid (which also clears its locked/delayed state on release),
 * released, and its vnode reference from journal_modify_block_end() is
 * dropped.  All transaction buffers and the transaction itself are
 * freed.  The transaction pointer must not be used after this returns.
 */
static void
abort_transaction(journal *jnl, transaction *tr)
{
    int i;
    errno_t errno;
    block_list_header *blhdr, *next;
    struct buf *bp;
    struct vnode *save_vp;

    // for each block list header, iterate over the blocks then
    // free up the memory associated with the block list.
    //
    // for each block, clear the lock bit and release it.
    //
    for(blhdr=tr->blhdr; blhdr; blhdr=next) {

	for(i=1; i < blhdr->num_blocks; i++) {
	    if (blhdr->binfo[i].bp == NULL) {
		continue;
	    }
	    // skip blocks that are no longer locked into this
	    // transaction (or whose vnode is gone)
	    if ( (buf_vnode(blhdr->binfo[i].bp) == NULL) ||
		 !(buf_flags(blhdr->binfo[i].bp) & B_LOCKED) ) {
		continue;
	    }

	    errno = buf_meta_bread(buf_vnode(blhdr->binfo[i].bp),
				   buf_lblkno(blhdr->binfo[i].bp),
				   buf_size(blhdr->binfo[i].bp),
				   NOCRED,
				   &bp);
	    if (errno == 0) {
		if (bp != blhdr->binfo[i].bp) {
		    panic("jnl: abort_tr: got back a different bp! (bp 0x%x should be 0x%x, jnl 0x%x\n",
			  bp, blhdr->binfo[i].bp, jnl);
		}

		// releasing a bp marked invalid
		// also clears the locked and delayed state
		buf_markinvalid(bp);
		save_vp = buf_vnode(bp);

		buf_brelse(bp);

		// this undoes the vnode_ref() in journal_modify_block_end()
		vnode_rele_ext(save_vp, 0, 1);
	    } else {
		printf("jnl: abort_tr: could not find block %Ld vp 0x%x!\n",
		       blhdr->binfo[i].bnum, blhdr->binfo[i].bp);
		if (bp) {
		    buf_brelse(bp);
		}
	    }
	}

	// grab the next-header link before we poison binfo[0]
	next = (block_list_header *)((long)blhdr->binfo[0].bnum);

	// we can free blhdr here since we won't need it any more
	blhdr->binfo[0].bnum = 0xdeadc0de;
	kmem_free(kernel_map, (vm_offset_t)blhdr, tr->tbuffer_size);
    }

    // poison and free the transaction itself
    tr->tbuffer = NULL;
    tr->blhdr = NULL;
    tr->total_bytes = 0xdbadc0de;
    FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
}
2622
2623
/*
 * End the transaction started by journal_start_transaction().
 *
 * Nested calls just decrement the nesting count; only the outermost
 * call actually commits (via end_transaction()) and releases the
 * journal lock.  If the journal has been invalidated, any active
 * transaction is aborted instead and EINVAL is returned.
 *
 * Returns 0 on success, EINVAL for an invalid journal, or whatever
 * end_transaction() returns.  Must be called by the owning thread.
 */
int
journal_end_transaction(journal *jnl)
{
    int ret;
    transaction *tr;

    CHECK_JOURNAL(jnl);

    // invalid journal with no owner: nothing to unwind, nothing to do
    if ((jnl->flags & JOURNAL_INVALID) && jnl->owner == NULL) {
	return 0;
    }

    if (jnl->owner != current_thread()) {
	panic("jnl: end_tr: I'm not the owner! jnl 0x%x, owner 0x%x, curact 0x%x\n",
	      jnl, jnl->owner, current_thread());
    }

    free_old_stuff(jnl);

    jnl->nested_count--;
    if (jnl->nested_count > 0) {
	// still inside an outer transaction; defer the real commit
	return 0;
    } else if (jnl->nested_count < 0) {
	panic("jnl: jnl @ 0x%x has negative nested count (%d). bad boy.\n", jnl, jnl->nested_count);
    }

    if (jnl->flags & JOURNAL_INVALID) {
	// journal went bad mid-transaction: abort instead of committing
	if (jnl->active_tr) {
	    if (jnl->cur_tr != NULL) {
		panic("jnl: journal @ 0x%x has active tr (0x%x) and cur tr (0x%x)\n",
		      jnl, jnl->active_tr, jnl->cur_tr);
	    }

	    tr = jnl->active_tr;
	    jnl->active_tr = NULL;
	    abort_transaction(jnl, tr);
	}

	jnl->owner = NULL;
	unlock_journal(jnl);

	return EINVAL;
    }

    tr = jnl->active_tr;
    CHECK_TRANSACTION(tr);

    // clear this out here so that when check_free_space() calls
    // the FS flush function, we don't panic in journal_flush()
    // if the FS were to call that.  note: check_free_space() is
    // called from end_transaction().
    //
    jnl->active_tr = NULL;
    ret = end_transaction(tr, 0);

    // release ownership only after the commit has been issued
    jnl->owner = NULL;
    unlock_journal(jnl);

    return ret;
}
2684
2685
2686 int
2687 journal_flush(journal *jnl)
2688 {
2689 int need_signal = 0;
2690
2691 CHECK_JOURNAL(jnl);
2692
2693 if (jnl->flags & JOURNAL_INVALID) {
2694 return -1;
2695 }
2696
2697 if (jnl->owner != current_thread()) {
2698 int ret;
2699
2700 lock_journal(jnl);
2701 need_signal = 1;
2702 }
2703
2704 free_old_stuff(jnl);
2705
2706 // if we're not active, flush any buffered transactions
2707 if (jnl->active_tr == NULL && jnl->cur_tr) {
2708 transaction *tr = jnl->cur_tr;
2709
2710 jnl->cur_tr = NULL;
2711 end_transaction(tr, 1); // force it to get flushed
2712 }
2713
2714 if (need_signal) {
2715 unlock_journal(jnl);
2716 }
2717
2718 return 0;
2719 }
2720
2721 int
2722 journal_active(journal *jnl)
2723 {
2724 if (jnl->flags & JOURNAL_INVALID) {
2725 return -1;
2726 }
2727
2728 return (jnl->active_tr == NULL) ? 0 : 1;
2729 }
2730
/*
 * Return the thread currently holding the journal (set while a
 * transaction is in progress; compared against current_thread()
 * elsewhere in this file), or NULL if nobody owns it.
 */
void *
journal_owner(journal *jnl)
{
	return jnl->owner;
}