/* bsd/vfs/vfs_journal.c — Apple xnu-1504.9.17 (mirrored from git.saurik.com) */
1 /*
2 * Copyright (c) 1995-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 //
29 // This file implements a simple write-ahead journaling layer.
30 // In theory any file system can make use of it by calling these
31 // functions when the fs wants to modify meta-data blocks. See
32 // vfs_journal.h for a more detailed description of the api and
33 // data structures.
34 //
35 // Dominic Giampaolo (dbg@apple.com)
36 //
37
38 #ifdef KERNEL
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/file_internal.h>
44 #include <sys/stat.h>
45 #include <sys/buf_internal.h>
46 #include <sys/proc_internal.h>
47 #include <sys/mount_internal.h>
48 #include <sys/namei.h>
49 #include <sys/vnode_internal.h>
50 #include <sys/ioctl.h>
51 #include <sys/tty.h>
52 #include <sys/ubc.h>
53 #include <sys/malloc.h>
54 #include <kern/thread.h>
55 #include <sys/disk.h>
56 #include <sys/kdebug.h>
57 #include <miscfs/specfs/specdev.h>
58 #include <libkern/OSAtomic.h> /* OSAddAtomic */
59
60 extern task_t kernel_task;
61
62 #define DBG_JOURNAL_FLUSH 1
63
64 #include <sys/sdt.h> /* DTRACE_IO1 */
65 #else
66
67 #include <stdio.h>
68 #include <stdlib.h>
69 #include <string.h>
70 #include <limits.h>
71 #include <errno.h>
72 #include <fcntl.h>
73 #include <unistd.h>
74 #include <stdarg.h>
75 #include <sys/types.h>
76 #include "compat.h"
77
78 #endif /* KERNEL */
79
80 #include "vfs_journal.h"
81
82 #if JOURNALING
83
84 /* XXX next prototytype should be from libsa/stdlib.h> but conflicts libkern */
85 __private_extern__ void qsort(
86 void * array,
87 size_t nmembers,
88 size_t member_size,
89 int (*)(const void *, const void *));
90
91
92
93 // number of bytes to checksum in a block_list_header
94 // NOTE: this should be enough to clear out the header
95 // fields as well as the first entry of binfo[]
96 #define BLHDR_CHECKSUM_SIZE 32
97
98
99 static int end_transaction(transaction *tr, int force_it, errno_t (*callback)(void*), void *callback_arg);
100 static void abort_transaction(journal *jnl, transaction *tr);
101 static void dump_journal(journal *jnl);
102
103 static __inline__ void lock_journal(journal *jnl);
104 static __inline__ void unlock_journal(journal *jnl);
105 static __inline__ void lock_oldstart(journal *jnl);
106 static __inline__ void unlock_oldstart(journal *jnl);
107
108
109
110
111 //
112 // 3105942 - Coalesce writes to the same block on journal replay
113 //
114
// One coalesced replay entry: the newest journal copy of a given
// on-disk block range (see PR-3105942 note above).
typedef struct bucket {
    off_t    block_num;    // destination block number on the fs device (units of jhdr_size)
    uint32_t jnl_offset;   // byte offset within the journal where the data lives
    uint32_t block_size;   // number of bytes to replay from the journal
    int32_t  cksum;        // checksum of the journaled copy (0 == unknown / don't check)
} bucket;
121
122 #define STARTING_BUCKETS 256
123
124 static int add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr);
125 static int grow_table(struct bucket **buf_ptr, int num_buckets, int new_size);
126 static int lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full);
127 static int do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr);
128 static int insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr, int overwriting);
129
//
// Sanity-check a journal at the top of the api entry points: panic
// if the journal pointer, its devices, the header magic, or the
// start/end offsets are obviously corrupt.  Fail-fast on corruption
// rather than silently scribbling on the disk.
//
#define CHECK_JOURNAL(jnl) \
    do { \
    if (jnl == NULL) {\
	panic("%s:%d: null journal ptr?\n", __FILE__, __LINE__);\
    }\
    if (jnl->jdev == NULL) { \
	panic("%s:%d: jdev is null!\n", __FILE__, __LINE__);\
    } \
    if (jnl->fsdev == NULL) { \
	panic("%s:%d: fsdev is null!\n", __FILE__, __LINE__);\
    } \
    if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC) {\
	panic("%s:%d: jhdr magic corrupted (0x%x != 0x%x)\n",\
	__FILE__, __LINE__, jnl->jhdr->magic, JOURNAL_HEADER_MAGIC);\
    }\
    if (   jnl->jhdr->start <= 0 \
	|| jnl->jhdr->start > jnl->jhdr->size) {\
	panic("%s:%d: jhdr start looks bad (0x%llx max size 0x%llx)\n", \
	__FILE__, __LINE__, jnl->jhdr->start, jnl->jhdr->size);\
    }\
    if (   jnl->jhdr->end <= 0 \
	|| jnl->jhdr->end > jnl->jhdr->size) {\
	panic("%s:%d: jhdr end looks bad (0x%llx max size 0x%llx)\n", \
	__FILE__, __LINE__, jnl->jhdr->end, jnl->jhdr->size);\
    }\
    } while(0)
156
//
// Sanity-check a transaction: panic if its back-pointer, buffer
// layout, byte counts, or journal start/end offsets are corrupt.
// Like CHECK_JOURNAL, used to fail fast at api entry points.
//
#define CHECK_TRANSACTION(tr) \
    do {\
    if (tr == NULL) {\
	panic("%s:%d: null transaction ptr?\n", __FILE__, __LINE__);\
    }\
    if (tr->jnl == NULL) {\
	panic("%s:%d: null tr->jnl ptr?\n", __FILE__, __LINE__);\
    }\
    if (tr->blhdr != (block_list_header *)tr->tbuffer) {\
	panic("%s:%d: blhdr (%p) != tbuffer (%p)\n", __FILE__, __LINE__, tr->blhdr, tr->tbuffer);\
    }\
    if (tr->total_bytes < 0) {\
	panic("%s:%d: tr total_bytes looks bad: %d\n", __FILE__, __LINE__, tr->total_bytes);\
    }\
    if (tr->journal_start < 0) {\
	panic("%s:%d: tr journal start looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_start);\
    }\
    if (tr->journal_end < 0) {\
	panic("%s:%d: tr journal end looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_end);\
    }\
    if (tr->blhdr && (tr->blhdr->max_blocks <= 0 || tr->blhdr->max_blocks > (tr->jnl->jhdr->size/tr->jnl->jhdr->jhdr_size))) {\
	panic("%s:%d: tr blhdr max_blocks looks bad: %d\n", __FILE__, __LINE__, tr->blhdr->max_blocks);\
    }\
    } while(0)
181
182
183
184 //
185 // this isn't a great checksum routine but it will do for now.
186 // we use it to checksum the journal header and the block list
187 // headers that are at the start of each transaction.
188 //
//
// Weak rolling checksum used for the journal header and the block
// list headers at the start of each transaction.  Each byte is
// folded into the accumulator and the bitwise complement of the
// result is returned.  (Not cryptographic -- just corruption
// detection, as the original comment admits.)
//
static int
calc_checksum(char *ptr, int len)
{
	int sum = 0;
	int idx = 0;
	const unsigned char *bytes = (const unsigned char *)ptr;

	while (idx < len) {
		sum = (sum << 8) ^ (sum + bytes[idx]);
		idx++;
	}

	return ~sum;
}
201
202 //
203 // Journal Locking
204 //
205 lck_grp_attr_t * jnl_group_attr;
206 lck_attr_t * jnl_lock_attr;
207 lck_grp_t * jnl_mutex_group;
208
209 void
210 journal_init(void)
211 {
212 jnl_lock_attr = lck_attr_alloc_init();
213 jnl_group_attr = lck_grp_attr_alloc_init();
214 jnl_mutex_group = lck_grp_alloc_init("jnl-mutex", jnl_group_attr);
215 }
216
// Take the journal's main mutex (serializes transactions on this journal).
static __inline__ void
lock_journal(journal *jnl)
{
	lck_mtx_lock(&jnl->jlock);
}
222
// Release the journal's main mutex.
static __inline__ void
unlock_journal(journal *jnl)
{
	lck_mtx_unlock(&jnl->jlock);
}
228
// Take the lock protecting old_start[], completed_trs and tr_freeme.
static __inline__ void
lock_oldstart(journal *jnl)
{
	lck_mtx_lock(&jnl->old_start_lock);
}
234
// Release the old_start/completed-transaction lock.
static __inline__ void
unlock_oldstart(journal *jnl)
{
	lck_mtx_unlock(&jnl->old_start_lock);
}
240
241
242
243 #define JNL_WRITE 0x0001
244 #define JNL_READ 0x0002
245 #define JNL_HEADER 0x8000
246
247 //
248 // This function sets up a fake buf and passes it directly to the
249 // journal device strategy routine (so that it won't get cached in
250 // the block cache.
251 //
252 // It also handles range checking the i/o so that we don't write
253 // outside the journal boundaries and it will wrap the i/o back
254 // to the beginning if necessary (skipping over the journal header)
255 //
static size_t
do_journal_io(journal *jnl, off_t *offset, void *data, size_t len, int direction)
{
	// NOTE(review): curlen is an int assigned from a size_t -- assumes
	// individual journal i/o sizes fit in 31 bits; confirm against the
	// journal sizing code elsewhere in this file.
	int err, curlen=len;
	size_t io_sz = 0;
	buf_t bp;
	off_t max_iosize;

	if (*offset < 0 || *offset > jnl->jhdr->size) {
		panic("jnl: do_jnl_io: bad offset 0x%llx (max 0x%llx)\n", *offset, jnl->jhdr->size);
	}

	// clamp each chunk to the device's advertised transfer limit
	// for the chosen direction (fallback: 128K)
	if (direction & JNL_WRITE)
		max_iosize = jnl->max_write_size;
	else if (direction & JNL_READ)
		max_iosize = jnl->max_read_size;
	else
		max_iosize = 128 * 1024;

again:
	bp = alloc_io_buf(jnl->jdev, 1);

	// if this chunk would run past the end of the journal, either
	// wrap to just past the header (when we're exactly at the end)
	// or truncate the chunk to the bytes remaining before the end
	if (*offset + (off_t)curlen > jnl->jhdr->size && *offset != 0 && jnl->jhdr->size != 0) {
		if (*offset == jnl->jhdr->size) {
			*offset = jnl->jhdr->jhdr_size;
		} else {
			curlen = (off_t)jnl->jhdr->size - *offset;
		}
	}

	if (curlen > max_iosize) {
		curlen = max_iosize;
	}

	if (curlen <= 0) {
		panic("jnl: do_jnl_io: curlen == %d, offset 0x%llx len %zd\n", curlen, *offset, len);
	}

	// offset 0 is the journal header itself; ordinary data i/o must
	// never touch it -- callers must pass JNL_HEADER explicitly
	if (*offset == 0 && (direction & JNL_HEADER) == 0) {
		panic("jnl: request for i/o to jnl-header without JNL_HEADER flag set! (len %d, data %p)\n", curlen, data);
	}

	if (direction & JNL_READ)
		buf_setflags(bp, B_READ);
	else {
		/*
		 * don't have to set any flags
		 */
		vnode_startwrite(jnl->jdev);
	}
	buf_setsize(bp, curlen);
	buf_setcount(bp, curlen);
	buf_setdataptr(bp, (uintptr_t)data);
	// physical and logical block numbers are both the journal offset
	// translated to device blocks (journal is contiguous on disk)
	buf_setblkno(bp, (daddr64_t) ((jnl->jdev_offset + *offset) / (off_t)jnl->jhdr->jhdr_size));
	buf_setlblkno(bp, (daddr64_t) ((jnl->jdev_offset + *offset) / (off_t)jnl->jhdr->jhdr_size));
	if ((direction & JNL_WRITE) && (jnl->flags & JOURNAL_DO_FUA_WRITES)) {
		buf_markfua(bp);
	}

	// synchronous i/o: issue the buf straight to the device driver
	// and wait for completion (bypasses the buffer cache entirely)
	DTRACE_IO1(journal__start, buf_t, bp);
	err = VNOP_STRATEGY(bp);
	if (!err) {
		err = (int)buf_biowait(bp);
	}
	DTRACE_IO1(journal__done, buf_t, bp);
	free_io_buf(bp);

	if (err) {
		printf("jnl: %s: do_jnl_io: strategy err 0x%x\n", jnl->jdev_name, err);
		return 0;
	}

	*offset += curlen;
	io_sz += curlen;
	if (io_sz != len) {
		// handle wrap-around
		data = (char *)data + curlen;
		curlen = len - io_sz;
		if (*offset >= jnl->jhdr->size) {
			*offset = jnl->jhdr->jhdr_size;
		}
		goto again;
	}

	return io_sz;
}
342
// Read len bytes of journal data at *offset (advances *offset,
// wrapping past the header as needed).  Returns bytes read, 0 on error.
static size_t
read_journal_data(journal *jnl, off_t *offset, void *data, size_t len)
{
	return do_journal_io(jnl, offset, data, len, JNL_READ);
}
348
// Write len bytes of journal data at *offset (advances *offset,
// wrapping past the header as needed).  Returns bytes written, 0 on error.
static size_t
write_journal_data(journal *jnl, off_t *offset, void *data, size_t len)
{
	return do_journal_io(jnl, offset, data, len, JNL_WRITE);
}
354
355
// Read the on-disk journal header (offset 0; JNL_HEADER authorizes
// touching it).  Returns bytes read, 0 on error.
static size_t
read_journal_header(journal *jnl, void *data, size_t len)
{
	off_t hdr_offset = 0;

	return do_journal_io(jnl, &hdr_offset, data, len, JNL_READ|JNL_HEADER);
}
363
// Write the in-memory journal header to disk, checksummed, with the
// cache flushes needed to keep the on-disk ordering safe.  Returns 0
// on success; on write failure marks the journal invalid and returns -1.
// updating_start != 0 means we're advancing the start pointer (i.e.
// retiring transactions), which changes which side of the write needs
// the flush.
static int
write_journal_header(journal *jnl, int updating_start)
{
	static int num_err_prints = 0;
	int ret=0;
	off_t jhdr_offset = 0;
	struct vfs_context context;

	context.vc_thread = current_thread();
	context.vc_ucred = NOCRED;
	//
	// Flush the track cache if we're not doing force-unit-access
	// writes.
	//
	if (!updating_start && (jnl->flags & JOURNAL_DO_FUA_WRITES) == 0) {
		ret = VNOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, &context);
	}
	if (ret != 0) {
		//
		// Only print this error if it's a different error than the
		// previous one, or if it's the first time for this device
		// or if the total number of printfs is less than 25.  We
		// allow for up to 25 printfs to insure that some make it
		// into the on-disk syslog.  Otherwise if we only printed
		// one, it's possible it would never make it to the syslog
		// for the root volume and that makes debugging hard.
		//
		if (   ret != jnl->last_flush_err
		    || (jnl->flags & JOURNAL_FLUSHCACHE_ERR) == 0
		    || num_err_prints++ < 25) {

			printf("jnl: %s: flushing fs disk buffer returned 0x%x\n", jnl->jdev_name, ret);

			jnl->flags |= JOURNAL_FLUSHCACHE_ERR;
			jnl->last_flush_err = ret;
		}
	}

	// checksum is computed over the header with the checksum field
	// itself zeroed, so clear it first
	jnl->jhdr->checksum = 0;
	jnl->jhdr->checksum = calc_checksum((char *)jnl->jhdr, JOURNAL_HEADER_CKSUM_SIZE);
	if (do_journal_io(jnl, &jhdr_offset, jnl->header_buf, jnl->jhdr->jhdr_size, JNL_WRITE|JNL_HEADER) != (size_t)jnl->jhdr->jhdr_size) {
		printf("jnl: %s: write_journal_header: error writing the journal header!\n", jnl->jdev_name);
		jnl->flags |= JOURNAL_INVALID;
		return -1;
	}

	// If we're not doing force-unit-access writes, then we
	// have to flush after writing the journal header so that
	// a future transaction doesn't sneak out to disk before
	// the header does and thus overwrite data that the old
	// journal header refers to.  Saw this exact case happen
	// on an IDE bus analyzer with Larry Barras so while it
	// may seem obscure, it's not.
	//
	if (updating_start && (jnl->flags & JOURNAL_DO_FUA_WRITES) == 0) {
		VNOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, &context);
	}

	return 0;
}
424
425
426
427 //
428 // this is a work function used to free up transactions that
429 // completed. they can't be free'd from buffer_flushed_callback
430 // because it is called from deep with the disk driver stack
431 // and thus can't do something that would potentially cause
432 // paging. it gets called by each of the journal api entry
433 // points so stuff shouldn't hang around for too long.
434 //
435 static void
436 free_old_stuff(journal *jnl)
437 {
438 transaction *tr, *next;
439
440 lock_oldstart(jnl);
441 tr = jnl->tr_freeme;
442 jnl->tr_freeme = NULL;
443 unlock_oldstart(jnl);
444
445 for(; tr; tr=next) {
446 next = tr->next;
447 FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
448 }
449
450 }
451
452
453
454 //
455 // This is our callback that lets us know when a buffer has been
456 // flushed to disk. It's called from deep within the driver stack
457 // and thus is quite limited in what it can do. Notably, it can
458 // not initiate any new i/o's or allocate/free memory.
459 //
static void
buffer_flushed_callback(struct buf *bp, void *arg)
{
	transaction *tr;
	journal *jnl;
	transaction *ctr, *prev=NULL, *next;
	size_t i;
	int bufsize, amt_flushed, total_bytes;


	//printf("jnl: buf flush: bp @ 0x%x l/blkno %qd/%qd vp 0x%x tr @ 0x%x\n",
	//	   bp, buf_lblkno(bp), buf_blkno(bp), buf_vnode(bp), arg);

	// snarf out the bits we want
	bufsize = buf_size(bp);
	tr = (transaction *)arg;

	// then we've already seen it
	if (tr == NULL) {
		return;
	}

	CHECK_TRANSACTION(tr);

	jnl = tr->jnl;
	if (jnl->flags & JOURNAL_INVALID) {
		return;
	}

	CHECK_JOURNAL(jnl);

	amt_flushed = tr->num_killed;
	total_bytes = tr->total_bytes;

	// update the number of blocks that have been flushed.
	// this buf may represent more than one block so take
	// that into account.
	//
	// OSAddAtomic() returns the value of tr->num_flushed before the add
	//
	amt_flushed += OSAddAtomic(bufsize, &tr->num_flushed);


	// if this transaction isn't done yet, just return as
	// there is nothing to do.
	//
	// NOTE: we are careful to not reference anything through
	//       the tr pointer after doing the OSAddAtomic().  if
	//       this if statement fails then we are the last one
	//       and then it's ok to dereference "tr".
	//
	if ((amt_flushed + bufsize) < total_bytes) {
		return;
	}

	// this will single thread checking the transaction
	lock_oldstart(jnl);

	// 0xfbadc0de is the "already cleaned up" sentinel -- whichever
	// callback sets it below owns the rest of the cleanup
	if (tr->total_bytes == (int)0xfbadc0de) {
		// then someone beat us to it...
		unlock_oldstart(jnl);
		return;
	}

	// mark this so that we're the owner of dealing with the
	// cleanup for this transaction
	tr->total_bytes = 0xfbadc0de;

	//printf("jnl: tr 0x%x (0x%llx 0x%llx) in jnl 0x%x completed.\n",
	//   tr, tr->journal_start, tr->journal_end, jnl);

	// find this entry in the old_start[] index and mark it completed
	// (the high bit of an old_start entry means "still in flight")
	for(i=0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {

		if ((off_t)(jnl->old_start[i] & ~(0x8000000000000000ULL)) == tr->journal_start) {
			jnl->old_start[i] &= ~(0x8000000000000000ULL);
			break;
		}
	}

	if (i >= sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
		panic("jnl: buffer_flushed: did not find tr w/start @ %lld (tr %p, jnl %p)\n",
		      tr->journal_start, tr, jnl);
	}


	// if we are here then we need to update the journal header
	// to reflect that this transaction is complete
	if (tr->journal_start == jnl->active_start) {
		jnl->active_start = tr->journal_end;
		tr->journal_start = tr->journal_end = (off_t)0;
	}

	// go through the completed_trs list and try to coalesce
	// entries, restarting back at the beginning if we have to.
	for(ctr=jnl->completed_trs; ctr; prev=ctr, ctr=next) {
		if (ctr->journal_start == jnl->active_start) {
			// ctr butts up against the active region: retire it
			// and push it onto the freeme list
			jnl->active_start = ctr->journal_end;
			if (prev) {
				prev->next = ctr->next;
			}
			if (ctr == jnl->completed_trs) {
				jnl->completed_trs = ctr->next;
			}

			next           = jnl->completed_trs;   // this starts us over again
			ctr->next      = jnl->tr_freeme;
			jnl->tr_freeme = ctr;
			ctr            = NULL;
		} else if (tr->journal_end == ctr->journal_start) {
			// our range abuts ctr on the left: merge into ctr
			ctr->journal_start = tr->journal_start;
			next               = jnl->completed_trs;  // this starts us over again
			ctr                = NULL;
			tr->journal_start  = tr->journal_end = (off_t)0;
		} else if (tr->journal_start == ctr->journal_end) {
			// our range abuts ctr on the right: merge into ctr
			ctr->journal_end  = tr->journal_end;
			next              = ctr->next;
			tr->journal_start = tr->journal_end = (off_t)0;
		} else if (ctr->next && ctr->journal_end == ctr->next->journal_start) {
			// coalesce the next entry with this one and link the next
			// entry in at the head of the tr_freeme list
			next       = ctr->next;           // temporarily use the "next" variable
			ctr->journal_end = next->journal_end;
			ctr->next  = next->next;
			next->next = jnl->tr_freeme;      // link in the next guy at the head of the tr_freeme list
			jnl->tr_freeme = next;

			next = jnl->completed_trs;        // this starts us over again
			ctr  = NULL;
		} else {
			next = ctr->next;
		}
	}

	// if this is true then we didn't merge with anyone
	// so link ourselves in at the head of the completed
	// transaction list.
	if (tr->journal_start != 0) {
		// put this entry into the correct sorted place
		// in the list instead of just at the head.
		//

		prev = NULL;
		for(ctr=jnl->completed_trs; ctr && tr->journal_start > ctr->journal_start; prev=ctr, ctr=ctr->next) {
			// just keep looping
		}

		if (ctr == NULL && prev == NULL) {
			jnl->completed_trs = tr;
			tr->next = NULL;
		} else if (ctr == jnl->completed_trs) {
			tr->next = jnl->completed_trs;
			jnl->completed_trs = tr;
		} else {
			tr->next = prev->next;
			prev->next = tr;
		}
	} else {
		// if we're here this tr got merged with someone else so
		// put it on the list to be free'd
		tr->next       = jnl->tr_freeme;
		jnl->tr_freeme = tr;
	}
	unlock_oldstart(jnl);
}
625
626
627 #include <libkern/OSByteOrder.h>
628
629 #define SWAP16(x) OSSwapInt16(x)
630 #define SWAP32(x) OSSwapInt32(x)
631 #define SWAP64(x) OSSwapInt64(x)
632
633
// Byte-swap every field of the journal header in place.  Used during
// replay when the on-disk header was written with the opposite
// endianness (detected via the endian field by the caller).
static void
swap_journal_header(journal *jnl)
{
	jnl->jhdr->magic      = SWAP32(jnl->jhdr->magic);
	jnl->jhdr->endian     = SWAP32(jnl->jhdr->endian);
	jnl->jhdr->start      = SWAP64(jnl->jhdr->start);
	jnl->jhdr->end        = SWAP64(jnl->jhdr->end);
	jnl->jhdr->size       = SWAP64(jnl->jhdr->size);
	jnl->jhdr->blhdr_size = SWAP32(jnl->jhdr->blhdr_size);
	jnl->jhdr->checksum   = SWAP32(jnl->jhdr->checksum);
	jnl->jhdr->jhdr_size  = SWAP32(jnl->jhdr->jhdr_size);
	jnl->jhdr->sequence_num = SWAP32(jnl->jhdr->sequence_num);
}
647
// Byte-swap a block_list_header and its binfo[] entries in place.
// The header fields are swapped unconditionally; the binfo entries
// are only swapped if the (now-swapped) num_blocks looks plausible,
// since a corrupt count would walk off the end of the buffer.
static void
swap_block_list_header(journal *jnl, block_list_header *blhdr)
{
	int i;

	blhdr->max_blocks = SWAP16(blhdr->max_blocks);
	blhdr->num_blocks = SWAP16(blhdr->num_blocks);
	blhdr->bytes_used = SWAP32(blhdr->bytes_used);
	blhdr->checksum   = SWAP32(blhdr->checksum);
	blhdr->flags      = SWAP32(blhdr->flags);

	if (blhdr->num_blocks >= ((jnl->jhdr->blhdr_size / sizeof(block_info)) - 1)) {
		printf("jnl: %s: blhdr num blocks looks suspicious (%d / blhdr size %d).  not swapping.\n", jnl->jdev_name, blhdr->num_blocks, jnl->jhdr->blhdr_size);
		return;
	}

	for(i=0; i < blhdr->num_blocks; i++) {
		blhdr->binfo[i].bnum = SWAP64(blhdr->binfo[i].bnum);
		blhdr->binfo[i].u.bi.bsize = SWAP32(blhdr->binfo[i].u.bi.bsize);
		blhdr->binfo[i].u.bi.b.cksum = SWAP32(blhdr->binfo[i].u.bi.b.cksum);
	}
}
670
671
// Replay helper: write bsize bytes from block_ptr to fs block
// fs_block on the fs device, going through the buffer cache so any
// cached copy is updated too.  Returns 0 on success, non-zero on error.
static int
update_fs_block(journal *jnl, void *block_ptr, off_t fs_block, size_t bsize)
{
	int ret;
	struct buf *oblock_bp=NULL;

	// first read the block we want.
	ret = buf_meta_bread(jnl->fsdev, (daddr64_t)fs_block, bsize, NOCRED, &oblock_bp);
	if (ret != 0) {
		printf("jnl: %s: update_fs_block: error reading fs block # %lld! (ret %d)\n", jnl->jdev_name, fs_block, ret);

		if (oblock_bp) {
			buf_brelse(oblock_bp);
			oblock_bp = NULL;
		}

		// let's try to be aggressive here and just re-write the block
		oblock_bp = buf_getblk(jnl->fsdev, (daddr64_t)fs_block, bsize, 0, 0, BLK_META);
		if (oblock_bp == NULL) {
			printf("jnl: %s: update_fs_block: buf_getblk() for %lld failed! failing update.\n", jnl->jdev_name, fs_block);
			return -1;
		}
	}

	// make sure it's the correct size.
	if (buf_size(oblock_bp) != bsize) {
		buf_brelse(oblock_bp);
		return -1;
	}

	// copy the journal data over top of it
	// ((char *)0 + x) is just an idiom to turn the uintptr_t from
	// buf_dataptr() into a usable byte pointer
	memcpy((char *)0 + buf_dataptr(oblock_bp), block_ptr, bsize);

	if ((ret = VNOP_BWRITE(oblock_bp)) != 0) {
		printf("jnl: %s: update_fs_block: failed to update block %lld (ret %d)\n", jnl->jdev_name, fs_block,ret);
		return ret;
	}

	// and now invalidate it so that if someone else wants to read
	// it in a different size they'll be able to do it.
	ret = buf_meta_bread(jnl->fsdev, (daddr64_t)fs_block, bsize, NOCRED, &oblock_bp);
	if (oblock_bp) {
		buf_markinvalid(oblock_bp);
		buf_brelse(oblock_bp);
	}

	return 0;
}
720
// Grow the coalescing bucket table to new_size entries, copying the
// existing num_buckets entries and marking the new slots free
// (block_num == -1).  Returns the new capacity, the old capacity if
// new_size is smaller, or -1 if allocation fails.
static int
grow_table(struct bucket **buf_ptr, int num_buckets, int new_size)
{
	struct bucket *newBuf;
	int current_size = num_buckets, i;

	// return if newsize is less than the current size
	if (new_size < num_buckets) {
		return current_size;
	}

	if ((MALLOC(newBuf, struct bucket *, new_size*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) {
		printf("jnl: grow_table: no memory to expand coalesce buffer!\n");
		return -1;
	}

	//  printf("jnl: lookup_bucket: expanded co_buf to %d elems\n", new_size);

	// copy existing elements
	bcopy(*buf_ptr, newBuf, num_buckets*sizeof(struct bucket));

	// initialize the new ones
	for(i=num_buckets; i < new_size; i++) {
		newBuf[i].block_num = (off_t)-1;
	}

	// free the old container
	FREE(*buf_ptr, M_TEMP);

	// reset the buf_ptr
	*buf_ptr = newBuf;

	return new_size;
}
755
// Binary-search the sorted bucket table for block_num.  Returns the
// index of the matching entry (the right-most one if there are
// duplicates), or the index at which a new entry for block_num
// should be inserted (0..num_full inclusive).
static int
lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full)
{
	int lo, hi, index, matches, i;

	if (num_full == 0) {
		return 0; // table is empty, so insert at index=0
	}

	lo = 0;
	hi = num_full - 1;
	index = -1;

	// perform binary search for block_num
	// note: hi is set to mid (not mid-1) on the "less than" side, so
	// the loop converges with lo == hi and the final candidate is
	// checked after the loop
	do {
		int mid = (hi - lo)/2 + lo;
		off_t this_num = (*buf_ptr)[mid].block_num;

		if (block_num == this_num) {
			index = mid;
			break;
		}

		if (block_num < this_num) {
			hi = mid;
			continue;
		}

		if (block_num > this_num) {
			lo = mid + 1;
			continue;
		}
	} while(lo < hi);

	// check if lo and hi converged on the match
	if (block_num == (*buf_ptr)[hi].block_num) {
		index = hi;
	}

	// if no existing entry found, find index for new one
	if (index == -1) {
		index = (block_num < (*buf_ptr)[hi].block_num) ? hi : hi + 1;
	} else {
		// make sure that we return the right-most index in the case of multiple matches
		matches = 0;
		i = index + 1;
		while(i < num_full && block_num == (*buf_ptr)[i].block_num) {
			matches++;
			i++;
		}

		index += matches;
	}

	return index;
}
812
// Put an entry at blk_index in the bucket table.  When overwriting is 0
// the table is grown if necessary and entries at/after blk_index are
// shifted right; when non-zero the slot is overwritten in place.
// Returns blk_index on success, -1 if the table could not be grown.
static int
insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr, int overwriting)
{
	if (!overwriting) {
		// grow the table if we're out of space
		if (*num_full_ptr >= *num_buckets_ptr) {
			int new_size = *num_buckets_ptr * 2;
			int grow_size = grow_table(buf_ptr, *num_buckets_ptr, new_size);

			if (grow_size < new_size) {
				printf("jnl: %s: add_block: grow_table returned an error!\n", jnl->jdev_name);
				return -1;
			}

			*num_buckets_ptr = grow_size; //update num_buckets to reflect the new size
		}

		// if we're not inserting at the end, we need to bcopy
		if (blk_index != *num_full_ptr) {
			bcopy( (*buf_ptr)+(blk_index), (*buf_ptr)+(blk_index+1), (*num_full_ptr-blk_index)*sizeof(struct bucket) );
		}

		(*num_full_ptr)++; // increment only if we're not overwriting
	}

	// sanity check the values we're about to add
	// (wrap offsets that run past the journal back around the header)
	if ((off_t)offset >= jnl->jhdr->size) {
		offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
	}
	// NOTE(review): size is a size_t, so "size <= 0" only catches
	// size == 0 -- a negative value passed by a caller would already
	// have wrapped to a huge unsigned number
	if (size <= 0) {
		panic("jnl: insert_block: bad size in insert_block (%zd)\n", size);
	}

	(*buf_ptr)[blk_index].block_num = num;
	(*buf_ptr)[blk_index].block_size = size;
	(*buf_ptr)[blk_index].jnl_offset = offset;
	(*buf_ptr)[blk_index].cksum = cksum;

	return blk_index;
}
853
// Resolve overlaps between a new replay range [block_num, block_num +
// size/jhdr_size) and the existing bucket entries around blk_index:
// the previous entry is truncated or split, fully-covered following
// entries are removed, and partially-covered ones are trimmed.
// Returns 1 if the new range should simply overwrite the entry at
// blk_index, 0 if it must be inserted as a new entry.
static int
do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, __unused size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr)
{
	int num_to_remove, index, i, overwrite, err;
	size_t jhdr_size = jnl->jhdr->jhdr_size, new_offset;
	off_t overlap, block_start, block_end;

	block_start = block_num*jhdr_size;
	block_end = block_start + size;
	overwrite = (block_num == (*buf_ptr)[blk_index].block_num && size >= (*buf_ptr)[blk_index].block_size);

	// first, eliminate any overlap with the previous entry
	if (blk_index != 0 && !overwrite) {
		off_t prev_block_start = (*buf_ptr)[blk_index-1].block_num*jhdr_size;
		off_t prev_block_end = prev_block_start + (*buf_ptr)[blk_index-1].block_size;
		overlap = prev_block_end - block_start;
		if (overlap > 0) {
			if (overlap % jhdr_size != 0) {
				panic("jnl: do_overlap: overlap with previous entry not a multiple of %zd\n", jhdr_size);
			}

			// if the previous entry completely overlaps this one, we need to break it into two pieces.
			if (prev_block_end > block_end) {
				off_t new_num = block_end / jhdr_size;
				size_t new_size = prev_block_end - block_end;

				new_offset = (*buf_ptr)[blk_index-1].jnl_offset + (block_end - prev_block_start);

				err = insert_block(jnl, buf_ptr, blk_index, new_num, new_size, new_offset, cksum, num_buckets_ptr, num_full_ptr, 0);
				if (err < 0) {
					panic("jnl: do_overlap: error inserting during pre-overlap\n");
				}
			}

			// Regardless, we need to truncate the previous entry to the beginning of the overlap
			(*buf_ptr)[blk_index-1].block_size = block_start - prev_block_start;
			(*buf_ptr)[blk_index-1].cksum = 0;   // have to blow it away because there's no way to check it
		}
	}

	// then, bail out fast if there's no overlap with the entries that follow
	if (!overwrite && block_end <= (off_t)((*buf_ptr)[blk_index].block_num*jhdr_size)) {
		return 0; // no overlap, no overwrite
	} else if (overwrite && (blk_index + 1 >= *num_full_ptr || block_end <= (off_t)((*buf_ptr)[blk_index+1].block_num*jhdr_size))) {

		(*buf_ptr)[blk_index].cksum = cksum;   // update this
		return 1; // simple overwrite
	}

	// Otherwise, find all cases of total and partial overlap. We use the special
	// block_num of -2 to designate entries that are completely overlapped and must
	// be eliminated. The block_num, size, and jnl_offset of partially overlapped
	// entries must be adjusted to keep the array consistent.
	index = blk_index;
	num_to_remove = 0;
	while(index < *num_full_ptr && block_end > (off_t)((*buf_ptr)[index].block_num*jhdr_size)) {
		if (block_end >= (off_t)(((*buf_ptr)[index].block_num*jhdr_size + (*buf_ptr)[index].block_size))) {
			(*buf_ptr)[index].block_num = -2; // mark this for deletion
			num_to_remove++;
		} else {
			overlap = block_end - (*buf_ptr)[index].block_num*jhdr_size;
			if (overlap > 0) {
				if (overlap % jhdr_size != 0) {
					panic("jnl: do_overlap: overlap of %lld is not multiple of %zd\n", overlap, jhdr_size);
				}

				// if we partially overlap this entry, adjust its block number, jnl offset, and size
				(*buf_ptr)[index].block_num += (overlap / jhdr_size); // make sure overlap is multiple of jhdr_size, or round up
				(*buf_ptr)[index].cksum = 0;

				new_offset = (*buf_ptr)[index].jnl_offset + overlap; // check for wrap-around
				if ((off_t)new_offset >= jnl->jhdr->size) {
					new_offset = jhdr_size + (new_offset - jnl->jhdr->size);
				}
				(*buf_ptr)[index].jnl_offset = new_offset;

				(*buf_ptr)[index].block_size -= overlap; // sanity check for negative value
				if ((*buf_ptr)[index].block_size <= 0) {
					panic("jnl: do_overlap: after overlap, new block size is invalid (%u)\n", (*buf_ptr)[index].block_size);
					// return -1; // if above panic is removed, return -1 for error
				}
			}

		}

		index++;
	}

	// bcopy over any completely overlapped entries, starting at the right (where the above loop broke out)
	index--; // start with the last index used within the above loop
	while(index >= blk_index) {
		if ((*buf_ptr)[index].block_num == -2) {
			if (index == *num_full_ptr-1) {
				(*buf_ptr)[index].block_num = -1; // it's the last item in the table... just mark as free
			} else {
				bcopy( (*buf_ptr)+(index+1), (*buf_ptr)+(index), (*num_full_ptr - (index + 1)) * sizeof(struct bucket) );
			}
			(*num_full_ptr)--;
		}
		index--;
	}

	// eliminate any stale entries at the end of the table
	for(i=*num_full_ptr; i < (*num_full_ptr + num_to_remove); i++) {
		(*buf_ptr)[i].block_num = -1;
	}

	return 0; // if we got this far, we need to insert the entry into the table (rather than overwrite)
}
963
964 // PR-3105942: Coalesce writes to the same block in journal replay
965 // We coalesce writes by maintaining a dynamic sorted array of physical disk blocks
966 // to be replayed and the corresponding location in the journal which contains
967 // the most recent data for those blocks. The array is "played" once the all the
968 // blocks in the journal have been coalesced. The code for the case of conflicting/
969 // overlapping writes to a single block is the most dense. Because coalescing can
970 // disrupt the existing time-ordering of blocks in the journal playback, care
971 // is taken to catch any overlaps and keep the array consistent.
972 static int
973 add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, __unused size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr)
974 {
975 int blk_index, overwriting;
976
977 // on return from lookup_bucket(), blk_index is the index into the table where block_num should be
978 // inserted (or the index of the elem to overwrite).
979 blk_index = lookup_bucket( buf_ptr, block_num, *num_full_ptr);
980
981 // check if the index is within bounds (if we're adding this block to the end of
982 // the table, blk_index will be equal to num_full)
983 if (blk_index < 0 || blk_index > *num_full_ptr) {
984 //printf("jnl: add_block: trouble adding block to co_buf\n");
985 return -1;
986 } // else printf("jnl: add_block: adding block 0x%llx at i=%d\n", block_num, blk_index);
987
988 // Determine whether we're overwriting an existing entry by checking for overlap
989 overwriting = do_overlap(jnl, buf_ptr, blk_index, block_num, size, offset, cksum, num_buckets_ptr, num_full_ptr);
990 if (overwriting < 0) {
991 return -1; // if we got an error, pass it along
992 }
993
994 // returns the index, or -1 on error
995 blk_index = insert_block(jnl, buf_ptr, blk_index, block_num, size, offset, cksum, num_buckets_ptr, num_full_ptr, overwriting);
996
997 return blk_index;
998 }
999
1000 static int
1001 replay_journal(journal *jnl)
1002 {
1003 int i, orig_checksum, checksum, check_block_checksums=0, bad_blocks=0;
1004 size_t ret;
1005 size_t max_bsize = 0; /* protected by block_ptr */
1006 block_list_header *blhdr;
1007 off_t offset, txn_start_offset=0, blhdr_offset, orig_jnl_start;
1008 char *buff, *block_ptr=NULL;
1009 struct bucket *co_buf;
1010 int num_buckets = STARTING_BUCKETS, num_full, check_past_jnl_end = 1, in_uncharted_territory=0;
1011 uint32_t last_sequence_num = 0;
1012
1013 // wrap the start ptr if it points to the very end of the journal
1014 if (jnl->jhdr->start == jnl->jhdr->size) {
1015 jnl->jhdr->start = jnl->jhdr->jhdr_size;
1016 }
1017 if (jnl->jhdr->end == jnl->jhdr->size) {
1018 jnl->jhdr->end = jnl->jhdr->jhdr_size;
1019 }
1020
1021 if (jnl->jhdr->start == jnl->jhdr->end) {
1022 return 0;
1023 }
1024
1025 orig_jnl_start = jnl->jhdr->start;
1026
1027 // allocate memory for the header_block. we'll read each blhdr into this
1028 if (kmem_alloc(kernel_map, (vm_offset_t *)&buff, jnl->jhdr->blhdr_size)) {
1029 printf("jnl: %s: replay_journal: no memory for block buffer! (%d bytes)\n",
1030 jnl->jdev_name, jnl->jhdr->blhdr_size);
1031 return -1;
1032 }
1033
1034 // allocate memory for the coalesce buffer
1035 if ((MALLOC(co_buf, struct bucket *, num_buckets*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) {
1036 printf("jnl: %s: replay_journal: no memory for coalesce buffer!\n", jnl->jdev_name);
1037 return -1;
1038 }
1039
1040 restart_replay:
1041
1042 // initialize entries
1043 for(i=0; i < num_buckets; i++) {
1044 co_buf[i].block_num = -1;
1045 }
1046 num_full = 0; // empty at first
1047
1048
1049 printf("jnl: %s: replay_journal: from: %lld to: %lld (joffset 0x%llx)\n",
1050 jnl->jdev_name, jnl->jhdr->start, jnl->jhdr->end, jnl->jdev_offset);
1051
1052 while(check_past_jnl_end || jnl->jhdr->start != jnl->jhdr->end) {
1053 offset = blhdr_offset = jnl->jhdr->start;
1054 ret = read_journal_data(jnl, &offset, buff, jnl->jhdr->blhdr_size);
1055 if (ret != (size_t)jnl->jhdr->blhdr_size) {
1056 printf("jnl: %s: replay_journal: Could not read block list header block @ 0x%llx!\n", jnl->jdev_name, offset);
1057 bad_blocks = 1;
1058 goto bad_txn_handling;
1059 }
1060
1061 blhdr = (block_list_header *)buff;
1062
1063 orig_checksum = blhdr->checksum;
1064 blhdr->checksum = 0;
1065 if (jnl->flags & JOURNAL_NEED_SWAP) {
1066 // calculate the checksum based on the unswapped data
1067 // because it is done byte-at-a-time.
1068 orig_checksum = SWAP32(orig_checksum);
1069 checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
1070 swap_block_list_header(jnl, blhdr);
1071 } else {
1072 checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
1073 }
1074
1075
1076 //
1077 // XXXdbg - if these checks fail, we should replay as much
1078 // we can in the hopes that it will still leave the
1079 // drive in a better state than if we didn't replay
1080 // anything
1081 //
1082 if (checksum != orig_checksum) {
1083 if (check_past_jnl_end && in_uncharted_territory) {
1084
1085 if (blhdr_offset != jnl->jhdr->end) {
1086 printf("jnl: %s: Extra txn replay stopped @ %lld / 0x%llx\n", jnl->jdev_name, blhdr_offset, blhdr_offset);
1087 }
1088
1089 check_past_jnl_end = 0;
1090 jnl->jhdr->end = blhdr_offset;
1091 continue;
1092 }
1093
1094 printf("jnl: %s: replay_journal: bad block list header @ 0x%llx (checksum 0x%x != 0x%x)\n",
1095 jnl->jdev_name, blhdr_offset, orig_checksum, checksum);
1096
1097 if (blhdr_offset == orig_jnl_start) {
1098 // if there's nothing in the journal at all, just bail out altogether.
1099 goto bad_replay;
1100 }
1101
1102 bad_blocks = 1;
1103 goto bad_txn_handling;
1104 }
1105
1106 if ( (last_sequence_num != 0)
1107 && (blhdr->binfo[0].u.bi.b.sequence_num != 0)
1108 && (blhdr->binfo[0].u.bi.b.sequence_num != last_sequence_num)
1109 && (blhdr->binfo[0].u.bi.b.sequence_num != last_sequence_num+1)) {
1110
1111 txn_start_offset = jnl->jhdr->end = blhdr_offset;
1112
1113 if (check_past_jnl_end) {
1114 check_past_jnl_end = 0;
1115 printf("jnl: %s: 2: extra replay stopped @ %lld / 0x%llx (seq %d < %d)\n",
1116 jnl->jdev_name, blhdr_offset, blhdr_offset, blhdr->binfo[0].u.bi.b.sequence_num, last_sequence_num);
1117 continue;
1118 }
1119
1120 printf("jnl: %s: txn sequence numbers out of order in txn @ %lld / %llx! (%d < %d)\n",
1121 jnl->jdev_name, blhdr_offset, blhdr_offset, blhdr->binfo[0].u.bi.b.sequence_num, last_sequence_num);
1122 bad_blocks = 1;
1123 goto bad_txn_handling;
1124 }
1125 last_sequence_num = blhdr->binfo[0].u.bi.b.sequence_num;
1126
1127 if (blhdr_offset >= jnl->jhdr->end && jnl->jhdr->start <= jnl->jhdr->end) {
1128 if (last_sequence_num == 0) {
1129 check_past_jnl_end = 0;
1130 printf("jnl: %s: pre-sequence-num-enabled txn's - can not go further than end (%lld %lld).\n",
1131 jnl->jdev_name, jnl->jhdr->start, jnl->jhdr->end);
1132 if (jnl->jhdr->start != jnl->jhdr->end) {
1133 jnl->jhdr->start = jnl->jhdr->end;
1134 }
1135 continue;
1136 }
1137 printf("jnl: %s: examining extra transactions starting @ %lld / 0x%llx\n", jnl->jdev_name, blhdr_offset, blhdr_offset);
1138 }
1139
1140 if ( blhdr->max_blocks <= 0 || blhdr->max_blocks > (jnl->jhdr->size/jnl->jhdr->jhdr_size)
1141 || blhdr->num_blocks <= 0 || blhdr->num_blocks > blhdr->max_blocks) {
1142 printf("jnl: %s: replay_journal: bad looking journal entry: max: %d num: %d\n",
1143 jnl->jdev_name, blhdr->max_blocks, blhdr->num_blocks);
1144 bad_blocks = 1;
1145 goto bad_txn_handling;
1146 }
1147
1148 max_bsize = 0;
1149 for(i=1; i < blhdr->num_blocks; i++) {
1150 if (blhdr->binfo[i].bnum < 0 && blhdr->binfo[i].bnum != (off_t)-1) {
1151 printf("jnl: %s: replay_journal: bogus block number 0x%llx\n", jnl->jdev_name, blhdr->binfo[i].bnum);
1152 bad_blocks = 1;
1153 goto bad_txn_handling;
1154 }
1155
1156 if ((size_t)blhdr->binfo[i].u.bi.bsize > max_bsize) {
1157 max_bsize = blhdr->binfo[i].u.bi.bsize;
1158 }
1159 }
1160
1161 if (blhdr->flags & BLHDR_CHECK_CHECKSUMS) {
1162 check_block_checksums = 1;
1163 if (kmem_alloc(kernel_map, (vm_offset_t *)&block_ptr, max_bsize)) {
1164 goto bad_replay;
1165 }
1166 } else {
1167 block_ptr = NULL;
1168 }
1169
1170 if (blhdr->flags & BLHDR_FIRST_HEADER) {
1171 txn_start_offset = blhdr_offset;
1172 }
1173
1174 //printf("jnl: replay_journal: adding %d blocks in journal entry @ 0x%llx to co_buf\n",
1175 // blhdr->num_blocks-1, jnl->jhdr->start);
1176 bad_blocks = 0;
1177 for(i=1; i < blhdr->num_blocks; i++) {
1178 int size, ret_val;
1179 off_t number;
1180
1181 size = blhdr->binfo[i].u.bi.bsize;
1182 number = blhdr->binfo[i].bnum;
1183
1184 // don't add "killed" blocks
1185 if (number == (off_t)-1) {
1186 //printf("jnl: replay_journal: skipping killed fs block (index %d)\n", i);
1187 } else {
1188
1189 if (check_block_checksums) {
1190 int32_t disk_cksum;
1191 off_t block_offset;
1192
1193 block_offset = offset;
1194
1195 // read the block so we can check the checksum
1196 ret = read_journal_data(jnl, &block_offset, block_ptr, size);
1197 if (ret != (size_t)size) {
1198 printf("jnl: %s: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", jnl->jdev_name, offset);
1199 bad_blocks = 1;
1200 goto bad_txn_handling;
1201 }
1202
1203 disk_cksum = calc_checksum(block_ptr, size);
1204
1205 // there is no need to swap the checksum from disk because
1206 // it got swapped when the blhdr was read in.
1207 if (blhdr->binfo[i].u.bi.b.cksum != 0 && disk_cksum != blhdr->binfo[i].u.bi.b.cksum) {
1208 printf("jnl: %s: txn starting at %lld (%lld) @ index %3d bnum %lld (%d) with disk cksum != blhdr cksum (0x%.8x 0x%.8x)\n",
1209 jnl->jdev_name, txn_start_offset, blhdr_offset, i, number, size, disk_cksum, blhdr->binfo[i].u.bi.b.cksum);
1210 printf("jnl: 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x\n",
1211 *(int *)&block_ptr[0*sizeof(int)], *(int *)&block_ptr[1*sizeof(int)], *(int *)&block_ptr[2*sizeof(int)], *(int *)&block_ptr[3*sizeof(int)],
1212 *(int *)&block_ptr[4*sizeof(int)], *(int *)&block_ptr[5*sizeof(int)], *(int *)&block_ptr[6*sizeof(int)], *(int *)&block_ptr[7*sizeof(int)]);
1213
1214 bad_blocks = 1;
1215 goto bad_txn_handling;
1216 }
1217 }
1218
1219
1220 // add this bucket to co_buf, coalescing where possible
1221 // printf("jnl: replay_journal: adding block 0x%llx\n", number);
1222 ret_val = add_block(jnl, &co_buf, number, size, (size_t) offset, blhdr->binfo[i].u.bi.b.cksum, &num_buckets, &num_full);
1223
1224 if (ret_val == -1) {
1225 printf("jnl: %s: replay_journal: trouble adding block to co_buf\n", jnl->jdev_name);
1226 goto bad_replay;
1227 } // else printf("jnl: replay_journal: added block 0x%llx at i=%d\n", number);
1228 }
1229
1230 // increment offset
1231 offset += size;
1232
1233 // check if the last block added puts us off the end of the jnl.
1234 // if so, we need to wrap to the beginning and take any remainder
1235 // into account
1236 //
1237 if (offset >= jnl->jhdr->size) {
1238 offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
1239 }
1240 }
1241
1242 if (block_ptr) {
1243 kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize);
1244 block_ptr = NULL;
1245 }
1246
1247 bad_txn_handling:
1248 if (bad_blocks) {
1249 if (txn_start_offset == 0) {
1250 printf("jnl: %s: no known good txn start offset! aborting journal replay.\n", jnl->jdev_name);
1251 goto bad_replay;
1252 }
1253
1254 jnl->jhdr->start = orig_jnl_start;
1255 jnl->jhdr->end = txn_start_offset;
1256 check_past_jnl_end = 0;
1257 last_sequence_num = 0;
1258 printf("jnl: %s: restarting journal replay (%lld - %lld)!\n", jnl->jdev_name, jnl->jhdr->start, jnl->jhdr->end);
1259 goto restart_replay;
1260 }
1261
1262 jnl->jhdr->start += blhdr->bytes_used;
1263 if (jnl->jhdr->start >= jnl->jhdr->size) {
1264 // wrap around and skip the journal header block
1265 jnl->jhdr->start = (jnl->jhdr->start % jnl->jhdr->size) + jnl->jhdr->jhdr_size;
1266 }
1267
1268 if (jnl->jhdr->start == jnl->jhdr->end) {
1269 in_uncharted_territory = 1;
1270 }
1271 }
1272
1273 if (jnl->jhdr->start != jnl->jhdr->end) {
1274 printf("jnl: %s: start %lld != end %lld. resetting end.\n", jnl->jdev_name, jnl->jhdr->start, jnl->jhdr->end);
1275 jnl->jhdr->end = jnl->jhdr->start;
1276 }
1277
1278 //printf("jnl: replay_journal: replaying %d blocks\n", num_full);
1279
1280 /*
1281 * make sure it's at least one page in size, so
1282 * start max_bsize at PAGE_SIZE
1283 */
1284 for (i = 0, max_bsize = PAGE_SIZE; i < num_full; i++) {
1285
1286 if (co_buf[i].block_num == (off_t)-1)
1287 continue;
1288
1289 if (co_buf[i].block_size > max_bsize)
1290 max_bsize = co_buf[i].block_size;
1291 }
1292 /*
1293 * round max_bsize up to the nearest PAGE_SIZE multiple
1294 */
1295 if (max_bsize & (PAGE_SIZE - 1)) {
1296 max_bsize = (max_bsize + PAGE_SIZE) & ~(PAGE_SIZE - 1);
1297 }
1298
1299 if (kmem_alloc(kernel_map, (vm_offset_t *)&block_ptr, max_bsize)) {
1300 goto bad_replay;
1301 }
1302
1303 // Replay the coalesced entries in the co-buf
1304 for(i=0; i < num_full; i++) {
1305 size_t size = co_buf[i].block_size;
1306 off_t jnl_offset = (off_t) co_buf[i].jnl_offset;
1307 off_t number = co_buf[i].block_num;
1308
1309
1310 // printf("replaying co_buf[%d]: block 0x%llx, size 0x%x, jnl_offset 0x%llx\n", i, co_buf[i].block_num,
1311 // co_buf[i].block_size, co_buf[i].jnl_offset);
1312
1313 if (number == (off_t)-1) {
1314 // printf("jnl: replay_journal: skipping killed fs block\n");
1315 } else {
1316
1317 // do journal read, and set the phys. block
1318 ret = read_journal_data(jnl, &jnl_offset, block_ptr, size);
1319 if (ret != size) {
1320 printf("jnl: %s: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", jnl->jdev_name, offset);
1321 goto bad_replay;
1322 }
1323
1324 if (update_fs_block(jnl, block_ptr, number, size) != 0) {
1325 goto bad_replay;
1326 }
1327 }
1328 }
1329
1330
1331 // done replaying; update jnl header
1332 if (write_journal_header(jnl, 1) != 0) {
1333 goto bad_replay;
1334 }
1335
1336 printf("jnl: %s: journal replay done.\n", jnl->jdev_name);
1337
1338 // free block_ptr
1339 if (block_ptr) {
1340 kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize);
1341 block_ptr = NULL;
1342 }
1343
1344 // free the coalesce buffer
1345 FREE(co_buf, M_TEMP);
1346 co_buf = NULL;
1347
1348 kmem_free(kernel_map, (vm_offset_t)buff, jnl->jhdr->blhdr_size);
1349 return 0;
1350
1351 bad_replay:
1352 if (block_ptr) {
1353 kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize);
1354 }
1355 if (co_buf) {
1356 FREE(co_buf, M_TEMP);
1357 }
1358 kmem_free(kernel_map, (vm_offset_t)buff, jnl->jhdr->blhdr_size);
1359
1360 return -1;
1361 }
1362
1363
// bounds on the per-transaction buffer size, in bytes (see size_up_tbuffer())
#define DEFAULT_TRANSACTION_BUFFER_SIZE (128*1024)
#define MAX_TRANSACTION_BUFFER_SIZE (2048*1024)

// XXXdbg - so I can change it in the debugger
// 0 means "not yet computed"; size_up_tbuffer() derives it from mem_size once
int def_tbuffer_size = 0;
1369
1370
1371 //
1372 // This function sets the size of the tbuffer and the
1373 // size of the blhdr. It assumes that jnl->jhdr->size
1374 // and jnl->jhdr->jhdr_size are already valid.
1375 //
1376 static void
1377 size_up_tbuffer(journal *jnl, int tbuffer_size, int phys_blksz)
1378 {
1379 //
1380 // one-time initialization based on how much memory
1381 // there is in the machine.
1382 //
1383 if (def_tbuffer_size == 0) {
1384 if (mem_size < (256*1024*1024)) {
1385 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE;
1386 } else if (mem_size < (512*1024*1024)) {
1387 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 2;
1388 } else if (mem_size < (1024*1024*1024)) {
1389 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 3;
1390 } else {
1391 def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * (mem_size / (256*1024*1024));
1392 }
1393 }
1394
1395 // size up the transaction buffer... can't be larger than the number
1396 // of blocks that can fit in a block_list_header block.
1397 if (tbuffer_size == 0) {
1398 jnl->tbuffer_size = def_tbuffer_size;
1399 } else {
1400 // make sure that the specified tbuffer_size isn't too small
1401 if (tbuffer_size < jnl->jhdr->blhdr_size * 2) {
1402 tbuffer_size = jnl->jhdr->blhdr_size * 2;
1403 }
1404 // and make sure it's an even multiple of the block size
1405 if ((tbuffer_size % jnl->jhdr->jhdr_size) != 0) {
1406 tbuffer_size -= (tbuffer_size % jnl->jhdr->jhdr_size);
1407 }
1408
1409 jnl->tbuffer_size = tbuffer_size;
1410 }
1411
1412 if (jnl->tbuffer_size > (jnl->jhdr->size / 2)) {
1413 jnl->tbuffer_size = (jnl->jhdr->size / 2);
1414 }
1415
1416 if (jnl->tbuffer_size > MAX_TRANSACTION_BUFFER_SIZE) {
1417 jnl->tbuffer_size = MAX_TRANSACTION_BUFFER_SIZE;
1418 }
1419
1420 jnl->jhdr->blhdr_size = (jnl->tbuffer_size / jnl->jhdr->jhdr_size) * sizeof(block_info);
1421 if (jnl->jhdr->blhdr_size < phys_blksz) {
1422 jnl->jhdr->blhdr_size = phys_blksz;
1423 } else if ((jnl->jhdr->blhdr_size % phys_blksz) != 0) {
1424 // have to round up so we're an even multiple of the physical block size
1425 jnl->jhdr->blhdr_size = (jnl->jhdr->blhdr_size + (phys_blksz - 1)) & ~(phys_blksz - 1);
1426 }
1427 }
1428
1429
1430
1431 static void
1432 get_io_info(struct vnode *devvp, size_t phys_blksz, journal *jnl, struct vfs_context *context)
1433 {
1434 off_t readblockcnt;
1435 off_t writeblockcnt;
1436 off_t readmaxcnt=0, tmp_readmaxcnt;
1437 off_t writemaxcnt=0, tmp_writemaxcnt;
1438 off_t readsegcnt, writesegcnt;
1439 int32_t features;
1440
1441 if (VNOP_IOCTL(devvp, DKIOCGETFEATURES, (caddr_t)&features, 0, context) == 0) {
1442 if (features & DK_FEATURE_FORCE_UNIT_ACCESS) {
1443 const char *name = vnode_name(devvp);
1444 jnl->flags |= JOURNAL_DO_FUA_WRITES;
1445 printf("jnl: %s: enabling FUA writes (features 0x%x)\n", name ? name : "no-name-dev", features);
1446 }
1447 }
1448
1449 //
1450 // First check the max read size via several different mechanisms...
1451 //
1452 VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD, (caddr_t)&readmaxcnt, 0, context);
1453
1454 if (VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD, (caddr_t)&readblockcnt, 0, context) == 0) {
1455 tmp_readmaxcnt = readblockcnt * phys_blksz;
1456 if (readmaxcnt == 0 || (readblockcnt > 0 && tmp_readmaxcnt < readmaxcnt)) {
1457 readmaxcnt = tmp_readmaxcnt;
1458 }
1459 }
1460
1461 if (VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD, (caddr_t)&readsegcnt, 0, context)) {
1462 readsegcnt = 0;
1463 }
1464
1465 if (readsegcnt > 0 && (readsegcnt * PAGE_SIZE) < readmaxcnt) {
1466 readmaxcnt = readsegcnt * PAGE_SIZE;
1467 }
1468
1469 if (readmaxcnt == 0) {
1470 readmaxcnt = 128 * 1024;
1471 } else if (readmaxcnt > UINT32_MAX) {
1472 readmaxcnt = UINT32_MAX;
1473 }
1474
1475
1476 //
1477 // Now check the max writes size via several different mechanisms...
1478 //
1479 VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE, (caddr_t)&writemaxcnt, 0, context);
1480
1481 if (VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE, (caddr_t)&writeblockcnt, 0, context) == 0) {
1482 tmp_writemaxcnt = writeblockcnt * phys_blksz;
1483 if (writemaxcnt == 0 || (writeblockcnt > 0 && tmp_writemaxcnt < writemaxcnt)) {
1484 writemaxcnt = tmp_writemaxcnt;
1485 }
1486 }
1487
1488 if (VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE, (caddr_t)&writesegcnt, 0, context)) {
1489 writesegcnt = 0;
1490 }
1491
1492 if (writesegcnt > 0 && (writesegcnt * PAGE_SIZE) < writemaxcnt) {
1493 writemaxcnt = writesegcnt * PAGE_SIZE;
1494 }
1495
1496 if (writemaxcnt == 0) {
1497 writemaxcnt = 128 * 1024;
1498 } else if (writemaxcnt > UINT32_MAX) {
1499 writemaxcnt = UINT32_MAX;
1500 }
1501
1502 jnl->max_read_size = readmaxcnt;
1503 jnl->max_write_size = writemaxcnt;
1504 // printf("jnl: %s: max read/write: %lld k / %lld k\n",
1505 // jnl->jdev_name ? jnl->jdev_name : "unknown",
1506 // jnl->max_read_size/1024, jnl->max_write_size/1024);
1507 }
1508
1509
//
// Return a refcounted copy of the journal device's name, or a refcounted
// "unknown-dev" placeholder when the vnode has no name.  The caller owns
// the returned reference and must release it with vfs_removename().
//
static const char *
get_jdev_name(struct vnode *jvp)
{
	const char *name = vnode_name(jvp);

	if (name == NULL) {
		// no name on the vnode; hand back a refcounted placeholder
		name = vfs_addname("unknown-dev", strlen("unknown-dev"), 0, 0);
	} else {
		// bump the refcount so we hold our own copy of the name
		name = vfs_addname(name, strlen(name), 0, 0);
	}

	return name;
}
1525
1526
1527 journal *
1528 journal_create(struct vnode *jvp,
1529 off_t offset,
1530 off_t journal_size,
1531 struct vnode *fsvp,
1532 size_t min_fs_blksz,
1533 int32_t flags,
1534 int32_t tbuffer_size,
1535 void (*flush)(void *arg),
1536 void *arg)
1537 {
1538 journal *jnl;
1539 uint32_t phys_blksz, new_txn_base;
1540 struct vfs_context context;
1541 const char *jdev_name;
1542
1543 context.vc_thread = current_thread();
1544 context.vc_ucred = FSCRED;
1545
1546 jdev_name = get_jdev_name(jvp);
1547
1548 /* Get the real physical block size. */
1549 if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
1550 return NULL;
1551 }
1552
1553 if (journal_size < (256*1024) || journal_size > (1024*1024*1024)) {
1554 printf("jnl: create: journal size %lld looks bogus.\n", journal_size);
1555 return NULL;
1556 }
1557
1558 if (phys_blksz > min_fs_blksz) {
1559 printf("jnl: %s: create: error: phys blksize %u bigger than min fs blksize %zd\n",
1560 jdev_name, phys_blksz, min_fs_blksz);
1561 return NULL;
1562 }
1563
1564 if ((journal_size % phys_blksz) != 0) {
1565 printf("jnl: %s: create: journal size 0x%llx is not an even multiple of block size 0x%ux\n",
1566 jdev_name, journal_size, phys_blksz);
1567 return NULL;
1568 }
1569
1570
1571 MALLOC_ZONE(jnl, struct journal *, sizeof(struct journal), M_JNL_JNL, M_WAITOK);
1572 memset(jnl, 0, sizeof(*jnl));
1573
1574 jnl->jdev = jvp;
1575 jnl->jdev_offset = offset;
1576 jnl->fsdev = fsvp;
1577 jnl->flush = flush;
1578 jnl->flush_arg = arg;
1579 jnl->flags = (flags & JOURNAL_OPTION_FLAGS_MASK);
1580 jnl->jdev_name = jdev_name;
1581 lck_mtx_init(&jnl->old_start_lock, jnl_mutex_group, jnl_lock_attr);
1582
1583 get_io_info(jvp, phys_blksz, jnl, &context);
1584
1585 if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl->header_buf, phys_blksz)) {
1586 printf("jnl: %s: create: could not allocate space for header buffer (%u bytes)\n", jdev_name, phys_blksz);
1587 goto bad_kmem_alloc;
1588 }
1589 jnl->header_buf_size = phys_blksz;
1590
1591 jnl->jhdr = (journal_header *)jnl->header_buf;
1592 memset(jnl->jhdr, 0, sizeof(journal_header));
1593
1594 // we have to set this up here so that do_journal_io() will work
1595 jnl->jhdr->jhdr_size = phys_blksz;
1596
1597 //
1598 // We try and read the journal header to see if there is already one
1599 // out there. If there is, it's possible that it has transactions
1600 // in it that we might replay if we happen to pick a sequence number
1601 // that is a little less than the old one, there is a crash and the
1602 // last txn written ends right at the start of a txn from the previous
1603 // incarnation of this file system. If all that happens we would
1604 // replay the transactions from the old file system and that would
1605 // destroy your disk. Although it is extremely unlikely for all those
1606 // conditions to happen, the probability is non-zero and the result is
1607 // severe - you lose your file system. Therefore if we find a valid
1608 // journal header and the sequence number is non-zero we write junk
1609 // over the entire journal so that there is no way we will encounter
1610 // any old transactions. This is slow but should be a rare event
1611 // since most tools erase the journal.
1612 //
1613 if ( read_journal_header(jnl, jnl->jhdr, phys_blksz) == phys_blksz
1614 && jnl->jhdr->magic == JOURNAL_HEADER_MAGIC
1615 && jnl->jhdr->sequence_num != 0) {
1616
1617 new_txn_base = (jnl->jhdr->sequence_num + (journal_size / phys_blksz) + (random() % 16384)) & 0x00ffffff;
1618 printf("jnl: create: avoiding old sequence number 0x%x (0x%x)\n", jnl->jhdr->sequence_num, new_txn_base);
1619
1620 #if 0
1621 int i;
1622 off_t pos=0;
1623
1624 for(i=1; i < journal_size / phys_blksz; i++) {
1625 pos = i*phys_blksz;
1626
1627 // we don't really care what data we write just so long
1628 // as it's not a valid transaction header. since we have
1629 // the header_buf sitting around we'll use that.
1630 write_journal_data(jnl, &pos, jnl->header_buf, phys_blksz);
1631 }
1632 printf("jnl: create: done clearing journal (i=%d)\n", i);
1633 #endif
1634 } else {
1635 new_txn_base = random() & 0x00ffffff;
1636 }
1637
1638 memset(jnl->header_buf, 0, phys_blksz);
1639
1640 jnl->jhdr->magic = JOURNAL_HEADER_MAGIC;
1641 jnl->jhdr->endian = ENDIAN_MAGIC;
1642 jnl->jhdr->start = phys_blksz; // start at block #1, block #0 is for the jhdr itself
1643 jnl->jhdr->end = phys_blksz;
1644 jnl->jhdr->size = journal_size;
1645 jnl->jhdr->jhdr_size = phys_blksz;
1646 size_up_tbuffer(jnl, tbuffer_size, phys_blksz);
1647
1648 jnl->active_start = jnl->jhdr->start;
1649
1650 // XXXdbg - for testing you can force the journal to wrap around
1651 // jnl->jhdr->start = jnl->jhdr->size - (phys_blksz*3);
1652 // jnl->jhdr->end = jnl->jhdr->size - (phys_blksz*3);
1653
1654 jnl->jhdr->sequence_num = new_txn_base;
1655
1656 lck_mtx_init(&jnl->jlock, jnl_mutex_group, jnl_lock_attr);
1657
1658 if (write_journal_header(jnl, 1) != 0) {
1659 printf("jnl: %s: journal_create: failed to write journal header.\n", jdev_name);
1660 goto bad_write;
1661 }
1662
1663 return jnl;
1664
1665
1666 bad_write:
1667 kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz);
1668 bad_kmem_alloc:
1669 if (jdev_name) {
1670 vfs_removename(jdev_name);
1671 }
1672 jnl->jhdr = NULL;
1673 FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
1674 return NULL;
1675 }
1676
1677
1678 journal *
1679 journal_open(struct vnode *jvp,
1680 off_t offset,
1681 off_t journal_size,
1682 struct vnode *fsvp,
1683 size_t min_fs_blksz,
1684 int32_t flags,
1685 int32_t tbuffer_size,
1686 void (*flush)(void *arg),
1687 void *arg)
1688 {
1689 journal *jnl;
1690 uint32_t orig_blksz=0;
1691 uint32_t phys_blksz;
1692 int orig_checksum, checksum;
1693 struct vfs_context context;
1694 const char *jdev_name = get_jdev_name(jvp);
1695
1696 context.vc_thread = current_thread();
1697 context.vc_ucred = FSCRED;
1698
1699 /* Get the real physical block size. */
1700 if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
1701 return NULL;
1702 }
1703
1704 if (phys_blksz > min_fs_blksz) {
1705 printf("jnl: %s: open: error: phys blksize %u bigger than min fs blksize %zd\n",
1706 jdev_name, phys_blksz, min_fs_blksz);
1707 return NULL;
1708 }
1709
1710 if (journal_size < (256*1024) || journal_size > (1024*1024*1024)) {
1711 printf("jnl: open: journal size %lld looks bogus.\n", journal_size);
1712 return NULL;
1713 }
1714
1715 if ((journal_size % phys_blksz) != 0) {
1716 printf("jnl: %s: open: journal size 0x%llx is not an even multiple of block size 0x%x\n",
1717 jdev_name, journal_size, phys_blksz);
1718 return NULL;
1719 }
1720
1721 MALLOC_ZONE(jnl, struct journal *, sizeof(struct journal), M_JNL_JNL, M_WAITOK);
1722 memset(jnl, 0, sizeof(*jnl));
1723
1724 jnl->jdev = jvp;
1725 jnl->jdev_offset = offset;
1726 jnl->fsdev = fsvp;
1727 jnl->flush = flush;
1728 jnl->flush_arg = arg;
1729 jnl->flags = (flags & JOURNAL_OPTION_FLAGS_MASK);
1730 jnl->jdev_name = jdev_name;
1731 lck_mtx_init(&jnl->old_start_lock, jnl_mutex_group, jnl_lock_attr);
1732
1733 get_io_info(jvp, phys_blksz, jnl, &context);
1734
1735 if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl->header_buf, phys_blksz)) {
1736 printf("jnl: %s: create: could not allocate space for header buffer (%u bytes)\n", jdev_name, phys_blksz);
1737 goto bad_kmem_alloc;
1738 }
1739 jnl->header_buf_size = phys_blksz;
1740
1741 jnl->jhdr = (journal_header *)jnl->header_buf;
1742 memset(jnl->jhdr, 0, sizeof(journal_header));
1743
1744 // we have to set this up here so that do_journal_io() will work
1745 jnl->jhdr->jhdr_size = phys_blksz;
1746
1747 if (read_journal_header(jnl, jnl->jhdr, phys_blksz) != phys_blksz) {
1748 printf("jnl: %s: open: could not read %u bytes for the journal header.\n",
1749 jdev_name, phys_blksz);
1750 goto bad_journal;
1751 }
1752
1753 orig_checksum = jnl->jhdr->checksum;
1754 jnl->jhdr->checksum = 0;
1755
1756 if (jnl->jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
1757 // do this before the swap since it's done byte-at-a-time
1758 orig_checksum = SWAP32(orig_checksum);
1759 checksum = calc_checksum((char *)jnl->jhdr, JOURNAL_HEADER_CKSUM_SIZE);
1760 swap_journal_header(jnl);
1761 jnl->flags |= JOURNAL_NEED_SWAP;
1762 } else {
1763 checksum = calc_checksum((char *)jnl->jhdr, JOURNAL_HEADER_CKSUM_SIZE);
1764 }
1765
1766 if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC && jnl->jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
1767 printf("jnl: %s: open: journal magic is bad (0x%x != 0x%x)\n",
1768 jnl->jdev_name, jnl->jhdr->magic, JOURNAL_HEADER_MAGIC);
1769 goto bad_journal;
1770 }
1771
1772 // only check if we're the current journal header magic value
1773 if (jnl->jhdr->magic == JOURNAL_HEADER_MAGIC) {
1774
1775 if (orig_checksum != checksum) {
1776 printf("jnl: %s: open: journal checksum is bad (0x%x != 0x%x)\n",
1777 jdev_name, orig_checksum, checksum);
1778
1779 //goto bad_journal;
1780 }
1781 }
1782
1783 // XXXdbg - convert old style magic numbers to the new one
1784 if (jnl->jhdr->magic == OLD_JOURNAL_HEADER_MAGIC) {
1785 jnl->jhdr->magic = JOURNAL_HEADER_MAGIC;
1786 }
1787
1788 if (phys_blksz != (size_t)jnl->jhdr->jhdr_size && jnl->jhdr->jhdr_size != 0) {
1789 /*
1790 * The volume has probably been resized (such that we had to adjust the
1791 * logical sector size), or copied to media with a different logical
1792 * sector size. If the journal is empty, then just switch to the
1793 * current logical sector size. If the journal is not empty, then
1794 * fail to open the journal.
1795 */
1796
1797 if (jnl->jhdr->start == jnl->jhdr->end) {
1798 printf("jnl: %s: open: changing journal header size from %d to %u\n",
1799 jdev_name, jnl->jhdr->jhdr_size, phys_blksz);
1800 jnl->jhdr->jhdr_size = phys_blksz;
1801 if (write_journal_header(jnl, 1)) {
1802 printf("jnl: %s: open: failed to update journal header size\n", jdev_name);
1803 goto bad_journal;
1804 }
1805 } else {
1806 printf("jnl: %s: open: phys_blksz %u does not match journal header size %d, and journal is not empty!\n",
1807 jdev_name, phys_blksz, jnl->jhdr->jhdr_size);
1808 goto bad_journal;
1809 }
1810 }
1811
1812 if ( jnl->jhdr->start <= 0
1813 || jnl->jhdr->start > jnl->jhdr->size
1814 || jnl->jhdr->start > 1024*1024*1024) {
1815 printf("jnl: %s: open: jhdr start looks bad (0x%llx max size 0x%llx)\n",
1816 jdev_name, jnl->jhdr->start, jnl->jhdr->size);
1817 goto bad_journal;
1818 }
1819
1820 if ( jnl->jhdr->end <= 0
1821 || jnl->jhdr->end > jnl->jhdr->size
1822 || jnl->jhdr->end > 1024*1024*1024) {
1823 printf("jnl: %s: open: jhdr end looks bad (0x%llx max size 0x%llx)\n",
1824 jdev_name, jnl->jhdr->end, jnl->jhdr->size);
1825 goto bad_journal;
1826 }
1827
1828 if (jnl->jhdr->size < (256*1024) || jnl->jhdr->size > 1024*1024*1024) {
1829 printf("jnl: %s: open: jhdr size looks bad (0x%llx)\n", jdev_name, jnl->jhdr->size);
1830 goto bad_journal;
1831 }
1832
1833 // XXXdbg - can't do these checks because hfs writes all kinds of
1834 // non-uniform sized blocks even on devices that have a block size
1835 // that is larger than 512 bytes (i.e. optical media w/2k blocks).
1836 // therefore these checks will fail and so we just have to punt and
1837 // do more relaxed checking...
1838 // XXXdbg if ((jnl->jhdr->start % jnl->jhdr->jhdr_size) != 0) {
1839 if ((jnl->jhdr->start % 512) != 0) {
1840 printf("jnl: %s: open: journal start (0x%llx) not a multiple of 512?\n",
1841 jdev_name, jnl->jhdr->start);
1842 goto bad_journal;
1843 }
1844
1845 //XXXdbg if ((jnl->jhdr->end % jnl->jhdr->jhdr_size) != 0) {
1846 if ((jnl->jhdr->end % 512) != 0) {
1847 printf("jnl: %s: open: journal end (0x%llx) not a multiple of block size (0x%x)?\n",
1848 jdev_name, jnl->jhdr->end, jnl->jhdr->jhdr_size);
1849 goto bad_journal;
1850 }
1851
1852 // take care of replaying the journal if necessary
1853 if (flags & JOURNAL_RESET) {
1854 printf("jnl: %s: journal start/end pointers reset! (jnl %p; s 0x%llx e 0x%llx)\n",
1855 jdev_name, jnl, jnl->jhdr->start, jnl->jhdr->end);
1856 jnl->jhdr->start = jnl->jhdr->end;
1857 } else if (replay_journal(jnl) != 0) {
1858 printf("jnl: %s: journal_open: Error replaying the journal!\n", jdev_name);
1859 goto bad_journal;
1860 }
1861
1862 if (orig_blksz != 0) {
1863 VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, &context);
1864 phys_blksz = orig_blksz;
1865 if (orig_blksz < (uint32_t)jnl->jhdr->jhdr_size) {
1866 printf("jnl: %s: open: jhdr_size is %d but orig phys blk size is %d. switching.\n",
1867 jdev_name, jnl->jhdr->jhdr_size, orig_blksz);
1868
1869 jnl->jhdr->jhdr_size = orig_blksz;
1870 }
1871 }
1872
1873 // make sure this is in sync!
1874 jnl->active_start = jnl->jhdr->start;
1875
1876 // set this now, after we've replayed the journal
1877 size_up_tbuffer(jnl, tbuffer_size, phys_blksz);
1878
1879 if ((off_t)(jnl->jhdr->blhdr_size/sizeof(block_info)-1) > (jnl->jhdr->size/jnl->jhdr->jhdr_size)) {
1880 printf("jnl: %s: open: jhdr size and blhdr size are not compatible (0x%llx, %d, %d)\n", jdev_name, jnl->jhdr->size,
1881 jnl->jhdr->blhdr_size, jnl->jhdr->jhdr_size);
1882 goto bad_journal;
1883 }
1884
1885 lck_mtx_init(&jnl->jlock, jnl_mutex_group, jnl_lock_attr);
1886
1887 return jnl;
1888
1889 bad_journal:
1890 if (orig_blksz != 0) {
1891 phys_blksz = orig_blksz;
1892 VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, &context);
1893 }
1894 kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz);
1895 bad_kmem_alloc:
1896 if (jdev_name) {
1897 vfs_removename(jdev_name);
1898 }
1899 FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
1900 return NULL;
1901 }
1902
1903
1904 int
1905 journal_is_clean(struct vnode *jvp,
1906 off_t offset,
1907 off_t journal_size,
1908 struct vnode *fsvp,
1909 size_t min_fs_block_size)
1910 {
1911 journal jnl;
1912 uint32_t phys_blksz;
1913 int ret;
1914 int orig_checksum, checksum;
1915 struct vfs_context context;
1916 const char *jdev_name = get_jdev_name(jvp);
1917
1918 context.vc_thread = current_thread();
1919 context.vc_ucred = FSCRED;
1920
1921 /* Get the real physical block size. */
1922 if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
1923 printf("jnl: %s: is_clean: failed to get device block size.\n", jdev_name);
1924 return EINVAL;
1925 }
1926
1927 if (phys_blksz > (uint32_t)min_fs_block_size) {
1928 printf("jnl: %s: is_clean: error: phys blksize %d bigger than min fs blksize %zd\n",
1929 jdev_name, phys_blksz, min_fs_block_size);
1930 return EINVAL;
1931 }
1932
1933 if (journal_size < (256*1024) || journal_size > (1024*1024*1024)) {
1934 printf("jnl: is_clean: journal size %lld looks bogus.\n", journal_size);
1935 return EINVAL;
1936 }
1937
1938 if ((journal_size % phys_blksz) != 0) {
1939 printf("jnl: %s: is_clean: journal size 0x%llx is not an even multiple of block size 0x%x\n",
1940 jdev_name, journal_size, phys_blksz);
1941 return EINVAL;
1942 }
1943
1944 memset(&jnl, 0, sizeof(jnl));
1945
1946 if (kmem_alloc(kernel_map, (vm_offset_t *)&jnl.header_buf, phys_blksz)) {
1947 printf("jnl: %s: is_clean: could not allocate space for header buffer (%d bytes)\n", jdev_name, phys_blksz);
1948 return ENOMEM;
1949 }
1950 jnl.header_buf_size = phys_blksz;
1951
1952 get_io_info(jvp, phys_blksz, &jnl, &context);
1953
1954 jnl.jhdr = (journal_header *)jnl.header_buf;
1955 memset(jnl.jhdr, 0, sizeof(journal_header));
1956
1957 jnl.jdev = jvp;
1958 jnl.jdev_offset = offset;
1959 jnl.fsdev = fsvp;
1960
1961 // we have to set this up here so that do_journal_io() will work
1962 jnl.jhdr->jhdr_size = phys_blksz;
1963
1964 if (read_journal_header(&jnl, jnl.jhdr, phys_blksz) != (unsigned)phys_blksz) {
1965 printf("jnl: %s: is_clean: could not read %d bytes for the journal header.\n",
1966 jdev_name, phys_blksz);
1967 ret = EINVAL;
1968 goto get_out;
1969 }
1970
1971 orig_checksum = jnl.jhdr->checksum;
1972 jnl.jhdr->checksum = 0;
1973
1974 if (jnl.jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
1975 // do this before the swap since it's done byte-at-a-time
1976 orig_checksum = SWAP32(orig_checksum);
1977 checksum = calc_checksum((char *)jnl.jhdr, JOURNAL_HEADER_CKSUM_SIZE);
1978 swap_journal_header(&jnl);
1979 jnl.flags |= JOURNAL_NEED_SWAP;
1980 } else {
1981 checksum = calc_checksum((char *)jnl.jhdr, JOURNAL_HEADER_CKSUM_SIZE);
1982 }
1983
1984 if (jnl.jhdr->magic != JOURNAL_HEADER_MAGIC && jnl.jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
1985 printf("jnl: %s: is_clean: journal magic is bad (0x%x != 0x%x)\n",
1986 jdev_name, jnl.jhdr->magic, JOURNAL_HEADER_MAGIC);
1987 ret = EINVAL;
1988 goto get_out;
1989 }
1990
1991 if (orig_checksum != checksum) {
1992 printf("jnl: %s: is_clean: journal checksum is bad (0x%x != 0x%x)\n", jdev_name, orig_checksum, checksum);
1993 ret = EINVAL;
1994 goto get_out;
1995 }
1996
1997 //
1998 // if the start and end are equal then the journal is clean.
1999 // otherwise it's not clean and therefore an error.
2000 //
2001 if (jnl.jhdr->start == jnl.jhdr->end) {
2002 ret = 0;
2003 } else {
2004 ret = EBUSY; // so the caller can differentiate an invalid journal from a "busy" one
2005 }
2006
2007 get_out:
2008 kmem_free(kernel_map, (vm_offset_t)jnl.header_buf, phys_blksz);
2009 if (jdev_name) {
2010 vfs_removename(jdev_name);
2011 }
2012
2013 return ret;
2014
2015
2016 }
2017
2018
/*
 * journal_close
 *
 * Shut down a journal: flush (or, if the journal is invalid, abort)
 * any in-flight transactions, write out the final journal header,
 * and free everything associated with "jnl".  The journal pointer
 * must not be used after this returns.
 */
void
journal_close(journal *jnl)
{
	volatile off_t *start, *end;
	int             counter=0;

	CHECK_JOURNAL(jnl);

	// set this before doing anything that would block so that
	// we start tearing things down properly.
	//
	jnl->flags |= JOURNAL_CLOSE_PENDING;

	// take the journal lock unless this thread already owns it
	// (i.e. it is closing from inside its own transaction context)
	if (jnl->owner != current_thread()) {
		lock_journal(jnl);
	}

	//
	// only write stuff to disk if the journal is still valid
	//
	if ((jnl->flags & JOURNAL_INVALID) == 0) {

		// finish the active transaction, if any
		if (jnl->active_tr) {
			journal_end_transaction(jnl);
		}

		// flush any buffered transactions
		if (jnl->cur_tr) {
			transaction *tr = jnl->cur_tr;

			jnl->cur_tr = NULL;
			end_transaction(tr, 1, NULL, NULL);   // force it to get flushed
		}

		//start = &jnl->jhdr->start;
		start = &jnl->active_start;
		end   = &jnl->jhdr->end;

		// wait (bounded by "counter") for the fs flush callback to
		// push buffers until all journaled transactions complete
		while (*start != *end && counter++ < 5000) {
			//printf("jnl: close: flushing the buffer cache (start 0x%llx end 0x%llx)\n", *start, *end);
			if (jnl->flush) {
				jnl->flush(jnl->flush_arg);
			}
			tsleep((caddr_t)jnl, PRIBIO, "jnl_close", 2);
		}

		if (*start != *end) {
			printf("jnl: %s: close: buffer flushing didn't seem to flush out all the transactions! (0x%llx - 0x%llx)\n",
			       jnl->jdev_name, *start, *end);
		}

		// make sure this is in sync when we close the journal
		jnl->jhdr->start = jnl->active_start;

		// if this fails there's not much we can do at this point...
		write_journal_header(jnl, 1);
	} else {
		// if we're here the journal isn't valid any more.
		// so make sure we don't leave any locked blocks lying around
		printf("jnl: %s: close: journal %p, is invalid. aborting outstanding transactions\n", jnl->jdev_name, jnl);
		if (jnl->active_tr || jnl->cur_tr) {
			transaction *tr;
			if (jnl->active_tr) {
				tr = jnl->active_tr;
				jnl->active_tr = NULL;
			} else {
				tr = jnl->cur_tr;
				jnl->cur_tr = NULL;
			}

			abort_transaction(jnl, tr);
			// only one of active_tr/cur_tr may be set at a time
			if (jnl->active_tr || jnl->cur_tr) {
				panic("jnl: %s: close: jnl @ %p had both an active and cur tr\n", jnl->jdev_name, jnl);
			}
		}
	}

	free_old_stuff(jnl);

	kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, jnl->header_buf_size);
	// poison the header pointer to catch use-after-close
	jnl->jhdr = (void *)0xbeefbabe;

	if (jnl->jdev_name) {
		vfs_removename(jnl->jdev_name);
	}

	FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
}
2107
2108 static void
2109 dump_journal(journal *jnl)
2110 {
2111 transaction *ctr;
2112
2113 printf("journal for dev %s:", jnl->jdev_name);
2114 printf(" jdev_offset %.8llx\n", jnl->jdev_offset);
2115 printf(" magic: 0x%.8x\n", jnl->jhdr->magic);
2116 printf(" start: 0x%.8llx\n", jnl->jhdr->start);
2117 printf(" end: 0x%.8llx\n", jnl->jhdr->end);
2118 printf(" size: 0x%.8llx\n", jnl->jhdr->size);
2119 printf(" blhdr size: %d\n", jnl->jhdr->blhdr_size);
2120 printf(" jhdr size: %d\n", jnl->jhdr->jhdr_size);
2121 printf(" chksum: 0x%.8x\n", jnl->jhdr->checksum);
2122
2123 printf(" completed transactions:\n");
2124 for(ctr=jnl->completed_trs; ctr; ctr=ctr->next) {
2125 printf(" 0x%.8llx - 0x%.8llx\n", ctr->journal_start, ctr->journal_end);
2126 }
2127 }
2128
2129
2130
2131 static off_t
2132 free_space(journal *jnl)
2133 {
2134 off_t free_space_offset;
2135
2136 if (jnl->jhdr->start < jnl->jhdr->end) {
2137 free_space_offset = jnl->jhdr->size - (jnl->jhdr->end - jnl->jhdr->start) - jnl->jhdr->jhdr_size;
2138 } else if (jnl->jhdr->start > jnl->jhdr->end) {
2139 free_space_offset = jnl->jhdr->start - jnl->jhdr->end;
2140 } else {
2141 // journal is completely empty
2142 free_space_offset = jnl->jhdr->size - jnl->jhdr->jhdr_size;
2143 }
2144
2145 return free_space_offset;
2146 }
2147
2148
//
// The journal must be locked on entry to this function.
// The "desired_size" is in bytes.
//
// Wait until the journal has more than "desired_size" bytes free and
// old_start[0] has been consumed.  Space is reclaimed by lazily
// advancing jnl->jhdr->start past transactions recorded in the
// old_start[] array; the high bit of an old_start entry means that
// transaction's blocks have not finished flushing yet (see
// end_transaction, which sets it).
//
// Returns 0 once enough space is available, or ENOSPC if we give up
// after ~7500 iterations.  Panics (after dumping the journal) if
// flushing appears to make no progress at all.
//
static int
check_free_space(journal *jnl, int desired_size)
{
	size_t i;
	int    counter=0;

	//printf("jnl: check free space (desired 0x%x, avail 0x%Lx)\n",
	//	   desired_size, free_space(jnl));

	while (1) {
		int old_start_empty;

		// progress watchdogs: panic once, then eventually give up
		if (counter++ == 5000) {
			dump_journal(jnl);
			panic("jnl: check_free_space: buffer flushing isn't working "
			      "(jnl @ %p s %lld e %lld f %lld [active start %lld]).\n", jnl,
			      jnl->jhdr->start, jnl->jhdr->end, free_space(jnl), jnl->active_start);
		}
		if (counter > 7500) {
			printf("jnl: %s: check_free_space: giving up waiting for free space.\n", jnl->jdev_name);
			return ENOSPC;
		}

		// make sure there's space in the journal to hold this transaction
		if (free_space(jnl) > desired_size && jnl->old_start[0] == 0) {
			break;
		}
		//
		// here's where we lazily bump up jnl->jhdr->start.  we'll consume
		// entries until there is enough space for the next transaction.
		//
		old_start_empty = 1;
		lock_oldstart(jnl);
		for(i=0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {
			int   lcl_counter;

			lcl_counter = 0;
			// high bit set == that transaction is still flushing;
			// drop the lock, kick the fs flush callback, and wait
			while (jnl->old_start[i] & 0x8000000000000000LL) {
				if (lcl_counter++ > 1000) {
					panic("jnl: check_free_space: tr starting @ 0x%llx not flushing (jnl %p).\n",
					      jnl->old_start[i], jnl);
				}

				unlock_oldstart(jnl);
				if (jnl->flush) {
					jnl->flush(jnl->flush_arg);
				}
				tsleep((caddr_t)jnl, PRIBIO, "check_free_space1", 1);
				lock_oldstart(jnl);
			}

			if (jnl->old_start[i] == 0) {
				continue;
			}

			// consume this entry: the journal's start can safely
			// advance to where this old transaction began
			old_start_empty   = 0;
			jnl->jhdr->start  = jnl->old_start[i];
			jnl->old_start[i] = 0;
			if (free_space(jnl) > desired_size) {
				unlock_oldstart(jnl);
				write_journal_header(jnl, 1);
				lock_oldstart(jnl);
				break;
			}
		}
		unlock_oldstart(jnl);

		// if we bumped the start, loop and try again
		if (i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
			continue;
		} else if (old_start_empty) {
			//
			// if there is nothing in old_start anymore then we can
			// bump the jhdr->start to be the same as active_start
			// since it is possible there was only one very large
			// transaction in the old_start array.  if we didn't do
			// this then jhdr->start would never get updated and we
			// would wind up looping until we hit the panic at the
			// start of the loop.
			//
			jnl->jhdr->start = jnl->active_start;
			write_journal_header(jnl, 1);
			continue;
		}


		// if the file system gave us a flush function, call it to so that
		// it can flush some blocks which hopefully will cause some transactions
		// to complete and thus free up space in the journal.
		if (jnl->flush) {
			jnl->flush(jnl->flush_arg);
		}

		// wait for a while to avoid being cpu-bound (this will
		// put us to sleep for 10 milliseconds)
		tsleep((caddr_t)jnl, PRIBIO, "check_free_space2", 1);
	}

	return 0;
}
2253
2254 /*
2255 * Allocate a new active transaction.
2256 */
2257 static errno_t
2258 journal_allocate_transaction(journal *jnl)
2259 {
2260 transaction *tr;
2261
2262 MALLOC_ZONE(tr, transaction *, sizeof(transaction), M_JNL_TR, M_WAITOK);
2263 memset(tr, 0, sizeof(transaction));
2264
2265 tr->tbuffer_size = jnl->tbuffer_size;
2266
2267 if (kmem_alloc(kernel_map, (vm_offset_t *)&tr->tbuffer, tr->tbuffer_size)) {
2268 FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
2269 jnl->active_tr = NULL;
2270 return ENOMEM;
2271 }
2272
2273 // journal replay code checksum check depends on this.
2274 memset(tr->tbuffer, 0, BLHDR_CHECKSUM_SIZE);
2275 // Fill up the rest of the block with unimportant bytes (0x5a 'Z' chosen for visibility)
2276 memset(tr->tbuffer + BLHDR_CHECKSUM_SIZE, 0x5a, jnl->jhdr->blhdr_size - BLHDR_CHECKSUM_SIZE);
2277
2278 tr->blhdr = (block_list_header *)tr->tbuffer;
2279 tr->blhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
2280 tr->blhdr->num_blocks = 1; // accounts for this header block
2281 tr->blhdr->bytes_used = jnl->jhdr->blhdr_size;
2282 tr->blhdr->flags = BLHDR_CHECK_CHECKSUMS | BLHDR_FIRST_HEADER;
2283
2284 tr->sequence_num = ++jnl->jhdr->sequence_num;
2285 tr->num_blhdrs = 1;
2286 tr->total_bytes = jnl->jhdr->blhdr_size;
2287 tr->jnl = jnl;
2288
2289 jnl->active_tr = tr;
2290
2291 return 0;
2292 }
2293
/*
 * journal_start_transaction
 *
 * Begin a transaction on "jnl".  If the calling thread already owns
 * the journal this just increments the nesting count.  Otherwise it
 * acquires the journal lock, waits for free space in the journal if
 * needed, and installs an active transaction (reusing a buffered
 * "cur_tr" from group commit when one exists).
 *
 * On a successful 0 return the calling thread owns the journal and
 * the journal lock is still held; on error the lock is released.
 * Returns EINVAL if the journal is invalid, ENOSPC if no space could
 * be freed, or an error from journal_allocate_transaction().
 */
int
journal_start_transaction(journal *jnl)
{
	int ret;

	CHECK_JOURNAL(jnl);

	if (jnl->flags & JOURNAL_INVALID) {
		return EINVAL;
	}

	// nested transaction: the owning thread just bumps the count
	if (jnl->owner == current_thread()) {
		if (jnl->active_tr == NULL) {
			panic("jnl: start_tr: active_tr is NULL (jnl @ %p, owner %p, current_thread %p\n",
			      jnl, jnl->owner, current_thread());
		}
		jnl->nested_count++;
		return 0;
	}

	lock_journal(jnl);

	// with the lock held nobody else may own the journal or have a
	// transaction in flight
	if (jnl->owner != NULL || jnl->nested_count != 0 || jnl->active_tr != NULL) {
		panic("jnl: start_tr: owner %p, nested count %d, active_tr %p jnl @ %p\n",
		      jnl->owner, jnl->nested_count, jnl->active_tr, jnl);
	}

	jnl->owner        = current_thread();
	jnl->nested_count = 1;

	free_old_stuff(jnl);

	// make sure there's room in the journal
	if (free_space(jnl) < jnl->tbuffer_size) {
		// this is the call that really waits for space to free up
		// as well as updating jnl->jhdr->start
		if (check_free_space(jnl, jnl->tbuffer_size) != 0) {
			printf("jnl: %s: start transaction failed: no space\n", jnl->jdev_name);
			ret = ENOSPC;
			goto bad_start;
		}
	}

	// if there's a buffered transaction, use it.
	if (jnl->cur_tr) {
		jnl->active_tr = jnl->cur_tr;
		jnl->cur_tr    = NULL;

		return 0;
	}

	ret = journal_allocate_transaction(jnl);
	if (ret) {
		goto bad_start;
	}

	// printf("jnl: start_tr: owner 0x%x new tr @ 0x%x\n", jnl->owner, jnl->active_tr);

	return 0;

bad_start:
	// error path: relinquish ownership and drop the journal lock
	jnl->owner        = NULL;
	jnl->nested_count = 0;
	unlock_journal(jnl);
	return ret;
}
2360
2361
/*
 * journal_modify_block_start
 *
 * Declare that the meta-data buffer "bp" is about to be modified as
 * part of the current transaction.  Must be called by the thread
 * that owns the active transaction, before changing the buffer.  If
 * the buffer is dirty from outside the transaction it is written out
 * first; then the buffer is marked B_LOCKED so the buffer cache
 * keeps it around until the transaction completes.
 *
 * Returns 0 on success, EINVAL if the journal is invalid.  Panics on
 * protocol violations (non-meta buffer, wrong thread, bad sizes).
 */
int
journal_modify_block_start(journal *jnl, struct buf *bp)
{
	transaction *tr;

	CHECK_JOURNAL(jnl);

	if (jnl->flags & JOURNAL_INVALID) {
		return EINVAL;
	}

	// XXXdbg - for debugging I want this to be true.  later it may
	//          not be necessary.
	if ((buf_flags(bp) & B_META) == 0) {
		panic("jnl: modify_block_start: bp @ %p is not a meta-data block! (jnl %p)\n", bp, jnl);
	}

	tr = jnl->active_tr;
	CHECK_TRANSACTION(tr);

	if (jnl->owner != current_thread()) {
		panic("jnl: modify_block_start: called w/out a transaction! jnl %p, owner %p, curact %p\n",
		      jnl, jnl->owner, current_thread());
	}

	free_old_stuff(jnl);

	//printf("jnl: mod block start (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d; total bytes %d)\n",
	//   bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes);

	// can't allow blocks that aren't an even multiple of the
	// underlying block size.
	if ((buf_size(bp) % jnl->jhdr->jhdr_size) != 0) {
		uint32_t phys_blksz, bad=0;

		// re-query the device block size: it may have changed out
		// from under us, in which case we try to adapt jhdr_size
		// before declaring the buffer bad
		if (VNOP_IOCTL(jnl->jdev, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, vfs_context_kernel())) {
			bad = 1;
		} else if (phys_blksz != (uint32_t)jnl->jhdr->jhdr_size) {
			if (phys_blksz < 512) {
				panic("jnl: mod block start: phys blksz %d is too small (%d, %d)\n",
				      phys_blksz, buf_size(bp), jnl->jhdr->jhdr_size);
			}

			if ((buf_size(bp) % phys_blksz) != 0) {
				bad = 1;
			} else if (phys_blksz < (uint32_t)jnl->jhdr->jhdr_size) {
				// smaller physical block size: shrink jhdr_size to match
				jnl->jhdr->jhdr_size = phys_blksz;
			} else {
				// the phys_blksz is now larger... need to realloc the jhdr
				char *new_header_buf;

				printf("jnl: %s: phys blksz got bigger (was: %d/%d now %d)\n",
				       jnl->jdev_name, jnl->header_buf_size, jnl->jhdr->jhdr_size, phys_blksz);
				if (kmem_alloc(kernel_map, (vm_offset_t *)&new_header_buf, phys_blksz)) {
					printf("jnl: modify_block_start: %s: create: phys blksz change (was %d, now %d) but could not allocate space for new header\n",
					       jnl->jdev_name, jnl->jhdr->jhdr_size, phys_blksz);
					bad = 1;
				} else {
					// copy the old header into the bigger buffer and
					// fill the tail with a recognizable pattern (0x18)
					memcpy(new_header_buf, jnl->header_buf, jnl->header_buf_size);
					memset(&new_header_buf[jnl->header_buf_size], 0x18, (phys_blksz - jnl->header_buf_size));
					kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, jnl->header_buf_size);
					jnl->header_buf = new_header_buf;
					jnl->header_buf_size = phys_blksz;

					jnl->jhdr = (journal_header *)jnl->header_buf;
					jnl->jhdr->jhdr_size = phys_blksz;
				}
			}
		} else {
			bad = 1;
		}

		if (bad) {
			panic("jnl: mod block start: bufsize %d not a multiple of block size %d\n",
			      buf_size(bp), jnl->jhdr->jhdr_size);
			return -1;
		}
	}

	// make sure that this transaction isn't bigger than the whole journal
	if (tr->total_bytes+buf_size(bp) >= (jnl->jhdr->size - jnl->jhdr->jhdr_size)) {
		panic("jnl: transaction too big (%d >= %lld bytes, bufsize %d, tr %p bp %p)\n",
		      tr->total_bytes, (tr->jnl->jhdr->size - jnl->jhdr->jhdr_size), buf_size(bp), tr, bp);
		return -1;
	}

	// if the block is dirty and not already locked we have to write
	// it out before we muck with it because it has data that belongs
	// (presumably) to another transaction.
	//
	if ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI) {

		if (buf_flags(bp) & B_ASYNC) {
			panic("modify_block_start: bp @ %p has async flag set!\n", bp);
		}

		// this will cause it to not be buf_brelse()'d
		buf_setflags(bp, B_NORELSE);
		VNOP_BWRITE(bp);
	}
	// pin the buffer until the transaction is done with it
	buf_setflags(bp, B_LOCKED);

	return 0;
}
2466
/*
 * journal_modify_block_abort
 *
 * Undo a journal_modify_block_start() for a buffer that was not
 * actually modified.  If the buffer was never added to the current
 * transaction via journal_modify_block_end(), its B_LOCKED flag is
 * cleared; otherwise it must stay locked in memory.  In all cases
 * the buffer is released with buf_brelse() - callers assume this
 * function consumes bp.
 *
 * Returns 0 normally, EINVAL if the journal is invalid.
 */
int
journal_modify_block_abort(journal *jnl, struct buf *bp)
{
	transaction	*tr;
	block_list_header *blhdr;
	int		i;

	CHECK_JOURNAL(jnl);

	tr = jnl->active_tr;

	//
	// if there's no active transaction then we just want to
	// call buf_brelse() and return since this is just a block
	// that happened to be modified as part of another tr.
	//
	if (tr == NULL) {
		buf_brelse(bp);
		return 0;
	}

	if (jnl->flags & JOURNAL_INVALID) {
		/* Still need to buf_brelse().  Callers assume we consume the bp. */
		buf_brelse(bp);
		return EINVAL;
	}

	CHECK_TRANSACTION(tr);

	if (jnl->owner != current_thread()) {
		panic("jnl: modify_block_abort: called w/out a transaction! jnl %p, owner %p, curact %p\n",
		      jnl, jnl->owner, current_thread());
	}

	free_old_stuff(jnl);

	// printf("jnl: modify_block_abort: tr 0x%x bp 0x%x\n", jnl->active_tr, bp);

	// first check if it's already part of this transaction
	// (the blhdr list is chained through binfo[0].bnum)
	for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
		for(i=1; i < blhdr->num_blocks; i++) {
			if (bp == blhdr->binfo[i].u.bp) {
				break;
			}
		}

		if (i < blhdr->num_blocks) {
			break;
		}
	}

	//
	// if blhdr is null, then this block has only had modify_block_start
	// called on it as part of the current transaction.  that means that
	// it is ok to clear the LOCKED bit since it hasn't actually been
	// modified.  if blhdr is non-null then modify_block_end was called
	// on it and so we need to keep it locked in memory.
	//
	if (blhdr == NULL) {
		buf_clearflags(bp, B_LOCKED);
	}

	buf_brelse(bp);
	return 0;
}
2532
2533
/*
 * journal_modify_block_end
 *
 * Record the (already modified) buffer "bp" in the active
 * transaction.  If "func" is NULL the block's data is copied into
 * the transaction buffer immediately; otherwise the copy is deferred
 * until end_transaction() calls func(bp, arg) just before writing to
 * disk.  New block_list_header segments are allocated and chained
 * (through binfo[0].bnum) as the transaction grows.  The buffer is
 * handed back to the cache via buf_bdwrite() - callers assume this
 * function consumes bp.
 *
 * Returns 0 on success, EINVAL if the journal is invalid.
 */
int
journal_modify_block_end(journal *jnl, struct buf *bp, void (*func)(struct buf *bp, void *arg), void *arg)
{
	int		i = 1;
	int		tbuffer_offset=0;
	char		*blkptr;
	block_list_header *blhdr, *prev=NULL;
	transaction	*tr;

	CHECK_JOURNAL(jnl);

	if (jnl->flags & JOURNAL_INVALID) {
		/* Still need to buf_brelse().  Callers assume we consume the bp. */
		buf_brelse(bp);
		return EINVAL;
	}

	tr = jnl->active_tr;
	CHECK_TRANSACTION(tr);

	if (jnl->owner != current_thread()) {
		panic("jnl: modify_block_end: called w/out a transaction! jnl %p, owner %p, curact %p\n",
		      jnl, jnl->owner, current_thread());
	}

	free_old_stuff(jnl);

	//printf("jnl: mod block end:  (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d, total bytes %d)\n",
	//   bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes);

	// journal_modify_block_start() must have been called first
	if ((buf_flags(bp) & B_LOCKED) == 0) {
		panic("jnl: modify_block_end: bp %p not locked! jnl @ %p\n", bp, jnl);
	}

	// first check if it's already part of this transaction
	for(blhdr=tr->blhdr; blhdr; prev=blhdr,blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
		tbuffer_offset = jnl->jhdr->blhdr_size;

		for(i=1; i < blhdr->num_blocks; i++) {
			if (bp == blhdr->binfo[i].u.bp) {
				break;
			}
			// accumulate where this block's data lives in the
			// tbuffer; killed entries (bnum == -1) only kept a size
			if (blhdr->binfo[i].bnum != (off_t)-1) {
				tbuffer_offset += buf_size(blhdr->binfo[i].u.bp);
			} else {
				tbuffer_offset += blhdr->binfo[i].u.bi.bsize;
			}
		}

		if (i < blhdr->num_blocks) {
			break;
		}
	}

	// not found, but the last segment still has room for it
	if (blhdr == NULL
	    && prev
	    && (prev->num_blocks+1) <= prev->max_blocks
	    && (prev->bytes_used+buf_size(bp)) <= (uint32_t)tr->tbuffer_size) {
		blhdr = prev;
	} else if (blhdr == NULL) {
		block_list_header *nblhdr;

		if (prev == NULL) {
			panic("jnl: modify block end: no way man, prev == NULL?!?, jnl %p, bp %p\n", jnl, bp);
		}

		// we got to the end of the list, didn't find the block and there's
		// no room in the block_list_header pointed to by prev

		// we allocate another tbuffer and link it in at the end of the list
		// through prev->binfo[0].bnum.  that's a skanky way to do things but
		// avoids having yet another linked list of small data structures to manage.

		if (kmem_alloc(kernel_map, (vm_offset_t *)&nblhdr, tr->tbuffer_size)) {
			panic("jnl: end_tr: no space for new block tr @ %p (total bytes: %d)!\n",
			      tr, tr->total_bytes);
		}

		// journal replay code checksum check depends on this.
		memset(nblhdr, 0, BLHDR_CHECKSUM_SIZE);
		// Fill up the rest of the block with unimportant bytes
		memset(nblhdr + BLHDR_CHECKSUM_SIZE, 0x5a, jnl->jhdr->blhdr_size - BLHDR_CHECKSUM_SIZE);

		// initialize the new guy
		nblhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
		nblhdr->num_blocks = 1;      // accounts for this header block
		nblhdr->bytes_used = jnl->jhdr->blhdr_size;
		nblhdr->flags = BLHDR_CHECK_CHECKSUMS;

		tr->num_blhdrs++;
		tr->total_bytes += jnl->jhdr->blhdr_size;

		// then link him in at the end
		prev->binfo[0].bnum = (off_t)((long)nblhdr);

		// and finally switch to using the new guy
		blhdr          = nblhdr;
		tbuffer_offset = jnl->jhdr->blhdr_size;
		i              = 1;
	}


	if ((i+1) > blhdr->max_blocks) {
		panic("jnl: modify_block_end: i = %d, max_blocks %d\n", i, blhdr->max_blocks);
	}

	// if the function pointer is not set then copy the
	// block of data now.  if the function pointer is set
	// the copy will happen after calling the callback in
	// end_transaction() just before it goes to disk.
	//
	if (func == NULL) {
		blkptr = (char *)&((char *)blhdr)[tbuffer_offset];
		memcpy(blkptr, (char *)0 + buf_dataptr(bp), buf_size(bp));
	}

	// if this is true then this is a new block we haven't seen
	if (i >= blhdr->num_blocks) {
		int	bsize;
		vnode_t	vp;

		vp = buf_vnode(bp);
		// hold a vnode ref for this entry; it is dropped by
		// vnode_rele_ext() in journal_kill_block() (and on
		// transaction completion - see those paths)
		vnode_ref(vp);
		bsize = buf_size(bp);

		blhdr->binfo[i].bnum = (off_t)(buf_blkno(bp));
		blhdr->binfo[i].u.bp = bp;
		if (func) {
			void *old_func=NULL, *old_arg=NULL;

			// register the deferred-copy callback on the buffer;
			// it must not conflict with a different existing filter
			buf_setfilter(bp, func, arg, &old_func, &old_arg);
			if (old_func != NULL && old_func != func) {
				panic("jnl: modify_block_end: old func %p / arg %p (func %p)", old_func, old_arg, func);
			}
		}

		blhdr->bytes_used += bsize;
		tr->total_bytes   += bsize;

		blhdr->num_blocks++;
	}
	// hand the buffer back to the cache as a delayed write
	buf_bdwrite(bp);

	return 0;
}
2679
2680 int
2681 journal_kill_block(journal *jnl, struct buf *bp)
2682 {
2683 int i;
2684 int bflags;
2685 block_list_header *blhdr;
2686 transaction *tr;
2687
2688 CHECK_JOURNAL(jnl);
2689
2690 if (jnl->flags & JOURNAL_INVALID) {
2691 return EINVAL;
2692 }
2693
2694 tr = jnl->active_tr;
2695 CHECK_TRANSACTION(tr);
2696
2697 if (jnl->owner != current_thread()) {
2698 panic("jnl: modify_block_end: called w/out a transaction! jnl %p, owner %p, curact %p\n",
2699 jnl, jnl->owner, current_thread());
2700 }
2701
2702 free_old_stuff(jnl);
2703
2704 bflags = buf_flags(bp);
2705
2706 if ( !(bflags & B_LOCKED))
2707 panic("jnl: modify_block_end: called with bp not B_LOCKED");
2708
2709 /*
2710 * bp must be BL_BUSY and B_LOCKED
2711 */
2712 // first check if it's already part of this transaction
2713 for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
2714
2715 for(i=1; i < blhdr->num_blocks; i++) {
2716 if (bp == blhdr->binfo[i].u.bp) {
2717 vnode_t vp;
2718
2719 buf_clearflags(bp, B_LOCKED);
2720
2721 // this undoes the vnode_ref() in journal_modify_block_end()
2722 vp = buf_vnode(bp);
2723 vnode_rele_ext(vp, 0, 1);
2724
2725 // if the block has the DELWRI and FILTER bits sets, then
2726 // things are seriously weird. if it was part of another
2727 // transaction then journal_modify_block_start() should
2728 // have force it to be written.
2729 //
2730 //if ((bflags & B_DELWRI) && (bflags & B_FILTER)) {
2731 // panic("jnl: kill block: this defies all logic! bp 0x%x\n", bp);
2732 //} else {
2733 tr->num_killed += buf_size(bp);
2734 //}
2735 blhdr->binfo[i].bnum = (off_t)-1;
2736 blhdr->binfo[i].u.bp = NULL;
2737 blhdr->binfo[i].u.bi.bsize = buf_size(bp);
2738
2739 buf_markinvalid(bp);
2740 buf_brelse(bp);
2741
2742 break;
2743 }
2744 }
2745
2746 if (i < blhdr->num_blocks) {
2747 break;
2748 }
2749 }
2750
2751 return 0;
2752 }
2753
2754
2755 static int
2756 journal_binfo_cmp(const void *a, const void *b)
2757 {
2758 const block_info *bi_a = (const struct block_info *)a;
2759 const block_info *bi_b = (const struct block_info *)b;
2760 daddr64_t res;
2761
2762 if (bi_a->bnum == (off_t)-1) {
2763 return 1;
2764 }
2765 if (bi_b->bnum == (off_t)-1) {
2766 return -1;
2767 }
2768
2769 // don't have to worry about negative block
2770 // numbers so this is ok to do.
2771 //
2772 res = (buf_blkno(bi_a->u.bp) - buf_blkno(bi_b->u.bp));
2773
2774 return (int)res;
2775 }
2776
2777
2778 /*
2779 * End a transaction. If the transaction is small enough, and we're not forcing
2780 * a write to disk, the "active" transaction becomes the "current" transaction,
2781 * and will be reused for the next transaction that is started (group commit).
2782 *
2783 * If the transaction gets written to disk (because force_it is true, or no
2784 * group commit, or the transaction is sufficiently full), the blocks get
 * written into the journal first, then they are written asynchronously. When
2786 * those async writes complete, the transaction can be freed and removed from
2787 * the journal.
2788 *
 * An optional callback can be supplied. If given, it is called after
 * the blocks have been written to the journal, but before the async writes
2791 * of those blocks to their normal on-disk locations. This is used by
2792 * journal_relocate so that the location of the journal can be changed and
2793 * flushed to disk before the blocks get written to their normal locations.
2794 * Note that the callback is only called if the transaction gets written to
2795 * the journal during this end_transaction call; you probably want to set the
2796 * force_it flag.
2797 *
2798 * Inputs:
2799 * tr Transaction to add to the journal
2800 * force_it If true, force this transaction to the on-disk journal immediately.
2801 * callback See description above. Pass NULL for no callback.
2802 * callback_arg Argument passed to callback routine.
2803 *
2804 * Result
2805 * 0 No errors
2806 * -1 An error occurred. The journal is marked invalid.
2807 */
2808 static int
2809 end_transaction(transaction *tr, int force_it, errno_t (*callback)(void*), void *callback_arg)
2810 {
2811 int i, ret, amt;
2812 errno_t errno;
2813 off_t end;
2814 journal *jnl = tr->jnl;
2815 struct buf *bp, **bparray;
2816 block_list_header *blhdr=NULL, *next=NULL;
2817 size_t tbuffer_offset;
2818
2819 if (jnl->cur_tr) {
2820 panic("jnl: jnl @ %p already has cur_tr %p, new tr: %p\n",
2821 jnl, jnl->cur_tr, tr);
2822 }
2823
2824 // if there weren't any modified blocks in the transaction
2825 // just save off the transaction pointer and return.
2826 if (tr->total_bytes == jnl->jhdr->blhdr_size) {
2827 jnl->cur_tr = tr;
2828 return 0;
2829 }
2830
2831 // if our transaction buffer isn't very full, just hang
2832 // on to it and don't actually flush anything. this is
2833 // what is known as "group commit". we will flush the
2834 // transaction buffer if it's full or if we have more than
2835 // one of them so we don't start hogging too much memory.
2836 //
2837 if ( force_it == 0
2838 && (jnl->flags & JOURNAL_NO_GROUP_COMMIT) == 0
2839 && tr->num_blhdrs < 3
2840 && (tr->total_bytes <= ((tr->tbuffer_size*tr->num_blhdrs) - tr->tbuffer_size/8))) {
2841
2842 jnl->cur_tr = tr;
2843 return 0;
2844 }
2845
2846
2847 // if we're here we're going to flush the transaction buffer to disk.
2848 // make sure there is room in the journal first.
2849 check_free_space(jnl, tr->total_bytes);
2850
2851 // range check the end index
2852 if (jnl->jhdr->end <= 0 || jnl->jhdr->end > jnl->jhdr->size) {
2853 panic("jnl: end_transaction: end is bogus 0x%llx (sz 0x%llx)\n",
2854 jnl->jhdr->end, jnl->jhdr->size);
2855 }
2856
2857 // this transaction starts where the current journal ends
2858 tr->journal_start = jnl->jhdr->end;
2859 end = jnl->jhdr->end;
2860
2861 //
2862 // if the first entry in old_start[] isn't free yet, loop calling the
2863 // file system flush routine until it is (or we panic).
2864 //
2865 i = 0;
2866 lock_oldstart(jnl);
2867 while ((jnl->old_start[0] & 0x8000000000000000LL) != 0) {
2868 if (jnl->flush) {
2869 unlock_oldstart(jnl);
2870
2871 if (jnl->flush) {
2872 jnl->flush(jnl->flush_arg);
2873 }
2874
2875 // yield the cpu so others can get in to clear the lock bit
2876 (void)tsleep((void *)jnl, PRIBIO, "jnl-old-start-sleep", 1);
2877
2878 lock_oldstart(jnl);
2879 }
2880 if (i++ >= 500) {
2881 panic("jnl: transaction that started at 0x%llx is not completing! jnl %p\n",
2882 jnl->old_start[0] & (~0x8000000000000000LL), jnl);
2883 }
2884 }
2885
2886 //
2887 // slide everyone else down and put our latest guy in the last
2888 // entry in the old_start array
2889 //
2890
2891 /* Because old_start is locked above, we can cast away the volatile qualifier before passing it to memcpy. */
2892 memcpy(__CAST_AWAY_QUALIFIER(&jnl->old_start[0], volatile, void *), __CAST_AWAY_QUALIFIER(&jnl->old_start[1], volatile, void *), sizeof(jnl->old_start)-sizeof(jnl->old_start[0]));
2893 jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] = tr->journal_start | 0x8000000000000000LL;
2894
2895 unlock_oldstart(jnl);
2896
2897
2898 // for each block, make sure that the physical block # is set
2899 for(blhdr=tr->blhdr; blhdr; blhdr=next) {
2900 char *blkptr;
2901
2902 tbuffer_offset = jnl->jhdr->blhdr_size;
2903 for(i=1; i < blhdr->num_blocks; i++) {
2904 daddr64_t blkno;
2905 daddr64_t lblkno;
2906 struct vnode *vp;
2907
2908 bp = blhdr->binfo[i].u.bp;
2909
2910 // if this block has a callback function set, call
2911 // it now and then copy the data from the bp into
2912 // the journal.
2913 if (blhdr->binfo[i].bnum != (off_t)-1) {
2914 void (*func)(struct buf *, void *);
2915 void *arg;
2916
2917 if (bp == NULL) {
2918 panic("jnl: inconsistent binfo (NULL bp w/bnum %lld; jnl @ %p, tr %p)\n",
2919 blhdr->binfo[i].bnum, jnl, tr);
2920 }
2921
2922 buf_setfilter(bp, NULL, NULL, (void **)&func, &arg);
2923
2924 if (func) {
2925 // acquire the bp here so that we can safely
2926 // mess around with its data. buf_acquire()
2927 // will return EAGAIN if the buffer was busy,
2928 // so loop trying again.
2929 do {
2930 errno = buf_acquire(bp, 0, 0, 0);
2931 } while (errno == EAGAIN);
2932
2933 if (errno == 0) {
2934
2935 // call the hook function and then copy the
2936 // data into the transaction buffer...
2937 func(bp, arg);
2938
2939 blkptr = (char *)&((char *)blhdr)[tbuffer_offset];
2940 memcpy(blkptr, (char *)buf_dataptr(bp), buf_size(bp));
2941
2942 buf_drop(bp);
2943 } else {
2944 panic("could not acquire bp %p (err %d)\n", bp, errno);
2945 }
2946 }
2947
2948 } else { // bnum == -1, only true if a block was "killed"
2949
2950 tbuffer_offset += blhdr->binfo[i].u.bi.bsize;
2951 continue;
2952 }
2953
2954 tbuffer_offset += buf_size(bp);
2955
2956 vp = buf_vnode(bp);
2957 blkno = buf_blkno(bp);
2958 lblkno = buf_lblkno(bp);
2959
2960 if (vp == NULL && lblkno == blkno) {
2961 printf("jnl: %s: end_tr: bad news! bp @ %p w/null vp and l/blkno = %qd/%qd. aborting the transaction (tr %p jnl %p).\n",
2962 jnl->jdev_name, bp, lblkno, blkno, tr, jnl);
2963 goto bad_journal;
2964 }
2965
2966 // if the lblkno is the same as blkno and this bp isn't
2967 // associated with the underlying file system device then
2968 // we need to call bmap() to get the actual physical block.
2969 //
2970 if ((lblkno == blkno) && (vp != jnl->fsdev)) {
2971 off_t f_offset;
2972 size_t contig_bytes;
2973
2974 if (VNOP_BLKTOOFF(vp, lblkno, &f_offset)) {
2975 printf("jnl: %s: end_tr: vnop_blktooff failed @ %p, jnl %p\n", jnl->jdev_name, bp, jnl);
2976 goto bad_journal;
2977 }
2978 if (VNOP_BLOCKMAP(vp, f_offset, buf_count(bp), &blkno, &contig_bytes, NULL, 0, NULL)) {
2979 printf("jnl: %s: end_tr: can't blockmap the bp @ %p, jnl %p\n", jnl->jdev_name, bp, jnl);
2980 goto bad_journal;
2981 }
2982 if ((uint32_t)contig_bytes < buf_count(bp)) {
2983 printf("jnl: %s: end_tr: blk not physically contiguous on disk@ %p, jnl %p\n", jnl->jdev_name, bp, jnl);
2984 goto bad_journal;
2985 }
2986 buf_setblkno(bp, blkno);
2987 }
2988 // update this so we write out the correct physical block number!
2989 blhdr->binfo[i].bnum = (off_t)(blkno);
2990 }
2991
2992 next = (block_list_header *)((long)blhdr->binfo[0].bnum);
2993 }
2994
2995
2996
2997 for(blhdr=tr->blhdr; blhdr; blhdr=(block_list_header *)((long)blhdr->binfo[0].bnum)) {
2998 amt = blhdr->bytes_used;
2999
3000 blhdr->binfo[0].u.bi.b.sequence_num = tr->sequence_num;
3001
3002 blhdr->checksum = 0;
3003 blhdr->checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
3004
3005 if (kmem_alloc(kernel_map, (vm_offset_t *)&bparray, blhdr->num_blocks * sizeof(struct buf *))) {
3006 panic("can't allocate %zd bytes for bparray\n", blhdr->num_blocks * sizeof(struct buf *));
3007 }
3008
3009 // calculate individual block checksums
3010 tbuffer_offset = jnl->jhdr->blhdr_size;
3011 for(i=1; i < blhdr->num_blocks; i++) {
3012 int32_t bsize;
3013
3014 if (blhdr->binfo[i].bnum != (off_t)-1) {
3015 bparray[i] = blhdr->binfo[i].u.bp;
3016 bsize = buf_size(bparray[i]);
3017 blhdr->binfo[i].u.bi.bsize = bsize;
3018 blhdr->binfo[i].u.bi.b.cksum = calc_checksum(&((char *)blhdr)[tbuffer_offset], bsize);
3019 } else {
3020 bparray[i] = NULL;
3021 bsize = blhdr->binfo[i].u.bi.bsize;
3022 blhdr->binfo[i].u.bi.b.cksum = 0;
3023 }
3024
3025 tbuffer_offset += bsize;
3026 }
3027
3028 ret = write_journal_data(jnl, &end, blhdr, amt);
3029
3030 // always put the bp pointers back
3031 for(i=1; i < blhdr->num_blocks; i++) {
3032 if (blhdr->binfo[i].bnum != (off_t)-1) {
3033 blhdr->binfo[i].u.bp = bparray[i];
3034 }
3035 }
3036
3037 kmem_free(kernel_map, (vm_offset_t)bparray, blhdr->num_blocks * sizeof(struct buf *));
3038
3039 if (ret != amt) {
3040 printf("jnl: %s: end_transaction: only wrote %d of %d bytes to the journal!\n",
3041 jnl->jdev_name, ret, amt);
3042
3043 goto bad_journal;
3044 }
3045 }
3046
3047 jnl->jhdr->end = end; // update where the journal now ends
3048 tr->journal_end = end; // the transaction ends here too
3049 if (tr->journal_start == 0 || tr->journal_end == 0) {
3050 panic("jnl: end_transaction: bad tr journal start/end: 0x%llx 0x%llx\n",
3051 tr->journal_start, tr->journal_end);
3052 }
3053
3054 if (write_journal_header(jnl, 0) != 0) {
3055 goto bad_journal;
3056 }
3057
3058 /*
3059 * If the caller supplied a callback, call it now that the blocks have been
3060 * written to the journal. This is used by journal_relocate so, for example,
3061 * the file system can change its pointer to the new journal.
3062 */
3063 if (callback != NULL && callback(callback_arg) != 0) {
3064 goto bad_journal;
3065 }
3066
3067 //
3068 // setup for looping through all the blhdr's. we null out the
3069 // tbuffer and blhdr fields so that they're not used any more.
3070 //
3071 blhdr = tr->blhdr;
3072 tr->tbuffer = NULL;
3073 tr->blhdr = NULL;
3074
3075 // the buffer_flushed_callback will only be called for the
3076 // real blocks that get flushed so we have to account for
3077 // the block_list_headers here.
3078 //
3079 tr->num_flushed = tr->num_blhdrs * jnl->jhdr->blhdr_size;
3080
3081 // for each block, set the iodone callback and unlock it
3082 for(; blhdr; blhdr=next) {
3083
3084 // we can re-order the buf ptrs because everything is written out already
3085 qsort(&blhdr->binfo[1], blhdr->num_blocks-1, sizeof(block_info), journal_binfo_cmp);
3086
3087 for(i=1; i < blhdr->num_blocks; i++) {
3088 if (blhdr->binfo[i].bnum == (off_t)-1) {
3089 continue;
3090 }
3091
3092 bp = blhdr->binfo[i].u.bp;
3093
3094 // have to pass BAC_REMOVE here because we're going to bawrite()
3095 // the buffer when we're done
3096 do {
3097 errno = buf_acquire(bp, BAC_REMOVE, 0, 0);
3098 } while (errno == EAGAIN);
3099
3100 if (errno == 0) {
3101 struct vnode *save_vp;
3102 void *cur_filter;
3103
3104 if ((buf_flags(bp) & (B_LOCKED|B_DELWRI)) != (B_LOCKED|B_DELWRI)) {
3105 if (jnl->flags & JOURNAL_CLOSE_PENDING) {
3106 buf_clearflags(bp, B_LOCKED);
3107 buf_brelse(bp);
3108 continue;
3109 } else {
3110 panic("jnl: end_tr: !!!DANGER!!! bp %p flags (0x%x) not LOCKED & DELWRI\n", bp, buf_flags(bp));
3111 }
3112 }
3113 save_vp = buf_vnode(bp);
3114
3115 buf_setfilter(bp, buffer_flushed_callback, tr, &cur_filter, NULL);
3116
3117 if (cur_filter) {
3118 panic("jnl: bp @ %p (blkno %qd, vp %p) has non-null iodone (%p) buffflushcb %p\n",
3119 bp, buf_blkno(bp), save_vp, cur_filter, buffer_flushed_callback);
3120 }
3121 buf_clearflags(bp, B_LOCKED);
3122
3123 // kicking off the write here helps performance
3124 buf_bawrite(bp);
3125 // XXXdbg this is good for testing: buf_bdwrite(bp);
3126 //buf_bdwrite(bp);
3127
3128 // this undoes the vnode_ref() in journal_modify_block_end()
3129 vnode_rele_ext(save_vp, 0, 1);
3130 } else {
3131 printf("jnl: %s: end_transaction: could not acquire block %p (errno %d)!\n",
3132 jnl->jdev_name,bp, errno);
3133 }
3134 }
3135
3136 next = (block_list_header *)((long)blhdr->binfo[0].bnum);
3137
3138 // we can free blhdr here since we won't need it any more
3139 blhdr->binfo[0].bnum = 0xdeadc0de;
3140 kmem_free(kernel_map, (vm_offset_t)blhdr, tr->tbuffer_size);
3141 }
3142
3143 //printf("jnl: end_tr: tr @ 0x%x, jnl-blocks: 0x%llx - 0x%llx. exit!\n",
3144 // tr, tr->journal_start, tr->journal_end);
3145 return 0;
3146
3147
3148 bad_journal:
3149 jnl->flags |= JOURNAL_INVALID;
3150 jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] &= ~0x8000000000000000LL;
3151 abort_transaction(jnl, tr);
3152 return -1;
3153 }
3154
/*
 * abort_transaction - back out a failed or invalidated transaction.
 *
 * Walks every block_list_header chained off the transaction and, for each
 * real block entry (bnum != -1) that is still locked in the buffer cache,
 * re-looks it up, marks it invalid (which also clears the locked and
 * delayed-write state), releases it, and drops the vnode reference that
 * journal_modify_block_end() took.  The block list headers and finally the
 * transaction structure itself are freed.
 *
 * NOTE: tr is freed here via FREE_ZONE; callers must not touch it again.
 */
static void
abort_transaction(journal *jnl, transaction *tr)
{
	int i;
	errno_t errno;
	block_list_header *blhdr, *next;
	struct buf *bp;
	struct vnode *save_vp;

	// for each block list header, iterate over the blocks then
	// free up the memory associated with the block list.
	//
	// for each block, clear the lock bit and release it.
	//
	for(blhdr=tr->blhdr; blhdr; blhdr=next) {

		for(i=1; i < blhdr->num_blocks; i++) {
			// slots with bnum == -1 were "killed" (journal_kill_block)
			// and carry no buffer to release
			if (blhdr->binfo[i].bnum == (off_t)-1) {
				continue;
			}
			// skip buffers we no longer hold locked -- there is
			// nothing left to undo for them
			if ( (buf_vnode(blhdr->binfo[i].u.bp) == NULL) ||
			     !(buf_flags(blhdr->binfo[i].u.bp) & B_LOCKED) ) {
				continue;
			}

			// re-lookup the buffer; because it is still B_LOCKED in
			// the cache this must hand back the identical bp we
			// stashed in binfo[i].u.bp (panic otherwise)
			errno = buf_meta_bread(buf_vnode(blhdr->binfo[i].u.bp),
					       buf_lblkno(blhdr->binfo[i].u.bp),
					       buf_size(blhdr->binfo[i].u.bp),
					       NOCRED,
					       &bp);
			if (errno == 0) {
				if (bp != blhdr->binfo[i].u.bp) {
					panic("jnl: abort_tr: got back a different bp! (bp %p should be %p, jnl %p\n",
					      bp, blhdr->binfo[i].u.bp, jnl);
				}

				// releasing a bp marked invalid
				// also clears the locked and delayed state
				buf_markinvalid(bp);
				save_vp = buf_vnode(bp);

				buf_brelse(bp);

				// undoes the vnode_ref() in journal_modify_block_end()
				vnode_rele_ext(save_vp, 0, 1);
			} else {
				printf("jnl: %s: abort_tr: could not find block %Ld vp %p!\n",
				       jnl->jdev_name, blhdr->binfo[i].bnum, blhdr->binfo[i].u.bp);
				if (bp) {
					buf_brelse(bp);
				}
			}
		}

		next = (block_list_header *)((long)blhdr->binfo[0].bnum);

		// we can free blhdr here since we won't need it any more
		blhdr->binfo[0].bnum = 0xdeadc0de;	// poison so stale use is obvious
		kmem_free(kernel_map, (vm_offset_t)blhdr, tr->tbuffer_size);
	}

	// poison the transaction fields, then free the transaction itself
	tr->tbuffer = NULL;
	tr->blhdr = NULL;
	tr->total_bytes = 0xdbadc0de;
	FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
}
3220
3221
3222 int
3223 journal_end_transaction(journal *jnl)
3224 {
3225 int ret;
3226 transaction *tr;
3227
3228 CHECK_JOURNAL(jnl);
3229
3230 if ((jnl->flags & JOURNAL_INVALID) && jnl->owner == NULL) {
3231 return 0;
3232 }
3233
3234 if (jnl->owner != current_thread()) {
3235 panic("jnl: end_tr: I'm not the owner! jnl %p, owner %p, curact %p\n",
3236 jnl, jnl->owner, current_thread());
3237 }
3238
3239 free_old_stuff(jnl);
3240
3241 jnl->nested_count--;
3242 if (jnl->nested_count > 0) {
3243 return 0;
3244 } else if (jnl->nested_count < 0) {
3245 panic("jnl: jnl @ %p has negative nested count (%d). bad boy.\n", jnl, jnl->nested_count);
3246 }
3247
3248 if (jnl->flags & JOURNAL_INVALID) {
3249 if (jnl->active_tr) {
3250 if (jnl->cur_tr != NULL) {
3251 panic("jnl: journal @ %p has active tr (%p) and cur tr (%p)\n",
3252 jnl, jnl->active_tr, jnl->cur_tr);
3253 }
3254
3255 tr = jnl->active_tr;
3256 jnl->active_tr = NULL;
3257 abort_transaction(jnl, tr);
3258 }
3259
3260 jnl->owner = NULL;
3261 unlock_journal(jnl);
3262
3263 return EINVAL;
3264 }
3265
3266 tr = jnl->active_tr;
3267 CHECK_TRANSACTION(tr);
3268
3269 // clear this out here so that when check_free_space() calls
3270 // the FS flush function, we don't panic in journal_flush()
3271 // if the FS were to call that. note: check_free_space() is
3272 // called from end_transaction().
3273 //
3274 jnl->active_tr = NULL;
3275 ret = end_transaction(tr, 0, NULL, NULL);
3276
3277 jnl->owner = NULL;
3278 unlock_journal(jnl);
3279
3280 return ret;
3281 }
3282
3283
3284 int
3285 journal_flush(journal *jnl)
3286 {
3287 int need_signal = 0;
3288
3289 CHECK_JOURNAL(jnl);
3290
3291 if (jnl->flags & JOURNAL_INVALID) {
3292 return -1;
3293 }
3294
3295 KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_JOURNAL, DBG_JOURNAL_FLUSH))
3296 | DBG_FUNC_START, 0, 0, 0, 0, 0);
3297
3298 if (jnl->owner != current_thread()) {
3299 lock_journal(jnl);
3300 need_signal = 1;
3301 }
3302
3303 free_old_stuff(jnl);
3304
3305 // if we're not active, flush any buffered transactions
3306 if (jnl->active_tr == NULL && jnl->cur_tr) {
3307 transaction *tr = jnl->cur_tr;
3308
3309 jnl->cur_tr = NULL;
3310 end_transaction(tr, 1, NULL, NULL); // force it to get flushed
3311 }
3312
3313 if (need_signal) {
3314 unlock_journal(jnl);
3315 }
3316
3317 KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_JOURNAL, DBG_JOURNAL_FLUSH))
3318 | DBG_FUNC_END, 0, 0, 0, 0, 0);
3319
3320 return 0;
3321 }
3322
3323 int
3324 journal_active(journal *jnl)
3325 {
3326 if (jnl->flags & JOURNAL_INVALID) {
3327 return -1;
3328 }
3329
3330 return (jnl->active_tr == NULL) ? 0 : 1;
3331 }
3332
3333 void *
3334 journal_owner(journal *jnl)
3335 {
3336 return jnl->owner;
3337 }
3338
3339 int journal_uses_fua(journal *jnl)
3340 {
3341 if (jnl->flags & JOURNAL_DO_FUA_WRITES)
3342 return 1;
3343 return 0;
3344 }
3345
3346 /*
3347 * Relocate the journal.
3348 *
3349 * You provide the new starting offset and size for the journal. You may
3350 * optionally provide a new tbuffer_size; passing zero defaults to not
3351 * changing the tbuffer size except as needed to fit within the new journal
3352 * size.
3353 *
3354 * You must have already started a transaction. The transaction may contain
3355 * modified blocks (such as those needed to deallocate the old journal,
3356 * allocate the new journal, and update the location and size of the journal
3357 * in filesystem-private structures). Any transactions prior to the active
3358 * transaction will be flushed to the old journal. The new journal will be
3359 * initialized, and the blocks from the active transaction will be written to
3360 * the new journal.
3361 *
3362 * The caller will need to update the structures that identify the location
3363 * and size of the journal. These updates should be made in the supplied
3364 * callback routine. These updates must NOT go into a transaction. You should
3365 * force these updates to the media before returning from the callback. In the
 * event of a crash, either the old journal will be found, with an empty journal,
3367 * or the new journal will be found with the contents of the active transaction.
3368 *
3369 * Upon return from the callback, the blocks from the active transaction are
3370 * written to their normal locations on disk.
3371 *
3372 * (Remember that we have to ensure that blocks get committed to the journal
3373 * before being committed to their normal locations. But the blocks don't count
3374 * as committed until the new journal is pointed at.)
3375 *
3376 * Upon return, there is still an active transaction: newly allocated, and
3377 * with no modified blocks. Call journal_end_transaction as normal. You may
 * modify additional blocks before calling journal_end_transaction, and those
3379 * blocks will (eventually) go to the relocated journal.
3380 *
3381 * Inputs:
3382 * jnl The (opened) journal to relocate.
3383 * offset The new journal byte offset (from start of the journal device).
3384 * journal_size The size, in bytes, of the new journal.
3385 * tbuffer_size The new desired transaction buffer size. Pass zero to keep
3386 * the same size as the current journal. The size will be
3387 * modified as needed to fit the new journal.
3388 * callback Routine called after the new journal has been initialized,
3389 * and the active transaction written to the new journal, but
3390 * before the blocks are written to their normal locations.
3391 * Pass NULL for no callback.
3392 * callback_arg An argument passed to the callback routine.
3393 *
3394 * Result:
3395 * 0 No errors
3396 * EINVAL The offset is not block aligned
3397 * EINVAL The journal_size is not a multiple of the block size
3398 * EINVAL The journal is invalid
3399 * (any) An error returned by journal_flush.
3400 *
3401 */
3402 int journal_relocate(journal *jnl, off_t offset, off_t journal_size, int32_t tbuffer_size,
3403 errno_t (*callback)(void *), void *callback_arg)
3404 {
3405 int ret;
3406 transaction *tr;
3407
3408 /*
3409 * Sanity check inputs, and adjust the size of the transaction buffer.
3410 */
3411 if ((offset % jnl->jhdr->jhdr_size) != 0) {
3412 printf("jnl: %s: relocate: offset 0x%llx is not an even multiple of block size 0x%x\n",
3413 jnl->jdev_name, offset, jnl->jhdr->jhdr_size);
3414 return EINVAL;
3415 }
3416 if ((journal_size % jnl->jhdr->jhdr_size) != 0) {
3417 printf("jnl: %s: relocate: journal size 0x%llx is not an even multiple of block size 0x%x\n",
3418 jnl->jdev_name, journal_size, jnl->jhdr->jhdr_size);
3419 return EINVAL;
3420 }
3421
3422 CHECK_JOURNAL(jnl);
3423
3424 /* Guarantee we own the active transaction. */
3425 if (jnl->flags & JOURNAL_INVALID) {
3426 return EINVAL;
3427 }
3428 if (jnl->owner != current_thread()) {
3429 panic("jnl: relocate: Not the owner! jnl %p, owner %p, curact %p\n",
3430 jnl, jnl->owner, current_thread());
3431 }
3432
3433 if (tbuffer_size == 0)
3434 tbuffer_size = jnl->tbuffer_size;
3435 size_up_tbuffer(jnl, tbuffer_size, jnl->jhdr->jhdr_size);
3436
3437 /*
3438 * Flush any non-active transactions. We have to temporarily hide the
3439 * active transaction to make journal_flush flush out non-active but
3440 * current (unwritten) transactions.
3441 */
3442 tr = jnl->active_tr;
3443 CHECK_TRANSACTION(tr);
3444 jnl->active_tr = NULL;
3445 ret = journal_flush(jnl);
3446 jnl->active_tr = tr;
3447 if (ret) {
3448 return ret;
3449 }
3450
3451 /* Update the journal's offset and size in memory. */
3452 jnl->jdev_offset = offset;
3453 jnl->jhdr->start = jnl->jhdr->end = jnl->jhdr->jhdr_size;
3454 jnl->jhdr->size = journal_size;
3455 jnl->active_start = jnl->jhdr->start;
3456
3457 /*
3458 * Force the active transaction to be written to the new journal. Call the
3459 * supplied callback after the blocks have been written to the journal, but
3460 * before they get written to their normal on-disk locations.
3461 */
3462 jnl->active_tr = NULL;
3463 ret = end_transaction(tr, 1, callback, callback_arg);
3464 if (ret) {
3465 printf("jnl: %s: relocate: end_transaction failed (%d)\n", jnl->jdev_name, ret);
3466 goto bad_journal;
3467 }
3468
3469 /*
3470 * Create a new, empty transaction to be the active transaction. This way
3471 * our caller can use journal_end_transaction as usual.
3472 */
3473 ret = journal_allocate_transaction(jnl);
3474 if (ret) {
3475 printf("jnl: %s: relocate: could not allocate new transaction (%d)\n", jnl->jdev_name, ret);
3476 goto bad_journal;
3477 }
3478
3479 return 0;
3480
3481 bad_journal:
3482 jnl->flags |= JOURNAL_INVALID;
3483 abort_transaction(jnl, tr);
3484 return ret;
3485 }
3486
3487
3488 #else // !JOURNALING - so provide stub functions
3489
3490 int journal_uses_fua(__unused journal *jnl)
3491 {
3492 return 0;
3493 }
3494
3495 journal *
3496 journal_create(__unused struct vnode *jvp,
3497 __unused off_t offset,
3498 __unused off_t journal_size,
3499 __unused struct vnode *fsvp,
3500 __unused size_t min_fs_blksz,
3501 __unused int32_t flags,
3502 __unused int32_t tbuffer_size,
3503 __unused void (*flush)(void *arg),
3504 __unused void *arg)
3505 {
3506 return NULL;
3507 }
3508
3509 journal *
3510 journal_open(__unused struct vnode *jvp,
3511 __unused off_t offset,
3512 __unused off_t journal_size,
3513 __unused struct vnode *fsvp,
3514 __unused size_t min_fs_blksz,
3515 __unused int32_t flags,
3516 __unused int32_t tbuffer_size,
3517 __unused void (*flush)(void *arg),
3518 __unused void *arg)
3519 {
3520 return NULL;
3521 }
3522
3523
3524 int
3525 journal_modify_block_start(__unused journal *jnl, __unused struct buf *bp)
3526 {
3527 return EINVAL;
3528 }
3529
3530 int
3531 journal_modify_block_end(__unused journal *jnl,
3532 __unused struct buf *bp,
3533 __unused void (*func)(struct buf *bp, void *arg),
3534 __unused void *arg)
3535 {
3536 return EINVAL;
3537 }
3538
3539 int
3540 journal_kill_block(__unused journal *jnl, __unused struct buf *bp)
3541 {
3542 return EINVAL;
3543 }
3544
3545 int journal_relocate(__unused journal *jnl,
3546 __unused off_t offset,
3547 __unused off_t journal_size,
3548 __unused int32_t tbuffer_size,
3549 __unused errno_t (*callback)(void *),
3550 __unused void *callback_arg)
3551 {
3552 return EINVAL;
3553 }
3554
3555 void
3556 journal_close(__unused journal *jnl)
3557 {
3558 }
3559
3560 int
3561 journal_start_transaction(__unused journal *jnl)
3562 {
3563 return EINVAL;
3564 }
3565
3566 int
3567 journal_end_transaction(__unused journal *jnl)
3568 {
3569 return EINVAL;
3570 }
3571
3572 int
3573 journal_flush(__unused journal *jnl)
3574 {
3575 return EINVAL;
3576 }
3577
3578 int
3579 journal_is_clean(__unused struct vnode *jvp,
3580 __unused off_t offset,
3581 __unused off_t journal_size,
3582 __unused struct vnode *fsvp,
3583 __unused size_t min_fs_block_size)
3584 {
3585 return 0;
3586 }
3587
3588
3589 void *
3590 journal_owner(__unused journal *jnl)
3591 {
3592 return NULL;
3593 }
3594 #endif // !JOURNALING