/*
 * Copyright (c) 2002-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
//
// This file implements a simple write-ahead journaling layer.
// In theory any file system can make use of it by calling these
// functions when the fs wants to modify meta-data blocks. See
// hfs_journal.h for a more detailed description of the api and
// data structures.
//
// Dominic Giampaolo (dbg@apple.com)
//

#ifdef KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ioctl.h>
#include <sys/ubc.h>
#include <sys/malloc.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <sys/disk.h>
#include <sys/kdebug.h>
#include <sys/kpi_private.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <miscfs/specfs/specdev.h>
#include <libkern/OSAtomic.h>	/* OSAddAtomic */

#include "hfs.h"

kern_return_t	thread_terminate(thread_t);

/*
 * Set sysctl vfs.generic.hfs.jnl.kdebug.trim=1 to enable KERNEL_DEBUG_CONSTANT
 * logging of trim-related calls within the journal. (They're
 * disabled by default because there can be a lot of these events,
 * and we don't want to overwhelm the kernel debug buffer. If you
 * want to watch these events in particular, just set the sysctl.)
 */
static int jnl_kdebug = 0;

HFS_SYSCTL(NODE, _vfs_generic_hfs, OID_AUTO, jnl, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Journal")
HFS_SYSCTL(NODE, _vfs_generic_hfs_jnl, OID_AUTO, kdebug, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Journal kdebug")
HFS_SYSCTL(INT, _vfs_generic_hfs_jnl_kdebug, OID_AUTO, trim, CTLFLAG_RW|CTLFLAG_LOCKED, &jnl_kdebug, 0, "Enable kdebug logging for journal TRIM")

#define DBG_JOURNAL_FLUSH			FSDBG_CODE(DBG_JOURNAL, 1)
#define DBG_JOURNAL_TRIM_ADD			FSDBG_CODE(DBG_JOURNAL, 2)
#define DBG_JOURNAL_TRIM_REMOVE			FSDBG_CODE(DBG_JOURNAL, 3)
#define DBG_JOURNAL_TRIM_REMOVE_PENDING		FSDBG_CODE(DBG_JOURNAL, 4)
#define DBG_JOURNAL_TRIM_REALLOC		FSDBG_CODE(DBG_JOURNAL, 5)
#define DBG_JOURNAL_TRIM_FLUSH			FSDBG_CODE(DBG_JOURNAL, 6)
#define DBG_JOURNAL_TRIM_UNMAP			FSDBG_CODE(DBG_JOURNAL, 7)

/*
 * Cap the journal max size to 2GB. On HFS, it will attempt to occupy
 * a full allocation block if the current size is smaller than the allocation
 * block on which it resides. Once we hit the exabyte filesystem range, then
 * it will use 2GB allocation blocks. As a result, make the cap 2GB.
 */
#define MAX_JOURNAL_SIZE 0x80000000U

#include <mach/machine/sdt.h>
#else

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdarg.h>
#include <sys/types.h>
#include "compat.h"

#endif   /* KERNEL */

#include "hfs_journal.h"

#include <sys/kdebug.h>

//
// By default, we grow the list of extents to trim by 4K at a time.
// We'll opt to flush a transaction if it contains at least
// JOURNAL_FLUSH_TRIM_EXTENTS extents to be trimmed (even if the number
// of modified blocks is small).
//
enum {
	JOURNAL_DEFAULT_TRIM_BYTES = 4096,
	JOURNAL_DEFAULT_TRIM_EXTENTS = JOURNAL_DEFAULT_TRIM_BYTES / sizeof(dk_extent_t),
	JOURNAL_FLUSH_TRIM_EXTENTS = JOURNAL_DEFAULT_TRIM_EXTENTS * 15 / 16
};
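//
// For a sense of scale (assuming the 16-byte dk_extent_t from
// <sys/disk.h>, a { uint64_t offset; uint64_t length; } pair): the
// constants above work out to 4096 / 16 = 256 extents per growth step
// and a flush threshold of 256 * 15 / 16 = 240 pending trim extents.
//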

unsigned int jnl_trim_flush_limit = JOURNAL_FLUSH_TRIM_EXTENTS;

HFS_SYSCTL(UINT, _vfs_generic_hfs_jnl, OID_AUTO, trim_flush, CTLFLAG_RW, &jnl_trim_flush_limit, 0, "number of trimmed extents to cause a journal flush")

// number of bytes to checksum in a block_list_header
// NOTE: this should be enough to clear out the header
//       fields as well as the first entry of binfo[]
#define BLHDR_CHECKSUM_SIZE 32

static void lock_condition(journal *jnl, boolean_t *condition, const char *condition_name);
static void wait_condition(journal *jnl, boolean_t *condition, const char *condition_name);
static void unlock_condition(journal *jnl, boolean_t *condition);
static void finish_end_thread(transaction *tr);
static void write_header_thread(journal *jnl);
static int finish_end_transaction(transaction *tr, errno_t (*callback)(void*), void *callback_arg);
static int end_transaction(transaction *tr, int force_it, errno_t (*callback)(void*), void *callback_arg, boolean_t drop_lock, boolean_t must_wait);
static void abort_transaction(journal *jnl, transaction *tr);
static void dump_journal(journal *jnl);

static __inline__ void lock_oldstart(journal *jnl);
static __inline__ void unlock_oldstart(journal *jnl);
static __inline__ void lock_flush(journal *jnl);
static __inline__ void unlock_flush(journal *jnl);


//
// 3105942 - Coalesce writes to the same block on journal replay
//

typedef struct bucket {
	off_t	 block_num;
	uint32_t jnl_offset;
	uint32_t block_size;
	int32_t	 cksum;
} bucket;

#define STARTING_BUCKETS 256

static int add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr);
static int grow_table(struct bucket **buf_ptr, int num_buckets, int new_size);
static int lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full);
static int do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr);
static int insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr, int overwriting);

#define CHECK_JOURNAL(jnl) \
	do { \
		if (jnl == NULL) { \
			panic("%s:%d: null journal ptr?\n", __FILE__, __LINE__); \
		} \
		if (jnl->jdev == NULL) { \
			panic("%s:%d: jdev is null!\n", __FILE__, __LINE__); \
		} \
		if (jnl->fsdev == NULL) { \
			panic("%s:%d: fsdev is null!\n", __FILE__, __LINE__); \
		} \
		if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC) { \
			panic("%s:%d: jhdr magic corrupted (0x%x != 0x%x)\n", \
			__FILE__, __LINE__, jnl->jhdr->magic, JOURNAL_HEADER_MAGIC); \
		} \
		if (   jnl->jhdr->start <= 0 \
		    || jnl->jhdr->start > jnl->jhdr->size) { \
			panic("%s:%d: jhdr start looks bad (0x%llx max size 0x%llx)\n", \
			__FILE__, __LINE__, jnl->jhdr->start, jnl->jhdr->size); \
		} \
		if (   jnl->jhdr->end <= 0 \
		    || jnl->jhdr->end > jnl->jhdr->size) { \
			panic("%s:%d: jhdr end looks bad (0x%llx max size 0x%llx)\n", \
			__FILE__, __LINE__, jnl->jhdr->end, jnl->jhdr->size); \
		} \
	} while(0)

#define CHECK_TRANSACTION(tr) \
	do { \
		if (tr == NULL) { \
			panic("%s:%d: null transaction ptr?\n", __FILE__, __LINE__); \
		} \
		if (tr->jnl == NULL) { \
			panic("%s:%d: null tr->jnl ptr?\n", __FILE__, __LINE__); \
		} \
		if (tr->blhdr != (block_list_header *)tr->tbuffer) { \
			panic("%s:%d: blhdr (%p) != tbuffer (%p)\n", __FILE__, __LINE__, tr->blhdr, tr->tbuffer); \
		} \
		if (tr->total_bytes < 0) { \
			panic("%s:%d: tr total_bytes looks bad: %d\n", __FILE__, __LINE__, tr->total_bytes); \
		} \
		if (tr->journal_start < 0) { \
			panic("%s:%d: tr journal start looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_start); \
		} \
		if (tr->journal_end < 0) { \
			panic("%s:%d: tr journal end looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_end); \
		} \
		if (tr->blhdr && (tr->blhdr->max_blocks <= 0 || tr->blhdr->max_blocks > (tr->jnl->jhdr->size/tr->jnl->jhdr->jhdr_size))) { \
			panic("%s:%d: tr blhdr max_blocks looks bad: %d\n", __FILE__, __LINE__, tr->blhdr->max_blocks); \
		} \
	} while(0)



//
// this isn't a great checksum routine but it will do for now.
// we use it to checksum the journal header and the block list
// headers that are at the start of each transaction.
//
static unsigned int
calc_checksum(const char *ptr, int len)
{
	int i;
	unsigned int cksum=0;

	// this is a lame checksum but for now it'll do
	for(i = 0; i < len; i++, ptr++) {
		cksum = (cksum << 8) ^ (cksum + *(unsigned char *)ptr);
	}

	return (~cksum);
}
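//
// A sketch of the convention callers follow (see write_journal_header
// and replay_journal below): the checksum field is zeroed before the
// checksum is computed, so a verifier can re-compute over the same
// bytes and compare.
//
//	jnl->jhdr->checksum = 0;
//	jnl->jhdr->checksum = calc_checksum((char *)jnl->jhdr, JOURNAL_HEADER_CKSUM_SIZE);
//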

//
// Journal Locking
//
lck_grp_attr_t *jnl_group_attr;
lck_attr_t     *jnl_lock_attr;
lck_grp_t      *jnl_mutex_group;

void
journal_init(void)
{
	jnl_lock_attr   = lck_attr_alloc_init();
	jnl_group_attr  = lck_grp_attr_alloc_init();
	jnl_mutex_group = lck_grp_alloc_init("jnl-mutex", jnl_group_attr);
}

__inline__ void
journal_lock(journal *jnl)
{
	lck_mtx_lock(&jnl->jlock);
	if (jnl->owner) {
		panic ("jnl: owner is %p, expected NULL\n", jnl->owner);
	}
	jnl->owner = current_thread();
}

__inline__ void
journal_unlock(journal *jnl)
{
	jnl->owner = NULL;
	lck_mtx_unlock(&jnl->jlock);
}

static __inline__ void
lock_flush(journal *jnl)
{
	lck_mtx_lock(&jnl->flock);
}

static __inline__ void
unlock_flush(journal *jnl)
{
	lck_mtx_unlock(&jnl->flock);
}

static __inline__ void
lock_oldstart(journal *jnl)
{
	lck_mtx_lock(&jnl->old_start_lock);
}

static __inline__ void
unlock_oldstart(journal *jnl)
{
	lck_mtx_unlock(&jnl->old_start_lock);
}



#define JNL_WRITE    0x0001
#define JNL_READ     0x0002
#define JNL_HEADER   0x8000

//
// This function sets up a fake buf and passes it directly to the
// journal device strategy routine (so that it won't get cached in
// the block cache).
//
// It also handles range checking the i/o so that we don't write
// outside the journal boundaries and it will wrap the i/o back
// to the beginning if necessary (skipping over the journal header)
//
static size_t
do_journal_io(journal *jnl, off_t *offset, void *data, size_t len, int direction)
{
	int		err;
	off_t		curlen = len;
	size_t		io_sz = 0;
	buf_t		bp;
	off_t		max_iosize;
	bufattr_t	bap;
	boolean_t	was_vm_privileged = FALSE;
	boolean_t	need_vm_privilege = FALSE;

	if (vfs_isswapmount(jnl->fsmount))
		need_vm_privilege = TRUE;

	if (*offset < 0 || *offset > jnl->jhdr->size) {
		panic("jnl: do_jnl_io: bad offset 0x%llx (max 0x%llx)\n", *offset, jnl->jhdr->size);
	}

	if (direction & JNL_WRITE)
		max_iosize = jnl->max_write_size;
	else if (direction & JNL_READ)
		max_iosize = jnl->max_read_size;
	else
		max_iosize = 128 * 1024;

again:
	bp = buf_alloc(jnl->jdev);

	if (*offset + curlen > jnl->jhdr->size && *offset != 0 && jnl->jhdr->size != 0) {
		if (*offset == jnl->jhdr->size) {
			*offset = jnl->jhdr->jhdr_size;
		} else {
			curlen = jnl->jhdr->size - *offset;
		}
	}

	if (curlen > max_iosize) {
		curlen = max_iosize;
	}

	if (curlen <= 0) {
		panic("jnl: do_jnl_io: curlen == %lld, offset 0x%llx len %zd\n", curlen, *offset, len);
	}

	if (*offset == 0 && (direction & JNL_HEADER) == 0) {
		panic("jnl: request for i/o to jnl-header without JNL_HEADER flag set! (len %lld, data %p)\n", curlen, data);
	}

	/*
	 * As alluded to in the block comment at the top of the function, we use a "fake" iobuf
	 * here and issue directly to the disk device that the journal protects since we don't
	 * want this to enter the block cache.  As a result, we lose the ability to mark it
	 * as a metadata buf_t for the layers below us that may care. If we were to
	 * simply attach the B_META flag into the b_flags this may confuse things further
	 * since this is an iobuf, not a metadata buffer.
	 *
	 * To address this, we use the extended bufattr struct embedded in the bp.
	 * Explicitly mark the buf here as a metadata buffer in its bufattr flags.
	 */
	bap = buf_attr(bp);
	bufattr_markmeta(bap);

	if (direction & JNL_READ)
		buf_setflags(bp, B_READ);
	else {
		/*
		 * don't have to set any flags
		 */
		vnode_startwrite(jnl->jdev);
	}
	buf_setsize(bp, curlen);
	buf_setcount(bp, curlen);
	buf_setdataptr(bp, (uintptr_t)data);
	buf_setblkno(bp, (daddr64_t) ((jnl->jdev_offset + *offset) / (off_t)jnl->jhdr->jhdr_size));
	buf_setlblkno(bp, (daddr64_t) ((jnl->jdev_offset + *offset) / (off_t)jnl->jhdr->jhdr_size));

	if ((direction & JNL_WRITE) && (jnl->flags & JOURNAL_DO_FUA_WRITES)) {
		buf_markfua(bp);
	}

	if (need_vm_privilege == TRUE) {
		/*
		 * if we block waiting for memory, and there is enough pressure to
		 * cause us to try and create a new swap file, we may end up deadlocking
		 * due to waiting for the journal on the swap file creation path...
		 * by making ourselves vm_privileged, we give ourselves the best chance
		 * of not blocking
		 */
		was_vm_privileged = set_vm_privilege(TRUE);
	}
	DTRACE_IO1(journal__start, buf_t, bp);
	err = VNOP_STRATEGY(bp);
	if (!err) {
		err = (int)buf_biowait(bp);
	}
	DTRACE_IO1(journal__done, buf_t, bp);

	if (need_vm_privilege == TRUE && was_vm_privileged == FALSE)
		set_vm_privilege(FALSE);

	buf_free(bp);

	if (err) {
		printf("jnl: %s: do_jnl_io: strategy err 0x%x\n", jnl->jdev_name, err);
		return 0;
	}

	*offset += curlen;
	io_sz   += curlen;

	if (io_sz != len) {
		// handle wrap-around
		data    = (char *)data + curlen;
		curlen  = len - io_sz;
		if (*offset >= jnl->jhdr->size) {
			*offset = jnl->jhdr->jhdr_size;
		}
		goto again;
	}

	return io_sz;
}
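//
// Worked example of the wrap-around above (illustrative numbers): with
// jhdr->size == 0x100000 and jhdr->jhdr_size == 0x1000, a 0x2000-byte
// write at *offset == 0xFF000 is trimmed to the 0x1000 bytes that fit
// before the end of the journal; once that i/o completes, *offset wraps
// to jhdr->jhdr_size (skipping the journal header block) and the
// remaining 0x1000 bytes are issued from there.
//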

static size_t
read_journal_data(journal *jnl, off_t *offset, void *data, size_t len)
{
	return do_journal_io(jnl, offset, data, len, JNL_READ);
}

static size_t
write_journal_data(journal *jnl, off_t *offset, void *data, size_t len)
{
	return do_journal_io(jnl, offset, data, len, JNL_WRITE);
}


static size_t
read_journal_header(journal *jnl, void *data, size_t len)
{
	off_t hdr_offset = 0;

	return do_journal_io(jnl, &hdr_offset, data, len, JNL_READ|JNL_HEADER);
}

static int
write_journal_header(journal *jnl, int updating_start, uint32_t sequence_num)
{
	static int num_err_prints = 0;
	int ret=0;
	off_t jhdr_offset = 0;
	//
	// Flush the track cache if we're not doing force-unit-access
	// writes.
	//
	if (!updating_start && (jnl->flags & JOURNAL_DO_FUA_WRITES) == 0) {

		dk_synchronize_t sync_request = {
			.options = DK_SYNCHRONIZE_OPTION_BARRIER,
		};

		/*
		 * If device doesn't support barrier-only flush, or
		 * the journal is on a different device, use full flush.
		 */
		if (!(jnl->flags & JOURNAL_FEATURE_BARRIER) || (jnl->jdev != jnl->fsdev)) {
			sync_request.options = 0;
			jnl->flush_counter++;
		}

		ret = VNOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZE, (caddr_t)&sync_request, FWRITE, vfs_context_kernel());
	}
	if (ret != 0) {
		//
		// Only print this error if it's a different error than the
		// previous one, or if it's the first time for this device
		// or if the total number of printfs is less than 25.  We
		// allow for up to 25 printfs to ensure that some make it
		// into the on-disk syslog.  Otherwise if we only printed
		// one, it's possible it would never make it to the syslog
		// for the root volume and that makes debugging hard.
		//
		if (   ret != jnl->last_flush_err
		    || (jnl->flags & JOURNAL_FLUSHCACHE_ERR) == 0
		    || num_err_prints++ < 25) {

			printf("jnl: %s: flushing fs disk buffer returned 0x%x\n", jnl->jdev_name, ret);

			jnl->flags |= JOURNAL_FLUSHCACHE_ERR;
			jnl->last_flush_err = ret;
		}
	}

	jnl->jhdr->sequence_num = sequence_num;
	jnl->jhdr->checksum = 0;
	jnl->jhdr->checksum = calc_checksum((char *)jnl->jhdr, JOURNAL_HEADER_CKSUM_SIZE);

	if (do_journal_io(jnl, &jhdr_offset, jnl->header_buf, jnl->jhdr->jhdr_size, JNL_WRITE|JNL_HEADER) != (size_t)jnl->jhdr->jhdr_size) {
		printf("jnl: %s: write_journal_header: error writing the journal header!\n", jnl->jdev_name);
		jnl->flags |= JOURNAL_INVALID;
		return -1;
	}

	// If we're not doing force-unit-access writes, then we
	// have to flush after writing the journal header so that
	// a future transaction doesn't sneak out to disk before
	// the header does and thus overwrite data that the old
	// journal header refers to.  Saw this exact case happen
	// on an IDE bus analyzer with Larry Barras so while it
	// may seem obscure, it's not.
	//
	if (updating_start && (jnl->flags & JOURNAL_DO_FUA_WRITES) == 0) {

		dk_synchronize_t sync_request = {
			.options = DK_SYNCHRONIZE_OPTION_BARRIER,
		};

		/*
		 * If device doesn't support barrier-only flush, or
		 * the journal is on a different device, use full flush.
		 */
		if (!(jnl->flags & JOURNAL_FEATURE_BARRIER) || (jnl->jdev != jnl->fsdev)) {
			sync_request.options = 0;
			jnl->flush_counter++;
		}

		VNOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZE, (caddr_t)&sync_request, FWRITE, vfs_context_kernel());
	}

	return 0;
}
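//
// In short, when FUA writes are unavailable the cache flushes above
// bracket the header write: closing a transaction (!updating_start)
// flushes *before* the write so the header never points at transaction
// data still sitting in the drive's cache, and advancing the start
// pointer (updating_start) flushes *after* the write, for the reason
// the comment above describes.
//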



//
// this is a work function used to free up transactions that
// completed. they can't be free'd from buffer_flushed_callback
// because it is called from deep within the disk driver stack
// and thus can't do something that would potentially cause
// paging.  it gets called by each of the journal api entry
// points so stuff shouldn't hang around for too long.
//
static void
free_old_stuff(journal *jnl)
{
	transaction *tr, *next;
	block_list_header *blhdr=NULL, *next_blhdr=NULL;

	if (jnl->tr_freeme == NULL)
		return;

	lock_oldstart(jnl);
	tr = jnl->tr_freeme;
	jnl->tr_freeme = NULL;
	unlock_oldstart(jnl);

	for(; tr; tr=next) {
		for (blhdr = tr->blhdr; blhdr; blhdr = next_blhdr) {
			next_blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum);
			blhdr->binfo[0].bnum = 0xdeadc0de;

			hfs_free(blhdr, tr->tbuffer_size);

			KERNEL_DEBUG(0xbbbbc01c, jnl, tr, tr->tbuffer_size, 0, 0);
		}
		next = tr->next;
		hfs_free(tr, sizeof(*tr));
	}
}



//
// This is our callback that lets us know when a buffer has been
// flushed to disk.  It's called from deep within the driver stack
// and thus is quite limited in what it can do.  Notably, it can
// not initiate any new i/o's or allocate/free memory.
//
static void
buffer_flushed_callback(struct buf *bp, void *arg)
{
	transaction	*tr;
	journal		*jnl;
	transaction	*ctr, *prev=NULL, *next;
	size_t		i;
	int		bufsize, amt_flushed, total_bytes;


	//printf("jnl: buf flush: bp @ 0x%x l/blkno %qd/%qd vp 0x%x tr @ 0x%x\n",
	//	   bp, buf_lblkno(bp), buf_blkno(bp), buf_vnode(bp), arg);

	// snarf out the bits we want
	bufsize = buf_size(bp);
	tr      = (transaction *)arg;

	// then we've already seen it
	if (tr == NULL) {
		return;
	}

	CHECK_TRANSACTION(tr);

	jnl = tr->jnl;

	CHECK_JOURNAL(jnl);

	amt_flushed = tr->num_killed;
	total_bytes = tr->total_bytes;

	// update the number of blocks that have been flushed.
	// this buf may represent more than one block so take
	// that into account.
	//
	// OSAddAtomic() returns the value of tr->num_flushed before the add
	//
	amt_flushed += OSAddAtomic(bufsize, &tr->num_flushed);


	// if this transaction isn't done yet, just return as
	// there is nothing to do.
	//
	// NOTE: we are careful to not reference anything through
	//       the tr pointer after doing the OSAddAtomic().  if
	//       this if statement fails then we are the last one
	//       and then it's ok to dereference "tr".
	//
	if ((amt_flushed + bufsize) < total_bytes) {
		return;
	}

	// this will single thread checking the transaction
	lock_oldstart(jnl);

	if (tr->total_bytes == (int)0xfbadc0de) {
		// then someone beat us to it...
		unlock_oldstart(jnl);
		return;
	}

	// mark this so that we're the owner of dealing with the
	// cleanup for this transaction
	tr->total_bytes = 0xfbadc0de;

	if (jnl->flags & JOURNAL_INVALID)
		goto transaction_done;

	//printf("jnl: tr 0x%x (0x%llx 0x%llx) in jnl 0x%x completed.\n",
	//   tr, tr->journal_start, tr->journal_end, jnl);

	// find this entry in the old_start[] index and mark it completed
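	// (the high bit of an old_start[] entry marks a transaction whose
	// journal space is still in use; the masking below strips it for
	// comparison, and clearing it records the transaction as complete)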
	for(i = 0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {

		if ((off_t)(jnl->old_start[i] & ~(0x8000000000000000ULL)) == tr->journal_start) {
			jnl->old_start[i] &= ~(0x8000000000000000ULL);
			break;
		}
	}

	if (i >= sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
		panic("jnl: buffer_flushed: did not find tr w/start @ %lld (tr %p, jnl %p)\n",
		      tr->journal_start, tr, jnl);
	}


	// if we are here then we need to update the journal header
	// to reflect that this transaction is complete
	if (tr->journal_start == jnl->active_start) {
		jnl->active_start = tr->journal_end;
		tr->journal_start = tr->journal_end = (off_t)0;
	}

	// go through the completed_trs list and try to coalesce
	// entries, restarting back at the beginning if we have to.
	for (ctr = jnl->completed_trs; ctr; prev=ctr, ctr=next) {
		if (ctr->journal_start == jnl->active_start) {
			jnl->active_start = ctr->journal_end;
			if (prev) {
				prev->next = ctr->next;
			}
			if (ctr == jnl->completed_trs) {
				jnl->completed_trs = ctr->next;
			}

			next           = jnl->completed_trs;   // this starts us over again
			ctr->next      = jnl->tr_freeme;
			jnl->tr_freeme = ctr;
			ctr            = NULL;
		} else if (tr->journal_end == ctr->journal_start) {
			ctr->journal_start = tr->journal_start;
			next               = jnl->completed_trs;  // this starts us over again
			ctr                = NULL;
			tr->journal_start  = tr->journal_end = (off_t)0;
		} else if (tr->journal_start == ctr->journal_end) {
			ctr->journal_end  = tr->journal_end;
			next              = ctr->next;
			tr->journal_start = tr->journal_end = (off_t)0;
		} else if (ctr->next && ctr->journal_end == ctr->next->journal_start) {
			// coalesce the next entry with this one and link the next
			// entry in at the head of the tr_freeme list
			next              = ctr->next;           // temporarily use the "next" variable
			ctr->journal_end  = next->journal_end;
			ctr->next         = next->next;
			next->next        = jnl->tr_freeme;      // link in the next guy at the head of the tr_freeme list
			jnl->tr_freeme    = next;

			next              = jnl->completed_trs;  // this starts us over again
			ctr               = NULL;
		} else {
			next = ctr->next;
		}
	}

	// if this is true then we didn't merge with anyone
	// so link ourselves in at the head of the completed
	// transaction list.
	if (tr->journal_start != 0) {
		// put this entry into the correct sorted place
		// in the list instead of just at the head.
		//

		prev = NULL;
		for (ctr = jnl->completed_trs; ctr && tr->journal_start > ctr->journal_start; prev=ctr, ctr=ctr->next) {
			// just keep looping
		}

		if (ctr == NULL && prev == NULL) {
			jnl->completed_trs = tr;
			tr->next           = NULL;
		} else if (ctr == jnl->completed_trs) {
			tr->next           = jnl->completed_trs;
			jnl->completed_trs = tr;
		} else {
			tr->next   = prev->next;
			prev->next = tr;
		}
	} else {
		// if we're here this tr got merged with someone else so
		// put it on the list to be free'd
		tr->next       = jnl->tr_freeme;
		jnl->tr_freeme = tr;
	}
transaction_done:
	unlock_oldstart(jnl);

	unlock_condition(jnl, &jnl->asyncIO);
}


#include <libkern/OSByteOrder.h>

#define SWAP16(x) OSSwapInt16(x)
#define SWAP32(x) OSSwapInt32(x)
#define SWAP64(x) OSSwapInt64(x)


static void
swap_journal_header(journal *jnl)
{
	jnl->jhdr->magic      = SWAP32(jnl->jhdr->magic);
	jnl->jhdr->endian     = SWAP32(jnl->jhdr->endian);
	jnl->jhdr->start      = SWAP64(jnl->jhdr->start);
	jnl->jhdr->end        = SWAP64(jnl->jhdr->end);
	jnl->jhdr->size       = SWAP64(jnl->jhdr->size);
	jnl->jhdr->blhdr_size = SWAP32(jnl->jhdr->blhdr_size);
	jnl->jhdr->checksum   = SWAP32(jnl->jhdr->checksum);
	jnl->jhdr->jhdr_size  = SWAP32(jnl->jhdr->jhdr_size);
	jnl->jhdr->sequence_num = SWAP32(jnl->jhdr->sequence_num);
}

static void
swap_block_list_header(journal *jnl, block_list_header *blhdr)
{
	int i;

	blhdr->max_blocks = SWAP16(blhdr->max_blocks);
	blhdr->num_blocks = SWAP16(blhdr->num_blocks);
	blhdr->bytes_used = SWAP32(blhdr->bytes_used);
	blhdr->checksum   = SWAP32(blhdr->checksum);
	blhdr->flags      = SWAP32(blhdr->flags);

	if (blhdr->num_blocks >= ((jnl->jhdr->blhdr_size / sizeof(block_info)) - 1)) {
		printf("jnl: %s: blhdr num blocks looks suspicious (%d / blhdr size %d).  not swapping.\n", jnl->jdev_name, blhdr->num_blocks, jnl->jhdr->blhdr_size);
		return;
	}

	for(i = 0; i < blhdr->num_blocks; i++) {
		blhdr->binfo[i].bnum         = SWAP64(blhdr->binfo[i].bnum);
		blhdr->binfo[i].u.bi.bsize   = SWAP32(blhdr->binfo[i].u.bi.bsize);
		blhdr->binfo[i].u.bi.b.cksum = SWAP32(blhdr->binfo[i].u.bi.b.cksum);
	}
}


static int
update_fs_block(journal *jnl, void *block_ptr, off_t fs_block, size_t bsize)
{
	int		ret;
	struct buf	*oblock_bp=NULL;
	boolean_t	was_vm_privileged = FALSE;


	// first read the block we want.
	ret = buf_meta_bread(jnl->fsdev, (daddr64_t)fs_block, bsize, NOCRED, &oblock_bp);
	if (ret != 0) {
		printf("jnl: %s: update_fs_block: error reading fs block # %lld! (ret %d)\n", jnl->jdev_name, fs_block, ret);

		if (oblock_bp) {
			buf_brelse(oblock_bp);
			oblock_bp = NULL;
		}

		// let's try to be aggressive here and just re-write the block
		oblock_bp = buf_getblk(jnl->fsdev, (daddr64_t)fs_block, bsize, 0, 0, BLK_META);
		if (oblock_bp == NULL) {
			printf("jnl: %s: update_fs_block: buf_getblk() for %lld failed! failing update.\n", jnl->jdev_name, fs_block);
			return -1;
		}
	}

	// make sure it's the correct size.
	if (buf_size(oblock_bp) != bsize) {
		buf_brelse(oblock_bp);
		return -1;
	}

	// copy the journal data over top of it
	memcpy((char *)buf_dataptr(oblock_bp), block_ptr, bsize);

	if (vfs_isswapmount(jnl->fsmount)) {
		/*
		 * if we block waiting for memory, and there is enough pressure to
		 * cause us to try and create a new swap file, we may end up deadlocking
		 * due to waiting for the journal on the swap file creation path...
		 * by making ourselves vm_privileged, we give ourselves the best chance
		 * of not blocking
		 */
		was_vm_privileged = set_vm_privilege(TRUE);
	}
	ret = VNOP_BWRITE(oblock_bp);

	if (vfs_isswapmount(jnl->fsmount) && (was_vm_privileged == FALSE))
		set_vm_privilege(FALSE);

	if (ret != 0) {
		printf("jnl: %s: update_fs_block: failed to update block %lld (ret %d)\n", jnl->jdev_name, fs_block,ret);
		return ret;
	}
	// and now invalidate it so that if someone else wants to read
	// it in a different size they'll be able to do it.
	ret = buf_meta_bread(jnl->fsdev, (daddr64_t)fs_block, bsize, NOCRED, &oblock_bp);
	if (oblock_bp) {
		buf_markinvalid(oblock_bp);
		buf_brelse(oblock_bp);
	}

	return 0;
}

static int
grow_table(struct bucket **buf_ptr, int num_buckets, int new_size)
{
	struct bucket *newBuf;
	int current_size = num_buckets, i;

	// return if new_size is less than the current size
	if (new_size < num_buckets) {
		return current_size;
	}

	newBuf = hfs_malloc(new_size*sizeof(struct bucket));

	//  printf("jnl: lookup_bucket: expanded co_buf to %d elems\n", new_size);

	// copy existing elements
	bcopy(*buf_ptr, newBuf, num_buckets*sizeof(struct bucket));

	// initialize the new ones
	for(i = num_buckets; i < new_size; i++) {
		newBuf[i].block_num = (off_t)-1;
	}

	// free the old container
	hfs_free(*buf_ptr, num_buckets * sizeof(struct bucket));

	// reset the buf_ptr
	*buf_ptr = newBuf;

	return new_size;
}

static int
lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full)
{
	int lo, hi, index, matches, i;

	if (num_full == 0) {
		return 0; // table is empty, so insert at index=0
	}

	lo = 0;
	hi = num_full - 1;
	index = -1;

	// perform binary search for block_num
	do {
		int mid = (hi - lo)/2 + lo;
		off_t this_num = (*buf_ptr)[mid].block_num;

		if (block_num == this_num) {
			index = mid;
			break;
		}

		if (block_num < this_num) {
			hi = mid;
			continue;
		}

		if (block_num > this_num) {
			lo = mid + 1;
			continue;
		}
	} while (lo < hi);

	// check if lo and hi converged on the match
	if (block_num == (*buf_ptr)[hi].block_num) {
		index = hi;
	}

	// if no existing entry found, find index for new one
	if (index == -1) {
		index = (block_num < (*buf_ptr)[hi].block_num) ? hi : hi + 1;
	} else {
		// make sure that we return the right-most index in the case of multiple matches
		matches = 0;
		i = index + 1;
		while (i < num_full && block_num == (*buf_ptr)[i].block_num) {
			matches++;
			i++;
		}

		index += matches;
	}

	return index;
}
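//
// Illustrative example: given block_nums {10, 20, 20, 30} and num_full
// == 4, lookup_bucket(..., 20, 4) returns 2 (the right-most match, per
// the multiple-match scan above), while lookup_bucket(..., 25, 4)
// returns 3, the index where a new entry for block 25 would be
// inserted to keep the table sorted.
//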

static int
insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr, int overwriting)
{
	if (!overwriting) {
		// grow the table if we're out of space - we may index the table
		// with *num_full_ptr (lookup_bucket() can return a maximum value ==
		// *num_full_ptr), so we need to grow when we hit (*num_buckets_ptr - 1)
		// to prevent out-of-bounds indexing
		if (*num_full_ptr >= (*num_buckets_ptr - 1)) {
			int new_size = *num_buckets_ptr * 2;
			int grow_size = grow_table(buf_ptr, *num_buckets_ptr, new_size);

			if (grow_size < new_size) {
				printf("jnl: %s: add_block: grow_table returned an error!\n", jnl->jdev_name);
				return -1;
			}

			*num_buckets_ptr = grow_size; //update num_buckets to reflect the new size
		}

		// if we're not inserting at the end, we need to bcopy
		if (blk_index != *num_full_ptr) {
			bcopy( (*buf_ptr)+(blk_index), (*buf_ptr)+(blk_index+1), (*num_full_ptr-blk_index)*sizeof(struct bucket) );
		}

		(*num_full_ptr)++; // increment only if we're not overwriting
	}

	// sanity check the values we're about to add
	if ((off_t)offset >= jnl->jhdr->size) {
		offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
	}
	if (size <= 0) {
		panic("jnl: insert_block: bad size in insert_block (%zd)\n", size);
	}

	(*buf_ptr)[blk_index].block_num = num;
	(*buf_ptr)[blk_index].block_size = (uint32_t)size;
	(*buf_ptr)[blk_index].jnl_offset = (uint32_t)offset;
	(*buf_ptr)[blk_index].cksum = cksum;

	return blk_index;
}

static int
do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, __unused size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr)
{
	int	num_to_remove, index, i, overwrite, err;
	size_t	jhdr_size = jnl->jhdr->jhdr_size, new_offset;
	off_t	overlap, block_start, block_end;

	block_start = block_num*jhdr_size;
	block_end = block_start + size;
	overwrite = (block_num == (*buf_ptr)[blk_index].block_num && size >= (*buf_ptr)[blk_index].block_size);

	// first, eliminate any overlap with the previous entry
	if (blk_index != 0 && !overwrite) {
		off_t prev_block_start = (*buf_ptr)[blk_index-1].block_num*jhdr_size;
		off_t prev_block_end = prev_block_start + (*buf_ptr)[blk_index-1].block_size;
		overlap = prev_block_end - block_start;
		if (overlap > 0) {
			if (overlap % jhdr_size != 0) {
				panic("jnl: do_overlap: overlap with previous entry not a multiple of %zd\n", jhdr_size);
			}

			// if the previous entry completely overlaps this one, we need to break it into two pieces.
			if (prev_block_end > block_end) {
				off_t new_num = block_end / jhdr_size;
				size_t new_size = prev_block_end - block_end;

				new_offset = (*buf_ptr)[blk_index-1].jnl_offset + (block_end - prev_block_start);

				err = insert_block(jnl, buf_ptr, blk_index, new_num, new_size, new_offset, cksum, num_buckets_ptr, num_full_ptr, 0);
				if (err < 0) {
					panic("jnl: do_overlap: error inserting during pre-overlap\n");
				}
			}

			// Regardless, we need to truncate the previous entry to the beginning of the overlap
			(*buf_ptr)[blk_index-1].block_size = (uint32_t)(block_start - prev_block_start);
			(*buf_ptr)[blk_index-1].cksum = 0;   // have to blow it away because there's no way to check it
		}
	}

	// then, bail out fast if there's no overlap with the entries that follow
	if (!overwrite && block_end <= (off_t)((*buf_ptr)[blk_index].block_num*jhdr_size)) {
		return 0; // no overlap, no overwrite
	} else if (overwrite && (blk_index + 1 >= *num_full_ptr || block_end <= (off_t)((*buf_ptr)[blk_index+1].block_num*jhdr_size))) {

		(*buf_ptr)[blk_index].cksum = cksum;   // update this
		return 1; // simple overwrite
	}

	// Otherwise, find all cases of total and partial overlap. We use the special
	// block_num of -2 to designate entries that are completely overlapped and must
	// be eliminated. The block_num, size, and jnl_offset of partially overlapped
	// entries must be adjusted to keep the array consistent.
	index = blk_index;
	num_to_remove = 0;
	while (index < *num_full_ptr && block_end > (off_t)((*buf_ptr)[index].block_num*jhdr_size)) {
		if (block_end >= (off_t)(((*buf_ptr)[index].block_num*jhdr_size + (*buf_ptr)[index].block_size))) {
			(*buf_ptr)[index].block_num = -2; // mark this for deletion
			num_to_remove++;
		} else {
			overlap = block_end - (*buf_ptr)[index].block_num*jhdr_size;
			if (overlap > 0) {
				if (overlap % jhdr_size != 0) {
					panic("jnl: do_overlap: overlap of %lld is not multiple of %zd\n", overlap, jhdr_size);
				}

				// if we partially overlap this entry, adjust its block number, jnl offset, and size
				(*buf_ptr)[index].block_num += (overlap / jhdr_size); // make sure overlap is multiple of jhdr_size, or round up
				(*buf_ptr)[index].cksum = 0;

				new_offset = (*buf_ptr)[index].jnl_offset + overlap; // check for wrap-around
				if ((off_t)new_offset >= jnl->jhdr->size) {
					new_offset = jhdr_size + (new_offset - jnl->jhdr->size);
				}
				(*buf_ptr)[index].jnl_offset = (uint32_t)new_offset;

				(*buf_ptr)[index].block_size -= overlap; // sanity check for negative value
				if ((*buf_ptr)[index].block_size <= 0) {
					panic("jnl: do_overlap: after overlap, new block size is invalid (%u)\n", (*buf_ptr)[index].block_size);
					// return -1; // if above panic is removed, return -1 for error
				}
			}

		}

		index++;
	}

	// bcopy over any completely overlapped entries, starting at the right (where the above loop broke out)
	index--; // start with the last index used within the above loop
	while (index >= blk_index) {
		if ((*buf_ptr)[index].block_num == -2) {
			if (index == *num_full_ptr-1) {
				(*buf_ptr)[index].block_num = -1; // it's the last item in the table... just mark as free
			} else {
				bcopy( (*buf_ptr)+(index+1), (*buf_ptr)+(index), (*num_full_ptr - (index + 1)) * sizeof(struct bucket) );
			}
			(*num_full_ptr)--;
		}
		index--;
	}

	// eliminate any stale entries at the end of the table
	for(i = *num_full_ptr; i < (*num_full_ptr + num_to_remove); i++) {
		(*buf_ptr)[i].block_num = -1;
	}

	return 0; // if we got this far, we need to insert the entry into the table (rather than overwrite)
}

// PR-3105942: Coalesce writes to the same block in journal replay
// We coalesce writes by maintaining a dynamic sorted array of physical disk blocks
// to be replayed and the corresponding location in the journal which contains
// the most recent data for those blocks. The array is "played" once all the
// blocks in the journal have been coalesced. The code for the case of conflicting/
// overlapping writes to a single block is the most dense. Because coalescing can
// disrupt the existing time-ordering of blocks in the journal playback, care
// is taken to catch any overlaps and keep the array consistent.
static int
add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr)
{
	int	blk_index, overwriting;

	// on return from lookup_bucket(), blk_index is the index into the table where block_num should be
	// inserted (or the index of the elem to overwrite).
	blk_index = lookup_bucket( buf_ptr, block_num, *num_full_ptr);

	// check if the index is within bounds (if we're adding this block to the end of
	// the table, blk_index will be equal to num_full)
	if (blk_index < 0 || blk_index > *num_full_ptr) {
		//printf("jnl: add_block: trouble adding block to co_buf\n");
		return -1;
	} // else printf("jnl: add_block: adding block 0x%llx at i=%d\n", block_num, blk_index);

	// Determine whether we're overwriting an existing entry by checking for overlap
	overwriting = do_overlap(jnl, buf_ptr, blk_index, block_num, size, offset, cksum, num_buckets_ptr, num_full_ptr);
	if (overwriting < 0) {
		return -1; // if we got an error, pass it along
	}

	// returns the index, or -1 on error
	blk_index = insert_block(jnl, buf_ptr, blk_index, block_num, size, offset, cksum, num_buckets_ptr, num_full_ptr, overwriting);

	return blk_index;
}
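//
// A sketch of how the pieces fit together (made-up numbers): if the
// table already holds a 3-block entry for disk block 100 and a later
// transaction logs a 1-block write to block 101, lookup_bucket finds
// the insertion point, do_overlap truncates the old entry to cover
// only block 100 and re-inserts its tail as a new piece for block 102,
// and insert_block then adds the entry for block 101, so the most
// recent journal data wins for every block.
//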

static int
replay_journal(journal *jnl)
{
	int		i, bad_blocks=0;
	unsigned int	orig_checksum, checksum, check_block_checksums = 0;
	size_t		ret;
	size_t		max_bsize = 0;		/* protected by block_ptr */
	block_list_header *blhdr;
	off_t		offset, txn_start_offset=0, blhdr_offset, orig_jnl_start;
	char		*buff, *block_ptr=NULL;
	struct bucket	*co_buf;
	int		num_buckets = STARTING_BUCKETS, num_full, check_past_jnl_end = 1, in_uncharted_territory=0;
	uint32_t	last_sequence_num = 0;
	int		replay_retry_count = 0;

	// wrap the start ptr if it points to the very end of the journal
	if (jnl->jhdr->start == jnl->jhdr->size) {
		jnl->jhdr->start = jnl->jhdr->jhdr_size;
	}
	if (jnl->jhdr->end == jnl->jhdr->size) {
		jnl->jhdr->end = jnl->jhdr->jhdr_size;
	}

	if (jnl->jhdr->start == jnl->jhdr->end) {
		return 0;
	}

	orig_jnl_start = jnl->jhdr->start;

	// allocate memory for the header_block.  we'll read each blhdr into this
	buff = hfs_malloc(jnl->jhdr->blhdr_size);

	// allocate memory for the coalesce buffer
	co_buf = hfs_malloc(num_buckets*sizeof(struct bucket));

restart_replay:

	// initialize entries
	for(i = 0; i < num_buckets; i++) {
		co_buf[i].block_num = -1;
	}
	num_full = 0; // empty at first


	printf("jnl: %s: replay_journal: from: %lld to: %lld (joffset 0x%llx)\n",
	       jnl->jdev_name, jnl->jhdr->start, jnl->jhdr->end, jnl->jdev_offset);

	while (check_past_jnl_end || jnl->jhdr->start != jnl->jhdr->end) {
		offset = blhdr_offset = jnl->jhdr->start;
		ret = read_journal_data(jnl, &offset, buff, jnl->jhdr->blhdr_size);
		if (ret != (size_t)jnl->jhdr->blhdr_size) {
			printf("jnl: %s: replay_journal: Could not read block list header block @ 0x%llx!\n", jnl->jdev_name, offset);
			bad_blocks = 1;
			goto bad_txn_handling;
		}

		blhdr = (block_list_header *)buff;

		orig_checksum = blhdr->checksum;
		blhdr->checksum = 0;
		if (jnl->flags & JOURNAL_NEED_SWAP) {
			// calculate the checksum based on the unswapped data
			// because it is done byte-at-a-time.
			orig_checksum = (unsigned int)SWAP32(orig_checksum);
			checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
			swap_block_list_header(jnl, blhdr);
		} else {
			checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
		}


		//
		// XXXdbg - if these checks fail, we should replay as much
		//          we can in the hopes that it will still leave the
		//          drive in a better state than if we didn't replay
		//          anything
		//
		if (checksum != orig_checksum) {
			if (check_past_jnl_end && in_uncharted_territory) {

				if (blhdr_offset != jnl->jhdr->end) {
					printf("jnl: %s: Extra txn replay stopped @ %lld / 0x%llx\n", jnl->jdev_name, blhdr_offset, blhdr_offset);
				}

				check_past_jnl_end = 0;
				jnl->jhdr->end = blhdr_offset;
				continue;
			}

			printf("jnl: %s: replay_journal: bad block list header @ 0x%llx (checksum 0x%x != 0x%x)\n",
			       jnl->jdev_name, blhdr_offset, orig_checksum, checksum);

			if (blhdr_offset == orig_jnl_start) {
				// if there's nothing in the journal at all, just bail out altogether.
				goto bad_replay;
			}

			bad_blocks = 1;
			goto bad_txn_handling;
		}

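		//
		// Transaction headers are expected to carry either the same
		// sequence number as the previous one or the next number in
		// sequence; anything else means we have run past the last
		// valid transaction (or hit corruption), handled below.
		//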
		if (   (last_sequence_num != 0)
		    && (blhdr->binfo[0].u.bi.b.sequence_num != 0)
		    && (blhdr->binfo[0].u.bi.b.sequence_num != last_sequence_num)
		    && (blhdr->binfo[0].u.bi.b.sequence_num != last_sequence_num+1)) {

			txn_start_offset = jnl->jhdr->end = blhdr_offset;

			if (check_past_jnl_end) {
				check_past_jnl_end = 0;
				printf("jnl: %s: 2: extra replay stopped @ %lld / 0x%llx (seq %d < %d)\n",
				       jnl->jdev_name, blhdr_offset, blhdr_offset, blhdr->binfo[0].u.bi.b.sequence_num, last_sequence_num);
				continue;
			}

			printf("jnl: %s: txn sequence numbers out of order in txn @ %lld / %llx! (%d < %d)\n",
			       jnl->jdev_name, blhdr_offset, blhdr_offset, blhdr->binfo[0].u.bi.b.sequence_num, last_sequence_num);
			bad_blocks = 1;
			goto bad_txn_handling;
		}
		last_sequence_num = blhdr->binfo[0].u.bi.b.sequence_num;

		if (blhdr_offset >= jnl->jhdr->end && jnl->jhdr->start <= jnl->jhdr->end) {
			if (last_sequence_num == 0) {
				check_past_jnl_end = 0;
				printf("jnl: %s: pre-sequence-num-enabled txn's - can not go further than end (%lld %lld).\n",
				       jnl->jdev_name, jnl->jhdr->start, jnl->jhdr->end);
				if (jnl->jhdr->start != jnl->jhdr->end) {
					jnl->jhdr->start = jnl->jhdr->end;
				}
				continue;
			}
			printf("jnl: %s: examining extra transactions starting @ %lld / 0x%llx\n", jnl->jdev_name, blhdr_offset, blhdr_offset);
		}

		if (   blhdr->max_blocks <= 0 || blhdr->max_blocks > (jnl->jhdr->size/jnl->jhdr->jhdr_size)
		    || blhdr->num_blocks <= 0 || blhdr->num_blocks > blhdr->max_blocks) {
			printf("jnl: %s: replay_journal: bad looking journal entry: max: %d num: %d\n",
			       jnl->jdev_name, blhdr->max_blocks, blhdr->num_blocks);
			bad_blocks = 1;
			goto bad_txn_handling;
		}

		max_bsize = 0;
		for (i = 1; i < blhdr->num_blocks; i++) {
			if (blhdr->binfo[i].bnum < 0 && blhdr->binfo[i].bnum != (off_t)-1) {
				printf("jnl: %s: replay_journal: bogus block number 0x%llx\n", jnl->jdev_name, blhdr->binfo[i].bnum);
				bad_blocks = 1;
				goto bad_txn_handling;
			}

			if ((size_t)blhdr->binfo[i].u.bi.bsize > max_bsize) {
				max_bsize = blhdr->binfo[i].u.bi.bsize;
			}
		}

		if (blhdr->flags & BLHDR_CHECK_CHECKSUMS) {
			check_block_checksums = 1;
			block_ptr = hfs_malloc(max_bsize);
		} else {
			block_ptr = NULL;
		}

		if (blhdr->flags & BLHDR_FIRST_HEADER) {
			txn_start_offset = blhdr_offset;
		}

		//printf("jnl: replay_journal: adding %d blocks in journal entry @ 0x%llx to co_buf\n",
		//       blhdr->num_blocks-1, jnl->jhdr->start);
		bad_blocks = 0;
		for (i = 1; i < blhdr->num_blocks; i++) {
			int size, ret_val;
			off_t number;

			size = blhdr->binfo[i].u.bi.bsize;
			number = blhdr->binfo[i].bnum;

			// don't add "killed" blocks
			if (number == (off_t)-1) {
				//printf("jnl: replay_journal: skipping killed fs block (index %d)\n", i);
			} else {

				if (check_block_checksums) {
					int32_t disk_cksum;
					off_t block_offset;

					block_offset = offset;

					// read the block so we can check the checksum
					ret = read_journal_data(jnl, &block_offset, block_ptr, size);
					if (ret != (size_t)size) {
						printf("jnl: %s: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", jnl->jdev_name, offset);
						bad_blocks = 1;
						goto bad_txn_handling;
					}

					disk_cksum = calc_checksum(block_ptr, size);

					// there is no need to swap the checksum from disk because
					// it got swapped when the blhdr was read in.
					if (blhdr->binfo[i].u.bi.b.cksum != 0 && disk_cksum != blhdr->binfo[i].u.bi.b.cksum) {
						printf("jnl: %s: txn starting at %lld (%lld) @ index %3d bnum %lld (%d) with disk cksum != blhdr cksum (0x%.8x 0x%.8x)\n",
						       jnl->jdev_name, txn_start_offset, blhdr_offset, i, number, size, disk_cksum, blhdr->binfo[i].u.bi.b.cksum);
						printf("jnl: 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x\n",
						       *(int *)&block_ptr[0*sizeof(int)], *(int *)&block_ptr[1*sizeof(int)], *(int *)&block_ptr[2*sizeof(int)], *(int *)&block_ptr[3*sizeof(int)],
						       *(int *)&block_ptr[4*sizeof(int)], *(int *)&block_ptr[5*sizeof(int)], *(int *)&block_ptr[6*sizeof(int)], *(int *)&block_ptr[7*sizeof(int)]);

						bad_blocks = 1;
						goto bad_txn_handling;
					}
				}


				// add this bucket to co_buf, coalescing where possible
				// printf("jnl: replay_journal: adding block 0x%llx\n", number);
				ret_val = add_block(jnl, &co_buf, number, size, (size_t) offset, blhdr->binfo[i].u.bi.b.cksum, &num_buckets, &num_full);

				if (ret_val == -1) {
					printf("jnl: %s: replay_journal: trouble adding block to co_buf\n", jnl->jdev_name);
					goto bad_replay;
				} // else printf("jnl: replay_journal: added block 0x%llx at i=%d\n", number);
			}

			// increment offset
			offset += size;

			// check if the last block added puts us off the end of the jnl.
			// if so, we need to wrap to the beginning and take any remainder
			// into account
			//
			if (offset >= jnl->jhdr->size) {
				offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
			}
		}

		if (block_ptr) {
			hfs_free(block_ptr, max_bsize);
			block_ptr = NULL;
		}

		if (bad_blocks) {
bad_txn_handling:
			/* Journal replay got an error before it found any valid
			 * transactions; abort replay */
			if (txn_start_offset == 0) {
				printf("jnl: %s: no known good txn start offset! aborting journal replay.\n", jnl->jdev_name);
				goto bad_replay;
			}

			/* Repeated error during journal replay, abort replay */
			if (replay_retry_count == 3) {
				printf("jnl: %s: repeated errors replaying journal! aborting journal replay.\n", jnl->jdev_name);
				goto bad_replay;
			}
			replay_retry_count++;

			/* There was an error replaying the journal (possibly
			 * EIO/ENXIO from the device).  So retry replaying all
			 * the good transactions that we found before getting
			 * the error.
			 */
			jnl->jhdr->start = orig_jnl_start;
			jnl->jhdr->end = txn_start_offset;
			check_past_jnl_end = 0;
			last_sequence_num = 0;
			printf("jnl: %s: restarting journal replay (%lld - %lld)!\n", jnl->jdev_name, jnl->jhdr->start, jnl->jhdr->end);
			goto restart_replay;
		}

		jnl->jhdr->start += blhdr->bytes_used;
		if (jnl->jhdr->start >= jnl->jhdr->size) {
			// wrap around and skip the journal header block
			jnl->jhdr->start = (jnl->jhdr->start % jnl->jhdr->size) + jnl->jhdr->jhdr_size;
		}

		if (jnl->jhdr->start == jnl->jhdr->end) {
			in_uncharted_territory = 1;
		}
	}

	if (jnl->jhdr->start != jnl->jhdr->end) {
		printf("jnl: %s: start %lld != end %lld.  resetting end.\n", jnl->jdev_name, jnl->jhdr->start, jnl->jhdr->end);
		jnl->jhdr->end = jnl->jhdr->start;
	}

	//printf("jnl: replay_journal: replaying %d blocks\n", num_full);

	/*
	 * make sure it's at least one page in size, so
	 * start max_bsize at PAGE_SIZE
	 */
	for (i = 0, max_bsize = PAGE_SIZE; i < num_full; i++) {

		if (co_buf[i].block_num == (off_t)-1)
			continue;

		if (co_buf[i].block_size > max_bsize)
			max_bsize = co_buf[i].block_size;
	}
	/*
	 * round max_bsize up to the nearest PAGE_SIZE multiple
	 */
	if (max_bsize & (PAGE_SIZE - 1)) {
		max_bsize = (max_bsize + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	}

	block_ptr = hfs_malloc(max_bsize);

	// Replay the coalesced entries in the co-buf
	for(i = 0; i < num_full; i++) {
		size_t size = co_buf[i].block_size;
		off_t jnl_offset = (off_t) co_buf[i].jnl_offset;
		off_t number = co_buf[i].block_num;


		// printf("replaying co_buf[%d]: block 0x%llx, size 0x%x, jnl_offset 0x%llx\n", i, co_buf[i].block_num,
		//	  co_buf[i].block_size, co_buf[i].jnl_offset);

		if (number == (off_t)-1) {
			// printf("jnl: replay_journal: skipping killed fs block\n");
		} else {

			// do journal read, and set the phys. block
			ret = read_journal_data(jnl, &jnl_offset, block_ptr, size);
			if (ret != size) {
				printf("jnl: %s: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", jnl->jdev_name, jnl_offset);
				goto bad_replay;
			}

			if (update_fs_block(jnl, block_ptr, number, size) != 0) {
				goto bad_replay;
			}
		}
	}


	// done replaying; update jnl header
	if (write_journal_header(jnl, 1, jnl->jhdr->sequence_num) != 0) {
		goto bad_replay;
	}

	printf("jnl: %s: journal replay done.\n", jnl->jdev_name);

	// free block_ptr
	if (block_ptr) {
		hfs_free(block_ptr, max_bsize);
		block_ptr = NULL;
	}

	// free the coalesce buffer
	hfs_free(co_buf, num_buckets*sizeof(struct bucket));
	co_buf = NULL;

	hfs_free(buff, jnl->jhdr->blhdr_size);
	return 0;

bad_replay:
	hfs_free(block_ptr, max_bsize);
	hfs_free(co_buf, num_buckets*sizeof(struct bucket));
	hfs_free(buff, jnl->jhdr->blhdr_size);

	return -1;
}


#define DEFAULT_TRANSACTION_BUFFER_SIZE  (128*1024)
#define MAX_TRANSACTION_BUFFER_SIZE      (3072*1024)

// XXXdbg - so I can change it in the debugger
int def_tbuffer_size = 0;


//
// This function sets the size of the tbuffer and the
// size of the blhdr.  It assumes that jnl->jhdr->size
// and jnl->jhdr->jhdr_size are already valid.
//
static void
size_up_tbuffer(journal *jnl, int tbuffer_size, int phys_blksz)
{
	//
	// one-time initialization based on how much memory
	// there is in the machine.
	//
	if (def_tbuffer_size == 0) {
		uint64_t memsize = 0;
		size_t l = sizeof(memsize);
		sysctlbyname("hw.memsize", &memsize, &l, NULL, 0);

		if (memsize < (256*1024*1024)) {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE;
		} else if (memsize < (512*1024*1024)) {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 2;
		} else if (memsize < (1024*1024*1024)) {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 3;
		} else {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * (memsize / (256*1024*1024));
		}
	}

	// For analyzer
	hfs_assert(jnl->jhdr->jhdr_size > 0);

	// size up the transaction buffer... can't be larger than the number
	// of blocks that can fit in a block_list_header block.
	if (tbuffer_size == 0) {
		jnl->tbuffer_size = def_tbuffer_size;
	} else {
		// make sure that the specified tbuffer_size isn't too small
		if (tbuffer_size < jnl->jhdr->blhdr_size * 2) {
			tbuffer_size = jnl->jhdr->blhdr_size * 2;
		}
		// and make sure it's an even multiple of the block size
		if ((tbuffer_size % jnl->jhdr->jhdr_size) != 0) {
			tbuffer_size -= (tbuffer_size % jnl->jhdr->jhdr_size);
		}

		jnl->tbuffer_size = tbuffer_size;
	}

	if (jnl->tbuffer_size > (jnl->jhdr->size / 2)) {
		jnl->tbuffer_size = (jnl->jhdr->size / 2);
	}

	if (jnl->tbuffer_size > MAX_TRANSACTION_BUFFER_SIZE) {
		jnl->tbuffer_size = MAX_TRANSACTION_BUFFER_SIZE;
	}

	jnl->jhdr->blhdr_size = (jnl->tbuffer_size / jnl->jhdr->jhdr_size) * sizeof(block_info);
	if (jnl->jhdr->blhdr_size < phys_blksz) {
		jnl->jhdr->blhdr_size = phys_blksz;
	} else if ((jnl->jhdr->blhdr_size % phys_blksz) != 0) {
		// have to round up so we're an even multiple of the physical block size
		jnl->jhdr->blhdr_size = (jnl->jhdr->blhdr_size + (phys_blksz - 1)) & ~(phys_blksz - 1);
	}
}
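//
// Worked example (illustrative numbers): with a 512-byte jhdr_size and
// the default 128K tbuffer, a transaction buffer holds 131072 / 512 =
// 256 journal blocks, so blhdr_size starts at 256 * sizeof(block_info)
// bytes and is then rounded up to a multiple of the physical block
// size by the code above.
//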

static void
get_io_info(struct vnode *devvp, size_t phys_blksz, journal *jnl, struct vfs_context *context)
{
	off_t	readblockcnt;
	off_t	writeblockcnt;
	off_t	readmaxcnt=0, tmp_readmaxcnt;
	off_t	writemaxcnt=0, tmp_writemaxcnt;
	off_t	readsegcnt, writesegcnt;
	int32_t	features;

	if (VNOP_IOCTL(devvp, DKIOCGETFEATURES, (caddr_t)&features, 0, context) == 0) {
		if (features & DK_FEATURE_FORCE_UNIT_ACCESS) {
			const char *name = vnode_getname_printable(devvp);
			jnl->flags |= JOURNAL_DO_FUA_WRITES;
			printf("jnl: %s: enabling FUA writes (features 0x%x)\n", name, features);
			vnode_putname_printable(name);
		}
		if (features & DK_FEATURE_UNMAP) {
			jnl->flags |= JOURNAL_USE_UNMAP;
		}

		if (features & DK_FEATURE_BARRIER) {
			jnl->flags |= JOURNAL_FEATURE_BARRIER;
		}
	}

	//
	// First check the max read size via several different mechanisms...
	//
	VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD, (caddr_t)&readmaxcnt, 0, context);

	if (VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD, (caddr_t)&readblockcnt, 0, context) == 0) {
		tmp_readmaxcnt = readblockcnt * phys_blksz;
		if (readmaxcnt == 0 || (readblockcnt > 0 && tmp_readmaxcnt < readmaxcnt)) {
			readmaxcnt = tmp_readmaxcnt;
		}
	}

	if (VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD, (caddr_t)&readsegcnt, 0, context)) {
		readsegcnt = 0;
	}

	if (readsegcnt > 0 && (readsegcnt * PAGE_SIZE) < readmaxcnt) {
		readmaxcnt = readsegcnt * PAGE_SIZE;
	}

	if (readmaxcnt == 0) {
		readmaxcnt = 128 * 1024;
	} else if (readmaxcnt > UINT32_MAX) {
		readmaxcnt = UINT32_MAX;
	}


	//
	// Now check the max writes size via several different mechanisms...
	//
	VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE, (caddr_t)&writemaxcnt, 0, context);

	if (VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE, (caddr_t)&writeblockcnt, 0, context) == 0) {
		tmp_writemaxcnt = writeblockcnt * phys_blksz;
		if (writemaxcnt == 0 || (writeblockcnt > 0 && tmp_writemaxcnt < writemaxcnt)) {
			writemaxcnt = tmp_writemaxcnt;
		}
	}

	if (VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE, (caddr_t)&writesegcnt, 0, context)) {
		writesegcnt = 0;
	}

	if (writesegcnt > 0 && (writesegcnt * PAGE_SIZE) < writemaxcnt) {
		writemaxcnt = writesegcnt * PAGE_SIZE;
	}

	if (writemaxcnt == 0) {
		writemaxcnt = 128 * 1024;
	} else if (writemaxcnt > UINT32_MAX) {
		writemaxcnt = UINT32_MAX;
	}

	jnl->max_read_size  = readmaxcnt;
	jnl->max_write_size = writemaxcnt;
	// printf("jnl: %s: max read/write: %lld k / %lld k\n",
	//	  jnl->jdev_name ? jnl->jdev_name : "unknown",
	//	  jnl->max_read_size/1024, jnl->max_write_size/1024);
}
1670
1671
1672 journal *
1673 journal_create(struct vnode *jvp,
1674 off_t offset,
1675 off_t journal_size,
1676 struct vnode *fsvp,
1677 size_t min_fs_blksz,
1678 int32_t flags,
1679 int32_t tbuffer_size,
1680 void (*flush)(void *arg),
1681 void *arg,
1682 struct mount *fsmount)
1683 {
1684 journal *jnl;
1685 uint32_t phys_blksz, new_txn_base;
1686 u_int32_t min_size;
1687 const char *jdev_name;
1688 	/*
1689 	 * Cap the journal max size at 2GB. On HFS, the journal will grow to
1690 	 * occupy a full allocation block if its current size is smaller than
1691 	 * the allocation block in which it resides. Once we hit the exabyte
1692 	 * filesystem range, allocation blocks are 2GB, hence the 2GB cap.
1693 	 */
1694
1695 jdev_name = vnode_getname_printable(jvp);
1696
1697 /* Get the real physical block size. */
1698 if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, vfs_context_kernel())) {
1699 goto cleanup_jdev_name;
1700 }
1701
1702 if (journal_size < (256*1024) || journal_size > (MAX_JOURNAL_SIZE)) {
1703 printf("jnl: %s: create: journal size %lld looks bogus.\n", jdev_name, journal_size);
1704 goto cleanup_jdev_name;
1705 }
1706
1707 min_size = phys_blksz * (phys_blksz / sizeof(block_info));
1708 /* Reject journals that are too small given the sector size of the device */
1709 if (journal_size < min_size) {
1710 printf("jnl: %s: create: journal size (%lld) too small given sector size of (%u)\n",
1711 jdev_name, journal_size, phys_blksz);
1712 goto cleanup_jdev_name;
1713 }
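	/*
	 * For illustration: with 512-byte sectors and (say) 16-byte
	 * block_info entries, this requires journal_size >= 512 * (512/16)
	 * = 16K. The exact figure depends on sizeof(block_info); the point
	 * is that the journal must have room for at least as many blocks
	 * as a single block_list_header sector can describe.
	 */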
1714
1715 if (phys_blksz > min_fs_blksz) {
1716 printf("jnl: %s: create: error: phys blksize %u bigger than min fs blksize %zd\n",
1717 jdev_name, phys_blksz, min_fs_blksz);
1718 goto cleanup_jdev_name;
1719 }
1720
1721 if ((journal_size % phys_blksz) != 0) {
1722 printf("jnl: %s: create: journal size 0x%llx is not an even multiple of block size 0x%ux\n",
1723 jdev_name, journal_size, phys_blksz);
1724 goto cleanup_jdev_name;
1725 }
1726
1727
1728 jnl = hfs_mallocz(sizeof(struct journal));
1729
1730 jnl->jdev = jvp;
1731 jnl->jdev_offset = offset;
1732 jnl->fsdev = fsvp;
1733 jnl->flush = flush;
1734 jnl->flush_arg = arg;
1735 jnl->flags = (flags & JOURNAL_OPTION_FLAGS_MASK);
1736 jnl->jdev_name = jdev_name;
1737 lck_mtx_init(&jnl->old_start_lock, jnl_mutex_group, jnl_lock_attr);
1738
1739 	// Keep a pointer to the mount around for use in IO throttling.
1740 jnl->fsmount = fsmount;
1741
1742 get_io_info(jvp, phys_blksz, jnl, vfs_context_kernel());
1743
1744 jnl->header_buf = hfs_malloc(phys_blksz);
1745 jnl->header_buf_size = phys_blksz;
1746
1747 jnl->jhdr = (journal_header *)jnl->header_buf;
1748 memset(jnl->jhdr, 0, sizeof(journal_header));
1749
1750 // we have to set this up here so that do_journal_io() will work
1751 jnl->jhdr->jhdr_size = phys_blksz;
1752
1753 //
1754 	// We try to read the journal header to see if there is already one
1755 	// out there. If there is, it's possible that it has transactions
1756 	// in it that we might replay if we happen to pick a sequence number
1757 	// a little less than the old one, a crash occurs, and the last txn
1758 	// written ends right at the start of a txn from the previous
1759 // incarnation of this file system. If all that happens we would
1760 // replay the transactions from the old file system and that would
1761 // destroy your disk. Although it is extremely unlikely for all those
1762 // conditions to happen, the probability is non-zero and the result is
1763 // severe - you lose your file system. Therefore if we find a valid
1764 // journal header and the sequence number is non-zero we write junk
1765 // over the entire journal so that there is no way we will encounter
1766 // any old transactions. This is slow but should be a rare event
1767 // since most tools erase the journal.
1768 //
1769 if ( read_journal_header(jnl, jnl->jhdr, phys_blksz) == phys_blksz
1770 && jnl->jhdr->magic == JOURNAL_HEADER_MAGIC
1771 && jnl->jhdr->sequence_num != 0) {
1772
1773 new_txn_base = (jnl->jhdr->sequence_num + (journal_size / phys_blksz) + (random() % 16384)) & 0x00ffffff;
1774 printf("jnl: %s: create: avoiding old sequence number 0x%x (0x%x)\n", jdev_name, jnl->jhdr->sequence_num, new_txn_base);
1775
1776 #if 0
1777 int i;
1778 off_t pos=0;
1779
1780 for(i = 1; i < journal_size / phys_blksz; i++) {
1781 pos = i*phys_blksz;
1782
1783 // we don't really care what data we write just so long
1784 // as it's not a valid transaction header. since we have
1785 // the header_buf sitting around we'll use that.
1786 write_journal_data(jnl, &pos, jnl->header_buf, phys_blksz);
1787 }
1788 printf("jnl: create: done clearing journal (i=%d)\n", i);
1789 #endif
1790 } else {
1791 new_txn_base = random() & 0x00ffffff;
1792 }
1793
1794 memset(jnl->header_buf, 0, phys_blksz);
1795
1796 jnl->jhdr->magic = JOURNAL_HEADER_MAGIC;
1797 jnl->jhdr->endian = ENDIAN_MAGIC;
1798 jnl->jhdr->start = phys_blksz; // start at block #1, block #0 is for the jhdr itself
1799 jnl->jhdr->end = phys_blksz;
1800 jnl->jhdr->size = journal_size;
1801 jnl->jhdr->jhdr_size = phys_blksz;
1802 size_up_tbuffer(jnl, tbuffer_size, phys_blksz);
1803
1804 jnl->active_start = jnl->jhdr->start;
1805
1806 // XXXdbg - for testing you can force the journal to wrap around
1807 // jnl->jhdr->start = jnl->jhdr->size - (phys_blksz*3);
1808 // jnl->jhdr->end = jnl->jhdr->size - (phys_blksz*3);
1809
1810 jnl->jhdr->sequence_num = new_txn_base;
1811
1812 lck_mtx_init(&jnl->jlock, jnl_mutex_group, jnl_lock_attr);
1813 lck_mtx_init(&jnl->flock, jnl_mutex_group, jnl_lock_attr);
1814 lck_rw_init(&jnl->trim_lock, jnl_mutex_group, jnl_lock_attr);
1815
1816
1817 jnl->flushing = FALSE;
1818 jnl->asyncIO = FALSE;
1819 jnl->flush_aborted = FALSE;
1820 jnl->writing_header = FALSE;
1821 jnl->async_trim = NULL;
1822 jnl->sequence_num = jnl->jhdr->sequence_num;
1823
1824 if (write_journal_header(jnl, 1, jnl->jhdr->sequence_num) != 0) {
1825 printf("jnl: %s: journal_create: failed to write journal header.\n", jdev_name);
1826 goto bad_write;
1827 }
1828
1829 goto journal_create_complete;
1830
1831
1832 bad_write:
1833 hfs_free(jnl->header_buf, phys_blksz);
1834 jnl->jhdr = NULL;
1835 hfs_free(jnl, sizeof(*jnl));
1836 cleanup_jdev_name:
1837 vnode_putname_printable(jdev_name);
1838 jnl = NULL;
1839 journal_create_complete:
1840 return jnl;
1841 }
1842
1843
1844 journal *
1845 journal_open(struct vnode *jvp,
1846 off_t offset,
1847 off_t journal_size,
1848 struct vnode *fsvp,
1849 size_t min_fs_blksz,
1850 int32_t flags,
1851 int32_t tbuffer_size,
1852 void (*flush)(void *arg),
1853 void *arg,
1854 struct mount *fsmount)
1855 {
1856 journal *jnl;
1857 uint32_t orig_blksz=0;
1858 uint32_t phys_blksz;
1859 u_int32_t min_size = 0;
1860 int orig_checksum, checksum;
1861 const char *jdev_name = vnode_getname_printable(jvp);
1862
1863 /* Get the real physical block size. */
1864 if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, vfs_context_kernel())) {
1865 goto cleanup_jdev_name;
1866 }
1867
1868 if (phys_blksz > min_fs_blksz) {
1869 printf("jnl: %s: open: error: phys blksize %u bigger than min fs blksize %zd\n",
1870 jdev_name, phys_blksz, min_fs_blksz);
1871 goto cleanup_jdev_name;
1872 }
1873
1874 if (journal_size < (256*1024) || journal_size > (1024*1024*1024)) {
1875 printf("jnl: %s: open: journal size %lld looks bogus.\n", jdev_name, journal_size);
1876 goto cleanup_jdev_name;
1877 }
1878
1879 min_size = phys_blksz * (phys_blksz / sizeof(block_info));
1880 /* Reject journals that are too small given the sector size of the device */
1881 if (journal_size < min_size) {
1882 printf("jnl: %s: open: journal size (%lld) too small given sector size of (%u)\n",
1883 jdev_name, journal_size, phys_blksz);
1884 goto cleanup_jdev_name;
1885 }
1886
1887 if ((journal_size % phys_blksz) != 0) {
1888 printf("jnl: %s: open: journal size 0x%llx is not an even multiple of block size 0x%x\n",
1889 jdev_name, journal_size, phys_blksz);
1890 goto cleanup_jdev_name;
1891 }
1892
1893 jnl = hfs_mallocz(sizeof(struct journal));
1894
1895 jnl->jdev = jvp;
1896 jnl->jdev_offset = offset;
1897 jnl->fsdev = fsvp;
1898 jnl->flush = flush;
1899 jnl->flush_arg = arg;
1900 jnl->flags = (flags & JOURNAL_OPTION_FLAGS_MASK);
1901 jnl->jdev_name = jdev_name;
1902 lck_mtx_init(&jnl->old_start_lock, jnl_mutex_group, jnl_lock_attr);
1903
1904 /* We hold the mount to later pass to the throttling code for IO
1905 * accounting.
1906 */
1907 jnl->fsmount = fsmount;
1908
1909 get_io_info(jvp, phys_blksz, jnl, vfs_context_kernel());
1910
1911 jnl->header_buf = hfs_malloc(phys_blksz);
1912 jnl->header_buf_size = phys_blksz;
1913
1914 jnl->jhdr = (journal_header *)jnl->header_buf;
1915 memset(jnl->jhdr, 0, sizeof(journal_header));
1916
1917 // we have to set this up here so that do_journal_io() will work
1918 jnl->jhdr->jhdr_size = phys_blksz;
1919
1920 if (read_journal_header(jnl, jnl->jhdr, phys_blksz) != phys_blksz) {
1921 printf("jnl: %s: open: could not read %u bytes for the journal header.\n",
1922 jdev_name, phys_blksz);
1923 goto bad_journal;
1924 }
1925
1926 /*
1927 * Check for a bad jhdr size after reading in the journal header.
1928 * The journal header length cannot be zero
1929 */
1930 if (jnl->jhdr->jhdr_size == 0) {
1931 printf("jnl: %s: open: bad jhdr size (%d) \n", jdev_name, jnl->jhdr->jhdr_size);
1932 goto bad_journal;
1933 }
1934
1935 orig_checksum = jnl->jhdr->checksum;
1936 jnl->jhdr->checksum = 0;
1937
1938 if (jnl->jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
1939 // do this before the swap since it's done byte-at-a-time
1940 orig_checksum = SWAP32(orig_checksum);
1941 checksum = calc_checksum((char *)jnl->jhdr, JOURNAL_HEADER_CKSUM_SIZE);
1942 swap_journal_header(jnl);
1943 jnl->flags |= JOURNAL_NEED_SWAP;
1944 } else {
1945 checksum = calc_checksum((char *)jnl->jhdr, JOURNAL_HEADER_CKSUM_SIZE);
1946 }
1947
1948 if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC && jnl->jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
1949 printf("jnl: %s: open: journal magic is bad (0x%x != 0x%x)\n",
1950 jnl->jdev_name, jnl->jhdr->magic, JOURNAL_HEADER_MAGIC);
1951 goto bad_journal;
1952 }
1953
1954 // only check if we're the current journal header magic value
1955 if (jnl->jhdr->magic == JOURNAL_HEADER_MAGIC) {
1956
1957 if (orig_checksum != checksum) {
1958 printf("jnl: %s: open: journal checksum is bad (0x%x != 0x%x)\n",
1959 jdev_name, orig_checksum, checksum);
1960
1961 //goto bad_journal;
1962 }
1963 }
1964
1965 // XXXdbg - convert old style magic numbers to the new one
1966 if (jnl->jhdr->magic == OLD_JOURNAL_HEADER_MAGIC) {
1967 jnl->jhdr->magic = JOURNAL_HEADER_MAGIC;
1968 }
1969
1970 if (phys_blksz != (size_t)jnl->jhdr->jhdr_size && jnl->jhdr->jhdr_size != 0) {
1971 /*
1972 * The volume has probably been resized (such that we had to adjust the
1973 * logical sector size), or copied to media with a different logical
1974 * sector size.
1975 *
1976 * Temporarily change the device's logical block size to match the
1977 * journal's header size. This will allow us to replay the journal
1978 * safely. If the replay succeeds, we will update the journal's header
1979 * size (later in this function).
1980 */
1981 orig_blksz = phys_blksz;
1982 phys_blksz = jnl->jhdr->jhdr_size;
1983 VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&phys_blksz, FWRITE, vfs_context_kernel());
1984 printf("jnl: %s: open: temporarily switched block size from %u to %u\n",
1985 jdev_name, orig_blksz, phys_blksz);
1986 }
1987
1988 if ( jnl->jhdr->start <= 0
1989 || jnl->jhdr->start > jnl->jhdr->size
1990 || jnl->jhdr->start > 1024*1024*1024) {
1991 printf("jnl: %s: open: jhdr start looks bad (0x%llx max size 0x%llx)\n",
1992 jdev_name, jnl->jhdr->start, jnl->jhdr->size);
1993 goto bad_journal;
1994 }
1995
1996 if ( jnl->jhdr->end <= 0
1997 || jnl->jhdr->end > jnl->jhdr->size
1998 || jnl->jhdr->end > 1024*1024*1024) {
1999 printf("jnl: %s: open: jhdr end looks bad (0x%llx max size 0x%llx)\n",
2000 jdev_name, jnl->jhdr->end, jnl->jhdr->size);
2001 goto bad_journal;
2002 }
2003
2004 if (jnl->jhdr->size < (256*1024) || jnl->jhdr->size > 1024*1024*1024) {
2005 printf("jnl: %s: open: jhdr size looks bad (0x%llx)\n", jdev_name, jnl->jhdr->size);
2006 goto bad_journal;
2007 }
2008
2009 // XXXdbg - can't do these checks because hfs writes all kinds of
2010 // non-uniform sized blocks even on devices that have a block size
2011 	// that is larger than 512 bytes (e.g. optical media w/2k blocks).
2012 // therefore these checks will fail and so we just have to punt and
2013 // do more relaxed checking...
2014 // XXXdbg if ((jnl->jhdr->start % jnl->jhdr->jhdr_size) != 0) {
2015 if ((jnl->jhdr->start % 512) != 0) {
2016 printf("jnl: %s: open: journal start (0x%llx) not a multiple of 512?\n",
2017 jdev_name, jnl->jhdr->start);
2018 goto bad_journal;
2019 }
2020
2021 //XXXdbg if ((jnl->jhdr->end % jnl->jhdr->jhdr_size) != 0) {
2022 if ((jnl->jhdr->end % 512) != 0) {
2023 printf("jnl: %s: open: journal end (0x%llx) not a multiple of block size (0x%x)?\n",
2024 jdev_name, jnl->jhdr->end, jnl->jhdr->jhdr_size);
2025 goto bad_journal;
2026 }
2027
2028 if (jnl->jhdr->blhdr_size < 0) {
2029 //throw out invalid sizes
2030 printf("jnl %s: open: blhdr size looks bogus! (%d) \n",
2031 jdev_name, jnl->jhdr->blhdr_size);
2032 goto bad_journal;
2033 }
2034
2035 // take care of replaying the journal if necessary
2036 if (flags & JOURNAL_RESET) {
2037 printf("jnl: %s: journal start/end pointers reset! (s 0x%llx e 0x%llx)\n",
2038 jdev_name, jnl->jhdr->start, jnl->jhdr->end);
2039 jnl->jhdr->start = jnl->jhdr->end;
2040 } else if (replay_journal(jnl) != 0) {
2041 printf("jnl: %s: journal_open: Error replaying the journal!\n", jdev_name);
2042 goto bad_journal;
2043 }
2044
2045 /*
2046 * When we get here, we know that the journal is empty (jnl->jhdr->start ==
2047 * jnl->jhdr->end). If the device's logical block size was different from
2048 * the journal's header size, then we can now restore the device's logical
2049 * block size and update the journal's header size to match.
2050 *
2051 * Note that we also adjust the journal's start and end so that they will
2052 * be aligned on the new block size. We pick a new sequence number to
2053 * avoid any problems if a replay found previous transactions using the old
2054 * journal header size. (See the comments in journal_create(), above.)
2055 */
2056
2057 if (orig_blksz != 0) {
2058 VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, vfs_context_kernel());
2059 phys_blksz = orig_blksz;
2060
2061 orig_blksz = 0;
2062
2063 jnl->jhdr->jhdr_size = phys_blksz;
2064 jnl->jhdr->start = phys_blksz;
2065 jnl->jhdr->end = phys_blksz;
2066 jnl->jhdr->sequence_num = (jnl->jhdr->sequence_num +
2067 (journal_size / phys_blksz) +
2068 (random() % 16384)) & 0x00ffffff;
2069
2070 if (write_journal_header(jnl, 1, jnl->jhdr->sequence_num)) {
2071 printf("jnl: %s: open: failed to update journal header size\n", jdev_name);
2072 goto bad_journal;
2073 }
2074 }
2075
2076 // make sure this is in sync!
2077 jnl->active_start = jnl->jhdr->start;
2078 jnl->sequence_num = jnl->jhdr->sequence_num;
2079
2080 // set this now, after we've replayed the journal
2081 size_up_tbuffer(jnl, tbuffer_size, phys_blksz);
2082
2083 // TODO: Does this need to change if the device's logical block size changed?
2084 if ((off_t)(jnl->jhdr->blhdr_size/sizeof(block_info)-1) > (jnl->jhdr->size/jnl->jhdr->jhdr_size)) {
2085 printf("jnl: %s: open: jhdr size and blhdr size are not compatible (0x%llx, %d, %d)\n", jdev_name, jnl->jhdr->size,
2086 jnl->jhdr->blhdr_size, jnl->jhdr->jhdr_size);
2087 goto bad_journal;
2088 }
2089
2090 lck_mtx_init(&jnl->jlock, jnl_mutex_group, jnl_lock_attr);
2091 lck_mtx_init(&jnl->flock, jnl_mutex_group, jnl_lock_attr);
2092 lck_rw_init(&jnl->trim_lock, jnl_mutex_group, jnl_lock_attr);
2093
2094 goto journal_open_complete;
2095
2096 bad_journal:
2097 if (orig_blksz != 0) {
2098 phys_blksz = orig_blksz;
2099 VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, vfs_context_kernel());
2100 printf("jnl: %s: open: restored block size after error\n", jdev_name);
2101 }
2102 hfs_free(jnl->header_buf, jnl->header_buf_size);
2103 hfs_free(jnl, sizeof(*jnl));
2104 cleanup_jdev_name:
2105 vnode_putname_printable(jdev_name);
2106 jnl = NULL;
2107 journal_open_complete:
2108 return jnl;
2109 }
2110
2111
2112 int
2113 journal_is_clean(struct vnode *jvp,
2114 off_t offset,
2115 off_t journal_size,
2116 struct vnode *fsvp,
2117 size_t min_fs_block_size)
2118 {
2119 journal jnl;
2120 uint32_t phys_blksz;
2121 int ret;
2122 int orig_checksum, checksum;
2123 const char *jdev_name = vnode_getname_printable(jvp);
2124
2125 /* Get the real physical block size. */
2126 if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, vfs_context_kernel())) {
2127 printf("jnl: %s: is_clean: failed to get device block size.\n", jdev_name);
2128 ret = EINVAL;
2129 goto cleanup_jdev_name;
2130 }
2131
2132 if (phys_blksz > (uint32_t)min_fs_block_size) {
2133 printf("jnl: %s: is_clean: error: phys blksize %d bigger than min fs blksize %zd\n",
2134 jdev_name, phys_blksz, min_fs_block_size);
2135 ret = EINVAL;
2136 goto cleanup_jdev_name;
2137 }
2138
2139 if (journal_size < (256*1024) || journal_size > (MAX_JOURNAL_SIZE)) {
2140 printf("jnl: %s: is_clean: journal size %lld looks bogus.\n", jdev_name, journal_size);
2141 ret = EINVAL;
2142 goto cleanup_jdev_name;
2143 }
2144
2145 if ((journal_size % phys_blksz) != 0) {
2146 printf("jnl: %s: is_clean: journal size 0x%llx is not an even multiple of block size 0x%x\n",
2147 jdev_name, journal_size, phys_blksz);
2148 ret = EINVAL;
2149 goto cleanup_jdev_name;
2150 }
2151
2152 memset(&jnl, 0, sizeof(jnl));
2153
2154 jnl.header_buf = hfs_malloc(phys_blksz);
2155 jnl.header_buf_size = phys_blksz;
2156
2157 get_io_info(jvp, phys_blksz, &jnl, vfs_context_kernel());
2158
2159 jnl.jhdr = (journal_header *)jnl.header_buf;
2160 memset(jnl.jhdr, 0, sizeof(journal_header));
2161
2162 jnl.jdev = jvp;
2163 jnl.jdev_offset = offset;
2164 jnl.fsdev = fsvp;
2165
2166 // we have to set this up here so that do_journal_io() will work
2167 jnl.jhdr->jhdr_size = phys_blksz;
2168
2169 if (read_journal_header(&jnl, jnl.jhdr, phys_blksz) != (unsigned)phys_blksz) {
2170 printf("jnl: %s: is_clean: could not read %d bytes for the journal header.\n",
2171 jdev_name, phys_blksz);
2172 ret = EINVAL;
2173 goto get_out;
2174 }
2175
2176 orig_checksum = jnl.jhdr->checksum;
2177 jnl.jhdr->checksum = 0;
2178
2179 if (jnl.jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
2180 // do this before the swap since it's done byte-at-a-time
2181 orig_checksum = SWAP32(orig_checksum);
2182 checksum = calc_checksum((char *)jnl.jhdr, JOURNAL_HEADER_CKSUM_SIZE);
2183 swap_journal_header(&jnl);
2184 jnl.flags |= JOURNAL_NEED_SWAP;
2185 } else {
2186 checksum = calc_checksum((char *)jnl.jhdr, JOURNAL_HEADER_CKSUM_SIZE);
2187 }
2188
2189 if (jnl.jhdr->magic != JOURNAL_HEADER_MAGIC && jnl.jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
2190 printf("jnl: %s: is_clean: journal magic is bad (0x%x != 0x%x)\n",
2191 jdev_name, jnl.jhdr->magic, JOURNAL_HEADER_MAGIC);
2192 ret = EINVAL;
2193 goto get_out;
2194 }
2195
2196 if (orig_checksum != checksum) {
2197 printf("jnl: %s: is_clean: journal checksum is bad (0x%x != 0x%x)\n", jdev_name, orig_checksum, checksum);
2198 ret = EINVAL;
2199 goto get_out;
2200 }
2201
2202 //
2203 // if the start and end are equal then the journal is clean.
2204 	// otherwise it has unreplayed transactions, so report it busy.
2205 //
2206 if (jnl.jhdr->start == jnl.jhdr->end) {
2207 ret = 0;
2208 } else {
2209 ret = EBUSY; // so the caller can differentiate an invalid journal from a "busy" one
2210 }
2211
2212 get_out:
2213 hfs_free(jnl.header_buf, jnl.header_buf_size);
2214 cleanup_jdev_name:
2215 vnode_putname_printable(jdev_name);
2216 return ret;
2217 }
2218
2219
2220 void
2221 journal_close(journal *jnl)
2222 {
2223 volatile off_t *start, *end;
2224 int counter=0;
2225
2226 CHECK_JOURNAL(jnl);
2227
2228 // set this before doing anything that would block so that
2229 // we start tearing things down properly.
2230 //
2231 jnl->flags |= JOURNAL_CLOSE_PENDING;
2232
2233 if (jnl->owner != current_thread()) {
2234 journal_lock(jnl);
2235 }
2236
2237 wait_condition(jnl, &jnl->flushing, "journal_close");
2238
2239 //
2240 // only write stuff to disk if the journal is still valid
2241 //
2242 if ((jnl->flags & JOURNAL_INVALID) == 0) {
2243
2244 if (jnl->active_tr) {
2245 /*
2246 * "journal_end_transaction" will fire the flush asynchronously
2247 */
2248 journal_end_transaction(jnl);
2249 }
2250
2251 // flush any buffered transactions
2252 if (jnl->cur_tr) {
2253 transaction *tr = jnl->cur_tr;
2254
2255 jnl->cur_tr = NULL;
2256 /*
2257 * "end_transaction" will wait for any in-progress flush to complete
2258 * before flushing "cur_tr" synchronously("must_wait" == TRUE)
2259 */
2260 end_transaction(tr, 1, NULL, NULL, FALSE, TRUE);
2261 }
2262 /*
2263 * if there was an "active_tr", make sure we wait for
2264 * it to flush if there was no "cur_tr" to process
2265 */
2266 wait_condition(jnl, &jnl->flushing, "journal_close");
2267
2268 //start = &jnl->jhdr->start;
2269 start = &jnl->active_start;
2270 end = &jnl->jhdr->end;
2271
2272 while (*start != *end && counter++ < 5000) {
2273 //printf("jnl: close: flushing the buffer cache (start 0x%llx end 0x%llx)\n", *start, *end);
2274 if (jnl->flush) {
2275 jnl->flush(jnl->flush_arg);
2276 }
2277 tsleep((caddr_t)jnl, PRIBIO, "jnl_close", 2);
2278 }
2279
2280 if (*start != *end) {
2281 printf("jnl: %s: close: buffer flushing didn't seem to flush out all the transactions! (0x%llx - 0x%llx)\n",
2282 jnl->jdev_name, *start, *end);
2283 }
2284
2285 // make sure this is in sync when we close the journal
2286 jnl->jhdr->start = jnl->active_start;
2287
2288 // if this fails there's not much we can do at this point...
2289 write_journal_header(jnl, 1, jnl->sequence_num);
2290 } else {
2291 // if we're here the journal isn't valid any more.
2292 // so make sure we don't leave any locked blocks lying around
2293 printf("jnl: %s: close: journal is invalid. aborting outstanding transactions\n", jnl->jdev_name);
2294 if (jnl->active_tr || jnl->cur_tr) {
2295 transaction *tr;
2296
2297 if (jnl->active_tr) {
2298 tr = jnl->active_tr;
2299 jnl->active_tr = NULL;
2300 } else {
2301 tr = jnl->cur_tr;
2302 jnl->cur_tr = NULL;
2303 }
2304 abort_transaction(jnl, tr);
2305
2306 if (jnl->active_tr || jnl->cur_tr) {
2307 panic("jnl: %s: close: jnl @ %p had both an active and cur tr\n", jnl->jdev_name, jnl);
2308 }
2309 }
2310 }
2311 wait_condition(jnl, &jnl->asyncIO, "journal_close");
2312
2313 free_old_stuff(jnl);
2314
2315 hfs_free(jnl->header_buf, jnl->header_buf_size);
2316 jnl->jhdr = (void *)0xbeefbabe;
2317
2318 vnode_putname_printable(jnl->jdev_name);
2319
2320 journal_unlock(jnl);
2321 lck_mtx_destroy(&jnl->old_start_lock, jnl_mutex_group);
2322 lck_mtx_destroy(&jnl->jlock, jnl_mutex_group);
2323 lck_mtx_destroy(&jnl->flock, jnl_mutex_group);
2324 hfs_free(jnl, sizeof(*jnl));
2325 }
2326
2327 static void
2328 dump_journal(journal *jnl)
2329 {
2330 transaction *ctr;
2331
2332 printf("journal for dev %s:", jnl->jdev_name);
2333 printf(" jdev_offset %.8llx\n", jnl->jdev_offset);
2334 printf(" magic: 0x%.8x\n", jnl->jhdr->magic);
2335 printf(" start: 0x%.8llx\n", jnl->jhdr->start);
2336 printf(" end: 0x%.8llx\n", jnl->jhdr->end);
2337 printf(" size: 0x%.8llx\n", jnl->jhdr->size);
2338 printf(" blhdr size: %d\n", jnl->jhdr->blhdr_size);
2339 printf(" jhdr size: %d\n", jnl->jhdr->jhdr_size);
2340 printf(" chksum: 0x%.8x\n", jnl->jhdr->checksum);
2341
2342 printf(" completed transactions:\n");
2343 for (ctr = jnl->completed_trs; ctr; ctr = ctr->next) {
2344 printf(" 0x%.8llx - 0x%.8llx\n", ctr->journal_start, ctr->journal_end);
2345 }
2346 }
2347
2348
2349
2350 static off_t
2351 free_space(journal *jnl)
2352 {
2353 off_t free_space_offset;
2354
2355 if (jnl->jhdr->start < jnl->jhdr->end) {
2356 free_space_offset = jnl->jhdr->size - (jnl->jhdr->end - jnl->jhdr->start) - jnl->jhdr->jhdr_size;
2357 } else if (jnl->jhdr->start > jnl->jhdr->end) {
2358 free_space_offset = jnl->jhdr->start - jnl->jhdr->end;
2359 } else {
2360 // journal is completely empty
2361 free_space_offset = jnl->jhdr->size - jnl->jhdr->jhdr_size;
2362 }
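	// Example with illustrative numbers: size 0x100000, jhdr_size 0x200,
	// start 0x1000, end 0x3000 -> 0x2000 bytes of live transactions, so
	// free_space_offset = 0x100000 - 0x2000 - 0x200. Reserving jhdr_size
	// keeps the header block out of the usable space and keeps a full
	// journal from looking identical to an empty one (start == end).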
2363
2364 return free_space_offset;
2365 }
2366
2367
2368 //
2369 // The journal must be locked on entry to this function.
2370 // The "desired_size" is in bytes.
2371 //
2372 static int
2373 check_free_space(journal *jnl, int desired_size, boolean_t *delayed_header_write, uint32_t sequence_num)
2374 {
2375 size_t i;
2376 int counter=0;
2377
2378 //printf("jnl: check free space (desired 0x%x, avail 0x%Lx)\n",
2379 // desired_size, free_space(jnl));
2380
2381 if (delayed_header_write)
2382 *delayed_header_write = FALSE;
2383
2384 while (1) {
2385 int old_start_empty;
2386
2387 // make sure there's space in the journal to hold this transaction
2388 if (free_space(jnl) > desired_size && jnl->old_start[0] == 0) {
2389 break;
2390 }
2391 if (counter++ == 5000) {
2392 dump_journal(jnl);
2393 panic("jnl: check_free_space: buffer flushing isn't working "
2394 "(jnl @ %p s %lld e %lld f %lld [active start %lld]).\n", jnl,
2395 jnl->jhdr->start, jnl->jhdr->end, free_space(jnl), jnl->active_start);
2396 }
2397 if (counter > 7500) {
2398 printf("jnl: %s: check_free_space: giving up waiting for free space.\n", jnl->jdev_name);
2399 return ENOSPC;
2400 }
2401
2402 //
2403 // here's where we lazily bump up jnl->jhdr->start. we'll consume
2404 // entries until there is enough space for the next transaction.
2405 //
2406 old_start_empty = 1;
2407 lock_oldstart(jnl);
2408
2409 for (i = 0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {
2410 int lcl_counter;
2411
2412 lcl_counter = 0;
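			// a set high bit marks an old_start entry whose transaction
			// is still being flushed asynchronously; we have to wait for
			// that flush to clear it before the entry's journal space
			// can be reclaimed.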
2413 while (jnl->old_start[i] & 0x8000000000000000LL) {
2414 if (lcl_counter++ > 10000) {
2415 panic("jnl: check_free_space: tr starting @ 0x%llx not flushing (jnl %p).\n",
2416 jnl->old_start[i], jnl);
2417 }
2418
2419 unlock_oldstart(jnl);
2420 if (jnl->flush) {
2421 jnl->flush(jnl->flush_arg);
2422 }
2423 tsleep((caddr_t)jnl, PRIBIO, "check_free_space1", 1);
2424 lock_oldstart(jnl);
2425 }
2426
2427 if (jnl->old_start[i] == 0) {
2428 continue;
2429 }
2430
2431 old_start_empty = 0;
2432 jnl->jhdr->start = jnl->old_start[i];
2433 jnl->old_start[i] = 0;
2434
2435 if (free_space(jnl) > desired_size) {
2436
2437 if (delayed_header_write)
2438 *delayed_header_write = TRUE;
2439 else {
2440 unlock_oldstart(jnl);
2441 write_journal_header(jnl, 1, sequence_num);
2442 lock_oldstart(jnl);
2443 }
2444 break;
2445 }
2446 }
2447 unlock_oldstart(jnl);
2448
2449 // if we bumped the start, loop and try again
2450 if (i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
2451 continue;
2452 } else if (old_start_empty) {
2453 //
2454 // if there is nothing in old_start anymore then we can
2455 // bump the jhdr->start to be the same as active_start
2456 // since it is possible there was only one very large
2457 // transaction in the old_start array. if we didn't do
2458 // this then jhdr->start would never get updated and we
2459 // would wind up looping until we hit the panic at the
2460 // start of the loop.
2461 //
2462 jnl->jhdr->start = jnl->active_start;
2463
2464 if (delayed_header_write)
2465 *delayed_header_write = TRUE;
2466 else
2467 write_journal_header(jnl, 1, sequence_num);
2468 continue;
2469 }
2470
2471
2472 		// if the file system gave us a flush function, call it so that
2473 // it can flush some blocks which hopefully will cause some transactions
2474 // to complete and thus free up space in the journal.
2475 if (jnl->flush) {
2476 jnl->flush(jnl->flush_arg);
2477 }
2478
2479 // wait for a while to avoid being cpu-bound (this will
2480 // put us to sleep for 10 milliseconds)
2481 tsleep((caddr_t)jnl, PRIBIO, "check_free_space2", 1);
2482 }
2483
2484 return 0;
2485 }
2486
2487 /*
2488 * Allocate a new active transaction.
2489 */
2490 static errno_t
2491 journal_allocate_transaction(journal *jnl)
2492 {
2493 transaction *tr;
2494 boolean_t was_vm_privileged = FALSE;
2495
2496 if (vfs_isswapmount(jnl->fsmount)) {
2497 /*
2498 * the disk driver can allocate memory on this path...
2499 * if we block waiting for memory, and there is enough pressure to
2500 * cause us to try and create a new swap file, we may end up deadlocking
2501 * due to waiting for the journal on the swap file creation path...
2502 * by making ourselves vm_privileged, we give ourselves the best chance
2503 * of not blocking
2504 */
2505 was_vm_privileged = set_vm_privilege(TRUE);
2506 }
2507 tr = hfs_mallocz(sizeof(transaction));
2508
2509 tr->tbuffer_size = jnl->tbuffer_size;
2510
2511 tr->tbuffer = hfs_malloc(tr->tbuffer_size);
2512
2513 if (vfs_isswapmount(jnl->fsmount) && (was_vm_privileged == FALSE))
2514 set_vm_privilege(FALSE);
2515
2516 // journal replay code checksum check depends on this.
2517 memset(tr->tbuffer, 0, BLHDR_CHECKSUM_SIZE);
2518 // Fill up the rest of the block with unimportant bytes (0x5a 'Z' chosen for visibility)
2519 memset(tr->tbuffer + BLHDR_CHECKSUM_SIZE, 0x5a, jnl->jhdr->blhdr_size - BLHDR_CHECKSUM_SIZE);
2520
2521 tr->blhdr = (block_list_header *)tr->tbuffer;
2522 tr->blhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
2523 tr->blhdr->num_blocks = 1; // accounts for this header block
2524 tr->blhdr->bytes_used = jnl->jhdr->blhdr_size;
2525 tr->blhdr->flags = BLHDR_CHECK_CHECKSUMS | BLHDR_FIRST_HEADER;
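	// The tbuffer layout this sets up: a block_list_header at the front,
	// followed by the data for each journaled block. binfo[0] is the
	// header's own slot (its bnum later doubles as the link to the next
	// block_list_header), which is why max_blocks is one less than the
	// number of block_info entries that fit in blhdr_size.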
2526
2527 tr->sequence_num = ++jnl->sequence_num;
2528 tr->num_blhdrs = 1;
2529 tr->total_bytes = jnl->jhdr->blhdr_size;
2530 tr->jnl = jnl;
2531
2532 jnl->active_tr = tr;
2533
2534 return 0;
2535 }
2536
2537 int
2538 journal_start_transaction(journal *jnl)
2539 {
2540 int ret;
2541
2542 CHECK_JOURNAL(jnl);
2543
2544 free_old_stuff(jnl);
2545
2546 if (jnl->flags & JOURNAL_INVALID) {
2547 return EINVAL;
2548 }
2549 if (jnl->owner == current_thread()) {
2550 if (jnl->active_tr == NULL) {
2551 panic("jnl: start_tr: active_tr is NULL (jnl @ %p, owner %p, current_thread %p\n",
2552 jnl, jnl->owner, current_thread());
2553 }
2554 jnl->nested_count++;
2555 return 0;
2556 }
2557
2558 journal_lock(jnl);
2559
2560 if (jnl->nested_count != 0 || jnl->active_tr != NULL) {
2561 panic("jnl: start_tr: owner %p, nested count %d, active_tr %p jnl @ %p\n",
2562 jnl->owner, jnl->nested_count, jnl->active_tr, jnl);
2563 }
2564
2565 jnl->nested_count = 1;
2566
2567 #if JOE
2568 // make sure there's room in the journal
2569 if (free_space(jnl) < jnl->tbuffer_size) {
2570
2571 KERNEL_DEBUG(0xbbbbc030 | DBG_FUNC_START, jnl, 0, 0, 0, 0);
2572
2573 // this is the call that really waits for space to free up
2574 // as well as updating jnl->jhdr->start
2575 if (check_free_space(jnl, jnl->tbuffer_size, NULL, jnl->sequence_num) != 0) {
2576 printf("jnl: %s: start transaction failed: no space\n", jnl->jdev_name);
2577 ret = ENOSPC;
2578 goto bad_start;
2579 }
2580 KERNEL_DEBUG(0xbbbbc030 | DBG_FUNC_END, jnl, 0, 0, 0, 0);
2581 }
2582 #endif
2583
2584 // if there's a buffered transaction, use it.
2585 if (jnl->cur_tr) {
2586 jnl->active_tr = jnl->cur_tr;
2587 jnl->cur_tr = NULL;
2588
2589 return 0;
2590 }
2591
2592 ret = journal_allocate_transaction(jnl);
2593 if (ret) {
2594 goto bad_start;
2595 }
2596
2597 // printf("jnl: start_tr: owner 0x%x new tr @ 0x%x\n", jnl->owner, jnl->active_tr);
2598
2599 return 0;
2600
2601 bad_start:
2602 jnl->nested_count = 0;
2603 journal_unlock(jnl);
2604
2605 return ret;
2606 }
2607
2608
2609 int
2610 journal_modify_block_start(journal *jnl, struct buf *bp)
2611 {
2612 transaction *tr;
2613 boolean_t was_vm_privileged = FALSE;
2614
2615 CHECK_JOURNAL(jnl);
2616
2617
2618 free_old_stuff(jnl);
2619
2620 if (jnl->flags & JOURNAL_INVALID) {
2621 return EINVAL;
2622 }
2623
2624 if (vfs_isswapmount(jnl->fsmount)) {
2625 /*
2626 * if we block waiting for memory, and there is enough pressure to
2627 * cause us to try and create a new swap file, we may end up deadlocking
2628 * due to waiting for the journal on the swap file creation path...
2629 * by making ourselves vm_privileged, we give ourselves the best chance
2630 * of not blocking
2631 */
2632 was_vm_privileged = set_vm_privilege(TRUE);
2633 }
2634
2635 // XXXdbg - for debugging I want this to be true. later it may
2636 // not be necessary.
2637 if ((buf_flags(bp) & B_META) == 0) {
2638 panic("jnl: modify_block_start: bp @ %p is not a meta-data block! (jnl %p)\n", bp, jnl);
2639 }
2640
2641 tr = jnl->active_tr;
2642 CHECK_TRANSACTION(tr);
2643
2644 if (jnl->owner != current_thread()) {
2645 panic("jnl: modify_block_start: called w/out a transaction! jnl %p, owner %p, curact %p\n",
2646 jnl, jnl->owner, current_thread());
2647 }
2648
2649 //printf("jnl: mod block start (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d; total bytes %d)\n",
2650 // bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes);
2651
2652 // can't allow blocks that aren't an even multiple of the
2653 // underlying block size.
2654 if ((buf_size(bp) % jnl->jhdr->jhdr_size) != 0) {
2655 uint32_t phys_blksz, bad=0;
2656
2657 if (VNOP_IOCTL(jnl->jdev, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, vfs_context_kernel())) {
2658 bad = 1;
2659 } else if (phys_blksz != (uint32_t)jnl->jhdr->jhdr_size) {
2660 if (phys_blksz < 512) {
2661 panic("jnl: mod block start: phys blksz %d is too small (%d, %d)\n",
2662 phys_blksz, buf_size(bp), jnl->jhdr->jhdr_size);
2663 }
2664
2665 if ((buf_size(bp) % phys_blksz) != 0) {
2666 bad = 1;
2667 } else if (phys_blksz < (uint32_t)jnl->jhdr->jhdr_size) {
2668 jnl->jhdr->jhdr_size = phys_blksz;
2669 } else {
2670 // the phys_blksz is now larger... need to realloc the jhdr
2671 char *new_header_buf;
2672
2673 printf("jnl: %s: phys blksz got bigger (was: %d/%d now %d)\n",
2674 jnl->jdev_name, jnl->header_buf_size, jnl->jhdr->jhdr_size, phys_blksz);
2675 new_header_buf = hfs_malloc(phys_blksz);
2676 memcpy(new_header_buf, jnl->header_buf, jnl->header_buf_size);
2677 memset(&new_header_buf[jnl->header_buf_size], 0x18, (phys_blksz - jnl->header_buf_size));
2678 hfs_free(jnl->header_buf, jnl->header_buf_size);
2679 jnl->header_buf = new_header_buf;
2680 jnl->header_buf_size = phys_blksz;
2681
2682 jnl->jhdr = (journal_header *)jnl->header_buf;
2683 jnl->jhdr->jhdr_size = phys_blksz;
2684 }
2685 } else {
2686 bad = 1;
2687 }
2688
2689 if (bad) {
2690 panic("jnl: mod block start: bufsize %d not a multiple of block size %d\n",
2691 buf_size(bp), jnl->jhdr->jhdr_size);
2692
2693 if (vfs_isswapmount(jnl->fsmount) && (was_vm_privileged == FALSE))
2694 set_vm_privilege(FALSE);
2695 return -1;
2696 }
2697 }
2698
2699 // make sure that this transaction isn't bigger than the whole journal
2700 if (tr->total_bytes+buf_size(bp) >= (jnl->jhdr->size - jnl->jhdr->jhdr_size)) {
2701 panic("jnl: transaction too big (%d >= %lld bytes, bufsize %d, tr %p bp %p)\n",
2702 tr->total_bytes, (tr->jnl->jhdr->size - jnl->jhdr->jhdr_size), buf_size(bp), tr, bp);
2703
2704 if (vfs_isswapmount(jnl->fsmount) && (was_vm_privileged == FALSE))
2705 set_vm_privilege(FALSE);
2706 return -1;
2707 }
2708
2709 #if DEBUG
2710 const int f = buf_flags(bp);
2711 hfs_assert(!ISSET(f, B_DELWRI) || ISSET(f, B_LOCKED));
2712 #endif
2713
2714 buf_setflags(bp, B_LOCKED);
2715
2716 if (vfs_isswapmount(jnl->fsmount) && (was_vm_privileged == FALSE))
2717 set_vm_privilege(FALSE);
2718
2719 return 0;
2720 }
2721
2722 int
2723 journal_modify_block_abort(journal *jnl, struct buf *bp)
2724 {
2725 transaction *tr;
2726 block_list_header *blhdr;
2727 int i;
2728
2729 CHECK_JOURNAL(jnl);
2730
2731 free_old_stuff(jnl);
2732
2733 tr = jnl->active_tr;
2734
2735 //
2736 // if there's no active transaction then we just want to
2737 // call buf_brelse() and return since this is just a block
2738 // that happened to be modified as part of another tr.
2739 //
2740 if (tr == NULL) {
2741 buf_brelse(bp);
2742 return 0;
2743 }
2744
2745 if (jnl->flags & JOURNAL_INVALID) {
2746 /* Still need to buf_brelse(). Callers assume we consume the bp. */
2747 buf_brelse(bp);
2748 return EINVAL;
2749 }
2750
2751 CHECK_TRANSACTION(tr);
2752
2753 if (jnl->owner != current_thread()) {
2754 panic("jnl: modify_block_abort: called w/out a transaction! jnl %p, owner %p, curact %p\n",
2755 jnl, jnl->owner, current_thread());
2756 }
2757
2758 // printf("jnl: modify_block_abort: tr 0x%x bp 0x%x\n", jnl->active_tr, bp);
2759
2760 // first check if it's already part of this transaction
2761 for (blhdr = tr->blhdr; blhdr; blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) {
2762 for (i = 1; i < blhdr->num_blocks; i++) {
2763 if (bp == blhdr->binfo[i].u.bp) {
2764 break;
2765 }
2766 }
2767
2768 if (i < blhdr->num_blocks) {
2769 break;
2770 }
2771 }
2772
2773 //
2774 // if blhdr is null, then this block has only had modify_block_start
2775 // called on it as part of the current transaction. that means that
2776 // it is ok to clear the LOCKED bit since it hasn't actually been
2777 // modified. if blhdr is non-null then modify_block_end was called
2778 // on it and so we need to keep it locked in memory.
2779 //
2780 if (blhdr == NULL) {
2781 buf_clearflags(bp, B_LOCKED);
2782 }
2783
2784 buf_brelse(bp);
2785 return 0;
2786 }
2787
2788
2789 int
2790 journal_modify_block_end(journal *jnl, struct buf *bp, void (*func)(buf_t bp, void *arg), void *arg)
2791 {
2792 int i = 1;
2793 int tbuffer_offset=0;
2794 block_list_header *blhdr, *prev=NULL;
2795 transaction *tr;
2796
2797 CHECK_JOURNAL(jnl);
2798
2799 free_old_stuff(jnl);
2800
2801 if (jnl->flags & JOURNAL_INVALID) {
2802 /* Still need to buf_brelse(). Callers assume we consume the bp. */
2803 buf_brelse(bp);
2804 return EINVAL;
2805 }
2806
2807 tr = jnl->active_tr;
2808 CHECK_TRANSACTION(tr);
2809
2810 if (jnl->owner != current_thread()) {
2811 panic("jnl: modify_block_end: called w/out a transaction! jnl %p, owner %p, curact %p\n",
2812 jnl, jnl->owner, current_thread());
2813 }
2814
2815 //printf("jnl: mod block end: (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d, total bytes %d)\n",
2816 // bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes);
2817
2818 if ((buf_flags(bp) & B_LOCKED) == 0) {
2819 panic("jnl: modify_block_end: bp %p not locked! jnl @ %p\n", bp, jnl);
2820 }
2821
2822 // first check if it's already part of this transaction
2823 for (blhdr = tr->blhdr; blhdr; prev = blhdr, blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) {
2824 tbuffer_offset = jnl->jhdr->blhdr_size;
2825
2826 for (i = 1; i < blhdr->num_blocks; i++) {
2827 if (bp == blhdr->binfo[i].u.bp) {
2828 break;
2829 }
2830 if (blhdr->binfo[i].bnum != (off_t)-1) {
2831 tbuffer_offset += buf_size(blhdr->binfo[i].u.bp);
2832 } else {
2833 tbuffer_offset += blhdr->binfo[i].u.bi.bsize;
2834 }
2835 }
2836
2837 if (i < blhdr->num_blocks) {
2838 break;
2839 }
2840 }
2841
2842 if (blhdr == NULL
2843 && prev
2844 && (prev->num_blocks+1) <= prev->max_blocks
2845 && (prev->bytes_used+buf_size(bp)) <= (uint32_t)tr->tbuffer_size) {
2846 blhdr = prev;
2847
2848 } else if (blhdr == NULL) {
2849 block_list_header *nblhdr;
2850 if (prev == NULL) {
2851 panic("jnl: modify block end: no way man, prev == NULL?!?, jnl %p, bp %p\n", jnl, bp);
2852 }
2853
2854 // we got to the end of the list, didn't find the block and there's
2855 // no room in the block_list_header pointed to by prev
2856
2857 // we allocate another tbuffer and link it in at the end of the list
2858 // through prev->binfo[0].bnum. that's a skanky way to do things but
2859 // avoids having yet another linked list of small data structures to manage.
2860
2861 nblhdr = hfs_malloc(tr->tbuffer_size);
2862
2863 // journal replay code checksum check depends on this.
2864 memset(nblhdr, 0, BLHDR_CHECKSUM_SIZE);
2865 // Fill up the rest of the block with unimportant bytes
2866 		memset((char *)nblhdr + BLHDR_CHECKSUM_SIZE, 0x5a, jnl->jhdr->blhdr_size - BLHDR_CHECKSUM_SIZE);
2867
2868 // initialize the new guy
2869 nblhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
2870 nblhdr->num_blocks = 1; // accounts for this header block
2871 nblhdr->bytes_used = jnl->jhdr->blhdr_size;
2872 nblhdr->flags = BLHDR_CHECK_CHECKSUMS;
2873
2874 tr->num_blhdrs++;
2875 tr->total_bytes += jnl->jhdr->blhdr_size;
2876
2877 // then link him in at the end
2878 prev->binfo[0].bnum = (off_t)((long)nblhdr);
2879
2880 // and finally switch to using the new guy
2881 blhdr = nblhdr;
2882 tbuffer_offset = jnl->jhdr->blhdr_size;
2883 i = 1;
2884 }
2885
2886
2887 if ((i+1) > blhdr->max_blocks) {
2888 panic("jnl: modify_block_end: i = %d, max_blocks %d\n", i, blhdr->max_blocks);
2889 }
2890
2891 // if this is true then this is a new block we haven't seen
2892 if (i >= blhdr->num_blocks) {
2893 int bsize;
2894 vnode_t vp;
2895
2896 vp = buf_vnode(bp);
2897 if (vnode_ref(vp)) {
2898 // Nobody checks the return values, so...
2899 jnl->flags |= JOURNAL_INVALID;
2900
2901 buf_brelse(bp);
2902
2903 // We're probably here due to a force unmount, so EIO is appropriate
2904 return EIO;
2905 }
2906
2907 bsize = buf_size(bp);
2908
2909 blhdr->binfo[i].bnum = (off_t)(buf_blkno(bp));
2910 blhdr->binfo[i].u.bp = bp;
2911
2912 KERNEL_DEBUG_CONSTANT(0x3018004, kdebug_vnode(vp), blhdr->binfo[i].bnum, bsize, 0, 0);
2913 /*
2914 * Update the per-task logical counter for metadata write.
2915 * We use (2 * bsize) to account for the write to the journal and the
2916 * corresponding write to the btree.
2917 */
2918 task_update_logical_writes(current_task(), (2 * bsize), TASK_WRITE_METADATA, vp);
2919 /*
2920 * Update the physical writes counter for metadata writes.
2921 * We use (2 * bsize) to account for the write to the on-disk journal
2922 * followed by write to actual location later.
2923 */
2924 task_update_physical_writes(current_task(), TASK_PHYSICAL_WRITE_METADATA,
2925 (2 * bsize),
2926 TASK_BALANCE_CREDIT);
2927
2928 if (func) {
2929 void (*old_func)(buf_t, void *)=NULL, *old_arg=NULL;
2930
2931 buf_setfilter(bp, func, arg, &old_func, &old_arg);
2932 if (old_func != NULL && old_func != func) {
2933 panic("jnl: modify_block_end: old func %p / arg %p (func %p)", old_func, old_arg, func);
2934 }
2935 }
2936
2937 blhdr->bytes_used += bsize;
2938 tr->total_bytes += bsize;
2939
2940 blhdr->num_blocks++;
2941 }
2942 buf_bdwrite(bp);
2943
2944 return 0;
2945 }
2946
2947 int
2948 journal_kill_block(journal *jnl, struct buf *bp)
2949 {
2950 int i;
2951 int bflags;
2952 block_list_header *blhdr;
2953 transaction *tr;
2954
2955 CHECK_JOURNAL(jnl);
2956
2957 free_old_stuff(jnl);
2958
2959 if (jnl->flags & JOURNAL_INVALID) {
2960 buf_brelse(bp);
2961 return 0;
2962 }
2963
2964 tr = jnl->active_tr;
2965 CHECK_TRANSACTION(tr);
2966
2967 if (jnl->owner != current_thread()) {
2968 panic("jnl: modify_block_end: called w/out a transaction! jnl %p, owner %p, curact %p\n",
2969 jnl, jnl->owner, current_thread());
2970 }
2971
2972 bflags = buf_flags(bp);
2973
2974 if ( !(bflags & B_LOCKED))
2975 panic("jnl: modify_block_end: called with bp not B_LOCKED");
2976
2977 /*
2978 * bp must be BL_BUSY and B_LOCKED
2979 * first check if it's already part of this transaction
2980 */
2981 for (blhdr = tr->blhdr; blhdr; blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) {
2982
2983 for (i = 1; i < blhdr->num_blocks; i++) {
2984 if (bp == blhdr->binfo[i].u.bp) {
2985 vnode_t vp;
2986
2987 buf_clearflags(bp, B_LOCKED);
2988
2989 // this undoes the vnode_ref() in journal_modify_block_end()
2990 vp = buf_vnode(bp);
2991 vnode_rele_ext(vp, 0, 1);
2992
2993 				// if the block has the DELWRI and FILTER bits set, then
2994 				// things are seriously weird. if it was part of another
2995 				// transaction then journal_modify_block_start() should
2996 				// have forced it to be written.
2997 //
2998 //if ((bflags & B_DELWRI) && (bflags & B_FILTER)) {
2999 // panic("jnl: kill block: this defies all logic! bp 0x%x\n", bp);
3000 //} else {
3001 tr->num_killed += buf_size(bp);
3002 //}
3003 blhdr->binfo[i].bnum = (off_t)-1;
3004 blhdr->binfo[i].u.bp = NULL;
3005 blhdr->binfo[i].u.bi.bsize = buf_size(bp);
3006
3007 buf_markinvalid(bp);
3008 buf_brelse(bp);
3009
3010 return 0;
3011 }
3012 }
3013 }
3014
3015 /*
3016 * We did not find the block in any transaction buffer but we still
3017 * need to release it or else it will be left locked forever.
3018 */
3019 buf_brelse(bp);
3020
3021 return 0;
3022 }
3023
3024 /*
3025 ;________________________________________________________________________________
3026 ;
3027 ; Routine: journal_trim_set_callback
3028 ;
3029 ; Function: Provide the journal with a routine to be called back when a
3030 ; TRIM has (or would have) been issued to the device. That
3031 ; is, the transaction has been flushed to the device, and the
3032 ; blocks freed by the transaction are now safe for reuse.
3033 ;
3034 ; CAUTION: If the journal becomes invalid (e.g., due to an I/O
3035 ; error when trying to write to the journal), this callback
3036 ; will stop getting called, even if extents got freed before
3037 ; the journal became invalid!
3038 ;
3039 ; Input Arguments:
3040 ; jnl - The journal structure for the filesystem.
3041 ; callback - The function to call when the TRIM is complete.
3042 ; arg - An argument to be passed to callback.
3043 ;________________________________________________________________________________
3044 */
3045 void
3046 journal_trim_set_callback(journal *jnl, jnl_trim_callback_t callback, void *arg)
3047 {
3048 jnl->trim_callback = callback;
3049 jnl->trim_callback_arg = arg;
3050 }
3051
3052
3053 /*
3054 ;________________________________________________________________________________
3055 ;
3056 ; Routine: journal_trim_realloc
3057 ;
3058 ; Function: Increase the amount of memory allocated for the list of extents
3059 ; to be unmapped (trimmed). This routine is called when adding an
3060 ; extent to a list that already occupies all of the space allocated
3061 ; to it. This routine returns ENOMEM
3062 ; if unable to allocate more space, or 0 if the extent list was
3063 ; grown successfully.
3064 ;
3065 ; Input Arguments:
3066 ; trim - The trim list to be resized.
3067 ;
3068 ; Output:
3069 ; (result) - ENOMEM or 0.
3070 ;
3071 ; Side effects:
3072 ; The allocated_count and extents fields of tr->trim are updated
3073 ; if the function returned 0.
3074 ;________________________________________________________________________________
3075 */
3076 static int
3077 trim_realloc(journal *jnl, struct jnl_trim_list *trim)
3078 {
3079 void *new_extents;
3080 uint32_t new_allocated_count;
3081 boolean_t was_vm_privileged = FALSE;
3082
3083 if (jnl_kdebug)
3084 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_REALLOC | DBG_FUNC_START, obfuscate_addr(trim), 0, trim->allocated_count, trim->extent_count, 0);
3085
3086 new_allocated_count = trim->allocated_count + JOURNAL_DEFAULT_TRIM_EXTENTS;
3087
3088 if (vfs_isswapmount(jnl->fsmount)) {
3089 /*
3090 * if we block waiting for memory, and there is enough pressure to
3091 * cause us to try and create a new swap file, we may end up deadlocking
3092 * due to waiting for the journal on the swap file creation path...
3093 * by making ourselves vm_privileged, we give ourselves the best chance
3094 * of not blocking
3095 */
3096 was_vm_privileged = set_vm_privilege(TRUE);
3097 }
3098 new_extents = hfs_malloc(new_allocated_count * sizeof(dk_extent_t));
3099 if (vfs_isswapmount(jnl->fsmount) && (was_vm_privileged == FALSE))
3100 set_vm_privilege(FALSE);
3101
3102 if (new_extents == NULL) {
3103 printf("jnl: trim_realloc: unable to grow extent list!\n");
3104 /*
3105 * Since we could be called when allocating space previously marked
3106 * to be trimmed, we need to empty out the list to be safe.
3107 */
3108 trim->extent_count = 0;
3109 if (jnl_kdebug)
3110 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_REALLOC | DBG_FUNC_END, ENOMEM, 0, trim->allocated_count, 0, 0);
3111 return ENOMEM;
3112 }
3113
3114 /* Copy the old extent list to the newly allocated list. */
3115 if (trim->extents != NULL) {
3116 memmove(new_extents,
3117 trim->extents,
3118 trim->allocated_count * sizeof(dk_extent_t));
3119 hfs_free(trim->extents, trim->allocated_count * sizeof(dk_extent_t));
3120 }
3121
3122 trim->allocated_count = new_allocated_count;
3123 trim->extents = new_extents;
3124
3125 if (jnl_kdebug)
3126 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_REALLOC | DBG_FUNC_END, 0, 0, new_allocated_count, trim->extent_count, 0);
3127
3128 return 0;
3129 }
3130
3131 /*
3132 ;________________________________________________________________________________
3133 ;
3134 ; Routine: trim_search_extent
3135 ;
3136 ; Function: Search the given extent list to see if any of its extents
3137 ; overlap the given extent.
3138 ;
3139 ; Input Arguments:
3140 ; trim - The trim list to be searched.
3141 ; offset - The first byte of the range to be searched for.
3142 ; length - The number of bytes of the extent being searched for.
3143 ; overlap_start - (output) start of the overlapping extent, if any
3144 ; overlap_len - (output) length of the overlapping extent, if any
3145 ;
3146 ; Output:
3147 ; (result) - TRUE if one or more extents overlap, FALSE otherwise.
3148 ;________________________________________________________________________________
3149 */
3150 static int
3151 trim_search_extent(struct jnl_trim_list *trim, uint64_t offset,
3152 uint64_t length, uint64_t *overlap_start, uint64_t *overlap_len)
3153 {
3154 uint64_t end = offset + length;
3155 uint32_t lower = 0; /* Lowest index to search */
3156 uint32_t upper = trim->extent_count; /* Highest index to search + 1 */
3157 uint32_t middle;
3158
3159 /* A binary search over the extent list. */
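	/*
	 * This works because journal_trim_add_extent keeps the list sorted
	 * by offset with no overlapping or adjacent entries, so each probe
	 * can safely discard half of the remaining candidates.
	 */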
3160 while (lower < upper) {
3161 middle = (lower + upper) / 2;
3162
3163 if (trim->extents[middle].offset >= end)
3164 upper = middle;
3165 else if (trim->extents[middle].offset + trim->extents[middle].length <= offset)
3166 lower = middle + 1;
3167 else {
3168 if (overlap_start) {
3169 *overlap_start = trim->extents[middle].offset;
3170 }
3171 if (overlap_len) {
3172 *overlap_len = trim->extents[middle].length;
3173 }
3174 return TRUE;
3175 }
3176 }
3177
3178 return FALSE;
3179 }
3180
3181
3182 /*
3183 ;________________________________________________________________________________
3184 ;
3185 ; Routine: journal_trim_add_extent
3186 ;
3187 ; Function: Keep track of extents that have been freed as part of this
3188 ; transaction. If the underlying device supports TRIM (UNMAP),
3189 ; then those extents will be trimmed/unmapped once the
3190 ; transaction has been written to the journal. (For example,
3191 ; SSDs can support trim/unmap and avoid having to recopy those
3192 ; blocks when doing wear leveling, and may reuse the same
3193 ; physical blocks for different logical blocks.)
3194 ;
3195 ; HFS also uses this, in combination with journal_trim_set_callback,
3196 ; to add recently freed extents to its free extent cache, but
3197 ; only after the transaction that freed them is committed to
3198 ; disk. (This reduces the chance of overwriting live data in
3199 ; a way that causes data loss if a transaction never gets
3200 ; written to the journal.)
3201 ;
3202 ; Input Arguments:
3203 ; jnl - The journal for the volume containing the byte range.
3204 ; offset - The first byte of the range to be trimmed.
3205 ; length - The number of bytes of the extent being trimmed.
3206 ;________________________________________________________________________________
3207 */
3208 int
3209 journal_trim_add_extent(journal *jnl, uint64_t offset, uint64_t length)
3210 {
3211 uint64_t end;
3212 transaction *tr;
3213 dk_extent_t *extent;
3214 uint32_t insert_index;
3215 uint32_t replace_count;
3216
3217 CHECK_JOURNAL(jnl);
3218
3219 /* TODO: Is it OK to manipulate the trim list even if JOURNAL_INVALID is set? I think so... */
3220 if (jnl->flags & JOURNAL_INVALID) {
3221 return EINVAL;
3222 }
3223
3224 tr = jnl->active_tr;
3225 CHECK_TRANSACTION(tr);
3226
3227 if (jnl_kdebug)
3228 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_ADD | DBG_FUNC_START, obfuscate_addr(jnl), offset, length, tr->trim.extent_count, 0);
3229
3230 if (jnl->owner != current_thread()) {
3231 panic("jnl: trim_add_extent: called w/out a transaction! jnl %p, owner %p, curact %p\n",
3232 jnl, jnl->owner, current_thread());
3233 }
3234
3235 free_old_stuff(jnl);
3236
3237 end = offset + length;
3238
3239 /*
3240 * Find the range of existing extents that can be combined with the
3241 * input extent. We start by counting the number of extents that end
3242 * strictly before the input extent, then count the number of extents
3243 * that overlap or are contiguous with the input extent.
3244 */
3245 extent = tr->trim.extents;
3246 insert_index = 0;
3247 while (insert_index < tr->trim.extent_count && extent->offset + extent->length < offset) {
3248 ++insert_index;
3249 ++extent;
3250 }
3251 replace_count = 0;
3252 while (insert_index + replace_count < tr->trim.extent_count && extent->offset <= end) {
3253 ++replace_count;
3254 ++extent;
3255 }
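	/*
	 * Worked example (illustrative): with extents {100,50} and {200,50}
	 * already in the list, adding offset=140 length=70 (end=210) leaves
	 * insert_index=0 (since 100+50 is not strictly before 140) and
	 * replace_count=2 (both existing offsets are <= 210), so the code
	 * below collapses everything into the single extent {100,150}.
	 */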
3256
3257 /*
3258 * If none of the existing extents can be combined with the input extent,
3259 * then just insert it in the list (before item number insert_index).
3260 */
3261 if (replace_count == 0) {
3262 /* If the list was already full, we need to grow it. */
3263 if (tr->trim.extent_count == tr->trim.allocated_count) {
3264 if (trim_realloc(jnl, &tr->trim) != 0) {
3265 printf("jnl: trim_add_extent: out of memory!");
3266 if (jnl_kdebug)
3267 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_ADD | DBG_FUNC_END, ENOMEM, 0, 0, tr->trim.extent_count, 0);
3268 return ENOMEM;
3269 }
3270 }
3271
3272 /* Shift any existing extents with larger offsets. */
3273 if (insert_index < tr->trim.extent_count) {
3274 memmove(&tr->trim.extents[insert_index+1],
3275 &tr->trim.extents[insert_index],
3276 (tr->trim.extent_count - insert_index) * sizeof(dk_extent_t));
3277 }
3278 tr->trim.extent_count++;
3279
3280 /* Store the new extent in the list. */
3281 tr->trim.extents[insert_index].offset = offset;
3282 tr->trim.extents[insert_index].length = length;
3283
3284 /* We're done. */
3285 if (jnl_kdebug)
3286 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_ADD | DBG_FUNC_END, 0, 0, 0, tr->trim.extent_count, 0);
3287 return 0;
3288 }
3289
3290 /*
3291 * Update extent number insert_index to be the union of the input extent
3292 * and all of the replaced extents.
3293 */
3294 if (tr->trim.extents[insert_index].offset < offset)
3295 offset = tr->trim.extents[insert_index].offset;
3296 extent = &tr->trim.extents[insert_index + replace_count - 1];
3297 if (extent->offset + extent->length > end)
3298 end = extent->offset + extent->length;
3299 tr->trim.extents[insert_index].offset = offset;
3300 tr->trim.extents[insert_index].length = end - offset;
3301
3302 /*
3303 * If we were replacing more than one existing extent, then shift any
3304 * extents with larger offsets, and update the count of extents.
3305 *
3306 * We're going to leave extent #insert_index alone since it was just updated, above.
3307 * We need to move extents from index (insert_index + replace_count) through the end of
3308 * the list by (replace_count - 1) positions so that they overwrite extent #(insert_index + 1).
3309 */
3310 if (replace_count > 1 && (insert_index + replace_count) < tr->trim.extent_count) {
3311 memmove(&tr->trim.extents[insert_index + 1],
3312 &tr->trim.extents[insert_index + replace_count],
3313 (tr->trim.extent_count - insert_index - replace_count) * sizeof(dk_extent_t));
3314 }
3315 tr->trim.extent_count -= replace_count - 1;
3316
3317 if (jnl_kdebug)
3318 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_ADD | DBG_FUNC_END, 0, 0, 0, tr->trim.extent_count, 0);
3319 return 0;
3320 }
3321
3322 /*
3323 * journal_trim_extent_overlap
3324 *
3325 * Return 1 if there are any pending TRIMs that overlap with the given offset and length
3326 * Return 0 otherwise.
3327 */
3328
3329 int journal_trim_extent_overlap (journal *jnl, uint64_t offset, uint64_t length, uint64_t *end) {
3330 transaction *tr = NULL;
3331 int overlap = 0;
3332
3333 uint64_t overlap_start;
3334 uint64_t overlap_len;
3335 tr = jnl->active_tr;
3336 CHECK_TRANSACTION(tr);
3337
3338 /*
3339 * There are two lists that need to be examined for potential overlaps:
3340 *
3341 * The first is the current transaction. Since this function requires that
3342 * a transaction be active when this is called, this is the "active_tr"
3343 * pointer in the journal struct. This has a trimlist pointer which needs
3344 * to be searched.
3345 */
3346 overlap = trim_search_extent (&tr->trim, offset, length, &overlap_start, &overlap_len);
3347 if (overlap == 0) {
3348 /*
3349 * The second is the async trim list, which is only searched if the current
3350 * transaction group (active transaction) did not overlap with our target
3351 * extent. This async trim list is the set of all previously
3352 * committed transaction groups whose I/Os are now in-flight. We need to hold the
3353 * trim lock in order to search this list. If we grab the list before the
3354 * TRIM has completed, then we will search it. If it is grabbed AFTER the
3355 * TRIM has completed, then the pointer will have been zeroed out and we won't
3356 * have to check anything.
3357 */
3358 lck_rw_lock_shared (&jnl->trim_lock);
3359 if (jnl->async_trim != NULL) {
3360 overlap = trim_search_extent(jnl->async_trim, offset, length, &overlap_start, &overlap_len);
3361 }
3362 lck_rw_unlock_shared (&jnl->trim_lock);
3363 }
3364
3365 if (overlap) {
3366 /* compute the end (min) of the overlapping range */
3367 if ( (overlap_start + overlap_len) < (offset + length)) {
3368 *end = (overlap_start + overlap_len);
3369 }
3370 else {
3371 *end = (offset + length);
3372 }
3373 }
3374
3375
3376 return overlap;
3377 }
3378
3379 /*
3380 * journal_request_immediate_flush
3381 *
3382 * FS requests that the journal flush immediately upon the
3383 * active transaction's completion.
3384 *
3385 * Returns 0 if the operation succeeds
3386 * Returns EPERM if we failed to leave the hint
3387 */
3388 int
3389 journal_request_immediate_flush (journal *jnl) {
3390
3391 transaction *tr = NULL;
3392 /*
3393 * Is a transaction still in progress? This hint can
3394 * only be left while a transaction is open.
3395 */
3396 tr = jnl->active_tr;
3397 if (tr != NULL) {
3398 CHECK_TRANSACTION(tr);
3399 tr->flush_on_completion = TRUE;
3400 }
3401 else {
3402 return EPERM;
3403 }
3404 return 0;
3405 }
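
/*
 * Editor's note: a hedged sketch of how these two calls compose from a
 * file system's perspective.  demo_check_pending_trim() is hypothetical;
 * it assumes the caller already holds an active transaction, which both
 * functions above require.
 */
#if 0	/* illustration only */
static void
demo_check_pending_trim(journal *jnl, uint64_t offset, uint64_t length)
{
	uint64_t overlap_end = 0;

	if (journal_trim_extent_overlap(jnl, offset, length, &overlap_end)) {
		/*
		 * Some prefix [offset, overlap_end) is still waiting to be
		 * unmapped; ask for a flush when the active transaction
		 * completes so the TRIM is pushed out before the space is
		 * rewritten.
		 */
		(void) journal_request_immediate_flush(jnl);
	}
}
#endif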
3406
3407
3408
3409 /*
3410 ;________________________________________________________________________________
3411 ;
3412 ; Routine: trim_remove_extent
3413 ;
3414 ; Function: Indicate that a range of bytes, some of which may have previously
3415 ; been passed to journal_trim_add_extent, is now allocated.
3416 ; Any overlapping ranges currently in the journal's trim list will
3417 ; be removed. If the underlying device supports TRIM (UNMAP), then
3418 ; these extents will not be trimmed/unmapped when the transaction
3419 ; is written to the journal.
3420 ;
3421 ; HFS also uses this to prevent newly allocated space from being
3422 ; added to its free extent cache (if some portion of the newly
3423 ; allocated space was recently freed).
3424 ;
3425 ; Input Arguments:
3426 ; jnl, trim - The journal, and the trim list to update.
3427 ; offset - The first byte of the range being allocated.
3428 ; length - The number of bytes of the range being allocated.
3429 ;________________________________________________________________________________
3430 */
3431 static int
3432 trim_remove_extent(journal *jnl, struct jnl_trim_list *trim, uint64_t offset, uint64_t length)
3433 {
3434 u_int64_t end;
3435 dk_extent_t *extent;
3436 u_int32_t keep_before;
3437 u_int32_t keep_after;
3438
3439 end = offset + length;
3440
3441 /*
3442 * Find any existing extents that start before or end after the input
3443 * extent. These extents will be modified if they overlap the input
3444 * extent. Other extents between them will be deleted.
3445 */
3446 extent = trim->extents;
3447 keep_before = 0;
3448 while (keep_before < trim->extent_count && extent->offset < offset) {
3449 ++keep_before;
3450 ++extent;
3451 }
3452 keep_after = keep_before;
3453 if (keep_after > 0) {
3454 /* See if previous extent extends beyond both ends of input extent. */
3455 --keep_after;
3456 --extent;
3457 }
3458 while (keep_after < trim->extent_count && (extent->offset + extent->length) <= end) {
3459 ++keep_after;
3460 ++extent;
3461 }
3462
3463 /*
3464 * When we get here, the first keep_before extents (0 .. keep_before-1)
3465 * start before the input extent, and extents (keep_after .. extent_count-1)
3466 * end after the input extent. We'll need to keep all of those extents,
3467 * but possibly modify #(keep_before-1) and #keep_after to remove the portion
3468 * that overlaps with the input extent.
3469 */
3470
3471 /*
3472 * Does the input extent start after and end before the same existing
3473 * extent? If so, we have to "punch a hole" in that extent and convert
3474 * it to two separate extents.
3475 */
3476 if (keep_before > keep_after) {
3477 /* If the list was already full, we need to grow it. */
3478 if (trim->extent_count == trim->allocated_count) {
3479 if (trim_realloc(jnl, trim) != 0) {
3480 printf("jnl: trim_remove_extent: out of memory!\n");
3481 return ENOMEM;
3482 }
3483 }
3484
3485 /*
3486 * Make room for a new extent by shifting extents #keep_after and later
3487 * down by one extent. When we're done, extents #keep_before and
3488 * #keep_after will be identical, and we can fall through to removing
3489 * the portion that overlaps the input extent.
3490 */
3491 memmove(&trim->extents[keep_before],
3492 &trim->extents[keep_after],
3493 (trim->extent_count - keep_after) * sizeof(dk_extent_t));
3494 ++trim->extent_count;
3495 ++keep_after;
3496
3497 /*
3498 * Fall through. We now have the case where the length of extent
3499 * #(keep_before - 1) needs to be updated, and the start of extent
3500 * #(keep_after) needs to be updated.
3501 */
3502 }
3503
3504 /*
3505 * May need to truncate the end of extent #(keep_before - 1) if it overlaps
3506 * the input extent.
3507 */
3508 if (keep_before > 0) {
3509 extent = &trim->extents[keep_before - 1];
3510 if (extent->offset + extent->length > offset) {
3511 extent->length = offset - extent->offset;
3512 }
3513 }
3514
3515 /*
3516 * May need to update the start of extent #(keep_after) if it overlaps the
3517 * input extent.
3518 */
3519 if (keep_after < trim->extent_count) {
3520 extent = &trim->extents[keep_after];
3521 if (extent->offset < end) {
3522 extent->length = extent->offset + extent->length - end;
3523 extent->offset = end;
3524 }
3525 }
3526
3527 /*
3528 * If there were whole extents that overlapped the input extent, get rid
3529 * of them by shifting any following extents, and updating the count.
3530 */
3531 if (keep_after > keep_before && keep_after < trim->extent_count) {
3532 memmove(&trim->extents[keep_before],
3533 &trim->extents[keep_after],
3534 (trim->extent_count - keep_after) * sizeof(dk_extent_t));
3535 }
3536 trim->extent_count -= keep_after - keep_before;
3537
3538 return 0;
3539 }
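
/*
 * Editor's note: a worked example of the "punch a hole" case above,
 * kept out of the build with #if 0.  demo_punch_hole() is hypothetical;
 * it shows the end state trim_remove_extent computes when one existing
 * extent straddles the range being removed.
 */
#if 0	/* illustration only */
static void
demo_punch_hole(void)
{
	/* existing list: one extent [0..100) */
	dk_extent_t list[2] = { { .offset = 0, .length = 100 } };
	uint32_t count = 1;

	/* removing [40..60): keep_before == 1, keep_after == 0, so the
	 * extent is duplicated, the head is truncated, and the tail is
	 * advanced, leaving [0..40) and [60..100) */
	list[1].offset = 60;
	list[1].length = 100 - 60;
	list[0].length = 40;
	count = 2;
}
#endif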
3540
3541 /*
3542 ;________________________________________________________________________________
3543 ;
3544 ; Routine: journal_trim_remove_extent
3545 ;
3546 ; Function: Make note that a range of bytes, some of which may have previously
3547 ; been passed to journal_trim_add_extent, is now in use on the
3548 ; volume. The given bytes will not be trimmed as part of
3549 ; this transaction, nor as part of a pending trim of a transaction
3550 ; being asynchronously flushed.
3551 ;
3552 ; Input Arguments:
3553 ; jnl - The journal for the volume containing the byte range.
3554 ; offset - The first byte of the range now in use.
3555 ; length - The number of bytes of the range now in use.
3556 ;________________________________________________________________________________
3557 */
3558 int
3559 journal_trim_remove_extent(journal *jnl, uint64_t offset, uint64_t length)
3560 {
3561 int error = 0;
3562 transaction *tr;
3563
3564 CHECK_JOURNAL(jnl);
3565
3566 /* TODO: Is it OK to manipulate the trim list even if JOURNAL_INVALID is set? I think so... */
3567 if (jnl->flags & JOURNAL_INVALID) {
3568 return EINVAL;
3569 }
3570
3571 tr = jnl->active_tr;
3572 CHECK_TRANSACTION(tr);
3573
3574 if (jnl_kdebug)
3575 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_REMOVE | DBG_FUNC_START, obfuscate_addr(jnl), offset, length, tr->trim.extent_count, 0);
3576
3577 if (jnl->owner != current_thread()) {
3578 panic("jnl: trim_remove_extent: called w/out a transaction! jnl %p, owner %p, curact %p\n",
3579 jnl, jnl->owner, current_thread());
3580 }
3581
3582 free_old_stuff(jnl);
3583
3584 error = trim_remove_extent(jnl, &tr->trim, offset, length);
3585 if (error == 0) {
3586 int found = FALSE;
3587
3588 /*
3589 * See if a pending trim has any extents that overlap with the
3590 * one we were given.
3591 */
3592 lck_rw_lock_shared(&jnl->trim_lock);
3593 if (jnl->async_trim != NULL)
3594 found = trim_search_extent(jnl->async_trim, offset, length, NULL, NULL);
3595 lck_rw_unlock_shared(&jnl->trim_lock);
3596
3597 if (found) {
3598 /*
3599 * There was an overlap, so avoid trimming the extent we
3600 * just allocated. (Otherwise, it might get trimmed after
3601 * we've written to it, which will cause that data to be
3602 * corrupted.)
3603 */
3604 uint32_t async_extent_count = 0;
3605
3606 if (jnl_kdebug)
3607 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_REMOVE_PENDING | DBG_FUNC_START, obfuscate_addr(jnl), offset, length, 0, 0);
3608 lck_rw_lock_exclusive(&jnl->trim_lock);
3609 if (jnl->async_trim != NULL) {
3610 error = trim_remove_extent(jnl, jnl->async_trim, offset, length);
3611 async_extent_count = jnl->async_trim->extent_count;
3612 }
3613 lck_rw_unlock_exclusive(&jnl->trim_lock);
3614 if (jnl_kdebug)
3615 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_REMOVE_PENDING | DBG_FUNC_END, error, 0, 0, async_extent_count, 0);
3616 }
3617 }
3618
3619 if (jnl_kdebug)
3620 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_REMOVE | DBG_FUNC_END, error, 0, 0, tr->trim.extent_count, 0);
3621 return error;
3622 }
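
/*
 * Editor's note: the function above uses an optimistic locking pattern
 * worth calling out: search under the shared lock, and only on a hit
 * retake the lock exclusively and re-check async_trim, since the TRIM
 * may have completed (and the pointer been zeroed) in between.  A
 * hedged generic sketch, with the modification elided:
 */
#if 0	/* illustration only */
static void
demo_shared_then_exclusive(journal *jnl, uint64_t offset, uint64_t length)
{
	int found = FALSE;

	lck_rw_lock_shared(&jnl->trim_lock);
	if (jnl->async_trim != NULL)
		found = trim_search_extent(jnl->async_trim, offset, length, NULL, NULL);
	lck_rw_unlock_shared(&jnl->trim_lock);

	if (found) {
		lck_rw_lock_exclusive(&jnl->trim_lock);
		if (jnl->async_trim != NULL) {	/* must re-check */
			/* ... modify jnl->async_trim ... */
		}
		lck_rw_unlock_exclusive(&jnl->trim_lock);
	}
}
#endif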
3623
3624
3625 static int
3626 journal_trim_flush(journal *jnl, transaction *tr)
3627 {
3628 int err = 0;
3629 boolean_t was_vm_privileged = FALSE;
3630
3631 if (jnl_kdebug)
3632 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_FLUSH | DBG_FUNC_START, obfuscate_addr(jnl), tr, 0, tr->trim.extent_count, 0);
3633
3634 if (vfs_isswapmount(jnl->fsmount)) {
3635 /*
3636 * the disk driver can allocate memory on this path...
3637 * if we block waiting for memory, and there is enough pressure to
3638 * cause us to try and create a new swap file, we may end up deadlocking
3639 * due to waiting for the journal on the swap file creation path...
3640 * by making ourselves vm_privileged, we give ourselves the best chance
3641 * of not blocking
3642 */
3643 was_vm_privileged = set_vm_privilege(TRUE);
3644 }
3645 lck_rw_lock_shared(&jnl->trim_lock);
3646 if (tr->trim.extent_count > 0) {
3647 dk_unmap_t unmap;
3648
3649 bzero(&unmap, sizeof(unmap));
3650 if (jnl->flags & JOURNAL_USE_UNMAP) {
3651 unmap.extents = tr->trim.extents;
3652 unmap.extentsCount = tr->trim.extent_count;
3653 if (jnl_kdebug)
3654 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_UNMAP | DBG_FUNC_START, obfuscate_addr(jnl), tr, 0, tr->trim.extent_count, 0);
3655 err = VNOP_IOCTL(jnl->fsdev, DKIOCUNMAP, (caddr_t)&unmap, FWRITE, vfs_context_kernel());
3656 if (jnl_kdebug)
3657 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_UNMAP | DBG_FUNC_END, err, 0, 0, 0, 0);
3658 }
3659
3660 /*
3661 * Call back into the file system to tell them that we have
3662 * trimmed some extents and that they can now be reused.
3663 *
3664 * CAUTION: If the journal becomes invalid (eg., due to an I/O
3665 * error when trying to write to the journal), this callback
3666 * will stop getting called, even if extents got freed before
3667 * the journal became invalid!
3668 */
3669 if (jnl->trim_callback)
3670 jnl->trim_callback(jnl->trim_callback_arg, tr->trim.extent_count, tr->trim.extents);
3671 }
3672 lck_rw_unlock_shared(&jnl->trim_lock);
3673
3674 if (vfs_isswapmount(jnl->fsmount) && (was_vm_privileged == FALSE))
3675 set_vm_privilege(FALSE);
3676 /*
3677 * If the transaction we're flushing was the async transaction, then
3678 * tell the current transaction that there is no pending trim
3679 * any more.
3680 *
3681 * NOTE: Since we released the lock, another thread could have
3682 * removed one or more extents from our list. That's not a
3683 * problem since any writes to the re-allocated blocks
3684 * would get sent to the device after the DKIOCUNMAP.
3685 */
3686 lck_rw_lock_exclusive(&jnl->trim_lock);
3687 if (jnl->async_trim == &tr->trim)
3688 jnl->async_trim = NULL;
3689 lck_rw_unlock_exclusive(&jnl->trim_lock);
3690
3691 /*
3692 * By the time we get here, no other thread can discover the address
3693 * of "tr", so it is safe for us to manipulate tr->trim without
3694 * holding any locks.
3695 */
3696 if (tr->trim.extents) {
3697 hfs_free(tr->trim.extents, tr->trim.allocated_count * sizeof(dk_extent_t));
3698 tr->trim.allocated_count = 0;
3699 tr->trim.extent_count = 0;
3700 tr->trim.extents = NULL;
3701 }
3702
3703 if (jnl_kdebug)
3704 KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_FLUSH | DBG_FUNC_END, err, 0, 0, 0, 0);
3705
3706 return err;
3707 }
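
/*
 * Editor's note: a hedged sketch of the shape of the trim callback
 * invoked above.  The exact typedef lives in hfs_journal.h; the body
 * here is hypothetical.  In HFS the callback is what lets freed extents
 * re-enter the volume's free-extent machinery only after the
 * DKIOCUNMAP has been issued.
 */
#if 0	/* illustration only */
static void
demo_trim_callback(void *arg, uint32_t extent_count, const dk_extent_t *extents)
{
	uint32_t i;

	for (i = 0; i < extent_count; i++) {
		/* ... mark [extents[i].offset, extents[i].offset +
		 *     extents[i].length) as reusable ... */
	}
}
#endif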
3708
3709 static int
3710 journal_binfo_cmp(const void *a, const void *b)
3711 {
3712 const block_info *bi_a = (const struct block_info *)a;
3713 const block_info *bi_b = (const struct block_info *)b;
3714 daddr64_t res;
3715
3716 if (bi_a->bnum == (off_t)-1) {
3717 return 1;
3718 }
3719 if (bi_b->bnum == (off_t)-1) {
3720 return -1;
3721 }
3722
3723 // don't have to worry about negative block
3724 // numbers; clamp the 64-bit difference so that
3725 // reducing it to an int can't flip its sign.
3726 res = (buf_blkno(bi_a->u.bp) - buf_blkno(bi_b->u.bp));
3727
3728 return (res < 0) ? -1 : (res > 0) ? 1 : 0;
3729 }
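
/*
 * Editor's note: killed blocks (bnum == (off_t)-1) compare greater than
 * everything else, so after the kx_qsort in finish_end_transaction they
 * collect at the tail of binfo[] and are trimmed off before the
 * buf_bawrite loop.  A hedged user-space analogue using stdlib qsort:
 */
#if 0	/* illustration only */
#include <stdlib.h>

static int
demo_cmp(const void *a, const void *b)
{
	int64_t va = *(const int64_t *)a;
	int64_t vb = *(const int64_t *)b;

	if (va == -1) return  1;	/* killed entries sort last */
	if (vb == -1) return -1;
	return (va < vb) ? -1 : (va > vb) ? 1 : 0;
}

static void
demo_sort(void)
{
	int64_t blocks[4] = { 42, -1, 7, -1 };

	qsort(blocks, 4, sizeof(blocks[0]), demo_cmp);
	/* result: { 7, 42, -1, -1 } */
}
#endif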
3730
3731
3732 /*
3733 * End a transaction. If the transaction is small enough, and we're not forcing
3734 * a write to disk, the "active" transaction becomes the "current" transaction,
3735 * and will be reused for the next transaction that is started (group commit).
3736 *
3737 * If the transaction gets written to disk (because force_it is true, or no
3738 * group commit, or the transaction is sufficiently full), the blocks get
3739 * written into the journal first, then they are written asynchronously. When
3740 * those async writes complete, the transaction can be freed and removed from
3741 * the journal.
3742 *
3743 * An optional callback can be supplied. If given, it is called after the
3744 * blocks have been written to the journal, but before the async writes
3745 * of those blocks to their normal on-disk locations. This is used by
3746 * journal_relocate so that the location of the journal can be changed and
3747 * flushed to disk before the blocks get written to their normal locations.
3748 * Note that the callback is only called if the transaction gets written to
3749 * the journal during this end_transaction call; you probably want to set the
3750 * force_it flag.
3751 *
3752 * Inputs:
3753 * tr Transaction to add to the journal
3754 * force_it If true, force this transaction to the on-disk journal immediately.
3755 * callback See description above. Pass NULL for no callback.
3756 * callback_arg Argument passed to callback routine.
3757 * drop_lock/must_wait Control journal unlocking and synchronous completion (see code).
3758 * Result
3759 * 0 No errors
3760 * -1 An error occurred. The journal is marked invalid.
3761 */
3762 static int
3763 end_transaction(transaction *tr, int force_it, errno_t (*callback)(void*), void *callback_arg, boolean_t drop_lock, boolean_t must_wait)
3764 {
3765 block_list_header *blhdr=NULL, *next=NULL;
3766 int i, ret_val = 0;
3767 errno_t err;
3768 journal *jnl = tr->jnl;
3769 struct buf *bp;
3770 size_t tbuffer_offset;
3771 boolean_t drop_lock_early;
3772
3773 if (jnl->cur_tr) {
3774 panic("jnl: jnl @ %p already has cur_tr %p, new tr: %p\n",
3775 jnl, jnl->cur_tr, tr);
3776 }
3777
3778 // if there weren't any modified blocks in the transaction
3779 // just save off the transaction pointer and return.
3780 if (tr->total_bytes == jnl->jhdr->blhdr_size) {
3781 jnl->cur_tr = tr;
3782 goto done;
3783 }
3784
3785 // if our transaction buffer isn't very full, just hang
3786 // on to it and don't actually flush anything. this is
3787 // what is known as "group commit". we will flush the
3788 // transaction buffer if it's full or if we have more than
3789 // one of them so we don't start hogging too much memory.
3790 //
3791 // We also check the device supports UNMAP/TRIM, and if so,
3792 // the number of extents waiting to be trimmed. If it is
3793 // small enough, then keep accumulating more (so we can
3794 // reduce the overhead of trimming). If there was a prior
3795 // trim error, then we stop issuing trims for this
3796 // volume, so we can also coalesce transactions.
3797 //
3798 if ( force_it == 0
3799 && (jnl->flags & JOURNAL_NO_GROUP_COMMIT) == 0
3800 && tr->num_blhdrs < 3
3801 && (tr->total_bytes <= ((tr->tbuffer_size*tr->num_blhdrs) - tr->tbuffer_size/8))
3802 && (!(jnl->flags & JOURNAL_USE_UNMAP) || (tr->trim.extent_count < jnl_trim_flush_limit))) {
3803
3804 jnl->cur_tr = tr;
3805 goto done;
3806 }
3807
3808 KERNEL_DEBUG(0xbbbbc018|DBG_FUNC_START, jnl, tr, drop_lock, must_wait, 0);
3809
3810 lock_condition(jnl, &jnl->flushing, "end_transaction");
3811
3812 /*
3813 * if the previous 'finish_end_transaction' was being run
3814 * asynchronously, it could have encountered a condition
3815 * that caused it to mark the journal invalid... if that
3816 * occurred while we were waiting for it to finish, we
3817 * need to notice and abort the current transaction
3818 */
3819 if ((jnl->flags & JOURNAL_INVALID) || jnl->flush_aborted == TRUE) {
3820 unlock_condition(jnl, &jnl->flushing);
3821
3822 abort_transaction(jnl, tr);
3823 ret_val = -1;
3824 KERNEL_DEBUG(0xbbbbc018|DBG_FUNC_END, jnl, tr, ret_val, 0, 0);
3825 goto done;
3826 }
3827
3828 /*
3829 * Store a pointer to this transaction's trim list so that
3830 * future transactions can find it.
3831 *
3832 * Note: if there are no extents in the trim list, then don't
3833 * bother saving the pointer since nothing can add new extents
3834 * to the list (and other threads/transactions only care if
3835 * there is a trim pending).
3836 */
3837 lck_rw_lock_exclusive(&jnl->trim_lock);
3838 if (jnl->async_trim != NULL)
3839 panic("jnl: end_transaction: async_trim already non-NULL!");
3840 if (tr->trim.extent_count > 0)
3841 jnl->async_trim = &tr->trim;
3842 lck_rw_unlock_exclusive(&jnl->trim_lock);
3843
3844 /*
3845 * snapshot the transaction sequence number while we are still behind
3846 * the journal lock since it will be bumped upon the start of the
3847 * next transaction group which may overlap the current journal flush...
3848 * we pass the snapshot into write_journal_header during the journal
3849 * flush so that it can write the correct version in the header...
3850 * because we hold the 'flushing' condition variable for the duration
3851 * of the journal flush, 'saved_sequence_num' remains stable
3852 */
3853 jnl->saved_sequence_num = jnl->sequence_num;
3854
3855 /*
3856 * if we're here we're going to flush the transaction buffer to disk.
3857 * 'check_free_space' will not return until there is enough free
3858 * space for this transaction in the journal and jnl->old_start[0]
3859 * is available for use
3860 */
3861 KERNEL_DEBUG(0xbbbbc030 | DBG_FUNC_START, jnl, 0, 0, 0, 0);
3862
3863 check_free_space(jnl, tr->total_bytes, &tr->delayed_header_write, jnl->saved_sequence_num);
3864
3865 KERNEL_DEBUG(0xbbbbc030 | DBG_FUNC_END, jnl, tr->delayed_header_write, 0, 0, 0);
3866
3867 // range check the end index
3868 if (jnl->jhdr->end <= 0 || jnl->jhdr->end > jnl->jhdr->size) {
3869 panic("jnl: end_transaction: end is bogus 0x%llx (sz 0x%llx)\n",
3870 jnl->jhdr->end, jnl->jhdr->size);
3871 }
3872 if (tr->delayed_header_write == TRUE) {
3873 thread_t thread = THREAD_NULL;
3874
3875 lock_condition(jnl, &jnl->writing_header, "end_transaction");
3876 /*
3877 * fire up a thread to write the journal header
3878 * asynchronously... when it finishes, it will call
3879 * unlock_condition... we can overlap the preparation of
3880 * the log and buffers during this time
3881 */
3882 kernel_thread_start((thread_continue_t)write_header_thread, jnl, &thread);
3883 } else
3884 jnl->write_header_failed = FALSE;
3885
3886
3887 // this transaction starts where the current journal ends
3888 tr->journal_start = jnl->jhdr->end;
3889
3890 lock_oldstart(jnl);
3891 /*
3892 * Because old_start is locked above, we can cast away the volatile qualifier before passing it to memmove.
3893 * Slide everyone else down and put our latest entry in the last
3894 * slot of the old_start array.
3895 */
3896 memmove(__CAST_AWAY_QUALIFIER(&jnl->old_start[0], volatile, void *), __CAST_AWAY_QUALIFIER(&jnl->old_start[1], volatile, void *), sizeof(jnl->old_start)-sizeof(jnl->old_start[0]));
3897 jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] = tr->journal_start | 0x8000000000000000LL;
3898
3899 unlock_oldstart(jnl);
3900
3901
3902 for (blhdr = tr->blhdr; blhdr; blhdr = next) {
3903 char *blkptr;
3904 buf_t sbp;
3905 int32_t bsize;
3906
3907 tbuffer_offset = jnl->jhdr->blhdr_size;
3908
3909 for (i = 1; i < blhdr->num_blocks; i++) {
3910
3911 if (blhdr->binfo[i].bnum != (off_t)-1) {
3912 void (*func)(buf_t, void *);
3913 void *arg;
3914
3915 bp = blhdr->binfo[i].u.bp;
3916
3917 if (bp == NULL) {
3918 panic("jnl: inconsistent binfo (NULL bp w/bnum %lld; jnl @ %p, tr %p)\n",
3919 blhdr->binfo[i].bnum, jnl, tr);
3920 }
3921 /*
3922 * acquire the bp here so that we can safely
3923 * mess around with its data. buf_acquire()
3924 * will return EAGAIN if the buffer was busy,
3925 * so loop trying again.
3926 */
3927 do {
3928 err = buf_acquire(bp, BAC_REMOVE, 0, 0);
3929 } while (err == EAGAIN);
3930
3931 if (err)
3932 panic("could not acquire bp %p (err %d)\n", bp, err);
3933
3934 if ((buf_flags(bp) & (B_LOCKED|B_DELWRI)) != (B_LOCKED|B_DELWRI)) {
3935 if (jnl->flags & JOURNAL_CLOSE_PENDING) {
3936 buf_clearflags(bp, B_LOCKED);
3937 buf_brelse(bp);
3938
3939 /*
3940 * this is an odd case that appears to happen occasionally;
3941 * make sure we mark this block as no longer valid
3942 * so that we don't process it in "finish_end_transaction", since
3943 * the bp that is recorded in our array no longer belongs
3944 * to us (normally we substitute a shadow bp to be processed);
3945 * issuing a 'buf_bawrite' on a stale buf_t pointer leads
3946 * to all kinds of problems.
3947 */
3948 blhdr->binfo[i].bnum = (off_t)-1;
3949 continue;
3950 } else {
3951 panic("jnl: end_tr: !!!DANGER!!! bp %p flags (0x%x) not LOCKED & DELWRI\n", bp, buf_flags(bp));
3952 }
3953 }
3954 bsize = buf_size(bp);
3955
3956 buf_setfilter(bp, NULL, NULL, &func, &arg);
3957
3958 blkptr = (char *)&((char *)blhdr)[tbuffer_offset];
3959
3960 sbp = buf_create_shadow_priv(bp, FALSE, (uintptr_t)blkptr, 0, 0);
3961
3962 if (sbp == NULL)
3963 panic("jnl: buf_create_shadow returned NULL");
3964
3965 /*
3966 * copy the data into the transaction buffer...
3967 */
3968 memcpy(blkptr, (char *)buf_dataptr(bp), bsize);
3969
3970 buf_clearflags(bp, B_LOCKED);
3971 buf_markclean(bp);
3972 buf_drop(bp);
3973
3974 /*
3975 * adopt the shadow buffer for this block
3976 */
3977 if (func) {
3978 /*
3979 * transfer FS hook function to the
3980 * shadow buffer... it will get called
3981 * in finish_end_transaction
3982 */
3983 buf_setfilter(sbp, func, arg, NULL, NULL);
3984 }
3985 blhdr->binfo[i].u.bp = sbp;
3986
3987 } else {
3988 // bnum == -1, only true if a block was "killed"
3989 bsize = blhdr->binfo[i].u.bi.bsize;
3990 }
3991 tbuffer_offset += bsize;
3992 }
3993 next = (block_list_header *)((long)blhdr->binfo[0].bnum);
3994 }
3995 /*
3996 * if callback != NULL, we don't want to drop the journal
3997 * lock, or complete end_transaction asynchronously, since
3998 * the caller is expecting the callback to run in the calling
3999 * context
4000 *
4001 * if drop_lock == FALSE, we can't complete end_transaction
4002 * asynchronously
4003 */
4004 if (callback)
4005 drop_lock_early = FALSE;
4006 else
4007 drop_lock_early = drop_lock;
4008
4009 if (drop_lock_early == FALSE)
4010 must_wait = TRUE;
4011
4012 if (drop_lock_early == TRUE) {
4013 journal_unlock(jnl);
4014 drop_lock = FALSE;
4015 }
4016 if (must_wait == TRUE)
4017 ret_val = finish_end_transaction(tr, callback, callback_arg);
4018 else {
4019 thread_t thread = THREAD_NULL;
4020
4021 /*
4022 * fire up a thread to complete processing this transaction
4023 * asynchronously... when it finishes, it will call
4024 * unlock_condition
4025 */
4026 kernel_thread_start((thread_continue_t)finish_end_thread, tr, &thread);
4027 }
4028 KERNEL_DEBUG(0xbbbbc018|DBG_FUNC_END, jnl, tr, ret_val, 0, 0);
4029 done:
4030 if (drop_lock == TRUE) {
4031 journal_unlock(jnl);
4032 }
4033 return (ret_val);
4034 }
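
/*
 * Editor's note: a hedged restatement of the group-commit decision made
 * near the top of end_transaction, pulled out as a predicate for
 * readability.  demo_should_group_commit() is hypothetical; the tests
 * and thresholds mirror the inline condition above.
 */
#if 0	/* illustration only */
static int
demo_should_group_commit(journal *jnl, transaction *tr, int force_it)
{
	if (force_it)
		return 0;
	if (jnl->flags & JOURNAL_NO_GROUP_COMMIT)
		return 0;
	if (tr->num_blhdrs >= 3)
		return 0;	/* don't hog memory with many tbuffers  */
	if (tr->total_bytes > (tr->tbuffer_size * tr->num_blhdrs) - tr->tbuffer_size / 8)
		return 0;	/* within an eighth of full: flush it   */
	if ((jnl->flags & JOURNAL_USE_UNMAP) && tr->trim.extent_count >= jnl_trim_flush_limit)
		return 0;	/* too many pending trims: flush it     */
	return 1;		/* stash tr in jnl->cur_tr and return   */
}
#endif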
4035
4036
4037 static void
4038 finish_end_thread(transaction *tr)
4039 {
4040 throttle_set_thread_io_policy(IOPOL_PASSIVE);
4041
4042 finish_end_transaction(tr, NULL, NULL);
4043
4044 thread_deallocate(current_thread());
4045 thread_terminate(current_thread());
4046 }
4047
4048 static void
4049 write_header_thread(journal *jnl)
4050 {
4051 throttle_set_thread_io_policy(IOPOL_PASSIVE);
4052
4053 if (write_journal_header(jnl, 1, jnl->saved_sequence_num))
4054 jnl->write_header_failed = TRUE;
4055 else
4056 jnl->write_header_failed = FALSE;
4057 unlock_condition(jnl, &jnl->writing_header);
4058
4059 thread_deallocate(current_thread());
4060 thread_terminate(current_thread());
4061 }
4062
4063 static int
4064 finish_end_transaction(transaction *tr, errno_t (*callback)(void*), void *callback_arg)
4065 {
4066 int i, amt;
4067 int ret = 0;
4068 off_t end;
4069 journal *jnl = tr->jnl;
4070 buf_t bp, *bparray;
4071 vnode_t vp;
4072 block_list_header *blhdr=NULL, *next=NULL;
4073 size_t tbuffer_offset;
4074 int bufs_written = 0;
4075 int ret_val = 0;
4076 boolean_t was_vm_privileged = FALSE;
4077
4078 KERNEL_DEBUG(0xbbbbc028|DBG_FUNC_START, jnl, tr, 0, 0, 0);
4079
4080 if (vfs_isswapmount(jnl->fsmount)) {
4081 /*
4082 * if we block waiting for memory, and there is enough pressure to
4083 * cause us to try and create a new swap file, we may end up deadlocking
4084 * due to waiting for the journal on the swap file creation path...
4085 * by making ourselves vm_privileged, we give ourselves the best chance
4086 * of not blocking
4087 */
4088 was_vm_privileged = set_vm_privilege(TRUE);
4089 }
4090 end = jnl->jhdr->end;
4091
4092 for (blhdr = tr->blhdr; blhdr; blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) {
4093
4094 amt = blhdr->bytes_used;
4095
4096 blhdr->binfo[0].u.bi.b.sequence_num = tr->sequence_num;
4097
4098 blhdr->checksum = 0;
4099 blhdr->checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
4100
4101 bparray = hfs_malloc(blhdr->num_blocks * sizeof(buf_t));
4102 tbuffer_offset = jnl->jhdr->blhdr_size;
4103
4104 for (i = 1; i < blhdr->num_blocks; i++) {
4105 void (*func)(buf_t, void *);
4106 void *arg;
4107 int32_t bsize;
4108
4109 /*
4110 * finish preparing the shadow buf_t before
4111 * calculating the individual block checksums
4112 */
4113 if (blhdr->binfo[i].bnum != (off_t)-1) {
4114 daddr64_t blkno;
4115 daddr64_t lblkno;
4116
4117 bp = blhdr->binfo[i].u.bp;
4118
4119 vp = buf_vnode(bp);
4120 blkno = buf_blkno(bp);
4121 lblkno = buf_lblkno(bp);
4122
4123 if (vp == NULL && lblkno == blkno) {
4124 printf("jnl: %s: end_tr: bad news! buffer w/null vp and l/blkno = %qd/%qd. aborting the transaction.\n",
4125 jnl->jdev_name, lblkno, blkno);
4126 ret_val = -1;
4127 goto bad_journal;
4128 }
4129
4130 // if the lblkno is the same as blkno and this bp isn't
4131 // associated with the underlying file system device then
4132 // we need to call bmap() to get the actual physical block.
4133 //
4134 if ((lblkno == blkno) && (vp != jnl->fsdev)) {
4135 off_t f_offset;
4136 size_t contig_bytes;
4137
4138 if (hfs_vnop_blktooff(&(struct vnop_blktooff_args){
4139 .a_vp = vp,
4140 .a_lblkno = lblkno,
4141 .a_offset = &f_offset
4142 })) {
4143 printf("jnl: %s: end_tr: vnop_blktooff failed\n", jnl->jdev_name);
4144 ret_val = -1;
4145 goto bad_journal;
4146 }
4147
4148 if (hfs_vnop_blockmap(&(struct vnop_blockmap_args) {
4149 .a_vp = vp,
4150 .a_foffset = f_offset,
4151 .a_size = buf_count(bp),
4152 .a_bpn = &blkno,
4153 .a_run = &contig_bytes
4154 })) {
4155 printf("jnl: %s: end_tr: can't blockmap the buffer\n", jnl->jdev_name);
4156 ret_val = -1;
4157 goto bad_journal;
4158 }
4159
4160 if ((uint32_t)contig_bytes < buf_count(bp)) {
4161 printf("jnl: %s: end_tr: blk not physically contiguous on disk\n", jnl->jdev_name);
4162 ret_val = -1;
4163 goto bad_journal;
4164 }
4165 buf_setblkno(bp, blkno);
4166 }
4167 // update this so we write out the correct physical block number!
4168 blhdr->binfo[i].bnum = (off_t)(blkno);
4169
4170 /*
4171 * pick up the FS hook function (if any) and prepare
4172 * to fire this buffer off in the next pass
4173 */
4174 buf_setfilter(bp, buffer_flushed_callback, tr, &func, &arg);
4175
4176 if (func) {
4177 /*
4178 * call the hook function supplied by the filesystem...
4179 * this needs to happen BEFORE calc_checksum in case
4180 * the FS morphs the data in the buffer
4181 */
4182 func(bp, arg);
4183 }
4184 bparray[i] = bp;
4185 bsize = buf_size(bp);
4186 blhdr->binfo[i].u.bi.bsize = bsize;
4187 blhdr->binfo[i].u.bi.b.cksum = calc_checksum(&((char *)blhdr)[tbuffer_offset], bsize);
4188 } else {
4189 bparray[i] = NULL;
4190 bsize = blhdr->binfo[i].u.bi.bsize;
4191 blhdr->binfo[i].u.bi.b.cksum = 0;
4192 }
4193 tbuffer_offset += bsize;
4194 }
4195 /*
4196 * if we fired off the journal_write_header asynchronously in
4197 * 'end_transaction', we need to wait for its completion
4198 * before writing the actual journal data
4199 */
4200 wait_condition(jnl, &jnl->writing_header, "finish_end_transaction");
4201
4202 if (jnl->write_header_failed == FALSE)
4203 ret = write_journal_data(jnl, &end, blhdr, amt);
4204 else
4205 ret_val = -1;
4206 /*
4207 * put the bp pointers back so that we can
4208 * make the final pass on them
4209 */
4210 for (i = 1; i < blhdr->num_blocks; i++)
4211 blhdr->binfo[i].u.bp = bparray[i];
4212
4213 hfs_free(bparray, blhdr->num_blocks * sizeof(buf_t));
4214
4215 if (ret_val == -1)
4216 goto bad_journal;
4217
4218 if (ret != amt) {
4219 printf("jnl: %s: end_transaction: only wrote %d of %d bytes to the journal!\n",
4220 jnl->jdev_name, ret, amt);
4221
4222 ret_val = -1;
4223 goto bad_journal;
4224 }
4225 }
4226 jnl->jhdr->end = end; // update where the journal now ends
4227 tr->journal_end = end; // the transaction ends here too
4228
4229 if (tr->journal_start == 0 || tr->journal_end == 0) {
4230 panic("jnl: end_transaction: bad tr journal start/end: 0x%llx 0x%llx\n",
4231 tr->journal_start, tr->journal_end);
4232 }
4233
4234 if (write_journal_header(jnl, 0, jnl->saved_sequence_num) != 0) {
4235 ret_val = -1;
4236 goto bad_journal;
4237 }
4238 /*
4239 * If the caller supplied a callback, call it now that the blocks have been
4240 * written to the journal. This is used by journal_relocate so, for example,
4241 * the file system can change its pointer to the new journal.
4242 */
4243 if (callback != NULL && callback(callback_arg) != 0) {
4244 ret_val = -1;
4245 goto bad_journal;
4246 }
4247
4248 //
4249 // Send a DKIOCUNMAP for the extents trimmed by this transaction, and
4250 // free up the extent list.
4251 //
4252 journal_trim_flush(jnl, tr);
4253
4254 // the buffer_flushed_callback will only be called for the
4255 // real blocks that get flushed so we have to account for
4256 // the block_list_headers here.
4257 //
4258 tr->num_flushed = tr->num_blhdrs * jnl->jhdr->blhdr_size;
4259
4260 lock_condition(jnl, &jnl->asyncIO, "finish_end_transaction");
4261
4262 //
4263 // setup for looping through all the blhdr's.
4264 //
4265 for (blhdr = tr->blhdr; blhdr; blhdr = next) {
4266 uint16_t num_blocks;
4267
4268 /*
4269 * grab this info ahead of issuing the buf_bawrites...
4270 * once the last one goes out, it's possible for blhdr
4271 * to be freed (especially if we get preempted) before
4272 * we do the last check of num_blocks or
4273 * grab the next blhdr pointer...
4274 */
4275 next = (block_list_header *)((long)blhdr->binfo[0].bnum);
4276 num_blocks = blhdr->num_blocks;
4277
4278 /*
4279 * we can re-order the buf ptrs because everything is written out already
4280 */
4281 kx_qsort(&blhdr->binfo[1], num_blocks-1, sizeof(block_info), journal_binfo_cmp);
4282
4283 /*
4284 * need to make sure that the loop issuing the buf_bawrite's
4285 * does not touch blhdr once the last buf_bawrite has been
4286 * issued... at that point, we no longer have a legitimate
4287 * reference on the associated storage since it will be
4288 * released upon the completion of that last buf_bawrite
4289 */
4290 for (i = num_blocks-1; i >= 1; i--) {
4291 if (blhdr->binfo[i].bnum != (off_t)-1)
4292 break;
4293 num_blocks--;
4294 }
4295 for (i = 1; i < num_blocks; i++) {
4296
4297 if ((bp = blhdr->binfo[i].u.bp)) {
4298 vp = buf_vnode(bp);
4299
4300 buf_bawrite(bp);
4301
4302 // this undoes the vnode_ref() in journal_modify_block_end()
4303 vnode_rele_ext(vp, 0, 1);
4304
4305 bufs_written++;
4306 }
4307 }
4308 }
4309 if (bufs_written == 0) {
4310 /*
4311 * since we didn't issue any buf_bawrite's, there is no
4312 * async trigger to cause the memory associated with this
4313 * transaction to be freed... so, move it to the garbage
4314 * list now
4315 */
4316 lock_oldstart(jnl);
4317
4318 tr->next = jnl->tr_freeme;
4319 jnl->tr_freeme = tr;
4320
4321 unlock_oldstart(jnl);
4322
4323 unlock_condition(jnl, &jnl->asyncIO);
4324 }
4325
4326 //printf("jnl: end_tr: tr @ 0x%x, jnl-blocks: 0x%llx - 0x%llx. exit!\n",
4327 // tr, tr->journal_start, tr->journal_end);
4328
4329 bad_journal:
4330 if (ret_val == -1) {
4331 abort_transaction(jnl, tr); // cleans up list of extents to be trimmed
4332
4333 /*
4334 * 'flush_aborted' is protected by the flushing condition... we need to
4335 * set it before dropping the condition so that it will be
4336 * noticed in 'end_transaction'... we add this additional
4337 * aborted condition so that we can drop the 'flushing' condition
4338 * before grabbing the journal lock... this avoids a deadlock
4339 * in 'end_transaction' which is holding the journal lock while
4340 * waiting for the 'flushing' condition to clear...
4341 * everyone else will notice the JOURNAL_INVALID flag
4342 */
4343 jnl->flush_aborted = TRUE;
4344
4345 unlock_condition(jnl, &jnl->flushing);
4346 journal_lock(jnl);
4347
4348 jnl->flags |= JOURNAL_INVALID;
4349 jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] &= ~0x8000000000000000LL;
4350
4351 journal_unlock(jnl);
4352 } else
4353 unlock_condition(jnl, &jnl->flushing);
4354
4355 if (vfs_isswapmount(jnl->fsmount) && (was_vm_privileged == FALSE))
4356 set_vm_privilege(FALSE);
4357
4358 KERNEL_DEBUG(0xbbbbc028|DBG_FUNC_END, jnl, tr, bufs_written, ret_val, 0);
4359
4360 return (ret_val);
4361 }
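
/*
 * Editor's note: a minimal sketch of the header-checksum step performed
 * near the top of finish_end_transaction, using only names that appear
 * above.  The stored checksum is zeroed first so the field itself does
 * not perturb the value computed over the fixed-size header prefix.
 */
#if 0	/* illustration only */
static void
demo_checksum_blhdr(block_list_header *blhdr)
{
	blhdr->checksum = 0;
	blhdr->checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
}
#endif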
4362
4363
4364 static void
4365 lock_condition(journal *jnl, boolean_t *condition, const char *condition_name)
4366 {
4367
4368 KERNEL_DEBUG(0xbbbbc020|DBG_FUNC_START, jnl, condition, 0, 0, 0);
4369
4370 lock_flush(jnl);
4371
4372 while (*condition == TRUE)
4373 msleep(condition, &jnl->flock, PRIBIO, condition_name, NULL);
4374
4375 *condition = TRUE;
4376 unlock_flush(jnl);
4377
4378 KERNEL_DEBUG(0xbbbbc020|DBG_FUNC_END, jnl, condition, 0, 0, 0);
4379 }
4380
4381 static void
4382 wait_condition(journal *jnl, boolean_t *condition, const char *condition_name)
4383 {
4384
4385 if (*condition == FALSE)
4386 return;
4387
4388 KERNEL_DEBUG(0xbbbbc02c|DBG_FUNC_START, jnl, condition, 0, 0, 0);
4389
4390 lock_flush(jnl);
4391
4392 while (*condition == TRUE)
4393 msleep(condition, &jnl->flock, PRIBIO, condition_name, NULL);
4394
4395 unlock_flush(jnl);
4396
4397 KERNEL_DEBUG(0xbbbbc02c|DBG_FUNC_END, jnl, condition, 0, 0, 0);
4398 }
4399
4400 static void
4401 unlock_condition(journal *jnl, boolean_t *condition)
4402 {
4403 lock_flush(jnl);
4404
4405 *condition = FALSE;
4406 wakeup(condition);
4407
4408 unlock_flush(jnl);
4409 }
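
/*
 * Editor's note: lock_condition / wait_condition / unlock_condition
 * implement a one-bit condition variable (jnl->flushing, jnl->asyncIO,
 * jnl->writing_header) over the flush lock, using msleep/wakeup.  A
 * hedged user-space analogue with pthreads, illustration only:
 */
#if 0	/* illustration only */
#include <pthread.h>

static pthread_mutex_t demo_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  demo_cv  = PTHREAD_COND_INITIALIZER;
static int demo_busy;			/* analogue of *condition */

static void
demo_lock_condition(void)		/* wait until clear, then claim */
{
	pthread_mutex_lock(&demo_mtx);
	while (demo_busy)
		pthread_cond_wait(&demo_cv, &demo_mtx);
	demo_busy = 1;
	pthread_mutex_unlock(&demo_mtx);
}

static void
demo_unlock_condition(void)		/* clear and wake all waiters */
{
	pthread_mutex_lock(&demo_mtx);
	demo_busy = 0;
	pthread_cond_broadcast(&demo_cv);
	pthread_mutex_unlock(&demo_mtx);
}
#endif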
4410
4411 static void
4412 abort_transaction(journal *jnl, transaction *tr)
4413 {
4414 block_list_header *blhdr, *next;
4415
4416 // for each block list header, iterate over the blocks then
4417 // free up the memory associated with the block list.
4418 //
4419 // find each of the primary blocks (i.e. the list could
4420 // contain a mix of shadowed and real buf_t's depending
4421 // on when the abort condition was detected) and mark them
4422 // clean and locked in the cache... this at least allows
4423 // the FS a consistent view between its in-core data structures
4424 // and the meta-data held in the cache
4425 //
4426 KERNEL_DEBUG(0xbbbbc034|DBG_FUNC_START, jnl, tr, 0, 0, 0);
4427
4428 for (blhdr = tr->blhdr; blhdr; blhdr = next) {
4429 int i;
4430
4431 for (i = 1; i < blhdr->num_blocks; i++) {
4432 buf_t bp, tbp, sbp;
4433 vnode_t bp_vp;
4434 errno_t err;
4435
4436 if (blhdr->binfo[i].bnum == (off_t)-1)
4437 continue;
4438
4439 tbp = blhdr->binfo[i].u.bp;
4440
4441 bp_vp = buf_vnode(tbp);
4442
4443 if (buf_shadow(tbp)) {
4444 sbp = tbp;
4445 buf_setfilter(tbp, NULL, NULL, NULL, NULL);
4446 } else {
4447 hfs_assert(ISSET(buf_flags(tbp), B_LOCKED));
4448
4449 sbp = NULL;
4450
4451 do {
4452 err = buf_acquire(tbp, BAC_REMOVE, 0, 0);
4453 } while (err == EAGAIN);
4454
4455 if (!err) {
4456 buf_setfilter(tbp, NULL, NULL, NULL, NULL);
4457 buf_brelse(tbp);
4458 }
4459 }
4460
4461 if (bp_vp) {
4462 err = buf_meta_bread(bp_vp,
4463 buf_lblkno(tbp),
4464 buf_size(tbp),
4465 NOCRED,
4466 &bp);
4467 if (err == 0) {
4468 if (sbp == NULL && bp != tbp && (buf_flags(tbp) & B_LOCKED)) {
4469 panic("jnl: abort_tr: got back a different bp! (bp %p should be %p, jnl %p\n",
4470 bp, tbp, jnl);
4471 }
4472 /*
4473 * once the journal has been marked INVALID and aborted,
4474 * NO meta data can be written back to the disk, so
4475 * mark the buf_t clean and make sure it's locked in the cache
4476 * note: if we found a shadow, the real buf_t needs to be relocked
4477 */
4478 buf_setflags(bp, B_LOCKED);
4479 buf_markclean(bp);
4480 buf_brelse(bp);
4481
4482 KERNEL_DEBUG(0xbbbbc034|DBG_FUNC_NONE, jnl, tr, bp, 0, 0);
4483
4484 /*
4485 * this undoes the vnode_ref() in journal_modify_block_end()
4486 */
4487 vnode_rele_ext(bp_vp, 0, 1);
4488 } else {
4489 printf("jnl: %s: abort_tr: could not find block %lld for vnode!\n",
4490 jnl->jdev_name, blhdr->binfo[i].bnum);
4491 if (bp) {
4492 buf_brelse(bp);
4493 }
4494 }
4495 }
4496 if (sbp)
4497 buf_brelse(sbp);
4498 }
4499 next = (block_list_header *)((long)blhdr->binfo[0].bnum);
4500
4501 // we can free blhdr here since we won't need it any more
4502 blhdr->binfo[0].bnum = 0xdeadc0de;
4503 hfs_free(blhdr, tr->tbuffer_size);
4504 }
4505
4506 /*
4507 * If the transaction we're aborting was the async transaction, then
4508 * tell the current transaction that there is no pending trim
4509 * any more.
4510 */
4511 lck_rw_lock_exclusive(&jnl->trim_lock);
4512 if (jnl->async_trim == &tr->trim)
4513 jnl->async_trim = NULL;
4514 lck_rw_unlock_exclusive(&jnl->trim_lock);
4515
4516
4517 if (tr->trim.extents) {
4518 hfs_free(tr->trim.extents, tr->trim.allocated_count * sizeof(dk_extent_t));
4519 }
4520 tr->trim.allocated_count = 0;
4521 tr->trim.extent_count = 0;
4522 tr->trim.extents = NULL;
4523 tr->tbuffer = NULL;
4524 tr->blhdr = NULL;
4525 tr->total_bytes = 0xdbadc0de;
4526 hfs_free(tr, sizeof(*tr));
4527
4528 KERNEL_DEBUG(0xbbbbc034|DBG_FUNC_END, jnl, tr, 0, 0, 0);
4529 }
4530
4531
4532 int
4533 journal_end_transaction(journal *jnl)
4534 {
4535 int ret;
4536 transaction *tr;
4537
4538 CHECK_JOURNAL(jnl);
4539
4540 free_old_stuff(jnl);
4541
4542 if ((jnl->flags & JOURNAL_INVALID) && jnl->owner == NULL) {
4543 return 0;
4544 }
4545
4546 if (jnl->owner != current_thread()) {
4547 panic("jnl: end_tr: I'm not the owner! jnl %p, owner %p, curact %p\n",
4548 jnl, jnl->owner, current_thread());
4549 }
4550 jnl->nested_count--;
4551
4552 if (jnl->nested_count > 0) {
4553 return 0;
4554 } else if (jnl->nested_count < 0) {
4555 panic("jnl: jnl @ %p has negative nested count (%d). bad boy.\n", jnl, jnl->nested_count);
4556 }
4557
4558 if (jnl->flags & JOURNAL_INVALID) {
4559 if (jnl->active_tr) {
4560 if (jnl->cur_tr != NULL) {
4561 panic("jnl: journal @ %p has active tr (%p) and cur tr (%p)\n",
4562 jnl, jnl->active_tr, jnl->cur_tr);
4563 }
4564 tr = jnl->active_tr;
4565 jnl->active_tr = NULL;
4566
4567 abort_transaction(jnl, tr);
4568 }
4569 journal_unlock(jnl);
4570
4571 return EINVAL;
4572 }
4573
4574 tr = jnl->active_tr;
4575 CHECK_TRANSACTION(tr);
4576
4577 // clear this out here so that when check_free_space() calls
4578 // the FS flush function, we don't panic in journal_flush()
4579 // if the FS were to call that. note: check_free_space() is
4580 // called from end_transaction().
4581 //
4582 jnl->active_tr = NULL;
4583
4584 /* Examine the force-journal-flush state in the active txn */
4585 if (tr->flush_on_completion == TRUE) {
4586 /*
4587 * If the FS requested it, disallow group commit and force the
4588 * transaction out to disk immediately.
4589 */
4590 ret = end_transaction(tr, 1, NULL, NULL, TRUE, TRUE);
4591 }
4592 else {
4593 /* in the common path we can simply use the double-buffered journal */
4594 ret = end_transaction(tr, 0, NULL, NULL, TRUE, FALSE);
4595 }
4596
4597 return ret;
4598 }
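
/*
 * Editor's note: a hedged sketch of the transaction lifecycle from the
 * file system's side, assuming the journal_start_transaction and
 * journal_modify_block_start/_end entry points declared in
 * hfs_journal.h; error handling is elided.
 */
#if 0	/* illustration only */
static void
demo_txn(journal *jnl, buf_t bp)
{
	journal_start_transaction(jnl);

	journal_modify_block_start(jnl, bp);
	/* ... modify the metadata held in bp ... */
	journal_modify_block_end(jnl, bp, NULL, NULL);

	journal_end_transaction(jnl);
}
#endif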
4599
4600
4601 /*
4602 * Flush the contents of the journal to the disk.
4603 *
4604 * Input:
4605 * wait_for_IO -
4606 * If TRUE, wait to write in-memory journal to the disk
4607 * consistently, and also wait to write all asynchronous
4608 * metadata blocks to their corresponding locations
4609 * consistently on the disk. This means that the journal
4610 * is empty at this point and does not contain any
4611 * transactions. This is overkill in normal scenarios
4612 * but is useful whenever the metadata blocks are required
4613 * to be consistent on-disk instead of just the journal
4614 * being consistent; like before live verification
4615 * and live volume resizing.
4616 *
4617 * If FALSE, only wait to write in-memory journal to the
4618 * disk consistently. This means that the journal still
4619 * contains uncommitted transactions and the file system
4620 * metadata blocks in the journal transactions might be
4621 * written asynchronously to the disk. But there is no
4622 * guarantee that they are written to the disk before
4623 * returning to the caller. Note that this option is
4624 * sufficient for file system data integrity as it
4625 * guarantees consistent journal content on the disk.
4626 */
4627 int
4628 journal_flush(journal *jnl, journal_flush_options_t options)
4629 {
4630 boolean_t drop_lock = FALSE;
4631 errno_t error = 0;
4632 uint32_t flush_count = 0;
4633
4634 CHECK_JOURNAL(jnl);
4635
4636 free_old_stuff(jnl);
4637
4638 if (jnl->flags & JOURNAL_INVALID) {
4639 return -1;
4640 }
4641
4642 KDBG(DBG_JOURNAL_FLUSH | DBG_FUNC_START, jnl);
4643
4644 if (jnl->owner != current_thread()) {
4645 journal_lock(jnl);
4646 drop_lock = TRUE;
4647 }
4648
4649 if (ISSET(options, JOURNAL_FLUSH_FULL))
4650 flush_count = jnl->flush_counter;
4651
4652 // if we're not active, flush any buffered transactions
4653 if (jnl->active_tr == NULL && jnl->cur_tr) {
4654 transaction *tr = jnl->cur_tr;
4655
4656 jnl->cur_tr = NULL;
4657
4658 if (ISSET(options, JOURNAL_WAIT_FOR_IO)) {
4659 wait_condition(jnl, &jnl->flushing, "journal_flush");
4660 wait_condition(jnl, &jnl->asyncIO, "journal_flush");
4661 }
4662 /*
4663 * "end_transction" will wait for any current async flush
4664 * to complete, before flushing "cur_tr"... because we've
4665 * specified the 'must_wait' arg as TRUE, it will then
4666 * synchronously flush the "cur_tr"
4667 */
4668 end_transaction(tr, 1, NULL, NULL, drop_lock, TRUE); // force it to get flushed
4669
4670 } else {
4671 if (drop_lock == TRUE) {
4672 journal_unlock(jnl);
4673 }
4674
4675 /* Because of the pipelined journal, journal transactions
4676 * might be in the process of being flushed on another thread.
4677 * If there is nothing to flush currently, we should
4678 * synchronize ourselves with the pipelined journal thread
4679 * to ensure that all inflight transactions, if any, are
4680 * flushed before we return success to caller.
4681 */
4682 wait_condition(jnl, &jnl->flushing, "journal_flush");
4683 }
4684 if (ISSET(options, JOURNAL_WAIT_FOR_IO)) {
4685 wait_condition(jnl, &jnl->asyncIO, "journal_flush");
4686 }
4687
4688 if (ISSET(options, JOURNAL_FLUSH_FULL)) {
4689
4690 dk_synchronize_t sync_request = {
4691 .options = 0,
4692 };
4693
4694 // We need a full cache flush. If it has not been done, do it here.
4695 if (flush_count == jnl->flush_counter)
4696 error = VNOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZE, (caddr_t)&sync_request, FWRITE, vfs_context_kernel());
4697
4698 // If external journal partition is enabled, flush filesystem data partition.
4699 if (jnl->jdev != jnl->fsdev)
4700 error = VNOP_IOCTL(jnl->fsdev, DKIOCSYNCHRONIZE, (caddr_t)&sync_request, FWRITE, vfs_context_kernel());
4701
4702 }
4703
4704 KDBG(DBG_JOURNAL_FLUSH | DBG_FUNC_END, jnl);
4705
4706 return error;
4707 }
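
/*
 * Editor's note: a hedged usage sketch of the options consumed above.
 * Combining JOURNAL_WAIT_FOR_IO with JOURNAL_FLUSH_FULL leaves the
 * journal empty and forces a device cache flush, the strongest barrier
 * this interface offers (e.g., before live verification or resizing).
 */
#if 0	/* illustration only */
static int
demo_barrier_flush(journal *jnl)
{
	return journal_flush(jnl, JOURNAL_WAIT_FOR_IO | JOURNAL_FLUSH_FULL);
}
#endif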
4708
4709 int
4710 journal_active(journal *jnl)
4711 {
4712 if (jnl->flags & JOURNAL_INVALID) {
4713 return -1;
4714 }
4715
4716 return (jnl->active_tr == NULL) ? 0 : 1;
4717 }
4718
4719 void *
4720 journal_owner(journal *jnl)
4721 {
4722 return jnl->owner;
4723 }
4724
4725 int journal_uses_fua(journal *jnl)
4726 {
4727 if (jnl->flags & JOURNAL_DO_FUA_WRITES)
4728 return 1;
4729 return 0;
4730 }
4731
4732 /*
4733 * Relocate the journal.
4734 *
4735 * You provide the new starting offset and size for the journal. You may
4736 * optionally provide a new tbuffer_size; passing zero defaults to not
4737 * changing the tbuffer size except as needed to fit within the new journal
4738 * size.
4739 *
4740 * You must have already started a transaction. The transaction may contain
4741 * modified blocks (such as those needed to deallocate the old journal,
4742 * allocate the new journal, and update the location and size of the journal
4743 * in filesystem-private structures). Any transactions prior to the active
4744 * transaction will be flushed to the old journal. The new journal will be
4745 * initialized, and the blocks from the active transaction will be written to
4746 * the new journal.
4747 *
4748 * The caller will need to update the structures that identify the location
4749 * and size of the journal. These updates should be made in the supplied
4750 * callback routine. These updates must NOT go into a transaction. You should
4751 * force these updates to the media before returning from the callback. In the
4752 * event of a crash, either the old journal will be found (and it will be empty),
4753 * or the new journal will be found with the contents of the active transaction.
4754 *
4755 * Upon return from the callback, the blocks from the active transaction are
4756 * written to their normal locations on disk.
4757 *
4758 * (Remember that we have to ensure that blocks get committed to the journal
4759 * before being committed to their normal locations. But the blocks don't count
4760 * as committed until the new journal is pointed at.)
4761 *
4762 * Upon return, there is still an active transaction: newly allocated, and
4763 * with no modified blocks. Call journal_end_transaction as normal. You may
4764 * modify additional blocks before calling journal_end_transaction, and those
4765 * blocks will (eventually) go to the relocated journal.
4766 *
4767 * Inputs:
4768 * jnl The (opened) journal to relocate.
4769 * offset The new journal byte offset (from start of the journal device).
4770 * journal_size The size, in bytes, of the new journal.
4771 * tbuffer_size The new desired transaction buffer size. Pass zero to keep
4772 * the same size as the current journal. The size will be
4773 * modified as needed to fit the new journal.
4774 * callback Routine called after the new journal has been initialized,
4775 * and the active transaction written to the new journal, but
4776 * before the blocks are written to their normal locations.
4777 * Pass NULL for no callback.
4778 * callback_arg An argument passed to the callback routine.
4779 *
4780 * Result:
4781 * 0 No errors
4782 * EINVAL The offset is not block aligned
4783 * EINVAL The journal_size is not a multiple of the block size
4784 * EINVAL The journal is invalid
4785 * (any) An error returned by journal_flush.
4786 *
4787 */
4788 int journal_relocate(journal *jnl, off_t offset, off_t journal_size, int32_t tbuffer_size,
4789 errno_t (*callback)(void *), void *callback_arg)
4790 {
4791 int ret;
4792 transaction *tr;
4793 size_t i = 0;
4794
4795 /*
4796 * Sanity check inputs, and adjust the size of the transaction buffer.
4797 */
4798 if (jnl->jhdr->jhdr_size == 0) {
4799 printf("jnl: %s: relocate: bad jhdr size (%d)\n", jnl->jdev_name, jnl->jhdr->jhdr_size);
4800 return EINVAL;
4801 }
4802
4803 if ((offset % jnl->jhdr->jhdr_size) != 0) {
4804 printf("jnl: %s: relocate: offset 0x%llx is not an even multiple of block size 0x%x\n",
4805 jnl->jdev_name, offset, jnl->jhdr->jhdr_size);
4806 return EINVAL;
4807 }
4808 if ((journal_size % jnl->jhdr->jhdr_size) != 0) {
4809 printf("jnl: %s: relocate: journal size 0x%llx is not an even multiple of block size 0x%x\n",
4810 jnl->jdev_name, journal_size, jnl->jhdr->jhdr_size);
4811 return EINVAL;
4812 }
4813
4814 CHECK_JOURNAL(jnl);
4815
4816 /* Guarantee we own the active transaction. */
4817 if (jnl->flags & JOURNAL_INVALID) {
4818 return EINVAL;
4819 }
4820 if (jnl->owner != current_thread()) {
4821 panic("jnl: relocate: Not the owner! jnl %p, owner %p, curact %p\n",
4822 jnl, jnl->owner, current_thread());
4823 }
4824
4825 if (tbuffer_size == 0)
4826 tbuffer_size = jnl->tbuffer_size;
4827 size_up_tbuffer(jnl, tbuffer_size, jnl->jhdr->jhdr_size);
4828
4829 /*
4830 * Flush any non-active transactions. We have to temporarily hide the
4831 * active transaction to make journal_flush flush out non-active but
4832 * current (unwritten) transactions.
4833 */
4834 tr = jnl->active_tr;
4835 CHECK_TRANSACTION(tr);
4836 jnl->active_tr = NULL;
4837 ret = journal_flush(jnl, JOURNAL_WAIT_FOR_IO);
4838 jnl->active_tr = tr;
4839
4840 if (ret) {
4841 return ret;
4842 }
4843 wait_condition(jnl, &jnl->flushing, "end_transaction");
4844
4845 /*
4846 * At this point, we have completely flushed the contents of the current
4847 * journal to disk (and have asynchronously written all of the txns to
4848 * their actual desired locations). As a result, we can (and must) clear
4849 * out the old_start array. If we do not, then if the last written transaction
4850 * started at the beginning of the journal (starting 1 block into the
4851 * journal file) it could confuse the buffer_flushed callback. This is
4852 * because we're about to reset the start/end pointers of the journal header
4853 * below.
4854 */
4855 lock_oldstart(jnl);
4856 for (i = 0; i < sizeof (jnl->old_start) / sizeof(jnl->old_start[0]); i++) {
4857 jnl->old_start[i] = 0;
4858 }
4859 unlock_oldstart(jnl);
4860
4861 /* Update the journal's offset and size in memory. */
4862 jnl->jdev_offset = offset;
4863 jnl->jhdr->start = jnl->jhdr->end = jnl->jhdr->jhdr_size;
4864 jnl->jhdr->size = journal_size;
4865 jnl->active_start = jnl->jhdr->start;
4866
4867 /*
4868 * Force the active transaction to be written to the new journal. Call the
4869 * supplied callback after the blocks have been written to the journal, but
4870 * before they get written to their normal on-disk locations.
4871 */
4872 jnl->active_tr = NULL;
4873 ret = end_transaction(tr, 1, callback, callback_arg, FALSE, TRUE);
4874 if (ret) {
4875 printf("jnl: %s: relocate: end_transaction failed (%d)\n", jnl->jdev_name, ret);
4876 goto bad_journal;
4877 }
4878
4879 /*
4880 * Create a new, empty transaction to be the active transaction. This way
4881 * our caller can use journal_end_transaction as usual.
4882 */
4883 ret = journal_allocate_transaction(jnl);
4884 if (ret) {
4885 printf("jnl: %s: relocate: could not allocate new transaction (%d)\n", jnl->jdev_name, ret);
4886 goto bad_journal;
4887 }
4888
4889 return 0;
4890
4891 bad_journal:
4892 jnl->flags |= JOURNAL_INVALID;
4893 abort_transaction(jnl, tr);
4894 return ret;
4895 }
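
/*
 * Editor's note: a hedged sketch of a journal_relocate callback.  How a
 * file system records its journal location is hypothetical here; the
 * contract stated above is only that the update happen outside any
 * transaction and be forced to the media before the callback returns.
 */
#if 0	/* illustration only */
struct demo_relocate_ctx {
	off_t new_offset;	/* new journal byte offset */
	off_t new_size;		/* new journal size        */
};

static errno_t
demo_relocate_cb(void *arg)
{
	struct demo_relocate_ctx *ctx = arg;

	/* ... write ctx->new_offset / ctx->new_size into the volume's
	 *     private structures and synchronously flush that block ... */
	return 0;
}
#endif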
4896
4897 uint32_t journal_current_txn(journal *jnl)
4898 {
4899 return jnl->sequence_num + (jnl->active_tr || jnl->cur_tr ? 0 : 1);
4900 }