/*
 * Copyright (c) 2002-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
//
// This file implements a simple write-ahead journaling layer.
// In theory any file system can make use of it by calling these
// functions when the fs wants to modify meta-data blocks.  See
// vfs_journal.h for a more detailed description of the api and
// data structures.
//
// Dominic Giampaolo (dbg@apple.com)
//
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/buf_internal.h>
#include <sys/proc_internal.h>
#include <sys/mount_internal.h>
#include <sys/namei.h>
#include <sys/vnode_internal.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <sys/kdebug.h>
#include <miscfs/specfs/specdev.h>
#include <libkern/OSAtomic.h>	/* OSAddAtomic */

kern_return_t	thread_terminate(thread_t);
/*
 * Set sysctl vfs.generic.jnl.kdebug.trim=1 to enable KERNEL_DEBUG_CONSTANT
 * logging of trim-related calls within the journal.  (They're
 * disabled by default because there can be a lot of these events,
 * and we don't want to overwhelm the kernel debug buffer.  If you
 * want to watch these events in particular, just set the sysctl.)
 */
static int jnl_kdebug = 0;
SYSCTL_DECL(_vfs_generic);
SYSCTL_NODE(_vfs_generic, OID_AUTO, jnl, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Journal");
SYSCTL_NODE(_vfs_generic_jnl, OID_AUTO, kdebug, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Journal kdebug");
SYSCTL_INT(_vfs_generic_jnl_kdebug, OID_AUTO, trim, CTLFLAG_RW|CTLFLAG_LOCKED, &jnl_kdebug, 0, "Enable kdebug logging for journal TRIM");
#define DBG_JOURNAL_FLUSH		FSDBG_CODE(DBG_JOURNAL, 1)
#define DBG_JOURNAL_TRIM_ADD		FSDBG_CODE(DBG_JOURNAL, 2)
#define DBG_JOURNAL_TRIM_REMOVE		FSDBG_CODE(DBG_JOURNAL, 3)
#define DBG_JOURNAL_TRIM_REMOVE_PENDING	FSDBG_CODE(DBG_JOURNAL, 4)
#define DBG_JOURNAL_TRIM_REALLOC	FSDBG_CODE(DBG_JOURNAL, 5)
#define DBG_JOURNAL_TRIM_FLUSH		FSDBG_CODE(DBG_JOURNAL, 6)
#define DBG_JOURNAL_TRIM_UNMAP		FSDBG_CODE(DBG_JOURNAL, 7)
/*
 * Cap the journal max size to 2GB.  On HFS, it will attempt to occupy
 * a full allocation block if the current size is smaller than the allocation
 * block on which it resides.  Once we hit the exabyte filesystem range, then
 * it will use 2GB allocation blocks.  As a result, make the cap 2GB.
 */
#define MAX_JOURNAL_SIZE 0x80000000U
#include <sys/sdt.h> /* DTRACE_IO1 */
#include <sys/types.h>

#include "vfs_journal.h"

#include <sys/kdebug.h>

#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT

#ifndef CONFIG_HFS_TRIM
#define CONFIG_HFS_TRIM 0
#endif
// By default, we grow the list of extents to trim by 4K at a time.
// We'll opt to flush a transaction if it contains at least
// JOURNAL_FLUSH_TRIM_EXTENTS extents to be trimmed (even if the number
// of modified blocks is small).
enum {
	JOURNAL_DEFAULT_TRIM_BYTES   = 4096,
	JOURNAL_DEFAULT_TRIM_EXTENTS = JOURNAL_DEFAULT_TRIM_BYTES / sizeof(dk_extent_t),
	JOURNAL_FLUSH_TRIM_EXTENTS   = JOURNAL_DEFAULT_TRIM_EXTENTS * 15 / 16
};

unsigned int jnl_trim_flush_limit = JOURNAL_FLUSH_TRIM_EXTENTS;
SYSCTL_UINT (_kern, OID_AUTO, jnl_trim_flush, CTLFLAG_RW, &jnl_trim_flush_limit, 0, "number of trimmed extents to cause a journal flush");
/* XXX next prototype should be from libsa/stdlib.h> but conflicts libkern */
__private_extern__ void	qsort(void * array, size_t nmembers, size_t member_size, int (*)(const void *, const void *));
// number of bytes to checksum in a block_list_header
// NOTE: this should be enough to clear out the header
//       fields as well as the first entry of binfo[]
#define BLHDR_CHECKSUM_SIZE 32
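// (With the block_list_header layout in vfs_journal.h, that arithmetic
// works out as follows: the fixed header fields -- max_blocks, num_blocks,
// bytes_used, checksum, flags -- occupy 16 bytes, and binfo[0], an 8-byte
// bnum plus an 8-byte union, occupies another 16, for exactly 32 bytes.)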
static void lock_condition(journal *jnl, boolean_t *condition, const char *condition_name);
static void wait_condition(journal *jnl, boolean_t *condition, const char *condition_name);
static void unlock_condition(journal *jnl, boolean_t *condition);
static void finish_end_thread(transaction *tr);
static void write_header_thread(journal *jnl);
static int finish_end_transaction(transaction *tr, errno_t (*callback)(void*), void *callback_arg);
static int end_transaction(transaction *tr, int force_it, errno_t (*callback)(void*), void *callback_arg, boolean_t drop_lock, boolean_t must_wait);
static void abort_transaction(journal *jnl, transaction *tr);
static void dump_journal(journal *jnl);

static __inline__ void  lock_oldstart(journal *jnl);
static __inline__ void  unlock_oldstart(journal *jnl);
static __inline__ void  lock_flush(journal *jnl);
static __inline__ void  unlock_flush(journal *jnl);
//
// 3105942 - Coalesce writes to the same block on journal replay
//

typedef struct bucket {
	off_t     block_num;
	uint32_t  jnl_offset;
	uint32_t  block_size;
	int32_t   cksum;
} bucket;

#define STARTING_BUCKETS 256
static int add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr);
static int grow_table(struct bucket **buf_ptr, int num_buckets, int new_size);
static int lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full);
static int do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr);
static int insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr, int overwriting);
#define CHECK_JOURNAL(jnl) \
	do { \
	if (jnl == NULL) { \
		panic("%s:%d: null journal ptr?\n", __FILE__, __LINE__); \
	} \
	if (jnl->jdev == NULL) { \
		panic("%s:%d: jdev is null!\n", __FILE__, __LINE__); \
	} \
	if (jnl->fsdev == NULL) { \
		panic("%s:%d: fsdev is null!\n", __FILE__, __LINE__); \
	} \
	if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC) { \
		panic("%s:%d: jhdr magic corrupted (0x%x != 0x%x)\n", \
		__FILE__, __LINE__, jnl->jhdr->magic, JOURNAL_HEADER_MAGIC); \
	} \
	if (   jnl->jhdr->start <= 0 \
	       || jnl->jhdr->start > jnl->jhdr->size) { \
		panic("%s:%d: jhdr start looks bad (0x%llx max size 0x%llx)\n", \
		__FILE__, __LINE__, jnl->jhdr->start, jnl->jhdr->size); \
	} \
	if (   jnl->jhdr->end <= 0 \
	       || jnl->jhdr->end > jnl->jhdr->size) { \
		panic("%s:%d: jhdr end looks bad (0x%llx max size 0x%llx)\n", \
		__FILE__, __LINE__, jnl->jhdr->end, jnl->jhdr->size); \
	} \
	} while(0)
#define CHECK_TRANSACTION(tr) \
	do { \
	if (tr == NULL) { \
		panic("%s:%d: null transaction ptr?\n", __FILE__, __LINE__); \
	} \
	if (tr->jnl == NULL) { \
		panic("%s:%d: null tr->jnl ptr?\n", __FILE__, __LINE__); \
	} \
	if (tr->blhdr != (block_list_header *)tr->tbuffer) { \
		panic("%s:%d: blhdr (%p) != tbuffer (%p)\n", __FILE__, __LINE__, tr->blhdr, tr->tbuffer); \
	} \
	if (tr->total_bytes < 0) { \
		panic("%s:%d: tr total_bytes looks bad: %d\n", __FILE__, __LINE__, tr->total_bytes); \
	} \
	if (tr->journal_start < 0) { \
		panic("%s:%d: tr journal start looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_start); \
	} \
	if (tr->journal_end < 0) { \
		panic("%s:%d: tr journal end looks bad: 0x%llx\n", __FILE__, __LINE__, tr->journal_end); \
	} \
	if (tr->blhdr && (tr->blhdr->max_blocks <= 0 || tr->blhdr->max_blocks > (tr->jnl->jhdr->size/tr->jnl->jhdr->jhdr_size))) { \
		panic("%s:%d: tr blhdr max_blocks looks bad: %d\n", __FILE__, __LINE__, tr->blhdr->max_blocks); \
	} \
	} while(0)
//
// this isn't a great checksum routine but it will do for now.
// we use it to checksum the journal header and the block list
// headers that are at the start of each transaction.
//
static unsigned int
calc_checksum(char *ptr, int len)
{
	int	i;
	unsigned int	cksum=0;

	// this is a lame checksum but for now it'll do
	for(i = 0; i < len; i++, ptr++) {
		cksum = (cksum << 8) ^ (cksum + *(unsigned char *)ptr);
	}

	return (~cksum);
}
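// For example, checksumming the single byte 0xab starts from cksum=0:
// the loop computes (0 << 8) ^ (0 + 0xab) = 0xab and the function
// returns ~0xab = 0xffffff54.  The left-shift folds earlier bytes into
// later iterations, so byte order affects the result even though this
// is not a real CRC.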
lck_grp_attr_t *  jnl_group_attr;
lck_attr_t *      jnl_lock_attr;
lck_grp_t *       jnl_mutex_group;

void
journal_init(void)
{
	jnl_lock_attr    = lck_attr_alloc_init();
	jnl_group_attr   = lck_grp_attr_alloc_init();
	jnl_mutex_group  = lck_grp_alloc_init("jnl-mutex", jnl_group_attr);
}
__inline__ void
journal_lock(journal *jnl)
{
	lck_mtx_lock(&jnl->jlock);
	if (jnl->owner) {
		panic ("jnl: owner is %p, expected NULL\n", jnl->owner);
	}
	jnl->owner = current_thread();
}

__inline__ void
journal_unlock(journal *jnl)
{
	jnl->owner = NULL;
	lck_mtx_unlock(&jnl->jlock);
}
static __inline__ void
lock_flush(journal *jnl)
{
	lck_mtx_lock(&jnl->flock);
}

static __inline__ void
unlock_flush(journal *jnl)
{
	lck_mtx_unlock(&jnl->flock);
}

static __inline__ void
lock_oldstart(journal *jnl)
{
	lck_mtx_lock(&jnl->old_start_lock);
}

static __inline__ void
unlock_oldstart(journal *jnl)
{
	lck_mtx_unlock(&jnl->old_start_lock);
}
#define JNL_WRITE    0x0001
#define JNL_READ     0x0002
#define JNL_HEADER   0x8000

//
// This function sets up a fake buf and passes it directly to the
// journal device strategy routine (so that it won't get cached in
// the block cache).
//
// It also handles range checking the i/o so that we don't write
// outside the journal boundaries and it will wrap the i/o back
// to the beginning if necessary (skipping over the journal header)
//
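// For example (illustrative numbers): with a 0x1000-byte journal and a
// 0x200-byte journal header, a 0x300-byte write to offset 0xf00 becomes
// two i/o's -- the 0x100 bytes up to the end of the journal (curlen is
// clipped to size - offset), then, after *offset wraps to jhdr_size
// (0x200, skipping the header), the remaining 0x200 bytes.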
static size_t
do_journal_io(journal *jnl, off_t *offset, void *data, size_t len, int direction)
{
	int		err, curlen=len;
	size_t		io_sz = 0;
	buf_t		bp;
	off_t		max_iosize;
	struct bufattr	*bap;
	boolean_t	was_vm_privileged = FALSE;
	boolean_t	need_vm_privilege = FALSE;

	if (jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT)
		need_vm_privilege = TRUE;

	if (*offset < 0 || *offset > jnl->jhdr->size) {
		panic("jnl: do_jnl_io: bad offset 0x%llx (max 0x%llx)\n", *offset, jnl->jhdr->size);
	}

	if (direction & JNL_WRITE)
		max_iosize = jnl->max_write_size;
	else if (direction & JNL_READ)
		max_iosize = jnl->max_read_size;
	else
		max_iosize = 128 * 1024;

again:
	bp = alloc_io_buf(jnl->jdev, 1);

	if (*offset + (off_t)curlen > jnl->jhdr->size && *offset != 0 && jnl->jhdr->size != 0) {
		if (*offset == jnl->jhdr->size) {
			*offset = jnl->jhdr->jhdr_size;
		} else {
			curlen = (off_t)jnl->jhdr->size - *offset;
		}
	}

	if (curlen > max_iosize) {
		curlen = max_iosize;
	}

	if (curlen <= 0) {
		panic("jnl: do_jnl_io: curlen == %d, offset 0x%llx len %zd\n", curlen, *offset, len);
	}

	if (*offset == 0 && (direction & JNL_HEADER) == 0) {
		panic("jnl: request for i/o to jnl-header without JNL_HEADER flag set! (len %d, data %p)\n", curlen, data);
	}

	/*
	 * As alluded to in the block comment at the top of the function, we use a "fake" iobuf
	 * here and issue directly to the disk device that the journal protects since we don't
	 * want this to enter the block cache.  As a result, we lose the ability to mark it
	 * as a metadata buf_t for the layers below us that may care.  If we were to
	 * simply attach the B_META flag into the b_flags this may confuse things further
	 * since this is an iobuf, not a metadata buffer.
	 *
	 * To address this, we use the extended bufattr struct embedded in the bp.
	 * Explicitly mark the buf here as a metadata buffer in its bufattr flags.
	 */
	bap = buf_attr(bp);
	bap->ba_flags |= BA_META;

	if (direction & JNL_READ)
		buf_setflags(bp, B_READ);
	else {
		/*
		 * don't have to set any flags
		 */
		vnode_startwrite(jnl->jdev);
	}
	buf_setsize(bp, curlen);
	buf_setcount(bp, curlen);
	buf_setdataptr(bp, (uintptr_t)data);
	buf_setblkno(bp, (daddr64_t) ((jnl->jdev_offset + *offset) / (off_t)jnl->jhdr->jhdr_size));
	buf_setlblkno(bp, (daddr64_t) ((jnl->jdev_offset + *offset) / (off_t)jnl->jhdr->jhdr_size));

	if ((direction & JNL_WRITE) && (jnl->flags & JOURNAL_DO_FUA_WRITES)) {
		buf_markfua(bp);
	}

	if (need_vm_privilege == TRUE) {
		/*
		 * if we block waiting for memory, and there is enough pressure to
		 * cause us to try and create a new swap file, we may end up deadlocking
		 * due to waiting for the journal on the swap file creation path...
		 * by making ourselves vm_privileged, we give ourselves the best chance
		 * of not blocking
		 */
		was_vm_privileged = set_vm_privilege(TRUE);
	}
	DTRACE_IO1(journal__start, buf_t, bp);
	err = VNOP_STRATEGY(bp);
	if (!err) {
		err = (int)buf_biowait(bp);
	}
	DTRACE_IO1(journal__done, buf_t, bp);

	if (need_vm_privilege == TRUE && was_vm_privileged == FALSE)
		set_vm_privilege(FALSE);

	free_io_buf(bp);

	if (err) {
		printf("jnl: %s: do_jnl_io: strategy err 0x%x\n", jnl->jdev_name, err);
		return 0;
	}

	*offset += curlen;
	io_sz   += curlen;

	if (io_sz != len) {
		// handle wrap-around
		data    = (char *)data + curlen;
		curlen  = len - io_sz;
		if (*offset >= jnl->jhdr->size) {
			*offset = jnl->jhdr->jhdr_size;
		}
		goto again;
	}

	return io_sz;
}
static size_t
read_journal_data(journal *jnl, off_t *offset, void *data, size_t len)
{
	return do_journal_io(jnl, offset, data, len, JNL_READ);
}

static size_t
write_journal_data(journal *jnl, off_t *offset, void *data, size_t len)
{
	return do_journal_io(jnl, offset, data, len, JNL_WRITE);
}
static size_t
read_journal_header(journal *jnl, void *data, size_t len)
{
	off_t hdr_offset = 0;

	return do_journal_io(jnl, &hdr_offset, data, len, JNL_READ|JNL_HEADER);
}
static int
write_journal_header(journal *jnl, int updating_start, uint32_t sequence_num)
{
	static int num_err_prints = 0;
	int	ret=0;
	off_t	jhdr_offset = 0;
	struct vfs_context context;

	context.vc_thread = current_thread();
	context.vc_ucred = NOCRED;
	//
	// Flush the track cache if we're not doing force-unit-access
	// writes.
	//
	if (!updating_start && (jnl->flags & JOURNAL_DO_FUA_WRITES) == 0) {

		dk_synchronize_t sync_request = {
			.options = DK_SYNCHRONIZE_OPTION_BARRIER,
		};

		/*
		 * If device doesn't support barrier-only flush, or
		 * the journal is on a different device, use full flush.
		 */
		if (!(jnl->flags & JOURNAL_FEATURE_BARRIER) || (jnl->jdev != jnl->fsdev)) {
			sync_request.options = 0;
			jnl->flush_counter++;
		}

		ret = VNOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZE, (caddr_t)&sync_request, FWRITE, &context);
	}
	if (ret != 0) {
		//
		// Only print this error if it's a different error than the
		// previous one, or if it's the first time for this device
		// or if the total number of printfs is less than 25.  We
		// allow for up to 25 printfs to insure that some make it
		// into the on-disk syslog.  Otherwise if we only printed
		// one, it's possible it would never make it to the syslog
		// for the root volume and that makes debugging hard.
		//
		if (   ret != jnl->last_flush_err
		       || (jnl->flags & JOURNAL_FLUSHCACHE_ERR) == 0
		       || num_err_prints++ < 25) {

			printf("jnl: %s: flushing fs disk buffer returned 0x%x\n", jnl->jdev_name, ret);

			jnl->flags |= JOURNAL_FLUSHCACHE_ERR;
			jnl->last_flush_err = ret;
		}
	}

	jnl->jhdr->sequence_num = sequence_num;
	jnl->jhdr->checksum = 0;
	jnl->jhdr->checksum = calc_checksum((char *)jnl->jhdr, JOURNAL_HEADER_CKSUM_SIZE);

	if (do_journal_io(jnl, &jhdr_offset, jnl->header_buf, jnl->jhdr->jhdr_size, JNL_WRITE|JNL_HEADER) != (size_t)jnl->jhdr->jhdr_size) {
		printf("jnl: %s: write_journal_header: error writing the journal header!\n", jnl->jdev_name);
		jnl->flags |= JOURNAL_INVALID;
		return -1;
	}

	// If we're not doing force-unit-access writes, then we
	// have to flush after writing the journal header so that
	// a future transaction doesn't sneak out to disk before
	// the header does and thus overwrite data that the old
	// journal header refers to.  Saw this exact case happen
	// on an IDE bus analyzer with Larry Barras so while it
	// may seem obscure, it's not.
	//
	if (updating_start && (jnl->flags & JOURNAL_DO_FUA_WRITES) == 0) {

		dk_synchronize_t sync_request = {
			.options = DK_SYNCHRONIZE_OPTION_BARRIER,
		};

		/*
		 * If device doesn't support barrier-only flush, or
		 * the journal is on a different device, use full flush.
		 */
		if (!(jnl->flags & JOURNAL_FEATURE_BARRIER) || (jnl->jdev != jnl->fsdev)) {
			sync_request.options = 0;
			jnl->flush_counter++;
		}

		VNOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZE, (caddr_t)&sync_request, FWRITE, &context);
	}

	return 0;
}
//
// this is a work function used to free up transactions that
// completed. they can't be free'd from buffer_flushed_callback
// because it is called from deep with the disk driver stack
// and thus can't do something that would potentially cause
// paging.  it gets called by each of the journal api entry
// points so stuff shouldn't hang around for too long.
//
static void
free_old_stuff(journal *jnl)
{
	transaction	*tr, *next;
	block_list_header  *blhdr=NULL, *next_blhdr=NULL;

	if (jnl->tr_freeme == NULL)
		return;

	lock_oldstart(jnl);
	tr = jnl->tr_freeme;
	jnl->tr_freeme = NULL;
	unlock_oldstart(jnl);

	for(; tr; tr=next) {
		for (blhdr = tr->blhdr; blhdr; blhdr = next_blhdr) {
			next_blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum);
			blhdr->binfo[0].bnum = 0xdeadc0de;

			kmem_free(kernel_map, (vm_offset_t)blhdr, tr->tbuffer_size);

			KERNEL_DEBUG(0xbbbbc01c, jnl, tr, tr->tbuffer_size, 0, 0);
		}
		next = tr->next;
		FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
	}
}
//
// This is our callback that lets us know when a buffer has been
// flushed to disk.  It's called from deep within the driver stack
// and thus is quite limited in what it can do.  Notably, it can
// not initiate any new i/o's or allocate/free memory.
//
static void
buffer_flushed_callback(struct buf *bp, void *arg)
{
	transaction	*tr;
	journal		*jnl;
	transaction	*ctr, *prev=NULL, *next;
	size_t		i;
	int		bufsize, amt_flushed, total_bytes;


	//printf("jnl: buf flush: bp @ 0x%x l/blkno %qd/%qd vp 0x%x tr @ 0x%x\n",
	//	   bp, buf_lblkno(bp), buf_blkno(bp), buf_vnode(bp), arg);

	// snarf out the bits we want
	bufsize = buf_size(bp);
	tr      = (transaction *)arg;

	// then we've already seen it
	if (tr == NULL) {
		return;
	}

	CHECK_TRANSACTION(tr);

	jnl = tr->jnl;

	CHECK_JOURNAL(jnl);

	amt_flushed = tr->num_killed;
	total_bytes = tr->total_bytes;

	// update the number of blocks that have been flushed.
	// this buf may represent more than one block so take
	// that into account.
	//
	// OSAddAtomic() returns the value of tr->num_flushed before the add
	//
	amt_flushed += OSAddAtomic(bufsize, &tr->num_flushed);


	// if this transaction isn't done yet, just return as
	// there is nothing to do.
	//
	// NOTE: we are careful to not reference anything through
	//       the tr pointer after doing the OSAddAtomic().  if
	//       this if statement fails then we are the last one
	//       and then it's ok to dereference "tr".
	//
	if ((amt_flushed + bufsize) < total_bytes) {
		return;
	}

	// this will single thread checking the transaction
	lock_oldstart(jnl);

	if (tr->total_bytes == (int)0xfbadc0de) {
		// then someone beat us to it...
		unlock_oldstart(jnl);
		return;
	}

	// mark this so that we're the owner of dealing with the
	// cleanup for this transaction
	tr->total_bytes = 0xfbadc0de;

	if (jnl->flags & JOURNAL_INVALID)
		goto transaction_done;

	//printf("jnl: tr 0x%x (0x%llx 0x%llx) in jnl 0x%x completed.\n",
	//   tr, tr->journal_start, tr->journal_end, jnl);

	// find this entry in the old_start[] index and mark it completed
	for(i = 0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {

		if ((off_t)(jnl->old_start[i] & ~(0x8000000000000000ULL)) == tr->journal_start) {
			jnl->old_start[i] &= ~(0x8000000000000000ULL);
			break;
		}
	}

	if (i >= sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
		panic("jnl: buffer_flushed: did not find tr w/start @ %lld (tr %p, jnl %p)\n",
		      tr->journal_start, tr, jnl);
	}


	// if we are here then we need to update the journal header
	// to reflect that this transaction is complete
	if (tr->journal_start == jnl->active_start) {
		jnl->active_start = tr->journal_end;
		tr->journal_start = tr->journal_end = (off_t)0;
	}

	// go through the completed_trs list and try to coalesce
	// entries, restarting back at the beginning if we have to.
	for (ctr = jnl->completed_trs; ctr; prev=ctr, ctr=next) {
		if (ctr->journal_start == jnl->active_start) {
			jnl->active_start = ctr->journal_end;
			if (prev) {
				prev->next = ctr->next;
			}
			if (ctr == jnl->completed_trs) {
				jnl->completed_trs = ctr->next;
			}

			next           = jnl->completed_trs;   // this starts us over again
			ctr->next      = jnl->tr_freeme;
			jnl->tr_freeme = ctr;
			ctr            = NULL;
		} else if (tr->journal_end == ctr->journal_start) {
			ctr->journal_start = tr->journal_start;
			next               = jnl->completed_trs;  // this starts us over again
			ctr                = NULL;
			tr->journal_start  = tr->journal_end = (off_t)0;
		} else if (tr->journal_start == ctr->journal_end) {
			ctr->journal_end  = tr->journal_end;
			next              = ctr->next;
			tr->journal_start = tr->journal_end = (off_t)0;
		} else if (ctr->next && ctr->journal_end == ctr->next->journal_start) {
			// coalesce the next entry with this one and link the next
			// entry in at the head of the tr_freeme list
			next              = ctr->next;           // temporarily use the "next" variable
			ctr->journal_end  = next->journal_end;
			ctr->next         = next->next;
			next->next        = jnl->tr_freeme;      // link in the next guy at the head of the tr_freeme list
			jnl->tr_freeme    = next;

			next              = jnl->completed_trs;  // this starts us over again
			ctr               = NULL;
		} else {
			next = ctr->next;
		}
	}

	// if this is true then we didn't merge with anyone
	// so link ourselves in at the head of the completed
	// list.
	if (tr->journal_start != 0) {
		// put this entry into the correct sorted place
		// in the list instead of just at the head.
		//

		prev = NULL;
		for (ctr = jnl->completed_trs; ctr && tr->journal_start > ctr->journal_start; prev=ctr, ctr=ctr->next) {
			// just keep looping
		}

		if (ctr == NULL && prev == NULL) {
			jnl->completed_trs = tr;
			tr->next = NULL;
		} else if (ctr == jnl->completed_trs) {
			tr->next = jnl->completed_trs;
			jnl->completed_trs = tr;
		} else {
			tr->next = prev->next;
			prev->next = tr;
		}
	} else {
		// if we're here this tr got merged with someone else so
		// put it on the list to be free'd
		tr->next       = jnl->tr_freeme;
		jnl->tr_freeme = tr;
	}
transaction_done:
	unlock_oldstart(jnl);

	unlock_condition(jnl, &jnl->asyncIO);
}
#include <libkern/OSByteOrder.h>

#define SWAP16(x) OSSwapInt16(x)
#define SWAP32(x) OSSwapInt32(x)
#define SWAP64(x) OSSwapInt64(x)
static void
swap_journal_header(journal *jnl)
{
	jnl->jhdr->magic      = SWAP32(jnl->jhdr->magic);
	jnl->jhdr->endian     = SWAP32(jnl->jhdr->endian);
	jnl->jhdr->start      = SWAP64(jnl->jhdr->start);
	jnl->jhdr->end        = SWAP64(jnl->jhdr->end);
	jnl->jhdr->size       = SWAP64(jnl->jhdr->size);
	jnl->jhdr->blhdr_size = SWAP32(jnl->jhdr->blhdr_size);
	jnl->jhdr->checksum   = SWAP32(jnl->jhdr->checksum);
	jnl->jhdr->jhdr_size  = SWAP32(jnl->jhdr->jhdr_size);
	jnl->jhdr->sequence_num  = SWAP32(jnl->jhdr->sequence_num);
}
static void
swap_block_list_header(journal *jnl, block_list_header *blhdr)
{
	int	i;

	blhdr->max_blocks = SWAP16(blhdr->max_blocks);
	blhdr->num_blocks = SWAP16(blhdr->num_blocks);
	blhdr->bytes_used = SWAP32(blhdr->bytes_used);
	blhdr->checksum   = SWAP32(blhdr->checksum);
	blhdr->flags      = SWAP32(blhdr->flags);

	if (blhdr->num_blocks >= ((jnl->jhdr->blhdr_size / sizeof(block_info)) - 1)) {
		printf("jnl: %s: blhdr num blocks looks suspicious (%d / blhdr size %d).  not swapping.\n", jnl->jdev_name, blhdr->num_blocks, jnl->jhdr->blhdr_size);
		return;
	}

	for(i = 0; i < blhdr->num_blocks; i++) {
		blhdr->binfo[i].bnum = SWAP64(blhdr->binfo[i].bnum);
		blhdr->binfo[i].u.bi.bsize = SWAP32(blhdr->binfo[i].u.bi.bsize);
		blhdr->binfo[i].u.bi.b.cksum = SWAP32(blhdr->binfo[i].u.bi.b.cksum);
	}
}
static int
update_fs_block(journal *jnl, void *block_ptr, off_t fs_block, size_t bsize)
{
	int		ret;
	struct buf	*oblock_bp=NULL;
	boolean_t	was_vm_privileged = FALSE;


	// first read the block we want.
	ret = buf_meta_bread(jnl->fsdev, (daddr64_t)fs_block, bsize, NOCRED, &oblock_bp);
	if (ret != 0) {
		printf("jnl: %s: update_fs_block: error reading fs block # %lld! (ret %d)\n", jnl->jdev_name, fs_block, ret);

		if (oblock_bp) {
			buf_brelse(oblock_bp);
			oblock_bp = NULL;
		}

		// let's try to be aggressive here and just re-write the block
		oblock_bp = buf_getblk(jnl->fsdev, (daddr64_t)fs_block, bsize, 0, 0, BLK_META);
		if (oblock_bp == NULL) {
			printf("jnl: %s: update_fs_block: buf_getblk() for %lld failed! failing update.\n", jnl->jdev_name, fs_block);
			return -1;
		}
	}

	// make sure it's the correct size.
	if (buf_size(oblock_bp) != bsize) {
		buf_brelse(oblock_bp);
		return -1;
	}

	// copy the journal data over top of it
	memcpy((char *)buf_dataptr(oblock_bp), block_ptr, bsize);

	if (jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT) {
		/*
		 * if we block waiting for memory, and there is enough pressure to
		 * cause us to try and create a new swap file, we may end up deadlocking
		 * due to waiting for the journal on the swap file creation path...
		 * by making ourselves vm_privileged, we give ourselves the best chance
		 * of not blocking
		 */
		was_vm_privileged = set_vm_privilege(TRUE);
	}
	ret = VNOP_BWRITE(oblock_bp);

	if ((jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT) && (was_vm_privileged == FALSE))
		set_vm_privilege(FALSE);

	if (ret != 0) {
		printf("jnl: %s: update_fs_block: failed to update block %lld (ret %d)\n", jnl->jdev_name, fs_block,ret);
		return ret;
	}
	// and now invalidate it so that if someone else wants to read
	// it in a different size they'll be able to do it.
	ret = buf_meta_bread(jnl->fsdev, (daddr64_t)fs_block, bsize, NOCRED, &oblock_bp);
	if (oblock_bp) {
		buf_markinvalid(oblock_bp);
		buf_brelse(oblock_bp);
	}

	return ret;
}
static int
grow_table(struct bucket **buf_ptr, int num_buckets, int new_size)
{
	struct bucket *newBuf;
	int current_size = num_buckets, i;

	// return if newsize is less than the current size
	if (new_size < num_buckets) {
		return current_size;
	}

	if ((MALLOC(newBuf, struct bucket *, new_size*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) {
		printf("jnl: grow_table: no memory to expand coalesce buffer!\n");
		return -1;
	}

	//  printf("jnl: lookup_bucket: expanded co_buf to %d elems\n", new_size);

	// copy existing elements
	bcopy(*buf_ptr, newBuf, num_buckets*sizeof(struct bucket));

	// initialize the new ones
	for(i = num_buckets; i < new_size; i++) {
		newBuf[i].block_num = (off_t)-1;
	}

	// free the old container
	FREE(*buf_ptr, M_TEMP);

	// reset the buf_ptr
	*buf_ptr = newBuf;

	return new_size;
}
static int
lookup_bucket(struct bucket **buf_ptr, off_t block_num, int num_full)
{
	int lo, hi, index, matches, i;

	if (num_full == 0) {
		return 0; // table is empty, so insert at index=0
	}

	lo = 0;
	hi = num_full - 1;
	index = -1;

	// perform binary search for block_num
	do {
		int mid = (hi - lo)/2 + lo;
		off_t this_num = (*buf_ptr)[mid].block_num;

		if (block_num == this_num) {
			index = mid;
			break;
		}

		if (block_num < this_num) {
			hi = mid;
			continue;
		}

		if (block_num > this_num) {
			lo = mid + 1;
			continue;
		}
	} while (lo < hi);

	// check if lo and hi converged on the match
	if (block_num == (*buf_ptr)[hi].block_num) {
		index = hi;
	}

	// if no existing entry found, find index for new one
	if (index == -1) {
		index = (block_num < (*buf_ptr)[hi].block_num) ? hi : hi + 1;
	} else {
		// make sure that we return the right-most index in the case of multiple matches
		matches = 0;
		i = index + 1;
		while (i < num_full && block_num == (*buf_ptr)[i].block_num) {
			matches++;
			i++;
		}

		index += matches;
	}

	return index;
}
static int
insert_block(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t num, size_t size, size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr, int overwriting)
{
	if (!overwriting) {
		// grow the table if we're out of space
		if (*num_full_ptr >= *num_buckets_ptr) {
			int new_size = *num_buckets_ptr * 2;
			int grow_size = grow_table(buf_ptr, *num_buckets_ptr, new_size);

			if (grow_size < new_size) {
				printf("jnl: %s: add_block: grow_table returned an error!\n", jnl->jdev_name);
				return -1;
			}

			*num_buckets_ptr = grow_size; //update num_buckets to reflect the new size
		}

		// if we're not inserting at the end, we need to bcopy
		if (blk_index != *num_full_ptr) {
			bcopy( (*buf_ptr)+(blk_index), (*buf_ptr)+(blk_index+1), (*num_full_ptr-blk_index)*sizeof(struct bucket) );
		}

		(*num_full_ptr)++; // increment only if we're not overwriting
	}

	// sanity check the values we're about to add
	if ((off_t)offset >= jnl->jhdr->size) {
		offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
	}
	if (size <= 0) {
		panic("jnl: insert_block: bad size in insert_block (%zd)\n", size);
	}

	(*buf_ptr)[blk_index].block_num = num;
	(*buf_ptr)[blk_index].block_size = size;
	(*buf_ptr)[blk_index].jnl_offset = offset;
	(*buf_ptr)[blk_index].cksum = cksum;

	return blk_index;
}
static int
do_overlap(journal *jnl, struct bucket **buf_ptr, int blk_index, off_t block_num, size_t size, __unused size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr)
{
	int	num_to_remove, index, i, overwrite, err;
	size_t	jhdr_size = jnl->jhdr->jhdr_size, new_offset;
	off_t	overlap, block_start, block_end;

	block_start = block_num*jhdr_size;
	block_end = block_start + size;
	overwrite = (block_num == (*buf_ptr)[blk_index].block_num && size >= (*buf_ptr)[blk_index].block_size);

	// first, eliminate any overlap with the previous entry
	if (blk_index != 0 && !overwrite) {
		off_t prev_block_start = (*buf_ptr)[blk_index-1].block_num*jhdr_size;
		off_t prev_block_end = prev_block_start + (*buf_ptr)[blk_index-1].block_size;
		overlap = prev_block_end - block_start;
		if (overlap > 0) {
			if (overlap % jhdr_size != 0) {
				panic("jnl: do_overlap: overlap with previous entry not a multiple of %zd\n", jhdr_size);
			}

			// if the previous entry completely overlaps this one, we need to break it into two pieces.
			if (prev_block_end > block_end) {
				off_t new_num = block_end / jhdr_size;
				size_t new_size = prev_block_end - block_end;

				new_offset = (*buf_ptr)[blk_index-1].jnl_offset + (block_end - prev_block_start);

				err = insert_block(jnl, buf_ptr, blk_index, new_num, new_size, new_offset, cksum, num_buckets_ptr, num_full_ptr, 0);
				if (err < 0) {
					panic("jnl: do_overlap: error inserting during pre-overlap\n");
				}
			}

			// Regardless, we need to truncate the previous entry to the beginning of the overlap
			(*buf_ptr)[blk_index-1].block_size = block_start - prev_block_start;
			(*buf_ptr)[blk_index-1].cksum = 0;   // have to blow it away because there's no way to check it
		}
	}

	// then, bail out fast if there's no overlap with the entries that follow
	if (!overwrite && block_end <= (off_t)((*buf_ptr)[blk_index].block_num*jhdr_size)) {
		return 0; // no overlap, no overwrite
	} else if (overwrite && (blk_index + 1 >= *num_full_ptr || block_end <= (off_t)((*buf_ptr)[blk_index+1].block_num*jhdr_size))) {

		(*buf_ptr)[blk_index].cksum = cksum;   // update this
		return 1; // simple overwrite
	}

	// Otherwise, find all cases of total and partial overlap.  We use the special
	// block_num of -2 to designate entries that are completely overlapped and must
	// be eliminated.  The block_num, size, and jnl_offset of partially overlapped
	// entries must be adjusted to keep the array consistent.
	index = blk_index;
	num_to_remove = 0;
	while (index < *num_full_ptr && block_end > (off_t)((*buf_ptr)[index].block_num*jhdr_size)) {
		if (block_end >= (off_t)(((*buf_ptr)[index].block_num*jhdr_size + (*buf_ptr)[index].block_size))) {
			(*buf_ptr)[index].block_num = -2; // mark this for deletion
			num_to_remove++;
		} else {
			overlap = block_end - (*buf_ptr)[index].block_num*jhdr_size;
			if (overlap > 0) {
				if (overlap % jhdr_size != 0) {
					panic("jnl: do_overlap: overlap of %lld is not multiple of %zd\n", overlap, jhdr_size);
				}

				// if we partially overlap this entry, adjust its block number, jnl offset, and size
				(*buf_ptr)[index].block_num += (overlap / jhdr_size); // make sure overlap is multiple of jhdr_size, or round up
				(*buf_ptr)[index].cksum = 0;

				new_offset = (*buf_ptr)[index].jnl_offset + overlap; // check for wrap-around
				if ((off_t)new_offset >= jnl->jhdr->size) {
					new_offset = jhdr_size + (new_offset - jnl->jhdr->size);
				}
				(*buf_ptr)[index].jnl_offset = new_offset;

				(*buf_ptr)[index].block_size -= overlap; // sanity check for negative value
				if ((*buf_ptr)[index].block_size <= 0) {
					panic("jnl: do_overlap: after overlap, new block size is invalid (%u)\n", (*buf_ptr)[index].block_size);
					// return -1; // if above panic is removed, return -1 for error
				}
			}
		}

		index++;
	}

	// bcopy over any completely overlapped entries, starting at the right (where the above loop broke out)
	index--; // start with the last index used within the above loop
	while (index >= blk_index) {
		if ((*buf_ptr)[index].block_num == -2) {
			if (index == *num_full_ptr-1) {
				(*buf_ptr)[index].block_num = -1; // it's the last item in the table... just mark as free
			} else {
				bcopy( (*buf_ptr)+(index+1), (*buf_ptr)+(index), (*num_full_ptr - (index + 1)) * sizeof(struct bucket) );
			}
			(*num_full_ptr)--;
		}
		index--;
	}

	// eliminate any stale entries at the end of the table
	for(i = *num_full_ptr; i < (*num_full_ptr + num_to_remove); i++) {
		(*buf_ptr)[i].block_num = -1;
	}

	return 0; // if we got this far, we need to insert the entry into the table (rather than overwrite)
}
// PR-3105942: Coalesce writes to the same block in journal replay
// We coalesce writes by maintaining a dynamic sorted array of physical disk blocks
// to be replayed and the corresponding location in the journal which contains
// the most recent data for those blocks.  The array is "played" once all the
// blocks in the journal have been coalesced.  The code for the case of conflicting/
// overlapping writes to a single block is the most dense.  Because coalescing can
// disrupt the existing time-ordering of blocks in the journal playback, care
// is taken to catch any overlaps and keep the array consistent.
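//
// For example (illustrative numbers): if the journal holds, in time
// order, a 3-block write at disk block 100 followed by a 1-block write
// at block 101, coalescing leaves three sorted entries --
//     [100, 1 block]  journal offset of the older write
//     [101, 1 block]  journal offset of the newer write
//     [102, 1 block]  older write's offset, advanced by two blocks
// -- so replay writes each disk block exactly once with its newest data.
//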
static int
add_block(journal *jnl, struct bucket **buf_ptr, off_t block_num, size_t size, __unused size_t offset, int32_t cksum, int *num_buckets_ptr, int *num_full_ptr)
{
	int	blk_index, overwriting;

	// on return from lookup_bucket(), blk_index is the index into the table where block_num should be
	// inserted (or the index of the elem to overwrite).
	blk_index = lookup_bucket( buf_ptr, block_num, *num_full_ptr);

	// check if the index is within bounds (if we're adding this block to the end of
	// the table, blk_index will be equal to num_full)
	if (blk_index < 0 || blk_index > *num_full_ptr) {
		//printf("jnl: add_block: trouble adding block to co_buf\n");
		return -1;
	} // else printf("jnl: add_block: adding block 0x%llx at i=%d\n", block_num, blk_index);

	// Determine whether we're overwriting an existing entry by checking for overlap
	overwriting = do_overlap(jnl, buf_ptr, blk_index, block_num, size, offset, cksum, num_buckets_ptr, num_full_ptr);
	if (overwriting < 0) {
		return -1; // if we got an error, pass it along
	}

	// returns the index, or -1 on error
	blk_index = insert_block(jnl, buf_ptr, blk_index, block_num, size, offset, cksum, num_buckets_ptr, num_full_ptr, overwriting);

	return blk_index;
}
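// replay_journal() below drives the machinery above in two passes: it
// first walks the on-disk transactions, feeding every block they touch
// through add_block() so that co_buf ends up with one sorted entry per
// fs block pointing at the journal offset of its newest copy, and only
// then reads those entries back from the journal and pushes them to the
// file system with update_fs_block().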
static int
replay_journal(journal *jnl)
{
	int		i, bad_blocks=0;
	unsigned int	orig_checksum, checksum, check_block_checksums = 0;
	size_t		ret;
	size_t		max_bsize = 0;		/* protected by block_ptr */
	block_list_header *blhdr;
	off_t		offset, txn_start_offset=0, blhdr_offset, orig_jnl_start;
	char		*buff, *block_ptr=NULL;
	struct bucket	*co_buf;
	int		num_buckets = STARTING_BUCKETS, num_full, check_past_jnl_end = 1, in_uncharted_territory=0;
	uint32_t	last_sequence_num = 0;
	int		replay_retry_count = 0;

	// wrap the start ptr if it points to the very end of the journal
	if (jnl->jhdr->start == jnl->jhdr->size) {
		jnl->jhdr->start = jnl->jhdr->jhdr_size;
	}
	if (jnl->jhdr->end == jnl->jhdr->size) {
		jnl->jhdr->end = jnl->jhdr->jhdr_size;
	}

	if (jnl->jhdr->start == jnl->jhdr->end) {
		return 0;
	}

	orig_jnl_start = jnl->jhdr->start;

	// allocate memory for the header_block.  we'll read each blhdr into this
	if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&buff, jnl->jhdr->blhdr_size, VM_KERN_MEMORY_FILE)) {
		printf("jnl: %s: replay_journal: no memory for block buffer! (%d bytes)\n",
		       jnl->jdev_name, jnl->jhdr->blhdr_size);
		return -1;
	}

	// allocate memory for the coalesce buffer
	if ((MALLOC(co_buf, struct bucket *, num_buckets*sizeof(struct bucket), M_TEMP, M_WAITOK)) == NULL) {
		printf("jnl: %s: replay_journal: no memory for coalesce buffer!\n", jnl->jdev_name);
		goto bad_replay;
	}

restart_replay:

	// initialize entries
	for(i = 0; i < num_buckets; i++) {
		co_buf[i].block_num = -1;
	}
	num_full = 0; // empty at first


	printf("jnl: %s: replay_journal: from: %lld to: %lld (joffset 0x%llx)\n",
	       jnl->jdev_name, jnl->jhdr->start, jnl->jhdr->end, jnl->jdev_offset);

	while (check_past_jnl_end || jnl->jhdr->start != jnl->jhdr->end) {
		offset = blhdr_offset = jnl->jhdr->start;
		ret = read_journal_data(jnl, &offset, buff, jnl->jhdr->blhdr_size);
		if (ret != (size_t)jnl->jhdr->blhdr_size) {
			printf("jnl: %s: replay_journal: Could not read block list header block @ 0x%llx!\n", jnl->jdev_name, offset);
			bad_blocks = 1;
			goto bad_txn_handling;
		}

		blhdr = (block_list_header *)buff;

		orig_checksum = blhdr->checksum;
		blhdr->checksum = 0;
		if (jnl->flags & JOURNAL_NEED_SWAP) {
			// calculate the checksum based on the unswapped data
			// because it is done byte-at-a-time.
			orig_checksum = (unsigned int)SWAP32(orig_checksum);
			checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
			swap_block_list_header(jnl, blhdr);
		} else {
			checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);
		}

		//
		// XXXdbg - if these checks fail, we should replay as much
		//          we can in the hopes that it will still leave the
		//          drive in a better state than if we didn't replay
		//          anything
		//
		if (checksum != orig_checksum) {
			if (check_past_jnl_end && in_uncharted_territory) {

				if (blhdr_offset != jnl->jhdr->end) {
					printf("jnl: %s: Extra txn replay stopped @ %lld / 0x%llx\n", jnl->jdev_name, blhdr_offset, blhdr_offset);
				}

				check_past_jnl_end = 0;
				jnl->jhdr->end = blhdr_offset;
				continue;
			}

			printf("jnl: %s: replay_journal: bad block list header @ 0x%llx (checksum 0x%x != 0x%x)\n",
			       jnl->jdev_name, blhdr_offset, orig_checksum, checksum);

			if (blhdr_offset == orig_jnl_start) {
				// if there's nothing in the journal at all, just bail out altogether.
				goto bad_replay;
			}

			bad_blocks = 1;
			goto bad_txn_handling;
		}

		if (   (last_sequence_num != 0)
		       && (blhdr->binfo[0].u.bi.b.sequence_num != 0)
		       && (blhdr->binfo[0].u.bi.b.sequence_num != last_sequence_num)
		       && (blhdr->binfo[0].u.bi.b.sequence_num != last_sequence_num+1)) {

			txn_start_offset = jnl->jhdr->end = blhdr_offset;

			if (check_past_jnl_end) {
				check_past_jnl_end = 0;
				printf("jnl: %s: 2: extra replay stopped @ %lld / 0x%llx (seq %d < %d)\n",
				       jnl->jdev_name, blhdr_offset, blhdr_offset, blhdr->binfo[0].u.bi.b.sequence_num, last_sequence_num);
				continue;
			}

			printf("jnl: %s: txn sequence numbers out of order in txn @ %lld / %llx! (%d < %d)\n",
			       jnl->jdev_name, blhdr_offset, blhdr_offset, blhdr->binfo[0].u.bi.b.sequence_num, last_sequence_num);
			bad_blocks = 1;
			goto bad_txn_handling;
		}
		last_sequence_num = blhdr->binfo[0].u.bi.b.sequence_num;

		if (blhdr_offset >= jnl->jhdr->end && jnl->jhdr->start <= jnl->jhdr->end) {
			if (last_sequence_num == 0) {
				check_past_jnl_end = 0;
				printf("jnl: %s: pre-sequence-num-enabled txn's - can not go further than end (%lld %lld).\n",
				       jnl->jdev_name, jnl->jhdr->start, jnl->jhdr->end);
				if (jnl->jhdr->start != jnl->jhdr->end) {
					jnl->jhdr->start = jnl->jhdr->end;
				}
				continue;
			}
			printf("jnl: %s: examining extra transactions starting @ %lld / 0x%llx\n", jnl->jdev_name, blhdr_offset, blhdr_offset);
		}

		if (   blhdr->max_blocks <= 0 || blhdr->max_blocks > (jnl->jhdr->size/jnl->jhdr->jhdr_size)
		       || blhdr->num_blocks <= 0 || blhdr->num_blocks > blhdr->max_blocks) {
			printf("jnl: %s: replay_journal: bad looking journal entry: max: %d num: %d\n",
			       jnl->jdev_name, blhdr->max_blocks, blhdr->num_blocks);
			bad_blocks = 1;
			goto bad_txn_handling;
		}

		max_bsize = 0;
		for (i = 1; i < blhdr->num_blocks; i++) {
			if (blhdr->binfo[i].bnum < 0 && blhdr->binfo[i].bnum != (off_t)-1) {
				printf("jnl: %s: replay_journal: bogus block number 0x%llx\n", jnl->jdev_name, blhdr->binfo[i].bnum);
				bad_blocks = 1;
				goto bad_txn_handling;
			}

			if ((size_t)blhdr->binfo[i].u.bi.bsize > max_bsize) {
				max_bsize = blhdr->binfo[i].u.bi.bsize;
			}
		}

		if (blhdr->flags & BLHDR_CHECK_CHECKSUMS) {
			check_block_checksums = 1;
			if (kmem_alloc(kernel_map, (vm_offset_t *)&block_ptr, max_bsize, VM_KERN_MEMORY_FILE)) {
				goto bad_replay;
			}
		} else {
			block_ptr = NULL;
		}

		if (blhdr->flags & BLHDR_FIRST_HEADER) {
			txn_start_offset = blhdr_offset;
		}

		//printf("jnl: replay_journal: adding %d blocks in journal entry @ 0x%llx to co_buf\n",
		//       blhdr->num_blocks-1, jnl->jhdr->start);
		bad_blocks = 0;
		for (i = 1; i < blhdr->num_blocks; i++) {
			int	size, ret_val;
			off_t	number;

			size = blhdr->binfo[i].u.bi.bsize;
			number = blhdr->binfo[i].bnum;

			// don't add "killed" blocks
			if (number == (off_t)-1) {
				//printf("jnl: replay_journal: skipping killed fs block (index %d)\n", i);
			} else {

				if (check_block_checksums) {
					int32_t disk_cksum;
					off_t block_offset;

					block_offset = offset;

					// read the block so we can check the checksum
					ret = read_journal_data(jnl, &block_offset, block_ptr, size);
					if (ret != (size_t)size) {
						printf("jnl: %s: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", jnl->jdev_name, offset);
						bad_blocks = 1;
						goto bad_txn_handling;
					}

					disk_cksum = calc_checksum(block_ptr, size);

					// there is no need to swap the checksum from disk because
					// it got swapped when the blhdr was read in.
					if (blhdr->binfo[i].u.bi.b.cksum != 0 && disk_cksum != blhdr->binfo[i].u.bi.b.cksum) {
						printf("jnl: %s: txn starting at %lld (%lld) @ index %3d bnum %lld (%d) with disk cksum != blhdr cksum (0x%.8x 0x%.8x)\n",
						       jnl->jdev_name, txn_start_offset, blhdr_offset, i, number, size, disk_cksum, blhdr->binfo[i].u.bi.b.cksum);
						printf("jnl: 0x%.8x 0x%.8x 0x%.8x 0x%.8x  0x%.8x 0x%.8x 0x%.8x 0x%.8x\n",
						       *(int *)&block_ptr[0*sizeof(int)], *(int *)&block_ptr[1*sizeof(int)], *(int *)&block_ptr[2*sizeof(int)], *(int *)&block_ptr[3*sizeof(int)],
						       *(int *)&block_ptr[4*sizeof(int)], *(int *)&block_ptr[5*sizeof(int)], *(int *)&block_ptr[6*sizeof(int)], *(int *)&block_ptr[7*sizeof(int)]);

						bad_blocks = 1;
						goto bad_txn_handling;
					}
				}

				// add this bucket to co_buf, coalescing where possible
				// printf("jnl: replay_journal: adding block 0x%llx\n", number);
				ret_val = add_block(jnl, &co_buf, number, size, (size_t) offset, blhdr->binfo[i].u.bi.b.cksum, &num_buckets, &num_full);

				if (ret_val == -1) {
					printf("jnl: %s: replay_journal: trouble adding block to co_buf\n", jnl->jdev_name);
					goto bad_replay;
				} // else printf("jnl: replay_journal: added block 0x%llx at i=%d\n", number);
			}

			// increment offset
			offset += size;

			// check if the last block added puts us off the end of the jnl.
			// if so, we need to wrap to the beginning and take any remainder
			// into account
			//
			if (offset >= jnl->jhdr->size) {
				offset = jnl->jhdr->jhdr_size + (offset - jnl->jhdr->size);
			}
		}

		if (block_ptr) {
			kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize);
			block_ptr = NULL;
		}

bad_txn_handling:
		if (bad_blocks) {
			/* Journal replay got error before it found any valid
			 * transations, abort replay */
			if (txn_start_offset == 0) {
				printf("jnl: %s: no known good txn start offset! aborting journal replay.\n", jnl->jdev_name);
				goto bad_replay;
			}

			/* Repeated error during journal replay, abort replay */
			if (replay_retry_count == 3) {
				printf("jnl: %s: repeated errors replaying journal! aborting journal replay.\n", jnl->jdev_name);
				goto bad_replay;
			}
			replay_retry_count++;

			/* There was an error replaying the journal (possibly
			 * EIO/ENXIO from the device).  So retry replaying all
			 * the good transactions that we found before getting
			 * the error.
			 */
			jnl->jhdr->start = orig_jnl_start;
			jnl->jhdr->end = txn_start_offset;
			check_past_jnl_end = 0;
			last_sequence_num = 0;
			printf("jnl: %s: restarting journal replay (%lld - %lld)!\n", jnl->jdev_name, jnl->jhdr->start, jnl->jhdr->end);
			goto restart_replay;
		}

		jnl->jhdr->start += blhdr->bytes_used;
		if (jnl->jhdr->start >= jnl->jhdr->size) {
			// wrap around and skip the journal header block
			jnl->jhdr->start = (jnl->jhdr->start % jnl->jhdr->size) + jnl->jhdr->jhdr_size;
		}

		if (jnl->jhdr->start == jnl->jhdr->end) {
			in_uncharted_territory = 1;
		}
	}

	if (jnl->jhdr->start != jnl->jhdr->end) {
		printf("jnl: %s: start %lld != end %lld.  resetting end.\n", jnl->jdev_name, jnl->jhdr->start, jnl->jhdr->end);
		jnl->jhdr->end = jnl->jhdr->start;
	}

	//printf("jnl: replay_journal: replaying %d blocks\n", num_full);

	/*
	 * make sure it's at least one page in size, so
	 * start max_bsize at PAGE_SIZE
	 */
	for (i = 0, max_bsize = PAGE_SIZE; i < num_full; i++) {

		if (co_buf[i].block_num == (off_t)-1)
			continue;

		if (co_buf[i].block_size > max_bsize)
			max_bsize = co_buf[i].block_size;
	}
	/*
	 * round max_bsize up to the nearest PAGE_SIZE multiple
	 */
	if (max_bsize & (PAGE_SIZE - 1)) {
		max_bsize = (max_bsize + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	}

	if (kmem_alloc(kernel_map, (vm_offset_t *)&block_ptr, max_bsize, VM_KERN_MEMORY_FILE)) {
		goto bad_replay;
	}

	// Replay the coalesced entries in the co-buf
	for(i = 0; i < num_full; i++) {
		size_t size = co_buf[i].block_size;
		off_t jnl_offset = (off_t) co_buf[i].jnl_offset;
		off_t number = co_buf[i].block_num;


		// printf("replaying co_buf[%d]: block 0x%llx, size 0x%x, jnl_offset 0x%llx\n", i, co_buf[i].block_num,
		//	  co_buf[i].block_size, co_buf[i].jnl_offset);

		if (number == (off_t)-1) {
			// printf("jnl: replay_journal: skipping killed fs block\n");
		} else {

			// do journal read, and set the phys. block
			ret = read_journal_data(jnl, &jnl_offset, block_ptr, size);
			if (ret != size) {
				printf("jnl: %s: replay_journal: Could not read journal entry data @ offset 0x%llx!\n", jnl->jdev_name, offset);
				goto bad_replay;
			}

			if (update_fs_block(jnl, block_ptr, number, size) != 0) {
				goto bad_replay;
			}
		}
	}


	// done replaying; update jnl header
	if (write_journal_header(jnl, 1, jnl->jhdr->sequence_num) != 0) {
		goto bad_replay;
	}

	printf("jnl: %s: journal replay done.\n", jnl->jdev_name);

	// free block_ptr
	if (block_ptr) {
		kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize);
		block_ptr = NULL;
	}

	// free the coalesce buffer
	FREE(co_buf, M_TEMP);
	co_buf = NULL;

	kmem_free(kernel_map, (vm_offset_t)buff, jnl->jhdr->blhdr_size);
	return 0;

bad_replay:
	if (block_ptr) {
		kmem_free(kernel_map, (vm_offset_t)block_ptr, max_bsize);
	}
	if (co_buf) {
		FREE(co_buf, M_TEMP);
	}
	kmem_free(kernel_map, (vm_offset_t)buff, jnl->jhdr->blhdr_size);

	return -1;
}
#define DEFAULT_TRANSACTION_BUFFER_SIZE  (128*1024)
#define MAX_TRANSACTION_BUFFER_SIZE      (3072*1024)

// XXXdbg - so I can change it in the debugger
int def_tbuffer_size = 0;
//
// This function sets the size of the tbuffer and the
// size of the blhdr.  It assumes that jnl->jhdr->size
// and jnl->jhdr->jhdr_size are already valid.
//
static void
size_up_tbuffer(journal *jnl, int tbuffer_size, int phys_blksz)
{
	//
	// one-time initialization based on how much memory
	// there is in the machine.
	//
	if (def_tbuffer_size == 0) {
		if (max_mem < (256*1024*1024)) {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE;
		} else if (max_mem < (512*1024*1024)) {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 2;
		} else if (max_mem < (1024*1024*1024)) {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * 3;
		} else {
			def_tbuffer_size = DEFAULT_TRANSACTION_BUFFER_SIZE * (max_mem / (256*1024*1024));
		}
	}

	// size up the transaction buffer... can't be larger than the number
	// of blocks that can fit in a block_list_header block.
	if (tbuffer_size == 0) {
		jnl->tbuffer_size = def_tbuffer_size;
	} else {
		// make sure that the specified tbuffer_size isn't too small
		if (tbuffer_size < jnl->jhdr->blhdr_size * 2) {
			tbuffer_size = jnl->jhdr->blhdr_size * 2;
		}
		// and make sure it's an even multiple of the block size
		if ((tbuffer_size % jnl->jhdr->jhdr_size) != 0) {
			tbuffer_size -= (tbuffer_size % jnl->jhdr->jhdr_size);
		}

		jnl->tbuffer_size = tbuffer_size;
	}

	if (jnl->tbuffer_size > (jnl->jhdr->size / 2)) {
		jnl->tbuffer_size = (jnl->jhdr->size / 2);
	}

	if (jnl->tbuffer_size > MAX_TRANSACTION_BUFFER_SIZE) {
		jnl->tbuffer_size = MAX_TRANSACTION_BUFFER_SIZE;
	}

	jnl->jhdr->blhdr_size = (jnl->tbuffer_size / jnl->jhdr->jhdr_size) * sizeof(block_info);
	if (jnl->jhdr->blhdr_size < phys_blksz) {
		jnl->jhdr->blhdr_size = phys_blksz;
	} else if ((jnl->jhdr->blhdr_size % phys_blksz) != 0) {
		// have to round up so we're an even multiple of the physical block size
		jnl->jhdr->blhdr_size = (jnl->jhdr->blhdr_size + (phys_blksz - 1)) & ~(phys_blksz - 1);
	}
}
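// For example (illustrative numbers): with a 512-byte phys_blksz and the
// default 128K tbuffer, blhdr_size = (131072 / 512) * sizeof(block_info)
// = 256 * 16 = 4096 bytes, which is then left as-is since 4096 is already
// a multiple of the 512-byte physical block size.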
static void
get_io_info(struct vnode *devvp, size_t phys_blksz, journal *jnl, struct vfs_context *context)
{
	off_t	readblockcnt;
	off_t	writeblockcnt;
	off_t	readmaxcnt=0, tmp_readmaxcnt;
	off_t	writemaxcnt=0, tmp_writemaxcnt;
	off_t	readsegcnt, writesegcnt;
	int32_t	features;

	if (VNOP_IOCTL(devvp, DKIOCGETFEATURES, (caddr_t)&features, 0, context) == 0) {
		if (features & DK_FEATURE_FORCE_UNIT_ACCESS) {
			const char *name = vnode_getname_printable(devvp);
			jnl->flags |= JOURNAL_DO_FUA_WRITES;
			printf("jnl: %s: enabling FUA writes (features 0x%x)\n", name, features);
			vnode_putname_printable(name);
		}
		if (features & DK_FEATURE_UNMAP) {
			jnl->flags |= JOURNAL_USE_UNMAP;
		}

		if (features & DK_FEATURE_BARRIER) {
			jnl->flags |= JOURNAL_FEATURE_BARRIER;
		}
	}

	//
	// First check the max read size via several different mechanisms...
	//
	VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD, (caddr_t)&readmaxcnt, 0, context);

	if (VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD, (caddr_t)&readblockcnt, 0, context) == 0) {
		tmp_readmaxcnt = readblockcnt * phys_blksz;
		if (readmaxcnt == 0 || (readblockcnt > 0 && tmp_readmaxcnt < readmaxcnt)) {
			readmaxcnt = tmp_readmaxcnt;
		}
	}

	if (VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD, (caddr_t)&readsegcnt, 0, context)) {
		readsegcnt = 0;
	}

	if (readsegcnt > 0 && (readsegcnt * PAGE_SIZE) < readmaxcnt) {
		readmaxcnt = readsegcnt * PAGE_SIZE;
	}

	if (readmaxcnt == 0) {
		readmaxcnt = 128 * 1024;
	} else if (readmaxcnt > UINT32_MAX) {
		readmaxcnt = UINT32_MAX;
	}


	//
	// Now check the max writes size via several different mechanisms...
	//
	VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE, (caddr_t)&writemaxcnt, 0, context);

	if (VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE, (caddr_t)&writeblockcnt, 0, context) == 0) {
		tmp_writemaxcnt = writeblockcnt * phys_blksz;
		if (writemaxcnt == 0 || (writeblockcnt > 0 && tmp_writemaxcnt < writemaxcnt)) {
			writemaxcnt = tmp_writemaxcnt;
		}
	}

	if (VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE, (caddr_t)&writesegcnt, 0, context)) {
		writesegcnt = 0;
	}

	if (writesegcnt > 0 && (writesegcnt * PAGE_SIZE) < writemaxcnt) {
		writemaxcnt = writesegcnt * PAGE_SIZE;
	}

	if (writemaxcnt == 0) {
		writemaxcnt = 128 * 1024;
	} else if (writemaxcnt > UINT32_MAX) {
		writemaxcnt = UINT32_MAX;
	}

	jnl->max_read_size  = readmaxcnt;
	jnl->max_write_size = writemaxcnt;
	// printf("jnl: %s: max read/write: %lld k / %lld k\n",
	//     jnl->jdev_name ? jnl->jdev_name : "unknown",
	//     jnl->max_read_size/1024, jnl->max_write_size/1024);
}
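// For example (illustrative numbers, assuming 4K pages): if a device
// reports DKIOCGETMAXBYTECOUNTREAD = 1MB, a max read block count of 256
// with 512-byte blocks (128K), and a max read segment count of 16 (64K),
// the smallest bound wins and jnl->max_read_size ends up at 64K.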
journal *
journal_create(struct vnode *jvp,
	       off_t         offset,
	       off_t         journal_size,
	       struct vnode *fsvp,
	       size_t        min_fs_blksz,
	       int32_t       flags,
	       int32_t       tbuffer_size,
	       void        (*flush)(void *arg),
	       void         *arg,
	       struct mount *fsmount)
{
	journal		*jnl;
	uint32_t	phys_blksz, new_txn_base;
	u_int32_t	min_size;
	struct vfs_context context;
	const char	*jdev_name;
	/*
	 * Cap the journal max size to 2GB.  On HFS, it will attempt to occupy
	 * a full allocation block if the current size is smaller than the allocation
	 * block on which it resides.  Once we hit the exabyte filesystem range, then
	 * it will use 2GB allocation blocks.  As a result, make the cap 2GB.
	 */
	context.vc_thread = current_thread();
	context.vc_ucred = FSCRED;

	jdev_name = vnode_getname_printable(jvp);

	/* Get the real physical block size. */
	if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
		goto cleanup_jdev_name;
	}

	if (journal_size < (256*1024) || journal_size > (MAX_JOURNAL_SIZE)) {
		printf("jnl: %s: create: journal size %lld looks bogus.\n", jdev_name, journal_size);
		goto cleanup_jdev_name;
	}

	min_size = phys_blksz * (phys_blksz / sizeof(block_info));
	/* Reject journals that are too small given the sector size of the device */
	if (journal_size < min_size) {
		printf("jnl: %s: create: journal size (%lld) too small given sector size of (%u)\n",
		       jdev_name, journal_size, phys_blksz);
		goto cleanup_jdev_name;
	}

	if (phys_blksz > min_fs_blksz) {
		printf("jnl: %s: create: error: phys blksize %u bigger than min fs blksize %zd\n",
		       jdev_name, phys_blksz, min_fs_blksz);
		goto cleanup_jdev_name;
	}

	if ((journal_size % phys_blksz) != 0) {
		printf("jnl: %s: create: journal size 0x%llx is not an even multiple of block size 0x%ux\n",
		       jdev_name, journal_size, phys_blksz);
		goto cleanup_jdev_name;
	}


	MALLOC_ZONE(jnl, struct journal *, sizeof(struct journal), M_JNL_JNL, M_WAITOK);
	memset(jnl, 0, sizeof(*jnl));

	jnl->jdev         = jvp;
	jnl->jdev_offset  = offset;
	jnl->fsdev        = fsvp;
	jnl->flush        = flush;
	jnl->flush_arg    = arg;
	jnl->flags        = (flags & JOURNAL_OPTION_FLAGS_MASK);
	jnl->jdev_name    = jdev_name;
	lck_mtx_init(&jnl->old_start_lock, jnl_mutex_group, jnl_lock_attr);

	// Keep a point to the mount around for use in IO throttling.
	jnl->fsmount      = fsmount;
	// XXX: This lock discipline looks correct based on dounmount(), but it
	// doesn't seem to be documented anywhere.
	mount_ref(fsmount, 0);

	get_io_info(jvp, phys_blksz, jnl, &context);

	if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&jnl->header_buf, phys_blksz, VM_KERN_MEMORY_FILE)) {
		printf("jnl: %s: create: could not allocate space for header buffer (%u bytes)\n", jdev_name, phys_blksz);
		goto bad_kmem_alloc;
	}
	jnl->header_buf_size = phys_blksz;

	jnl->jhdr = (journal_header *)jnl->header_buf;
	memset(jnl->jhdr, 0, sizeof(journal_header));

	// we have to set this up here so that do_journal_io() will work
	jnl->jhdr->jhdr_size = phys_blksz;

	//
	// We try and read the journal header to see if there is already one
	// out there.  If there is, it's possible that it has transactions
	// in it that we might replay if we happen to pick a sequence number
	// that is a little less than the old one, there is a crash and the
	// last txn written ends right at the start of a txn from the previous
	// incarnation of this file system.  If all that happens we would
	// replay the transactions from the old file system and that would
	// destroy your disk.  Although it is extremely unlikely for all those
	// conditions to happen, the probability is non-zero and the result is
	// severe - you lose your file system.  Therefore if we find a valid
	// journal header and the sequence number is non-zero we write junk
	// over the entire journal so that there is no way we will encounter
	// any old transactions.  This is slow but should be a rare event
	// since most tools erase the journal.
	//
	if (   read_journal_header(jnl, jnl->jhdr, phys_blksz) == phys_blksz
	       && jnl->jhdr->magic == JOURNAL_HEADER_MAGIC
	       && jnl->jhdr->sequence_num != 0) {

		new_txn_base = (jnl->jhdr->sequence_num + (journal_size / phys_blksz) + (random() % 16384)) & 0x00ffffff;
		printf("jnl: %s: create: avoiding old sequence number 0x%x (0x%x)\n", jdev_name, jnl->jhdr->sequence_num, new_txn_base);

#if 0
		int i;
		off_t pos = 0;

		for(i = 1; i < journal_size / phys_blksz; i++) {
			pos = i*phys_blksz;

			// we don't really care what data we write just so long
			// as it's not a valid transaction header.  since we have
			// the header_buf sitting around we'll use that.
			write_journal_data(jnl, &pos, jnl->header_buf, phys_blksz);
		}
		printf("jnl: create: done clearing journal (i=%d)\n", i);
#endif
	} else {
		new_txn_base = random() & 0x00ffffff;
	}

	memset(jnl->header_buf, 0, phys_blksz);

	jnl->jhdr->magic      = JOURNAL_HEADER_MAGIC;
	jnl->jhdr->endian     = ENDIAN_MAGIC;
	jnl->jhdr->start      = phys_blksz;	// start at block #1, block #0 is for the jhdr itself
	jnl->jhdr->end        = phys_blksz;
	jnl->jhdr->size       = journal_size;
	jnl->jhdr->jhdr_size  = phys_blksz;
	size_up_tbuffer(jnl, tbuffer_size, phys_blksz);

	jnl->active_start     = jnl->jhdr->start;

	// XXXdbg  - for testing you can force the journal to wrap around
	// jnl->jhdr->start = jnl->jhdr->size - (phys_blksz*3);
	// jnl->jhdr->end   = jnl->jhdr->size - (phys_blksz*3);

	jnl->jhdr->sequence_num = new_txn_base;

	lck_mtx_init(&jnl->jlock, jnl_mutex_group, jnl_lock_attr);
	lck_mtx_init(&jnl->flock, jnl_mutex_group, jnl_lock_attr);
	lck_rw_init(&jnl->trim_lock, jnl_mutex_group, jnl_lock_attr);

	jnl->flushing = FALSE;
	jnl->asyncIO = FALSE;
	jnl->flush_aborted = FALSE;
	jnl->writing_header = FALSE;
	jnl->async_trim = NULL;
	jnl->sequence_num = jnl->jhdr->sequence_num;

	if (write_journal_header(jnl, 1, jnl->jhdr->sequence_num) != 0) {
		printf("jnl: %s: journal_create: failed to write journal header.\n", jdev_name);
		goto bad_write;
	}

	goto journal_create_complete;


bad_write:
	kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz);
bad_kmem_alloc:
	jnl->jhdr = NULL;
	FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
	mount_drop(fsmount, 0);
cleanup_jdev_name:
	vnode_putname_printable(jdev_name);
	jnl = NULL;
journal_create_complete:
	return jnl;
}

journal *
journal_open(struct vnode *jvp,
	     off_t         offset,
	     off_t         journal_size,
	     struct vnode *fsvp,
	     size_t        min_fs_blksz,
	     int32_t       flags,
	     int32_t       tbuffer_size,
	     void        (*flush)(void *arg),
	     void         *arg,
	     struct mount *fsmount)
{
	journal		*jnl;
	uint32_t	orig_blksz = 0;
	uint32_t	phys_blksz;
	u_int32_t	min_size = 0;
	int		orig_checksum, checksum;
	struct vfs_context context;
	const char	*jdev_name = vnode_getname_printable(jvp);

	context.vc_thread = current_thread();
	context.vc_ucred = FSCRED;

	/* Get the real physical block size. */
	if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
		goto cleanup_jdev_name;
	}

	if (phys_blksz > min_fs_blksz) {
		printf("jnl: %s: open: error: phys blksize %u bigger than min fs blksize %zd\n",
		       jdev_name, phys_blksz, min_fs_blksz);
		goto cleanup_jdev_name;
	}

	if (journal_size < (256*1024) || journal_size > (1024*1024*1024)) {
		printf("jnl: %s: open: journal size %lld looks bogus.\n", jdev_name, journal_size);
		goto cleanup_jdev_name;
	}

	min_size = phys_blksz * (phys_blksz / sizeof(block_info));
	/* Reject journals that are too small given the sector size of the device */
	if (journal_size < min_size) {
		printf("jnl: %s: open: journal size (%lld) too small given sector size of (%u)\n",
		       jdev_name, journal_size, phys_blksz);
		goto cleanup_jdev_name;
	}

	if ((journal_size % phys_blksz) != 0) {
		printf("jnl: %s: open: journal size 0x%llx is not an even multiple of block size 0x%x\n",
		       jdev_name, journal_size, phys_blksz);
		goto cleanup_jdev_name;
	}

	MALLOC_ZONE(jnl, struct journal *, sizeof(struct journal), M_JNL_JNL, M_WAITOK);
	memset(jnl, 0, sizeof(*jnl));

	jnl->jdev        = jvp;
	jnl->jdev_offset = offset;
	jnl->fsdev       = fsvp;
	jnl->flush       = flush;
	jnl->flush_arg   = arg;
	jnl->flags       = (flags & JOURNAL_OPTION_FLAGS_MASK);
	jnl->jdev_name   = jdev_name;
	lck_mtx_init(&jnl->old_start_lock, jnl_mutex_group, jnl_lock_attr);

	/* We need a reference to the mount to later pass to the throttling code for
	 * IO accounting.
	 */
	jnl->fsmount     = fsmount;
	mount_ref(fsmount, 0);

	get_io_info(jvp, phys_blksz, jnl, &context);

	if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&jnl->header_buf, phys_blksz, VM_KERN_MEMORY_FILE)) {
		printf("jnl: %s: open: could not allocate space for header buffer (%u bytes)\n", jdev_name, phys_blksz);
		goto bad_kmem_alloc;
	}
	jnl->header_buf_size = phys_blksz;

	jnl->jhdr = (journal_header *)jnl->header_buf;
	memset(jnl->jhdr, 0, sizeof(journal_header));

	// we have to set this up here so that do_journal_io() will work
	jnl->jhdr->jhdr_size = phys_blksz;

	if (read_journal_header(jnl, jnl->jhdr, phys_blksz) != phys_blksz) {
		printf("jnl: %s: open: could not read %u bytes for the journal header.\n",
		       jdev_name, phys_blksz);
		goto bad_journal;
	}

	orig_checksum = jnl->jhdr->checksum;
	jnl->jhdr->checksum = 0;

	if (jnl->jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
		// do this before the swap since it's done byte-at-a-time
		orig_checksum = SWAP32(orig_checksum);
		checksum = calc_checksum((char *)jnl->jhdr, JOURNAL_HEADER_CKSUM_SIZE);
		swap_journal_header(jnl);
		jnl->flags |= JOURNAL_NEED_SWAP;
	} else {
		checksum = calc_checksum((char *)jnl->jhdr, JOURNAL_HEADER_CKSUM_SIZE);
	}

	if (jnl->jhdr->magic != JOURNAL_HEADER_MAGIC && jnl->jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
		printf("jnl: %s: open: journal magic is bad (0x%x != 0x%x)\n",
		       jnl->jdev_name, jnl->jhdr->magic, JOURNAL_HEADER_MAGIC);
		goto bad_journal;
	}

	// only check if we're the current journal header magic value
	if (jnl->jhdr->magic == JOURNAL_HEADER_MAGIC) {

		if (orig_checksum != checksum) {
			printf("jnl: %s: open: journal checksum is bad (0x%x != 0x%x)\n",
			       jdev_name, orig_checksum, checksum);

			//goto bad_journal;
		}
	}

	// XXXdbg - convert old style magic numbers to the new one
	if (jnl->jhdr->magic == OLD_JOURNAL_HEADER_MAGIC) {
		jnl->jhdr->magic = JOURNAL_HEADER_MAGIC;
	}

	if (phys_blksz != (size_t)jnl->jhdr->jhdr_size && jnl->jhdr->jhdr_size != 0) {
		/*
		 * The volume has probably been resized (such that we had to adjust the
		 * logical sector size), or copied to media with a different logical
		 * sector size.
		 *
		 * Temporarily change the device's logical block size to match the
		 * journal's header size.  This will allow us to replay the journal
		 * safely.  If the replay succeeds, we will update the journal's header
		 * size (later in this function).
		 */
		orig_blksz = phys_blksz;
		phys_blksz = jnl->jhdr->jhdr_size;
		VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&phys_blksz, FWRITE, &context);
		printf("jnl: %s: open: temporarily switched block size from %u to %u\n",
		       jdev_name, orig_blksz, phys_blksz);
	}

	if (   jnl->jhdr->start <= 0
	    || jnl->jhdr->start > jnl->jhdr->size
	    || jnl->jhdr->start > 1024*1024*1024) {
		printf("jnl: %s: open: jhdr start looks bad (0x%llx max size 0x%llx)\n",
		       jdev_name, jnl->jhdr->start, jnl->jhdr->size);
		goto bad_journal;
	}

	if (   jnl->jhdr->end <= 0
	    || jnl->jhdr->end > jnl->jhdr->size
	    || jnl->jhdr->end > 1024*1024*1024) {
		printf("jnl: %s: open: jhdr end looks bad (0x%llx max size 0x%llx)\n",
		       jdev_name, jnl->jhdr->end, jnl->jhdr->size);
		goto bad_journal;
	}

	if (jnl->jhdr->size < (256*1024) || jnl->jhdr->size > 1024*1024*1024) {
		printf("jnl: %s: open: jhdr size looks bad (0x%llx)\n", jdev_name, jnl->jhdr->size);
		goto bad_journal;
	}

// XXXdbg - can't do these checks because hfs writes all kinds of
//          non-uniform sized blocks even on devices that have a block size
//          that is larger than 512 bytes (i.e. optical media w/2k blocks).
//          therefore these checks will fail and so we just have to punt and
//          do more relaxed checking...
// XXXdbg    if ((jnl->jhdr->start % jnl->jhdr->jhdr_size) != 0) {
	if ((jnl->jhdr->start % 512) != 0) {
		printf("jnl: %s: open: journal start (0x%llx) not a multiple of 512?\n",
		       jdev_name, jnl->jhdr->start);
		goto bad_journal;
	}

//XXXdbg    if ((jnl->jhdr->end % jnl->jhdr->jhdr_size) != 0) {
	if ((jnl->jhdr->end % 512) != 0) {
		printf("jnl: %s: open: journal end (0x%llx) not a multiple of block size (0x%x)?\n",
		       jdev_name, jnl->jhdr->end, jnl->jhdr->jhdr_size);
		goto bad_journal;
	}

	// take care of replaying the journal if necessary
	if (flags & JOURNAL_RESET) {
		printf("jnl: %s: journal start/end pointers reset! (s 0x%llx e 0x%llx)\n",
		       jdev_name, jnl->jhdr->start, jnl->jhdr->end);
		jnl->jhdr->start = jnl->jhdr->end;
	} else if (replay_journal(jnl) != 0) {
		printf("jnl: %s: journal_open: Error replaying the journal!\n", jdev_name);
		goto bad_journal;
	}

	/*
	 * When we get here, we know that the journal is empty (jnl->jhdr->start ==
	 * jnl->jhdr->end).  If the device's logical block size was different from
	 * the journal's header size, then we can now restore the device's logical
	 * block size and update the journal's header size to match.
	 *
	 * Note that we also adjust the journal's start and end so that they will
	 * be aligned on the new block size.  We pick a new sequence number to
	 * avoid any problems if a replay found previous transactions using the old
	 * journal header size.  (See the comments in journal_create(), above.)
	 */
	if (orig_blksz != 0) {
		VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, &context);
		phys_blksz = orig_blksz;

		orig_blksz = 0;

		jnl->jhdr->jhdr_size = phys_blksz;
		jnl->jhdr->start = phys_blksz;
		jnl->jhdr->end = phys_blksz;
		jnl->jhdr->sequence_num = (jnl->jhdr->sequence_num +
					   (journal_size / phys_blksz) +
					   (random() % 16384)) & 0x00ffffff;

		if (write_journal_header(jnl, 1, jnl->jhdr->sequence_num)) {
			printf("jnl: %s: open: failed to update journal header size\n", jdev_name);
			goto bad_journal;
		}
	}

	// make sure this is in sync!
	jnl->active_start = jnl->jhdr->start;
	jnl->sequence_num = jnl->jhdr->sequence_num;

	// set this now, after we've replayed the journal
	size_up_tbuffer(jnl, tbuffer_size, phys_blksz);

	// TODO: Does this need to change if the device's logical block size changed?
	if ((off_t)(jnl->jhdr->blhdr_size/sizeof(block_info)-1) > (jnl->jhdr->size/jnl->jhdr->jhdr_size)) {
		printf("jnl: %s: open: jhdr size and blhdr size are not compatible (0x%llx, %d, %d)\n", jdev_name, jnl->jhdr->size,
		       jnl->jhdr->blhdr_size, jnl->jhdr->jhdr_size);
		goto bad_journal;
	}

	lck_mtx_init(&jnl->jlock, jnl_mutex_group, jnl_lock_attr);
	lck_mtx_init(&jnl->flock, jnl_mutex_group, jnl_lock_attr);
	lck_rw_init(&jnl->trim_lock, jnl_mutex_group, jnl_lock_attr);

	goto journal_open_complete;

bad_journal:
	if (orig_blksz != 0) {
		phys_blksz = orig_blksz;
		VNOP_IOCTL(jvp, DKIOCSETBLOCKSIZE, (caddr_t)&orig_blksz, FWRITE, &context);
		printf("jnl: %s: open: restored block size after error\n", jdev_name);
	}
	kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, phys_blksz);
bad_kmem_alloc:
	FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
	mount_drop(fsmount, 0);
cleanup_jdev_name:
	vnode_putname_printable(jdev_name);
	jnl = NULL;
journal_open_complete:
	return jnl;
}

int
journal_is_clean(struct vnode *jvp,
		 off_t         offset,
		 off_t         journal_size,
		 struct vnode *fsvp,
		 size_t        min_fs_block_size)
{
	journal		jnl;
	uint32_t	phys_blksz;
	int		ret;
	int		orig_checksum, checksum;
	struct vfs_context context;
	const char	*jdev_name = vnode_getname_printable(jvp);

	context.vc_thread = current_thread();
	context.vc_ucred = FSCRED;

	/* Get the real physical block size. */
	if (VNOP_IOCTL(jvp, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, &context)) {
		printf("jnl: %s: is_clean: failed to get device block size.\n", jdev_name);
		ret = EINVAL;
		goto cleanup_jdev_name;
	}

	if (phys_blksz > (uint32_t)min_fs_block_size) {
		printf("jnl: %s: is_clean: error: phys blksize %d bigger than min fs blksize %zd\n",
		       jdev_name, phys_blksz, min_fs_block_size);
		ret = EINVAL;
		goto cleanup_jdev_name;
	}

	if (journal_size < (256*1024) || journal_size > (MAX_JOURNAL_SIZE)) {
		printf("jnl: %s: is_clean: journal size %lld looks bogus.\n", jdev_name, journal_size);
		ret = EINVAL;
		goto cleanup_jdev_name;
	}

	if ((journal_size % phys_blksz) != 0) {
		printf("jnl: %s: is_clean: journal size 0x%llx is not an even multiple of block size 0x%x\n",
		       jdev_name, journal_size, phys_blksz);
		ret = EINVAL;
		goto cleanup_jdev_name;
	}

	memset(&jnl, 0, sizeof(jnl));

	if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&jnl.header_buf, phys_blksz, VM_KERN_MEMORY_FILE)) {
		printf("jnl: %s: is_clean: could not allocate space for header buffer (%d bytes)\n", jdev_name, phys_blksz);
		ret = ENOMEM;
		goto cleanup_jdev_name;
	}
	jnl.header_buf_size = phys_blksz;

	get_io_info(jvp, phys_blksz, &jnl, &context);

	jnl.jhdr = (journal_header *)jnl.header_buf;
	memset(jnl.jhdr, 0, sizeof(journal_header));

	jnl.jdev        = jvp;
	jnl.jdev_offset = offset;
	jnl.fsdev       = fsvp;

	// we have to set this up here so that do_journal_io() will work
	jnl.jhdr->jhdr_size = phys_blksz;

	if (read_journal_header(&jnl, jnl.jhdr, phys_blksz) != (unsigned)phys_blksz) {
		printf("jnl: %s: is_clean: could not read %d bytes for the journal header.\n",
		       jdev_name, phys_blksz);
		ret = EINVAL;
		goto get_out;
	}

	orig_checksum = jnl.jhdr->checksum;
	jnl.jhdr->checksum = 0;

	if (jnl.jhdr->magic == SWAP32(JOURNAL_HEADER_MAGIC)) {
		// do this before the swap since it's done byte-at-a-time
		orig_checksum = SWAP32(orig_checksum);
		checksum = calc_checksum((char *)jnl.jhdr, JOURNAL_HEADER_CKSUM_SIZE);
		swap_journal_header(&jnl);
		jnl.flags |= JOURNAL_NEED_SWAP;
	} else {
		checksum = calc_checksum((char *)jnl.jhdr, JOURNAL_HEADER_CKSUM_SIZE);
	}

	if (jnl.jhdr->magic != JOURNAL_HEADER_MAGIC && jnl.jhdr->magic != OLD_JOURNAL_HEADER_MAGIC) {
		printf("jnl: %s: is_clean: journal magic is bad (0x%x != 0x%x)\n",
		       jdev_name, jnl.jhdr->magic, JOURNAL_HEADER_MAGIC);
		ret = EINVAL;
		goto get_out;
	}

	if (orig_checksum != checksum) {
		printf("jnl: %s: is_clean: journal checksum is bad (0x%x != 0x%x)\n", jdev_name, orig_checksum, checksum);
		ret = EINVAL;
		goto get_out;
	}

	//
	// if the start and end are equal then the journal is clean.
	// otherwise it's not clean and therefore an error.
	//
	if (jnl.jhdr->start == jnl.jhdr->end) {
		ret = 0;
	} else {
		ret = EBUSY;	// so the caller can differentiate an invalid journal from a "busy" one
	}

get_out:
	kmem_free(kernel_map, (vm_offset_t)jnl.header_buf, phys_blksz);
cleanup_jdev_name:
	vnode_putname_printable(jdev_name);
	return ret;
}
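
/*
 * Example (hypothetical caller, illustration only -- the vnode and geometry
 * arguments below are stand-ins, not values defined in this file): a file
 * system deciding whether it can skip journal replay at mount time might do:
 *
 *	int err = journal_is_clean(jvp, jnl_offset, jnl_size, fsvp, fs_blksz);
 *	if (err == 0) {
 *		// start == end in the header: nothing to replay
 *	} else if (err == EBUSY) {
 *		// valid journal with pending transactions; journal_open()
 *		// must be allowed to replay them before the volume is used
 *	} else {
 *		// EINVAL/ENOMEM: the journal itself could not be validated
 *	}
 */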

void
journal_close(journal *jnl)
{
	volatile off_t *start, *end;
	int		counter = 0;

	CHECK_JOURNAL(jnl);

	// set this before doing anything that would block so that
	// we start tearing things down properly.
	//
	jnl->flags |= JOURNAL_CLOSE_PENDING;

	if (jnl->owner != current_thread()) {
		journal_lock(jnl);
	}

	wait_condition(jnl, &jnl->flushing, "journal_close");

	//
	// only write stuff to disk if the journal is still valid
	//
	if ((jnl->flags & JOURNAL_INVALID) == 0) {

		if (jnl->active_tr) {
			/*
			 * "journal_end_transaction" will fire the flush asynchronously
			 */
			journal_end_transaction(jnl);
		}

		// flush any buffered transactions
		if (jnl->cur_tr) {
			transaction *tr = jnl->cur_tr;

			jnl->cur_tr = NULL;

			/*
			 * "end_transaction" will wait for any in-progress flush to complete
			 * before flushing "cur_tr" synchronously ("must_wait" == TRUE)
			 */
			end_transaction(tr, 1, NULL, NULL, FALSE, TRUE);
		}
		/*
		 * if there was an "active_tr", make sure we wait for
		 * it to flush if there was no "cur_tr" to process
		 */
		wait_condition(jnl, &jnl->flushing, "journal_close");

		//start = &jnl->jhdr->start;
		start = &jnl->active_start;
		end   = &jnl->jhdr->end;

		while (*start != *end && counter++ < 5000) {
			//printf("jnl: close: flushing the buffer cache (start 0x%llx end 0x%llx)\n", *start, *end);
			if (jnl->flush) {
				jnl->flush(jnl->flush_arg);
			}
			tsleep((caddr_t)jnl, PRIBIO, "jnl_close", 2);
		}

		if (*start != *end) {
			printf("jnl: %s: close: buffer flushing didn't seem to flush out all the transactions! (0x%llx - 0x%llx)\n",
			       jnl->jdev_name, *start, *end);
		}

		// make sure this is in sync when we close the journal
		jnl->jhdr->start = jnl->active_start;

		// if this fails there's not much we can do at this point...
		write_journal_header(jnl, 1, jnl->sequence_num);
	} else {
		// if we're here the journal isn't valid any more.
		// so make sure we don't leave any locked blocks lying around
		printf("jnl: %s: close: journal is invalid.  aborting outstanding transactions\n", jnl->jdev_name);
		if (jnl->active_tr || jnl->cur_tr) {
			transaction *tr;

			if (jnl->active_tr) {
				tr = jnl->active_tr;
				jnl->active_tr = NULL;
			} else {
				tr = jnl->cur_tr;
				jnl->cur_tr = NULL;
			}
			abort_transaction(jnl, tr);

			if (jnl->active_tr || jnl->cur_tr) {
				panic("jnl: %s: close: jnl @ %p had both an active and cur tr\n", jnl->jdev_name, jnl);
			}
		}
	}
	wait_condition(jnl, &jnl->asyncIO, "journal_close");

	free_old_stuff(jnl);

	kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, jnl->header_buf_size);
	jnl->jhdr = (void *)0xbeefbabe;

	// Release reference on the mount
	if (jnl->fsmount)
		mount_drop(jnl->fsmount, 0);

	vnode_putname_printable(jnl->jdev_name);

	journal_unlock(jnl);
	lck_mtx_destroy(&jnl->old_start_lock, jnl_mutex_group);
	lck_mtx_destroy(&jnl->jlock, jnl_mutex_group);
	lck_mtx_destroy(&jnl->flock, jnl_mutex_group);
	FREE_ZONE(jnl, sizeof(struct journal), M_JNL_JNL);
}

static void
dump_journal(journal *jnl)
{
	transaction *ctr;

	printf("journal for dev %s:", jnl->jdev_name);
	printf("  jdev_offset %.8llx\n", jnl->jdev_offset);
	printf("  magic: 0x%.8x\n", jnl->jhdr->magic);
	printf("  start: 0x%.8llx\n", jnl->jhdr->start);
	printf("  end:   0x%.8llx\n", jnl->jhdr->end);
	printf("  size:  0x%.8llx\n", jnl->jhdr->size);
	printf("  blhdr size: %d\n", jnl->jhdr->blhdr_size);
	printf("  jhdr size: %d\n", jnl->jhdr->jhdr_size);
	printf("  chksum: 0x%.8x\n", jnl->jhdr->checksum);

	printf("  completed transactions:\n");
	for (ctr = jnl->completed_trs; ctr; ctr = ctr->next) {
		printf("    0x%.8llx - 0x%.8llx\n", ctr->journal_start, ctr->journal_end);
	}
}

static off_t
free_space(journal *jnl)
{
	off_t free_space_offset;

	if (jnl->jhdr->start < jnl->jhdr->end) {
		free_space_offset = jnl->jhdr->size - (jnl->jhdr->end - jnl->jhdr->start) - jnl->jhdr->jhdr_size;
	} else if (jnl->jhdr->start > jnl->jhdr->end) {
		free_space_offset = jnl->jhdr->start - jnl->jhdr->end;
	} else {
		// journal is completely empty
		free_space_offset = jnl->jhdr->size - jnl->jhdr->jhdr_size;
	}

	return free_space_offset;
}
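
/*
 * Worked example of the free-space math above (numbers made up for
 * illustration): with size = 0x800000, jhdr_size = 0x1000, start = 0x100000
 * and end = 0x300000, the in-use region sits between start and end, so
 * free = 0x800000 - (0x300000 - 0x100000) - 0x1000 = 0x5FF000 bytes.
 * If instead start = 0x300000 and end = 0x100000 (the active region wraps
 * around the end of the journal file), free is simply start - end = 0x200000.
 */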

//
// The journal must be locked on entry to this function.
// The "desired_size" is in bytes.
//
static int
check_free_space(journal *jnl, int desired_size, boolean_t *delayed_header_write, uint32_t sequence_num)
{
	size_t	i;
	int	counter = 0;

	//printf("jnl: check free space (desired 0x%x, avail 0x%Lx)\n",
	//	   desired_size, free_space(jnl));

	if (delayed_header_write)
		*delayed_header_write = FALSE;

	while (1) {
		int old_start_empty;

		// make sure there's space in the journal to hold this transaction
		if (free_space(jnl) > desired_size && jnl->old_start[0] == 0) {
			break;
		}
		if (counter++ == 5000) {
			dump_journal(jnl);
			panic("jnl: check_free_space: buffer flushing isn't working "
			      "(jnl @ %p s %lld e %lld f %lld [active start %lld]).\n", jnl,
			      jnl->jhdr->start, jnl->jhdr->end, free_space(jnl), jnl->active_start);
		}
		if (counter > 7500) {
			printf("jnl: %s: check_free_space: giving up waiting for free space.\n", jnl->jdev_name);
			return ENOSPC;
		}

		//
		// here's where we lazily bump up jnl->jhdr->start.  we'll consume
		// entries until there is enough space for the next transaction.
		//
		old_start_empty = 1;
		lock_oldstart(jnl);

		for (i = 0; i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0]); i++) {
			int lcl_counter;

			lcl_counter = 0;
			while (jnl->old_start[i] & 0x8000000000000000LL) {
				if (lcl_counter++ > 10000) {
					panic("jnl: check_free_space: tr starting @ 0x%llx not flushing (jnl %p).\n",
					      jnl->old_start[i], jnl);
				}

				unlock_oldstart(jnl);
				if (jnl->flush) {
					jnl->flush(jnl->flush_arg);
				}
				tsleep((caddr_t)jnl, PRIBIO, "check_free_space1", 1);
				lock_oldstart(jnl);
			}

			if (jnl->old_start[i] == 0) {
				continue;
			}

			old_start_empty   = 0;
			jnl->jhdr->start  = jnl->old_start[i];
			jnl->old_start[i] = 0;

			if (free_space(jnl) > desired_size) {

				if (delayed_header_write)
					*delayed_header_write = TRUE;
				else {
					unlock_oldstart(jnl);
					write_journal_header(jnl, 1, sequence_num);
					lock_oldstart(jnl);
				}
				break;
			}
		}
		unlock_oldstart(jnl);

		// if we bumped the start, loop and try again
		if (i < sizeof(jnl->old_start)/sizeof(jnl->old_start[0])) {
			continue;
		} else if (old_start_empty) {
			//
			// if there is nothing in old_start anymore then we can
			// bump the jhdr->start to be the same as active_start
			// since it is possible there was only one very large
			// transaction in the old_start array.  if we didn't do
			// this then jhdr->start would never get updated and we
			// would wind up looping until we hit the panic at the
			// start of the loop.
			//
			jnl->jhdr->start = jnl->active_start;

			if (delayed_header_write)
				*delayed_header_write = TRUE;
			else
				write_journal_header(jnl, 1, sequence_num);
			continue;
		}

		// if the file system gave us a flush function, call it so that
		// it can flush some blocks which hopefully will cause some transactions
		// to complete and thus free up space in the journal.
		if (jnl->flush) {
			jnl->flush(jnl->flush_arg);
		}

		// wait for a while to avoid being cpu-bound (this will
		// put us to sleep for 10 milliseconds)
		tsleep((caddr_t)jnl, PRIBIO, "check_free_space2", 1);
	}

	return 0;
}

/*
 * Allocate a new active transaction.
 */
static errno_t
journal_allocate_transaction(journal *jnl)
{
	transaction *tr;
	boolean_t was_vm_privileged = FALSE;
	kern_return_t retval;

	if (jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT) {
		/*
		 * the disk driver can allocate memory on this path...
		 * if we block waiting for memory, and there is enough pressure to
		 * cause us to try and create a new swap file, we may end up deadlocking
		 * due to waiting for the journal on the swap file creation path...
		 * by making ourselves vm_privileged, we give ourselves the best chance
		 * of not blocking
		 */
		was_vm_privileged = set_vm_privilege(TRUE);
	}
	MALLOC_ZONE(tr, transaction *, sizeof(transaction), M_JNL_TR, M_WAITOK);
	memset(tr, 0, sizeof(transaction));

	tr->tbuffer_size = jnl->tbuffer_size;

	retval = kmem_alloc_kobject(kernel_map, (vm_offset_t *)&tr->tbuffer, tr->tbuffer_size, VM_KERN_MEMORY_FILE);

	if ((jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT) && (was_vm_privileged == FALSE))
		set_vm_privilege(FALSE);

	if (retval) {
		FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);
		jnl->active_tr = NULL;
		return ENOMEM;
	}

	// journal replay code checksum check depends on this.
	memset(tr->tbuffer, 0, BLHDR_CHECKSUM_SIZE);
	// Fill up the rest of the block with unimportant bytes (0x5a 'Z' chosen for visibility)
	memset(tr->tbuffer + BLHDR_CHECKSUM_SIZE, 0x5a, jnl->jhdr->blhdr_size - BLHDR_CHECKSUM_SIZE);

	tr->blhdr = (block_list_header *)tr->tbuffer;
	tr->blhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
	tr->blhdr->num_blocks = 1;	// accounts for this header block
	tr->blhdr->bytes_used = jnl->jhdr->blhdr_size;
	tr->blhdr->flags = BLHDR_CHECK_CHECKSUMS | BLHDR_FIRST_HEADER;

	tr->sequence_num = ++jnl->sequence_num;
	tr->num_blhdrs  = 1;
	tr->total_bytes = jnl->jhdr->blhdr_size;
	tr->jnl         = jnl;

	jnl->active_tr  = tr;

	return 0;
}
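
/*
 * Sketch of the transaction buffer this function builds (a simplified view,
 * not authoritative layout documentation): tr->tbuffer begins with a
 * block_list_header whose binfo[] array can describe
 * (blhdr_size / sizeof(block_info)) - 1 blocks; binfo[0] is reserved for the
 * header itself, and is later reused by journal_modify_block_end() to chain
 * to the next tbuffer when this one fills up.
 *
 *	tbuffer:  [ block_list_header | binfo[1] .. binfo[max_blocks] | ... ]
 *	          num_blocks = 1, bytes_used = blhdr_size on creation
 */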

int
journal_start_transaction(journal *jnl)
{
	int ret;

	CHECK_JOURNAL(jnl);

	free_old_stuff(jnl);

	if (jnl->flags & JOURNAL_INVALID) {
		return EINVAL;
	}
	if (jnl->owner == current_thread()) {
		if (jnl->active_tr == NULL) {
			panic("jnl: start_tr: active_tr is NULL (jnl @ %p, owner %p, current_thread %p\n",
			      jnl, jnl->owner, current_thread());
		}
		jnl->nested_count++;
		return 0;
	}

	journal_lock(jnl);

	if (jnl->nested_count != 0 || jnl->active_tr != NULL) {
		panic("jnl: start_tr: owner %p, nested count %d, active_tr %p jnl @ %p\n",
		      jnl->owner, jnl->nested_count, jnl->active_tr, jnl);
	}

	jnl->nested_count = 1;

	// make sure there's room in the journal
	if (free_space(jnl) < jnl->tbuffer_size) {

		KERNEL_DEBUG(0xbbbbc030 | DBG_FUNC_START, jnl, 0, 0, 0, 0);

		// this is the call that really waits for space to free up
		// as well as updating jnl->jhdr->start
		if (check_free_space(jnl, jnl->tbuffer_size, NULL, jnl->sequence_num) != 0) {
			printf("jnl: %s: start transaction failed: no space\n", jnl->jdev_name);
			ret = ENOSPC;
			goto bad_start;
		}
		KERNEL_DEBUG(0xbbbbc030 | DBG_FUNC_END, jnl, 0, 0, 0, 0);
	}

	// if there's a buffered transaction, use it.
	if (jnl->cur_tr) {
		jnl->active_tr = jnl->cur_tr;
		jnl->cur_tr    = NULL;

		return 0;
	}

	ret = journal_allocate_transaction(jnl);
	if (ret) {
		goto bad_start;
	}

	// printf("jnl: start_tr: owner 0x%x new tr @ 0x%x\n", jnl->owner, jnl->active_tr);

	return 0;

bad_start:
	jnl->nested_count = 0;
	journal_unlock(jnl);

	return ret;
}

int
journal_modify_block_start(journal *jnl, struct buf *bp)
{
	transaction *tr;
	boolean_t was_vm_privileged = FALSE;

	CHECK_JOURNAL(jnl);

	free_old_stuff(jnl);

	if (jnl->flags & JOURNAL_INVALID) {
		return EINVAL;
	}

	if (jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT) {
		/*
		 * if we block waiting for memory, and there is enough pressure to
		 * cause us to try and create a new swap file, we may end up deadlocking
		 * due to waiting for the journal on the swap file creation path...
		 * by making ourselves vm_privileged, we give ourselves the best chance
		 * of not blocking
		 */
		was_vm_privileged = set_vm_privilege(TRUE);
	}

	// XXXdbg - for debugging I want this to be true.  later it may
	//          not be necessary.
	if ((buf_flags(bp) & B_META) == 0) {
		panic("jnl: modify_block_start: bp @ %p is not a meta-data block! (jnl %p)\n", bp, jnl);
	}

	tr = jnl->active_tr;
	CHECK_TRANSACTION(tr);

	if (jnl->owner != current_thread()) {
		panic("jnl: modify_block_start: called w/out a transaction! jnl %p, owner %p, curact %p\n",
		      jnl, jnl->owner, current_thread());
	}

	//printf("jnl: mod block start (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d; total bytes %d)\n",
	//	   bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes);

	// can't allow blocks that aren't an even multiple of the
	// underlying block size.
	if ((buf_size(bp) % jnl->jhdr->jhdr_size) != 0) {
		uint32_t phys_blksz, bad = 0;

		if (VNOP_IOCTL(jnl->jdev, DKIOCGETBLOCKSIZE, (caddr_t)&phys_blksz, 0, vfs_context_kernel())) {
			bad = 1;
		} else if (phys_blksz != (uint32_t)jnl->jhdr->jhdr_size) {
			if (phys_blksz < 512) {
				panic("jnl: mod block start: phys blksz %d is too small (%d, %d)\n",
				      phys_blksz, buf_size(bp), jnl->jhdr->jhdr_size);
			}

			if ((buf_size(bp) % phys_blksz) != 0) {
				bad = 1;
			} else if (phys_blksz < (uint32_t)jnl->jhdr->jhdr_size) {
				jnl->jhdr->jhdr_size = phys_blksz;
			} else {
				// the phys_blksz is now larger... need to realloc the jhdr
				char *new_header_buf;

				printf("jnl: %s: phys blksz got bigger (was: %d/%d now %d)\n",
				       jnl->jdev_name, jnl->header_buf_size, jnl->jhdr->jhdr_size, phys_blksz);
				if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&new_header_buf, phys_blksz, VM_KERN_MEMORY_FILE)) {
					printf("jnl: modify_block_start: %s: create: phys blksz change (was %d, now %d) but could not allocate space for new header\n",
					       jnl->jdev_name, jnl->jhdr->jhdr_size, phys_blksz);
					bad = 1;
				} else {
					memcpy(new_header_buf, jnl->header_buf, jnl->header_buf_size);
					memset(&new_header_buf[jnl->header_buf_size], 0x18, (phys_blksz - jnl->header_buf_size));
					kmem_free(kernel_map, (vm_offset_t)jnl->header_buf, jnl->header_buf_size);
					jnl->header_buf = new_header_buf;
					jnl->header_buf_size = phys_blksz;

					jnl->jhdr = (journal_header *)jnl->header_buf;
					jnl->jhdr->jhdr_size = phys_blksz;
				}
			}
		} else {
			bad = 1;
		}

		if (bad) {
			panic("jnl: mod block start: bufsize %d not a multiple of block size %d\n",
			      buf_size(bp), jnl->jhdr->jhdr_size);

			if ((jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT) && (was_vm_privileged == FALSE))
				set_vm_privilege(FALSE);
			return -1;
		}
	}

	// make sure that this transaction isn't bigger than the whole journal
	if (tr->total_bytes+buf_size(bp) >= (jnl->jhdr->size - jnl->jhdr->jhdr_size)) {
		panic("jnl: transaction too big (%d >= %lld bytes, bufsize %d, tr %p bp %p)\n",
		      tr->total_bytes, (tr->jnl->jhdr->size - jnl->jhdr->jhdr_size), buf_size(bp), tr, bp);

		if ((jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT) && (was_vm_privileged == FALSE))
			set_vm_privilege(FALSE);
		return -1;
	}

	// if the block is dirty and not already locked we have to write
	// it out before we muck with it because it has data that belongs
	// (presumably) to another transaction.
	//
	if ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI) {

		if (buf_flags(bp) & B_ASYNC) {
			panic("modify_block_start: bp @ %p has async flag set!\n", bp);
		}
		if (bp->b_shadow_ref)
			panic("modify_block_start: dirty bp @ %p has shadows!\n", bp);

		// this will cause it to not be buf_brelse()'d
		buf_setflags(bp, B_NORELSE);
		VNOP_BWRITE(bp);
	}
	buf_setflags(bp, B_LOCKED);

	if ((jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT) && (was_vm_privileged == FALSE))
		set_vm_privilege(FALSE);

	return 0;
}

int
journal_modify_block_abort(journal *jnl, struct buf *bp)
{
	transaction	*tr;
	block_list_header *blhdr;
	int		i;

	CHECK_JOURNAL(jnl);

	free_old_stuff(jnl);

	tr = jnl->active_tr;

	//
	// if there's no active transaction then we just want to
	// call buf_brelse() and return since this is just a block
	// that happened to be modified as part of another tr.
	//
	if (tr == NULL) {
		buf_brelse(bp);
		return 0;
	}

	if (jnl->flags & JOURNAL_INVALID) {
		/* Still need to buf_brelse().  Callers assume we consume the bp. */
		buf_brelse(bp);
		return EINVAL;
	}

	CHECK_TRANSACTION(tr);

	if (jnl->owner != current_thread()) {
		panic("jnl: modify_block_abort: called w/out a transaction! jnl %p, owner %p, curact %p\n",
		      jnl, jnl->owner, current_thread());
	}

	// printf("jnl: modify_block_abort: tr 0x%x bp 0x%x\n", jnl->active_tr, bp);

	// first check if it's already part of this transaction
	for (blhdr = tr->blhdr; blhdr; blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) {
		for (i = 1; i < blhdr->num_blocks; i++) {
			if (bp == blhdr->binfo[i].u.bp) {
				break;
			}
		}

		if (i < blhdr->num_blocks) {
			break;
		}
	}

	//
	// if blhdr is null, then this block has only had modify_block_start
	// called on it as part of the current transaction.  that means that
	// it is ok to clear the LOCKED bit since it hasn't actually been
	// modified.  if blhdr is non-null then modify_block_end was called
	// on it and so we need to keep it locked in memory.
	//
	if (blhdr == NULL) {
		buf_clearflags(bp, B_LOCKED);
	}

	buf_brelse(bp);
	return 0;
}

int
journal_modify_block_end(journal *jnl, struct buf *bp, void (*func)(buf_t bp, void *arg), void *arg)
{
	int		i = 1;
	int		tbuffer_offset = 0;
	block_list_header *blhdr, *prev = NULL;
	transaction	*tr;

	CHECK_JOURNAL(jnl);

	free_old_stuff(jnl);

	if (jnl->flags & JOURNAL_INVALID) {
		/* Still need to buf_brelse().  Callers assume we consume the bp. */
		buf_brelse(bp);
		return EINVAL;
	}

	tr = jnl->active_tr;
	CHECK_TRANSACTION(tr);

	if (jnl->owner != current_thread()) {
		panic("jnl: modify_block_end: called w/out a transaction! jnl %p, owner %p, curact %p\n",
		      jnl, jnl->owner, current_thread());
	}

	//printf("jnl: mod block end: (bp 0x%x vp 0x%x l/blkno %qd/%qd bsz %d, total bytes %d)\n",
	//	   bp, buf_vnode(bp), buf_lblkno(bp), buf_blkno(bp), buf_size(bp), tr->total_bytes);

	if ((buf_flags(bp) & B_LOCKED) == 0) {
		panic("jnl: modify_block_end: bp %p not locked! jnl @ %p\n", bp, jnl);
	}

	// first check if it's already part of this transaction
	for (blhdr = tr->blhdr; blhdr; prev = blhdr, blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) {
		tbuffer_offset = jnl->jhdr->blhdr_size;

		for (i = 1; i < blhdr->num_blocks; i++) {
			if (bp == blhdr->binfo[i].u.bp) {
				break;
			}
			if (blhdr->binfo[i].bnum != (off_t)-1) {
				tbuffer_offset += buf_size(blhdr->binfo[i].u.bp);
			} else {
				tbuffer_offset += blhdr->binfo[i].u.bi.bsize;
			}
		}

		if (i < blhdr->num_blocks) {
			break;
		}
	}

	if (blhdr == NULL
	    && prev
	    && (prev->num_blocks+1) <= prev->max_blocks
	    && (prev->bytes_used+buf_size(bp)) <= (uint32_t)tr->tbuffer_size) {
		blhdr = prev;

	} else if (blhdr == NULL) {
		block_list_header *nblhdr;
		if (prev == NULL) {
			panic("jnl: modify block end: no way man, prev == NULL?!?, jnl %p, bp %p\n", jnl, bp);
		}

		// we got to the end of the list, didn't find the block and there's
		// no room in the block_list_header pointed to by prev

		// we allocate another tbuffer and link it in at the end of the list
		// through prev->binfo[0].bnum.  that's a skanky way to do things but
		// avoids having yet another linked list of small data structures to manage.

		if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&nblhdr, tr->tbuffer_size, VM_KERN_MEMORY_FILE)) {
			panic("jnl: end_tr: no space for new block tr @ %p (total bytes: %d)!\n",
			      tr, tr->total_bytes);
		}

		// journal replay code checksum check depends on this.
		memset(nblhdr, 0, BLHDR_CHECKSUM_SIZE);
		// Fill up the rest of the block with unimportant bytes
		memset(nblhdr + BLHDR_CHECKSUM_SIZE, 0x5a, jnl->jhdr->blhdr_size - BLHDR_CHECKSUM_SIZE);

		// initialize the new guy
		nblhdr->max_blocks = (jnl->jhdr->blhdr_size / sizeof(block_info)) - 1;
		nblhdr->num_blocks = 1;	// accounts for this header block
		nblhdr->bytes_used = jnl->jhdr->blhdr_size;
		nblhdr->flags = BLHDR_CHECK_CHECKSUMS;

		tr->num_blhdrs++;
		tr->total_bytes += jnl->jhdr->blhdr_size;

		// then link him in at the end
		prev->binfo[0].bnum = (off_t)((long)nblhdr);

		// and finally switch to using the new guy
		blhdr          = nblhdr;
		tbuffer_offset = jnl->jhdr->blhdr_size;
		i              = 1;
	}

	if ((i+1) > blhdr->max_blocks) {
		panic("jnl: modify_block_end: i = %d, max_blocks %d\n", i, blhdr->max_blocks);
	}

	// if this is true then this is a new block we haven't seen
	if (i >= blhdr->num_blocks) {
		int	bsize;
		vnode_t	vp;

		vp = buf_vnode(bp);
		if (vnode_ref(vp)) {
			// Nobody checks the return values, so...
			jnl->flags |= JOURNAL_INVALID;

			buf_brelse(bp);

			// We're probably here due to a force unmount, so EIO is appropriate
			return EIO;
		}

		bsize = buf_size(bp);

		blhdr->binfo[i].bnum = (off_t)(buf_blkno(bp));
		blhdr->binfo[i].u.bp = bp;

		task_update_logical_writes(current_task(), (2 * bsize), TASK_WRITE_METADATA);
		KERNEL_DEBUG_CONSTANT(0x3018004, VM_KERNEL_ADDRPERM(vp), blhdr->binfo[i].bnum, bsize, 0, 0);

		if (func) {
			void (*old_func)(buf_t, void *) = NULL, *old_arg = NULL;

			buf_setfilter(bp, func, arg, &old_func, &old_arg);
			if (old_func != NULL && old_func != func) {
				panic("jnl: modify_block_end: old func %p / arg %p (func %p)", old_func, old_arg, func);
			}
		}

		blhdr->bytes_used += bsize;
		tr->total_bytes   += bsize;

		blhdr->num_blocks++;
	}
	buf_bdwrite(bp);

	return 0;
}

int
journal_kill_block(journal *jnl, struct buf *bp)
{
	int		i;
	int		bflags;
	block_list_header *blhdr;
	transaction	*tr;

	CHECK_JOURNAL(jnl);

	free_old_stuff(jnl);

	if (jnl->flags & JOURNAL_INVALID) {
		buf_brelse(bp);
		return 0;
	}

	tr = jnl->active_tr;
	CHECK_TRANSACTION(tr);

	if (jnl->owner != current_thread()) {
		panic("jnl: kill_block: called w/out a transaction! jnl %p, owner %p, curact %p\n",
		      jnl, jnl->owner, current_thread());
	}

	bflags = buf_flags(bp);

	if ( !(bflags & B_LOCKED))
		panic("jnl: kill_block: called with bp not B_LOCKED");

	/*
	 * bp must be BL_BUSY and B_LOCKED
	 * first check if it's already part of this transaction
	 */
	for (blhdr = tr->blhdr; blhdr; blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) {

		for (i = 1; i < blhdr->num_blocks; i++) {
			if (bp == blhdr->binfo[i].u.bp) {
				vnode_t vp;

				buf_clearflags(bp, B_LOCKED);

				// this undoes the vnode_ref() in journal_modify_block_end()
				vp = buf_vnode(bp);
				vnode_rele_ext(vp, 0, 1);

				// if the block has the DELWRI and FILTER bits sets, then
				// things are seriously weird.  if it was part of another
				// transaction then journal_modify_block_start() should
				// have forced it to be written.
				//
				//if ((bflags & B_DELWRI) && (bflags & B_FILTER)) {
				//	panic("jnl: kill block: this defies all logic! bp 0x%x\n", bp);
				//} else {
					tr->num_killed += buf_size(bp);
				//}
				blhdr->binfo[i].bnum = (off_t)-1;
				blhdr->binfo[i].u.bp = NULL;
				blhdr->binfo[i].u.bi.bsize = buf_size(bp);

				buf_markinvalid(bp);
				buf_brelse(bp);

				return 0;
			}
		}
	}

	/*
	 * We did not find the block in any transaction buffer but we still
	 * need to release it or else it will be left locked forever.
	 */
	buf_brelse(bp);

	return 0;
}
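
/*
 * Putting the modify_* calls together -- a hypothetical file-system update
 * (the my_* identifiers below are placeholders, not part of this file):
 * metadata buffers are modified strictly inside a transaction, and every
 * buffer handed to journal_modify_block_start() must be consumed by
 * journal_modify_block_end(), journal_modify_block_abort(), or
 * journal_kill_block():
 *
 *	if (journal_start_transaction(jnl) == 0) {
 *		struct buf *bp = my_read_metadata_block(vp, blkno);
 *
 *		journal_modify_block_start(jnl, bp);
 *		my_update_metadata(bp);
 *		journal_modify_block_end(jnl, bp, NULL, NULL);
 *
 *		journal_end_transaction(jnl);
 *	}
 */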

/*
;________________________________________________________________________________
;
; Routine:	journal_trim_set_callback
;
; Function:	Provide the journal with a routine to be called back when a
;		TRIM has (or would have) been issued to the device.  That
;		is, the transaction has been flushed to the device, and the
;		blocks freed by the transaction are now safe for reuse.
;
;		CAUTION: If the journal becomes invalid (eg., due to an I/O
;		error when trying to write to the journal), this callback
;		will stop getting called, even if extents got freed before
;		the journal became invalid!
;
; Input Arguments:
;	jnl		- The journal structure for the filesystem.
;	callback	- The function to call when the TRIM is complete.
;	arg		- An argument to be passed to callback.
;________________________________________________________________________________
*/
__private_extern__ void
journal_trim_set_callback(journal *jnl, jnl_trim_callback_t callback, void *arg)
{
	jnl->trim_callback = callback;
	jnl->trim_callback_arg = arg;
}
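
/*
 * Example registration (hypothetical callback, for illustration; the callback
 * signature here is inferred from the invocation in journal_trim_flush()
 * below, not from an authoritative declaration): HFS-style code that wants
 * freed extents back only once they are safely trimmed might install a
 * callback at mount time:
 *
 *	static void
 *	my_trim_done(void *arg, uint32_t extent_count, const dk_extent_t *extents)
 *	{
 *		// safe to recycle these extents into a free-extent cache now
 *	}
 *
 *	journal_trim_set_callback(jnl, my_trim_done, my_mount_private);
 */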

/*
;________________________________________________________________________________
;
; Routine:	journal_trim_realloc
;
; Function:	Increase the amount of memory allocated for the list of extents
;		to be unmapped (trimmed).  This routine will be called when
;		adding an extent to the list, and the list already occupies
;		all of the space allocated to it.  This routine returns ENOMEM
;		if unable to allocate more space, or 0 if the extent list was
;		grown successfully.
;
; Input Arguments:
;	trim		- The trim list to be resized.
;
; Output:
;	(result)	- ENOMEM or 0.
;
; Side effects:
;	The allocated_count and extents fields of tr->trim are updated
;	if the function returned 0.
;________________________________________________________________________________
*/
static int
trim_realloc(journal *jnl, struct jnl_trim_list *trim)
{
	void *new_extents;
	uint32_t new_allocated_count;
	boolean_t was_vm_privileged = FALSE;

	if (jnl_kdebug)
		KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_REALLOC | DBG_FUNC_START, VM_KERNEL_ADDRPERM(trim), 0, trim->allocated_count, trim->extent_count, 0);

	new_allocated_count = trim->allocated_count + JOURNAL_DEFAULT_TRIM_EXTENTS;

	if (jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT) {
		/*
		 * if we block waiting for memory, and there is enough pressure to
		 * cause us to try and create a new swap file, we may end up deadlocking
		 * due to waiting for the journal on the swap file creation path...
		 * by making ourselves vm_privileged, we give ourselves the best chance
		 * of not blocking
		 */
		was_vm_privileged = set_vm_privilege(TRUE);
	}
	new_extents = kalloc(new_allocated_count * sizeof(dk_extent_t));
	if ((jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT) && (was_vm_privileged == FALSE))
		set_vm_privilege(FALSE);

	if (new_extents == NULL) {
		printf("jnl: trim_realloc: unable to grow extent list!\n");
		/*
		 * Since we could be called when allocating space previously marked
		 * to be trimmed, we need to empty out the list to be safe.
		 */
		trim->extent_count = 0;
		if (jnl_kdebug)
			KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_REALLOC | DBG_FUNC_END, ENOMEM, 0, trim->allocated_count, 0, 0);
		return ENOMEM;
	}

	/* Copy the old extent list to the newly allocated list. */
	if (trim->extents != NULL) {
		memmove(new_extents,
			trim->extents,
			trim->allocated_count * sizeof(dk_extent_t));
		kfree(trim->extents,
		      trim->allocated_count * sizeof(dk_extent_t));
	}

	trim->allocated_count = new_allocated_count;
	trim->extents = new_extents;

	if (jnl_kdebug)
		KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_REALLOC | DBG_FUNC_END, 0, 0, new_allocated_count, trim->extent_count, 0);

	return 0;
}

/*
;________________________________________________________________________________
;
; Routine:	trim_search_extent
;
; Function:	Search the given extent list to see if any of its extents
;		overlap the given extent.
;
; Input Arguments:
;	trim		- The trim list to be searched.
;	offset		- The first byte of the range to be searched for.
;	length		- The number of bytes of the extent being searched for.
;	overlap_start	- start of the overlapping extent
;	overlap_len	- length of the overlapping extent
;
; Output:
;	(result)	- TRUE if one or more extents overlap, FALSE otherwise.
;________________________________________________________________________________
*/
static int
trim_search_extent(struct jnl_trim_list *trim, uint64_t offset,
		   uint64_t length, uint64_t *overlap_start, uint64_t *overlap_len)
{
	uint64_t end = offset + length;
	uint32_t lower = 0;			/* Lowest index to search */
	uint32_t upper = trim->extent_count;	/* Highest index to search + 1 */
	uint32_t middle;

	/* A binary search over the extent list. */
	while (lower < upper) {
		middle = (lower + upper) / 2;

		if (trim->extents[middle].offset >= end)
			upper = middle;
		else if (trim->extents[middle].offset + trim->extents[middle].length <= offset)
			lower = middle + 1;
		else {
			if (overlap_start) {
				*overlap_start = trim->extents[middle].offset;
			}
			if (overlap_len) {
				*overlap_len = trim->extents[middle].length;
			}
			return TRUE;
		}
	}

	return FALSE;
}
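
/*
 * Worked example (made-up list): with extents = { {0x1000, 0x1000},
 * {0x4000, 0x2000}, {0x9000, 0x1000} } and a query of offset 0x5000,
 * length 0x800, the probe at middle=1 finds 0x4000 <= 0x5000 < 0x6000, so
 * the search reports TRUE with *overlap_start = 0x4000 and
 * *overlap_len = 0x2000.  Because journal_trim_add_extent() keeps the list
 * sorted and non-overlapping, a binary search is sufficient to find any
 * overlapping extent.
 */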

/*
;________________________________________________________________________________
;
; Routine:	journal_trim_add_extent
;
; Function:	Keep track of extents that have been freed as part of this
;		transaction.  If the underlying device supports TRIM (UNMAP),
;		then those extents will be trimmed/unmapped once the
;		transaction has been written to the journal.  (For example,
;		SSDs can support trim/unmap and avoid having to recopy those
;		blocks when doing wear leveling, and may reuse the same
;		physical blocks for different logical blocks.)
;
;		HFS also uses this, in combination with journal_trim_set_callback,
;		to add recently freed extents to its free extent cache, but
;		only after the transaction that freed them is committed to
;		disk.  (This reduces the chance of overwriting live data in
;		a way that causes data loss if a transaction never gets
;		written to the journal.)
;
; Input Arguments:
;	jnl		- The journal for the volume containing the byte range.
;	offset		- The first byte of the range to be trimmed.
;	length		- The number of bytes of the extent being trimmed.
;________________________________________________________________________________
*/
__private_extern__ int
journal_trim_add_extent(journal *jnl, uint64_t offset, uint64_t length)
{
	uint64_t end;
	transaction *tr;
	dk_extent_t *extent;
	uint32_t insert_index;
	uint32_t replace_count;

	CHECK_JOURNAL(jnl);

	/* TODO: Is it OK to manipulate the trim list even if JOURNAL_INVALID is set?  I think so... */
	if (jnl->flags & JOURNAL_INVALID) {
		return EINVAL;
	}

	tr = jnl->active_tr;
	CHECK_TRANSACTION(tr);

	if (jnl_kdebug)
		KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_ADD | DBG_FUNC_START, VM_KERNEL_ADDRPERM(jnl), offset, length, tr->trim.extent_count, 0);

	if (jnl->owner != current_thread()) {
		panic("jnl: trim_add_extent: called w/out a transaction! jnl %p, owner %p, curact %p\n",
		      jnl, jnl->owner, current_thread());
	}

	free_old_stuff(jnl);

	end = offset + length;

	/*
	 * Find the range of existing extents that can be combined with the
	 * input extent.  We start by counting the number of extents that end
	 * strictly before the input extent, then count the number of extents
	 * that overlap or are contiguous with the input extent.
	 */
	extent = tr->trim.extents;
	insert_index = 0;
	while (insert_index < tr->trim.extent_count && extent->offset + extent->length < offset) {
		++insert_index;
		++extent;
	}
	replace_count = 0;
	while (insert_index + replace_count < tr->trim.extent_count && extent->offset <= end) {
		++replace_count;
		++extent;
	}

	/*
	 * If none of the existing extents can be combined with the input extent,
	 * then just insert it in the list (before item number insert_index).
	 */
	if (replace_count == 0) {
		/* If the list was already full, we need to grow it. */
		if (tr->trim.extent_count == tr->trim.allocated_count) {
			if (trim_realloc(jnl, &tr->trim) != 0) {
				printf("jnl: trim_add_extent: out of memory!");
				if (jnl_kdebug)
					KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_ADD | DBG_FUNC_END, ENOMEM, 0, 0, tr->trim.extent_count, 0);
				return ENOMEM;
			}
		}

		/* Shift any existing extents with larger offsets. */
		if (insert_index < tr->trim.extent_count) {
			memmove(&tr->trim.extents[insert_index+1],
				&tr->trim.extents[insert_index],
				(tr->trim.extent_count - insert_index) * sizeof(dk_extent_t));
		}
		tr->trim.extent_count++;

		/* Store the new extent in the list. */
		tr->trim.extents[insert_index].offset = offset;
		tr->trim.extents[insert_index].length = length;

		/* We're done. */
		if (jnl_kdebug)
			KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_ADD | DBG_FUNC_END, 0, 0, 0, tr->trim.extent_count, 0);
		return 0;
	}

	/*
	 * Update extent number insert_index to be the union of the input extent
	 * and all of the replaced extents.
	 */
	if (tr->trim.extents[insert_index].offset < offset)
		offset = tr->trim.extents[insert_index].offset;
	extent = &tr->trim.extents[insert_index + replace_count - 1];
	if (extent->offset + extent->length > end)
		end = extent->offset + extent->length;
	tr->trim.extents[insert_index].offset = offset;
	tr->trim.extents[insert_index].length = end - offset;

	/*
	 * If we were replacing more than one existing extent, then shift any
	 * extents with larger offsets, and update the count of extents.
	 *
	 * We're going to leave extent #insert_index alone since it was just updated, above.
	 * We need to move extents from index (insert_index + replace_count) through the end of
	 * the list by (replace_count - 1) positions so that they overwrite extent #(insert_index + 1).
	 */
	if (replace_count > 1 && (insert_index + replace_count) < tr->trim.extent_count) {
		memmove(&tr->trim.extents[insert_index + 1],
			&tr->trim.extents[insert_index + replace_count],
			(tr->trim.extent_count - insert_index - replace_count) * sizeof(dk_extent_t));
	}
	tr->trim.extent_count -= replace_count - 1;

	if (jnl_kdebug)
		KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_ADD | DBG_FUNC_END, 0, 0, 0, tr->trim.extent_count, 0);
	return 0;
}
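
/*
 * Coalescing example (illustrative numbers): starting from the sorted list
 * { {0x1000, 0x1000}, {0x8000, 0x1000} }, adding offset=0x1800 length=0x7000
 * (end 0x8800) finds insert_index=0 and replace_count=2 -- both existing
 * extents touch the new range -- so the list collapses to the single union
 * extent { 0x1000, 0x8000 } and extent_count drops by replace_count - 1.
 */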

/*
 * journal_trim_extent_overlap
 *
 * Return 1 if there are any pending TRIMs that overlap with the given offset and length
 * Return 0 otherwise.
 */

int journal_trim_extent_overlap (journal *jnl, uint64_t offset, uint64_t length, uint64_t *end) {
	transaction *tr = NULL;
	int overlap = 0;

	uint64_t overlap_start;
	uint64_t overlap_len;
	tr = jnl->active_tr;
	CHECK_TRANSACTION(tr);

	/*
	 * There are two lists that need to be examined for potential overlaps:
	 *
	 * The first is the current transaction. Since this function requires that
	 * a transaction be active when this is called, this is the "active_tr"
	 * pointer in the journal struct.  This has a trimlist pointer which needs
	 * to be searched.
	 */
	overlap = trim_search_extent (&tr->trim, offset, length, &overlap_start, &overlap_len);
	if (overlap == 0) {
		/*
		 * The second is the async trim list, which is only done if the current
		 * transaction group (active transaction) did not overlap with our target
		 * extent. This async trim list is the set of all previously
		 * committed transaction groups whose I/Os are now in-flight. We need to hold the
		 * trim lock in order to search this list.  If we grab the list before the
		 * TRIM has completed, then we will compare it. If it is grabbed AFTER the
		 * TRIM has completed, then the pointer will be zeroed out and we won't have
		 * to check anything.
		 */
		lck_rw_lock_shared (&jnl->trim_lock);
		if (jnl->async_trim != NULL) {
			overlap = trim_search_extent(jnl->async_trim, offset, length, &overlap_start, &overlap_len);
		}
		lck_rw_unlock_shared (&jnl->trim_lock);
	}

	if (overlap != 0) {
		/* compute the end (min) of the overlapping range */
		if ( (overlap_start + overlap_len) < (offset + length)) {
			*end = (overlap_start + overlap_len);
		}
		else {
			*end = (offset + length);
		}
	}

	return overlap;
}
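
/*
 * Hypothetical use (identifiers are placeholders): before issuing a direct
 * write to freshly allocated blocks, a file system can make sure no pending
 * TRIM still covers them, and if one does, ask for an immediate flush:
 *
 *	uint64_t overlap_end;
 *	if (journal_trim_extent_overlap(jnl, byte_offset, byte_len, &overlap_end)) {
 *		journal_request_immediate_flush(jnl);
 *		// the caller then waits/retries until the overlap is gone
 *	}
 */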

/*
 * journal_request_immediate_flush
 *
 * FS requests that the journal flush immediately upon the
 * active transaction's completion.
 *
 * Returns 0 if operation succeeds
 * Returns EPERM if we failed to leave hint
 */
int
journal_request_immediate_flush (journal *jnl) {

	transaction *tr = NULL;
	/*
	 * Is a transaction still in process? You must do
	 * this while there are txns open
	 */
	tr = jnl->active_tr;
	if (tr != NULL) {
		CHECK_TRANSACTION(tr);
		tr->flush_on_completion = TRUE;
	}
	else {
		return EPERM;
	}
	return 0;
}

/*
;________________________________________________________________________________
;
; Routine:	trim_remove_extent
;
; Function:	Indicate that a range of bytes, some of which may have previously
;		been passed to journal_trim_add_extent, is now allocated.
;		Any overlapping ranges currently in the journal's trim list will
;		be removed.  If the underlying device supports TRIM (UNMAP), then
;		these extents will not be trimmed/unmapped when the transaction
;		is written to the journal.
;
;		HFS also uses this to prevent newly allocated space from being
;		added to its free extent cache (if some portion of the newly
;		allocated space was recently freed).
;
; Input Arguments:
;	trim		- The trim list to update.
;	offset		- The first byte of the range to be trimmed.
;	length		- The number of bytes of the extent being trimmed.
;________________________________________________________________________________
*/
static int
trim_remove_extent(journal *jnl, struct jnl_trim_list *trim, uint64_t offset, uint64_t length)
{
	u_int64_t end;
	dk_extent_t *extent;
	u_int32_t keep_before;
	u_int32_t keep_after;

	end = offset + length;

	/*
	 * Find any existing extents that start before or end after the input
	 * extent.  These extents will be modified if they overlap the input
	 * extent.  Other extents between them will be deleted.
	 */
	extent = trim->extents;
	keep_before = 0;
	while (keep_before < trim->extent_count && extent->offset < offset) {
		++keep_before;
		++extent;
	}
	keep_after = keep_before;
	if (keep_after > 0) {
		/* See if previous extent extends beyond both ends of input extent. */
		--keep_after;
		--extent;
	}
	while (keep_after < trim->extent_count && (extent->offset + extent->length) <= end) {
		++keep_after;
		++extent;
	}

	/*
	 * When we get here, the first keep_before extents (0 .. keep_before-1)
	 * start before the input extent, and extents (keep_after .. extent_count-1)
	 * end after the input extent.  We'll need to keep all of those extents,
	 * but possibly modify #(keep_before-1) and #keep_after to remove the portion
	 * that overlaps with the input extent.
	 */

	/*
	 * Does the input extent start after and end before the same existing
	 * extent?  If so, we have to "punch a hole" in that extent and convert
	 * it to two separate extents.
	 */
	if (keep_before > keep_after) {
		/* If the list was already full, we need to grow it. */
		if (trim->extent_count == trim->allocated_count) {
			if (trim_realloc(jnl, trim) != 0) {
				printf("jnl: trim_remove_extent: out of memory!");
				return ENOMEM;
			}
		}

		/*
		 * Make room for a new extent by shifting extents #keep_after and later
		 * down by one extent.  When we're done, extents #keep_before and
		 * #keep_after will be identical, and we can fall through to removing
		 * the portion that overlaps the input extent.
		 */
		memmove(&trim->extents[keep_before],
			&trim->extents[keep_after],
			(trim->extent_count - keep_after) * sizeof(dk_extent_t));
		++trim->extent_count;
		++keep_after;

		/*
		 * Fall through.  We now have the case where the length of extent
		 * #(keep_before - 1) needs to be updated, and the start of extent
		 * #(keep_after) needs to be updated.
		 */
	}

	/*
	 * May need to truncate the end of extent #(keep_before - 1) if it overlaps
	 * the input extent.
	 */
	if (keep_before > 0) {
		extent = &trim->extents[keep_before - 1];
		if (extent->offset + extent->length > offset) {
			extent->length = offset - extent->offset;
		}
	}

	/*
	 * May need to update the start of extent #(keep_after) if it overlaps the
	 * input extent.
	 */
	if (keep_after < trim->extent_count) {
		extent = &trim->extents[keep_after];
		if (extent->offset < end) {
			extent->length = extent->offset + extent->length - end;
			extent->offset = end;
		}
	}

	/*
	 * If there were whole extents that overlapped the input extent, get rid
	 * of them by shifting any following extents, and updating the count.
	 */
	if (keep_after > keep_before && keep_after < trim->extent_count) {
		memmove(&trim->extents[keep_before],
			&trim->extents[keep_after],
			(trim->extent_count - keep_after) * sizeof(dk_extent_t));
	}
	trim->extent_count -= keep_after - keep_before;

	return 0;
}
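
/*
 * Hole-punch example (illustrative numbers): with the single extent
 * { 0x1000, 0x8000 } (end 0x9000) in the list, removing offset=0x4000
 * length=0x1000 gives keep_before=1 and keep_after=0, so the extent is
 * duplicated and the two copies are trimmed against the hole, leaving
 * { 0x1000, 0x3000 } and { 0x5000, 0x4000 }.
 */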

/*
;________________________________________________________________________________
;
; Routine:	journal_trim_remove_extent
;
; Function:	Make note that a range of bytes, some of which may have previously
;		been passed to journal_trim_add_extent, is now in use on the
;		volume.  The given bytes will not be trimmed as part of
;		this transaction, or a pending trim of a transaction being
;		asynchronously flushed.
;
; Input Arguments:
;	jnl		- The journal for the volume containing the byte range.
;	offset		- The first byte of the range to be trimmed.
;	length		- The number of bytes of the extent being trimmed.
;________________________________________________________________________________
*/
__private_extern__ int
journal_trim_remove_extent(journal *jnl, uint64_t offset, uint64_t length)
{
	int error = 0;
	transaction *tr;

	CHECK_JOURNAL(jnl);

	/* TODO: Is it OK to manipulate the trim list even if JOURNAL_INVALID is set?  I think so... */
	if (jnl->flags & JOURNAL_INVALID) {
		return EINVAL;
	}

	tr = jnl->active_tr;
	CHECK_TRANSACTION(tr);

	if (jnl_kdebug)
		KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_REMOVE | DBG_FUNC_START, VM_KERNEL_ADDRPERM(jnl), offset, length, tr->trim.extent_count, 0);

	if (jnl->owner != current_thread()) {
		panic("jnl: trim_remove_extent: called w/out a transaction! jnl %p, owner %p, curact %p\n",
		      jnl, jnl->owner, current_thread());
	}

	free_old_stuff(jnl);

	error = trim_remove_extent(jnl, &tr->trim, offset, length);
	if (error == 0) {
		int found = FALSE;

		/*
		 * See if a pending trim has any extents that overlap with the
		 * one we were given.
		 */
		lck_rw_lock_shared(&jnl->trim_lock);
		if (jnl->async_trim != NULL)
			found = trim_search_extent(jnl->async_trim, offset, length, NULL, NULL);
		lck_rw_unlock_shared(&jnl->trim_lock);

		if (found) {
			/*
			 * There was an overlap, so avoid trimming the extent we
			 * just allocated.  (Otherwise, it might get trimmed after
			 * we've written to it, which will cause that data to be
			 * corrupted.)
			 */
			uint32_t async_extent_count = 0;

			if (jnl_kdebug)
				KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_REMOVE_PENDING | DBG_FUNC_START, VM_KERNEL_ADDRPERM(jnl), offset, length, 0, 0);
			lck_rw_lock_exclusive(&jnl->trim_lock);
			if (jnl->async_trim != NULL) {
				error = trim_remove_extent(jnl, jnl->async_trim, offset, length);
				async_extent_count = jnl->async_trim->extent_count;
			}
			lck_rw_unlock_exclusive(&jnl->trim_lock);
			if (jnl_kdebug)
				KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_REMOVE_PENDING | DBG_FUNC_END, error, 0, 0, async_extent_count, 0);
		}
	}

	if (jnl_kdebug)
		KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_REMOVE | DBG_FUNC_END, error, 0, 0, tr->trim.extent_count, 0);

	return error;
}

static int
journal_trim_flush(journal *jnl, transaction *tr)
{
	int errno = 0;
	boolean_t was_vm_privileged = FALSE;

	if (jnl_kdebug)
		KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_FLUSH | DBG_FUNC_START, VM_KERNEL_ADDRPERM(jnl), tr, 0, tr->trim.extent_count, 0);

	if (jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT) {
		/*
		 * the disk driver can allocate memory on this path...
		 * if we block waiting for memory, and there is enough pressure to
		 * cause us to try and create a new swap file, we may end up deadlocking
		 * due to waiting for the journal on the swap file creation path...
		 * by making ourselves vm_privileged, we give ourselves the best chance
		 * of not blocking
		 */
		was_vm_privileged = set_vm_privilege(TRUE);
	}
	lck_rw_lock_shared(&jnl->trim_lock);
	if (tr->trim.extent_count > 0) {
		dk_unmap_t unmap;

		bzero(&unmap, sizeof(unmap));
		if (CONFIG_HFS_TRIM && (jnl->flags & JOURNAL_USE_UNMAP)) {
			unmap.extents = tr->trim.extents;
			unmap.extentsCount = tr->trim.extent_count;
			if (jnl_kdebug)
				KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_UNMAP | DBG_FUNC_START, VM_KERNEL_ADDRPERM(jnl), tr, 0, tr->trim.extent_count, 0);
			errno = VNOP_IOCTL(jnl->fsdev, DKIOCUNMAP, (caddr_t)&unmap, FWRITE, vfs_context_kernel());
			if (jnl_kdebug)
				KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_UNMAP | DBG_FUNC_END, errno, 0, 0, 0, 0);
		}

		/*
		 * Call back into the file system to tell them that we have
		 * trimmed some extents and that they can now be reused.
		 *
		 * CAUTION: If the journal becomes invalid (eg., due to an I/O
		 * error when trying to write to the journal), this callback
		 * will stop getting called, even if extents got freed before
		 * the journal became invalid!
		 */
		if (jnl->trim_callback)
			jnl->trim_callback(jnl->trim_callback_arg, tr->trim.extent_count, tr->trim.extents);
	}
	lck_rw_unlock_shared(&jnl->trim_lock);

	if ((jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT) && (was_vm_privileged == FALSE))
		set_vm_privilege(FALSE);
	/*
	 * If the transaction we're flushing was the async transaction, then
	 * tell the current transaction that there is no pending trim
	 * any more.
	 *
	 * NOTE: Since we released the lock, another thread could have
	 * removed one or more extents from our list.  That's not a
	 * problem since any writes to the re-allocated blocks
	 * would get sent to the device after the DKIOCUNMAP.
	 */
	lck_rw_lock_exclusive(&jnl->trim_lock);
	if (jnl->async_trim == &tr->trim)
		jnl->async_trim = NULL;
	lck_rw_unlock_exclusive(&jnl->trim_lock);

	/*
	 * By the time we get here, no other thread can discover the address
	 * of "tr", so it is safe for us to manipulate tr->trim without
	 * holding any locks.
	 */
	if (tr->trim.extents) {
		kfree(tr->trim.extents, tr->trim.allocated_count * sizeof(dk_extent_t));
		tr->trim.allocated_count = 0;
		tr->trim.extent_count = 0;
		tr->trim.extents = NULL;
	}

	if (jnl_kdebug)
		KERNEL_DEBUG_CONSTANT(DBG_JOURNAL_TRIM_FLUSH | DBG_FUNC_END, errno, 0, 0, 0, 0);

	return errno;
}
static int
journal_binfo_cmp(const void *a, const void *b)
{
	const block_info *bi_a = (const struct block_info *)a;
	const block_info *bi_b = (const struct block_info *)b;
	daddr64_t res;

	if (bi_a->bnum == (off_t)-1) {
		return 1;
	}
	if (bi_b->bnum == (off_t)-1) {
		return -1;
	}

	// don't have to worry about negative block
	// numbers so this is ok to do.
	//
	res = (buf_blkno(bi_a->u.bp) - buf_blkno(bi_b->u.bp));

	return (int)res;
}
/*
 * End a transaction.  If the transaction is small enough, and we're not forcing
 * a write to disk, the "active" transaction becomes the "current" transaction,
 * and will be reused for the next transaction that is started (group commit).
 *
 * If the transaction gets written to disk (because force_it is true, or no
 * group commit, or the transaction is sufficiently full), the blocks get
 * written into the journal first, then they are written asynchronously.  When
 * those async writes complete, the transaction can be freed and removed from
 * the journal.
 *
 * An optional callback can be supplied.  If given, it is called after the
 * blocks have been written to the journal, but before the async writes
 * of those blocks to their normal on-disk locations.  This is used by
 * journal_relocate so that the location of the journal can be changed and
 * flushed to disk before the blocks get written to their normal locations.
 * Note that the callback is only called if the transaction gets written to
 * the journal during this end_transaction call; you probably want to set the
 * force_it flag.
 *
 * Inputs:
 *	tr           Transaction to add to the journal
 *	force_it     If true, force this transaction to the on-disk journal immediately.
 *	callback     See description above.  Pass NULL for no callback.
 *	callback_arg Argument passed to callback routine.
 *
 * Result
 *	 0           No errors
 *	-1           An error occurred.  The journal is marked invalid.
 */
static int
end_transaction(transaction *tr, int force_it, errno_t (*callback)(void*), void *callback_arg, boolean_t drop_lock, boolean_t must_wait)
{
	block_list_header  *blhdr=NULL, *next=NULL;
	int		i, ret_val = 0;
	errno_t		errno;
	journal		*jnl = tr->jnl;
	struct buf	*bp;
	size_t		tbuffer_offset;
	boolean_t	drop_lock_early;

	if (jnl->cur_tr) {
		panic("jnl: jnl @ %p already has cur_tr %p, new tr: %p\n",
		      jnl, jnl->cur_tr, tr);
	}

	// if there weren't any modified blocks in the transaction
	// just save off the transaction pointer and return.
	if (tr->total_bytes == jnl->jhdr->blhdr_size) {
		jnl->cur_tr = tr;
		goto done;
	}

	// if our transaction buffer isn't very full, just hang
	// on to it and don't actually flush anything.  this is
	// what is known as "group commit".  we will flush the
	// transaction buffer if it's full or if we have more than
	// one of them so we don't start hogging too much memory.
	//
	// We also check the device supports UNMAP/TRIM, and if so,
	// the number of extents waiting to be trimmed.  If it is
	// small enough, then keep accumulating more (so we can
	// reduce the overhead of trimming).  If there was a prior
	// trim error, then we stop issuing trims for this
	// volume, so we can also coalesce transactions.
	//
	if (force_it == 0
	    && (jnl->flags & JOURNAL_NO_GROUP_COMMIT) == 0
	    && tr->num_blhdrs < 3
	    && (tr->total_bytes <= ((tr->tbuffer_size*tr->num_blhdrs) - tr->tbuffer_size/8))
	    && (!(jnl->flags & JOURNAL_USE_UNMAP) || (tr->trim.extent_count < jnl_trim_flush_limit))) {

		jnl->cur_tr = tr;
		goto done;
	}

	KERNEL_DEBUG(0xbbbbc018|DBG_FUNC_START, jnl, tr, drop_lock, must_wait, 0);

	lock_condition(jnl, &jnl->flushing, "end_transaction");

	/*
	 * if the previous 'finish_end_transaction' was being run
	 * asynchronously, it could have encountered a condition
	 * that caused it to mark the journal invalid... if that
	 * occurred while we were waiting for it to finish, we
	 * need to notice and abort the current transaction
	 */
	if ((jnl->flags & JOURNAL_INVALID) || jnl->flush_aborted == TRUE) {
		unlock_condition(jnl, &jnl->flushing);

		abort_transaction(jnl, tr);
		ret_val = -1;
		KERNEL_DEBUG(0xbbbbc018|DBG_FUNC_END, jnl, tr, ret_val, 0, 0);
		goto done;
	}

	/*
	 * Store a pointer to this transaction's trim list so that
	 * future transactions can find it.
	 *
	 * Note: if there are no extents in the trim list, then don't
	 * bother saving the pointer since nothing can add new extents
	 * to the list (and other threads/transactions only care if
	 * there is a trim pending).
	 */
	lck_rw_lock_exclusive(&jnl->trim_lock);
	if (jnl->async_trim != NULL)
		panic("jnl: end_transaction: async_trim already non-NULL!");
	if (tr->trim.extent_count > 0)
		jnl->async_trim = &tr->trim;
	lck_rw_unlock_exclusive(&jnl->trim_lock);

	/*
	 * snapshot the transaction sequence number while we are still behind
	 * the journal lock since it will be bumped upon the start of the
	 * next transaction group which may overlap the current journal flush...
	 * we pass the snapshot into write_journal_header during the journal
	 * flush so that it can write the correct version in the header...
	 * because we hold the 'flushing' condition variable for the duration
	 * of the journal flush, 'saved_sequence_num' remains stable
	 */
	jnl->saved_sequence_num = jnl->sequence_num;

	/*
	 * if we're here we're going to flush the transaction buffer to disk.
	 * 'check_free_space' will not return until there is enough free
	 * space for this transaction in the journal and jnl->old_start[0]
	 * is available for use
	 */
	KERNEL_DEBUG(0xbbbbc030 | DBG_FUNC_START, jnl, 0, 0, 0, 0);

	check_free_space(jnl, tr->total_bytes, &tr->delayed_header_write, jnl->saved_sequence_num);

	KERNEL_DEBUG(0xbbbbc030 | DBG_FUNC_END, jnl, tr->delayed_header_write, 0, 0, 0);

	// range check the end index
	if (jnl->jhdr->end <= 0 || jnl->jhdr->end > jnl->jhdr->size) {
		panic("jnl: end_transaction: end is bogus 0x%llx (sz 0x%llx)\n",
		      jnl->jhdr->end, jnl->jhdr->size);
	}
	if (tr->delayed_header_write == TRUE) {
		thread_t	thread = THREAD_NULL;

		lock_condition(jnl, &jnl->writing_header, "end_transaction");
		/*
		 * fire up a thread to write the journal header
		 * asynchronously... when it finishes, it will call
		 * unlock_condition... we can overlap the preparation of
		 * the log and buffers during this time
		 */
		kernel_thread_start((thread_continue_t)write_header_thread, jnl, &thread);
	} else
		jnl->write_header_failed = FALSE;

	// this transaction starts where the current journal ends
	tr->journal_start = jnl->jhdr->end;

	lock_oldstart(jnl);
	/*
	 * Because old_start is locked above, we can cast away the volatile qualifier before passing it to memcpy.
	 * slide everyone else down and put our latest guy in the last
	 * entry in the old_start array
	 */
	memcpy(__CAST_AWAY_QUALIFIER(&jnl->old_start[0], volatile, void *), __CAST_AWAY_QUALIFIER(&jnl->old_start[1], volatile, void *), sizeof(jnl->old_start)-sizeof(jnl->old_start[0]));
	jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] = tr->journal_start | 0x8000000000000000LL;

	unlock_oldstart(jnl);

	for (blhdr = tr->blhdr; blhdr; blhdr = next) {
		char	*blkptr;
		buf_t	sbp;
		int32_t	bsize;

		tbuffer_offset = jnl->jhdr->blhdr_size;

		for (i = 1; i < blhdr->num_blocks; i++) {

			if (blhdr->binfo[i].bnum != (off_t)-1) {
				void (*func)(buf_t, void *);
				void	*arg;

				bp = blhdr->binfo[i].u.bp;

				if (bp == NULL) {
					panic("jnl: inconsistent binfo (NULL bp w/bnum %lld; jnl @ %p, tr %p)\n",
					      blhdr->binfo[i].bnum, jnl, tr);
				}
				/*
				 * acquire the bp here so that we can safely
				 * mess around with its data.  buf_acquire()
				 * will return EAGAIN if the buffer was busy,
				 * so loop trying again.
				 */
				do {
					errno = buf_acquire(bp, BAC_REMOVE, 0, 0);
				} while (errno == EAGAIN);

				if (errno)
					panic("could not acquire bp %p (err %d)\n", bp, errno);

				if ((buf_flags(bp) & (B_LOCKED|B_DELWRI)) != (B_LOCKED|B_DELWRI)) {
					if (jnl->flags & JOURNAL_CLOSE_PENDING) {
						buf_clearflags(bp, B_LOCKED);
						buf_brelse(bp);

						/*
						 * this is an odd case that appears to happen occasionally;
						 * make sure we mark this block as no longer valid
						 * so that we don't process it in "finish_end_transaction" since
						 * the bp that is recorded in our array no longer belongs
						 * to us (normally we substitute a shadow bp to be processed);
						 * issuing a 'buf_bawrite' on a stale buf_t pointer leads
						 * to all kinds of problems.
						 */
						blhdr->binfo[i].bnum = (off_t)-1;
						continue;
					} else {
						panic("jnl: end_tr: !!!DANGER!!! bp %p flags (0x%x) not LOCKED & DELWRI\n", bp, buf_flags(bp));
					}
				}
				bsize = buf_size(bp);

				buf_setfilter(bp, NULL, NULL, &func, &arg);

				blkptr = (char *)&((char *)blhdr)[tbuffer_offset];

				sbp = buf_create_shadow_priv(bp, FALSE, (uintptr_t)blkptr, 0, 0);

				if (sbp == NULL)
					panic("jnl: buf_create_shadow returned NULL");

				/*
				 * copy the data into the transaction buffer...
				 */
				memcpy(blkptr, (char *)buf_dataptr(bp), bsize);

				buf_clearflags(bp, B_LOCKED);
				buf_markclean(bp);
				buf_drop(bp);

				/*
				 * adopt the shadow buffer for this block
				 */
				if (func) {
					/*
					 * transfer FS hook function to the
					 * shadow buffer... it will get called
					 * in finish_end_transaction
					 */
					buf_setfilter(sbp, func, arg, NULL, NULL);
				}
				blhdr->binfo[i].u.bp = sbp;

			} else {
				// bnum == -1, only true if a block was "killed"
				bsize = blhdr->binfo[i].u.bi.bsize;
			}
			tbuffer_offset += bsize;
		}
		next = (block_list_header *)((long)blhdr->binfo[0].bnum);
	}
	/*
	 * if callback != NULL, we don't want to drop the journal
	 * lock, or complete end_transaction asynchronously, since
	 * the caller is expecting the callback to run in the calling
	 * context
	 *
	 * if drop_lock == FALSE, we can't complete end_transaction
	 * asynchronously
	 */
	if (callback)
		drop_lock_early = FALSE;
	else
		drop_lock_early = drop_lock;

	if (drop_lock_early == FALSE)
		must_wait = TRUE;

	if (drop_lock_early == TRUE) {
		journal_unlock(jnl);
		drop_lock = FALSE;
	}
	if (must_wait == TRUE)
		ret_val = finish_end_transaction(tr, callback, callback_arg);
	else {
		thread_t	thread = THREAD_NULL;

		/*
		 * fire up a thread to complete processing this transaction
		 * asynchronously... when it finishes, it will call
		 * unlock_condition
		 */
		kernel_thread_start((thread_continue_t)finish_end_thread, tr, &thread);
	}
	KERNEL_DEBUG(0xbbbbc018|DBG_FUNC_END, jnl, tr, ret_val, 0, 0);
done:
	if (drop_lock == TRUE) {
		journal_unlock(jnl);
	}
	return (ret_val);
}
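/*
 * Illustrative sketch (not part of this file): the group-commit test from
 * the top of end_transaction, restated as a predicate.  A transaction is
 * kept in memory (becomes jnl->cur_tr) only while all of these conditions
 * hold; the helper name can_group_commit is hypothetical, the conditions
 * are taken directly from the code above.
 */
#if 0
static boolean_t
can_group_commit(journal *jnl, transaction *tr, int force_it)
{
	return force_it == 0
	    && (jnl->flags & JOURNAL_NO_GROUP_COMMIT) == 0
	    && tr->num_blhdrs < 3                               // not too many block-list headers yet
	    && tr->total_bytes <= (tr->tbuffer_size * tr->num_blhdrs) - tr->tbuffer_size / 8
	    && (!(jnl->flags & JOURNAL_USE_UNMAP)
	        || tr->trim.extent_count < jnl_trim_flush_limit); // trim list still small enough
}
#endif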
static void
finish_end_thread(transaction *tr)
{
	proc_set_task_policy(current_task(), current_thread(),
	                     TASK_POLICY_INTERNAL, TASK_POLICY_IOPOL, IOPOL_PASSIVE);

	finish_end_transaction(tr, NULL, NULL);

	thread_deallocate(current_thread());
	thread_terminate(current_thread());
}
static void
write_header_thread(journal *jnl)
{
	proc_set_task_policy(current_task(), current_thread(),
	                     TASK_POLICY_INTERNAL, TASK_POLICY_IOPOL, IOPOL_PASSIVE);

	if (write_journal_header(jnl, 1, jnl->saved_sequence_num))
		jnl->write_header_failed = TRUE;
	else
		jnl->write_header_failed = FALSE;
	unlock_condition(jnl, &jnl->writing_header);

	thread_deallocate(current_thread());
	thread_terminate(current_thread());
}
static int
finish_end_transaction(transaction *tr, errno_t (*callback)(void*), void *callback_arg)
{
	int		i, amt;
	int		ret = 0;
	off_t		end;
	journal		*jnl = tr->jnl;
	buf_t		bp, *bparray;
	vnode_t		vp;
	block_list_header  *blhdr=NULL, *next=NULL;
	size_t		tbuffer_offset;
	int		bufs_written = 0;
	int		ret_val = 0;
	boolean_t	was_vm_privileged = FALSE;

	KERNEL_DEBUG(0xbbbbc028|DBG_FUNC_START, jnl, tr, 0, 0, 0);

	if (jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT) {
		/*
		 * if we block waiting for memory, and there is enough pressure to
		 * cause us to try and create a new swap file, we may end up deadlocking
		 * due to waiting for the journal on the swap file creation path...
		 * by making ourselves vm_privileged, we give ourselves the best chance
		 * of not blocking
		 */
		was_vm_privileged = set_vm_privilege(TRUE);
	}
	end = jnl->jhdr->end;

	for (blhdr = tr->blhdr; blhdr; blhdr = (block_list_header *)((long)blhdr->binfo[0].bnum)) {

		amt = blhdr->bytes_used;

		blhdr->binfo[0].u.bi.b.sequence_num = tr->sequence_num;

		blhdr->checksum = 0;
		blhdr->checksum = calc_checksum((char *)blhdr, BLHDR_CHECKSUM_SIZE);

		if (kmem_alloc(kernel_map, (vm_offset_t *)&bparray, blhdr->num_blocks * sizeof(struct buf *), VM_KERN_MEMORY_FILE)) {
			panic("can't allocate %zd bytes for bparray\n", blhdr->num_blocks * sizeof(struct buf *));
		}
		tbuffer_offset = jnl->jhdr->blhdr_size;

		for (i = 1; i < blhdr->num_blocks; i++) {
			void (*func)(buf_t, void *);
			void	*arg;
			int32_t	bsize;

			/*
			 * finish preparing the shadow buf_t before
			 * calculating the individual block checksums
			 */
			if (blhdr->binfo[i].bnum != (off_t)-1) {
				daddr64_t blkno;
				daddr64_t lblkno;

				bp = blhdr->binfo[i].u.bp;

				vp = buf_vnode(bp);
				blkno = buf_blkno(bp);
				lblkno = buf_lblkno(bp);

				if (vp == NULL && lblkno == blkno) {
					printf("jnl: %s: end_tr: bad news! buffer w/null vp and l/blkno = %qd/%qd.  aborting the transaction.\n",
					       jnl->jdev_name, lblkno, blkno);
					ret_val = -1;
					goto bad_journal;
				}

				// if the lblkno is the same as blkno and this bp isn't
				// associated with the underlying file system device then
				// we need to call bmap() to get the actual physical block.
				//
				if ((lblkno == blkno) && (vp != jnl->fsdev)) {
					off_t	f_offset;
					size_t	contig_bytes;

					if (VNOP_BLKTOOFF(vp, lblkno, &f_offset)) {
						printf("jnl: %s: end_tr: vnop_blktooff failed\n", jnl->jdev_name);
						ret_val = -1;
						goto bad_journal;
					}
					if (VNOP_BLOCKMAP(vp, f_offset, buf_count(bp), &blkno, &contig_bytes, NULL, 0, NULL)) {
						printf("jnl: %s: end_tr: can't blockmap the buffer", jnl->jdev_name);
						ret_val = -1;
						goto bad_journal;
					}
					if ((uint32_t)contig_bytes < buf_count(bp)) {
						printf("jnl: %s: end_tr: blk not physically contiguous on disk\n", jnl->jdev_name);
						ret_val = -1;
						goto bad_journal;
					}
					buf_setblkno(bp, blkno);
				}
				// update this so we write out the correct physical block number!
				blhdr->binfo[i].bnum = (off_t)(blkno);

				/*
				 * pick up the FS hook function (if any) and prepare
				 * to fire this buffer off in the next pass
				 */
				buf_setfilter(bp, buffer_flushed_callback, tr, &func, &arg);

				if (func) {
					/*
					 * call the hook function supplied by the filesystem...
					 * this needs to happen BEFORE calc_checksum in case
					 * the FS morphs the data in the buffer
					 */
					func(bp, arg);
				}
				bparray[i] = bp;
				bsize = buf_size(bp);
				blhdr->binfo[i].u.bi.bsize = bsize;
				blhdr->binfo[i].u.bi.b.cksum = calc_checksum(&((char *)blhdr)[tbuffer_offset], bsize);
			} else {
				bparray[i] = NULL;
				bsize = blhdr->binfo[i].u.bi.bsize;
				blhdr->binfo[i].u.bi.b.cksum = 0;
			}
			tbuffer_offset += bsize;
		}
		/*
		 * if we fired off the journal_write_header asynchronously in
		 * 'end_transaction', we need to wait for its completion
		 * before writing the actual journal data
		 */
		wait_condition(jnl, &jnl->writing_header, "finish_end_transaction");

		if (jnl->write_header_failed == FALSE)
			ret = write_journal_data(jnl, &end, blhdr, amt);
		else
			ret_val = -1;
		/*
		 * put the bp pointers back so that we can
		 * make the final pass on them
		 */
		for (i = 1; i < blhdr->num_blocks; i++)
			blhdr->binfo[i].u.bp = bparray[i];

		kmem_free(kernel_map, (vm_offset_t)bparray, blhdr->num_blocks * sizeof(struct buf *));

		if (ret_val == -1)
			goto bad_journal;

		if (ret != amt) {
			printf("jnl: %s: end_transaction: only wrote %d of %d bytes to the journal!\n",
			       jnl->jdev_name, ret, amt);

			ret_val = -1;
			goto bad_journal;
		}
	}
	jnl->jhdr->end  = end;    // update where the journal now ends
	tr->journal_end = end;    // the transaction ends here too

	if (tr->journal_start == 0 || tr->journal_end == 0) {
		panic("jnl: end_transaction: bad tr journal start/end: 0x%llx 0x%llx\n",
		      tr->journal_start, tr->journal_end);
	}

	if (write_journal_header(jnl, 0, jnl->saved_sequence_num) != 0) {
		ret_val = -1;
		goto bad_journal;
	}
	/*
	 * If the caller supplied a callback, call it now that the blocks have been
	 * written to the journal.  This is used by journal_relocate so, for example,
	 * the file system can change its pointer to the new journal.
	 */
	if (callback != NULL && callback(callback_arg) != 0) {
		ret_val = -1;
		goto bad_journal;
	}

	//
	// Send a DKIOCUNMAP for the extents trimmed by this transaction, and
	// free up the extent list.
	//
	journal_trim_flush(jnl, tr);

	// the buffer_flushed_callback will only be called for the
	// real blocks that get flushed so we have to account for
	// the block_list_headers here.
	//
	tr->num_flushed = tr->num_blhdrs * jnl->jhdr->blhdr_size;

	lock_condition(jnl, &jnl->asyncIO, "finish_end_transaction");

	//
	// setup for looping through all the blhdr's.
	//
	for (blhdr = tr->blhdr; blhdr; blhdr = next) {
		uint16_t	num_blocks;

		/*
		 * grab this info ahead of issuing the buf_bawrites...
		 * once the last one goes out, it's possible for blhdr
		 * to be freed (especially if we get preempted) before
		 * we do the last check of num_blocks or
		 * grab the next blhdr pointer...
		 */
		next = (block_list_header *)((long)blhdr->binfo[0].bnum);
		num_blocks = blhdr->num_blocks;

		/*
		 * we can re-order the buf ptrs because everything is written out already
		 */
		qsort(&blhdr->binfo[1], num_blocks-1, sizeof(block_info), journal_binfo_cmp);

		/*
		 * need to make sure that the loop issuing the buf_bawrite's
		 * does not touch blhdr once the last buf_bawrite has been
		 * issued... at that point, we no longer have a legitimate
		 * reference on the associated storage since it will be
		 * released upon the completion of that last buf_bawrite
		 */
		for (i = num_blocks-1; i >= 1; i--) {
			if (blhdr->binfo[i].bnum != (off_t)-1)
				break;
			num_blocks--;
		}
		for (i = 1; i < num_blocks; i++) {

			if ((bp = blhdr->binfo[i].u.bp)) {
				vp = buf_vnode(bp);

				buf_bawrite(bp);

				// this undoes the vnode_ref() in journal_modify_block_end()
				vnode_rele_ext(vp, 0, 1);

				bufs_written++;
			}
		}
	}
	if (bufs_written == 0) {
		/*
		 * since we didn't issue any buf_bawrite's, there is no
		 * async trigger to cause the memory associated with this
		 * transaction to be freed... so, move it to the garbage
		 * list now
		 */
		lock_oldstart(jnl);

		tr->next       = jnl->tr_freeme;
		jnl->tr_freeme = tr;

		unlock_oldstart(jnl);

		unlock_condition(jnl, &jnl->asyncIO);
	}

	//printf("jnl: end_tr: tr @ 0x%x, jnl-blocks: 0x%llx - 0x%llx. exit!\n",
	//   tr, tr->journal_start, tr->journal_end);

bad_journal:
	if (ret_val == -1) {
		abort_transaction(jnl, tr);		// cleans up list of extents to be trimmed

		/*
		 * 'flush_aborted' is protected by the flushing condition... we need to
		 * set it before dropping the condition so that it will be
		 * noticed in 'end_transaction'... we add this additional
		 * aborted condition so that we can drop the 'flushing' condition
		 * before grabbing the journal lock... this avoids a deadlock
		 * in 'end_transaction' which is holding the journal lock while
		 * waiting for the 'flushing' condition to clear...
		 * everyone else will notice the JOURNAL_INVALID flag
		 */
		jnl->flush_aborted = TRUE;

		unlock_condition(jnl, &jnl->flushing);
		journal_lock(jnl);

		jnl->flags |= JOURNAL_INVALID;
		jnl->old_start[sizeof(jnl->old_start)/sizeof(jnl->old_start[0]) - 1] &= ~0x8000000000000000LL;

		journal_unlock(jnl);
	} else
		unlock_condition(jnl, &jnl->flushing);

	if ((jnl->fsmount->mnt_kern_flag & MNTK_SWAP_MOUNT) && (was_vm_privileged == FALSE))
		set_vm_privilege(FALSE);

	KERNEL_DEBUG(0xbbbbc028|DBG_FUNC_END, jnl, tr, bufs_written, ret_val, 0);

	return (ret_val);
}
static void
lock_condition(journal *jnl, boolean_t *condition, const char *condition_name)
{

	KERNEL_DEBUG(0xbbbbc020|DBG_FUNC_START, jnl, condition, 0, 0, 0);

	lock_flush(jnl);

	while (*condition == TRUE)
		msleep(condition, &jnl->flock, PRIBIO, condition_name, NULL);

	*condition = TRUE;
	unlock_flush(jnl);

	KERNEL_DEBUG(0xbbbbc020|DBG_FUNC_END, jnl, condition, 0, 0, 0);
}
static void
wait_condition(journal *jnl, boolean_t *condition, const char *condition_name)
{

	if (*condition == FALSE)
		return;

	KERNEL_DEBUG(0xbbbbc02c|DBG_FUNC_START, jnl, condition, 0, 0, 0);

	lock_flush(jnl);

	while (*condition == TRUE)
		msleep(condition, &jnl->flock, PRIBIO, condition_name, NULL);

	unlock_flush(jnl);

	KERNEL_DEBUG(0xbbbbc02c|DBG_FUNC_END, jnl, condition, 0, 0, 0);
}
static void
unlock_condition(journal *jnl, boolean_t *condition)
{
	lock_flush(jnl);

	*condition = FALSE;
	wakeup(condition);

	unlock_flush(jnl);
}
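/*
 * Illustrative sketch (not part of this file): how the three helpers above
 * cooperate.  One thread takes ownership of a condition with
 * lock_condition(), other threads block on it with wait_condition(), and
 * the owner releases it with unlock_condition(), which wakes all waiters.
 */
#if 0
	/* owning thread: */
	lock_condition(jnl, &jnl->flushing, "example");	// blocks while the flag is set, then sets it
	/* ... do the flush work ... */
	unlock_condition(jnl, &jnl->flushing);		// clear the flag and wakeup() waiters

	/* some other thread: */
	wait_condition(jnl, &jnl->flushing, "example");	// returns once the flush is done
#endif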
static void
abort_transaction(journal *jnl, transaction *tr)
{
	block_list_header *blhdr, *next;

	// for each block list header, iterate over the blocks then
	// free up the memory associated with the block list.
	//
	// find each of the primary blocks (i.e. the list could
	// contain a mix of shadowed and real buf_t's depending
	// on when the abort condition was detected) and mark them
	// clean and locked in the cache... this at least allows
	// the FS a consistent view between its incore data structures
	// and the meta-data held in the cache
	//
	KERNEL_DEBUG(0xbbbbc034|DBG_FUNC_START, jnl, tr, 0, 0, 0);

	for (blhdr = tr->blhdr; blhdr; blhdr = next) {
		int	i;

		for (i = 1; i < blhdr->num_blocks; i++) {
			buf_t	bp, tbp, sbp;
			vnode_t	bp_vp;
			errno_t	errno;

			if (blhdr->binfo[i].bnum == (off_t)-1)
				continue;

			tbp = blhdr->binfo[i].u.bp;

			bp_vp = buf_vnode(tbp);

			if (buf_shadow(tbp)) {
				sbp = tbp;
				buf_setfilter(tbp, NULL, NULL, NULL, NULL);
			} else {
				assert(ISSET(buf_flags(tbp), B_LOCKED));

				sbp = NULL;

				do {
					errno = buf_acquire(tbp, BAC_REMOVE, 0, 0);
				} while (errno == EAGAIN);

				if (!errno) {
					buf_setfilter(tbp, NULL, NULL, NULL, NULL);
					buf_brelse(tbp);
				}
			}

			if (bp_vp) {
				errno = buf_meta_bread(bp_vp,
						       buf_lblkno(tbp),
						       buf_size(tbp),
						       NOCRED,
						       &bp);
				if (errno == 0) {
					if (sbp == NULL && bp != tbp && (buf_flags(tbp) & B_LOCKED)) {
						panic("jnl: abort_tr: got back a different bp! (bp %p should be %p, jnl %p\n",
						      bp, tbp, jnl);
					}
					/*
					 * once the journal has been marked INVALID and aborted,
					 * NO meta data can be written back to the disk, so
					 * mark the buf_t clean and make sure it's locked in the cache
					 * note: if we found a shadow, the real buf_t needs to be relocked
					 */
					buf_setflags(bp, B_LOCKED);
					buf_markclean(bp);
					buf_brelse(bp);

					KERNEL_DEBUG(0xbbbbc034|DBG_FUNC_NONE, jnl, tr, bp, 0, 0);

					/*
					 * this undoes the vnode_ref() in journal_modify_block_end()
					 */
					vnode_rele_ext(bp_vp, 0, 1);
				} else {
					printf("jnl: %s: abort_tr: could not find block %lld for vnode!\n",
					       jnl->jdev_name, blhdr->binfo[i].bnum);
					if (bp) {
						buf_brelse(bp);
					}
				}
			}
			if (sbp)
				buf_brelse(sbp);
		}
		next = (block_list_header *)((long)blhdr->binfo[0].bnum);

		// we can free blhdr here since we won't need it any more
		blhdr->binfo[0].bnum = 0xdeadc0de;
		kmem_free(kernel_map, (vm_offset_t)blhdr, tr->tbuffer_size);
	}

	/*
	 * If the transaction we're aborting was the async transaction, then
	 * tell the current transaction that there is no pending trim
	 * any more.
	 */
	lck_rw_lock_exclusive(&jnl->trim_lock);
	if (jnl->async_trim == &tr->trim)
		jnl->async_trim = NULL;
	lck_rw_unlock_exclusive(&jnl->trim_lock);

	if (tr->trim.extents) {
		kfree(tr->trim.extents, tr->trim.allocated_count * sizeof(dk_extent_t));
	}
	tr->trim.allocated_count = 0;
	tr->trim.extent_count = 0;
	tr->trim.extents = NULL;
	tr->tbuffer     = NULL;
	tr->blhdr       = NULL;
	tr->total_bytes = 0xdbadc0de;
	FREE_ZONE(tr, sizeof(transaction), M_JNL_TR);

	KERNEL_DEBUG(0xbbbbc034|DBG_FUNC_END, jnl, tr, 0, 0, 0);
}
int
journal_end_transaction(journal *jnl)
{
	int	ret;
	transaction	*tr;

	CHECK_JOURNAL(jnl);

	free_old_stuff(jnl);

	if ((jnl->flags & JOURNAL_INVALID) && jnl->owner == NULL) {
		return 0;
	}

	if (jnl->owner != current_thread()) {
		panic("jnl: end_tr: I'm not the owner! jnl %p, owner %p, curact %p\n",
		      jnl, jnl->owner, current_thread());
	}
	jnl->nested_count--;

	if (jnl->nested_count > 0) {
		return 0;
	} else if (jnl->nested_count < 0) {
		panic("jnl: jnl @ %p has negative nested count (%d). bad boy.\n", jnl, jnl->nested_count);
	}

	if (jnl->flags & JOURNAL_INVALID) {
		if (jnl->active_tr) {
			if (jnl->cur_tr != NULL) {
				panic("jnl: journal @ %p has active tr (%p) and cur tr (%p)\n",
				      jnl, jnl->active_tr, jnl->cur_tr);
			}
			tr             = jnl->active_tr;
			jnl->active_tr = NULL;

			abort_transaction(jnl, tr);
		}
		journal_unlock(jnl);

		return EINVAL;
	}

	tr = jnl->active_tr;
	CHECK_TRANSACTION(tr);

	// clear this out here so that when check_free_space() calls
	// the FS flush function, we don't panic in journal_flush()
	// if the FS were to call that.  note: check_free_space() is
	// called from end_transaction().
	//
	jnl->active_tr = NULL;

	/* Examine the force-journal-flush state in the active txn */
	if (tr->flush_on_completion == TRUE) {
		/*
		 * If the FS requested it, disallow group commit and force the
		 * transaction out to disk immediately.
		 */
		ret = end_transaction(tr, 1, NULL, NULL, TRUE, TRUE);
	} else {
		/* in the common path we can simply use the double-buffered journal */
		ret = end_transaction(tr, 0, NULL, NULL, TRUE, FALSE);
	}

	return ret;
}
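/*
 * Illustrative sketch (not part of this file): the transaction life cycle a
 * filesystem typically drives against this API.  The buffer `my_bp` and the
 * metadata update in the middle are hypothetical.
 */
#if 0
	if (journal_start_transaction(jnl) == 0) {
		journal_modify_block_start(jnl, my_bp);
		/* ... modify the metadata in my_bp's data ... */
		journal_modify_block_end(jnl, my_bp, NULL, NULL);
		journal_end_transaction(jnl);	// may group-commit or flush to the on-disk journal
	}
#endif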
/*
 * Flush the contents of the journal to the disk.
 *
 *  Input:
 *	wait_for_IO -
 *	If TRUE, wait to write in-memory journal to the disk
 *	consistently, and also wait to write all asynchronous
 *	metadata blocks to their corresponding locations
 *	consistently on the disk.  This means that the journal
 *	is empty at this point and does not contain any
 *	transactions.  This is overkill in normal scenarios
 *	but is useful whenever the metadata blocks are required
 *	to be consistent on-disk instead of just the journal
 *	being consistent; like before live verification
 *	and live volume resizing.
 *
 *	If FALSE, only wait to write in-memory journal to the
 *	disk consistently.  This means that the journal still
 *	contains uncommitted transactions and the file system
 *	metadata blocks in the journal transactions might be
 *	written asynchronously to the disk.  But there is no
 *	guarantee that they are written to the disk before
 *	returning to the caller.  Note that this option is
 *	sufficient for file system data integrity as it
 *	guarantees consistent journal content on the disk.
 */
int
journal_flush(journal *jnl, journal_flush_options_t options)
{
	boolean_t drop_lock = FALSE;
	errno_t error = 0;
	uint32_t flush_count;

	CHECK_JOURNAL(jnl);

	free_old_stuff(jnl);

	if (jnl->flags & JOURNAL_INVALID) {
		return -1;
	}

	KERNEL_DEBUG(DBG_JOURNAL_FLUSH | DBG_FUNC_START, jnl, 0, 0, 0, 0);

	if (jnl->owner != current_thread()) {
		journal_lock(jnl);
		drop_lock = TRUE;
	}

	if (ISSET(options, JOURNAL_FLUSH_FULL))
		flush_count = jnl->flush_counter;

	// if we're not active, flush any buffered transactions
	if (jnl->active_tr == NULL && jnl->cur_tr) {
		transaction *tr = jnl->cur_tr;

		jnl->cur_tr = NULL;

		if (ISSET(options, JOURNAL_WAIT_FOR_IO)) {
			wait_condition(jnl, &jnl->flushing, "journal_flush");
			wait_condition(jnl, &jnl->asyncIO, "journal_flush");
		}
		/*
		 * "end_transaction" will wait for any current async flush
		 * to complete, before flushing "cur_tr"... because we've
		 * specified the 'must_wait' arg as TRUE, it will then
		 * synchronously flush the "cur_tr"
		 */
		end_transaction(tr, 1, NULL, NULL, drop_lock, TRUE);	// force it to get flushed

	} else {
		if (drop_lock == TRUE) {
			journal_unlock(jnl);
		}

		/* Because of pipelined journal, the journal transactions
		 * might be in process of being flushed on another thread.
		 * If there is nothing to flush currently, we should
		 * synchronize ourselves with the pipelined journal thread
		 * to ensure that all inflight transactions, if any, are
		 * flushed before we return success to caller.
		 */
		wait_condition(jnl, &jnl->flushing, "journal_flush");
	}
	if (ISSET(options, JOURNAL_WAIT_FOR_IO)) {
		wait_condition(jnl, &jnl->asyncIO, "journal_flush");
	}

	if (ISSET(options, JOURNAL_FLUSH_FULL)) {

		dk_synchronize_t sync_request = {
			.options = 0,
		};

		// We need a full cache flush. If it has not been done, do it here.
		if (flush_count == jnl->flush_counter)
			error = VNOP_IOCTL(jnl->jdev, DKIOCSYNCHRONIZE, (caddr_t)&sync_request, FWRITE, vfs_context_kernel());

		// If external journal partition is enabled, flush filesystem data partition.
		if (jnl->jdev != jnl->fsdev)
			error = VNOP_IOCTL(jnl->fsdev, DKIOCSYNCHRONIZE, (caddr_t)&sync_request, FWRITE, vfs_context_kernel());
	}

	KERNEL_DEBUG(DBG_JOURNAL_FLUSH | DBG_FUNC_END, jnl, 0, 0, 0, 0);

	return 0;
}
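/*
 * Illustrative sketch (not part of this file): choosing journal_flush
 * options.  JOURNAL_WAIT_FOR_IO gives the stronger "metadata blocks on
 * disk" guarantee described in the comment above journal_flush; passing 0
 * only guarantees the journal itself is consistent on disk.
 */
#if 0
	/* e.g. before live volume resizing: journal empty, metadata in place */
	journal_flush(jnl, JOURNAL_WAIT_FOR_IO);

	/* ordinary integrity barrier: journal content consistent on disk */
	journal_flush(jnl, 0);
#endif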
int
journal_active(journal *jnl)
{
	if (jnl->flags & JOURNAL_INVALID) {
		return -1;
	}

	return (jnl->active_tr == NULL) ? 0 : 1;
}
void *
journal_owner(journal *jnl)
{
	return jnl->owner;
}
int journal_uses_fua(journal *jnl)
{
	if (jnl->flags & JOURNAL_DO_FUA_WRITES)
		return 1;
	return 0;
}
/*
 * Relocate the journal.
 *
 * You provide the new starting offset and size for the journal.  You may
 * optionally provide a new tbuffer_size; passing zero defaults to not
 * changing the tbuffer size except as needed to fit within the new journal
 * size.
 *
 * You must have already started a transaction.  The transaction may contain
 * modified blocks (such as those needed to deallocate the old journal,
 * allocate the new journal, and update the location and size of the journal
 * in filesystem-private structures).  Any transactions prior to the active
 * transaction will be flushed to the old journal.  The new journal will be
 * initialized, and the blocks from the active transaction will be written to
 * the new journal.
 *
 * The caller will need to update the structures that identify the location
 * and size of the journal.  These updates should be made in the supplied
 * callback routine.  These updates must NOT go into a transaction.  You should
 * force these updates to the media before returning from the callback.  In the
 * event of a crash, either the old journal will be found, with an empty journal,
 * or the new journal will be found with the contents of the active transaction.
 *
 * Upon return from the callback, the blocks from the active transaction are
 * written to their normal locations on disk.
 *
 * (Remember that we have to ensure that blocks get committed to the journal
 * before being committed to their normal locations.  But the blocks don't count
 * as committed until the new journal is pointed at.)
 *
 * Upon return, there is still an active transaction: newly allocated, and
 * with no modified blocks.  Call journal_end_transaction as normal.  You may
 * modify additional blocks before calling journal_end_transaction, and those
 * blocks will (eventually) go to the relocated journal.
 *
 * Inputs:
 *	jnl          The (opened) journal to relocate.
 *	offset       The new journal byte offset (from start of the journal device).
 *	journal_size The size, in bytes, of the new journal.
 *	tbuffer_size The new desired transaction buffer size.  Pass zero to keep
 *	             the same size as the current journal.  The size will be
 *	             modified as needed to fit the new journal.
 *	callback     Routine called after the new journal has been initialized,
 *	             and the active transaction written to the new journal, but
 *	             before the blocks are written to their normal locations.
 *	             Pass NULL for no callback.
 *	callback_arg An argument passed to the callback routine.
 *
 * Result:
 *	0            No errors
 *	EINVAL       The offset is not block aligned
 *	EINVAL       The journal_size is not a multiple of the block size
 *	EINVAL       The journal is invalid
 *	(any)        An error returned by journal_flush.
 */
int journal_relocate(journal *jnl, off_t offset, off_t journal_size, int32_t tbuffer_size,
		     errno_t (*callback)(void *), void *callback_arg)
{
	int		ret;
	transaction	*tr;
	size_t		i = 0;

	/*
	 * Sanity check inputs, and adjust the size of the transaction buffer.
	 */
	if ((offset % jnl->jhdr->jhdr_size) != 0) {
		printf("jnl: %s: relocate: offset 0x%llx is not an even multiple of block size 0x%x\n",
		       jnl->jdev_name, offset, jnl->jhdr->jhdr_size);
		return EINVAL;
	}
	if ((journal_size % jnl->jhdr->jhdr_size) != 0) {
		printf("jnl: %s: relocate: journal size 0x%llx is not an even multiple of block size 0x%x\n",
		       jnl->jdev_name, journal_size, jnl->jhdr->jhdr_size);
		return EINVAL;
	}

	CHECK_JOURNAL(jnl);

	/* Guarantee we own the active transaction. */
	if (jnl->flags & JOURNAL_INVALID) {
		return EINVAL;
	}
	if (jnl->owner != current_thread()) {
		panic("jnl: relocate: Not the owner! jnl %p, owner %p, curact %p\n",
		      jnl, jnl->owner, current_thread());
	}

	if (tbuffer_size == 0)
		tbuffer_size = jnl->tbuffer_size;
	size_up_tbuffer(jnl, tbuffer_size, jnl->jhdr->jhdr_size);

	/*
	 * Flush any non-active transactions.  We have to temporarily hide the
	 * active transaction to make journal_flush flush out non-active but
	 * current (unwritten) transactions.
	 */
	tr = jnl->active_tr;
	CHECK_TRANSACTION(tr);
	jnl->active_tr = NULL;
	ret = journal_flush(jnl, JOURNAL_WAIT_FOR_IO);
	jnl->active_tr = tr;

	if (ret) {
		return ret;
	}
	wait_condition(jnl, &jnl->flushing, "end_transaction");

	/*
	 * At this point, we have completely flushed the contents of the current
	 * journal to disk (and have asynchronously written all of the txns to
	 * their actual desired locations).  As a result, we can (and must) clear
	 * out the old_start array.  If we do not, then if the last written transaction
	 * started at the beginning of the journal (starting 1 block into the
	 * journal file) it could confuse the buffer_flushed callback.  This is
	 * because we're about to reset the start/end pointers of the journal header
	 * below.
	 */
	lock_oldstart(jnl);
	for (i = 0; i < sizeof (jnl->old_start) / sizeof(jnl->old_start[0]); i++) {
		jnl->old_start[i] = 0;
	}
	unlock_oldstart(jnl);

	/* Update the journal's offset and size in memory. */
	jnl->jdev_offset = offset;
	jnl->jhdr->start = jnl->jhdr->end = jnl->jhdr->jhdr_size;
	jnl->jhdr->size = journal_size;
	jnl->active_start = jnl->jhdr->start;

	/*
	 * Force the active transaction to be written to the new journal.  Call the
	 * supplied callback after the blocks have been written to the journal, but
	 * before they get written to their normal on-disk locations.
	 */
	jnl->active_tr = NULL;
	ret = end_transaction(tr, 1, callback, callback_arg, FALSE, TRUE);
	if (ret) {
		printf("jnl: %s: relocate: end_transaction failed (%d)\n", jnl->jdev_name, ret);
		goto bad_journal;
	}

	/*
	 * Create a new, empty transaction to be the active transaction.  This way
	 * our caller can use journal_end_transaction as usual.
	 */
	ret = journal_allocate_transaction(jnl);
	if (ret) {
		printf("jnl: %s: relocate: could not allocate new transaction (%d)\n", jnl->jdev_name, ret);
		goto bad_journal;
	}

	return 0;

bad_journal:
	jnl->flags |= JOURNAL_INVALID;
	abort_transaction(jnl, tr);
	return ret;
}
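/*
 * Illustrative sketch (not part of this file): a journal_relocate callback
 * that persists the new journal location before the active transaction's
 * blocks go to their normal locations.  The struct my_fs, its fields, and
 * write_volume_header_now are all hypothetical; per the comment above, these
 * updates must bypass the transaction and reach the media before the
 * callback returns.
 */
#if 0
static errno_t
update_journal_location(void *arg)
{
	struct my_fs *fs = arg;

	fs->vh->journal_offset = fs->new_jnl_offset;
	fs->vh->journal_size   = fs->new_jnl_size;
	return write_volume_header_now(fs);	// synchronous, non-transactional write
}

/* caller, with a transaction already started: */
ret = journal_relocate(jnl, new_offset, new_size, 0,
		       update_journal_location, fs);
#endif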
uint32_t journal_current_txn(journal *jnl)
{
	return jnl->sequence_num + (jnl->active_tr || jnl->cur_tr ? 0 : 1);
}
#else   // !JOURNALING - so provide stub functions

int journal_uses_fua(__unused journal *jnl)
{
	return 0;
}

journal *
journal_create(__unused struct vnode *jvp,
	       __unused off_t         offset,
	       __unused off_t         journal_size,
	       __unused struct vnode *fsvp,
	       __unused size_t        min_fs_blksz,
	       __unused int32_t       flags,
	       __unused int32_t       tbuffer_size,
	       __unused void        (*flush)(void *arg),
	       __unused void         *arg,
	       __unused struct mount *fsmount)
{
	return NULL;
}

journal *
journal_open(__unused struct vnode *jvp,
	     __unused off_t         offset,
	     __unused off_t         journal_size,
	     __unused struct vnode *fsvp,
	     __unused size_t        min_fs_blksz,
	     __unused int32_t       flags,
	     __unused int32_t       tbuffer_size,
	     __unused void        (*flush)(void *arg),
	     __unused void         *arg,
	     __unused struct mount *fsmount)
{
	return NULL;
}


int
journal_modify_block_start(__unused journal *jnl, __unused struct buf *bp)
{
	return EINVAL;
}

int
journal_modify_block_end(__unused journal *jnl,
			 __unused struct buf *bp,
			 __unused void (*func)(struct buf *bp, void *arg),
			 __unused void *arg)
{
	return EINVAL;
}

int
journal_kill_block(__unused journal *jnl, __unused struct buf *bp)
{
	return EINVAL;
}

int journal_relocate(__unused journal *jnl,
		     __unused off_t offset,
		     __unused off_t journal_size,
		     __unused int32_t tbuffer_size,
		     __unused errno_t (*callback)(void *),
		     __unused void *callback_arg)
{
	return EINVAL;
}

void
journal_close(__unused journal *jnl)
{
}

int
journal_start_transaction(__unused journal *jnl)
{
	return EINVAL;
}

int
journal_end_transaction(__unused journal *jnl)
{
	return EINVAL;
}

int
journal_flush(__unused journal *jnl, __unused journal_flush_options_t options)
{
	return EINVAL;
}

int
journal_is_clean(__unused struct vnode *jvp,
		 __unused off_t         offset,
		 __unused off_t         journal_size,
		 __unused struct vnode *fsvp,
		 __unused size_t        min_fs_block_size)
{
	return 0;
}

void *
journal_owner(__unused journal *jnl)
{
	return NULL;
}

void
journal_lock(__unused journal *jnl)
{
	return;
}

void
journal_unlock(__unused journal *jnl)
{
	return;
}

__private_extern__ int
journal_trim_add_extent(__unused journal *jnl,
			__unused uint64_t offset,
			__unused uint64_t length)
{
	return 0;
}

int
journal_request_immediate_flush(__unused journal *jnl)
{
	return 0;
}

__private_extern__ int
journal_trim_remove_extent(__unused journal *jnl,
			   __unused uint64_t offset,
			   __unused uint64_t length)
{
	return 0;
}

int journal_trim_extent_overlap(__unused journal *jnl,
				__unused uint64_t offset,
				__unused uint64_t length,
				__unused uint64_t *end)
{
	return 0;
}

#endif  // !JOURNALING