/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.10 (Berkeley) 3/28/95
 */
#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/buf_internal.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/uio_internal.h>
#include <libkern/libkern.h>
#include <machine/machine_routines.h>

#include <sys/ubc_internal.h>
#include <vm/vnode_pager.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_map.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>
#include <libkern/OSAtomic.h>

#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
#define CL_READ		0x01
#define CL_WRITE	0x02
#define CL_ASYNC	0x04
#define CL_COMMIT	0x08
#define CL_PAGEOUT	0x10
#define CL_NOZERO	0x40
#define CL_PAGEIN	0x80
#define CL_DEV_MEMORY	0x100
#define CL_PRESERVE	0x200
#define CL_THROTTLE	0x400
#define CL_KEEPCACHED	0x800
#define CL_DIRECT_IO	0x1000
#define CL_PASSIVE	0x2000
#define CL_IOSTREAMING	0x4000

#define MAX_VECTOR_UPL_ELEMENTS	8
#define MAX_VECTOR_UPL_SIZE	(2 * MAX_UPL_SIZE) * PAGE_SIZE
extern upl_t vector_upl_create(vm_offset_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_iostate(upl_t, upl_t, vm_offset_t, u_int32_t);
struct clios {
	u_int	io_completed;	/* amount of io that has currently completed */
	u_int	io_issued;	/* amount of io that was successfully issued */
	int	io_error;	/* error code of first error encountered */
	int	io_wanted;	/* someone is sleeping waiting for a change in state */
};
static lck_grp_t	*cl_mtx_grp;
static lck_attr_t	*cl_mtx_attr;
static lck_grp_attr_t	*cl_mtx_grp_attr;
static lck_mtx_t	*cl_mtxp;
static lck_mtx_t	*cl_transaction_mtxp;
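/*
 * Illustrative sketch (not part of the original file): the pattern a
 * consumer of 'struct clios' uses to drain an async I/O stream.  The
 * function name is hypothetical; the locking and wakeup discipline
 * mirrors what cluster_iodone() and the direct I/O paths below do with
 * io_issued/io_completed/io_wanted.
 */
static void
example_wait_for_io_to_drain(struct clios *iostate)
{
	lck_mtx_lock(cl_mtxp);

	while (iostate->io_issued != iostate->io_completed) {
		/* ask cluster_iodone() for a wakeup when the counts change */
		iostate->io_wanted = 1;
		msleep((caddr_t)&iostate->io_wanted, cl_mtxp, PRIBIO + 1, "example_wait_for_io_to_drain", NULL);
	}
	lck_mtx_unlock(cl_mtxp);
}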
#define	PUSH_DELAY	0x01
#define PUSH_ALL	0x02
#define PUSH_SYNC	0x04
static void cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset);
static void cluster_wait_IO(buf_t cbp_head, int async);
static void cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait);

static int cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length);

static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
		      int flags, buf_t real_bp, struct clios *iostate, int (*)(buf_t, void *), void *callback_arg);
static int cluster_iodone(buf_t bp, void *callback_arg);
static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags);
static int cluster_hard_throttle_on(vnode_t vp, uint32_t);

static void cluster_syncup(vnode_t vp, off_t newEOF, int (*)(buf_t, void *), void *callback_arg);

static void cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference);
static int cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference);

static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags,
			     int (*)(buf_t, void *), void *callback_arg);
static int cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
			       int flags, int (*)(buf_t, void *), void *callback_arg);
static int cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
			       int (*)(buf_t, void *), void *callback_arg, int flags);

static int cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF,
			      off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg);
static int cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF,
				int *write_type, u_int32_t *write_length, int flags, int (*)(buf_t, void *), void *callback_arg);
static int cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF,
				int *write_type, u_int32_t *write_length, int (*)(buf_t, void *), void *callback_arg, int bflag);

static int cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*)(buf_t, void *), void *callback_arg);

static int cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
static void cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra, int (*callback)(buf_t, void *), void *callback_arg, int bflag);

static int cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg);

static int cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int (*)(buf_t, void *), void *callback_arg);

static void sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg);
static void sparse_cluster_push(void **cmapp, vnode_t vp, off_t EOF, int push_flag, int (*)(buf_t, void *), void *callback_arg);
static void sparse_cluster_add(void **cmapp, vnode_t vp, struct cl_extent *, off_t EOF, int (*)(buf_t, void *), void *callback_arg);

static kern_return_t vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp);
static kern_return_t vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp);
static kern_return_t vfs_drt_control(void **cmapp, int op_type);
/*
 * limit the internal I/O size so that we
 * can represent it in a 32 bit int
 */
#define MAX_IO_REQUEST_SIZE	(1024 * 1024 * 512)
#define MAX_IO_CONTIG_SIZE	(MAX_UPL_SIZE * PAGE_SIZE)
#define MIN_DIRECT_WRITE_SIZE	(4 * PAGE_SIZE)

#define IO_SCALE(vp, base)		(vp->v_mount->mnt_ioscale * base)
#define MAX_CLUSTER_SIZE(vp)		(cluster_max_io_size(vp->v_mount, CL_WRITE))
#define MAX_PREFETCH(vp, io_size)	(io_size * IO_SCALE(vp, 3))
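/*
 * Worked example (illustrative, not from the original source): with a
 * typical mnt_ioscale of 1, IO_SCALE(vp, 3) evaluates to 3, so a mount
 * whose cluster_max_io_size() for reads is 1 MByte gets
 * MAX_PREFETCH(vp, 1 MByte) == 3 MBytes of read-ahead window.
 */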
int speculative_reads_disabled = 0;

/*
 * throttle the number of async writes that
 * can be outstanding on a single vnode
 * before we issue a synchronous write
 */
#define HARD_THROTTLE_MAXCNT	0
#define HARD_THROTTLE_MAXSIZE	(32 * 1024)

int hard_throttle_on_root = 0;
struct timeval priority_IO_timestamp_for_root;
void
cluster_init(void) {
	/*
	 * allocate lock group attribute and group
	 */
	cl_mtx_grp_attr = lck_grp_attr_alloc_init();
	cl_mtx_grp = lck_grp_alloc_init("cluster I/O", cl_mtx_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	cl_mtx_attr = lck_attr_alloc_init();

	/*
	 * allocate and initialize mutex's used to protect updates and waits
	 * on the cluster_io context
	 */
	cl_mtxp	= lck_mtx_alloc_init(cl_mtx_grp, cl_mtx_attr);

	if (cl_mtxp == NULL)
		panic("cluster_init: failed to allocate cl_mtxp");

	cl_transaction_mtxp = lck_mtx_alloc_init(cl_mtx_grp, cl_mtx_attr);

	if (cl_transaction_mtxp == NULL)
		panic("cluster_init: failed to allocate cl_transaction_mtxp");
}
uint32_t
cluster_max_io_size(mount_t mp, int type)
{
	uint32_t	max_io_size;
	uint32_t	segcnt;
	uint32_t	maxcnt;

	switch (type) {

	case CL_READ:
		segcnt = mp->mnt_segreadcnt;
		maxcnt = mp->mnt_maxreadcnt;
		break;
	case CL_WRITE:
		segcnt = mp->mnt_segwritecnt;
		maxcnt = mp->mnt_maxwritecnt;
		break;
	default:
		segcnt = min(mp->mnt_segreadcnt, mp->mnt_segwritecnt);
		maxcnt = min(mp->mnt_maxreadcnt, mp->mnt_maxwritecnt);
		break;
	}
	if (segcnt > MAX_UPL_SIZE) {
		/*
		 * don't allow a size beyond the max UPL size we can create
		 */
		segcnt = MAX_UPL_SIZE;
	}
	max_io_size = min((segcnt * PAGE_SIZE), maxcnt);

	if (max_io_size < (MAX_UPL_TRANSFER * PAGE_SIZE)) {
		/*
		 * don't allow a size smaller than the old fixed limit
		 */
		max_io_size = (MAX_UPL_TRANSFER * PAGE_SIZE);
	} else {
		/*
		 * make sure the size specified is a multiple of PAGE_SIZE
		 */
		max_io_size &= ~PAGE_MASK;
	}
	return (max_io_size);
}
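/*
 * Illustrative sketch (not part of the original source): how a caller
 * might use cluster_max_io_size() to clamp a request before issuing it.
 * The helper name and its 'resid' parameter are hypothetical.
 */
static uint32_t
example_clamp_read_size(mount_t mp, uint32_t resid)
{
	uint32_t max_io_size = cluster_max_io_size(mp, CL_READ);

	/* never ask the device for more than it can take in a single I/O */
	return ((resid > max_io_size) ? max_io_size : resid);
}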
#define CLW_ALLOCATE		0x01
#define CLW_RETURNLOCKED	0x02
#define CLW_IONOCACHE		0x04
#define CLW_IOPASSIVE		0x08
/*
 * if the read ahead context doesn't yet exist,
 * allocate and initialize it...
 * the vnode lock serializes multiple callers
 * during the actual assignment... first one
 * to grab the lock wins... the other callers
 * will release the now unnecessary storage
 *
 * once the context is present, try to grab (but don't block on)
 * the lock associated with it... if someone
 * else currently owns it, than the read
 * will run without read-ahead.  this allows
 * multiple readers to run in parallel and
 * since there's only 1 read ahead context,
 * there's no real loss in only allowing 1
 * reader to have read-ahead enabled.
 */
static struct cl_readahead *
cluster_get_rap(vnode_t vp)
{
	struct ubc_info		*ubc;
	struct cl_readahead	*rap;

	ubc = vp->v_ubcinfo;

	if ((rap = ubc->cl_rahead) == NULL) {
		MALLOC_ZONE(rap, struct cl_readahead *, sizeof *rap, M_CLRDAHEAD, M_WAITOK);

		bzero(rap, sizeof *rap);
		rap->cl_lastr = -1;
		lck_mtx_init(&rap->cl_lockr, cl_mtx_grp, cl_mtx_attr);

		vnode_lock(vp);

		if (ubc->cl_rahead == NULL)
			ubc->cl_rahead = rap;
		else {
			lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp);
			FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD);
			rap = ubc->cl_rahead;
		}
		vnode_unlock(vp);
	}
	if (lck_mtx_try_lock(&rap->cl_lockr) == TRUE)
		return (rap);

	return ((struct cl_readahead *)NULL);
}
/*
 * if the write behind context doesn't yet exist,
 * and CLW_ALLOCATE is specified, allocate and initialize it...
 * the vnode lock serializes multiple callers
 * during the actual assignment... first one
 * to grab the lock wins... the other callers
 * will release the now unnecessary storage
 *
 * if CLW_RETURNLOCKED is set, grab (blocking if necessary)
 * the lock associated with the write behind context before
 * returning
 */
static struct cl_writebehind *
cluster_get_wbp(vnode_t vp, int flags)
{
	struct ubc_info		*ubc;
	struct cl_writebehind	*wbp;

	ubc = vp->v_ubcinfo;

	if ((wbp = ubc->cl_wbehind) == NULL) {

		if ( !(flags & CLW_ALLOCATE))
			return ((struct cl_writebehind *)NULL);

		MALLOC_ZONE(wbp, struct cl_writebehind *, sizeof *wbp, M_CLWRBEHIND, M_WAITOK);

		bzero(wbp, sizeof *wbp);
		lck_mtx_init(&wbp->cl_lockw, cl_mtx_grp, cl_mtx_attr);

		vnode_lock(vp);

		if (ubc->cl_wbehind == NULL)
			ubc->cl_wbehind = wbp;
		else {
			lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
			FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
			wbp = ubc->cl_wbehind;
		}
		vnode_unlock(vp);
	}
	if (flags & CLW_RETURNLOCKED)
		lck_mtx_lock(&wbp->cl_lockw);

	return (wbp);
}
static void
cluster_syncup(vnode_t vp, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg)
{
	struct cl_writebehind *wbp;

	if ((wbp = cluster_get_wbp(vp, 0)) != NULL) {

		if (wbp->cl_number) {
			lck_mtx_lock(&wbp->cl_lockw);

			cluster_try_push(wbp, vp, newEOF, PUSH_ALL | PUSH_SYNC, callback, callback_arg);

			lck_mtx_unlock(&wbp->cl_lockw);
		}
	}
}
static int
cluster_hard_throttle_on(vnode_t vp, uint32_t hard_throttle)
{
	struct uthread	*ut;

	if (hard_throttle) {
		static struct timeval hard_throttle_maxelapsed = { 0, 200000 };

		if (vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV) {
			struct timeval elapsed;

			if (hard_throttle_on_root)
				return (1);

			microuptime(&elapsed);
			timevalsub(&elapsed, &priority_IO_timestamp_for_root);

			if (timevalcmp(&elapsed, &hard_throttle_maxelapsed, <))
				return (1);
		}
	}
	if (throttle_get_io_policy(&ut) == IOPOL_THROTTLE) {
		if (throttle_io_will_be_throttled(-1, vp->v_mount)) {
static int
cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags)
{
	int upl_abort_code = 0;
	int page_in  = 0;
	int page_out = 0;

	if (io_flags & B_PHYS)
		/*
		 * direct write of any flavor, or a direct read that wasn't aligned
		 */
		ubc_upl_commit_range(upl, upl_offset, abort_size, UPL_COMMIT_FREE_ON_EMPTY);
	else {
		if (io_flags & B_PAGEIO) {
			if (io_flags & B_READ)
				page_in  = 1;
			else
				page_out = 1;
		}
		if (io_flags & B_CACHE)
			/*
			 * leave pages in the cache unchanged on error
			 */
			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
		else if (page_out && (error != ENXIO))
			/*
			 * transient error... leave pages unchanged
			 */
			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
		else if (page_in)
			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
		else
			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;

		ubc_upl_abort_range(upl, upl_offset, abort_size, upl_abort_code);
	}
	return (upl_abort_code);
}
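/*
 * Illustrative summary (not part of the original source) of how
 * cluster_ioerror() above disposes of the pages on failure:
 *
 *   B_PHYS set                  -> commit the range (direct I/O owns the data)
 *   B_CACHE set                  -> abort, but keep the pages (FREE_ON_EMPTY)
 *   pageout and error != ENXIO   -> abort, keep the pages (transient error)
 *   pagein                       -> abort with UPL_ABORT_ERROR
 *   otherwise                    -> abort and dump the pages
 */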
494 cluster_iodone(buf_t bp
, void *callback_arg
)
505 int transaction_size
= 0;
511 struct clios
*iostate
;
512 boolean_t transaction_complete
= FALSE
;
514 cbp_head
= (buf_t
)(bp
->b_trans_head
);
516 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 20)) | DBG_FUNC_START
,
517 cbp_head
, bp
->b_lblkno
, bp
->b_bcount
, bp
->b_flags
, 0);
519 if (cbp_head
->b_trans_next
|| !(cbp_head
->b_flags
& B_EOT
)) {
521 lck_mtx_lock_spin(cl_transaction_mtxp
);
523 bp
->b_flags
|= B_TDONE
;
525 for (cbp
= cbp_head
; cbp
; cbp
= cbp
->b_trans_next
) {
527 * all I/O requests that are part of this transaction
528 * have to complete before we can process it
530 if ( !(cbp
->b_flags
& B_TDONE
)) {
532 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 20)) | DBG_FUNC_END
,
533 cbp_head
, cbp
, cbp
->b_bcount
, cbp
->b_flags
, 0);
535 lck_mtx_unlock(cl_transaction_mtxp
);
538 if (cbp
->b_flags
& B_EOT
)
539 transaction_complete
= TRUE
;
541 lck_mtx_unlock(cl_transaction_mtxp
);
543 if (transaction_complete
== FALSE
) {
544 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 20)) | DBG_FUNC_END
,
545 cbp_head
, 0, 0, 0, 0);
555 upl_offset
= cbp
->b_uploffset
;
557 b_flags
= cbp
->b_flags
;
558 real_bp
= cbp
->b_real_bp
;
559 zero_offset
= cbp
->b_validend
;
560 iostate
= (struct clios
*)cbp
->b_iostate
;
563 real_bp
->b_dev
= cbp
->b_dev
;
566 if ((cbp
->b_flags
& B_ERROR
) && error
== 0)
567 error
= cbp
->b_error
;
569 total_resid
+= cbp
->b_resid
;
570 total_size
+= cbp
->b_bcount
;
572 cbp_next
= cbp
->b_trans_next
;
574 if (cbp_next
== NULL
)
576 * compute the overall size of the transaction
577 * in case we created one that has 'holes' in it
578 * 'total_size' represents the amount of I/O we
579 * did, not the span of the transaction w/r to the UPL
581 transaction_size
= cbp
->b_uploffset
+ cbp
->b_bcount
- upl_offset
;
588 if (error
== 0 && total_resid
)
592 int (*cliodone_func
)(buf_t
, void *) = (int (*)(buf_t
, void *))(cbp_head
->b_cliodone
);
594 if (cliodone_func
!= NULL
) {
595 cbp_head
->b_bcount
= transaction_size
;
597 error
= (*cliodone_func
)(cbp_head
, callback_arg
);
601 cluster_zero(upl
, zero_offset
, PAGE_SIZE
- (zero_offset
& PAGE_MASK
), real_bp
);
603 free_io_buf(cbp_head
);
609 * someone has issued multiple I/Os asynchrounsly
610 * and is waiting for them to complete (streaming)
612 lck_mtx_lock_spin(cl_mtxp
);
614 if (error
&& iostate
->io_error
== 0)
615 iostate
->io_error
= error
;
617 iostate
->io_completed
+= total_size
;
619 if (iostate
->io_wanted
) {
621 * someone is waiting for the state of
622 * this io stream to change
624 iostate
->io_wanted
= 0;
627 lck_mtx_unlock(cl_mtxp
);
630 wakeup((caddr_t
)&iostate
->io_wanted
);
633 if (b_flags
& B_COMMIT_UPL
) {
635 pg_offset
= upl_offset
& PAGE_MASK
;
636 commit_size
= (pg_offset
+ transaction_size
+ (PAGE_SIZE
- 1)) & ~PAGE_MASK
;
639 upl_flags
= cluster_ioerror(upl
, upl_offset
- pg_offset
, commit_size
, error
, b_flags
);
641 upl_flags
= UPL_COMMIT_FREE_ON_EMPTY
;
643 if ((b_flags
& B_PHYS
) && (b_flags
& B_READ
))
644 upl_flags
|= UPL_COMMIT_SET_DIRTY
;
647 upl_flags
|= UPL_COMMIT_INACTIVATE
;
649 ubc_upl_commit_range(upl
, upl_offset
- pg_offset
, commit_size
, upl_flags
);
652 if ((b_flags
& B_NEED_IODONE
) && real_bp
) {
654 real_bp
->b_flags
|= B_ERROR
;
655 real_bp
->b_error
= error
;
657 real_bp
->b_resid
= total_resid
;
659 buf_biodone(real_bp
);
661 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 20)) | DBG_FUNC_END
,
662 upl
, upl_offset
- pg_offset
, commit_size
, (error
<< 24) | upl_flags
, 0);
669 cluster_hard_throttle_limit(vnode_t vp
, uint32_t *limit
, uint32_t hard_throttle
)
671 if (cluster_hard_throttle_on(vp
, hard_throttle
)) {
672 *limit
= HARD_THROTTLE_MAXSIZE
;
680 cluster_zero(upl_t upl
, upl_offset_t upl_offset
, int size
, buf_t bp
)
683 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 23)) | DBG_FUNC_START
,
684 upl_offset
, size
, bp
, 0, 0);
686 if (bp
== NULL
|| bp
->b_datap
== 0) {
690 pl
= ubc_upl_pageinfo(upl
);
692 if (upl_device_page(pl
) == TRUE
) {
693 zero_addr
= ((addr64_t
)upl_phys_page(pl
, 0) << 12) + upl_offset
;
695 bzero_phys_nc(zero_addr
, size
);
702 page_index
= upl_offset
/ PAGE_SIZE
;
703 page_offset
= upl_offset
& PAGE_MASK
;
705 zero_addr
= ((addr64_t
)upl_phys_page(pl
, page_index
) << 12) + page_offset
;
706 zero_cnt
= min(PAGE_SIZE
- page_offset
, size
);
708 bzero_phys(zero_addr
, zero_cnt
);
711 upl_offset
+= zero_cnt
;
715 bzero((caddr_t
)((vm_offset_t
)bp
->b_datap
+ upl_offset
), size
);
717 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 23)) | DBG_FUNC_END
,
718 upl_offset
, size
, 0, 0, 0);
723 cluster_EOT(buf_t cbp_head
, buf_t cbp_tail
, int zero_offset
)
725 cbp_head
->b_validend
= zero_offset
;
726 cbp_tail
->b_flags
|= B_EOT
;
730 cluster_wait_IO(buf_t cbp_head
, int async
)
736 * async callback completion will not normally
737 * generate a wakeup upon I/O completion...
738 * by setting BL_WANTED, we will force a wakeup
739 * to occur as any outstanding I/Os complete...
740 * I/Os already completed will have BL_CALLDONE already
741 * set and we won't block in buf_biowait_callback..
742 * note that we're actually waiting for the bp to have
743 * completed the callback function... only then
744 * can we safely take back ownership of the bp
745 * need the main buf mutex in order to safely
750 for (cbp
= cbp_head
; cbp
; cbp
= cbp
->b_trans_next
)
751 cbp
->b_lflags
|= BL_WANTED
;
755 for (cbp
= cbp_head
; cbp
; cbp
= cbp
->b_trans_next
) {
757 buf_biowait_callback(cbp
);
764 cluster_complete_transaction(buf_t
*cbp_head
, void *callback_arg
, int *retval
, int flags
, int needwait
)
770 * cluster_complete_transaction will
771 * only be called if we've issued a complete chain in synchronous mode
772 * or, we've already done a cluster_wait_IO on an incomplete chain
775 for (cbp
= *cbp_head
; cbp
; cbp
= cbp
->b_trans_next
)
779 * we've already waited on all of the I/Os in this transaction,
780 * so mark all of the buf_t's in this transaction as B_TDONE
781 * so that cluster_iodone sees the transaction as completed
783 for (cbp
= *cbp_head
; cbp
; cbp
= cbp
->b_trans_next
)
784 cbp
->b_flags
|= B_TDONE
;
786 error
= cluster_iodone(*cbp_head
, callback_arg
);
788 if ( !(flags
& CL_ASYNC
) && error
&& *retval
== 0) {
789 if (((flags
& (CL_PAGEOUT
| CL_KEEPCACHED
)) != CL_PAGEOUT
) || (error
!= ENXIO
))
792 *cbp_head
= (buf_t
)NULL
;
797 cluster_io(vnode_t vp
, upl_t upl
, vm_offset_t upl_offset
, off_t f_offset
, int non_rounded_size
,
798 int flags
, buf_t real_bp
, struct clios
*iostate
, int (*callback
)(buf_t
, void *), void *callback_arg
)
807 buf_t cbp_head
= NULL
;
808 buf_t cbp_tail
= NULL
;
817 int async_throttle
= 0;
819 vm_offset_t upl_end_offset
;
820 boolean_t need_EOT
= FALSE
;
823 * we currently don't support buffers larger than a page
825 if (real_bp
&& non_rounded_size
> PAGE_SIZE
)
826 panic("%s(): Called with real buffer of size %d bytes which "
827 "is greater than the maximum allowed size of "
828 "%d bytes (the system PAGE_SIZE).\n",
829 __FUNCTION__
, non_rounded_size
, PAGE_SIZE
);
834 * we don't want to do any funny rounding of the size for IO requests
835 * coming through the DIRECT or CONTIGUOUS paths... those pages don't
836 * belong to us... we can't extend (nor do we need to) the I/O to fill
839 if (mp
->mnt_devblocksize
> 1 && !(flags
& (CL_DEV_MEMORY
| CL_DIRECT_IO
))) {
841 * round the requested size up so that this I/O ends on a
842 * page boundary in case this is a 'write'... if the filesystem
843 * has blocks allocated to back the page beyond the EOF, we want to
844 * make sure to write out the zero's that are sitting beyond the EOF
845 * so that in case the filesystem doesn't explicitly zero this area
846 * if a hole is created via a lseek/write beyond the current EOF,
847 * it will return zeros when it's read back from the disk. If the
848 * physical allocation doesn't extend for the whole page, we'll
849 * only write/read from the disk up to the end of this allocation
850 * via the extent info returned from the VNOP_BLOCKMAP call.
852 pg_offset
= upl_offset
& PAGE_MASK
;
854 size
= (((non_rounded_size
+ pg_offset
) + (PAGE_SIZE
- 1)) & ~PAGE_MASK
) - pg_offset
;
857 * anyone advertising a blocksize of 1 byte probably
858 * can't deal with us rounding up the request size
859 * AFP is one such filesystem/device
861 size
= non_rounded_size
;
863 upl_end_offset
= upl_offset
+ size
;
865 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 22)) | DBG_FUNC_START
, (int)f_offset
, size
, upl_offset
, flags
, 0);
868 * Set the maximum transaction size to the maximum desired number of
872 if (flags
& CL_DEV_MEMORY
)
873 max_trans_count
= 16;
875 if (flags
& CL_READ
) {
877 bmap_flags
= VNODE_READ
;
879 max_iosize
= mp
->mnt_maxreadcnt
;
880 max_vectors
= mp
->mnt_segreadcnt
;
883 bmap_flags
= VNODE_WRITE
;
885 max_iosize
= mp
->mnt_maxwritecnt
;
886 max_vectors
= mp
->mnt_segwritecnt
;
888 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 22)) | DBG_FUNC_NONE
, max_iosize
, max_vectors
, mp
->mnt_devblocksize
, 0, 0);
891 * make sure the maximum iosize is a
892 * multiple of the page size
894 max_iosize
&= ~PAGE_MASK
;
897 * Ensure the maximum iosize is sensible.
900 max_iosize
= PAGE_SIZE
;
902 if (flags
& CL_THROTTLE
) {
903 if ( !(flags
& CL_PAGEOUT
) && cluster_hard_throttle_on(vp
, 1)) {
904 if (max_iosize
> HARD_THROTTLE_MAXSIZE
)
905 max_iosize
= HARD_THROTTLE_MAXSIZE
;
906 async_throttle
= HARD_THROTTLE_MAXCNT
;
908 if ( (flags
& CL_DEV_MEMORY
) )
909 async_throttle
= IO_SCALE(vp
, VNODE_ASYNC_THROTTLE
);
912 u_int max_cluster_size
;
915 max_cluster_size
= MAX_CLUSTER_SIZE(vp
);
916 max_prefetch
= MAX_PREFETCH(vp
, cluster_max_io_size(vp
->v_mount
, CL_READ
));
918 if (max_iosize
> max_cluster_size
)
919 max_cluster
= max_cluster_size
;
921 max_cluster
= max_iosize
;
923 if (size
< max_cluster
)
926 async_throttle
= min(IO_SCALE(vp
, VNODE_ASYNC_THROTTLE
), (max_prefetch
/ max_cluster
) - 1);
932 if (flags
& (CL_PAGEIN
| CL_PAGEOUT
))
933 io_flags
|= B_PAGEIO
;
934 if (flags
& (CL_IOSTREAMING
))
935 io_flags
|= B_IOSTREAMING
;
936 if (flags
& CL_COMMIT
)
937 io_flags
|= B_COMMIT_UPL
;
938 if (flags
& CL_PRESERVE
)
940 if (flags
& CL_KEEPCACHED
)
942 if (flags
& CL_PASSIVE
)
943 io_flags
|= B_PASSIVE
;
944 if (vp
->v_flag
& VSYSTEM
)
947 if ((flags
& CL_READ
) && ((upl_offset
+ non_rounded_size
) & PAGE_MASK
) && (!(flags
& CL_NOZERO
))) {
949 * then we are going to end up
950 * with a page that we can't complete (the file size wasn't a multiple
951 * of PAGE_SIZE and we're trying to read to the end of the file
952 * so we'll go ahead and zero out the portion of the page we can't
953 * read in from the file
955 zero_offset
= upl_offset
+ non_rounded_size
;
960 u_int io_size_wanted
;
963 if (size
> max_iosize
)
964 io_size
= max_iosize
;
968 io_size_wanted
= io_size
;
969 io_size_tmp
= (size_t)io_size
;
971 if ((error
= VNOP_BLOCKMAP(vp
, f_offset
, io_size
, &blkno
, &io_size_tmp
, NULL
, bmap_flags
, NULL
)))
974 if (io_size_tmp
> io_size_wanted
)
975 io_size
= io_size_wanted
;
977 io_size
= (u_int
)io_size_tmp
;
979 if (real_bp
&& (real_bp
->b_blkno
== real_bp
->b_lblkno
))
980 real_bp
->b_blkno
= blkno
;
982 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 24)) | DBG_FUNC_NONE
,
983 (int)f_offset
, (int)(blkno
>>32), (int)blkno
, io_size
, 0);
987 * vnop_blockmap didn't return an error... however, it did
988 * return an extent size of 0 which means we can't
989 * make forward progress on this I/O... a hole in the
990 * file would be returned as a blkno of -1 with a non-zero io_size
991 * a real extent is returned with a blkno != -1 and a non-zero io_size
996 if ( !(flags
& CL_READ
) && blkno
== -1) {
1000 if(upl_get_internal_vectorupl(upl
))
1001 panic("Vector UPLs should not take this code-path\n");
1003 * we're writing into a 'hole'
1005 if (flags
& CL_PAGEOUT
) {
1007 * if we got here via cluster_pageout
1008 * then just error the request and return
1009 * the 'hole' should already have been covered
1015 * we can get here if the cluster code happens to
1016 * pick up a page that was dirtied via mmap vs
1017 * a 'write' and the page targets a 'hole'...
1018 * i.e. the writes to the cluster were sparse
1019 * and the file was being written for the first time
1021 * we can also get here if the filesystem supports
1022 * 'holes' that are less than PAGE_SIZE.... because
1023 * we can't know if the range in the page that covers
1024 * the 'hole' has been dirtied via an mmap or not,
1025 * we have to assume the worst and try to push the
1026 * entire page to storage.
1028 * Try paging out the page individually before
1029 * giving up entirely and dumping it (the pageout
1030 * path will insure that the zero extent accounting
1031 * has been taken care of before we get back into cluster_io)
1033 * go direct to vnode_pageout so that we don't have to
1034 * unbusy the page from the UPL... we used to do this
1035 * so that we could call ubc_sync_range, but that results
1036 * in a potential deadlock if someone else races us to acquire
1037 * that page and wins and in addition needs one of the pages
1038 * we're continuing to hold in the UPL
1040 pageout_flags
= UPL_MSYNC
| UPL_VNODE_PAGER
| UPL_NESTED_PAGEOUT
;
1042 if ( !(flags
& CL_ASYNC
))
1043 pageout_flags
|= UPL_IOSYNC
;
1044 if ( !(flags
& CL_COMMIT
))
1045 pageout_flags
|= UPL_NOCOMMIT
;
1051 * first we have to wait for the the current outstanding I/Os
1052 * to complete... EOT hasn't been set yet on this transaction
1053 * so the pages won't be released just because all of the current
1054 * I/O linked to this transaction has completed...
1056 cluster_wait_IO(cbp_head
, (flags
& CL_ASYNC
));
1059 * we've got a transcation that
1060 * includes the page we're about to push out through vnode_pageout...
1061 * find the last bp in the list which will be the one that
1062 * includes the head of this page and round it's iosize down
1063 * to a page boundary...
1065 for (last_cbp
= cbp
= cbp_head
; cbp
->b_trans_next
; cbp
= cbp
->b_trans_next
)
1068 cbp
->b_bcount
&= ~PAGE_MASK
;
1070 if (cbp
->b_bcount
== 0) {
1072 * this buf no longer has any I/O associated with it
1076 if (cbp
== cbp_head
) {
1078 * the buf we just freed was the only buf in
1079 * this transaction... so there's no I/O to do
1084 * remove the buf we just freed from
1085 * the transaction list
1087 last_cbp
->b_trans_next
= NULL
;
1088 cbp_tail
= last_cbp
;
1093 * there was more to the current transaction
1094 * than just the page we are pushing out via vnode_pageout...
1095 * mark it as finished and complete it... we've already
1096 * waited for the I/Os to complete above in the call to cluster_wait_IO
1098 cluster_EOT(cbp_head
, cbp_tail
, 0);
1100 cluster_complete_transaction(&cbp_head
, callback_arg
, &retval
, flags
, 0);
1105 if (vnode_pageout(vp
, upl
, trunc_page(upl_offset
), trunc_page_64(f_offset
), PAGE_SIZE
, pageout_flags
, NULL
) != PAGER_SUCCESS
) {
1109 e_offset
= round_page_64(f_offset
+ 1);
1110 io_size
= e_offset
- f_offset
;
1112 f_offset
+= io_size
;
1113 upl_offset
+= io_size
;
1115 if (size
>= io_size
)
1120 * keep track of how much of the original request
1121 * that we've actually completed... non_rounded_size
1122 * may go negative due to us rounding the request
1123 * to a page size multiple (i.e. size > non_rounded_size)
1125 non_rounded_size
-= io_size
;
1127 if (non_rounded_size
<= 0) {
1129 * we've transferred all of the data in the original
1130 * request, but we were unable to complete the tail
1131 * of the last page because the file didn't have
1132 * an allocation to back that portion... this is ok.
1138 lblkno
= (daddr64_t
)(f_offset
/ PAGE_SIZE_64
);
1140 * we have now figured out how much I/O we can do - this is in 'io_size'
1141 * pg_offset is the starting point in the first page for the I/O
1142 * pg_count is the number of full and partial pages that 'io_size' encompasses
1144 pg_offset
= upl_offset
& PAGE_MASK
;
1146 if (flags
& CL_DEV_MEMORY
) {
1148 * treat physical requests as one 'giant' page
1152 pg_count
= (io_size
+ pg_offset
+ (PAGE_SIZE
- 1)) / PAGE_SIZE
;
1154 if ((flags
& CL_READ
) && blkno
== -1) {
1155 vm_offset_t commit_offset
;
1157 int complete_transaction_now
= 0;
1160 * if we're reading and blkno == -1, then we've got a
1161 * 'hole' in the file that we need to deal with by zeroing
1162 * out the affected area in the upl
1164 if (io_size
>= (u_int
)non_rounded_size
) {
1166 * if this upl contains the EOF and it is not a multiple of PAGE_SIZE
1167 * than 'zero_offset' will be non-zero
1168 * if the 'hole' returned by vnop_blockmap extends all the way to the eof
1169 * (indicated by the io_size finishing off the I/O request for this UPL)
1170 * than we're not going to issue an I/O for the
1171 * last page in this upl... we need to zero both the hole and the tail
1172 * of the page beyond the EOF, since the delayed zero-fill won't kick in
1174 bytes_to_zero
= non_rounded_size
;
1175 if (!(flags
& CL_NOZERO
))
1176 bytes_to_zero
= (((upl_offset
+ io_size
) + (PAGE_SIZE
- 1)) & ~PAGE_MASK
) - upl_offset
;
1180 bytes_to_zero
= io_size
;
1184 cluster_zero(upl
, upl_offset
, bytes_to_zero
, real_bp
);
1190 * if there is a current I/O chain pending
1191 * then the first page of the group we just zero'd
1192 * will be handled by the I/O completion if the zero
1193 * fill started in the middle of the page
1195 commit_offset
= (upl_offset
+ (PAGE_SIZE
- 1)) & ~PAGE_MASK
;
1197 pg_resid
= commit_offset
- upl_offset
;
1199 if (bytes_to_zero
>= pg_resid
) {
1201 * the last page of the current I/O
1202 * has been completed...
1203 * compute the number of fully zero'd
1204 * pages that are beyond it
1205 * plus the last page if its partial
1206 * and we have no more I/O to issue...
1207 * otherwise a partial page is left
1208 * to begin the next I/O
1210 if ((int)io_size
>= non_rounded_size
)
1211 pg_count
= (bytes_to_zero
- pg_resid
+ (PAGE_SIZE
- 1)) / PAGE_SIZE
;
1213 pg_count
= (bytes_to_zero
- pg_resid
) / PAGE_SIZE
;
1215 complete_transaction_now
= 1;
1219 * no pending I/O to deal with
1220 * so, commit all of the fully zero'd pages
1221 * plus the last page if its partial
1222 * and we have no more I/O to issue...
1223 * otherwise a partial page is left
1224 * to begin the next I/O
1226 if ((int)io_size
>= non_rounded_size
)
1227 pg_count
= (pg_offset
+ bytes_to_zero
+ (PAGE_SIZE
- 1)) / PAGE_SIZE
;
1229 pg_count
= (pg_offset
+ bytes_to_zero
) / PAGE_SIZE
;
1231 commit_offset
= upl_offset
& ~PAGE_MASK
;
1233 if ( (flags
& CL_COMMIT
) && pg_count
) {
1234 ubc_upl_commit_range(upl
, commit_offset
, pg_count
* PAGE_SIZE
,
1235 UPL_COMMIT_CLEAR_DIRTY
| UPL_COMMIT_FREE_ON_EMPTY
);
1237 upl_offset
+= io_size
;
1238 f_offset
+= io_size
;
1242 * keep track of how much of the original request
1243 * that we've actually completed... non_rounded_size
1244 * may go negative due to us rounding the request
1245 * to a page size multiple (i.e. size > non_rounded_size)
1247 non_rounded_size
-= io_size
;
1249 if (non_rounded_size
<= 0) {
1251 * we've transferred all of the data in the original
1252 * request, but we were unable to complete the tail
1253 * of the last page because the file didn't have
1254 * an allocation to back that portion... this is ok.
1258 if (cbp_head
&& (complete_transaction_now
|| size
== 0)) {
1259 cluster_wait_IO(cbp_head
, (flags
& CL_ASYNC
));
1261 cluster_EOT(cbp_head
, cbp_tail
, size
== 0 ? zero_offset
: 0);
1263 cluster_complete_transaction(&cbp_head
, callback_arg
, &retval
, flags
, 0);
1269 if (pg_count
> max_vectors
) {
1270 if (((pg_count
- max_vectors
) * PAGE_SIZE
) > io_size
) {
1271 io_size
= PAGE_SIZE
- pg_offset
;
1274 io_size
-= (pg_count
- max_vectors
) * PAGE_SIZE
;
1275 pg_count
= max_vectors
;
1279 * If the transaction is going to reach the maximum number of
1280 * desired elements, truncate the i/o to the nearest page so
1281 * that the actual i/o is initiated after this buffer is
1282 * created and added to the i/o chain.
1284 * I/O directed to physically contiguous memory
1285 * doesn't have a requirement to make sure we 'fill' a page
1287 if ( !(flags
& CL_DEV_MEMORY
) && trans_count
>= max_trans_count
&&
1288 ((upl_offset
+ io_size
) & PAGE_MASK
)) {
1289 vm_offset_t aligned_ofs
;
1291 aligned_ofs
= (upl_offset
+ io_size
) & ~PAGE_MASK
;
1293 * If the io_size does not actually finish off even a
1294 * single page we have to keep adding buffers to the
1295 * transaction despite having reached the desired limit.
1297 * Eventually we get here with the page being finished
1298 * off (and exceeded) and then we truncate the size of
1299 * this i/o request so that it is page aligned so that
1300 * we can finally issue the i/o on the transaction.
1302 if (aligned_ofs
> upl_offset
) {
1303 io_size
= aligned_ofs
- upl_offset
;
1308 if ( !(mp
->mnt_kern_flag
& MNTK_VIRTUALDEV
))
1310 * if we're not targeting a virtual device i.e. a disk image
1311 * it's safe to dip into the reserve pool since real devices
1312 * can complete this I/O request without requiring additional
1313 * bufs from the alloc_io_buf pool
1316 else if ((flags
& CL_ASYNC
) && !(flags
& CL_PAGEOUT
))
1318 * Throttle the speculative IO
1324 cbp
= alloc_io_buf(vp
, priv
);
1326 if (flags
& CL_PAGEOUT
) {
1329 for (i
= 0; i
< pg_count
; i
++) {
1330 if (buf_invalblkno(vp
, lblkno
+ i
, 0) == EBUSY
)
1331 panic("BUSY bp found in cluster_io");
1334 if (flags
& CL_ASYNC
) {
1335 if (buf_setcallback(cbp
, (void *)cluster_iodone
, callback_arg
))
1336 panic("buf_setcallback failed\n");
1338 cbp
->b_cliodone
= (void *)callback
;
1339 cbp
->b_flags
|= io_flags
;
1341 cbp
->b_lblkno
= lblkno
;
1342 cbp
->b_blkno
= blkno
;
1343 cbp
->b_bcount
= io_size
;
1345 if (buf_setupl(cbp
, upl
, upl_offset
))
1346 panic("buf_setupl failed\n");
1348 cbp
->b_trans_next
= (buf_t
)NULL
;
1350 if ((cbp
->b_iostate
= (void *)iostate
))
1352 * caller wants to track the state of this
1353 * io... bump the amount issued against this stream
1355 iostate
->io_issued
+= io_size
;
1357 if (flags
& CL_READ
) {
1358 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 26)) | DBG_FUNC_NONE
,
1359 (int)cbp
->b_lblkno
, (int)cbp
->b_blkno
, upl_offset
, io_size
, 0);
1362 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 27)) | DBG_FUNC_NONE
,
1363 (int)cbp
->b_lblkno
, (int)cbp
->b_blkno
, upl_offset
, io_size
, 0);
1367 cbp_tail
->b_trans_next
= cbp
;
1373 if ( (cbp_head
->b_real_bp
= real_bp
) ) {
1374 cbp_head
->b_flags
|= B_NEED_IODONE
;
1375 real_bp
= (buf_t
)NULL
;
1378 *(buf_t
*)(&cbp
->b_trans_head
) = cbp_head
;
1382 upl_offset
+= io_size
;
1383 f_offset
+= io_size
;
1386 * keep track of how much of the original request
1387 * that we've actually completed... non_rounded_size
1388 * may go negative due to us rounding the request
1389 * to a page size multiple (i.e. size > non_rounded_size)
1391 non_rounded_size
-= io_size
;
1393 if (non_rounded_size
<= 0) {
1395 * we've transferred all of the data in the original
1396 * request, but we were unable to complete the tail
1397 * of the last page because the file didn't have
1398 * an allocation to back that portion... this is ok.
1404 * we have no more I/O to issue, so go
1405 * finish the final transaction
1408 } else if ( ((flags
& CL_DEV_MEMORY
) || (upl_offset
& PAGE_MASK
) == 0) &&
1409 ((flags
& CL_ASYNC
) || trans_count
> max_trans_count
) ) {
1411 * I/O directed to physically contiguous memory...
1412 * which doesn't have a requirement to make sure we 'fill' a page
1414 * the current I/O we've prepared fully
1415 * completes the last page in this request
1417 * it's either an ASYNC request or
1418 * we've already accumulated more than 8 I/O's into
1419 * this transaction so mark it as complete so that
1420 * it can finish asynchronously or via the cluster_complete_transaction
1421 * below if the request is synchronous
1425 if (need_EOT
== TRUE
)
1426 cluster_EOT(cbp_head
, cbp_tail
, size
== 0 ? zero_offset
: 0);
1428 if (flags
& CL_THROTTLE
)
1429 (void)vnode_waitforwrites(vp
, async_throttle
, 0, 0, "cluster_io");
1431 if ( !(io_flags
& B_READ
))
1432 vnode_startwrite(vp
);
1434 (void) VNOP_STRATEGY(cbp
);
1436 if (need_EOT
== TRUE
) {
1437 if ( !(flags
& CL_ASYNC
))
1438 cluster_complete_transaction(&cbp_head
, callback_arg
, &retval
, flags
, 1);
1452 * first wait until all of the outstanding I/O
1453 * for this partial transaction has completed
1455 cluster_wait_IO(cbp_head
, (flags
& CL_ASYNC
));
1458 * Rewind the upl offset to the beginning of the
1461 upl_offset
= cbp_head
->b_uploffset
;
1463 for (cbp
= cbp_head
; cbp
;) {
1466 size
+= cbp
->b_bcount
;
1467 io_size
+= cbp
->b_bcount
;
1469 cbp_next
= cbp
->b_trans_next
;
1475 int need_wakeup
= 0;
1478 * update the error condition for this stream
1479 * since we never really issued the io
1480 * just go ahead and adjust it back
1482 lck_mtx_lock_spin(cl_mtxp
);
1484 if (iostate
->io_error
== 0)
1485 iostate
->io_error
= error
;
1486 iostate
->io_issued
-= io_size
;
1488 if (iostate
->io_wanted
) {
1490 * someone is waiting for the state of
1491 * this io stream to change
1493 iostate
->io_wanted
= 0;
1496 lck_mtx_unlock(cl_mtxp
);
1499 wakeup((caddr_t
)&iostate
->io_wanted
);
1501 if (flags
& CL_COMMIT
) {
1504 pg_offset
= upl_offset
& PAGE_MASK
;
1505 abort_size
= (upl_end_offset
- upl_offset
+ PAGE_MASK
) & ~PAGE_MASK
;
1507 upl_flags
= cluster_ioerror(upl
, upl_offset
- pg_offset
, abort_size
, error
, io_flags
);
1509 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 28)) | DBG_FUNC_NONE
,
1510 upl
, upl_offset
- pg_offset
, abort_size
, (error
<< 24) | upl_flags
, 0);
1514 } else if (cbp_head
)
1515 panic("%s(): cbp_head is not NULL.\n", __FUNCTION__
);
1519 * can get here if we either encountered an error
1520 * or we completely zero-filled the request and
1524 real_bp
->b_flags
|= B_ERROR
;
1525 real_bp
->b_error
= error
;
1527 buf_biodone(real_bp
);
1529 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 22)) | DBG_FUNC_END
, (int)f_offset
, size
, upl_offset
, retval
, 0);
1534 #define reset_vector_run_state() \
1535 issueVectorUPL = vector_upl_offset = vector_upl_index = vector_upl_iosize = vector_upl_size = 0;
1538 vector_cluster_io(vnode_t vp
, upl_t vector_upl
, vm_offset_t vector_upl_offset
, off_t v_upl_uio_offset
, int vector_upl_iosize
,
1539 int io_flag
, buf_t real_bp
, struct clios
*iostate
, int (*callback
)(buf_t
, void *), void *callback_arg
)
1541 vector_upl_set_pagelist(vector_upl
);
1543 if(io_flag
& CL_READ
) {
1544 if(vector_upl_offset
== 0 && ((vector_upl_iosize
& PAGE_MASK
)==0))
1545 io_flag
&= ~CL_PRESERVE
; /*don't zero fill*/
1547 io_flag
|= CL_PRESERVE
; /*zero fill*/
1549 return (cluster_io(vp
, vector_upl
, vector_upl_offset
, v_upl_uio_offset
, vector_upl_iosize
, io_flag
, real_bp
, iostate
, callback
, callback_arg
));
1554 cluster_read_prefetch(vnode_t vp
, off_t f_offset
, u_int size
, off_t filesize
, int (*callback
)(buf_t
, void *), void *callback_arg
, int bflag
)
1556 int pages_in_prefetch
;
1558 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 49)) | DBG_FUNC_START
,
1559 (int)f_offset
, size
, (int)filesize
, 0, 0);
1561 if (f_offset
>= filesize
) {
1562 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 49)) | DBG_FUNC_END
,
1563 (int)f_offset
, 0, 0, 0, 0);
1566 if ((off_t
)size
> (filesize
- f_offset
))
1567 size
= filesize
- f_offset
;
1568 pages_in_prefetch
= (size
+ (PAGE_SIZE
- 1)) / PAGE_SIZE
;
1570 advisory_read_ext(vp
, filesize
, f_offset
, size
, callback
, callback_arg
, bflag
);
1572 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 49)) | DBG_FUNC_END
,
1573 (int)f_offset
+ size
, pages_in_prefetch
, 0, 1, 0);
1575 return (pages_in_prefetch
);
1581 cluster_read_ahead(vnode_t vp
, struct cl_extent
*extent
, off_t filesize
, struct cl_readahead
*rap
, int (*callback
)(buf_t
, void *), void *callback_arg
,
1586 int size_of_prefetch
;
1590 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 48)) | DBG_FUNC_START
,
1591 (int)extent
->b_addr
, (int)extent
->e_addr
, (int)rap
->cl_lastr
, 0, 0);
1593 if (extent
->b_addr
== rap
->cl_lastr
&& extent
->b_addr
== extent
->e_addr
) {
1594 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 48)) | DBG_FUNC_END
,
1595 rap
->cl_ralen
, (int)rap
->cl_maxra
, (int)rap
->cl_lastr
, 0, 0);
1598 if (rap
->cl_lastr
== -1 || (extent
->b_addr
!= rap
->cl_lastr
&& extent
->b_addr
!= (rap
->cl_lastr
+ 1))) {
1602 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 48)) | DBG_FUNC_END
,
1603 rap
->cl_ralen
, (int)rap
->cl_maxra
, (int)rap
->cl_lastr
, 1, 0);
1607 max_prefetch
= MAX_PREFETCH(vp
, cluster_max_io_size(vp
->v_mount
, CL_READ
));
1609 if (extent
->e_addr
< rap
->cl_maxra
) {
1610 if ((rap
->cl_maxra
- extent
->e_addr
) > ((max_prefetch
/ PAGE_SIZE
) / 4)) {
1612 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 48)) | DBG_FUNC_END
,
1613 rap
->cl_ralen
, (int)rap
->cl_maxra
, (int)rap
->cl_lastr
, 2, 0);
1617 r_addr
= max(extent
->e_addr
, rap
->cl_maxra
) + 1;
1618 f_offset
= (off_t
)(r_addr
* PAGE_SIZE_64
);
1620 size_of_prefetch
= 0;
1622 ubc_range_op(vp
, f_offset
, f_offset
+ PAGE_SIZE_64
, UPL_ROP_PRESENT
, &size_of_prefetch
);
1624 if (size_of_prefetch
) {
1625 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 48)) | DBG_FUNC_END
,
1626 rap
->cl_ralen
, (int)rap
->cl_maxra
, (int)rap
->cl_lastr
, 3, 0);
1629 if (f_offset
< filesize
) {
1630 daddr64_t read_size
;
1632 rap
->cl_ralen
= rap
->cl_ralen
? min(max_prefetch
/ PAGE_SIZE
, rap
->cl_ralen
<< 1) : 1;
1634 read_size
= (extent
->e_addr
+ 1) - extent
->b_addr
;
1636 if (read_size
> rap
->cl_ralen
) {
1637 if (read_size
> max_prefetch
/ PAGE_SIZE
)
1638 rap
->cl_ralen
= max_prefetch
/ PAGE_SIZE
;
1640 rap
->cl_ralen
= read_size
;
1642 size_of_prefetch
= cluster_read_prefetch(vp
, f_offset
, rap
->cl_ralen
* PAGE_SIZE
, filesize
, callback
, callback_arg
, bflag
);
1644 if (size_of_prefetch
)
1645 rap
->cl_maxra
= (r_addr
+ size_of_prefetch
) - 1;
1647 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 48)) | DBG_FUNC_END
,
1648 rap
->cl_ralen
, (int)rap
->cl_maxra
, (int)rap
->cl_lastr
, 4, 0);
1653 cluster_pageout(vnode_t vp
, upl_t upl
, upl_offset_t upl_offset
, off_t f_offset
,
1654 int size
, off_t filesize
, int flags
)
1656 return cluster_pageout_ext(vp
, upl
, upl_offset
, f_offset
, size
, filesize
, flags
, NULL
, NULL
);
1662 cluster_pageout_ext(vnode_t vp
, upl_t upl
, upl_offset_t upl_offset
, off_t f_offset
,
1663 int size
, off_t filesize
, int flags
, int (*callback
)(buf_t
, void *), void *callback_arg
)
1670 if (vp
->v_mount
->mnt_kern_flag
& MNTK_VIRTUALDEV
)
1672 * if we know we're issuing this I/O to a virtual device (i.e. disk image)
1673 * then we don't want to enforce this throttle... if we do, we can
1674 * potentially deadlock since we're stalling the pageout thread at a time
1675 * when the disk image might need additional memory (which won't be available
1676 * if the pageout thread can't run)... instead we'll just depend on the throttle
1677 * that the pageout thread now has in place to deal with external files
1679 local_flags
= CL_PAGEOUT
;
1681 local_flags
= CL_PAGEOUT
| CL_THROTTLE
;
1683 if ((flags
& UPL_IOSYNC
) == 0)
1684 local_flags
|= CL_ASYNC
;
1685 if ((flags
& UPL_NOCOMMIT
) == 0)
1686 local_flags
|= CL_COMMIT
;
1687 if ((flags
& UPL_KEEPCACHED
))
1688 local_flags
|= CL_KEEPCACHED
;
1691 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 52)) | DBG_FUNC_NONE
,
1692 (int)f_offset
, size
, (int)filesize
, local_flags
, 0);
1695 * If they didn't specify any I/O, then we are done...
1696 * we can't issue an abort because we don't know how
1697 * big the upl really is
1702 if (vp
->v_mount
->mnt_flag
& MNT_RDONLY
) {
1703 if (local_flags
& CL_COMMIT
)
1704 ubc_upl_abort_range(upl
, upl_offset
, size
, UPL_ABORT_FREE_ON_EMPTY
);
1708 * can't page-in from a negative offset
1709 * or if we're starting beyond the EOF
1710 * or if the file offset isn't page aligned
1711 * or the size requested isn't a multiple of PAGE_SIZE
1713 if (f_offset
< 0 || f_offset
>= filesize
||
1714 (f_offset
& PAGE_MASK_64
) || (size
& PAGE_MASK
)) {
1715 if (local_flags
& CL_COMMIT
)
1716 ubc_upl_abort_range(upl
, upl_offset
, size
, UPL_ABORT_FREE_ON_EMPTY
);
1719 max_size
= filesize
- f_offset
;
1721 if (size
< max_size
)
1726 rounded_size
= (io_size
+ (PAGE_SIZE
- 1)) & ~PAGE_MASK
;
1728 if (size
> rounded_size
) {
1729 if (local_flags
& CL_COMMIT
)
1730 ubc_upl_abort_range(upl
, upl_offset
+ rounded_size
, size
- rounded_size
,
1731 UPL_ABORT_FREE_ON_EMPTY
);
1733 return (cluster_io(vp
, upl
, upl_offset
, f_offset
, io_size
,
1734 local_flags
, (buf_t
)NULL
, (struct clios
*)NULL
, callback
, callback_arg
));
1739 cluster_pagein(vnode_t vp
, upl_t upl
, upl_offset_t upl_offset
, off_t f_offset
,
1740 int size
, off_t filesize
, int flags
)
1742 return cluster_pagein_ext(vp
, upl
, upl_offset
, f_offset
, size
, filesize
, flags
, NULL
, NULL
);
1747 cluster_pagein_ext(vnode_t vp
, upl_t upl
, upl_offset_t upl_offset
, off_t f_offset
,
1748 int size
, off_t filesize
, int flags
, int (*callback
)(buf_t
, void *), void *callback_arg
)
1754 int local_flags
= 0;
1756 if (upl
== NULL
|| size
< 0)
1757 panic("cluster_pagein: NULL upl passed in");
1759 if ((flags
& UPL_IOSYNC
) == 0)
1760 local_flags
|= CL_ASYNC
;
1761 if ((flags
& UPL_NOCOMMIT
) == 0)
1762 local_flags
|= CL_COMMIT
;
1763 if (flags
& UPL_IOSTREAMING
)
1764 local_flags
|= CL_IOSTREAMING
;
1767 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 56)) | DBG_FUNC_NONE
,
1768 (int)f_offset
, size
, (int)filesize
, local_flags
, 0);
1771 * can't page-in from a negative offset
1772 * or if we're starting beyond the EOF
1773 * or if the file offset isn't page aligned
1774 * or the size requested isn't a multiple of PAGE_SIZE
1776 if (f_offset
< 0 || f_offset
>= filesize
||
1777 (f_offset
& PAGE_MASK_64
) || (size
& PAGE_MASK
) || (upl_offset
& PAGE_MASK
)) {
1778 if (local_flags
& CL_COMMIT
)
1779 ubc_upl_abort_range(upl
, upl_offset
, size
, UPL_ABORT_FREE_ON_EMPTY
| UPL_ABORT_ERROR
);
1782 max_size
= filesize
- f_offset
;
1784 if (size
< max_size
)
1789 rounded_size
= (io_size
+ (PAGE_SIZE
- 1)) & ~PAGE_MASK
;
1791 if (size
> rounded_size
&& (local_flags
& CL_COMMIT
))
1792 ubc_upl_abort_range(upl
, upl_offset
+ rounded_size
,
1793 size
- rounded_size
, UPL_ABORT_FREE_ON_EMPTY
| UPL_ABORT_ERROR
);
1795 retval
= cluster_io(vp
, upl
, upl_offset
, f_offset
, io_size
,
1796 local_flags
| CL_READ
| CL_PAGEIN
, (buf_t
)NULL
, (struct clios
*)NULL
, callback
, callback_arg
);
1803 cluster_bp(buf_t bp
)
1805 return cluster_bp_ext(bp
, NULL
, NULL
);
1810 cluster_bp_ext(buf_t bp
, int (*callback
)(buf_t
, void *), void *callback_arg
)
1815 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 19)) | DBG_FUNC_START
,
1816 bp
, (int)bp
->b_lblkno
, bp
->b_bcount
, bp
->b_flags
, 0);
1818 if (bp
->b_flags
& B_READ
)
1819 flags
= CL_ASYNC
| CL_READ
;
1822 if (bp
->b_flags
& B_PASSIVE
)
1823 flags
|= CL_PASSIVE
;
1825 f_offset
= ubc_blktooff(bp
->b_vp
, bp
->b_lblkno
);
1827 return (cluster_io(bp
->b_vp
, bp
->b_upl
, 0, f_offset
, bp
->b_bcount
, flags
, bp
, (struct clios
*)NULL
, callback
, callback_arg
));
1833 cluster_write(vnode_t vp
, struct uio
*uio
, off_t oldEOF
, off_t newEOF
, off_t headOff
, off_t tailOff
, int xflags
)
1835 return cluster_write_ext(vp
, uio
, oldEOF
, newEOF
, headOff
, tailOff
, xflags
, NULL
, NULL
);
1840 cluster_write_ext(vnode_t vp
, struct uio
*uio
, off_t oldEOF
, off_t newEOF
, off_t headOff
, off_t tailOff
,
1841 int xflags
, int (*callback
)(buf_t
, void *), void *callback_arg
)
1843 user_ssize_t cur_resid
;
1848 int write_type
= IO_COPY
;
1849 u_int32_t write_length
;
1853 if (flags
& IO_PASSIVE
)
1858 if (vp
->v_flag
& VNOCACHE_DATA
)
1859 flags
|= IO_NOCACHE
;
1864 * this call is being made to zero-fill some range in the file
1866 retval
= cluster_write_copy(vp
, NULL
, (u_int32_t
)0, oldEOF
, newEOF
, headOff
, tailOff
, flags
, callback
, callback_arg
);
1871 * do a write through the cache if one of the following is true....
1872 * NOCACHE is not true and
1873 * the uio request doesn't target USERSPACE
1874 * otherwise, find out if we want the direct or contig variant for
1875 * the first vector in the uio request
1877 if ( (flags
& IO_NOCACHE
) && UIO_SEG_IS_USER_SPACE(uio
->uio_segflg
) )
1878 retval
= cluster_io_type(uio
, &write_type
, &write_length
, MIN_DIRECT_WRITE_SIZE
);
1880 if ( (flags
& (IO_TAILZEROFILL
| IO_HEADZEROFILL
)) && write_type
== IO_DIRECT
)
1882 * must go through the cached variant in this case
1884 write_type
= IO_COPY
;
1886 while ((cur_resid
= uio_resid(uio
)) && uio
->uio_offset
< newEOF
&& retval
== 0) {
1888 switch (write_type
) {
1892 * make sure the uio_resid isn't too big...
1893 * internally, we want to handle all of the I/O in
1894 * chunk sizes that fit in a 32 bit int
1896 if (cur_resid
> (user_ssize_t
)(MAX_IO_REQUEST_SIZE
)) {
1898 * we're going to have to call cluster_write_copy
1901 * only want the last call to cluster_write_copy to
1902 * have the IO_TAILZEROFILL flag set and only the
1903 * first call should have IO_HEADZEROFILL
1905 zflags
= flags
& ~IO_TAILZEROFILL
;
1906 flags
&= ~IO_HEADZEROFILL
;
1908 write_length
= MAX_IO_REQUEST_SIZE
;
1911 * last call to cluster_write_copy
1915 write_length
= (u_int32_t
)cur_resid
;
1917 retval
= cluster_write_copy(vp
, uio
, write_length
, oldEOF
, newEOF
, headOff
, tailOff
, zflags
, callback
, callback_arg
);
1921 zflags
= flags
& ~(IO_TAILZEROFILL
| IO_HEADZEROFILL
);
1923 if (flags
& IO_HEADZEROFILL
) {
1925 * only do this once per request
1927 flags
&= ~IO_HEADZEROFILL
;
1929 retval
= cluster_write_copy(vp
, (struct uio
*)0, (u_int32_t
)0, (off_t
)0, uio
->uio_offset
,
1930 headOff
, (off_t
)0, zflags
| IO_HEADZEROFILL
| IO_SYNC
, callback
, callback_arg
);
1934 retval
= cluster_write_contig(vp
, uio
, newEOF
, &write_type
, &write_length
, callback
, callback_arg
, bflag
);
1936 if (retval
== 0 && (flags
& IO_TAILZEROFILL
) && uio_resid(uio
) == 0) {
1938 * we're done with the data from the user specified buffer(s)
1939 * and we've been requested to zero fill at the tail
1940 * treat this as an IO_HEADZEROFILL which doesn't require a uio
1941 * by rearranging the args and passing in IO_HEADZEROFILL
1943 retval
= cluster_write_copy(vp
, (struct uio
*)0, (u_int32_t
)0, (off_t
)0, tailOff
, uio
->uio_offset
,
1944 (off_t
)0, zflags
| IO_HEADZEROFILL
| IO_SYNC
, callback
, callback_arg
);
1950 * cluster_write_direct is never called with IO_TAILZEROFILL || IO_HEADZEROFILL
1952 retval
= cluster_write_direct(vp
, uio
, oldEOF
, newEOF
, &write_type
, &write_length
, flags
, callback
, callback_arg
);
1956 retval
= cluster_io_type(uio
, &write_type
, &write_length
, MIN_DIRECT_WRITE_SIZE
);
1960 * in case we end up calling cluster_write_copy (from cluster_write_direct)
1961 * multiple times to service a multi-vector request that is not aligned properly
1962 * we need to update the oldEOF so that we
1963 * don't zero-fill the head of a page if we've successfully written
1964 * data to that area... 'cluster_write_copy' will zero-fill the head of a
1965 * page that is beyond the oldEOF if the write is unaligned... we only
1966 * want that to happen for the very first page of the cluster_write,
1967 * NOT the first page of each vector making up a multi-vector write.
1969 if (uio
->uio_offset
> oldEOF
)
1970 oldEOF
= uio
->uio_offset
;
static int
cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
		     int flags, int (*callback)(buf_t, void *), void *callback_arg)
{
	upl_page_info_t	 *pl;
	upl_t		 upl;
	vm_offset_t	 upl_offset;
	vm_offset_t	 vector_upl_offset = 0;
	u_int32_t	 io_req_size;
	u_int32_t	 offset_in_file;
	u_int32_t	 offset_in_iovbase;
	u_int32_t	 io_size;
	int		 io_flag;
	upl_size_t	 upl_size, vector_upl_size = 0;
	vm_size_t	 upl_needed_size;
	mach_msg_type_number_t	pages_in_pl;
	int		 upl_flags;
	kern_return_t	 kret;
	mach_msg_type_number_t	i;
	int		 force_data_sync;
	int		 retval = 0;
	int		 first_IO = 1;
	struct clios	 iostate;
	user_addr_t	 iov_base;
	u_int32_t	 mem_alignment_mask;
	u_int32_t	 devblocksize;
	u_int32_t	 max_upl_size;

	u_int32_t	 vector_upl_iosize = 0;
	int		 issueVectorUPL = 0, useVectorUPL = (uio->uio_iovcnt > 1);
	off_t		 v_upl_uio_offset = 0;
	int		 vector_upl_index = 0;
	upl_t		 vector_upl = NULL;

	/*
	 * When we enter this routine, we know
	 *  -- the resid will not exceed iov_len
	 */
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
		     (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);

	max_upl_size = cluster_max_io_size(vp->v_mount, CL_WRITE);

	io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE | CL_DIRECT_IO;

	if (flags & IO_PASSIVE)
		io_flag |= CL_PASSIVE;

	iostate.io_completed = 0;
	iostate.io_issued = 0;
	iostate.io_error = 0;
	iostate.io_wanted = 0;

	mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
	devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;

	if (devblocksize == 1) {
		/*
		 * the AFP client advertises a devblocksize of 1
		 * however, its BLOCKMAP routine maps to physical
		 * blocks that are PAGE_SIZE in size...
		 * therefore we can't ask for I/Os that aren't page aligned
		 * or aren't multiples of PAGE_SIZE in size
		 * by setting devblocksize to PAGE_SIZE, we re-instate
		 * the old behavior we had before the mem_alignment_mask
		 * changes went in...
		 */
		devblocksize = PAGE_SIZE;
	}

next_dwrite:
	io_req_size = *write_length;
	iov_base = uio_curriovbase(uio);

	offset_in_file = (u_int32_t)uio->uio_offset & PAGE_MASK;
	offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;

	if (offset_in_file || offset_in_iovbase) {
		/*
		 * one of the 2 important offsets is misaligned
		 * so fire an I/O through the cache for this entire vector
		 */
		goto wait_for_dwrites;
	}
	if (iov_base & (devblocksize - 1)) {
		/*
		 * the offset in memory must be on a device block boundary
		 * so that we can guarantee that we can generate an
		 * I/O that ends on a page boundary in cluster_io
		 */
		goto wait_for_dwrites;
	}

	while (io_req_size >= PAGE_SIZE && uio->uio_offset < newEOF && retval == 0) {

		if (first_IO) {
			cluster_syncup(vp, newEOF, callback, callback_arg);
			first_IO = 0;
		}
		io_size  = io_req_size & ~PAGE_MASK;
		iov_base = uio_curriovbase(uio);

		if (io_size > max_upl_size)
			io_size = max_upl_size;

		if (useVectorUPL && (iov_base & PAGE_MASK)) {
			/*
			 * We have an iov_base that's not page-aligned.
			 * Issue all I/O's that have been collected within
			 * this Vectored UPL.
			 */
			if (vector_upl_index) {
				retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
				reset_vector_run_state();
			}
			/*
			 * After this point, if we are using the Vector UPL path and the base is
			 * not page-aligned then the UPL with that base will be the first in the vector UPL.
			 */
		}

		upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
		upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
			     (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);

		for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
			pages_in_pl = 0;
			upl_size = upl_needed_size;
			upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
				    UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;

			kret = vm_map_get_upl(current_map(),
					      (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
					      &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, force_data_sync);

			if (kret != KERN_SUCCESS) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
					     0, 0, 0, kret, 0);
				/*
				 * failed to get pagelist
				 *
				 * we may have already spun some portion of this request
				 * off as async requests... we need to wait for the I/O
				 * to complete before returning
				 */
				goto wait_for_dwrites;
			}
			pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
			pages_in_pl = upl_size / PAGE_SIZE;

			for (i = 0; i < pages_in_pl; i++) {
				if (!upl_valid_page(pl, i))
					break;
			}
			if (i == pages_in_pl)
				break;

			/*
			 * didn't get all the pages back that we
			 * needed... release this upl and try again
			 */
			ubc_upl_abort(upl, 0);
		}
		if (force_data_sync >= 3) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
				     i, pages_in_pl, upl_size, kret, 0);
			/*
			 * for some reason, we couldn't acquire a hold on all
			 * the pages needed in the user's address space
			 *
			 * we may have already spun some portion of this request
			 * off as async requests... we need to wait for the I/O
			 * to complete before returning
			 */
			goto wait_for_dwrites;
		}

		/*
		 * Consider the possibility that upl_size wasn't satisfied.
		 */
		if (upl_size < upl_needed_size) {
			if (upl_size && upl_offset == 0)
				io_size = upl_size;
			else
				io_size = 0;
		}
		if (io_size == 0) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
				     (int)upl_offset, upl_size, (int)iov_base, io_size, 0);

			ubc_upl_abort(upl, 0);
			/*
			 * we may have already spun some portion of this request
			 * off as async requests... we need to wait for the I/O
			 * to complete before returning
			 */
			goto wait_for_dwrites;
		}

		if (useVectorUPL) {
			vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
			if (end_off)
				issueVectorUPL = 1;
			/*
			 * After this point, if we are using a vector UPL, then
			 * either all the UPL elements end on a page boundary OR
			 * this UPL is the last element because it does not end
			 * on a page boundary.
			 */
		}

		/*
		 * Now look for pages already in the cache
		 * and throw them away.
		 * uio->uio_offset is page aligned within the file
		 * io_size is a multiple of PAGE_SIZE
		 */
		ubc_range_op(vp, uio->uio_offset, uio->uio_offset + io_size, UPL_ROP_DUMP, NULL);

		/*
		 * we want to push out these writes asynchronously so that we can overlap
		 * the preparation of the next I/O
		 * if there are already too many outstanding writes
		 * wait until some complete before issuing the next
		 */
		if (iostate.io_issued > iostate.io_completed) {

			lck_mtx_lock(cl_mtxp);

			while ((iostate.io_issued - iostate.io_completed) > (max_upl_size * IO_SCALE(vp, 2))) {

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
					     iostate.io_issued, iostate.io_completed, max_upl_size * IO_SCALE(vp, 2), 0, 0);

				iostate.io_wanted = 1;
				msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_direct", NULL);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
					     iostate.io_issued, iostate.io_completed, max_upl_size * IO_SCALE(vp, 2), 0, 0);
			}
			lck_mtx_unlock(cl_mtxp);
		}
		if (iostate.io_error) {
			/*
			 * one of the earlier writes we issued ran into a hard error
			 * don't issue any more writes, cleanup the UPL
			 * that was just created but not used, then
			 * go wait for all writes that are part of this stream
			 * to complete before returning the error to the caller
			 */
			ubc_upl_abort(upl, 0);

			goto wait_for_dwrites;
		}

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
			     (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);

		if (!useVectorUPL)
			retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
					    io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
		else {
			if (!vector_upl_index) {
				vector_upl = vector_upl_create(upl_offset);
				v_upl_uio_offset = uio->uio_offset;
				vector_upl_offset = upl_offset;
			}

			vector_upl_set_subupl(vector_upl, upl, upl_size);
			vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
			vector_upl_index++;
			vector_upl_iosize += io_size;
			vector_upl_size += upl_size;

			if (issueVectorUPL || vector_upl_index == MAX_VECTOR_UPL_ELEMENTS || vector_upl_size >= MAX_VECTOR_UPL_SIZE) {
				retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
				reset_vector_run_state();
			}
		}

		/*
		 * update the uio structure to
		 * reflect the I/O that we just issued
		 */
		uio_update(uio, (user_size_t)io_size);

		/*
		 * in case we end up calling through to cluster_write_copy to finish
		 * the tail of this request, we need to update the oldEOF so that we
		 * don't zero-fill the head of a page if we've successfully written
		 * data to that area... 'cluster_write_copy' will zero-fill the head of a
		 * page that is beyond the oldEOF if the write is unaligned... we only
		 * want that to happen for the very first page of the cluster_write,
		 * NOT the first page of each vector making up a multi-vector write.
		 */
		if (uio->uio_offset > oldEOF)
			oldEOF = uio->uio_offset;

		io_req_size -= io_size;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
			     (int)upl_offset, (int)uio->uio_offset, io_req_size, retval, 0);

	}
	if (retval == 0 && iostate.io_error == 0 && io_req_size == 0) {

		retval = cluster_io_type(uio, write_type, write_length, MIN_DIRECT_WRITE_SIZE);

		if (retval == 0 && *write_type == IO_DIRECT) {

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_NONE,
				     (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);

			goto next_dwrite;
		}
	}

wait_for_dwrites:

	if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
		retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
		reset_vector_run_state();
	}

	if (iostate.io_issued > iostate.io_completed) {
		/*
		 * make sure all async writes issued as part of this stream
		 * have completed before we return
		 */
		lck_mtx_lock(cl_mtxp);

		while (iostate.io_issued != iostate.io_completed) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
				     iostate.io_issued, iostate.io_completed, 0, 0, 0);

			iostate.io_wanted = 1;
			msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_direct", NULL);

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
				     iostate.io_issued, iostate.io_completed, 0, 0, 0);
		}
		lck_mtx_unlock(cl_mtxp);
	}
	if (iostate.io_error)
		retval = iostate.io_error;

	if (io_req_size && retval == 0) {
		/*
		 * we couldn't handle the tail of this request in DIRECT mode
		 * so fire it through the copy path
		 *
		 * note that flags will never have IO_HEADZEROFILL or IO_TAILZEROFILL set
		 * so we can just pass 0 in for the headOff and tailOff
		 */
		if (uio->uio_offset > oldEOF)
			oldEOF = uio->uio_offset;

		retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg);

		*write_type = IO_UNKNOWN;
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
		     (int)uio->uio_offset, io_req_size, retval, 4, 0);

	return (retval);
}
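
/*
 * Illustrative sketch (not compiled): the throttling idiom used throughout the
 * direct and contig paths in this file.  Issued vs. completed byte counts in a
 * 'struct clios' bound how much async I/O may be outstanding; the issuing thread
 * sleeps on io_wanted until the completion side wakes it.  The helper name and the
 * 'limit' parameter are hypothetical; the locking and msleep usage mirror the
 * code above.
 */
#if 0
static void
wait_for_io_window(struct clios *iostate, u_int32_t limit, const char *who)
{
	lck_mtx_lock(cl_mtxp);

	while ((iostate->io_issued - iostate->io_completed) > limit) {
		/* ask the completion path for a wakeup, then block */
		iostate->io_wanted = 1;
		msleep((caddr_t)&iostate->io_wanted, cl_mtxp, PRIBIO + 1, who, NULL);
	}
	lck_mtx_unlock(cl_mtxp);
}
#endif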
static int
cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, u_int32_t *write_length,
		     int (*callback)(buf_t, void *), void *callback_arg, int bflag)
{
	upl_page_info_t *pl;
	addr64_t	 src_paddr = 0;
	upl_t		 upl[MAX_VECTS];
	vm_offset_t	 upl_offset;
	u_int32_t	 tail_size = 0;
	u_int32_t	 io_size;
	u_int32_t	 xsize;
	upl_size_t	 upl_size;
	vm_size_t	 upl_needed_size;
	mach_msg_type_number_t	pages_in_pl;
	int		 upl_flags;
	kern_return_t	 kret;
	struct clios	 iostate;
	int		 error = 0;
	int		 cur_upl = 0;
	int		 num_upl = 0;
	int		 n;
	user_addr_t	 iov_base;
	u_int32_t	 devblocksize;
	u_int32_t	 mem_alignment_mask;

	/*
	 * When we enter this routine, we know
	 *  -- the io_req_size will not exceed iov_len
	 *  -- the target address is physically contiguous
	 */
	cluster_syncup(vp, newEOF, callback, callback_arg);

	devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
	mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;

	iostate.io_completed = 0;
	iostate.io_issued = 0;
	iostate.io_error = 0;
	iostate.io_wanted = 0;

next_cwrite:
	io_size = *write_length;

	iov_base = uio_curriovbase(uio);

	upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
	upl_needed_size = upl_offset + io_size;

	pages_in_pl = 0;
	upl_size = upl_needed_size;
	upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
		    UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;

	kret = vm_map_get_upl(current_map(),
			      (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
			      &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);

	if (kret != KERN_SUCCESS) {
		/*
		 * failed to get pagelist
		 */
		error = EINVAL;
		goto wait_for_cwrites;
	}
	num_upl++;

	/*
	 * Consider the possibility that upl_size wasn't satisfied.
	 */
	if (upl_size < upl_needed_size) {
		/*
		 * This is a failure in the physical memory case.
		 */
		error = EINVAL;
		goto wait_for_cwrites;
	}
	pl = ubc_upl_pageinfo(upl[cur_upl]);

	src_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)upl_offset;

	while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
		u_int32_t head_size;

		head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));

		if (head_size > io_size)
			head_size = io_size;

		error = cluster_align_phys_io(vp, uio, src_paddr, head_size, 0, callback, callback_arg);

		if (error)
			goto wait_for_cwrites;

		upl_offset += head_size;
		src_paddr  += head_size;
		io_size    -= head_size;

		iov_base   += head_size;
	}
	if ((u_int32_t)iov_base & mem_alignment_mask) {
		/*
		 * request doesn't set up on a memory boundary
		 * the underlying DMA engine can handle...
		 * return an error instead of going through
		 * the slow copy path since the intent of this
		 * path is direct I/O from device memory
		 */
		error = EINVAL;
		goto wait_for_cwrites;
	}

	tail_size = io_size & (devblocksize - 1);
	io_size  -= tail_size;

	while (io_size && error == 0) {

		if (io_size > MAX_IO_CONTIG_SIZE)
			xsize = MAX_IO_CONTIG_SIZE;
		else
			xsize = io_size;
		/*
		 * request asynchronously so that we can overlap
		 * the preparation of the next I/O... we'll do
		 * the commit after all the I/O has completed
		 * since it's all issued against the same UPL
		 * if there are already too many outstanding writes
		 * wait until some have completed before issuing the next
		 */
		if (iostate.io_issued > iostate.io_completed) {
			lck_mtx_lock(cl_mtxp);

			while ((iostate.io_issued - iostate.io_completed) > (MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2))) {

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
					     iostate.io_issued, iostate.io_completed, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), 0, 0);

				iostate.io_wanted = 1;
				msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_contig", NULL);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
					     iostate.io_issued, iostate.io_completed, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), 0, 0);
			}
			lck_mtx_unlock(cl_mtxp);
		}
		if (iostate.io_error) {
			/*
			 * one of the earlier writes we issued ran into a hard error
			 * don't issue any more writes...
			 * go wait for all writes that are part of this stream
			 * to complete before returning the error to the caller
			 */
			goto wait_for_cwrites;
		}
		/*
		 * issue an asynchronous write to cluster_io
		 */
		error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset,
				   xsize, CL_DEV_MEMORY | CL_ASYNC | bflag, (buf_t)NULL, (struct clios *)&iostate, callback, callback_arg);

		if (error == 0) {
			/*
			 * The cluster_io write completed successfully,
			 * update the uio structure
			 */
			uio_update(uio, (user_size_t)xsize);

			upl_offset += xsize;
			src_paddr  += xsize;
			io_size    -= xsize;
		}
	}
	if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS) {

		error = cluster_io_type(uio, write_type, write_length, 0);

		if (error == 0 && *write_type == IO_CONTIG) {
			cur_upl++;
			goto next_cwrite;
		}
	} else
		*write_type = IO_UNKNOWN;

wait_for_cwrites:
	/*
	 * make sure all async writes that are part of this stream
	 * have completed before we proceed
	 */
	if (iostate.io_issued > iostate.io_completed) {

		lck_mtx_lock(cl_mtxp);

		while (iostate.io_issued != iostate.io_completed) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
				     iostate.io_issued, iostate.io_completed, 0, 0, 0);

			iostate.io_wanted = 1;
			msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_contig", NULL);

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
				     iostate.io_issued, iostate.io_completed, 0, 0, 0);
		}
		lck_mtx_unlock(cl_mtxp);
	}
	if (iostate.io_error)
		error = iostate.io_error;

	if (error == 0 && tail_size)
		error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg);

	for (n = 0; n < num_upl; n++)
		/*
		 * just release our hold on each physically contiguous
		 * region without changing any state
		 */
		ubc_upl_abort(upl[n], 0);

	return (error);
}
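
/*
 * Illustrative sketch (not compiled): how the contig path above carves a request
 * into an unaligned head, a device-block-aligned body, and a leftover tail.  The
 * head and tail go through cluster_align_phys_io; only the aligned body is issued
 * as direct device-memory I/O.  The helper name and out-parameters are hypothetical.
 */
#if 0
static void
split_for_devblocks(off_t file_offset, u_int32_t io_size, u_int32_t devblocksize,
		    u_int32_t *head, u_int32_t *body, u_int32_t *tail)
{
	u_int32_t head_size = 0;

	if (file_offset & (devblocksize - 1)) {
		/* bytes needed to reach the next device block boundary */
		head_size = devblocksize - (u_int32_t)(file_offset & (devblocksize - 1));
		if (head_size > io_size)
			head_size = io_size;
	}
	*head = head_size;
	*tail = (io_size - head_size) & (devblocksize - 1);
	*body = io_size - head_size - *tail;
}
#endif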
/*
 * need to avoid a race between an msync of a range of pages dirtied via mmap
 * vs a filesystem such as HFS deciding to write a 'hole' to disk via cluster_write's
 * zerofill mechanism before it has seen the VNOP_PAGEOUTs for the pages being msync'd
 *
 * we should never force-zero-fill pages that are already valid in the cache...
 * the entire page contains valid data (either from disk, zero-filled or dirtied
 * via an mmap) so we can only do damage by trying to zero-fill
 */
static int
cluster_zero_range(upl_t upl, upl_page_info_t *pl, int flags, int io_offset, off_t zero_off, off_t upl_f_offset, int bytes_to_zero)
{
	int zero_pg_index;
	boolean_t need_cluster_zero = TRUE;

	if ((flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {

		bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64));
		zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64);

		if (upl_valid_page(pl, zero_pg_index)) {
			/*
			 * never force zero valid pages - dirty or clean
			 * we'll leave these in the UPL for cluster_write_copy to deal with
			 */
			need_cluster_zero = FALSE;
		}
	}
	if (need_cluster_zero == TRUE)
		cluster_zero(upl, io_offset, bytes_to_zero, NULL);

	return (bytes_to_zero);
}
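
/*
 * Illustrative sketch (not compiled): the page-relative arithmetic used by
 * cluster_zero_range above.  The zero-fill is clamped so it never crosses the page
 * containing zero_off, and that page's index within the UPL is simply its page
 * distance from the UPL's file offset.  Names are hypothetical.
 */
#if 0
static void
zero_range_geometry(off_t zero_off, off_t upl_f_offset, int bytes_to_zero,
		    int *clamped, int *pg_index)
{
	/* never zero past the end of the page that zero_off falls in */
	*clamped  = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64));
	/* which page of the UPL does zero_off land in */
	*pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64);
}
#endif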
static int
cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, off_t headOff,
		   off_t tailOff, int flags, int (*callback)(buf_t, void *), void *callback_arg)
{
	upl_page_info_t *pl;
	upl_t		 upl;
	vm_offset_t	 upl_offset = 0;
	vm_size_t	 upl_size;
	off_t		 upl_f_offset;
	int		 pages_in_upl;
	int		 start_offset;
	int		 xfer_resid;
	int		 io_size;
	int		 io_offset;
	int		 bytes_to_zero;
	int		 bytes_to_move;
	kern_return_t	 kret;
	int		 retval = 0;
	int		 io_resid;
	long long	 total_size;
	long long	 zero_cnt;
	off_t		 zero_off;
	long long	 zero_cnt1;
	off_t		 zero_off1;
	struct cl_extent cl;
	struct cl_writebehind *wbp;
	int		 bflag;
	u_int		 max_cluster_pgcount;
	u_int		 max_io_size;

	if (uio) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
			     (int)uio->uio_offset, io_req_size, (int)oldEOF, (int)newEOF, 0);

		io_resid = io_req_size;
	} else {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
			     0, 0, (int)oldEOF, (int)newEOF, 0);

		io_resid = 0;
	}
	if (flags & IO_PASSIVE)
		bflag = CL_PASSIVE;
	else
		bflag = 0;

	zero_cnt  = 0;
	zero_cnt1 = 0;
	zero_off  = 0;
	zero_off1 = 0;

	max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
	max_io_size = cluster_max_io_size(vp->v_mount, CL_WRITE);

	if (flags & IO_HEADZEROFILL) {
		/*
		 * some filesystems (HFS is one) don't support unallocated holes within a file...
		 * so we zero fill the intervening space between the old EOF and the offset
		 * where the next chunk of real data begins.... ftruncate will also use this
		 * routine to zero fill to the new EOF when growing a file... in this case, the
		 * uio structure will not be provided
		 */
		if (uio) {
			if (headOff < uio->uio_offset) {
				zero_cnt = uio->uio_offset - headOff;
				zero_off = headOff;
			}
		} else if (headOff < newEOF) {
			zero_cnt = newEOF - headOff;
			zero_off = headOff;
		}
	} else {
		if (uio && uio->uio_offset > oldEOF) {
			zero_off = uio->uio_offset & ~PAGE_MASK_64;

			if (zero_off >= oldEOF) {
				zero_cnt = uio->uio_offset - zero_off;

				flags |= IO_HEADZEROFILL;
			}
		}
	}
	if (flags & IO_TAILZEROFILL) {
		if (uio) {
			zero_off1 = uio->uio_offset + io_req_size;

			if (zero_off1 < tailOff)
				zero_cnt1 = tailOff - zero_off1;
		}
	} else {
		if (uio && newEOF > oldEOF) {
			zero_off1 = uio->uio_offset + io_req_size;

			if (zero_off1 == newEOF && (zero_off1 & PAGE_MASK_64)) {
				zero_cnt1 = PAGE_SIZE_64 - (zero_off1 & PAGE_MASK_64);

				flags |= IO_TAILZEROFILL;
			}
		}
	}
	if (zero_cnt == 0 && uio == (struct uio *) 0) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
			     retval, 0, 0, 0, 0);
		return (0);
	}

	while ((total_size = (io_resid + zero_cnt + zero_cnt1)) && retval == 0) {
		/*
		 * for this iteration of the loop, figure out where our starting point is
		 */
		if (zero_cnt) {
			start_offset = (int)(zero_off & PAGE_MASK_64);
			upl_f_offset = zero_off - start_offset;
		} else if (io_resid) {
			start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
			upl_f_offset = uio->uio_offset - start_offset;
		} else {
			start_offset = (int)(zero_off1 & PAGE_MASK_64);
			upl_f_offset = zero_off1 - start_offset;
		}
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE,
			     (int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0);

		if (total_size > max_io_size)
			total_size = max_io_size;

		cl.b_addr = (daddr64_t)(upl_f_offset / PAGE_SIZE_64);

		if (uio && ((flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0)) {
			/*
			 * assumption... total_size <= io_resid
			 * because IO_HEADZEROFILL and IO_TAILZEROFILL not set
			 */
			if ((start_offset + total_size) > max_io_size)
				total_size = max_io_size - start_offset;
			xfer_resid = total_size;

			retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1);

			if (retval)
				break;

			io_resid    -= (total_size - xfer_resid);
			total_size   = xfer_resid;
			start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
			upl_f_offset = uio->uio_offset - start_offset;

			if (total_size == 0) {
				if (start_offset) {
					/*
					 * the write did not finish on a page boundary
					 * which will leave upl_f_offset pointing to the
					 * beginning of the last page written instead of
					 * the page beyond it... bump it in this case
					 * so that the cluster code records the last page
					 * written as dirty
					 */
					upl_f_offset += PAGE_SIZE_64;
				}
				upl_size = 0;

				goto check_cluster;
			}
		}
		/*
		 * compute the size of the upl needed to encompass
		 * the requested write... limit each call to cluster_io
		 * to the maximum UPL size... cluster_io will clip if
		 * this exceeds the maximum io_size for the device,
		 * make sure to account for
		 * a starting offset that's not page aligned
		 */
		upl_size = (start_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

		if (upl_size > max_io_size)
			upl_size = max_io_size;

		pages_in_upl = upl_size / PAGE_SIZE;
		io_size      = upl_size - start_offset;

		if ((long long)io_size > total_size)
			io_size = total_size;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);

		/*
		 * Gather the pages from the buffer cache.
		 * The UPL_WILL_MODIFY flag lets the UPL subsystem know
		 * that we intend to modify these pages.
		 */
		kret = ubc_create_upl(vp,
				      upl_f_offset,
				      upl_size,
				      &upl,
				      &pl,
				      UPL_SET_LITE | ((uio != NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 0 : UPL_WILL_MODIFY));
		if (kret != KERN_SUCCESS)
			panic("cluster_write_copy: failed to get pagelist");

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END,
			     upl, (int)upl_f_offset, start_offset, 0, 0);

		if (start_offset && upl_f_offset < oldEOF && !upl_valid_page(pl, 0)) {
			int read_size;

			/*
			 * we're starting in the middle of the first page of the upl
			 * and the page isn't currently valid, so we're going to have
			 * to read it in first... this is a synchronous operation
			 */
			read_size = PAGE_SIZE;

			if ((upl_f_offset + read_size) > oldEOF)
				read_size = oldEOF - upl_f_offset;

			retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
					    CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
			if (retval) {
				/*
				 * we had an error during the read which causes us to abort
				 * the current cluster_write request... before we do, we need
				 * to release the rest of the pages in the upl without modifying
				 * their state and mark the failed page in error
				 */
				ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY);

				if (upl_size > PAGE_SIZE)
					ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
					     upl, 0, 0, retval, 0);
				break;
			}
		}
		if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
			/*
			 * the last offset we're writing to in this upl does not end on a page
			 * boundary... if it's not beyond the old EOF, then we'll also need to
			 * pre-read this page in if it isn't already valid
			 */
			upl_offset = upl_size - PAGE_SIZE;

			if ((upl_f_offset + start_offset + io_size) < oldEOF &&
			    !upl_valid_page(pl, upl_offset / PAGE_SIZE)) {
				int read_size;

				read_size = PAGE_SIZE;

				if ((off_t)(upl_f_offset + upl_offset + read_size) > oldEOF)
					read_size = oldEOF - (upl_f_offset + upl_offset);

				retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
						    CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
				if (retval) {
					/*
					 * we had an error during the read which causes us to abort
					 * the current cluster_write request... before we do, we
					 * need to release the rest of the pages in the upl without
					 * modifying their state and mark the failed page in error
					 */
					ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY);

					if (upl_size > PAGE_SIZE)
						ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);

					KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
						     upl, 0, 0, retval, 0);
					break;
				}
			}
		}
		xfer_resid = io_size;
		io_offset = start_offset;

		while (zero_cnt && xfer_resid) {

			if (zero_cnt < (long long)xfer_resid)
				bytes_to_zero = zero_cnt;
			else
				bytes_to_zero = xfer_resid;

			bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off, upl_f_offset, bytes_to_zero);

			xfer_resid -= bytes_to_zero;
			zero_cnt   -= bytes_to_zero;
			zero_off   += bytes_to_zero;
			io_offset  += bytes_to_zero;
		}
		if (xfer_resid && io_resid) {
			u_int32_t io_requested;

			bytes_to_move = min(io_resid, xfer_resid);
			io_requested = bytes_to_move;

			retval = cluster_copy_upl_data(uio, upl, io_offset, (int *)&io_requested);

			if (retval) {
				ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
					     upl, 0, 0, retval, 0);
			} else {
				io_resid   -= bytes_to_move;
				xfer_resid -= bytes_to_move;
				io_offset  += bytes_to_move;
			}
		}
		while (xfer_resid && zero_cnt1 && retval == 0) {

			if (zero_cnt1 < (long long)xfer_resid)
				bytes_to_zero = zero_cnt1;
			else
				bytes_to_zero = xfer_resid;

			bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off1, upl_f_offset, bytes_to_zero);

			xfer_resid -= bytes_to_zero;
			zero_cnt1  -= bytes_to_zero;
			zero_off1  += bytes_to_zero;
			io_offset  += bytes_to_zero;
		}
		if (retval == 0) {
			int cl_index;
			int ret_cluster_try_push;

			io_size += start_offset;

			if ((upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
				/*
				 * if we're extending the file with this write
				 * we'll zero fill the rest of the page so that
				 * if the file gets extended again in such a way as to leave a
				 * hole starting at this EOF, we'll have zero's in the correct spot
				 */
				cluster_zero(upl, io_size, upl_size - io_size, NULL);
			}
			/*
			 * release the upl now if we hold one since...
			 * 1) pages in it may be present in the sparse cluster map
			 *    and may span 2 separate buckets there... if they do and
			 *    we happen to have to flush a bucket to make room and it intersects
			 *    this upl, a deadlock may result on page BUSY
			 * 2) we're delaying the I/O... from this point forward we're just updating
			 *    the cluster state... no need to hold the pages, so commit them
			 * 3) IO_SYNC is set...
			 *    because we had to ask for a UPL that provides currently non-present pages, the
			 *    UPL has been automatically set to clear the dirty flags (both software and hardware)
			 *    upon committing it... this is not the behavior we want since it's possible for
			 *    pages currently present as part of a mapped file to be dirtied while the I/O is in flight.
			 *    we'll pick these pages back up later with the correct behavior specified.
			 * 4) we don't want to hold pages busy in a UPL and then block on the cluster lock... if a flush
			 *    of this vnode is in progress, we will deadlock if the pages being flushed intersect the pages
			 *    we hold since the flushing context is holding the cluster lock.
			 */
			ubc_upl_commit_range(upl, 0, upl_size,
					     UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
check_cluster:
			/*
			 * calculate the last logical block number
			 * that this delayed I/O encompassed
			 */
			cl.e_addr = (daddr64_t)((upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64);

			if (flags & IO_SYNC)
				/*
				 * if the IO_SYNC flag is set then we need to
				 * bypass any clusters and immediately issue
				 * the I/O
				 */
				goto issue_io;

			/*
			 * take the lock to protect our accesses
			 * of the writebehind and sparse cluster state
			 */
			wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);

			if (wbp->cl_scmap) {

				if ( !(flags & IO_NOCACHE)) {
					/*
					 * we've fallen into the sparse
					 * cluster method of delaying dirty pages
					 */
					sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);

					lck_mtx_unlock(&wbp->cl_lockw);

					continue;
				}
				/*
				 * must have done cached writes that fell into
				 * the sparse cluster mechanism... we've switched
				 * to uncached writes on the file, so go ahead
				 * and push whatever's in the sparse map
				 * and switch back to normal clustering
				 */
				wbp->cl_number = 0;

				sparse_cluster_push(&(wbp->cl_scmap), vp, newEOF, PUSH_ALL, callback, callback_arg);
				/*
				 * no clusters of either type present at this point
				 * so just go directly to start_new_cluster since
				 * we know we need to delay this I/O since we've
				 * already released the pages back into the cache
				 * to avoid the deadlock with sparse_cluster_push
				 */
				goto start_new_cluster;
			}
			if (wbp->cl_number == 0)
				/*
				 * no clusters currently present
				 */
				goto start_new_cluster;

			for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
				/*
				 * check each cluster that we currently hold
				 * try to merge some or all of this write into
				 * one or more of the existing clusters... if
				 * any portion of the write remains, start a
				 * new cluster
				 */
				if (cl.b_addr >= wbp->cl_clusters[cl_index].b_addr) {
					/*
					 * the current write starts at or after the current cluster
					 */
					if (cl.e_addr <= (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
						/*
						 * we have a write that fits entirely
						 * within the existing cluster limits
						 */
						if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr)
							/*
							 * update our idea of where the cluster ends
							 */
							wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
						break;
					}
					if (cl.b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
						/*
						 * we have a write that starts in the middle of the current cluster
						 * but extends beyond the cluster's limit... we know this because
						 * of the previous checks
						 * we'll extend the current cluster to the max
						 * and update the b_addr for the current write to reflect that
						 * the head of it was absorbed into this cluster...
						 * note that we'll always have a leftover tail in this case since
						 * full absorption would have occurred in the clause above
						 */
						wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount;

						cl.b_addr = wbp->cl_clusters[cl_index].e_addr;
					}
					/*
					 * we come here for the case where the current write starts
					 * beyond the limit of the existing cluster or we have a leftover
					 * tail after a partial absorption
					 *
					 * in either case, we'll check the remaining clusters before
					 * starting a new one
					 */
				} else {
					/*
					 * the current write starts in front of the cluster we're currently considering
					 */
					if ((wbp->cl_clusters[cl_index].e_addr - cl.b_addr) <= max_cluster_pgcount) {
						/*
						 * we can just merge the new request into
						 * this cluster and leave it in the cache
						 * since the resulting cluster is still
						 * less than the maximum allowable size
						 */
						wbp->cl_clusters[cl_index].b_addr = cl.b_addr;

						if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr) {
							/*
							 * the current write completely
							 * envelops the existing cluster and since
							 * each write is limited to at most max_cluster_pgcount pages
							 * we can just use the start and last blocknos of the write
							 * to generate the cluster limits
							 */
							wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
						}
						break;
					}
					/*
					 * if we were to combine this write with the current cluster
					 * we would exceed the cluster size limit.... so,
					 * let's see if there's any overlap of the new I/O with
					 * the cluster we're currently considering... in fact, we'll
					 * stretch the cluster out to its full limit and see if we
					 * get an intersection with the current write
					 */
					if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount) {
						/*
						 * the current write extends into the proposed cluster
						 * clip the length of the current write after first combining its
						 * tail with the newly shaped cluster
						 */
						wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount;

						cl.e_addr = wbp->cl_clusters[cl_index].b_addr;
					}
					/*
					 * if we get here, there was no way to merge
					 * any portion of this write with this cluster
					 * or we could only merge part of it which
					 * will leave a tail...
					 * we'll check the remaining clusters before starting a new one
					 */
				}
			}
			if (cl_index < wbp->cl_number)
				/*
				 * we found an existing cluster(s) that we
				 * could entirely merge this I/O into
				 */
				goto delay_io;

			if (wbp->cl_number < MAX_CLUSTERS)
				/*
				 * we didn't find an existing cluster to
				 * merge into, but there's room to start
				 * a new one
				 */
				goto start_new_cluster;

			/*
			 * no existing cluster to merge with and no
			 * room to start a new one... we'll try
			 * pushing one of the existing ones... if none of
			 * them are able to be pushed, we'll switch
			 * to the sparse cluster mechanism
			 * cluster_try_push updates cl_number to the
			 * number of remaining clusters... and
			 * returns the number of currently unused clusters
			 */
			ret_cluster_try_push = 0;

			/*
			 * if writes are not deferred, call cluster push immediately
			 */
			if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE)) {
				ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, callback, callback_arg);
			}

			/*
			 * execute following regardless of writes being deferred or not
			 */
			if (ret_cluster_try_push == 0) {
				/*
				 * no more room in the normal cluster mechanism
				 * so let's switch to the more expansive but expensive
				 * sparse mechanism....
				 */
				sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg);
				sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);

				lck_mtx_unlock(&wbp->cl_lockw);

				continue;
			}
			/*
			 * we pushed one cluster successfully, so we must be sequentially writing this file
			 * otherwise, we would have failed and fallen into the sparse cluster support
			 * so let's take the opportunity to push out additional clusters...
			 * this will give us better I/O locality if we're in a copy loop
			 * (i.e. we won't jump back and forth between the read and write points
			 */
			if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE)) {
				while (wbp->cl_number)
					cluster_try_push(wbp, vp, newEOF, 0, callback, callback_arg);
			}

start_new_cluster:
			wbp->cl_clusters[wbp->cl_number].b_addr = cl.b_addr;
			wbp->cl_clusters[wbp->cl_number].e_addr = cl.e_addr;

			wbp->cl_clusters[wbp->cl_number].io_flags = 0;

			if (flags & IO_NOCACHE)
				wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IONOCACHE;

			if (bflag & CL_PASSIVE)
				wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IOPASSIVE;

			wbp->cl_number++;
delay_io:
			lck_mtx_unlock(&wbp->cl_lockw);

			continue;
issue_io:
			/*
			 * we don't hold the lock at this point
			 *
			 * we've already dropped the current upl, so pick it back up with COPYOUT_FROM set
			 * so that we correctly deal with a change in state of the hardware modify bit...
			 * we do this via cluster_push_now... by passing along the IO_SYNC flag, we force
			 * cluster_push_now to wait until all the I/Os have completed... cluster_push_now is also
			 * responsible for generating the correct sized I/O(s)
			 */
			retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg);
		}
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, retval, 0, io_resid, 0, 0);

	return (retval);
}
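
/*
 * Illustrative sketch (not compiled): the core of the merge decision made in
 * cluster_write_copy above.  A delayed-write cluster can fully absorb a new extent
 * only if the union of the two spans stays within max_pgcount pages; otherwise the
 * caller keeps looking at other clusters or starts a new one.  Types mirror
 * 'struct cl_extent'; the helper name is hypothetical.
 */
#if 0
static int
try_merge_extent(struct cl_extent *cluster, struct cl_extent *cl, u_int max_pgcount)
{
	daddr64_t new_b = MIN(cluster->b_addr, cl->b_addr);
	daddr64_t new_e = MAX(cluster->e_addr, cl->e_addr);

	if ((new_e - new_b) > (daddr64_t)max_pgcount)
		return (0);		/* combined extent would exceed the cluster size limit */

	cluster->b_addr = new_b;
	cluster->e_addr = new_e;
	return (1);			/* write fully absorbed by this cluster */
}
#endif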
int
cluster_read(vnode_t vp, struct uio *uio, off_t filesize, int xflags)
{
	return cluster_read_ext(vp, uio, filesize, xflags, NULL, NULL);
}
int
cluster_read_ext(vnode_t vp, struct uio *uio, off_t filesize, int xflags, int (*callback)(buf_t, void *), void *callback_arg)
{
	int		retval = 0;
	int		flags;
	user_ssize_t	cur_resid;
	u_int32_t	io_size;
	u_int32_t	read_length = 0;
	int		read_type = IO_COPY;

	flags = xflags;

	if (vp->v_flag & VNOCACHE_DATA)
		flags |= IO_NOCACHE;
	if ((vp->v_flag & VRAOFF) || speculative_reads_disabled)
		flags |= IO_RAOFF;

	/*
	 * do a read through the cache if one of the following is true....
	 *   NOCACHE is not true
	 *   the uio request doesn't target USERSPACE
	 * otherwise, find out if we want the direct or contig variant for
	 * the first vector in the uio request
	 */
	if ( (flags & IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg) )
		retval = cluster_io_type(uio, &read_type, &read_length, 0);

	while ((cur_resid = uio_resid(uio)) && uio->uio_offset < filesize && retval == 0) {

		switch (read_type) {

		case IO_COPY:
			/*
			 * make sure the uio_resid isn't too big...
			 * internally, we want to handle all of the I/O in
			 * chunk sizes that fit in a 32 bit int
			 */
			if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE))
				io_size = MAX_IO_REQUEST_SIZE;
			else
				io_size = (u_int32_t)cur_resid;

			retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
			break;

		case IO_DIRECT:
			retval = cluster_read_direct(vp, uio, filesize, &read_type, &read_length, flags, callback, callback_arg);
			break;

		case IO_CONTIG:
			retval = cluster_read_contig(vp, uio, filesize, &read_type, &read_length, callback, callback_arg, flags);
			break;

		case IO_UNKNOWN:
			retval = cluster_io_type(uio, &read_type, &read_length, 0);
			break;
		}
	}
	return (retval);
}
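
/*
 * Illustrative sketch (not compiled): the chunking rule applied by the IO_COPY case
 * above.  A large uio residual is serviced in pieces no bigger than
 * MAX_IO_REQUEST_SIZE so that every internal size fits comfortably in a 32 bit int.
 * The helper name is hypothetical.
 */
#if 0
static u_int32_t
next_copy_chunk(user_ssize_t cur_resid)
{
	if (cur_resid > (user_ssize_t)MAX_IO_REQUEST_SIZE)
		return (MAX_IO_REQUEST_SIZE);
	return ((u_int32_t)cur_resid);
}
#endif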
static void
cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference)
{
	int range;
	int abort_flags = UPL_ABORT_FREE_ON_EMPTY;

	if ((range = last_pg - start_pg)) {
		if (take_reference)
			abort_flags |= UPL_ABORT_REFERENCE;

		ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, range * PAGE_SIZE, abort_flags);
	}
}
static int
cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
{
	upl_page_info_t *pl;
	upl_t		 upl;
	vm_offset_t	 upl_offset;
	u_int32_t	 upl_size;
	off_t		 upl_f_offset;
	int		 start_offset;
	int		 start_pg;
	int		 last_pg;
	int		 uio_last = 0;
	int		 pages_in_upl;
	off_t		 max_size;
	off_t		 last_ioread_offset;
	off_t		 last_request_offset;
	kern_return_t	 kret;
	int		 error = 0;
	int		 retval = 0;
	u_int32_t	 size_of_prefetch;
	u_int32_t	 xsize;
	u_int32_t	 io_size;
	u_int32_t	 max_rd_size;
	u_int32_t	 max_io_size;
	u_int32_t	 max_prefetch;
	u_int		 rd_ahead_enabled = 1;
	u_int		 prefetch_enabled = 1;
	struct cl_readahead *	rap;
	struct clios		iostate;
	struct cl_extent	extent;
	int		 bflag;
	int		 take_reference = 1;
	struct uthread	*ut;
	int		 policy = IOPOL_DEFAULT;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START,
		     (int)uio->uio_offset, io_req_size, (int)filesize, flags, 0);

	policy = current_proc()->p_iopol_disk;

	ut = get_bsdthread_info(current_thread());

	if (ut->uu_iopol_disk != IOPOL_DEFAULT)
		policy = ut->uu_iopol_disk;

	if (policy == IOPOL_THROTTLE || (flags & IO_NOCACHE))
		take_reference = 0;

	if (flags & IO_PASSIVE)
		bflag = CL_PASSIVE;
	else
		bflag = 0;

	max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
	max_prefetch = MAX_PREFETCH(vp, max_io_size);
	max_rd_size = max_prefetch;

	last_request_offset = uio->uio_offset + io_req_size;

	if (last_request_offset > filesize)
		last_request_offset = filesize;

	if ((flags & (IO_RAOFF|IO_NOCACHE)) || ((last_request_offset & ~PAGE_MASK_64) == (uio->uio_offset & ~PAGE_MASK_64))) {
		rd_ahead_enabled = 0;
		rap = NULL;
	} else {
		if (cluster_hard_throttle_on(vp, 1)) {
			rd_ahead_enabled = 0;
			prefetch_enabled = 0;

			max_rd_size = HARD_THROTTLE_MAXSIZE;
		} else if (policy == IOPOL_THROTTLE) {
			rd_ahead_enabled = 0;
			prefetch_enabled = 0;
		}
		if ((rap = cluster_get_rap(vp)) == NULL)
			rd_ahead_enabled = 0;
		else {
			extent.b_addr = uio->uio_offset / PAGE_SIZE_64;
			extent.e_addr = (last_request_offset - 1) / PAGE_SIZE_64;
		}
	}
	if (rap != NULL && rap->cl_ralen && (rap->cl_lastr == extent.b_addr || (rap->cl_lastr + 1) == extent.b_addr)) {
		/*
		 * determine if we already have a read-ahead in the pipe courtesy of the
		 * last read systemcall that was issued...
		 * if so, pick up its extent to determine where we should start
		 * with respect to any read-ahead that might be necessary to
		 * garner all the data needed to complete this read systemcall
		 */
		last_ioread_offset = (rap->cl_maxra * PAGE_SIZE_64) + PAGE_SIZE_64;

		if (last_ioread_offset < uio->uio_offset)
			last_ioread_offset = (off_t)0;
		else if (last_ioread_offset > last_request_offset)
			last_ioread_offset = last_request_offset;
	} else
		last_ioread_offset = (off_t)0;

	while (io_req_size && uio->uio_offset < filesize && retval == 0) {

		max_size = filesize - uio->uio_offset;

		if ((off_t)(io_req_size) < max_size)
			io_size = io_req_size;
		else
			io_size = max_size;

		if (!(flags & IO_NOCACHE)) {

			while (io_size) {
				u_int32_t io_resid;
				u_int32_t io_requested;

				/*
				 * if we keep finding the pages we need already in the cache, then
				 * don't bother to call cluster_read_prefetch since it costs CPU cycles
				 * to determine that we have all the pages we need... once we miss in
				 * the cache and have issued an I/O, then we'll assume that we're likely
				 * to continue to miss in the cache and it's to our advantage to try and prefetch
				 */
				if (last_request_offset && last_ioread_offset && (size_of_prefetch = (last_request_offset - last_ioread_offset))) {
					if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) {
						/*
						 * we've already issued I/O for this request and
						 * there's still work to do and
						 * our prefetch stream is running dry, so issue a
						 * pre-fetch I/O... the I/O latency will overlap
						 * with the copying of the data
						 */
						if (size_of_prefetch > max_rd_size)
							size_of_prefetch = max_rd_size;

						size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);

						last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);

						if (last_ioread_offset > last_request_offset)
							last_ioread_offset = last_request_offset;
					}
				}
				/*
				 * limit the size of the copy we're about to do so that
				 * we can notice that our I/O pipe is running dry and
				 * get the next I/O issued before it does go dry
				 */
				if (last_ioread_offset && io_size > (max_io_size / 4))
					io_resid = (max_io_size / 4);
				else
					io_resid = io_size;

				io_requested = io_resid;

				retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_resid, 0, last_ioread_offset == 0 ? take_reference : 0);

				xsize = io_requested - io_resid;

				io_size -= xsize;
				io_req_size -= xsize;

				if (retval || io_resid)
					/*
					 * if we run into a real error or
					 * a page that is not in the cache
					 * we need to leave streaming mode
					 */
					break;

				if (rd_ahead_enabled && (io_size == 0 || last_ioread_offset == last_request_offset)) {
					/*
					 * we're already finished the I/O for this read request
					 * let's see if we should do a read-ahead
					 */
					cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
				}
			}
			if (retval)
				break;
			if (io_size == 0) {
				if (rap != NULL) {
					if (extent.e_addr < rap->cl_lastr)
						rap->cl_maxra = 0;
					rap->cl_lastr = extent.e_addr;
				}
				break;
			}
			/*
			 * recompute max_size since cluster_copy_ubc_data_internal
			 * may have advanced uio->uio_offset
			 */
			max_size = filesize - uio->uio_offset;
		}
		/*
		 * compute the size of the upl needed to encompass
		 * the requested read... limit each call to cluster_io
		 * to the maximum UPL size... cluster_io will clip if
		 * this exceeds the maximum io_size for the device,
		 * make sure to account for
		 * a starting offset that's not page aligned
		 */
		start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
		upl_f_offset = uio->uio_offset - (off_t)start_offset;

		if (io_size > max_rd_size)
			io_size = max_rd_size;

		upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

		if (flags & IO_NOCACHE) {
			if (upl_size > max_io_size)
				upl_size = max_io_size;
		} else {
			if (upl_size > max_io_size / 4)
				upl_size = max_io_size / 4;
		}
		pages_in_upl = upl_size / PAGE_SIZE;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START,
			     upl, (int)upl_f_offset, upl_size, start_offset, 0);

		kret = ubc_create_upl(vp,
				      upl_f_offset,
				      upl_size,
				      &upl,
				      &pl,
				      UPL_FILE_IO | UPL_SET_LITE);
		if (kret != KERN_SUCCESS)
			panic("cluster_read_copy: failed to get pagelist");

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_END,
			     upl, (int)upl_f_offset, upl_size, start_offset, 0);

		/*
		 * scan from the beginning of the upl looking for the first
		 * non-valid page.... this will become the first page in
		 * the request we're going to make to 'cluster_io'... if all
		 * of the pages are valid, we won't call through to 'cluster_io'
		 */
		for (start_pg = 0; start_pg < pages_in_upl; start_pg++) {
			if (!upl_valid_page(pl, start_pg))
				break;
		}

		/*
		 * scan from the starting invalid page looking for a valid
		 * page before the end of the upl is reached, if we
		 * find one, then it will be the last page of the request to
		 * 'cluster_io'
		 */
		for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
			if (upl_valid_page(pl, last_pg))
				break;
		}
		iostate.io_completed = 0;
		iostate.io_issued = 0;
		iostate.io_error = 0;
		iostate.io_wanted = 0;

		if (start_pg < last_pg) {
			/*
			 * we found a range of 'invalid' pages that must be filled
			 * if the last page in this range is the last page of the file
			 * we may have to clip the size of it to keep from reading past
			 * the end of the last physical block associated with the file
			 */
			upl_offset = start_pg * PAGE_SIZE;
			io_size    = (last_pg - start_pg) * PAGE_SIZE;

			if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize)
				io_size = filesize - (upl_f_offset + upl_offset);

			/*
			 * issue an asynchronous read to cluster_io
			 */
			error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
					   io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg);
		}
		if (error == 0) {
			/*
			 * if the read completed successfully, or there was no I/O request
			 * issued, then copy the data into user land via 'cluster_upl_copy_data'
			 * we'll first add on any 'valid'
			 * pages that were present in the upl when we acquired it.
			 */
			u_int val_size;

			for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
				if (!upl_valid_page(pl, uio_last))
					break;
			}
			if (uio_last < pages_in_upl) {
				/*
				 * there were some invalid pages beyond the valid pages
				 * that we didn't issue an I/O for, just release them
				 * unchanged now, so that any prefetch/readahead can
				 * include them
				 */
				ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
						    (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
			}
			/*
			 * compute size to transfer this round,  if io_req_size is
			 * still non-zero after this attempt, we'll loop around and
			 * set up for another I/O.
			 */
			val_size = (uio_last * PAGE_SIZE) - start_offset;

			if (val_size > max_size)
				val_size = max_size;

			if (val_size > io_req_size)
				val_size = io_req_size;

			if ((uio->uio_offset + val_size) > last_ioread_offset)
				last_ioread_offset = uio->uio_offset + val_size;

			if ((size_of_prefetch = (last_request_offset - last_ioread_offset)) && prefetch_enabled) {

				if ((last_ioread_offset - (uio->uio_offset + val_size)) <= upl_size) {
					/*
					 * if there's still I/O left to do for this request, and...
					 * we're not in hard throttle mode, and...
					 * we're close to using up the previous prefetch, then issue a
					 * new pre-fetch I/O... the I/O latency will overlap
					 * with the copying of the data
					 */
					if (size_of_prefetch > max_rd_size)
						size_of_prefetch = max_rd_size;

					size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);

					last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);

					if (last_ioread_offset > last_request_offset)
						last_ioread_offset = last_request_offset;
				}

			} else if ((uio->uio_offset + val_size) == last_request_offset) {
				/*
				 * this transfer will finish this request, so...
				 * let's try to read ahead if we're in
				 * a sequential access pattern and we haven't
				 * explicitly disabled it
				 */
				if (rd_ahead_enabled)
					cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);

				if (rap != NULL) {
					if (extent.e_addr < rap->cl_lastr)
						rap->cl_maxra = 0;
					rap->cl_lastr = extent.e_addr;
				}
			}
			if (iostate.io_issued > iostate.io_completed) {

				lck_mtx_lock(cl_mtxp);

				while (iostate.io_issued != iostate.io_completed) {
					KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
						     iostate.io_issued, iostate.io_completed, 0, 0, 0);

					iostate.io_wanted = 1;
					msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_copy", NULL);

					KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
						     iostate.io_issued, iostate.io_completed, 0, 0, 0);
				}
				lck_mtx_unlock(cl_mtxp);
			}
			if (iostate.io_error)
				error = iostate.io_error;
			else {
				u_int32_t io_requested;

				io_requested = val_size;

				retval = cluster_copy_upl_data(uio, upl, start_offset, (int *)&io_requested);

				io_req_size -= (val_size - io_requested);
			}
		}
		if (start_pg < last_pg) {
			/*
			 * compute the range of pages that we actually issued an I/O for
			 * and either commit them as valid if the I/O succeeded
			 * or abort them if the I/O failed or we're not supposed to
			 * keep them in the cache
			 */
			io_size = (last_pg - start_pg) * PAGE_SIZE;

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, upl, start_pg * PAGE_SIZE, io_size, error, 0);

			if (error || (flags & IO_NOCACHE))
				ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
						    UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
			else {
				int commit_flags = UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY;

				if (take_reference)
					commit_flags |= UPL_COMMIT_INACTIVATE;
				else
					commit_flags |= UPL_COMMIT_SPECULATE;

				ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, commit_flags);
			}
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, start_pg * PAGE_SIZE, io_size, error, 0);
		}
		if ((last_pg - start_pg) < pages_in_upl) {
			/*
			 * the set of pages that we issued an I/O for did not encompass
			 * the entire upl... so just release these without modifying
			 * their state
			 */
			if (error)
				ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
			else {

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
					     upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);

				/*
				 * handle any valid pages at the beginning of
				 * the upl... release these appropriately
				 */
				cluster_read_upl_release(upl, 0, start_pg, take_reference);

				/*
				 * handle any valid pages immediately after the
				 * pages we issued I/O for... ... release these appropriately
				 */
				cluster_read_upl_release(upl, last_pg, uio_last, take_reference);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, -1, -1, 0, 0);
			}
		}
		if (retval == 0)
			retval = error;

		if (io_req_size) {
			if (cluster_hard_throttle_on(vp, 1)) {
				rd_ahead_enabled = 0;
				prefetch_enabled = 0;

				max_rd_size = HARD_THROTTLE_MAXSIZE;
			} else {
				if (max_rd_size == HARD_THROTTLE_MAXSIZE) {
					/*
					 * coming out of throttled state
					 */
					if (policy != IOPOL_THROTTLE) {
						rd_ahead_enabled = 1;
						prefetch_enabled = 1;
					}
					max_rd_size = max_prefetch;
					last_ioread_offset = 0;
				}
			}
		}
	}
	if (rap != NULL) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
			     (int)uio->uio_offset, io_req_size, rap->cl_lastr, retval, 0);

		lck_mtx_unlock(&rap->cl_lockr);
	} else {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
			     (int)uio->uio_offset, io_req_size, 0, retval, 0);
	}

	return (retval);
}
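
/*
 * Illustrative sketch (not compiled): the prefetch-refresh test used twice in
 * cluster_read_copy above.  A new read-ahead is kicked off when there is still
 * unserviced request left beyond the last offset we've issued I/O for, and the copy
 * position has nearly caught up with that issue point.  Names are hypothetical;
 * 'window' plays the role of max_rd_size / upl_size in the code above.
 */
#if 0
static int
should_refresh_prefetch(off_t uio_offset, off_t last_ioread_offset,
			off_t last_request_offset, u_int32_t window)
{
	if (last_ioread_offset >= last_request_offset)
		return (0);		/* everything needed has already been issued */
	/* refresh when the copy point is within 'window' bytes of the issue point */
	return ((last_ioread_offset - uio_offset) <= (off_t)window);
}
#endif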
3791 cluster_read_direct(vnode_t vp
, struct uio
*uio
, off_t filesize
, int *read_type
, u_int32_t
*read_length
,
3792 int flags
, int (*callback
)(buf_t
, void *), void *callback_arg
)
3795 upl_page_info_t
*pl
;
3797 vm_offset_t upl_offset
, vector_upl_offset
= 0;
3798 upl_size_t upl_size
, vector_upl_size
= 0;
3799 vm_size_t upl_needed_size
;
3800 unsigned int pages_in_pl
;
3804 int force_data_sync
;
3806 int no_zero_fill
= 0;
3809 struct clios iostate
;
3810 user_addr_t iov_base
;
3811 u_int32_t io_req_size
;
3812 u_int32_t offset_in_file
;
3813 u_int32_t offset_in_iovbase
;
3817 u_int32_t devblocksize
;
3818 u_int32_t mem_alignment_mask
;
3819 u_int32_t max_upl_size
;
3820 u_int32_t max_rd_size
;
3821 u_int32_t max_rd_ahead
;
3823 u_int32_t vector_upl_iosize
= 0;
3824 int issueVectorUPL
= 0,useVectorUPL
= (uio
->uio_iovcnt
> 1);
3825 off_t v_upl_uio_offset
= 0;
3826 int vector_upl_index
=0;
3827 upl_t vector_upl
= NULL
;
3829 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 70)) | DBG_FUNC_START
,
3830 (int)uio
->uio_offset
, (int)filesize
, *read_type
, *read_length
, 0);
3832 max_upl_size
= cluster_max_io_size(vp
->v_mount
, CL_READ
);
3834 max_rd_size
= max_upl_size
;
3835 max_rd_ahead
= max_rd_size
* IO_SCALE(vp
, 2);
3837 io_flag
= CL_COMMIT
| CL_READ
| CL_ASYNC
| CL_NOZERO
| CL_DIRECT_IO
;
3838 if (flags
& IO_PASSIVE
)
3839 io_flag
|= CL_PASSIVE
;
3841 iostate
.io_completed
= 0;
3842 iostate
.io_issued
= 0;
3843 iostate
.io_error
= 0;
3844 iostate
.io_wanted
= 0;
3846 devblocksize
= (u_int32_t
)vp
->v_mount
->mnt_devblocksize
;
3847 mem_alignment_mask
= (u_int32_t
)vp
->v_mount
->mnt_alignmentmask
;
3849 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 70)) | DBG_FUNC_NONE
,
3850 (int)devblocksize
, (int)mem_alignment_mask
, 0, 0, 0);
3852 if (devblocksize
== 1) {
3854 * the AFP client advertises a devblocksize of 1
3855 * however, its BLOCKMAP routine maps to physical
3856 * blocks that are PAGE_SIZE in size...
3857 * therefore we can't ask for I/Os that aren't page aligned
3858 * or aren't multiples of PAGE_SIZE in size
3859 * by setting devblocksize to PAGE_SIZE, we re-instate
3860 * the old behavior we had before the mem_alignment_mask
3861 * changes went in...
3863 devblocksize
= PAGE_SIZE
;
3866 io_req_size
= *read_length
;
3867 iov_base
= uio_curriovbase(uio
);
3869 max_io_size
= filesize
- uio
->uio_offset
;
3871 if ((off_t
)io_req_size
> max_io_size
)
3872 io_req_size
= max_io_size
;
3874 offset_in_file
= (u_int32_t
)uio
->uio_offset
& (devblocksize
- 1);
3875 offset_in_iovbase
= (u_int32_t
)iov_base
& mem_alignment_mask
;
3877 if (offset_in_file
|| offset_in_iovbase
) {
3879 * one of the 2 important offsets is misaligned
3880 * so fire an I/O through the cache for this entire vector
3884 if (iov_base
& (devblocksize
- 1)) {
3886 * the offset in memory must be on a device block boundary
3887 * so that we can guarantee that we can generate an
3888 * I/O that ends on a page boundary in cluster_io
3893 * When we get to this point, we know...
3894 * -- the offset into the file is on a devblocksize boundary
3897 while (io_req_size
&& retval
== 0) {
3900 if (cluster_hard_throttle_on(vp
, 1)) {
3901 max_rd_size
= HARD_THROTTLE_MAXSIZE
;
3902 max_rd_ahead
= HARD_THROTTLE_MAXSIZE
- 1;
3904 max_rd_size
= max_upl_size
;
3905 max_rd_ahead
= max_rd_size
* IO_SCALE(vp
, 2);
3907 io_start
= io_size
= io_req_size
;
3910 * First look for pages already in the cache
3911 * and move them to user space.
3913 * cluster_copy_ubc_data returns the resid
3916 retval
= cluster_copy_ubc_data_internal(vp
, uio
, (int *)&io_size
, 0, 0);
3919 * calculate the number of bytes actually copied
3920 * starting size - residual
3922 xsize
= io_start
- io_size
;
3924 io_req_size
-= xsize
;
3926 if(useVectorUPL
&& (xsize
|| (iov_base
& PAGE_MASK
))) {
3928 * We found something in the cache or we have an iov_base that's not
3931 * Issue all I/O's that have been collected within this Vectored UPL.
3933 if(vector_upl_index
) {
3934 retval
= vector_cluster_io(vp
, vector_upl
, vector_upl_offset
, v_upl_uio_offset
, vector_upl_iosize
, io_flag
, (buf_t
)NULL
, &iostate
, callback
, callback_arg
);
3935 reset_vector_run_state();
3942 * After this point, if we are using the Vector UPL path and the base is
3943 * not page-aligned then the UPL with that base will be the first in the vector UPL.
3948 * check to see if we are finished with this request...
3950 if (io_req_size
== 0 || misaligned
) {
3952 * see if there's another uio vector to
3953 * process that's of type IO_DIRECT
3955 * break out of while loop to get there
3960 * assume the request ends on a device block boundary
3962 io_min
= devblocksize
;
3965 * we can handle I/O's in multiples of the device block size
3966 * however, if io_size isn't a multiple of devblocksize we
3967 * want to clip it back to the nearest page boundary since
3968 * we are going to have to go through cluster_read_copy to
3969 * deal with the 'overhang'... by clipping it to a PAGE_SIZE
3970 * multiple, we avoid asking the drive for the same physical
3971 * blocks twice.. once for the partial page at the end of the
3972 * request and a 2nd time for the page we read into the cache
3973 * (which overlaps the end of the direct read) in order to
3974 * get at the overhang bytes
3976 if (io_size
& (devblocksize
- 1)) {
3978 * request does NOT end on a device block boundary
3979 * so clip it back to a PAGE_SIZE boundary
3981 io_size
&= ~PAGE_MASK
;
3984 if (retval
|| io_size
< io_min
) {
3986 * either an error or we only have the tail left to
3987 * complete via the copy path...
3988 * we may have already spun some portion of this request
3989 * off as async requests... we need to wait for the I/O
3990 * to complete before returning
3992 goto wait_for_dreads
;
3994 if ((xsize
= io_size
) > max_rd_size
)
3995 xsize
= max_rd_size
;
		io_size = 0;

		ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, (int *)&io_size);

		if (io_size == 0) {
			/*
			 * a page must have just come into the cache
			 * since the first page in this range is no
			 * longer absent, go back and re-evaluate
			 */
			continue;
		}
		iov_base = uio_curriovbase(uio);
		upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
		upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
			     (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);

		if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0))
			no_zero_fill = 1;
		else
			no_zero_fill = 0;
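		/*
		 * Illustrative arithmetic (not from the original source): if
		 * iov_base = 0x10000234 and io_size = 0x6000, then
		 * upl_offset      = 0x234 (offset within the first page) and
		 * upl_needed_size = (0x234 + 0x6000 + 0xfff) & ~0xfff = 0x7000,
		 * i.e. seven 4KB pages are needed to cover the user buffer.
		 */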
		for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {

			upl_size = upl_needed_size;
			upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;

			if (no_zero_fill)
				upl_flags |= UPL_NOZEROFILL;
			if (force_data_sync)
				upl_flags |= UPL_FORCE_DATA_SYNC;

			kret = vm_map_create_upl(current_map(),
						 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
						 &upl_size, &upl, NULL, &pages_in_pl, &upl_flags);
			if (kret != KERN_SUCCESS) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
					     (int)upl_offset, upl_size, io_size, kret, 0);
				/*
				 * failed to get pagelist
				 *
				 * we may have already spun some portion of this request
				 * off as async requests... we need to wait for the I/O
				 * to complete before returning
				 */
				goto wait_for_dreads;
			}
			pages_in_pl = upl_size / PAGE_SIZE;
			pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

			for (i = 0; i < pages_in_pl; i++) {
				if (!upl_page_present(pl, i))
					break;
			}
			if (i == pages_in_pl)
				break;

			ubc_upl_abort(upl, 0);
		}
		if (force_data_sync >= 3) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
				     (int)upl_offset, upl_size, io_size, kret, 0);

			goto wait_for_dreads;
		}
		/*
		 * Consider the possibility that upl_size wasn't satisfied.
		 */
		if (upl_size < upl_needed_size) {
			if (upl_size && upl_offset == 0)
				io_size = upl_size;
			else
				io_size = 0;
		}
		if (io_size == 0) {
			ubc_upl_abort(upl, 0);
			goto wait_for_dreads;
		}
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
			     (int)upl_offset, upl_size, io_size, kret, 0);
		if (useVectorUPL) {
			vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
			if (end_off)
				issueVectorUPL = 1;
			/*
			 * After this point, if we are using a vector UPL, then
			 * either all the UPL elements end on a page boundary OR
			 * this UPL is the last element because it does not end
			 * on a page boundary.
			 */
		}

		/*
		 * request asynchronously so that we can overlap
		 * the preparation of the next I/O
		 * if there are already too many outstanding reads
		 * wait until some have completed before issuing the next read
		 */
		if (iostate.io_issued > iostate.io_completed) {

			lck_mtx_lock(cl_mtxp);

			while ((iostate.io_issued - iostate.io_completed) > max_rd_ahead) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
					     iostate.io_issued, iostate.io_completed, max_rd_ahead, 0, 0);

				iostate.io_wanted = 1;
				msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_direct", NULL);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
					     iostate.io_issued, iostate.io_completed, max_rd_ahead, 0, 0);
			}
			lck_mtx_unlock(cl_mtxp);
		}
		if (iostate.io_error) {
			/*
			 * one of the earlier reads we issued ran into a hard error
			 * don't issue any more reads, cleanup the UPL
			 * that was just created but not used, then
			 * go wait for any other reads to complete before
			 * returning the error to the caller
			 */
			ubc_upl_abort(upl, 0);

			goto wait_for_dreads;
		}
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START,
			     upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
		if (!useVectorUPL) {
			if (no_zero_fill)
				io_flag &= ~CL_PRESERVE;
			else
				io_flag |= CL_PRESERVE;

			retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);

		} else {

			if (!vector_upl_index) {
				vector_upl = vector_upl_create(upl_offset);
				v_upl_uio_offset = uio->uio_offset;
				vector_upl_offset = upl_offset;
			}

			vector_upl_set_subupl(vector_upl, upl, upl_size);
			vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
			vector_upl_index++;
			vector_upl_size += upl_size;
			vector_upl_iosize += io_size;

			if (issueVectorUPL || vector_upl_index == MAX_VECTOR_UPL_ELEMENTS || vector_upl_size >= MAX_VECTOR_UPL_SIZE) {
				retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
				reset_vector_run_state();
			}
		}
		/*
		 * update the uio structure
		 */
		uio_update(uio, (user_size_t)io_size);

		io_req_size -= io_size;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END,
			     upl, (int)uio->uio_offset, io_req_size, retval, 0);

	}
	if (retval == 0 && iostate.io_error == 0 && io_req_size == 0 && uio->uio_offset < filesize) {

		retval = cluster_io_type(uio, read_type, read_length, 0);

		if (retval == 0 && *read_type == IO_DIRECT) {

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
				     (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);

			goto next_dread;
		}
	}
wait_for_dreads:

	if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
		retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
		reset_vector_run_state();
	}
	/*
	 * make sure all async reads that are part of this stream
	 * have completed before we return
	 */
	if (iostate.io_issued > iostate.io_completed) {

		lck_mtx_lock(cl_mtxp);

		while (iostate.io_issued != iostate.io_completed) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
				     iostate.io_issued, iostate.io_completed, 0, 0, 0);

			iostate.io_wanted = 1;
			msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_direct", NULL);

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
				     iostate.io_issued, iostate.io_completed, 0, 0, 0);
		}
		lck_mtx_unlock(cl_mtxp);
	}
	if (iostate.io_error)
		retval = iostate.io_error;
	if (io_req_size && retval == 0) {
		/*
		 * we couldn't handle the tail of this request in DIRECT mode
		 * so fire it through the copy path
		 */
		retval = cluster_read_copy(vp, uio, io_req_size, filesize, flags, callback, callback_arg);

		*read_type = IO_UNKNOWN;
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
		     (int)uio->uio_offset, (int)uio_resid(uio), io_req_size, retval, 0);

	return (retval);
}
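/*
 * Sketch (not part of the original source): the throttling idiom used above
 * against outstanding async reads.  The helper name is hypothetical; the real
 * code open-codes this loop with cl_mtxp, iostate.io_wanted and msleep() at
 * each call site.
 */
#if 0
static void
example_wait_for_async_reads(struct clios *iostate, u_int32_t limit, const char *wmesg)
{
	lck_mtx_lock(cl_mtxp);

	while ((iostate->io_issued - iostate->io_completed) > limit) {
		/* ask the I/O completion path to wake us, then sleep */
		iostate->io_wanted = 1;
		msleep((caddr_t)&iostate->io_wanted, cl_mtxp, PRIBIO + 1, wmesg, NULL);
	}
	lck_mtx_unlock(cl_mtxp);
}
#endif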
4230 cluster_read_contig(vnode_t vp
, struct uio
*uio
, off_t filesize
, int *read_type
, u_int32_t
*read_length
,
4231 int (*callback
)(buf_t
, void *), void *callback_arg
, int flags
)
4233 upl_page_info_t
*pl
;
4234 upl_t upl
[MAX_VECTS
];
4235 vm_offset_t upl_offset
;
4236 addr64_t dst_paddr
= 0;
4237 user_addr_t iov_base
;
4239 upl_size_t upl_size
;
4240 vm_size_t upl_needed_size
;
4241 mach_msg_type_number_t pages_in_pl
;
4244 struct clios iostate
;
4251 u_int32_t devblocksize
;
4252 u_int32_t mem_alignment_mask
;
4253 u_int32_t tail_size
= 0;
4256 if (flags
& IO_PASSIVE
)
4262 * When we enter this routine, we know
4263 * -- the read_length will not exceed the current iov_len
4264 * -- the target address is physically contiguous for read_length
4266 cluster_syncup(vp
, filesize
, callback
, callback_arg
);
4268 devblocksize
= (u_int32_t
)vp
->v_mount
->mnt_devblocksize
;
4269 mem_alignment_mask
= (u_int32_t
)vp
->v_mount
->mnt_alignmentmask
;
4271 iostate
.io_completed
= 0;
4272 iostate
.io_issued
= 0;
4273 iostate
.io_error
= 0;
4274 iostate
.io_wanted
= 0;
4277 io_size
= *read_length
;
4279 max_size
= filesize
- uio
->uio_offset
;
4281 if (io_size
> max_size
)
4284 iov_base
= uio_curriovbase(uio
);
4286 upl_offset
= (vm_offset_t
)((u_int32_t
)iov_base
& PAGE_MASK
);
4287 upl_needed_size
= upl_offset
+ io_size
;
4290 upl_size
= upl_needed_size
;
4291 upl_flags
= UPL_FILE_IO
| UPL_NO_SYNC
| UPL_CLEAN_IN_PLACE
| UPL_SET_INTERNAL
| UPL_SET_LITE
| UPL_SET_IO_WIRE
;
4294 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 92)) | DBG_FUNC_START
,
4295 (int)upl_offset
, (int)upl_size
, (int)iov_base
, io_size
, 0);
4297 kret
= vm_map_get_upl(current_map(),
4298 (vm_map_offset_t
)(iov_base
& ~((user_addr_t
)PAGE_MASK
)),
4299 &upl_size
, &upl
[cur_upl
], NULL
, &pages_in_pl
, &upl_flags
, 0);
4301 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 92)) | DBG_FUNC_END
,
4302 (int)upl_offset
, upl_size
, io_size
, kret
, 0);
4304 if (kret
!= KERN_SUCCESS
) {
4306 * failed to get pagelist
4309 goto wait_for_creads
;
4313 if (upl_size
< upl_needed_size
) {
4315 * The upl_size wasn't satisfied.
4318 goto wait_for_creads
;
4320 pl
= ubc_upl_pageinfo(upl
[cur_upl
]);
4322 dst_paddr
= ((addr64_t
)upl_phys_page(pl
, 0) << 12) + (addr64_t
)upl_offset
;
4324 while (((uio
->uio_offset
& (devblocksize
- 1)) || io_size
< devblocksize
) && io_size
) {
4325 u_int32_t head_size
;
4327 head_size
= devblocksize
- (u_int32_t
)(uio
->uio_offset
& (devblocksize
- 1));
4329 if (head_size
> io_size
)
4330 head_size
= io_size
;
4332 error
= cluster_align_phys_io(vp
, uio
, dst_paddr
, head_size
, CL_READ
, callback
, callback_arg
);
4335 goto wait_for_creads
;
4337 upl_offset
+= head_size
;
4338 dst_paddr
+= head_size
;
4339 io_size
-= head_size
;
4341 iov_base
+= head_size
;
4343 if ((u_int32_t
)iov_base
& mem_alignment_mask
) {
4345 * request doesn't set up on a memory boundary
4346 * the underlying DMA engine can handle...
4347 * return an error instead of going through
4348 * the slow copy path since the intent of this
4349 * path is direct I/O to device memory
4352 goto wait_for_creads
;
4355 tail_size
= io_size
& (devblocksize
- 1);
4357 io_size
-= tail_size
;
4359 while (io_size
&& error
== 0) {
4361 if (io_size
> MAX_IO_CONTIG_SIZE
)
4362 xsize
= MAX_IO_CONTIG_SIZE
;
4366 * request asynchronously so that we can overlap
4367 * the preparation of the next I/O... we'll do
4368 * the commit after all the I/O has completed
4369 * since its all issued against the same UPL
4370 * if there are already too many outstanding reads
4371 * wait until some have completed before issuing the next
4373 if (iostate
.io_issued
> iostate
.io_completed
) {
4374 lck_mtx_lock(cl_mtxp
);
4376 while ((iostate
.io_issued
- iostate
.io_completed
) > (MAX_IO_CONTIG_SIZE
* IO_SCALE(vp
, 2))) {
4377 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 95)) | DBG_FUNC_START
,
4378 iostate
.io_issued
, iostate
.io_completed
, MAX_IO_CONTIG_SIZE
* IO_SCALE(vp
, 2), 0, 0);
4380 iostate
.io_wanted
= 1;
4381 msleep((caddr_t
)&iostate
.io_wanted
, cl_mtxp
, PRIBIO
+ 1, "cluster_read_contig", NULL
);
4383 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 95)) | DBG_FUNC_END
,
4384 iostate
.io_issued
, iostate
.io_completed
, MAX_IO_CONTIG_SIZE
* IO_SCALE(vp
, 2), 0, 0);
4386 lck_mtx_unlock(cl_mtxp
);
4388 if (iostate
.io_error
) {
4390 * one of the earlier reads we issued ran into a hard error
4391 * don't issue any more reads...
4392 * go wait for any other reads to complete before
4393 * returning the error to the caller
4395 goto wait_for_creads
;
4397 error
= cluster_io(vp
, upl
[cur_upl
], upl_offset
, uio
->uio_offset
, xsize
,
4398 CL_READ
| CL_NOZERO
| CL_DEV_MEMORY
| CL_ASYNC
| bflag
,
4399 (buf_t
)NULL
, &iostate
, callback
, callback_arg
);
4401 * The cluster_io read was issued successfully,
4402 * update the uio structure
4405 uio_update(uio
, (user_size_t
)xsize
);
4408 upl_offset
+= xsize
;
4412 if (error
== 0 && iostate
.io_error
== 0 && tail_size
== 0 && num_upl
< MAX_VECTS
&& uio
->uio_offset
< filesize
) {
4414 error
= cluster_io_type(uio
, read_type
, read_length
, 0);
4416 if (error
== 0 && *read_type
== IO_CONTIG
) {
4421 *read_type
= IO_UNKNOWN
;
4425 * make sure all async reads that are part of this stream
4426 * have completed before we proceed
4428 if (iostate
.io_issued
> iostate
.io_completed
) {
4430 lck_mtx_lock(cl_mtxp
);
4432 while (iostate
.io_issued
!= iostate
.io_completed
) {
4433 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 95)) | DBG_FUNC_START
,
4434 iostate
.io_issued
, iostate
.io_completed
, 0, 0, 0);
4436 iostate
.io_wanted
= 1;
4437 msleep((caddr_t
)&iostate
.io_wanted
, cl_mtxp
, PRIBIO
+ 1, "cluster_read_contig", NULL
);
4439 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 95)) | DBG_FUNC_END
,
4440 iostate
.io_issued
, iostate
.io_completed
, 0, 0, 0);
4442 lck_mtx_unlock(cl_mtxp
);
4444 if (iostate
.io_error
)
4445 error
= iostate
.io_error
;
4447 if (error
== 0 && tail_size
)
4448 error
= cluster_align_phys_io(vp
, uio
, dst_paddr
, tail_size
, CL_READ
, callback
, callback_arg
);
4450 for (n
= 0; n
< num_upl
; n
++)
4452 * just release our hold on each physically contiguous
4453 * region without changing any state
4455 ubc_upl_abort(upl
[n
], 0);
	return (error);
}


static int
cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length)
{
	user_size_t	 iov_len;
	user_addr_t	 iov_base = 0;
	upl_t		 upl;
	upl_size_t	 upl_size;
	int		 upl_flags;
	int		 retval = 0;

	/*
	 * skip over any empty vectors
	 */
	uio_update(uio, (user_size_t)0);

	iov_len = uio_curriovlen(uio);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_START, uio, (int)iov_len, 0, 0, 0);

	if (iov_len) {
		iov_base = uio_curriovbase(uio);
		/*
		 * make sure the size of the vector isn't too big...
		 * internally, we want to handle all of the I/O in
		 * chunk sizes that fit in a 32 bit int
		 */
		if (iov_len > (user_size_t)MAX_IO_REQUEST_SIZE)
			upl_size = MAX_IO_REQUEST_SIZE;
		else
			upl_size = (u_int32_t)iov_len;

		upl_flags = UPL_QUERY_OBJECT_TYPE;

		if ((vm_map_get_upl(current_map(),
				    (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
				    &upl_size, &upl, NULL, NULL, &upl_flags, 0)) != KERN_SUCCESS) {
			/*
			 * the user app must have passed in an invalid address
			 */
			retval = EFAULT;
		}

		*io_length = upl_size;

		if (upl_flags & UPL_PHYS_CONTIG)
			*io_type = IO_CONTIG;
		else if (iov_len >= min_length)
			*io_type = IO_DIRECT;
		else
			*io_type = IO_COPY;
	} else {
		/*
		 * nothing left to do for this uio
		 */
		*io_length = 0;
		*io_type   = IO_UNKNOWN;
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_END, iov_base, *io_type, *io_length, retval, 0);

	return (retval);
}
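/*
 * Sketch (not part of the original source): how a read path typically uses
 * cluster_io_type() to pick a strategy for the current uio vector.  The
 * wrapper function below is hypothetical; only the IO_* values and the
 * cluster_read_* names it mentions come from this file.
 */
#if 0
static int
example_dispatch_read(struct uio *uio)
{
	int		read_type = IO_UNKNOWN;
	u_int32_t	read_length = 0;
	int		retval;

	/* the read path in this file passes 0 for min_length when probing */
	retval = cluster_io_type(uio, &read_type, &read_length, 0);

	if (retval == 0) {
		switch (read_type) {
		case IO_DIRECT:		/* well aligned, uncached: cluster_read_direct */
			break;
		case IO_CONTIG:		/* physically contiguous target: cluster_read_contig */
			break;
		case IO_COPY:		/* small or misaligned: cluster_read_copy via the cache */
			break;
		case IO_UNKNOWN:	/* nothing left in this uio */
			break;
		}
	}
	return (retval);
}
#endif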
/*
 * generate advisory I/O's in the largest chunks possible
 * the completed pages will be released into the VM cache
 */
int
advisory_read(vnode_t vp, off_t filesize, off_t f_offset, int resid)
{
	return advisory_read_ext(vp, filesize, f_offset, resid, NULL, NULL, CL_PASSIVE);
}
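/*
 * Sketch (not part of the original source): a filesystem that wants to warm
 * the cache ahead of a sequential reader could issue an advisory read for the
 * next chunk of the file.  The wrapper and its arguments are hypothetical;
 * failures are simply ignored because the read is only a hint.
 */
#if 0
static void
example_prefetch(vnode_t vp, off_t filesize, off_t next_offset, int chunk)
{
	(void) advisory_read(vp, filesize, next_offset, chunk);
}
#endif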
int
advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
{
4539 upl_page_info_t
*pl
;
4541 vm_offset_t upl_offset
;
4554 uint32_t max_io_size
;
4557 if ( !UBCINFOEXISTS(vp
))
4563 max_io_size
= cluster_max_io_size(vp
->v_mount
, CL_READ
);
4565 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 60)) | DBG_FUNC_START
,
4566 (int)f_offset
, resid
, (int)filesize
, 0, 0);
4568 while (resid
&& f_offset
< filesize
&& retval
== 0) {
4570 * compute the size of the upl needed to encompass
4571 * the requested read... limit each call to cluster_io
4572 * to the maximum UPL size... cluster_io will clip if
4573 * this exceeds the maximum io_size for the device,
4574 * make sure to account for
4575 * a starting offset that's not page aligned
4577 start_offset
= (int)(f_offset
& PAGE_MASK_64
);
4578 upl_f_offset
= f_offset
- (off_t
)start_offset
;
4579 max_size
= filesize
- f_offset
;
4581 if (resid
< max_size
)
4586 upl_size
= (start_offset
+ io_size
+ (PAGE_SIZE
- 1)) & ~PAGE_MASK
;
4587 if ((uint32_t)upl_size
> max_io_size
)
4588 upl_size
= max_io_size
;
4592 * return the number of contiguously present pages in the cache
4593 * starting at upl_f_offset within the file
4595 ubc_range_op(vp
, upl_f_offset
, upl_f_offset
+ upl_size
, UPL_ROP_PRESENT
, &skip_range
);
4599 * skip over pages already present in the cache
4601 io_size
= skip_range
- start_offset
;
4603 f_offset
+= io_size
;
4606 if (skip_range
== upl_size
)
4609 * have to issue some real I/O
4610 * at this point, we know it's starting on a page boundary
4611 * because we've skipped over at least the first page in the request
4614 upl_f_offset
+= skip_range
;
4615 upl_size
-= skip_range
;
4617 pages_in_upl
= upl_size
/ PAGE_SIZE
;
4619 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 61)) | DBG_FUNC_START
,
4620 upl
, (int)upl_f_offset
, upl_size
, start_offset
, 0);
4622 kret
= ubc_create_upl(vp
,
4627 UPL_RET_ONLY_ABSENT
| UPL_SET_LITE
);
4628 if (kret
!= KERN_SUCCESS
)
4633 * before we start marching forward, we must make sure we end on
4634 * a present page, otherwise we will be working with a freed
4637 for (last_pg
= pages_in_upl
- 1; last_pg
>= 0; last_pg
--) {
4638 if (upl_page_present(pl
, last_pg
))
4641 pages_in_upl
= last_pg
+ 1;
4644 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 61)) | DBG_FUNC_END
,
4645 upl
, (int)upl_f_offset
, upl_size
, start_offset
, 0);
4648 for (last_pg
= 0; last_pg
< pages_in_upl
; ) {
4650 * scan from the beginning of the upl looking for the first
4651 * page that is present.... this will become the first page in
4652 * the request we're going to make to 'cluster_io'... if all
4653 * of the pages are absent, we won't call through to 'cluster_io'
4655 for (start_pg
= last_pg
; start_pg
< pages_in_upl
; start_pg
++) {
4656 if (upl_page_present(pl
, start_pg
))
4661 * scan from the starting present page looking for an absent
4662 * page before the end of the upl is reached, if we
4663 * find one, then it will terminate the range of pages being
4664 * presented to 'cluster_io'
4666 for (last_pg
= start_pg
; last_pg
< pages_in_upl
; last_pg
++) {
4667 if (!upl_page_present(pl
, last_pg
))
4671 if (last_pg
> start_pg
) {
4673 * we found a range of pages that must be filled
4674 * if the last page in this range is the last page of the file
4675 * we may have to clip the size of it to keep from reading past
4676 * the end of the last physical block associated with the file
4678 upl_offset
= start_pg
* PAGE_SIZE
;
4679 io_size
= (last_pg
- start_pg
) * PAGE_SIZE
;
4681 if ((off_t
)(upl_f_offset
+ upl_offset
+ io_size
) > filesize
)
4682 io_size
= filesize
- (upl_f_offset
+ upl_offset
);
4685 * issue an asynchronous read to cluster_io
4687 retval
= cluster_io(vp
, upl
, upl_offset
, upl_f_offset
+ upl_offset
, io_size
,
4688 CL_ASYNC
| CL_READ
| CL_COMMIT
| CL_AGE
| bflag
, (buf_t
)NULL
, (struct clios
*)NULL
, callback
, callback_arg
);
4694 ubc_upl_abort(upl
, 0);
4696 io_size
= upl_size
- start_offset
;
4698 if (io_size
> resid
)
4700 f_offset
+= io_size
;
4704 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 60)) | DBG_FUNC_END
,
4705 (int)f_offset
, resid
, retval
, 0, 0);
int
cluster_push(vnode_t vp, int flags)
{
	return cluster_push_ext(vp, flags, NULL, NULL);
}
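/*
 * Sketch (not part of the original source): a typical fsync-style caller
 * pushes all delayed clusters and waits for the writes.  Passing IO_SYNC
 * makes cluster_push_ext() below serialize against in-flight sparse pushes
 * and wait for the vnode's writes before returning.
 */
#if 0
static void
example_flush_file(vnode_t vp)
{
	(void) cluster_push(vp, IO_SYNC);
}
#endif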
int
cluster_push_ext(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg)
{
	int	retval = 0;
4722 int my_sparse_wait
= 0;
4723 struct cl_writebehind
*wbp
;
4725 if ( !UBCINFOEXISTS(vp
)) {
4726 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 53)) | DBG_FUNC_NONE
, vp
, flags
, 0, -1, 0);
4729 /* return if deferred write is set */
4730 if (((unsigned int)vfs_flags(vp
->v_mount
) & MNT_DEFWRITE
) && (flags
& IO_DEFWRITE
)) {
4733 if ((wbp
= cluster_get_wbp(vp
, CLW_RETURNLOCKED
)) == NULL
) {
4734 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 53)) | DBG_FUNC_NONE
, vp
, flags
, 0, -2, 0);
4737 if (wbp
->cl_number
== 0 && wbp
->cl_scmap
== NULL
) {
4738 lck_mtx_unlock(&wbp
->cl_lockw
);
4740 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 53)) | DBG_FUNC_NONE
, vp
, flags
, 0, -3, 0);
4743 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 53)) | DBG_FUNC_START
,
4744 wbp
->cl_scmap
, wbp
->cl_number
, flags
, 0, 0);
4747 * if we have an fsync in progress, we don't want to allow any additional
4748 * sync/fsync/close(s) to occur until it finishes.
4749 * note that its possible for writes to continue to occur to this file
4750 * while we're waiting and also once the fsync starts to clean if we're
4751 * in the sparse map case
4753 while (wbp
->cl_sparse_wait
) {
4754 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 97)) | DBG_FUNC_START
, vp
, 0, 0, 0, 0);
4756 msleep((caddr_t
)&wbp
->cl_sparse_wait
, &wbp
->cl_lockw
, PRIBIO
+ 1, "cluster_push_ext", NULL
);
4758 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 97)) | DBG_FUNC_END
, vp
, 0, 0, 0, 0);
4760 if (flags
& IO_SYNC
) {
4762 wbp
->cl_sparse_wait
= 1;
4765 * this is an fsync (or equivalent)... we must wait for any existing async
4766 * cleaning operations to complete before we evaulate the current state
4767 * and finish cleaning... this insures that all writes issued before this
4768 * fsync actually get cleaned to the disk before this fsync returns
4770 while (wbp
->cl_sparse_pushes
) {
4771 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 98)) | DBG_FUNC_START
, vp
, 0, 0, 0, 0);
4773 msleep((caddr_t
)&wbp
->cl_sparse_pushes
, &wbp
->cl_lockw
, PRIBIO
+ 1, "cluster_push_ext", NULL
);
4775 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 98)) | DBG_FUNC_END
, vp
, 0, 0, 0, 0);
4778 if (wbp
->cl_scmap
) {
4781 if (wbp
->cl_sparse_pushes
< SPARSE_PUSH_LIMIT
) {
4783 scmap
= wbp
->cl_scmap
;
4784 wbp
->cl_scmap
= NULL
;
4786 wbp
->cl_sparse_pushes
++;
4788 lck_mtx_unlock(&wbp
->cl_lockw
);
4790 sparse_cluster_push(&scmap
, vp
, ubc_getsize(vp
), PUSH_ALL
| IO_PASSIVE
, callback
, callback_arg
);
4792 lck_mtx_lock(&wbp
->cl_lockw
);
4794 wbp
->cl_sparse_pushes
--;
4796 if (wbp
->cl_sparse_wait
&& wbp
->cl_sparse_pushes
== 0)
4797 wakeup((caddr_t
)&wbp
->cl_sparse_pushes
);
4799 sparse_cluster_push(&(wbp
->cl_scmap
), vp
, ubc_getsize(vp
), PUSH_ALL
| IO_PASSIVE
, callback
, callback_arg
);
4803 retval
= cluster_try_push(wbp
, vp
, ubc_getsize(vp
), PUSH_ALL
| IO_PASSIVE
, callback
, callback_arg
);
4805 lck_mtx_unlock(&wbp
->cl_lockw
);
4807 if (flags
& IO_SYNC
)
4808 (void)vnode_waitforwrites(vp
, 0, 0, 0, "cluster_push");
4810 if (my_sparse_wait
) {
4812 * I'm the owner of the serialization token
4813 * clear it and wakeup anyone that is waiting
4816 lck_mtx_lock(&wbp
->cl_lockw
);
4818 wbp
->cl_sparse_wait
= 0;
4819 wakeup((caddr_t
)&wbp
->cl_sparse_wait
);
4821 lck_mtx_unlock(&wbp
->cl_lockw
);
4823 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 53)) | DBG_FUNC_END
,
4824 wbp
->cl_scmap
, wbp
->cl_number
, retval
, 0, 0);
__private_extern__ void
cluster_release(struct ubc_info *ubc)
{
	struct cl_writebehind *wbp;
	struct cl_readahead   *rap;

	if ((wbp = ubc->cl_wbehind)) {

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, wbp->cl_scmap, 0, 0, 0);

		if (wbp->cl_scmap)
			vfs_drt_control(&(wbp->cl_scmap), 0);
	} else {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, 0, 0, 0, 0);
	}

	rap = ubc->cl_rahead;

	if (wbp != NULL) {
		lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
		FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
	}
	if ((rap = ubc->cl_rahead)) {
		lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp);
		FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD);
	}
	ubc->cl_rahead  = NULL;
	ubc->cl_wbehind = NULL;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_END, ubc, rap, wbp, 0, 0);
}
4864 cluster_try_push(struct cl_writebehind
*wbp
, vnode_t vp
, off_t EOF
, int push_flag
, int (*callback
)(buf_t
, void *), void *callback_arg
)
4871 struct cl_wextent l_clusters
[MAX_CLUSTERS
];
4872 u_int max_cluster_pgcount
;
4875 max_cluster_pgcount
= MAX_CLUSTER_SIZE(vp
) / PAGE_SIZE
;
4877 * the write behind context exists and has
4878 * already been locked...
4880 if (wbp
->cl_number
== 0)
4882 * no clusters to push
4883 * return number of empty slots
4885 return (MAX_CLUSTERS
);
4888 * make a local 'sorted' copy of the clusters
4889 * and clear wbp->cl_number so that new clusters can
4892 for (cl_index
= 0; cl_index
< wbp
->cl_number
; cl_index
++) {
4893 for (min_index
= -1, cl_index1
= 0; cl_index1
< wbp
->cl_number
; cl_index1
++) {
4894 if (wbp
->cl_clusters
[cl_index1
].b_addr
== wbp
->cl_clusters
[cl_index1
].e_addr
)
4896 if (min_index
== -1)
4897 min_index
= cl_index1
;
4898 else if (wbp
->cl_clusters
[cl_index1
].b_addr
< wbp
->cl_clusters
[min_index
].b_addr
)
4899 min_index
= cl_index1
;
4901 if (min_index
== -1)
4904 l_clusters
[cl_index
].b_addr
= wbp
->cl_clusters
[min_index
].b_addr
;
4905 l_clusters
[cl_index
].e_addr
= wbp
->cl_clusters
[min_index
].e_addr
;
4906 l_clusters
[cl_index
].io_flags
= wbp
->cl_clusters
[min_index
].io_flags
;
4908 wbp
->cl_clusters
[min_index
].b_addr
= wbp
->cl_clusters
[min_index
].e_addr
;
4914 if ( (push_flag
& PUSH_DELAY
) && cl_len
== MAX_CLUSTERS
) {
4918 * determine if we appear to be writing the file sequentially
4919 * if not, by returning without having pushed any clusters
4920 * we will cause this vnode to be pushed into the sparse cluster mechanism
4921 * used for managing more random I/O patterns
4923 * we know that we've got all clusters currently in use and the next write doesn't fit into one of them...
4924 * that's why we're in try_push with PUSH_DELAY...
4926 * check to make sure that all the clusters except the last one are 'full'... and that each cluster
4927 * is adjacent to the next (i.e. we're looking for sequential writes) they were sorted above
4928 * so we can just make a simple pass through, up to, but not including the last one...
4929 * note that e_addr is not inclusive, so it will be equal to the b_addr of the next cluster if they
4932 * we let the last one be partial as long as it was adjacent to the previous one...
4933 * we need to do this to deal with multi-threaded servers that might write an I/O or 2 out
4934 * of order... if this occurs at the tail of the last cluster, we don't want to fall into the sparse cluster world...
4936 for (i
= 0; i
< MAX_CLUSTERS
- 1; i
++) {
4937 if ((l_clusters
[i
].e_addr
- l_clusters
[i
].b_addr
) != max_cluster_pgcount
)
4939 if (l_clusters
[i
].e_addr
!= l_clusters
[i
+1].b_addr
)
4943 for (cl_index
= 0; cl_index
< cl_len
; cl_index
++) {
4945 struct cl_extent cl
;
4948 * try to push each cluster in turn...
4950 if (l_clusters
[cl_index
].io_flags
& CLW_IONOCACHE
)
4955 if ((l_clusters
[cl_index
].io_flags
& CLW_IOPASSIVE
) || (push_flag
& IO_PASSIVE
))
4956 flags
|= IO_PASSIVE
;
4958 if (push_flag
& PUSH_SYNC
)
4961 cl
.b_addr
= l_clusters
[cl_index
].b_addr
;
4962 cl
.e_addr
= l_clusters
[cl_index
].e_addr
;
4964 cluster_push_now(vp
, &cl
, EOF
, flags
, callback
, callback_arg
);
4966 l_clusters
[cl_index
].b_addr
= 0;
4967 l_clusters
[cl_index
].e_addr
= 0;
4971 if ( !(push_flag
& PUSH_ALL
) )
4975 if (cl_len
> cl_pushed
) {
4977 * we didn't push all of the clusters, so
4978 * lets try to merge them back in to the vnode
4980 if ((MAX_CLUSTERS
- wbp
->cl_number
) < (cl_len
- cl_pushed
)) {
4982 * we picked up some new clusters while we were trying to
4983 * push the old ones... this can happen because I've dropped
4984 * the vnode lock... the sum of the
4985 * leftovers plus the new cluster count exceeds our ability
4986 * to represent them, so switch to the sparse cluster mechanism
4988 * collect the active public clusters...
4990 sparse_cluster_switch(wbp
, vp
, EOF
, callback
, callback_arg
);
4992 for (cl_index
= 0, cl_index1
= 0; cl_index
< cl_len
; cl_index
++) {
4993 if (l_clusters
[cl_index
].b_addr
== l_clusters
[cl_index
].e_addr
)
4995 wbp
->cl_clusters
[cl_index1
].b_addr
= l_clusters
[cl_index
].b_addr
;
4996 wbp
->cl_clusters
[cl_index1
].e_addr
= l_clusters
[cl_index
].e_addr
;
4997 wbp
->cl_clusters
[cl_index1
].io_flags
= l_clusters
[cl_index
].io_flags
;
5002 * update the cluster count
5004 wbp
->cl_number
= cl_index1
;
5007 * and collect the original clusters that were moved into the
5008 * local storage for sorting purposes
5010 sparse_cluster_switch(wbp
, vp
, EOF
, callback
, callback_arg
);
5014 * we've got room to merge the leftovers back in
5015 * just append them starting at the next 'hole'
5016 * represented by wbp->cl_number
5018 for (cl_index
= 0, cl_index1
= wbp
->cl_number
; cl_index
< cl_len
; cl_index
++) {
5019 if (l_clusters
[cl_index
].b_addr
== l_clusters
[cl_index
].e_addr
)
5022 wbp
->cl_clusters
[cl_index1
].b_addr
= l_clusters
[cl_index
].b_addr
;
5023 wbp
->cl_clusters
[cl_index1
].e_addr
= l_clusters
[cl_index
].e_addr
;
5024 wbp
->cl_clusters
[cl_index1
].io_flags
= l_clusters
[cl_index
].io_flags
;
5029 * update the cluster count
5031 wbp
->cl_number
= cl_index1
;
5034 return (MAX_CLUSTERS
- wbp
->cl_number
);
5040 cluster_push_now(vnode_t vp
, struct cl_extent
*cl
, off_t EOF
, int flags
, int (*callback
)(buf_t
, void *), void *callback_arg
)
5042 upl_page_info_t
*pl
;
5044 vm_offset_t upl_offset
;
5059 if (flags
& IO_PASSIVE
)
5064 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 51)) | DBG_FUNC_START
,
5065 (int)cl
->b_addr
, (int)cl
->e_addr
, (int)EOF
, flags
, 0);
5067 if ((pages_in_upl
= (int)(cl
->e_addr
- cl
->b_addr
)) == 0) {
5068 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 51)) | DBG_FUNC_END
, 1, 0, 0, 0, 0);
5072 upl_size
= pages_in_upl
* PAGE_SIZE
;
5073 upl_f_offset
= (off_t
)(cl
->b_addr
* PAGE_SIZE_64
);
5075 if (upl_f_offset
+ upl_size
>= EOF
) {
5077 if (upl_f_offset
>= EOF
) {
5079 * must have truncated the file and missed
5080 * clearing a dangling cluster (i.e. it's completely
5081 * beyond the new EOF
5083 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 51)) | DBG_FUNC_END
, 1, 1, 0, 0, 0);
5087 size
= EOF
- upl_f_offset
;
5089 upl_size
= (size
+ (PAGE_SIZE
- 1)) & ~PAGE_MASK
;
5090 pages_in_upl
= upl_size
/ PAGE_SIZE
;
5094 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 41)) | DBG_FUNC_START
, upl_size
, size
, 0, 0, 0);
5097 * by asking for UPL_COPYOUT_FROM and UPL_RET_ONLY_DIRTY, we get the following desirable behavior
5099 * - only pages that are currently dirty are returned... these are the ones we need to clean
5100 * - the hardware dirty bit is cleared when the page is gathered into the UPL... the software dirty bit is set
5101 * - if we have to abort the I/O for some reason, the software dirty bit is left set since we didn't clean the page
5102 * - when we commit the page, the software dirty bit is cleared... the hardware dirty bit is untouched so that if
5103 * someone dirties this page while the I/O is in progress, we don't lose track of the new state
5105 * when the I/O completes, we no longer ask for an explicit clear of the DIRTY state (either soft or hard)
5108 if ((vp
->v_flag
& VNOCACHE_DATA
) || (flags
& IO_NOCACHE
))
5109 upl_flags
= UPL_COPYOUT_FROM
| UPL_RET_ONLY_DIRTY
| UPL_SET_LITE
| UPL_WILL_BE_DUMPED
;
5111 upl_flags
= UPL_COPYOUT_FROM
| UPL_RET_ONLY_DIRTY
| UPL_SET_LITE
;
5113 kret
= ubc_create_upl(vp
,
5119 if (kret
!= KERN_SUCCESS
)
5120 panic("cluster_push: failed to get pagelist");
5122 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 41)) | DBG_FUNC_END
, upl
, upl_f_offset
, 0, 0, 0);
5125 * since we only asked for the dirty pages back
5126 * it's possible that we may only get a few or even none, so...
5127 * before we start marching forward, we must make sure we know
5128 * where the last present page is in the UPL, otherwise we could
5129 * end up working with a freed upl due to the FREE_ON_EMPTY semantics
5130 * employed by commit_range and abort_range.
5132 for (last_pg
= pages_in_upl
- 1; last_pg
>= 0; last_pg
--) {
5133 if (upl_page_present(pl
, last_pg
))
5136 pages_in_upl
= last_pg
+ 1;
5138 if (pages_in_upl
== 0) {
5139 ubc_upl_abort(upl
, 0);
5141 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 51)) | DBG_FUNC_END
, 1, 2, 0, 0, 0);
5145 for (last_pg
= 0; last_pg
< pages_in_upl
; ) {
5147 * find the next dirty page in the UPL
5148 * this will become the first page in the
5149 * next I/O to generate
5151 for (start_pg
= last_pg
; start_pg
< pages_in_upl
; start_pg
++) {
5152 if (upl_dirty_page(pl
, start_pg
))
5154 if (upl_page_present(pl
, start_pg
))
5156 * RET_ONLY_DIRTY will return non-dirty 'precious' pages
5157 * just release these unchanged since we're not going
5158 * to steal them or change their state
5160 ubc_upl_abort_range(upl
, start_pg
* PAGE_SIZE
, PAGE_SIZE
, UPL_ABORT_FREE_ON_EMPTY
);
5162 if (start_pg
>= pages_in_upl
)
5164 * done... no more dirty pages to push
5167 if (start_pg
> last_pg
)
5169 * skipped over some non-dirty pages
5171 size
-= ((start_pg
- last_pg
) * PAGE_SIZE
);
5174 * find a range of dirty pages to write
5176 for (last_pg
= start_pg
; last_pg
< pages_in_upl
; last_pg
++) {
5177 if (!upl_dirty_page(pl
, last_pg
))
5180 upl_offset
= start_pg
* PAGE_SIZE
;
5182 io_size
= min(size
, (last_pg
- start_pg
) * PAGE_SIZE
);
5184 io_flags
= CL_THROTTLE
| CL_COMMIT
| CL_AGE
| bflag
;
5186 if ( !(flags
& IO_SYNC
))
5187 io_flags
|= CL_ASYNC
;
5189 retval
= cluster_io(vp
, upl
, upl_offset
, upl_f_offset
+ upl_offset
, io_size
,
5190 io_flags
, (buf_t
)NULL
, (struct clios
*)NULL
, callback
, callback_arg
);
5192 if (error
== 0 && retval
)
5197 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 51)) | DBG_FUNC_END
, 1, 3, 0, 0, 0);
5204 * sparse_cluster_switch is called with the write behind lock held
5207 sparse_cluster_switch(struct cl_writebehind
*wbp
, vnode_t vp
, off_t EOF
, int (*callback
)(buf_t
, void *), void *callback_arg
)
5211 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 78)) | DBG_FUNC_START
, vp
, wbp
->cl_scmap
, 0, 0, 0);
5213 for (cl_index
= 0; cl_index
< wbp
->cl_number
; cl_index
++) {
5215 struct cl_extent cl
;
5217 for (cl
.b_addr
= wbp
->cl_clusters
[cl_index
].b_addr
; cl
.b_addr
< wbp
->cl_clusters
[cl_index
].e_addr
; cl
.b_addr
++) {
5219 if (ubc_page_op(vp
, (off_t
)(cl
.b_addr
* PAGE_SIZE_64
), 0, NULL
, &flags
) == KERN_SUCCESS
) {
5220 if (flags
& UPL_POP_DIRTY
) {
5221 cl
.e_addr
= cl
.b_addr
+ 1;
5223 sparse_cluster_add(&(wbp
->cl_scmap
), vp
, &cl
, EOF
, callback
, callback_arg
);
5230 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 78)) | DBG_FUNC_END
, vp
, wbp
->cl_scmap
, 0, 0, 0);
5235 * sparse_cluster_push must be called with the write-behind lock held if the scmap is
5236 * still associated with the write-behind context... however, if the scmap has been disassociated
5237 * from the write-behind context (the cluster_push case), the wb lock is not held
5240 sparse_cluster_push(void **scmap
, vnode_t vp
, off_t EOF
, int push_flag
, int (*callback
)(buf_t
, void *), void *callback_arg
)
5242 struct cl_extent cl
;
5246 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 79)) | DBG_FUNC_START
, vp
, (*scmap
), 0, push_flag
, 0);
5248 if (push_flag
& PUSH_ALL
)
5249 vfs_drt_control(scmap
, 1);
5252 if (vfs_drt_get_cluster(scmap
, &offset
, &length
) != KERN_SUCCESS
)
5255 cl
.b_addr
= (daddr64_t
)(offset
/ PAGE_SIZE_64
);
5256 cl
.e_addr
= (daddr64_t
)((offset
+ length
) / PAGE_SIZE_64
);
5258 cluster_push_now(vp
, &cl
, EOF
, push_flag
& IO_PASSIVE
, callback
, callback_arg
);
5260 if ( !(push_flag
& PUSH_ALL
) )
5263 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 79)) | DBG_FUNC_END
, vp
, (*scmap
), 0, 0, 0);
5268 * sparse_cluster_add is called with the write behind lock held
5271 sparse_cluster_add(void **scmap
, vnode_t vp
, struct cl_extent
*cl
, off_t EOF
, int (*callback
)(buf_t
, void *), void *callback_arg
)
5277 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 80)) | DBG_FUNC_START
, (*scmap
), 0, cl
->b_addr
, (int)cl
->e_addr
, 0);
5279 offset
= (off_t
)(cl
->b_addr
* PAGE_SIZE_64
);
5280 length
= ((u_int
)(cl
->e_addr
- cl
->b_addr
)) * PAGE_SIZE
;
5282 while (vfs_drt_mark_pages(scmap
, offset
, length
, &new_dirty
) != KERN_SUCCESS
) {
5284 * no room left in the map
5285 * only a partial update was done
5286 * push out some pages and try again
5288 sparse_cluster_push(scmap
, vp
, EOF
, 0, callback
, callback_arg
);
5290 offset
+= (new_dirty
* PAGE_SIZE_64
);
5291 length
-= (new_dirty
* PAGE_SIZE
);
5293 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 80)) | DBG_FUNC_END
, vp
, (*scmap
), 0, 0, 0);
5298 cluster_align_phys_io(vnode_t vp
, struct uio
*uio
, addr64_t usr_paddr
, u_int32_t xsize
, int flags
, int (*callback
)(buf_t
, void *), void *callback_arg
)
5300 upl_page_info_t
*pl
;
5310 if (flags
& IO_PASSIVE
)
5315 upl_flags
= UPL_SET_LITE
;
5317 if ( !(flags
& CL_READ
) ) {
5319 * "write" operation: let the UPL subsystem know
5320 * that we intend to modify the buffer cache pages
5323 upl_flags
|= UPL_WILL_MODIFY
;
5326 * indicate that there is no need to pull the
5327 * mapping for this page... we're only going
5328 * to read from it, not modify it.
5330 upl_flags
|= UPL_FILE_IO
;
5332 kret
= ubc_create_upl(vp
,
5333 uio
->uio_offset
& ~PAGE_MASK_64
,
5339 if (kret
!= KERN_SUCCESS
)
5342 if (!upl_valid_page(pl
, 0)) {
5344 * issue a synchronous read to cluster_io
5346 error
= cluster_io(vp
, upl
, 0, uio
->uio_offset
& ~PAGE_MASK_64
, PAGE_SIZE
,
5347 CL_READ
| bflag
, (buf_t
)NULL
, (struct clios
*)NULL
, callback
, callback_arg
);
5349 ubc_upl_abort_range(upl
, 0, PAGE_SIZE
, UPL_ABORT_DUMP_PAGES
| UPL_ABORT_FREE_ON_EMPTY
);
5355 ubc_paddr
= ((addr64_t
)upl_phys_page(pl
, 0) << 12) + (addr64_t
)(uio
->uio_offset
& PAGE_MASK_64
);
5358 * NOTE: There is no prototype for the following in BSD. It, and the definitions
5359 * of the defines for cppvPsrc, cppvPsnk, cppvFsnk, and cppvFsrc will be found in
5360 * osfmk/ppc/mappings.h. They are not included here because there appears to be no
5361 * way to do so without exporting them to kexts as well.
5363 if (flags
& CL_READ
)
5364 // copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsnk); /* Copy physical to physical and flush the destination */
5365 copypv(ubc_paddr
, usr_paddr
, xsize
, 2 | 1 | 4); /* Copy physical to physical and flush the destination */
5367 // copypv(usr_paddr, ubc_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsrc); /* Copy physical to physical and flush the source */
5368 copypv(usr_paddr
, ubc_paddr
, xsize
, 2 | 1 | 8); /* Copy physical to physical and flush the source */
5370 if ( !(flags
& CL_READ
) || (upl_valid_page(pl
, 0) && upl_dirty_page(pl
, 0))) {
5372 * issue a synchronous write to cluster_io
5374 error
= cluster_io(vp
, upl
, 0, uio
->uio_offset
& ~PAGE_MASK_64
, PAGE_SIZE
,
5375 bflag
, (buf_t
)NULL
, (struct clios
*)NULL
, callback
, callback_arg
);
5378 uio_update(uio
, (user_size_t
)xsize
);
5381 abort_flags
= UPL_ABORT_FREE_ON_EMPTY
;
5383 abort_flags
= UPL_ABORT_FREE_ON_EMPTY
| UPL_ABORT_DUMP_PAGES
;
5385 ubc_upl_abort_range(upl
, 0, PAGE_SIZE
, abort_flags
);
5393 cluster_copy_upl_data(struct uio
*uio
, upl_t upl
, int upl_offset
, int *io_resid
)
5401 upl_page_info_t
*pl
;
5405 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 34)) | DBG_FUNC_START
,
5406 (int)uio
->uio_offset
, upl_offset
, xsize
, 0, 0);
5408 segflg
= uio
->uio_segflg
;
5412 case UIO_USERSPACE32
:
5413 case UIO_USERISPACE32
:
5414 uio
->uio_segflg
= UIO_PHYS_USERSPACE32
;
5418 case UIO_USERISPACE
:
5419 uio
->uio_segflg
= UIO_PHYS_USERSPACE
;
5422 case UIO_USERSPACE64
:
5423 case UIO_USERISPACE64
:
5424 uio
->uio_segflg
= UIO_PHYS_USERSPACE64
;
5428 uio
->uio_segflg
= UIO_PHYS_SYSSPACE
;
5432 pl
= ubc_upl_pageinfo(upl
);
5434 pg_index
= upl_offset
/ PAGE_SIZE
;
5435 pg_offset
= upl_offset
& PAGE_MASK
;
5436 csize
= min(PAGE_SIZE
- pg_offset
, xsize
);
5438 while (xsize
&& retval
== 0) {
5441 paddr
= ((addr64_t
)upl_phys_page(pl
, pg_index
) << 12) + pg_offset
;
5443 retval
= uiomove64(paddr
, csize
, uio
);
5448 csize
= min(PAGE_SIZE
, xsize
);
5452 uio
->uio_segflg
= segflg
;
5454 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 34)) | DBG_FUNC_END
,
5455 (int)uio
->uio_offset
, xsize
, retval
, segflg
, 0);
5462 cluster_copy_ubc_data(vnode_t vp
, struct uio
*uio
, int *io_resid
, int mark_dirty
)
5465 return (cluster_copy_ubc_data_internal(vp
, uio
, io_resid
, mark_dirty
, 1));
5470 cluster_copy_ubc_data_internal(vnode_t vp
, struct uio
*uio
, int *io_resid
, int mark_dirty
, int take_reference
)
5477 memory_object_control_t control
;
5479 io_size
= *io_resid
;
5481 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 34)) | DBG_FUNC_START
,
5482 (int)uio
->uio_offset
, 0, io_size
, 0, 0);
5484 control
= ubc_getobject(vp
, UBC_FLAGS_NONE
);
5486 if (control
== MEMORY_OBJECT_CONTROL_NULL
) {
5487 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 34)) | DBG_FUNC_END
,
5488 (int)uio
->uio_offset
, io_size
, retval
, 3, 0);
5492 segflg
= uio
->uio_segflg
;
5496 case UIO_USERSPACE32
:
5497 case UIO_USERISPACE32
:
5498 uio
->uio_segflg
= UIO_PHYS_USERSPACE32
;
5501 case UIO_USERSPACE64
:
5502 case UIO_USERISPACE64
:
5503 uio
->uio_segflg
= UIO_PHYS_USERSPACE64
;
5507 case UIO_USERISPACE
:
5508 uio
->uio_segflg
= UIO_PHYS_USERSPACE
;
5512 uio
->uio_segflg
= UIO_PHYS_SYSSPACE
;
5516 if ( (io_size
= *io_resid
) ) {
5517 start_offset
= (int)(uio
->uio_offset
& PAGE_MASK_64
);
5518 xsize
= uio_resid(uio
);
5520 retval
= memory_object_control_uiomove(control
, uio
->uio_offset
- start_offset
, uio
,
5521 start_offset
, io_size
, mark_dirty
, take_reference
);
5522 xsize
-= uio_resid(uio
);
5525 uio
->uio_segflg
= segflg
;
5526 *io_resid
= io_size
;
5528 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 34)) | DBG_FUNC_END
,
5529 (int)uio
->uio_offset
, io_size
, retval
, 0x80000000 | segflg
, 0);
5536 is_file_clean(vnode_t vp
, off_t filesize
)
5540 int total_dirty
= 0;
5542 for (f_offset
= 0; f_offset
< filesize
; f_offset
+= PAGE_SIZE_64
) {
5543 if (ubc_page_op(vp
, f_offset
, 0, NULL
, &flags
) == KERN_SUCCESS
) {
5544 if (flags
& UPL_POP_DIRTY
) {
/*
 * Dirty region tracking/clustering mechanism.
 *
 * This code (vfs_drt_*) provides a mechanism for tracking and clustering
 * dirty regions within a larger space (file).  It is primarily intended to
 * support clustering in large files with many dirty areas.
 *
 * The implementation assumes that the dirty regions are pages.
 *
 * To represent dirty pages within the file, we store bit vectors in a
 * variable-size circular hash.
 */
/*
 * Bitvector size.  This determines the number of pages we group in a
 * single hashtable entry.  Each hashtable entry is aligned to this
 * size within the file.
 */
#define DRT_BITVECTOR_PAGES		256

/*
 * File offset handling.
 *
 * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES;
 * the correct formula is  (~(DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1)
 */
#define DRT_ADDRESS_MASK		(~((1 << 20) - 1))
#define DRT_ALIGN_ADDRESS(addr)		((addr) & DRT_ADDRESS_MASK)
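/*
 * Illustrative arithmetic (not from the original source): with 4KB pages,
 * DRT_BITVECTOR_PAGES * PAGE_SIZE = 256 * 4096 = 1MiB, so each hashtable
 * entry covers a 1MiB-aligned window of the file and, for example,
 * DRT_ALIGN_ADDRESS(0x00123456) == 0x00100000.
 */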
/*
 * Hashtable address field handling.
 *
 * The low-order bits of the hashtable address are used to conserve
 * space.
 *
 * DRT_HASH_COUNT_MASK must be large enough to store the range
 * 0-DRT_BITVECTOR_PAGES inclusive, as well as have one value
 * to indicate that the bucket is actually unoccupied.
 */
#define DRT_HASH_GET_ADDRESS(scm, i)	((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK)
#define DRT_HASH_SET_ADDRESS(scm, i, a)									\
	do {												\
		(scm)->scm_hashtable[(i)].dhe_control =							\
		    ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a);	\
	} while (0)
#define DRT_HASH_COUNT_MASK		0x1ff
#define DRT_HASH_GET_COUNT(scm, i)	((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK)
#define DRT_HASH_SET_COUNT(scm, i, c)									\
	do {												\
		(scm)->scm_hashtable[(i)].dhe_control =							\
		    ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK); \
	} while (0)
#define DRT_HASH_CLEAR(scm, i)										\
	do {												\
		(scm)->scm_hashtable[(i)].dhe_control = 0;						\
	} while (0)
#define DRT_HASH_VACATE(scm, i)		DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK)
#define DRT_HASH_VACANT(scm, i)		(DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK)
#define DRT_HASH_COPY(oscm, oi, scm, i)									\
	do {												\
		(scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control;	\
		DRT_BITVECTOR_COPY(oscm, oi, scm, i);							\
	} while (0)
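/*
 * Illustrative packing (not from the original source): dhe_control holds the
 * aligned file offset in its high bits and the dirty-page count in its low
 * 9 bits (DRT_HASH_COUNT_MASK == 0x1ff).  A real count can only be
 * 0..DRT_BITVECTOR_PAGES (0..256), so the impossible value 0x1ff is what
 * DRT_HASH_VACATE/DRT_HASH_VACANT use to flag an unoccupied bucket.
 */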
/*
 * Hash table moduli.
 *
 * Since the hashtable entry's size is dependent on the size of
 * the bitvector, and since the hashtable size is constrained to
 * both being prime and fitting within the desired allocation
 * size, these values need to be manually determined.
 *
 * For DRT_BITVECTOR_SIZE = 256, the entry size is 40 bytes.
 *
 * The small hashtable allocation is 1024 bytes, so the modulus is 23.
 * The large hashtable allocation is 16384 bytes, so the modulus is 401.
 */
#define DRT_HASH_SMALL_MODULUS	23
#define DRT_HASH_LARGE_MODULUS	401

/*
 * Physical memory required before the large hash modulus is permitted.
 *
 * On small memory systems, the large hash modulus can lead to physical
 * memory starvation, so we avoid using it there.
 */
#define DRT_HASH_LARGE_MEMORY_REQUIRED	(1024LL * 1024LL * 1024LL)	/* 1GiB */

#define DRT_SMALL_ALLOCATION	1024	/* 104 bytes spare */
#define DRT_LARGE_ALLOCATION	16384	/* 344 bytes spare */

/* *** nothing below here has secret dependencies on DRT_BITVECTOR_PAGES *** */
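/*
 * Illustrative arithmetic (not from the original source): each entry is
 * 8 bytes of dhe_control plus (256 / 32) * 4 = 32 bytes of bitvector,
 * i.e. 40 bytes.  23 * 40 = 920 bytes fits the 1024-byte small allocation
 * (104 bytes left over for the scm_* header fields), and 401 * 40 = 16040
 * bytes fits the 16384-byte large allocation (344 bytes left over),
 * matching the 'spare' figures noted above.
 */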
/*
 * Hashtable bitvector handling.
 *
 * Bitvector fields are 32 bits long.
 */

#define DRT_HASH_SET_BIT(scm, i, bit)				\
	(scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] |= (1 << ((bit) % 32))

#define DRT_HASH_CLEAR_BIT(scm, i, bit)				\
	(scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] &= ~(1 << ((bit) % 32))

#define DRT_HASH_TEST_BIT(scm, i, bit)				\
	((scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] & (1 << ((bit) % 32)))

#define DRT_BITVECTOR_CLEAR(scm, i)				\
	bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))

#define DRT_BITVECTOR_COPY(oscm, oi, scm, i)			\
	bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0],	\
	    &(scm)->scm_hashtable[(i)].dhe_bitvector[0],	\
	    (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
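/*
 * Illustrative indexing (not from the original source): page 37 of a
 * bucket's window lives in dhe_bitvector[37 / 32] == dhe_bitvector[1] at
 * bit (37 % 32) == 5, so DRT_HASH_SET_BIT(scm, i, 37) ORs in (1 << 5).
 */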
struct vfs_drt_hashentry {
	u_int64_t	dhe_control;
	u_int32_t	dhe_bitvector[DRT_BITVECTOR_PAGES / 32];
};
/*
 * Dirty Region Tracking structure.
 *
 * The hashtable is allocated entirely inside the DRT structure.
 *
 * The hash is a simple circular prime modulus arrangement, the structure
 * is resized from small to large if it overflows.
 */
struct vfs_drt_clustermap {
	u_int32_t		scm_magic;	/* sanity/detection */
#define DRT_SCM_MAGIC		0x12020003
	u_int32_t		scm_modulus;	/* current ring size */
	u_int32_t		scm_buckets;	/* number of occupied buckets */
	u_int32_t		scm_lastclean;	/* last entry we cleaned */
	u_int32_t		scm_iskips;	/* number of slot skips */

	struct vfs_drt_hashentry scm_hashtable[0];
};


#define DRT_HASH(scm, addr)		((addr) % (scm)->scm_modulus)
#define DRT_HASH_NEXT(scm, addr)	(((addr) + 1) % (scm)->scm_modulus)
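/*
 * Illustrative probing (not from the original source): with the small
 * modulus of 23 and 1MiB windows, the aligned offsets 0x0, 0x100000 and
 * 0x200000 hash to buckets 0, 6 and 12 (1048576 % 23 == 6).  On a
 * collision, DRT_HASH_NEXT() steps to the following bucket and wraps at
 * the modulus: open addressing with linear probing.
 */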
/*
 * Debugging codes and arguments.
 */
#define DRT_DEBUG_EMPTYFREE	(FSDBG_CODE(DBG_FSRW, 82)) /* nil */
#define DRT_DEBUG_RETCLUSTER	(FSDBG_CODE(DBG_FSRW, 83)) /* offset, length */
#define DRT_DEBUG_ALLOC		(FSDBG_CODE(DBG_FSRW, 84)) /* copycount */
#define DRT_DEBUG_INSERT	(FSDBG_CODE(DBG_FSRW, 85)) /* offset, iskip */
#define DRT_DEBUG_MARK		(FSDBG_CODE(DBG_FSRW, 86)) /* offset, length,
							    * dirty */
							   /* 0, setcount */
							   /* 1 (clean, no map) */
							   /* 2 (map alloc fail) */
							   /* 3, resid (partial) */
#define DRT_DEBUG_6		(FSDBG_CODE(DBG_FSRW, 87))
#define DRT_DEBUG_SCMDATA	(FSDBG_CODE(DBG_FSRW, 88)) /* modulus, buckets,
							    * lastclean, iskips */
static kern_return_t	vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp);
static kern_return_t	vfs_drt_free_map(struct vfs_drt_clustermap *cmap);
static kern_return_t	vfs_drt_search_index(struct vfs_drt_clustermap *cmap,
	u_int64_t offset, int *indexp);
static kern_return_t	vfs_drt_get_index(struct vfs_drt_clustermap **cmapp,
	u_int64_t offset, int *indexp, int recursed);
static kern_return_t	vfs_drt_do_mark_pages(
	void		**cmapp,
	u_int64_t	offset,
	u_int		length,
	u_int		*setcountp,
	int		dirty);
static void		vfs_drt_trace(
	struct vfs_drt_clustermap *cmap,
	int code,
	int arg1,
	int arg2,
	int arg3,
	int arg4);
/*
 * Allocate and initialise a sparse cluster map.
 *
 * Will allocate a new map, resize or compact an existing map.
 *
 * XXX we should probably have at least one intermediate map size,
 * as the 1:16 ratio seems a bit drastic.
 */
static kern_return_t
vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp)
{
5760 struct vfs_drt_clustermap
*cmap
, *ocmap
;
5764 int nsize
, active_buckets
, index
, copycount
;
5771 * Decide on the size of the new map.
5773 if (ocmap
== NULL
) {
5774 nsize
= DRT_HASH_SMALL_MODULUS
;
5776 /* count the number of active buckets in the old map */
5778 for (i
= 0; i
< ocmap
->scm_modulus
; i
++) {
5779 if (!DRT_HASH_VACANT(ocmap
, i
) &&
5780 (DRT_HASH_GET_COUNT(ocmap
, i
) != 0))
5784 * If we're currently using the small allocation, check to
5785 * see whether we should grow to the large one.
5787 if (ocmap
->scm_modulus
== DRT_HASH_SMALL_MODULUS
) {
5789 * If the ring is nearly full and we are allowed to
5790 * use the large modulus, upgrade.
5792 if ((active_buckets
> (DRT_HASH_SMALL_MODULUS
- 5)) &&
5793 (max_mem
>= DRT_HASH_LARGE_MEMORY_REQUIRED
)) {
5794 nsize
= DRT_HASH_LARGE_MODULUS
;
5796 nsize
= DRT_HASH_SMALL_MODULUS
;
5799 /* already using the large modulus */
5800 nsize
= DRT_HASH_LARGE_MODULUS
;
5802 * If the ring is completely full, there's
5803 * nothing useful for us to do. Behave as
5804 * though we had compacted into the new
5807 if (active_buckets
>= DRT_HASH_LARGE_MODULUS
)
5808 return(KERN_SUCCESS
);
5813 * Allocate and initialise the new map.
5816 kret
= kmem_alloc(kernel_map
, (vm_offset_t
*)&cmap
,
5817 (nsize
== DRT_HASH_SMALL_MODULUS
) ? DRT_SMALL_ALLOCATION
: DRT_LARGE_ALLOCATION
);
5818 if (kret
!= KERN_SUCCESS
)
5820 cmap
->scm_magic
= DRT_SCM_MAGIC
;
5821 cmap
->scm_modulus
= nsize
;
5822 cmap
->scm_buckets
= 0;
5823 cmap
->scm_lastclean
= 0;
5824 cmap
->scm_iskips
= 0;
5825 for (i
= 0; i
< cmap
->scm_modulus
; i
++) {
5826 DRT_HASH_CLEAR(cmap
, i
);
5827 DRT_HASH_VACATE(cmap
, i
);
5828 DRT_BITVECTOR_CLEAR(cmap
, i
);
5832 * If there's an old map, re-hash entries from it into the new map.
5835 if (ocmap
!= NULL
) {
5836 for (i
= 0; i
< ocmap
->scm_modulus
; i
++) {
5837 /* skip empty buckets */
5838 if (DRT_HASH_VACANT(ocmap
, i
) ||
5839 (DRT_HASH_GET_COUNT(ocmap
, i
) == 0))
5842 offset
= DRT_HASH_GET_ADDRESS(ocmap
, i
);
5843 kret
= vfs_drt_get_index(&cmap
, offset
, &index
, 1);
5844 if (kret
!= KERN_SUCCESS
) {
5845 /* XXX need to bail out gracefully here */
5846 panic("vfs_drt: new cluster map mysteriously too small");
5850 DRT_HASH_COPY(ocmap
, i
, cmap
, index
);
5855 /* log what we've done */
5856 vfs_drt_trace(cmap
, DRT_DEBUG_ALLOC
, copycount
, 0, 0, 0);
5859 * It's important to ensure that *cmapp always points to
5860 * a valid map, so we must overwrite it before freeing
5864 if (ocmap
!= NULL
) {
5865 /* emit stats into trace buffer */
5866 vfs_drt_trace(ocmap
, DRT_DEBUG_SCMDATA
,
5869 ocmap
->scm_lastclean
,
5872 vfs_drt_free_map(ocmap
);
5874 return(KERN_SUCCESS
);
/*
 * Free a sparse cluster map.
 */
static kern_return_t
vfs_drt_free_map(struct vfs_drt_clustermap *cmap)
{
	kmem_free(kernel_map, (vm_offset_t)cmap,
		  (cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION);
	return(KERN_SUCCESS);
}
/*
 * Find the hashtable slot currently occupied by an entry for the supplied offset.
 */
static kern_return_t
vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *indexp)
{
	int		index;
	u_int32_t	i;

	offset = DRT_ALIGN_ADDRESS(offset);
	index = DRT_HASH(cmap, offset);

	/* traverse the hashtable */
	for (i = 0; i < cmap->scm_modulus; i++) {

		/*
		 * If the slot is vacant, we can stop.
		 */
		if (DRT_HASH_VACANT(cmap, index))
			break;

		/*
		 * If the address matches our offset, we have success.
		 */
		if (DRT_HASH_GET_ADDRESS(cmap, index) == offset) {
			*indexp = index;
			return(KERN_SUCCESS);
		}

		/*
		 * Move to the next slot, try again.
		 */
		index = DRT_HASH_NEXT(cmap, index);
	}
	/*
	 * It's not there.
	 */
	return(KERN_FAILURE);
}
/*
 * Find the hashtable slot for the supplied offset.  If we haven't allocated
 * one yet, allocate one and populate the address field.  Note that it will
 * not have a nonzero page count and thus will still technically be free, so
 * in the case where we are called to clean pages, the slot will remain free.
 */
static kern_return_t
vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *indexp, int recursed)
{
	struct vfs_drt_clustermap *cmap;
5946 /* look for an existing entry */
5947 kret
= vfs_drt_search_index(cmap
, offset
, indexp
);
5948 if (kret
== KERN_SUCCESS
)
5951 /* need to allocate an entry */
5952 offset
= DRT_ALIGN_ADDRESS(offset
);
5953 index
= DRT_HASH(cmap
, offset
);
5955 /* scan from the index forwards looking for a vacant slot */
5956 for (i
= 0; i
< cmap
->scm_modulus
; i
++) {
5958 if (DRT_HASH_VACANT(cmap
, index
) || DRT_HASH_GET_COUNT(cmap
,index
) == 0) {
5959 cmap
->scm_buckets
++;
5960 if (index
< cmap
->scm_lastclean
)
5961 cmap
->scm_lastclean
= index
;
5962 DRT_HASH_SET_ADDRESS(cmap
, index
, offset
);
5963 DRT_HASH_SET_COUNT(cmap
, index
, 0);
5964 DRT_BITVECTOR_CLEAR(cmap
, index
);
5966 vfs_drt_trace(cmap
, DRT_DEBUG_INSERT
, (int)offset
, i
, 0, 0);
5967 return(KERN_SUCCESS
);
5969 cmap
->scm_iskips
+= i
;
5970 index
= DRT_HASH_NEXT(cmap
, index
);
5974 * We haven't found a vacant slot, so the map is full. If we're not
5975 * already recursed, try reallocating/compacting it.
5978 return(KERN_FAILURE
);
5979 kret
= vfs_drt_alloc_map(cmapp
);
5980 if (kret
== KERN_SUCCESS
) {
5981 /* now try to insert again */
5982 kret
= vfs_drt_get_index(cmapp
, offset
, indexp
, 1);
5988 * Implementation of set dirty/clean.
5990 * In the 'clean' case, not finding a map is OK.
5992 static kern_return_t
5993 vfs_drt_do_mark_pages(
6000 struct vfs_drt_clustermap
*cmap
, **cmapp
;
6002 int i
, index
, pgoff
, pgcount
, setcount
, ecount
;
6004 cmapp
= (struct vfs_drt_clustermap
**)private;
6007 vfs_drt_trace(cmap
, DRT_DEBUG_MARK
| DBG_FUNC_START
, (int)offset
, (int)length
, dirty
, 0);
6009 if (setcountp
!= NULL
)
6012 /* allocate a cluster map if we don't already have one */
6014 /* no cluster map, nothing to clean */
6016 vfs_drt_trace(cmap
, DRT_DEBUG_MARK
| DBG_FUNC_END
, 1, 0, 0, 0);
6017 return(KERN_SUCCESS
);
6019 kret
= vfs_drt_alloc_map(cmapp
);
6020 if (kret
!= KERN_SUCCESS
) {
6021 vfs_drt_trace(cmap
, DRT_DEBUG_MARK
| DBG_FUNC_END
, 2, 0, 0, 0);
6028 * Iterate over the length of the region.
6030 while (length
> 0) {
6032 * Get the hashtable index for this offset.
6034 * XXX this will add blank entries if we are clearing a range
6035 * that hasn't been dirtied.
6037 kret
= vfs_drt_get_index(cmapp
, offset
, &index
, 0);
6038 cmap
= *cmapp
; /* may have changed! */
6039 /* this may be a partial-success return */
6040 if (kret
!= KERN_SUCCESS
) {
6041 if (setcountp
!= NULL
)
6042 *setcountp
= setcount
;
6043 vfs_drt_trace(cmap
, DRT_DEBUG_MARK
| DBG_FUNC_END
, 3, (int)length
, 0, 0);
6049 * Work out how many pages we're modifying in this
6052 pgoff
= (offset
- DRT_ALIGN_ADDRESS(offset
)) / PAGE_SIZE
;
6053 pgcount
= min((length
/ PAGE_SIZE
), (DRT_BITVECTOR_PAGES
- pgoff
));
6056 * Iterate over pages, dirty/clearing as we go.
6058 ecount
= DRT_HASH_GET_COUNT(cmap
, index
);
6059 for (i
= 0; i
< pgcount
; i
++) {
6061 if (!DRT_HASH_TEST_BIT(cmap
, index
, pgoff
+ i
)) {
6062 DRT_HASH_SET_BIT(cmap
, index
, pgoff
+ i
);
6067 if (DRT_HASH_TEST_BIT(cmap
, index
, pgoff
+ i
)) {
6068 DRT_HASH_CLEAR_BIT(cmap
, index
, pgoff
+ i
);
6074 DRT_HASH_SET_COUNT(cmap
, index
, ecount
);
6076 offset
+= pgcount
* PAGE_SIZE
;
6077 length
-= pgcount
* PAGE_SIZE
;
6079 if (setcountp
!= NULL
)
6080 *setcountp
= setcount
;
6082 vfs_drt_trace(cmap
, DRT_DEBUG_MARK
| DBG_FUNC_END
, 0, setcount
, 0, 0);
6084 return(KERN_SUCCESS
);
/*
 * Mark a set of pages as dirty/clean.
 *
 * This is a public interface.
 *
 * cmapp
 *	Pointer to storage suitable for holding a pointer.  Note that
 *	this must either be NULL or a value set by this function.
 *
 * size
 *	Current file size in bytes.
 *
 * offset
 *	Offset of the first page to be marked as dirty, in bytes.  Must be
 *	page-aligned.
 *
 * length
 *	Length of dirty region, in bytes.  Must be a multiple of PAGE_SIZE.
 *
 * setcountp
 *	Number of pages newly marked dirty by this call (optional).
 *
 * Returns KERN_SUCCESS if all the pages were successfully marked.
 */
static kern_return_t
vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp)
{
	/* XXX size unused, drop from interface */
	return(vfs_drt_do_mark_pages(cmapp, offset, length, setcountp, 1));
}

static kern_return_t
vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length)
{
	return(vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0));
}
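/*
 * Sketch (not part of the original source): the mark/drain cycle the sparse
 * cluster code above builds on.  sparse_cluster_add() marks pages and, when
 * the map fills (partial success), pushes some clusters and retries;
 * sparse_cluster_push() drains by repeatedly asking for the next dirty
 * cluster.  The wrapper below is hypothetical.
 */
#if 0
static void
example_drain_map(void **scmap)
{
	off_t	offset;
	u_int	length;

	/* vfs_drt_get_cluster() releases the map once the last dirty page is returned */
	while (vfs_drt_get_cluster(scmap, &offset, &length) == KERN_SUCCESS) {
		/* issue a write for [offset, offset + length) here */
	}
}
#endif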
/*
 * Get a cluster of dirty pages.
 *
 * This is a public interface.
 *
 * cmapp
 *	Pointer to storage managed by drt_mark_pages.  Note that this must
 *	be NULL or a value set by drt_mark_pages.
 *
 * offsetp
 *	Returns the byte offset into the file of the first page in the cluster.
 *
 * lengthp
 *	Returns the length in bytes of the cluster of dirty pages.
 *
 * Returns success if a cluster was found.  If KERN_FAILURE is returned, there
 * are no dirty pages meeting the minimum size criteria.  Private storage will
 * be released if there are no more dirty pages left in the map
 *
 */
static kern_return_t
vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp)
{
	struct vfs_drt_clustermap *cmap;
	u_int64_t	offset;
	u_int		length;
	u_int32_t	j;
	int		index, i, fs, ls;
6156 if ((cmapp
== NULL
) || (*cmapp
== NULL
))
6157 return(KERN_FAILURE
);
6160 /* walk the hashtable */
6161 for (offset
= 0, j
= 0; j
< cmap
->scm_modulus
; offset
+= (DRT_BITVECTOR_PAGES
* PAGE_SIZE
), j
++) {
6162 index
= DRT_HASH(cmap
, offset
);
6164 if (DRT_HASH_VACANT(cmap
, index
) || (DRT_HASH_GET_COUNT(cmap
, index
) == 0))
6167 /* scan the bitfield for a string of bits */
6170 for (i
= 0; i
< DRT_BITVECTOR_PAGES
; i
++) {
6171 if (DRT_HASH_TEST_BIT(cmap
, index
, i
)) {
6177 /* didn't find any bits set */
6178 panic("vfs_drt: entry summary count > 0 but no bits set in map");
6180 for (ls
= 0; i
< DRT_BITVECTOR_PAGES
; i
++, ls
++) {
6181 if (!DRT_HASH_TEST_BIT(cmap
, index
, i
))
6185 /* compute offset and length, mark pages clean */
6186 offset
= DRT_HASH_GET_ADDRESS(cmap
, index
) + (PAGE_SIZE
* fs
);
6187 length
= ls
* PAGE_SIZE
;
6188 vfs_drt_do_mark_pages(cmapp
, offset
, length
, NULL
, 0);
6189 cmap
->scm_lastclean
= index
;
6191 /* return successful */
6192 *offsetp
= (off_t
)offset
;
6195 vfs_drt_trace(cmap
, DRT_DEBUG_RETCLUSTER
, (int)offset
, (int)length
, 0, 0);
6196 return(KERN_SUCCESS
);
6199 * We didn't find anything... hashtable is empty
6200 * emit stats into trace buffer and
6203 vfs_drt_trace(cmap
, DRT_DEBUG_SCMDATA
,
6206 cmap
->scm_lastclean
,
6209 vfs_drt_free_map(cmap
);
6212 return(KERN_FAILURE
);
6216 static kern_return_t
6217 vfs_drt_control(void **cmapp
, int op_type
)
6219 struct vfs_drt_clustermap
*cmap
;
6222 if ((cmapp
== NULL
) || (*cmapp
== NULL
))
6223 return(KERN_FAILURE
);
6228 /* emit stats into trace buffer */
6229 vfs_drt_trace(cmap
, DRT_DEBUG_SCMDATA
,
6232 cmap
->scm_lastclean
,
6235 vfs_drt_free_map(cmap
);
6240 cmap
->scm_lastclean
= 0;
6243 return(KERN_SUCCESS
);
/*
 * Emit a summary of the state of the clustermap into the trace buffer
 * along with some caller-provided data.
 */
#if KDEBUG
static void
vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, int code, int arg1, int arg2, int arg3, int arg4)
{
	KERNEL_DEBUG(code, arg1, arg2, arg3, arg4, 0);
}
#else
static void
vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, __unused int code,
	      __unused int arg1, __unused int arg2, __unused int arg3,
	      __unused int arg4)
{
}
#endif
/*
 * Perform basic sanity check on the hash entry summary count
 * vs. the actual bits set in the entry.
 */
static void
vfs_drt_sanity(struct vfs_drt_clustermap *cmap)
{
	int index, i;
	int bits_on;

	for (index = 0; index < cmap->scm_modulus; index++) {
		if (DRT_HASH_VACANT(cmap, index))
			continue;

		for (bits_on = 0, i = 0; i < DRT_BITVECTOR_PAGES; i++) {
			if (DRT_HASH_TEST_BIT(cmap, index, i))
				bits_on++;
		}
		if (bits_on != DRT_HASH_GET_COUNT(cmap, index))
			panic("bits_on = %d,  index = %d\n", bits_on, index);
	}
}