/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.10 (Berkeley) 3/28/95
 */
#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/buf_internal.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/uio_internal.h>
#include <libkern/libkern.h>
#include <machine/machine_routines.h>

#include <sys/ubc_internal.h>
#include <vm/vnode_pager.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_map.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>
#include <libkern/OSAtomic.h>

#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
#define CL_WRITE	0x02
#define CL_ASYNC	0x04
#define CL_COMMIT	0x08
#define CL_PAGEOUT	0x10
#define CL_NOZERO	0x40
#define CL_PAGEIN	0x80
#define CL_DEV_MEMORY	0x100
#define CL_PRESERVE	0x200
#define CL_THROTTLE	0x400
#define CL_KEEPCACHED	0x800
#define CL_DIRECT_IO	0x1000
#define CL_PASSIVE	0x2000
#define CL_IOSTREAMING	0x4000

#define MAX_VECTOR_UPL_ELEMENTS	8
#define MAX_VECTOR_UPL_SIZE	(2 * MAX_UPL_SIZE) * PAGE_SIZE
extern upl_t vector_upl_create(vm_offset_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_iostate(upl_t, upl_t, vm_offset_t, u_int32_t);
struct clios {
	u_int	io_completed;	/* amount of io that has currently completed */
	u_int	io_issued;	/* amount of io that was successfully issued */
	int	io_error;	/* error code of first error encountered */
	int	io_wanted;	/* someone is sleeping waiting for a change in state */
};

static lck_grp_t	*cl_mtx_grp;
static lck_attr_t	*cl_mtx_attr;
static lck_grp_attr_t	*cl_mtx_grp_attr;
static lck_mtx_t	*cl_mtxp;

#define	PUSH_DELAY	0x01
#define PUSH_ALL	0x02
#define	PUSH_SYNC	0x04
static void cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset);
static void cluster_wait_IO(buf_t cbp_head, int async);
static void cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait);

static int cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length);

static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
		      int flags, buf_t real_bp, struct clios *iostate, int (*)(buf_t, void *), void *callback_arg);
static int cluster_iodone(buf_t bp, void *callback_arg);
static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags);
static int cluster_hard_throttle_on(vnode_t vp, uint32_t);

static void cluster_syncup(vnode_t vp, off_t newEOF, int (*)(buf_t, void *), void *callback_arg);

static void cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference);
static int cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference);

static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags,
			     int (*)(buf_t, void *), void *callback_arg);
static int cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
			       int flags, int (*)(buf_t, void *), void *callback_arg);
static int cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
			       int (*)(buf_t, void *), void *callback_arg, int flags);

static int cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF,
			      off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg);
static int cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF,
				int *write_type, u_int32_t *write_length, int flags, int (*)(buf_t, void *), void *callback_arg);
static int cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF,
				int *write_type, u_int32_t *write_length, int (*)(buf_t, void *), void *callback_arg, int bflag);

static int cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*)(buf_t, void *), void *callback_arg);

static int cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
static void cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra, int (*callback)(buf_t, void *), void *callback_arg, int bflag);

static int cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg);

static int cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int (*)(buf_t, void *), void *callback_arg);

static void sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg);
static void sparse_cluster_push(void **cmapp, vnode_t vp, off_t EOF, int push_flag, int (*)(buf_t, void *), void *callback_arg);
static void sparse_cluster_add(void **cmapp, vnode_t vp, struct cl_extent *, off_t EOF, int (*)(buf_t, void *), void *callback_arg);

static kern_return_t vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp);
static kern_return_t vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp);
static kern_return_t vfs_drt_control(void **cmapp, int op_type);
/*
 * limit the internal I/O size so that we
 * can represent it in a 32 bit int
 */
#define MAX_IO_REQUEST_SIZE	(1024 * 1024 * 512)
#define MAX_IO_CONTIG_SIZE	(MAX_UPL_SIZE * PAGE_SIZE)
#define MIN_DIRECT_WRITE_SIZE	(4 * PAGE_SIZE)

#define IO_SCALE(vp, base)		(vp->v_mount->mnt_ioscale * base)
#define MAX_CLUSTER_SIZE(vp)		(cluster_max_io_size(vp->v_mount, CL_WRITE))
#define MAX_PREFETCH(vp, io_size)	(io_size * IO_SCALE(vp, 3))

int speculative_reads_disabled = 0;

/*
 * throttle the number of async writes that
 * can be outstanding on a single vnode
 * before we issue a synchronous write
 */
#define HARD_THROTTLE_MAXCNT	0
#define HARD_THROTTLE_MAXSIZE	(32 * 1024)

int hard_throttle_on_root = 0;
struct timeval priority_IO_timestamp_for_root;
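
/*
 * Illustrative sketch, not part of the original source: a rough worked
 * example of the sizing macros above, assuming a hypothetical mount with
 * mnt_ioscale == 2 and a 256KB cluster_max_io_size() for reads:
 *
 *	IO_SCALE(vp, 3)              -> 2 * 3 = 6
 *	MAX_PREFETCH(vp, 256 * 1024) -> 256KB * 6 = 1.5MB of speculative read-ahead
 *
 * The mnt_ioscale value and the 256KB figure are illustrative assumptions,
 * not values taken from this file.
 */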
void
cluster_init(void)
{
	/*
	 * allocate lock group attribute and group
	 */
	cl_mtx_grp_attr = lck_grp_attr_alloc_init();
	cl_mtx_grp = lck_grp_alloc_init("cluster I/O", cl_mtx_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	cl_mtx_attr = lck_attr_alloc_init();

	/*
	 * allocate and initialize mutex's used to protect updates and waits
	 * on the cluster_io context
	 */
	cl_mtxp = lck_mtx_alloc_init(cl_mtx_grp, cl_mtx_attr);

	if (cl_mtxp == NULL)
		panic("cluster_init: failed to allocate cl_mtxp");
}
uint32_t
cluster_max_io_size(mount_t mp, int type)
{
	uint32_t	max_io_size;
	uint32_t	segcnt;
	uint32_t	maxcnt;

	switch (type) {

	case CL_READ:
		segcnt = mp->mnt_segreadcnt;
		maxcnt = mp->mnt_maxreadcnt;
		break;
	case CL_WRITE:
		segcnt = mp->mnt_segwritecnt;
		maxcnt = mp->mnt_maxwritecnt;
		break;
	default:
		segcnt = min(mp->mnt_segreadcnt, mp->mnt_segwritecnt);
		maxcnt = min(mp->mnt_maxreadcnt, mp->mnt_maxwritecnt);
		break;
	}
	if (segcnt > MAX_UPL_SIZE) {
		/*
		 * don't allow a size beyond the max UPL size we can create
		 */
		segcnt = MAX_UPL_SIZE;
	}
	max_io_size = min((segcnt * PAGE_SIZE), maxcnt);

	if (max_io_size < (MAX_UPL_TRANSFER * PAGE_SIZE)) {
		/*
		 * don't allow a size smaller than the old fixed limit
		 */
		max_io_size = (MAX_UPL_TRANSFER * PAGE_SIZE);
	} else {
		/*
		 * make sure the size specified is a multiple of PAGE_SIZE
		 */
		max_io_size &= ~PAGE_MASK;
	}
	return (max_io_size);
}
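
/*
 * Illustrative sketch, not part of the original source: a filesystem that
 * wants to size its own transfers to what the cluster layer will accept
 * might cache these values at mount time.  'fs_mount_ctx' and
 * 'fs_set_io_chunk' are hypothetical names used only for illustration:
 *
 *	uint32_t read_chunk  = cluster_max_io_size(mp, CL_READ);
 *	uint32_t write_chunk = cluster_max_io_size(mp, CL_WRITE);
 *	fs_set_io_chunk(fs_mount_ctx, read_chunk, write_chunk);
 */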
#define CLW_ALLOCATE		0x01
#define CLW_RETURNLOCKED	0x02
#define CLW_IONOCACHE		0x04
#define CLW_IOPASSIVE		0x08
/*
 * if the read ahead context doesn't yet exist,
 * allocate and initialize it...
 * the vnode lock serializes multiple callers
 * during the actual assignment... first one
 * to grab the lock wins... the other callers
 * will release the now unnecessary storage
 *
 * once the context is present, try to grab (but don't block on)
 * the lock associated with it... if someone
 * else currently owns it, then the read
 * will run without read-ahead.  this allows
 * multiple readers to run in parallel and
 * since there's only 1 read ahead context,
 * there's no real loss in only allowing 1
 * reader to have read-ahead enabled.
 */
static struct cl_readahead *
cluster_get_rap(vnode_t vp)
{
	struct ubc_info		*ubc;
	struct cl_readahead	*rap;

	ubc = vp->v_ubcinfo;

	if ((rap = ubc->cl_rahead) == NULL) {
		MALLOC_ZONE(rap, struct cl_readahead *, sizeof *rap, M_CLRDAHEAD, M_WAITOK);

		bzero(rap, sizeof *rap);
		rap->cl_lastr = -1;
		lck_mtx_init(&rap->cl_lockr, cl_mtx_grp, cl_mtx_attr);

		vnode_lock(vp);

		if (ubc->cl_rahead == NULL)
			ubc->cl_rahead = rap;
		else {
			lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp);
			FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD);
			rap = ubc->cl_rahead;
		}
		vnode_unlock(vp);
	}
	if (lck_mtx_try_lock(&rap->cl_lockr) == TRUE)
		return (rap);

	return ((struct cl_readahead *)NULL);
}
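
/*
 * Illustrative sketch, not part of the original source: the intended calling
 * pattern is try-lock based, so a reader that loses the race simply proceeds
 * without read-ahead.  Assuming a caller that already holds a valid vnode_t
 * 'vp':
 *
 *	struct cl_readahead *rap;
 *
 *	if ((rap = cluster_get_rap(vp)) != NULL) {
 *		... update rap->cl_lastr and issue read-ahead ...
 *		lck_mtx_unlock(&rap->cl_lockr);
 *	}
 */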
/*
 * if the write behind context doesn't yet exist,
 * and CLW_ALLOCATE is specified, allocate and initialize it...
 * the vnode lock serializes multiple callers
 * during the actual assignment... first one
 * to grab the lock wins... the other callers
 * will release the now unnecessary storage
 *
 * if CLW_RETURNLOCKED is set, grab (blocking if necessary)
 * the lock associated with the write behind context before
 * returning
 */
static struct cl_writebehind *
cluster_get_wbp(vnode_t vp, int flags)
{
	struct ubc_info		*ubc;
	struct cl_writebehind	*wbp;

	ubc = vp->v_ubcinfo;

	if ((wbp = ubc->cl_wbehind) == NULL) {

		if ( !(flags & CLW_ALLOCATE))
			return ((struct cl_writebehind *)NULL);

		MALLOC_ZONE(wbp, struct cl_writebehind *, sizeof *wbp, M_CLWRBEHIND, M_WAITOK);

		bzero(wbp, sizeof *wbp);
		lck_mtx_init(&wbp->cl_lockw, cl_mtx_grp, cl_mtx_attr);

		vnode_lock(vp);

		if (ubc->cl_wbehind == NULL)
			ubc->cl_wbehind = wbp;
		else {
			lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
			FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
			wbp = ubc->cl_wbehind;
		}
		vnode_unlock(vp);
	}
	if (flags & CLW_RETURNLOCKED)
		lck_mtx_lock(&wbp->cl_lockw);

	return (wbp);
}
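
/*
 * Illustrative sketch, not part of the original source: a writer that must
 * have a write-behind context, and wants it locked on return, would pass
 * both flags:
 *
 *	struct cl_writebehind *wbp;
 *
 *	wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);
 *	... update the delayed-write state (e.g. wbp->cl_number) ...
 *	lck_mtx_unlock(&wbp->cl_lockw);
 */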
static void
cluster_syncup(vnode_t vp, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg)
{
	struct cl_writebehind *wbp;

	if ((wbp = cluster_get_wbp(vp, 0)) != NULL) {

		if (wbp->cl_number) {
			lck_mtx_lock(&wbp->cl_lockw);

			cluster_try_push(wbp, vp, newEOF, PUSH_ALL | PUSH_SYNC, callback, callback_arg);

			lck_mtx_unlock(&wbp->cl_lockw);
		}
	}
}
static int
cluster_hard_throttle_on(vnode_t vp, uint32_t hard_throttle)
{
	struct uthread	*ut;

	if (hard_throttle) {
		static struct timeval hard_throttle_maxelapsed = { 0, 200000 };

		if (vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV) {
			struct timeval elapsed;

			if (hard_throttle_on_root)
				return (1);

			microuptime(&elapsed);
			timevalsub(&elapsed, &priority_IO_timestamp_for_root);

			if (timevalcmp(&elapsed, &hard_throttle_maxelapsed, <))
				return (1);
		}
	}
	if (throttle_get_io_policy(&ut) == IOPOL_THROTTLE) {
		if (throttle_io_will_be_throttled(-1, vp->v_mount)) {
			return (1);
		}
	}
	return (0);
}
static int
cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags)
{
	int	upl_abort_code = 0;
	int	page_in  = 0;
	int	page_out = 0;

	if (io_flags & B_PHYS)
		/*
		 * direct write of any flavor, or a direct read that wasn't aligned
		 */
		ubc_upl_commit_range(upl, upl_offset, abort_size, UPL_COMMIT_FREE_ON_EMPTY);
	else {
		if (io_flags & B_PAGEIO) {
			if (io_flags & B_READ)
				page_in  = 1;
			else
				page_out = 1;
		}
		if (io_flags & B_CACHE)
			/*
			 * leave pages in the cache unchanged on error
			 */
			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
		else if (page_out && (error != ENXIO))
			/*
			 * transient error... leave pages unchanged
			 */
			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
		else if (page_in)
			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
		else
			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;

		ubc_upl_abort_range(upl, upl_offset, abort_size, upl_abort_code);
	}
	return (upl_abort_code);
}
static int
cluster_iodone(buf_t bp, void *callback_arg)
{
	int	b_flags;
	int	error;
	int	total_size;
	int	total_resid;
	int	upl_offset;
	int	zero_offset;
	int	pg_offset = 0;
	int	commit_size = 0;
	int	upl_flags = 0;
	int	transaction_size = 0;
	upl_t	upl;
	buf_t	cbp;
	buf_t	cbp_head;
	buf_t	cbp_next;
	buf_t	real_bp;
	struct	clios *iostate;
	boolean_t	transaction_complete = FALSE;

	cbp_head = (buf_t)(bp->b_trans_head);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_START,
		     cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);

	for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
		/*
		 * all I/O requests that are part of this transaction
		 * have to complete before we can process it
		 */
		if ( !(cbp->b_flags & B_DONE)) {

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
				     cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0);

			return 0;
		}
		if (cbp->b_flags & B_EOT)
			transaction_complete = TRUE;
	}
	if (transaction_complete == FALSE) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
			     cbp_head, 0, 0, 0, 0);

		return 0;
	}
	error       = 0;
	total_size  = 0;
	total_resid = 0;

	cbp         = cbp_head;
	upl_offset  = cbp->b_uploffset;
	upl         = cbp->b_upl;
	b_flags     = cbp->b_flags;
	real_bp     = cbp->b_real_bp;
	zero_offset = cbp->b_validend;
	iostate     = (struct clios *)cbp->b_iostate;

	if (real_bp)
		real_bp->b_dev = cbp->b_dev;

	while (cbp) {
		if ((cbp->b_flags & B_ERROR) && error == 0)
			error = cbp->b_error;

		total_resid += cbp->b_resid;
		total_size  += cbp->b_bcount;

		cbp_next = cbp->b_trans_next;

		if (cbp_next == NULL)
			/*
			 * compute the overall size of the transaction
			 * in case we created one that has 'holes' in it
			 * 'total_size' represents the amount of I/O we
			 * did, not the span of the transaction w/r to the UPL
			 */
			transaction_size = cbp->b_uploffset + cbp->b_bcount - upl_offset;

		if (cbp != cbp_head)
			free_io_buf(cbp);

		cbp = cbp_next;
	}
	if (error == 0 && total_resid)
		error = EIO;

	if (error == 0) {
		int	(*cliodone_func)(buf_t, void *) = (int (*)(buf_t, void *))(cbp_head->b_cliodone);

		if (cliodone_func != NULL) {
			cbp_head->b_bcount = transaction_size;

			error = (*cliodone_func)(cbp_head, callback_arg);
		}
	}
	if (zero_offset)
		cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp);

	free_io_buf(cbp_head);

	if (iostate) {
		int need_wakeup = 0;

		/*
		 * someone has issued multiple I/Os asynchronously
		 * and is waiting for them to complete (streaming)
		 */
		lck_mtx_lock_spin(cl_mtxp);

		if (error && iostate->io_error == 0)
			iostate->io_error = error;

		iostate->io_completed += total_size;

		if (iostate->io_wanted) {
			/*
			 * someone is waiting for the state of
			 * this io stream to change
			 */
			iostate->io_wanted = 0;
			need_wakeup = 1;
		}
		lck_mtx_unlock(cl_mtxp);

		if (need_wakeup)
			wakeup((caddr_t)&iostate->io_wanted);
	}
	if (b_flags & B_COMMIT_UPL) {

		pg_offset   = upl_offset & PAGE_MASK;
		commit_size = (pg_offset + transaction_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

		if (error)
			upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, commit_size, error, b_flags);
		else {
			upl_flags = UPL_COMMIT_FREE_ON_EMPTY;

			if ((b_flags & B_PHYS) && (b_flags & B_READ))
				upl_flags |= UPL_COMMIT_SET_DIRTY;

			if (b_flags & B_AGE)
				upl_flags |= UPL_COMMIT_INACTIVATE;

			ubc_upl_commit_range(upl, upl_offset - pg_offset, commit_size, upl_flags);
		}
	}
	if ((b_flags & B_NEED_IODONE) && real_bp) {
		if (error) {
			real_bp->b_flags |= B_ERROR;
			real_bp->b_error = error;
		}
		real_bp->b_resid = total_resid;

		buf_biodone(real_bp);
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
		     upl, upl_offset - pg_offset, commit_size, (error << 24) | upl_flags, 0);

	return (error);
}
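
/*
 * Illustrative sketch, not part of the original source: the 'clios' state
 * updated above pairs with waiters that sleep on io_wanted under cl_mtxp.
 * A waiter loop of roughly this shape (hedged; the field names follow the
 * struct clios declared earlier in this file) is what the wakeup() above
 * unblocks:
 *
 *	lck_mtx_lock(cl_mtxp);
 *	while (iostate.io_issued != iostate.io_completed) {
 *		iostate.io_wanted = 1;
 *		msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_io", NULL);
 *	}
 *	lck_mtx_unlock(cl_mtxp);
 */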
uint32_t
cluster_hard_throttle_limit(vnode_t vp, uint32_t *limit, uint32_t hard_throttle)
{
	if (cluster_hard_throttle_on(vp, hard_throttle)) {
		*limit = HARD_THROTTLE_MAXSIZE;
		return 1;
	}
	return 0;
}
void
cluster_zero(upl_t upl, upl_offset_t upl_offset, int size, buf_t bp)
{
	upl_page_info_t	*pl;
	addr64_t	zero_addr = 0;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_START,
		     upl_offset, size, bp, 0, 0);

	if (bp == NULL || bp->b_datap == 0) {

		pl = ubc_upl_pageinfo(upl);

		if (upl_device_page(pl) == TRUE) {
			zero_addr = ((addr64_t)upl_phys_page(pl, 0) << 12) + upl_offset;

			bzero_phys_nc(zero_addr, size);
		} else {
			while (size) {
				int	page_offset;
				int	page_index;
				int	zero_cnt;

				page_index  = upl_offset / PAGE_SIZE;
				page_offset = upl_offset & PAGE_MASK;

				zero_addr = ((addr64_t)upl_phys_page(pl, page_index) << 12) + page_offset;
				zero_cnt  = min(PAGE_SIZE - page_offset, size);

				bzero_phys(zero_addr, zero_cnt);

				size       -= zero_cnt;
				upl_offset += zero_cnt;
			}
		}
	} else
		bzero((caddr_t)((vm_offset_t)bp->b_datap + upl_offset), size);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_END,
		     upl_offset, size, 0, 0, 0);
}
static void
cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset)
{
	cbp_head->b_validend = zero_offset;
	cbp_tail->b_flags |= B_EOT;
}
static void
cluster_wait_IO(buf_t cbp_head, int async)
{
	buf_t	cbp;

	if (async) {
		/*
		 * async callback completion will not normally
		 * generate a wakeup upon I/O completion...
		 * by setting BL_WANTED, we will force a wakeup
		 * to occur as any outstanding I/Os complete...
		 * I/Os already completed will have BL_CALLDONE already
		 * set and we won't block in buf_biowait_callback..
		 * note that we're actually waiting for the bp to have
		 * completed the callback function... only then
		 * can we safely take back ownership of the bp
		 * need the main buf mutex in order to safely
		 * update b_lflags
		 */
		buf_list_lock();

		for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next)
			cbp->b_lflags |= BL_WANTED;

		buf_list_unlock();
	}
	for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
		if (async)
			buf_biowait_callback(cbp);
		else
			buf_biowait(cbp);
	}
}
static void
cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait)
{
	buf_t	cbp;
	int	error;

	/*
	 * cluster_complete_transaction will
	 * only be called if we've issued a complete chain in synchronous mode
	 * or, we've already done a cluster_wait_IO on an incomplete chain
	 */
	if (needwait) {
		for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next)
			buf_biowait(cbp);
	}
	error = cluster_iodone(*cbp_head, callback_arg);

	if ( !(flags & CL_ASYNC) && error && *retval == 0) {
		if (((flags & (CL_PAGEOUT | CL_KEEPCACHED)) != CL_PAGEOUT) || (error != ENXIO))
			*retval = error;
	}
	*cbp_head = (buf_t)NULL;
}
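
/*
 * Illustrative summary, not part of the original source: cluster_io() below
 * builds a 'transaction' as a singly linked chain of buf_t's (cbp_head ...
 * cbp_tail, linked through b_trans_next, each pointing back at the head via
 * b_trans_head).  cluster_EOT() stamps the tail with B_EOT so that
 * cluster_iodone() can tell when the entire chain has completed, and
 * cluster_complete_transaction() drives that same completion on the
 * synchronous path.
 */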
static int
cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
	   int flags, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
{
	buf_t	cbp;
	u_int	size;
	u_int	io_size;
	int	io_flags;
	int	bmap_flags;
	int	error = 0;
	int	retval = 0;
	buf_t	cbp_head = NULL;
	buf_t	cbp_tail = NULL;
	int	trans_count = 0;
	int	max_trans_count;
	u_int	pg_count;
	int	pg_offset;
	u_int	max_iosize;
	u_int	max_vectors;
	int	priv;
	int	zero_offset = 0;
	int	async_throttle = 0;
	mount_t	mp;
	vm_offset_t	upl_end_offset;
	boolean_t	need_EOT = FALSE;

	/*
	 * we currently don't support buffers larger than a page
	 */
	if (real_bp && non_rounded_size > PAGE_SIZE)
		panic("%s(): Called with real buffer of size %d bytes which "
		      "is greater than the maximum allowed size of "
		      "%d bytes (the system PAGE_SIZE).\n",
		      __FUNCTION__, non_rounded_size, PAGE_SIZE);

	mp = vp->v_mount;

	/*
	 * we don't want to do any funny rounding of the size for IO requests
	 * coming through the DIRECT or CONTIGUOUS paths...  those pages don't
	 * belong to us... we can't extend (nor do we need to) the I/O to fill
	 * out a page
	 */
	if (mp->mnt_devblocksize > 1 && !(flags & (CL_DEV_MEMORY | CL_DIRECT_IO))) {
		/*
		 * round the requested size up so that this I/O ends on a
		 * page boundary in case this is a 'write'... if the filesystem
		 * has blocks allocated to back the page beyond the EOF, we want to
		 * make sure to write out the zero's that are sitting beyond the EOF
		 * so that in case the filesystem doesn't explicitly zero this area
		 * if a hole is created via a lseek/write beyond the current EOF,
		 * it will return zeros when it's read back from the disk.  If the
		 * physical allocation doesn't extend for the whole page, we'll
		 * only write/read from the disk up to the end of this allocation
		 * via the extent info returned from the VNOP_BLOCKMAP call.
		 */
		pg_offset = upl_offset & PAGE_MASK;

		size = (((non_rounded_size + pg_offset) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - pg_offset;
	} else {
		/*
		 * anyone advertising a blocksize of 1 byte probably
		 * can't deal with us rounding up the request size
		 * AFP is one such filesystem/device
		 */
		size = non_rounded_size;
	}
	upl_end_offset = upl_offset + size;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START, (int)f_offset, size, upl_offset, flags, 0);

	/*
	 * Set the maximum transaction size to the maximum desired number of
	 * buffers.
	 */
	max_trans_count = 8;
	if (flags & CL_DEV_MEMORY)
		max_trans_count = 16;

	if (flags & CL_READ) {
		io_flags = B_READ;
		bmap_flags = VNODE_READ;

		max_iosize  = mp->mnt_maxreadcnt;
		max_vectors = mp->mnt_segreadcnt;
	} else {
		io_flags = B_WRITE;
		bmap_flags = VNODE_WRITE;

		max_iosize  = mp->mnt_maxwritecnt;
		max_vectors = mp->mnt_segwritecnt;
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_NONE, max_iosize, max_vectors, mp->mnt_devblocksize, 0, 0);

	/*
	 * make sure the maximum iosize is a
	 * multiple of the page size
	 */
	max_iosize &= ~PAGE_MASK;

	/*
	 * Ensure the maximum iosize is sensible.
	 */
	if (!max_iosize)
		max_iosize = PAGE_SIZE;

	if (flags & CL_THROTTLE) {
		if ( !(flags & CL_PAGEOUT) && cluster_hard_throttle_on(vp, 1)) {
			if (max_iosize > HARD_THROTTLE_MAXSIZE)
				max_iosize = HARD_THROTTLE_MAXSIZE;
			async_throttle = HARD_THROTTLE_MAXCNT;
		} else {
			if ( (flags & CL_DEV_MEMORY) )
				async_throttle = IO_SCALE(vp, VNODE_ASYNC_THROTTLE);
			else {
				u_int max_cluster;
				u_int max_cluster_size;
				u_int max_prefetch;

				max_cluster_size = MAX_CLUSTER_SIZE(vp);
				max_prefetch = MAX_PREFETCH(vp, cluster_max_io_size(vp->v_mount, CL_READ));

				if (max_iosize > max_cluster_size)
					max_cluster = max_cluster_size;
				else
					max_cluster = max_iosize;

				if (size < max_cluster)
					max_cluster = size;

				async_throttle = min(IO_SCALE(vp, VNODE_ASYNC_THROTTLE), (max_prefetch / max_cluster) - 1);
			}
		}
	}
	if (flags & (CL_PAGEIN | CL_PAGEOUT))
		io_flags |= B_PAGEIO;
	if (flags & (CL_IOSTREAMING))
		io_flags |= B_IOSTREAMING;
	if (flags & CL_COMMIT)
		io_flags |= B_COMMIT_UPL;
	if (flags & CL_PRESERVE)
		io_flags |= B_PHYS;
	if (flags & CL_KEEPCACHED)
		io_flags |= B_CACHE;
	if (flags & CL_PASSIVE)
		io_flags |= B_PASSIVE;
	if (vp->v_flag & VSYSTEM)
		io_flags |= B_META;

	if ((flags & CL_READ) && ((upl_offset + non_rounded_size) & PAGE_MASK) && (!(flags & CL_NOZERO))) {
		/*
		 * then we are going to end up
		 * with a page that we can't complete (the file size wasn't a multiple
		 * of PAGE_SIZE and we're trying to read to the end of the file
		 * so we'll go ahead and zero out the portion of the page we can't
		 * read in from the file
		 */
		zero_offset = upl_offset + non_rounded_size;
	}
	while (size) {
		daddr64_t blkno;
		daddr64_t lblkno;
		u_int	io_size_wanted;
		size_t	io_size_tmp;

		if (size > max_iosize)
			io_size = max_iosize;
		else
			io_size = size;

		io_size_wanted = io_size;
		io_size_tmp = (size_t)io_size;

		if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &io_size_tmp, NULL, bmap_flags, NULL)))
			break;

		if (io_size_tmp > io_size_wanted)
			io_size = io_size_wanted;
		else
			io_size = (u_int)io_size_tmp;

		if (real_bp && (real_bp->b_blkno == real_bp->b_lblkno))
			real_bp->b_blkno = blkno;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 24)) | DBG_FUNC_NONE,
			     (int)f_offset, (int)(blkno >> 32), (int)blkno, io_size, 0);

		if (io_size == 0) {
			/*
			 * vnop_blockmap didn't return an error... however, it did
			 * return an extent size of 0 which means we can't
			 * make forward progress on this I/O... a hole in the
			 * file would be returned as a blkno of -1 with a non-zero io_size
			 * a real extent is returned with a blkno != -1 and a non-zero io_size
			 */
			error = EINVAL;
			break;
		}
		if ( !(flags & CL_READ) && blkno == -1) {
			off_t	e_offset;
			int	pageout_flags;

			if (upl_get_internal_vectorupl(upl))
				panic("Vector UPLs should not take this code-path\n");
			/*
			 * we're writing into a 'hole'
			 */
			if (flags & CL_PAGEOUT) {
				/*
				 * if we got here via cluster_pageout
				 * then just error the request and return
				 * the 'hole' should already have been covered
				 */
				error = EINVAL;
				break;
			}
			/*
			 * we can get here if the cluster code happens to
			 * pick up a page that was dirtied via mmap vs
			 * a 'write' and the page targets a 'hole'...
			 * i.e. the writes to the cluster were sparse
			 * and the file was being written for the first time
			 *
			 * we can also get here if the filesystem supports
			 * 'holes' that are less than PAGE_SIZE.... because
			 * we can't know if the range in the page that covers
			 * the 'hole' has been dirtied via an mmap or not,
			 * we have to assume the worst and try to push the
			 * entire page to storage.
			 *
			 * Try paging out the page individually before
			 * giving up entirely and dumping it (the pageout
			 * path will ensure that the zero extent accounting
			 * has been taken care of before we get back into cluster_io)
			 *
			 * go direct to vnode_pageout so that we don't have to
			 * unbusy the page from the UPL... we used to do this
			 * so that we could call ubc_sync_range, but that results
			 * in a potential deadlock if someone else races us to acquire
			 * that page and wins and in addition needs one of the pages
			 * we're continuing to hold in the UPL
			 */
			pageout_flags = UPL_MSYNC | UPL_VNODE_PAGER | UPL_NESTED_PAGEOUT;

			if ( !(flags & CL_ASYNC))
				pageout_flags |= UPL_IOSYNC;
			if ( !(flags & CL_COMMIT))
				pageout_flags |= UPL_NOCOMMIT;

			if (cbp_head) {
				buf_t	last_cbp;

				/*
				 * first we have to wait for the current outstanding I/Os
				 * to complete... EOT hasn't been set yet on this transaction
				 * so the pages won't be released just because all of the current
				 * I/O linked to this transaction has completed...
				 */
				cluster_wait_IO(cbp_head, (flags & CL_ASYNC));

				/*
				 * we've got a transaction that
				 * includes the page we're about to push out through vnode_pageout...
				 * find the last bp in the list which will be the one that
				 * includes the head of this page and round its iosize down
				 * to a page boundary...
				 */
				for (last_cbp = cbp = cbp_head; cbp->b_trans_next; cbp = cbp->b_trans_next)
					last_cbp = cbp;

				cbp->b_bcount &= ~PAGE_MASK;

				if (cbp->b_bcount == 0) {
					/*
					 * this buf no longer has any I/O associated with it
					 */
					free_io_buf(cbp);

					if (cbp == cbp_head) {
						/*
						 * the buf we just freed was the only buf in
						 * this transaction... so there's no I/O to do
						 */
						cbp_head = NULL;
					} else {
						/*
						 * remove the buf we just freed from
						 * the transaction list
						 */
						last_cbp->b_trans_next = NULL;
						cbp_tail = last_cbp;
					}
				}
				if (cbp_head) {
					/*
					 * there was more to the current transaction
					 * than just the page we are pushing out via vnode_pageout...
					 * mark it as finished and complete it... we've already
					 * waited for the I/Os to complete above in the call to cluster_wait_IO
					 */
					cluster_EOT(cbp_head, cbp_tail, 0);

					cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);

					trans_count = 0;
				}
			}
			if (vnode_pageout(vp, upl, trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) {
				error = EINVAL;
				break;
			}
			e_offset = round_page_64(f_offset + 1);
			io_size = e_offset - f_offset;

			f_offset   += io_size;
			upl_offset += io_size;

			if (size >= io_size)
				size -= io_size;
			else
				size = 0;
			/*
			 * keep track of how much of the original request
			 * that we've actually completed... non_rounded_size
			 * may go negative due to us rounding the request
			 * to a page size multiple (i.e.  size > non_rounded_size)
			 */
			non_rounded_size -= io_size;

			if (non_rounded_size <= 0) {
				/*
				 * we've transferred all of the data in the original
				 * request, but we were unable to complete the tail
				 * of the last page because the file didn't have
				 * an allocation to back that portion... this is ok.
				 */
				size = 0;
			}
			continue;
		}
		lblkno = (daddr64_t)(f_offset / PAGE_SIZE_64);
		/*
		 * we have now figured out how much I/O we can do - this is in 'io_size'
		 * pg_offset is the starting point in the first page for the I/O
		 * pg_count is the number of full and partial pages that 'io_size' encompasses
		 */
		pg_offset = upl_offset & PAGE_MASK;

		if (flags & CL_DEV_MEMORY) {
			/*
			 * treat physical requests as one 'giant' page
			 */
			pg_count = 1;
		} else
			pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;

		if ((flags & CL_READ) && blkno == -1) {
			vm_offset_t commit_offset;
			int	bytes_to_zero;
			int	pg_resid;
			int	complete_transaction_now = 0;

			/*
			 * if we're reading and blkno == -1, then we've got a
			 * 'hole' in the file that we need to deal with by zeroing
			 * out the affected area in the upl
			 */
			if (io_size >= (u_int)non_rounded_size) {
				/*
				 * if this upl contains the EOF and it is not a multiple of PAGE_SIZE
				 * then 'zero_offset' will be non-zero
				 * if the 'hole' returned by vnop_blockmap extends all the way to the eof
				 * (indicated by the io_size finishing off the I/O request for this UPL)
				 * then we're not going to issue an I/O for the
				 * last page in this upl... we need to zero both the hole and the tail
				 * of the page beyond the EOF, since the delayed zero-fill won't kick in
				 */
				bytes_to_zero = non_rounded_size;
				if (!(flags & CL_NOZERO))
					bytes_to_zero = (((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset;
			} else
				bytes_to_zero = io_size;

			cluster_zero(upl, upl_offset, bytes_to_zero, real_bp);

			if (cbp_head) {
				/*
				 * if there is a current I/O chain pending
				 * then the first page of the group we just zero'd
				 * will be handled by the I/O completion if the zero
				 * fill started in the middle of the page
				 */
				commit_offset = (upl_offset + (PAGE_SIZE - 1)) & ~PAGE_MASK;

				pg_resid = commit_offset - upl_offset;

				if (bytes_to_zero >= pg_resid) {
					/*
					 * the last page of the current I/O
					 * has been completed...
					 * compute the number of fully zero'd
					 * pages that are beyond it
					 * plus the last page if it's partial
					 * and we have no more I/O to issue...
					 * otherwise a partial page is left
					 * to begin the next I/O
					 */
					if ((int)io_size >= non_rounded_size)
						pg_count = (bytes_to_zero - pg_resid + (PAGE_SIZE - 1)) / PAGE_SIZE;
					else
						pg_count = (bytes_to_zero - pg_resid) / PAGE_SIZE;

					complete_transaction_now = 1;
				}
			} else {
				/*
				 * no pending I/O to deal with
				 * so, commit all of the fully zero'd pages
				 * plus the last page if it's partial
				 * and we have no more I/O to issue...
				 * otherwise a partial page is left
				 * to begin the next I/O
				 */
				if ((int)io_size >= non_rounded_size)
					pg_count = (pg_offset + bytes_to_zero + (PAGE_SIZE - 1)) / PAGE_SIZE;
				else
					pg_count = (pg_offset + bytes_to_zero) / PAGE_SIZE;

				commit_offset = upl_offset & ~PAGE_MASK;
			}
			if ( (flags & CL_COMMIT) && pg_count) {
				ubc_upl_commit_range(upl, commit_offset, pg_count * PAGE_SIZE,
						     UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
			}
			upl_offset += io_size;
			f_offset   += io_size;
			size       -= io_size;
			/*
			 * keep track of how much of the original request
			 * that we've actually completed... non_rounded_size
			 * may go negative due to us rounding the request
			 * to a page size multiple (i.e.  size > non_rounded_size)
			 */
			non_rounded_size -= io_size;

			if (non_rounded_size <= 0) {
				/*
				 * we've transferred all of the data in the original
				 * request, but we were unable to complete the tail
				 * of the last page because the file didn't have
				 * an allocation to back that portion... this is ok.
				 */
				size = 0;
			}
			if (cbp_head && (complete_transaction_now || size == 0)) {
				cluster_wait_IO(cbp_head, (flags & CL_ASYNC));

				cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0);

				cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);

				trans_count = 0;
			}
			continue;
		}
		if (pg_count > max_vectors) {
			if (((pg_count - max_vectors) * PAGE_SIZE) > io_size) {
				io_size = PAGE_SIZE - pg_offset;
				pg_count = 1;
			} else {
				io_size -= (pg_count - max_vectors) * PAGE_SIZE;
				pg_count = max_vectors;
			}
		}
		/*
		 * If the transaction is going to reach the maximum number of
		 * desired elements, truncate the i/o to the nearest page so
		 * that the actual i/o is initiated after this buffer is
		 * created and added to the i/o chain.
		 *
		 * I/O directed to physically contiguous memory
		 * doesn't have a requirement to make sure we 'fill' a page
		 */
		if ( !(flags & CL_DEV_MEMORY) && trans_count >= max_trans_count &&
		    ((upl_offset + io_size) & PAGE_MASK)) {
			vm_offset_t aligned_ofs;

			aligned_ofs = (upl_offset + io_size) & ~PAGE_MASK;
			/*
			 * If the io_size does not actually finish off even a
			 * single page we have to keep adding buffers to the
			 * transaction despite having reached the desired limit.
			 *
			 * Eventually we get here with the page being finished
			 * off (and exceeded) and then we truncate the size of
			 * this i/o request so that it is page aligned so that
			 * we can finally issue the i/o on the transaction.
			 */
			if (aligned_ofs > upl_offset) {
				io_size = aligned_ofs - upl_offset;
				pg_count--;
			}
		}
		if ( !(mp->mnt_kern_flag & MNTK_VIRTUALDEV))
			/*
			 * if we're not targeting a virtual device i.e. a disk image
			 * it's safe to dip into the reserve pool since real devices
			 * can complete this I/O request without requiring additional
			 * bufs from the alloc_io_buf pool
			 */
			priv = 1;
		else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT))
			/*
			 * Throttle the speculative IO
			 */
			priv = 0;
		else
			priv = 1;

		cbp = alloc_io_buf(vp, priv);

		if (flags & CL_PAGEOUT) {
			u_int i;

			for (i = 0; i < pg_count; i++) {
				if (buf_invalblkno(vp, lblkno + i, 0) == EBUSY)
					panic("BUSY bp found in cluster_io");
			}
		}
		if (flags & CL_ASYNC) {
			if (buf_setcallback(cbp, (void *)cluster_iodone, callback_arg))
				panic("buf_setcallback failed\n");
		}
		cbp->b_cliodone = (void *)callback;
		cbp->b_flags |= io_flags;

		cbp->b_lblkno = lblkno;
		cbp->b_blkno  = blkno;
		cbp->b_bcount = io_size;

		if (buf_setupl(cbp, upl, upl_offset))
			panic("buf_setupl failed\n");

		cbp->b_trans_next = (buf_t)NULL;

		if ((cbp->b_iostate = (void *)iostate))
			/*
			 * caller wants to track the state of this
			 * io... bump the amount issued against this stream
			 */
			iostate->io_issued += io_size;

		if (flags & CL_READ) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 26)) | DBG_FUNC_NONE,
				     (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
		} else {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 27)) | DBG_FUNC_NONE,
				     (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
		}
		if (cbp_head) {
			cbp_tail->b_trans_next = cbp;
			cbp_tail = cbp;
		} else {
			cbp_head = cbp;
			cbp_tail = cbp;

			if ( (cbp_head->b_real_bp = real_bp) ) {
				cbp_head->b_flags |= B_NEED_IODONE;
				real_bp = (buf_t)NULL;
			}
		}
		*(buf_t *)(&cbp->b_trans_head) = cbp_head;

		trans_count++;

		upl_offset += io_size;
		f_offset   += io_size;
		size       -= io_size;
		/*
		 * keep track of how much of the original request
		 * that we've actually completed... non_rounded_size
		 * may go negative due to us rounding the request
		 * to a page size multiple (i.e.  size > non_rounded_size)
		 */
		non_rounded_size -= io_size;

		if (non_rounded_size <= 0) {
			/*
			 * we've transferred all of the data in the original
			 * request, but we were unable to complete the tail
			 * of the last page because the file didn't have
			 * an allocation to back that portion... this is ok.
			 */
			size = 0;
		}
		if (size == 0) {
			/*
			 * we have no more I/O to issue, so go
			 * finish the final transaction
			 */
			need_EOT = TRUE;
		} else if ( ((flags & CL_DEV_MEMORY) || (upl_offset & PAGE_MASK) == 0) &&
			    ((flags & CL_ASYNC) || trans_count > max_trans_count) ) {
			/*
			 * I/O directed to physically contiguous memory...
			 * which doesn't have a requirement to make sure we 'fill' a page
			 * or...
			 * the current I/O we've prepared fully
			 * completes the last page in this request
			 * and...
			 * it's either an ASYNC request or
			 * we've already accumulated more than 8 I/O's into
			 * this transaction so mark it as complete so that
			 * it can finish asynchronously or via the cluster_complete_transaction
			 * below if the request is synchronous
			 */
			need_EOT = TRUE;
		}
		if (need_EOT == TRUE)
			cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0);

		if (flags & CL_THROTTLE)
			(void)vnode_waitforwrites(vp, async_throttle, 0, 0, "cluster_io");

		if ( !(io_flags & B_READ))
			vnode_startwrite(vp);

		(void) VNOP_STRATEGY(cbp);

		if (need_EOT == TRUE) {
			if ( !(flags & CL_ASYNC))
				cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 1);

			need_EOT = FALSE;
			trans_count = 0;
			cbp_head = NULL;
		}
	}
	if (error) {
		int abort_size;

		io_size = 0;

		if (cbp_head) {
			/*
			 * first wait until all of the outstanding I/O
			 * for this partial transaction has completed
			 */
			cluster_wait_IO(cbp_head, (flags & CL_ASYNC));

			/*
			 * Rewind the upl offset to the beginning of the
			 * transaction.
			 */
			upl_offset = cbp_head->b_uploffset;

			for (cbp = cbp_head; cbp;) {
				buf_t	cbp_next;

				size    += cbp->b_bcount;
				io_size += cbp->b_bcount;

				cbp_next = cbp->b_trans_next;
				free_io_buf(cbp);
				cbp = cbp_next;
			}
		}
		if (iostate) {
			int need_wakeup = 0;

			/*
			 * update the error condition for this stream
			 * since we never really issued the io
			 * just go ahead and adjust it back
			 */
			lck_mtx_lock_spin(cl_mtxp);

			if (iostate->io_error == 0)
				iostate->io_error = error;

			iostate->io_issued -= io_size;

			if (iostate->io_wanted) {
				/*
				 * someone is waiting for the state of
				 * this io stream to change
				 */
				iostate->io_wanted = 0;
				need_wakeup = 1;
			}
			lck_mtx_unlock(cl_mtxp);

			if (need_wakeup)
				wakeup((caddr_t)&iostate->io_wanted);
		}
		if (flags & CL_COMMIT) {
			int	upl_flags;

			pg_offset  = upl_offset & PAGE_MASK;
			abort_size = (upl_end_offset - upl_offset + PAGE_MASK) & ~PAGE_MASK;

			upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, abort_size, error, io_flags);

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE,
				     upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0);
		}
		if (retval == 0)
			retval = error;
	} else if (cbp_head)
		panic("%s(): cbp_head is not NULL.\n", __FUNCTION__);

	if (real_bp) {
		/*
		 * can get here if we either encountered an error
		 * or we completely zero-filled the request and
		 * no I/O was issued
		 */
		if (error) {
			real_bp->b_flags |= B_ERROR;
			real_bp->b_error = error;
		}
		buf_biodone(real_bp);
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_END, (int)f_offset, size, upl_offset, retval, 0);

	return (retval);
}
#define reset_vector_run_state()	\
	issueVectorUPL = vector_upl_offset = vector_upl_index = vector_upl_iosize = vector_upl_size = 0;
static int
vector_cluster_io(vnode_t vp, upl_t vector_upl, vm_offset_t vector_upl_offset, off_t v_upl_uio_offset, int vector_upl_iosize,
		  int io_flag, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
{
	vector_upl_set_pagelist(vector_upl);

	if (io_flag & CL_READ) {
		if (vector_upl_offset == 0 && ((vector_upl_iosize & PAGE_MASK) == 0))
			io_flag &= ~CL_PRESERVE; /*don't zero fill*/
		else
			io_flag |= CL_PRESERVE; /*zero fill*/
	}
	return (cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, real_bp, iostate, callback, callback_arg));
}
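
/*
 * Illustrative summary, not part of the original source: the direct-I/O
 * paths later in this file accumulate per-iovec UPLs into 'vector_upl' and
 * only call vector_cluster_io() when the run must be flushed (base address
 * not page-aligned, size/element limits reached, or no more data), e.g.:
 *
 *	if (vector_upl_index) {
 *		retval = vector_cluster_io(vp, vector_upl, vector_upl_offset,
 *			    v_upl_uio_offset, vector_upl_iosize,
 *			    io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
 *		reset_vector_run_state();
 *	}
 */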
static int
cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
{
	int	pages_in_prefetch;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START,
		     (int)f_offset, size, (int)filesize, 0, 0);

	if (f_offset >= filesize) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
			     (int)f_offset, 0, 0, 0, 0);
		return (0);
	}
	if ((off_t)size > (filesize - f_offset))
		size = filesize - f_offset;
	pages_in_prefetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;

	advisory_read_ext(vp, filesize, f_offset, size, callback, callback_arg, bflag);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
		     (int)f_offset + size, pages_in_prefetch, 0, 1, 0);

	return (pages_in_prefetch);
}
static void
cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *rap, int (*callback)(buf_t, void *), void *callback_arg,
		   int bflag)
{
	daddr64_t	r_addr;
	off_t		f_offset;
	int		size_of_prefetch;
	u_int		max_prefetch;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START,
		     (int)extent->b_addr, (int)extent->e_addr, (int)rap->cl_lastr, 0, 0);

	if (extent->b_addr == rap->cl_lastr && extent->b_addr == extent->e_addr) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
			     rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 0, 0);
		return;
	}
	if (rap->cl_lastr == -1 || (extent->b_addr != rap->cl_lastr && extent->b_addr != (rap->cl_lastr + 1))) {
		rap->cl_ralen = 0;
		rap->cl_maxra = 0;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
			     rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 1, 0);
		return;
	}
	max_prefetch = MAX_PREFETCH(vp, cluster_max_io_size(vp->v_mount, CL_READ));

	if (extent->e_addr < rap->cl_maxra) {
		if ((rap->cl_maxra - extent->e_addr) > ((max_prefetch / PAGE_SIZE) / 4)) {

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
				     rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 2, 0);
			return;
		}
	}
	r_addr = max(extent->e_addr, rap->cl_maxra) + 1;
	f_offset = (off_t)(r_addr * PAGE_SIZE_64);

	size_of_prefetch = 0;

	ubc_range_op(vp, f_offset, f_offset + PAGE_SIZE_64, UPL_ROP_PRESENT, &size_of_prefetch);

	if (size_of_prefetch) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
			     rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 3, 0);
		return;
	}
	if (f_offset < filesize) {
		daddr64_t read_size;

		rap->cl_ralen = rap->cl_ralen ? min(max_prefetch / PAGE_SIZE, rap->cl_ralen << 1) : 1;

		read_size = (extent->e_addr + 1) - extent->b_addr;

		if (read_size > rap->cl_ralen) {
			if (read_size > max_prefetch / PAGE_SIZE)
				rap->cl_ralen = max_prefetch / PAGE_SIZE;
			else
				rap->cl_ralen = read_size;
		}
		size_of_prefetch = cluster_read_prefetch(vp, f_offset, rap->cl_ralen * PAGE_SIZE, filesize, callback, callback_arg, bflag);

		if (size_of_prefetch)
			rap->cl_maxra = (r_addr + size_of_prefetch) - 1;
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
		     rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 4, 0);
}
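
/*
 * Illustrative worked example, not part of the original source: assuming
 * 4KB pages and a max_prefetch of 1MB (256 pages), a strictly sequential
 * reader sees cl_ralen grow roughly 1, 2, 4, 8, ... pages on successive
 * calls (doubling each time) until it is capped at
 * max_prefetch / PAGE_SIZE = 256 pages, while cl_maxra tracks the last page
 * that has already been speculatively requested.
 */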
int
cluster_pageout(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
		int size, off_t filesize, int flags)
{
	return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
}
int
cluster_pageout_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
		    int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
{
	int	io_size;
	int	rounded_size;
	off_t	max_size;
	int	local_flags;

	if (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV)
		/*
		 * if we know we're issuing this I/O to a virtual device (i.e. disk image)
		 * then we don't want to enforce this throttle... if we do, we can
		 * potentially deadlock since we're stalling the pageout thread at a time
		 * when the disk image might need additional memory (which won't be available
		 * if the pageout thread can't run)... instead we'll just depend on the throttle
		 * that the pageout thread now has in place to deal with external files
		 */
		local_flags = CL_PAGEOUT;
	else
		local_flags = CL_PAGEOUT | CL_THROTTLE;

	if ((flags & UPL_IOSYNC) == 0)
		local_flags |= CL_ASYNC;
	if ((flags & UPL_NOCOMMIT) == 0)
		local_flags |= CL_COMMIT;
	if ((flags & UPL_KEEPCACHED))
		local_flags |= CL_KEEPCACHED;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 52)) | DBG_FUNC_NONE,
		     (int)f_offset, size, (int)filesize, local_flags, 0);

	/*
	 * If they didn't specify any I/O, then we are done...
	 * we can't issue an abort because we don't know how
	 * big the upl really is
	 */
	if (size <= 0)
		return (EINVAL);

	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
		if (local_flags & CL_COMMIT)
			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
		return (EROFS);
	}
	/*
	 * can't page-out from a negative offset
	 * or if we're starting beyond the EOF
	 * or if the file offset isn't page aligned
	 * or the size requested isn't a multiple of PAGE_SIZE
	 */
	if (f_offset < 0 || f_offset >= filesize ||
	   (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) {
		if (local_flags & CL_COMMIT)
			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
		return (EINVAL);
	}
	max_size = filesize - f_offset;

	if (size < max_size)
		io_size = size;
	else
		io_size = max_size;

	rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

	if (size > rounded_size) {
		if (local_flags & CL_COMMIT)
			ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size,
					    UPL_ABORT_FREE_ON_EMPTY);
	}
	return (cluster_io(vp, upl, upl_offset, f_offset, io_size,
			   local_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg));
}
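
/*
 * Illustrative sketch, not part of the original source: a filesystem's
 * pageout VNOP typically just forwards to this routine.  'myfs_vnop_pageout'
 * is a hypothetical name and its use of ubc_getsize() here is illustrative,
 * not taken from any particular filesystem:
 *
 *	static int
 *	myfs_vnop_pageout(struct vnop_pageout_args *ap)
 *	{
 *		return cluster_pageout(ap->a_vp, ap->a_pl, ap->a_pl_offset,
 *			    ap->a_f_offset, ap->a_size,
 *			    ubc_getsize(ap->a_vp), ap->a_flags);
 *	}
 */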
int
cluster_pagein(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
	       int size, off_t filesize, int flags)
{
	return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
}
int
cluster_pagein_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
		   int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
{
	u_int	io_size;
	int	rounded_size;
	off_t	max_size;
	int	retval;
	int	local_flags = 0;

	if (upl == NULL || size < 0)
		panic("cluster_pagein: NULL upl passed in");

	if ((flags & UPL_IOSYNC) == 0)
		local_flags |= CL_ASYNC;
	if ((flags & UPL_NOCOMMIT) == 0)
		local_flags |= CL_COMMIT;
	if (flags & UPL_IOSTREAMING)
		local_flags |= CL_IOSTREAMING;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 56)) | DBG_FUNC_NONE,
		     (int)f_offset, size, (int)filesize, local_flags, 0);

	/*
	 * can't page-in from a negative offset
	 * or if we're starting beyond the EOF
	 * or if the file offset isn't page aligned
	 * or the size requested isn't a multiple of PAGE_SIZE
	 */
	if (f_offset < 0 || f_offset >= filesize ||
	   (f_offset & PAGE_MASK_64) || (size & PAGE_MASK) || (upl_offset & PAGE_MASK)) {
		if (local_flags & CL_COMMIT)
			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
		return (EINVAL);
	}
	max_size = filesize - f_offset;

	if (size < max_size)
		io_size = size;
	else
		io_size = max_size;

	rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

	if (size > rounded_size && (local_flags & CL_COMMIT))
		ubc_upl_abort_range(upl, upl_offset + rounded_size,
				    size - rounded_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);

	retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
			    local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);

	return (retval);
}
int
cluster_bp(buf_t bp)
{
	return cluster_bp_ext(bp, NULL, NULL);
}

int
cluster_bp_ext(buf_t bp, int (*callback)(buf_t, void *), void *callback_arg)
{
	off_t	f_offset;
	int	flags;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 19)) | DBG_FUNC_START,
		     bp, (int)bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);

	if (bp->b_flags & B_READ)
		flags = CL_ASYNC | CL_READ;
	else
		flags = CL_ASYNC;

	if (bp->b_flags & B_PASSIVE)
		flags |= CL_PASSIVE;

	f_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno);

	return (cluster_io(bp->b_vp, bp->b_upl, 0, f_offset, bp->b_bcount, flags, bp, (struct clios *)NULL, callback, callback_arg));
}
int
cluster_write(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff, int xflags)
{
	return cluster_write_ext(vp, uio, oldEOF, newEOF, headOff, tailOff, xflags, NULL, NULL);
}
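
/*
 * Illustrative sketch, not part of the original source: a filesystem write
 * VNOP typically computes the old and new EOF itself and then hands the uio
 * to this routine.  'fs_current_size' is a hypothetical helper and the flag
 * choice is only an example:
 *
 *	off_t oldEOF = fs_current_size(vp);
 *	off_t newEOF = MAX(oldEOF, uio->uio_offset + uio_resid(uio));
 *
 *	error = cluster_write(vp, uio, oldEOF, newEOF,
 *		    (off_t)0, (off_t)0, IO_SYNC);
 */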
int
cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff,
		  int xflags, int (*callback)(buf_t, void *), void *callback_arg)
{
	user_ssize_t	cur_resid;
	int		retval = 0;
	int		flags;
	int		zflags;
	int		bflag;
	int		write_type = IO_COPY;
	u_int32_t	write_length;

	flags = xflags;

	if (flags & IO_PASSIVE)
		bflag = CL_PASSIVE;
	else
		bflag = 0;

	if (vp->v_flag & VNOCACHE_DATA)
		flags |= IO_NOCACHE;

	if (uio == NULL) {
		/*
		 * no user data...
		 * this call is being made to zero-fill some range in the file
		 */
		retval = cluster_write_copy(vp, NULL, (u_int32_t)0, oldEOF, newEOF, headOff, tailOff, flags, callback, callback_arg);

		return (retval);
	}
	/*
	 * do a write through the cache if one of the following is true....
	 *   NOCACHE is not true and
	 *   the uio request doesn't target USERSPACE
	 * otherwise, find out if we want the direct or contig variant for
	 * the first vector in the uio request
	 */
	if ( (flags & IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg) )
		retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE);

	if ( (flags & (IO_TAILZEROFILL | IO_HEADZEROFILL)) && write_type == IO_DIRECT)
		/*
		 * must go through the cached variant in this case
		 */
		write_type = IO_COPY;

	while ((cur_resid = uio_resid(uio)) && uio->uio_offset < newEOF && retval == 0) {

		switch (write_type) {

		case IO_COPY:
			/*
			 * make sure the uio_resid isn't too big...
			 * internally, we want to handle all of the I/O in
			 * chunk sizes that fit in a 32 bit int
			 */
			if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) {
				/*
				 * we're going to have to call cluster_write_copy
				 * more than once...
				 *
				 * only want the last call to cluster_write_copy to
				 * have the IO_TAILZEROFILL flag set and only the
				 * first call should have IO_HEADZEROFILL
				 */
				zflags = flags & ~IO_TAILZEROFILL;
				flags &= ~IO_HEADZEROFILL;

				write_length = MAX_IO_REQUEST_SIZE;
			} else {
				/*
				 * last call to cluster_write_copy
				 */
				zflags = flags;

				write_length = (u_int32_t)cur_resid;
			}
			retval = cluster_write_copy(vp, uio, write_length, oldEOF, newEOF, headOff, tailOff, zflags, callback, callback_arg);
			break;

		case IO_CONTIG:
			zflags = flags & ~(IO_TAILZEROFILL | IO_HEADZEROFILL);

			if (flags & IO_HEADZEROFILL) {
				/*
				 * only do this once per request
				 */
				flags &= ~IO_HEADZEROFILL;

				retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, uio->uio_offset,
							    headOff, (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
				if (retval)
					break;
			}
			retval = cluster_write_contig(vp, uio, newEOF, &write_type, &write_length, callback, callback_arg, bflag);

			if (retval == 0 && (flags & IO_TAILZEROFILL) && uio_resid(uio) == 0) {
				/*
				 * we're done with the data from the user specified buffer(s)
				 * and we've been requested to zero fill at the tail
				 * treat this as an IO_HEADZEROFILL which doesn't require a uio
				 * by rearranging the args and passing in IO_HEADZEROFILL
				 */
				retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, tailOff, uio->uio_offset,
							    (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
			}
			break;

		case IO_DIRECT:
			/*
			 * cluster_write_direct is never called with IO_TAILZEROFILL || IO_HEADZEROFILL
			 */
			retval = cluster_write_direct(vp, uio, oldEOF, newEOF, &write_type, &write_length, flags, callback, callback_arg);
			break;

		case IO_UNKNOWN:
			retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE);
			break;
		}
		/*
		 * in case we end up calling cluster_write_copy (from cluster_write_direct)
		 * multiple times to service a multi-vector request that is not aligned properly
		 * we need to update the oldEOF so that we
		 * don't zero-fill the head of a page if we've successfully written
		 * data to that area... 'cluster_write_copy' will zero-fill the head of a
		 * page that is beyond the oldEOF if the write is unaligned... we only
		 * want that to happen for the very first page of the cluster_write,
		 * NOT the first page of each vector making up a multi-vector write.
		 */
		if (uio->uio_offset > oldEOF)
			oldEOF = uio->uio_offset;
	}
	return (retval);
}
1953 cluster_write_direct(vnode_t vp
, struct uio
*uio
, off_t oldEOF
, off_t newEOF
, int *write_type
, u_int32_t
*write_length
,
1954 int flags
, int (*callback
)(buf_t
, void *), void *callback_arg
)
1957 upl_page_info_t
*pl
;
1958 vm_offset_t upl_offset
;
1959 vm_offset_t vector_upl_offset
= 0;
1960 u_int32_t io_req_size
;
1961 u_int32_t offset_in_file
;
1962 u_int32_t offset_in_iovbase
;
1965 upl_size_t upl_size
, vector_upl_size
= 0;
1966 vm_size_t upl_needed_size
;
1967 mach_msg_type_number_t pages_in_pl
;
1970 mach_msg_type_number_t i
;
1971 int force_data_sync
;
1974 struct clios iostate
;
1975 user_addr_t iov_base
;
1976 u_int32_t mem_alignment_mask
;
1977 u_int32_t devblocksize
;
1978 u_int32_t max_upl_size
;
1980 u_int32_t vector_upl_iosize
= 0;
1981 int issueVectorUPL
= 0,useVectorUPL
= (uio
->uio_iovcnt
> 1);
1982 off_t v_upl_uio_offset
= 0;
1983 int vector_upl_index
=0;
1984 upl_t vector_upl
= NULL
;
1988 * When we enter this routine, we know
1989 * -- the resid will not exceed iov_len
1991 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 75)) | DBG_FUNC_START
,
1992 (int)uio
->uio_offset
, *write_length
, (int)newEOF
, 0, 0);
1994 max_upl_size
= cluster_max_io_size(vp
->v_mount
, CL_WRITE
);
1996 io_flag
= CL_ASYNC
| CL_PRESERVE
| CL_COMMIT
| CL_THROTTLE
| CL_DIRECT_IO
;
1998 if (flags
& IO_PASSIVE
)
1999 io_flag
|= CL_PASSIVE
;
2001 iostate
.io_completed
= 0;
2002 iostate
.io_issued
= 0;
2003 iostate
.io_error
= 0;
2004 iostate
.io_wanted
= 0;
2006 mem_alignment_mask
= (u_int32_t
)vp
->v_mount
->mnt_alignmentmask
;
2007 devblocksize
= (u_int32_t
)vp
->v_mount
->mnt_devblocksize
;
2009 if (devblocksize
== 1) {
2011 * the AFP client advertises a devblocksize of 1
2012 * however, its BLOCKMAP routine maps to physical
2013 * blocks that are PAGE_SIZE in size...
2014 * therefore we can't ask for I/Os that aren't page aligned
2015 * or aren't multiples of PAGE_SIZE in size
2016 * by setting devblocksize to PAGE_SIZE, we re-instate
2017 * the old behavior we had before the mem_alignment_mask
2018 * changes went in...
2020 devblocksize
= PAGE_SIZE
;
2024 io_req_size
= *write_length
;
2025 iov_base
= uio_curriovbase(uio
);
2027 offset_in_file
= (u_int32_t
)uio
->uio_offset
& PAGE_MASK
;
2028 offset_in_iovbase
= (u_int32_t
)iov_base
& mem_alignment_mask
;
2030 if (offset_in_file
|| offset_in_iovbase
) {
2032 * one of the 2 important offsets is misaligned
2033 * so fire an I/O through the cache for this entire vector
2035 goto wait_for_dwrites
;
2037 if (iov_base
& (devblocksize
- 1)) {
2039 * the offset in memory must be on a device block boundary
2040 * so that we can guarantee that we can generate an
2041 * I/O that ends on a page boundary in cluster_io
2043 goto wait_for_dwrites
;
2046 while (io_req_size
>= PAGE_SIZE
&& uio
->uio_offset
< newEOF
&& retval
== 0) {
		cluster_syncup(vp, newEOF, callback, callback_arg);

		io_size  = io_req_size & ~PAGE_MASK;
		iov_base = uio_curriovbase(uio);

		if (io_size > max_upl_size)
			io_size = max_upl_size;

		if (useVectorUPL && (iov_base & PAGE_MASK)) {
			/*
			 * We have an iov_base that's not page-aligned.
			 * Issue all I/O's that have been collected within
			 * this Vectored UPL.
			 */
			if (vector_upl_index) {
				retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
				reset_vector_run_state();
			}
			/*
			 * After this point, if we are using the Vector UPL path and the base is
			 * not page-aligned then the UPL with that base will be the first in the vector UPL.
			 */
		}

		upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
		upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
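		/*
		 * For example (hypothetical values): an iov_base of 0x20001200 with a 4K
		 * page size gives upl_offset = 0x200, and an io_size of 0x2000 rounds
		 * upl_needed_size up to 0x3000 so the UPL covers every page the transfer
		 * touches, including the partial first and last pages.
		 */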
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
			     (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);

		for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
			upl_size = upl_needed_size;
			upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
				    UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;

			kret = vm_map_get_upl(current_map(),
					      (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),

			if (kret != KERN_SUCCESS) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
				/*
				 * failed to get pagelist
				 *
				 * we may have already spun some portion of this request
				 * off as async requests... we need to wait for the I/O
				 * to complete before returning
				 */
				goto wait_for_dwrites;
			}
			pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
			pages_in_pl = upl_size / PAGE_SIZE;

			for (i = 0; i < pages_in_pl; i++) {
				if (!upl_valid_page(pl, i))
					break;
			}
			if (i == pages_in_pl)
				break;

			/*
			 * didn't get all the pages back that we
			 * needed... release this upl and try again
			 */
			ubc_upl_abort(upl, 0);
		}
		if (force_data_sync >= 3) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
				     i, pages_in_pl, upl_size, kret, 0);
			/*
			 * for some reason, we couldn't acquire a hold on all
			 * the pages needed in the user's address space
			 *
			 * we may have already spun some portion of this request
			 * off as async requests... we need to wait for the I/O
			 * to complete before returning
			 */
			goto wait_for_dwrites;
		}
		/*
		 * Consider the possibility that upl_size wasn't satisfied.
		 */
		if (upl_size < upl_needed_size) {
			if (upl_size && upl_offset == 0)
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
				     (int)upl_offset, upl_size, (int)iov_base, io_size, 0);

			ubc_upl_abort(upl, 0);
			/*
			 * we may have already spun some portion of this request
			 * off as async requests... we need to wait for the I/O
			 * to complete before returning
			 */
			goto wait_for_dwrites;
		}
		vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
		/*
		 * After this point, if we are using a vector UPL, then
		 * either all the UPL elements end on a page boundary OR
		 * this UPL is the last element because it does not end
		 * on a page boundary.
		 */

		/*
		 * Now look for pages already in the cache
		 * and throw them away.
		 * uio->uio_offset is page aligned within the file
		 * io_size is a multiple of PAGE_SIZE
		 */
		ubc_range_op(vp, uio->uio_offset, uio->uio_offset + io_size, UPL_ROP_DUMP, NULL);
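		/*
		 * Dumping any cached pages over this range keeps the buffer cache from
		 * holding stale copies of data the direct write below is about to put
		 * on disk; a later cached read could otherwise return the old page
		 * contents instead of what was just written.
		 */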
		/*
		 * we want to push out these writes asynchronously so that we can overlap
		 * the preparation of the next I/O
		 * if there are already too many outstanding writes
		 * wait until some complete before issuing the next
		 */
		if (iostate.io_issued > iostate.io_completed) {

			lck_mtx_lock(cl_mtxp);

			while ((iostate.io_issued - iostate.io_completed) > (max_upl_size * IO_SCALE(vp, 2))) {

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
					     iostate.io_issued, iostate.io_completed, max_upl_size * IO_SCALE(vp, 2), 0, 0);

				iostate.io_wanted = 1;
				msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_direct", NULL);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
					     iostate.io_issued, iostate.io_completed, max_upl_size * IO_SCALE(vp, 2), 0, 0);
			}
			lck_mtx_unlock(cl_mtxp);
		}
		if (iostate.io_error) {
			/*
			 * one of the earlier writes we issued ran into a hard error
			 * don't issue any more writes, cleanup the UPL
			 * that was just created but not used, then
			 * go wait for all writes that are part of this stream
			 * to complete before returning the error to the caller
			 */
			ubc_upl_abort(upl, 0);

			goto wait_for_dwrites;
		}
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
			     (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);

		if (!useVectorUPL)
			retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
					    io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
		else {
			if (!vector_upl_index) {
				vector_upl = vector_upl_create(upl_offset);
				v_upl_uio_offset = uio->uio_offset;
				vector_upl_offset = upl_offset;
			}

			vector_upl_set_subupl(vector_upl, upl, upl_size);
			vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
			vector_upl_index++;

			vector_upl_iosize += io_size;
			vector_upl_size += upl_size;

			if (issueVectorUPL || vector_upl_index == MAX_VECTOR_UPL_ELEMENTS || vector_upl_size >= MAX_VECTOR_UPL_SIZE) {
				retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
				reset_vector_run_state();
			}
		}
		/*
		 * update the uio structure to
		 * reflect the I/O that we just issued
		 */
		uio_update(uio, (user_size_t)io_size);

		/*
		 * in case we end up calling through to cluster_write_copy to finish
		 * the tail of this request, we need to update the oldEOF so that we
		 * don't zero-fill the head of a page if we've successfully written
		 * data to that area... 'cluster_write_copy' will zero-fill the head of a
		 * page that is beyond the oldEOF if the write is unaligned... we only
		 * want that to happen for the very first page of the cluster_write,
		 * NOT the first page of each vector making up a multi-vector write.
		 */
		if (uio->uio_offset > oldEOF)
			oldEOF = uio->uio_offset;

		io_req_size -= io_size;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
			     (int)upl_offset, (int)uio->uio_offset, io_req_size, retval, 0);
	}
	if (retval == 0 && iostate.io_error == 0 && io_req_size == 0) {

		retval = cluster_io_type(uio, write_type, write_length, MIN_DIRECT_WRITE_SIZE);

		if (retval == 0 && *write_type == IO_DIRECT) {

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_NONE,
				     (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
		}
	}

wait_for_dwrites:

	if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
		retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
		reset_vector_run_state();
	}
	if (iostate.io_issued > iostate.io_completed) {
		/*
		 * make sure all async writes issued as part of this stream
		 * have completed before we return
		 */
		lck_mtx_lock(cl_mtxp);

		while (iostate.io_issued != iostate.io_completed) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
				     iostate.io_issued, iostate.io_completed, 0, 0, 0);

			iostate.io_wanted = 1;
			msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_direct", NULL);

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
				     iostate.io_issued, iostate.io_completed, 0, 0, 0);
		}
		lck_mtx_unlock(cl_mtxp);
	}
	if (iostate.io_error)
		retval = iostate.io_error;

	if (io_req_size && retval == 0) {
		/*
		 * we couldn't handle the tail of this request in DIRECT mode
		 * so fire it through the copy path
		 *
		 * note that flags will never have IO_HEADZEROFILL or IO_TAILZEROFILL set
		 * so we can just pass 0 in for the headOff and tailOff
		 */
		if (uio->uio_offset > oldEOF)
			oldEOF = uio->uio_offset;

		retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg);

		*write_type = IO_UNKNOWN;
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
		     (int)uio->uio_offset, io_req_size, retval, 4, 0);

	return (retval);
}
cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, u_int32_t *write_length,
		     int (*callback)(buf_t, void *), void *callback_arg, int bflag)
{
	upl_page_info_t	 *pl;
	addr64_t	 src_paddr = 0;
	upl_t		 upl[MAX_VECTS];
	vm_offset_t	 upl_offset;
	u_int32_t	 tail_size = 0;
	upl_size_t	 upl_size;
	vm_size_t	 upl_needed_size;
	mach_msg_type_number_t	pages_in_pl;
	struct clios	 iostate;
	user_addr_t	 iov_base;
	u_int32_t	 devblocksize;
	u_int32_t	 mem_alignment_mask;

	/*
	 * When we enter this routine, we know
	 *  -- the io_req_size will not exceed iov_len
	 *  -- the target address is physically contiguous
	 */
	cluster_syncup(vp, newEOF, callback, callback_arg);

	devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
	mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;

	iostate.io_completed = 0;
	iostate.io_issued = 0;
	iostate.io_error = 0;
	iostate.io_wanted = 0;
next_cwrite:
	io_size = *write_length;

	iov_base = uio_curriovbase(uio);

	upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
	upl_needed_size = upl_offset + io_size;

	upl_size = upl_needed_size;
	upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
		    UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;

	kret = vm_map_get_upl(current_map(),
			      (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
			      &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);

	if (kret != KERN_SUCCESS) {
		/*
		 * failed to get pagelist
		 */
		goto wait_for_cwrites;
	}
	/*
	 * Consider the possibility that upl_size wasn't satisfied.
	 */
	if (upl_size < upl_needed_size) {
		/*
		 * This is a failure in the physical memory case.
		 */
		goto wait_for_cwrites;
	}
	pl = ubc_upl_pageinfo(upl[cur_upl]);

	src_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)upl_offset;
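	/*
	 * Note: upl_phys_page() returns a physical page frame number; shifting it
	 * left by 12 converts it to a byte address under the assumption of a 4K
	 * page size (1 << 12 == PAGE_SIZE here), and adding upl_offset restores the
	 * byte offset of iov_base within that first page.
	 */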
	while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
		u_int32_t head_size;

		head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));

		if (head_size > io_size)
			head_size = io_size;

		error = cluster_align_phys_io(vp, uio, src_paddr, head_size, 0, callback, callback_arg);

		if (error)
			goto wait_for_cwrites;

		upl_offset += head_size;
		src_paddr  += head_size;
		io_size    -= head_size;

		iov_base   += head_size;
	}
	if ((u_int32_t)iov_base & mem_alignment_mask) {
		/*
		 * request doesn't set up on a memory boundary
		 * the underlying DMA engine can handle...
		 * return an error instead of going through
		 * the slow copy path since the intent of this
		 * path is direct I/O from device memory
		 */
		error = EINVAL;
		goto wait_for_cwrites;
	}

	tail_size = io_size & (devblocksize - 1);
	io_size  -= tail_size;
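	/*
	 * For example (hypothetical sizes): with a devblocksize of 512 and 1500 bytes
	 * left after the head alignment loop, tail_size becomes 1500 & 511 = 476 and
	 * io_size drops to 1024, so the main loop below issues only whole device
	 * blocks and the 476-byte tail is finished later via cluster_align_phys_io.
	 */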
	while (io_size && error == 0) {

		if (io_size > MAX_IO_CONTIG_SIZE)
			xsize = MAX_IO_CONTIG_SIZE;
		else
			xsize = io_size;
		/*
		 * request asynchronously so that we can overlap
		 * the preparation of the next I/O... we'll do
		 * the commit after all the I/O has completed
		 * since its all issued against the same UPL
		 * if there are already too many outstanding writes
		 * wait until some have completed before issuing the next
		 */
		if (iostate.io_issued > iostate.io_completed) {
			lck_mtx_lock(cl_mtxp);

			while ((iostate.io_issued - iostate.io_completed) > (MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2))) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
					     iostate.io_issued, iostate.io_completed, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), 0, 0);

				iostate.io_wanted = 1;
				msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_contig", NULL);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
					     iostate.io_issued, iostate.io_completed, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), 0, 0);
			}
			lck_mtx_unlock(cl_mtxp);
		}
		if (iostate.io_error) {
			/*
			 * one of the earlier writes we issued ran into a hard error
			 * don't issue any more writes...
			 * go wait for all writes that are part of this stream
			 * to complete before returning the error to the caller
			 */
			goto wait_for_cwrites;
		}
		/*
		 * issue an asynchronous write to cluster_io
		 */
		error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset,
				   xsize, CL_DEV_MEMORY | CL_ASYNC | bflag, (buf_t)NULL, (struct clios *)&iostate, callback, callback_arg);

		if (error == 0) {
			/*
			 * The cluster_io write completed successfully,
			 * update the uio structure
			 */
			uio_update(uio, (user_size_t)xsize);

			upl_offset += xsize;
			src_paddr  += xsize;
			io_size    -= xsize;
		}
	}
	if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS) {

		error = cluster_io_type(uio, write_type, write_length, 0);

		if (error == 0 && *write_type == IO_CONTIG) {
			cur_upl++;
			goto next_cwrite;
		}
	} else
		*write_type = IO_UNKNOWN;
wait_for_cwrites:
	/*
	 * make sure all async writes that are part of this stream
	 * have completed before we proceed
	 */
	if (iostate.io_issued > iostate.io_completed) {

		lck_mtx_lock(cl_mtxp);

		while (iostate.io_issued != iostate.io_completed) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
				     iostate.io_issued, iostate.io_completed, 0, 0, 0);

			iostate.io_wanted = 1;
			msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_contig", NULL);

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
				     iostate.io_issued, iostate.io_completed, 0, 0, 0);
		}
		lck_mtx_unlock(cl_mtxp);
	}
	if (iostate.io_error)
		error = iostate.io_error;

	if (error == 0 && tail_size)
		error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg);

	for (n = 0; n < num_upl; n++)
		/*
		 * just release our hold on each physically contiguous
		 * region without changing any state
		 */
		ubc_upl_abort(upl[n], 0);

	return (error);
}
/*
 * need to avoid a race between an msync of a range of pages dirtied via mmap
 * vs a filesystem such as HFS deciding to write a 'hole' to disk via cluster_write's
 * zerofill mechanism before it has seen the VNOP_PAGEOUTs for the pages being msync'd
 *
 * we should never force-zero-fill pages that are already valid in the cache...
 * the entire page contains valid data (either from disk, zero-filled or dirtied
 * via an mmap) so we can only do damage by trying to zero-fill
 */
static int
cluster_zero_range(upl_t upl, upl_page_info_t *pl, int flags, int io_offset, off_t zero_off, off_t upl_f_offset, int bytes_to_zero)
{
	int		zero_pg_index;
	boolean_t	need_cluster_zero = TRUE;

	if ((flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {

		bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64));
		zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64);

		if (upl_valid_page(pl, zero_pg_index)) {
			/*
			 * never force zero valid pages - dirty or clean
			 * we'll leave these in the UPL for cluster_write_copy to deal with
			 */
			need_cluster_zero = FALSE;
		}
	}
	if (need_cluster_zero == TRUE)
		cluster_zero(upl, io_offset, bytes_to_zero, NULL);

	return (bytes_to_zero);
}
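/*
 * For example (hypothetical values): with zero_off = upl_f_offset + 0x1200 and
 * bytes_to_zero = 0x2000, an IO_NOZEROVALID caller has the request clipped to
 * the 0xe00 bytes remaining in page index 1 of the UPL; if that page is already
 * valid, nothing is zeroed and the clipped count is simply returned so the
 * caller can advance past the range.
 */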
static int
cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, off_t headOff,
		   off_t tailOff, int flags, int (*callback)(buf_t, void *), void *callback_arg)
{
	upl_page_info_t	 *pl;
	vm_offset_t	 upl_offset = 0;
	long long	 total_size;
	long long	 zero_cnt1;
	struct cl_extent cl;
	struct cl_writebehind *wbp;
	u_int		 max_cluster_pgcount;

	if (uio) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
			     (int)uio->uio_offset, io_req_size, (int)oldEOF, (int)newEOF, 0);

		io_resid = io_req_size;
	} else {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
			     0, 0, (int)oldEOF, (int)newEOF, 0);

		io_resid = 0;
	}
	if (flags & IO_PASSIVE)
		bflag = CL_PASSIVE;
	else
		bflag = 0;

	max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
	max_io_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
	if (flags & IO_HEADZEROFILL) {
		/*
		 * some filesystems (HFS is one) don't support unallocated holes within a file...
		 * so we zero fill the intervening space between the old EOF and the offset
		 * where the next chunk of real data begins.... ftruncate will also use this
		 * routine to zero fill to the new EOF when growing a file... in this case, the
		 * uio structure will not be provided
		 */
		if (headOff < uio->uio_offset) {
			zero_cnt = uio->uio_offset - headOff;
			zero_off = headOff;
		} else if (headOff < newEOF) {
			zero_cnt = newEOF - headOff;
			zero_off = headOff;
		}
	} else {
		if (uio && uio->uio_offset > oldEOF) {
			zero_off = uio->uio_offset & ~PAGE_MASK_64;

			if (zero_off >= oldEOF) {
				zero_cnt = uio->uio_offset - zero_off;

				flags |= IO_HEADZEROFILL;
			}
		}
	}
	if (flags & IO_TAILZEROFILL) {
		zero_off1 = uio->uio_offset + io_req_size;

		if (zero_off1 < tailOff)
			zero_cnt1 = tailOff - zero_off1;
	} else {
		if (uio && newEOF > oldEOF) {
			zero_off1 = uio->uio_offset + io_req_size;

			if (zero_off1 == newEOF && (zero_off1 & PAGE_MASK_64)) {
				zero_cnt1 = PAGE_SIZE_64 - (zero_off1 & PAGE_MASK_64);

				flags |= IO_TAILZEROFILL;
			}
		}
	}
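	/*
	 * For example (hypothetical offsets): appending at offset 0x3200 to a file
	 * whose old EOF is 0x1234 sets zero_off = 0x3000 and zero_cnt = 0x200, so the
	 * head of the newly written page is zeroed rather than exposing stale data;
	 * if the write ends exactly at a new EOF of 0x3500, zero_off1 = 0x3500 and
	 * zero_cnt1 = 0xb00 zero the remainder of that last page as well.
	 */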
	if (zero_cnt == 0 && uio == (struct uio *) 0) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
			     retval, 0, 0, 0, 0);
		return (0);
	}

	while ((total_size = (io_resid + zero_cnt + zero_cnt1)) && retval == 0) {
		/*
		 * for this iteration of the loop, figure out where our starting point is
		 */
		if (zero_cnt) {
			start_offset = (int)(zero_off & PAGE_MASK_64);
			upl_f_offset = zero_off - start_offset;
		} else if (io_resid) {
			start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
			upl_f_offset = uio->uio_offset - start_offset;
		} else {
			start_offset = (int)(zero_off1 & PAGE_MASK_64);
			upl_f_offset = zero_off1 - start_offset;
		}
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE,
			     (int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0);

		if (total_size > max_io_size)
			total_size = max_io_size;

		cl.b_addr = (daddr64_t)(upl_f_offset / PAGE_SIZE_64);
		if (uio && ((flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0)) {
			/*
			 * assumption... total_size <= io_resid
			 * because IO_HEADZEROFILL and IO_TAILZEROFILL not set
			 */
			if ((start_offset + total_size) > max_io_size)
				total_size -= start_offset;
			xfer_resid = total_size;

			retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1);

			io_resid    -= (total_size - xfer_resid);
			total_size   = xfer_resid;
			start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
			upl_f_offset = uio->uio_offset - start_offset;

			if (total_size == 0) {
				if (start_offset) {
					/*
					 * the write did not finish on a page boundary
					 * which will leave upl_f_offset pointing to the
					 * beginning of the last page written instead of
					 * the page beyond it... bump it in this case
					 * so that the cluster code records the last page
					 * written as dirty
					 */
					upl_f_offset += PAGE_SIZE_64;
				}
			}
		}
		/*
		 * compute the size of the upl needed to encompass
		 * the requested write... limit each call to cluster_io
		 * to the maximum UPL size... cluster_io will clip if
		 * this exceeds the maximum io_size for the device,
		 * make sure to account for
		 * a starting offset that's not page aligned
		 */
		upl_size = (start_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

		if (upl_size > max_io_size)
			upl_size = max_io_size;

		pages_in_upl = upl_size / PAGE_SIZE;
		io_size = upl_size - start_offset;

		if ((long long)io_size > total_size)
			io_size = total_size;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);

		/*
		 * Gather the pages from the buffer cache.
		 * The UPL_WILL_MODIFY flag lets the UPL subsystem know
		 * that we intend to modify these pages.
		 */
		kret = ubc_create_upl(vp,
				      UPL_SET_LITE | ((uio != NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 0 : UPL_WILL_MODIFY));
		if (kret != KERN_SUCCESS)
			panic("cluster_write_copy: failed to get pagelist");

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END,
			     upl, (int)upl_f_offset, start_offset, 0, 0);
		if (start_offset && upl_f_offset < oldEOF && !upl_valid_page(pl, 0)) {
			/*
			 * we're starting in the middle of the first page of the upl
			 * and the page isn't currently valid, so we're going to have
			 * to read it in first... this is a synchronous operation
			 */
			read_size = PAGE_SIZE;

			if ((upl_f_offset + read_size) > oldEOF)
				read_size = oldEOF - upl_f_offset;

			retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
					    CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
			if (retval) {
				/*
				 * we had an error during the read which causes us to abort
				 * the current cluster_write request... before we do, we need
				 * to release the rest of the pages in the upl without modifying
				 * their state and mark the failed page in error
				 */
				ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);

				if (upl_size > PAGE_SIZE)
					ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
					     upl, 0, 0, retval, 0);
				break;
			}
		}
		if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
			/*
			 * the last offset we're writing to in this upl does not end on a page
			 * boundary... if it's not beyond the old EOF, then we'll also need to
			 * pre-read this page in if it isn't already valid
			 */
			upl_offset = upl_size - PAGE_SIZE;

			if ((upl_f_offset + start_offset + io_size) < oldEOF &&
			    !upl_valid_page(pl, upl_offset / PAGE_SIZE)) {

				read_size = PAGE_SIZE;

				if ((off_t)(upl_f_offset + upl_offset + read_size) > oldEOF)
					read_size = oldEOF - (upl_f_offset + upl_offset);

				retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
						    CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
				if (retval) {
					/*
					 * we had an error during the read which causes us to abort
					 * the current cluster_write request... before we do, we
					 * need to release the rest of the pages in the upl without
					 * modifying their state and mark the failed page in error
					 */
					ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);

					if (upl_size > PAGE_SIZE)
						ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);

					KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
						     upl, 0, 0, retval, 0);
					break;
				}
			}
		}
		xfer_resid = io_size;
		io_offset = start_offset;

		while (zero_cnt && xfer_resid) {

			if (zero_cnt < (long long)xfer_resid)
				bytes_to_zero = zero_cnt;
			else
				bytes_to_zero = xfer_resid;

			bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off, upl_f_offset, bytes_to_zero);

			xfer_resid -= bytes_to_zero;
			zero_cnt   -= bytes_to_zero;
			zero_off   += bytes_to_zero;
			io_offset  += bytes_to_zero;
		}
		if (xfer_resid && io_resid) {
			u_int32_t io_requested;

			bytes_to_move = min(io_resid, xfer_resid);
			io_requested = bytes_to_move;

			retval = cluster_copy_upl_data(uio, upl, io_offset, (int *)&io_requested);

			if (retval) {
				ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
					     upl, 0, 0, retval, 0);
			} else {
				io_resid   -= bytes_to_move;
				xfer_resid -= bytes_to_move;
				io_offset  += bytes_to_move;
			}
		}
		while (xfer_resid && zero_cnt1 && retval == 0) {

			if (zero_cnt1 < (long long)xfer_resid)
				bytes_to_zero = zero_cnt1;
			else
				bytes_to_zero = xfer_resid;

			bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off1, upl_f_offset, bytes_to_zero);

			xfer_resid -= bytes_to_zero;
			zero_cnt1  -= bytes_to_zero;
			zero_off1  += bytes_to_zero;
			io_offset  += bytes_to_zero;
		}
		if (retval == 0) {
			int ret_cluster_try_push;

			io_size += start_offset;

			if ((upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
				/*
				 * if we're extending the file with this write
				 * we'll zero fill the rest of the page so that
				 * if the file gets extended again in such a way as to leave a
				 * hole starting at this EOF, we'll have zero's in the correct spot
				 */
				cluster_zero(upl, io_size, upl_size - io_size, NULL);
			}
			/*
			 * release the upl now if we hold one since...
			 * 1) pages in it may be present in the sparse cluster map
			 *    and may span 2 separate buckets there... if they do and
			 *    we happen to have to flush a bucket to make room and it intersects
			 *    this upl, a deadlock may result on page BUSY
			 * 2) we're delaying the I/O... from this point forward we're just updating
			 *    the cluster state... no need to hold the pages, so commit them
			 * 3) IO_SYNC is set...
			 *    because we had to ask for a UPL that provides currently non-present pages, the
			 *    UPL has been automatically set to clear the dirty flags (both software and hardware)
			 *    upon committing it... this is not the behavior we want since it's possible for
			 *    pages currently present as part of a mapped file to be dirtied while the I/O is in flight.
			 *    we'll pick these pages back up later with the correct behavior specified.
			 * 4) we don't want to hold pages busy in a UPL and then block on the cluster lock... if a flush
			 *    of this vnode is in progress, we will deadlock if the pages being flushed intersect the pages
			 *    we hold since the flushing context is holding the cluster lock.
			 */
			ubc_upl_commit_range(upl, 0, upl_size,
					     UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);

			/*
			 * calculate the last logical block number
			 * that this delayed I/O encompassed
			 */
			cl.e_addr = (daddr64_t)((upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64);
			if (flags & IO_SYNC) {
				/*
				 * if the IO_SYNC flag is set then we need to
				 * bypass any clusters and immediately issue the I/O
				 */
				goto issue_io;
			}
			/*
			 * take the lock to protect our accesses
			 * of the writebehind and sparse cluster state
			 */
			wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);

			if (wbp->cl_scmap) {

				if ( !(flags & IO_NOCACHE)) {
					/*
					 * we've fallen into the sparse
					 * cluster method of delaying dirty pages
					 */
					sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);

					lck_mtx_unlock(&wbp->cl_lockw);

					continue;
				}
				/*
				 * must have done cached writes that fell into
				 * the sparse cluster mechanism... we've switched
				 * to uncached writes on the file, so go ahead
				 * and push whatever's in the sparse map
				 * and switch back to normal clustering
				 */
				sparse_cluster_push(&(wbp->cl_scmap), vp, newEOF, PUSH_ALL, callback, callback_arg);

				/*
				 * no clusters of either type present at this point
				 * so just go directly to start_new_cluster since
				 * we know we need to delay this I/O since we've
				 * already released the pages back into the cache
				 * to avoid the deadlock with sparse_cluster_push
				 */
				goto start_new_cluster;
			}
			if (wbp->cl_number == 0)
				/*
				 * no clusters currently present
				 */
				goto start_new_cluster;
			for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
				/*
				 * check each cluster that we currently hold
				 * try to merge some or all of this write into
				 * one or more of the existing clusters... if
				 * any portion of the write remains, start a
				 * new cluster
				 */
				if (cl.b_addr >= wbp->cl_clusters[cl_index].b_addr) {
					/*
					 * the current write starts at or after the current cluster
					 */
					if (cl.e_addr <= (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
						/*
						 * we have a write that fits entirely
						 * within the existing cluster limits
						 */
						if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr)
							/*
							 * update our idea of where the cluster ends
							 */
							wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
						break;
					}
					if (cl.b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
						/*
						 * we have a write that starts in the middle of the current cluster
						 * but extends beyond the cluster's limit... we know this because
						 * of the previous checks
						 * we'll extend the current cluster to the max
						 * and update the b_addr for the current write to reflect that
						 * the head of it was absorbed into this cluster...
						 * note that we'll always have a leftover tail in this case since
						 * full absorption would have occurred in the clause above
						 */
						wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount;

						cl.b_addr = wbp->cl_clusters[cl_index].e_addr;
					}
					/*
					 * we come here for the case where the current write starts
					 * beyond the limit of the existing cluster or we have a leftover
					 * tail after a partial absorption
					 *
					 * in either case, we'll check the remaining clusters before
					 * starting a new one
					 */
				} else {
					/*
					 * the current write starts in front of the cluster we're currently considering
					 */
					if ((wbp->cl_clusters[cl_index].e_addr - cl.b_addr) <= max_cluster_pgcount) {
						/*
						 * we can just merge the new request into
						 * this cluster and leave it in the cache
						 * since the resulting cluster is still
						 * less than the maximum allowable size
						 */
						wbp->cl_clusters[cl_index].b_addr = cl.b_addr;

						if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr) {
							/*
							 * the current write completely
							 * envelops the existing cluster and since
							 * each write is limited to at most max_cluster_pgcount pages
							 * we can just use the start and last blocknos of the write
							 * to generate the cluster limits
							 */
							wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
						}
						break;
					}
					/*
					 * if we were to combine this write with the current cluster
					 * we would exceed the cluster size limit.... so,
					 * let's see if there's any overlap of the new I/O with
					 * the cluster we're currently considering... in fact, we'll
					 * stretch the cluster out to its full limit and see if we
					 * get an intersection with the current write
					 */
					if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount) {
						/*
						 * the current write extends into the proposed cluster
						 * clip the length of the current write after first combining its
						 * tail with the newly shaped cluster
						 */
						wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount;

						cl.e_addr = wbp->cl_clusters[cl_index].b_addr;
					}
					/*
					 * if we get here, there was no way to merge
					 * any portion of this write with this cluster
					 * or we could only merge part of it which
					 * will leave a tail...
					 * we'll check the remaining clusters before starting a new one
					 */
				}
			}
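			/*
			 * Worked example (hypothetical block numbers, max_cluster_pgcount = 32):
			 * a write covering blocks 40-50 against an existing cluster starting at
			 * block 20 fits under 20 + 32, so only that cluster's e_addr may grow;
			 * a write covering blocks 45-80 has its head absorbed instead: the
			 * cluster is stretched to block 52 and the leftover tail (52-80) goes on
			 * to be checked against the remaining clusters or starts a new one.
			 */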
			if (cl_index < wbp->cl_number)
				/*
				 * we found an existing cluster(s) that we
				 * could entirely merge this I/O into
				 */
				goto delay_io;

			if (wbp->cl_number < MAX_CLUSTERS)
				/*
				 * we didn't find an existing cluster to
				 * merge into, but there's room to start
				 * a new one
				 */
				goto start_new_cluster;

			/*
			 * no existing cluster to merge with and no
			 * room to start a new one... we'll try
			 * pushing one of the existing ones... if none of
			 * them are able to be pushed, we'll switch
			 * to the sparse cluster mechanism
			 * cluster_try_push updates cl_number to the
			 * number of remaining clusters... and
			 * returns the number of currently unused clusters
			 */
			ret_cluster_try_push = 0;

			/*
			 * if writes are not deferred, call cluster push immediately
			 */
			if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE)) {

				ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, callback, callback_arg);
			}
			/*
			 * execute following regardless of writes being deferred or not
			 */
			if (ret_cluster_try_push == 0) {
				/*
				 * no more room in the normal cluster mechanism
				 * so let's switch to the more expansive but expensive
				 * sparse mechanism....
				 */
				sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg);
				sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);

				lck_mtx_unlock(&wbp->cl_lockw);

				continue;
			}
			/*
			 * we pushed one cluster successfully, so we must be sequentially writing this file
			 * otherwise, we would have failed and fallen into the sparse cluster support
			 * so let's take the opportunity to push out additional clusters...
			 * this will give us better I/O locality if we're in a copy loop
			 * (i.e. we won't jump back and forth between the read and write points
			 */
			if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE)) {
				while (wbp->cl_number)
					cluster_try_push(wbp, vp, newEOF, 0, callback, callback_arg);
			}
start_new_cluster:
			wbp->cl_clusters[wbp->cl_number].b_addr = cl.b_addr;
			wbp->cl_clusters[wbp->cl_number].e_addr = cl.e_addr;

			wbp->cl_clusters[wbp->cl_number].io_flags = 0;

			if (flags & IO_NOCACHE)
				wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IONOCACHE;

			if (bflag & CL_PASSIVE)
				wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IOPASSIVE;

			wbp->cl_number++;
delay_io:
			lck_mtx_unlock(&wbp->cl_lockw);

			continue;
issue_io:
			/*
			 * we don't hold the lock at this point
			 *
			 * we've already dropped the current upl, so pick it back up with COPYOUT_FROM set
			 * so that we correctly deal with a change in state of the hardware modify bit...
			 * we do this via cluster_push_now... by passing along the IO_SYNC flag, we force
			 * cluster_push_now to wait until all the I/Os have completed... cluster_push_now is also
			 * responsible for generating the correct sized I/O(s)
			 */
			retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg);
		}
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, retval, 0, io_resid, 0, 0);

	return (retval);
}
int
cluster_read(vnode_t vp, struct uio *uio, off_t filesize, int xflags)
{
	return cluster_read_ext(vp, uio, filesize, xflags, NULL, NULL);
}


int
cluster_read_ext(vnode_t vp, struct uio *uio, off_t filesize, int xflags, int (*callback)(buf_t, void *), void *callback_arg)
{
	user_ssize_t	cur_resid;
	u_int32_t	read_length = 0;
	int		read_type = IO_COPY;

	flags = xflags;

	if (vp->v_flag & VNOCACHE_DATA)
		flags |= IO_NOCACHE;
	if ((vp->v_flag & VRAOFF) || speculative_reads_disabled)
		flags |= IO_RAOFF;

	/*
	 * do a read through the cache if one of the following is true....
	 *   NOCACHE is not true
	 *   the uio request doesn't target USERSPACE
	 * otherwise, find out if we want the direct or contig variant for
	 * the first vector in the uio request
	 */
	if ( (flags & IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg) )
		retval = cluster_io_type(uio, &read_type, &read_length, 0);

	while ((cur_resid = uio_resid(uio)) && uio->uio_offset < filesize && retval == 0) {

		switch (read_type) {

		case IO_COPY:
			/*
			 * make sure the uio_resid isn't too big...
			 * internally, we want to handle all of the I/O in
			 * chunk sizes that fit in a 32 bit int
			 */
			if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE))
				io_size = MAX_IO_REQUEST_SIZE;
			else
				io_size = (u_int32_t)cur_resid;

			retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
			break;

		case IO_DIRECT:
			retval = cluster_read_direct(vp, uio, filesize, &read_type, &read_length, flags, callback, callback_arg);
			break;

		case IO_CONTIG:
			retval = cluster_read_contig(vp, uio, filesize, &read_type, &read_length, callback, callback_arg, flags);
			break;

		case IO_UNKNOWN:
			retval = cluster_io_type(uio, &read_type, &read_length, 0);
			break;
		}
	}
	return (retval);
}
static void
cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference)
{
	int range;
	int abort_flags = UPL_ABORT_FREE_ON_EMPTY;

	if ((range = last_pg - start_pg)) {
		if (take_reference)
			abort_flags |= UPL_ABORT_REFERENCE;

		ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, range * PAGE_SIZE, abort_flags);
	}
}
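/*
 * Note (interpretation, not from the original comments): UPL_ABORT_REFERENCE
 * asks the VM to treat the aborted pages as recently referenced, so cached
 * pages that a read found already valid stay warm in the cache; without
 * take_reference they are simply released unchanged.
 */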
static int
cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
{
	upl_page_info_t	 *pl;
	vm_offset_t	 upl_offset;
	off_t		 last_ioread_offset;
	off_t		 last_request_offset;
	u_int32_t	 size_of_prefetch;
	u_int32_t	 max_rd_size;
	u_int32_t	 max_io_size;
	u_int32_t	 max_prefetch;
	u_int		 rd_ahead_enabled = 1;
	u_int		 prefetch_enabled = 1;
	struct cl_readahead *rap;
	struct clios	 iostate;
	struct cl_extent extent;
	int		 take_reference = 1;
	int		 policy = IOPOL_DEFAULT;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START,
		     (int)uio->uio_offset, io_req_size, (int)filesize, flags, 0);

	policy = current_proc()->p_iopol_disk;

	ut = get_bsdthread_info(current_thread());

	if (ut->uu_iopol_disk != IOPOL_DEFAULT)
		policy = ut->uu_iopol_disk;

	if (policy == IOPOL_THROTTLE || (flags & IO_NOCACHE))
		take_reference = 0;

	if (flags & IO_PASSIVE)
		bflag = CL_PASSIVE;
	else
		bflag = 0;

	max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
	max_prefetch = MAX_PREFETCH(vp, max_io_size);
	max_rd_size = max_prefetch;
	last_request_offset = uio->uio_offset + io_req_size;

	if (last_request_offset > filesize)
		last_request_offset = filesize;

	if ((flags & (IO_RAOFF|IO_NOCACHE)) || ((last_request_offset & ~PAGE_MASK_64) == (uio->uio_offset & ~PAGE_MASK_64))) {
		rd_ahead_enabled = 0;
		rap = NULL;
	} else {
		if (cluster_hard_throttle_on(vp, 1)) {
			rd_ahead_enabled = 0;
			prefetch_enabled = 0;

			max_rd_size = HARD_THROTTLE_MAXSIZE;
		} else if (policy == IOPOL_THROTTLE) {
			rd_ahead_enabled = 0;
			prefetch_enabled = 0;
		}
		if ((rap = cluster_get_rap(vp)) == NULL)
			rd_ahead_enabled = 0;
	}
	extent.b_addr = uio->uio_offset / PAGE_SIZE_64;
	extent.e_addr = (last_request_offset - 1) / PAGE_SIZE_64;

	if (rap != NULL && rap->cl_ralen && (rap->cl_lastr == extent.b_addr || (rap->cl_lastr + 1) == extent.b_addr)) {
		/*
		 * determine if we already have a read-ahead in the pipe courtesy of the
		 * last read systemcall that was issued...
		 * if so, pick up its extent to determine where we should start
		 * with respect to any read-ahead that might be necessary to
		 * garner all the data needed to complete this read systemcall
		 */
		last_ioread_offset = (rap->cl_maxra * PAGE_SIZE_64) + PAGE_SIZE_64;

		if (last_ioread_offset < uio->uio_offset)
			last_ioread_offset = (off_t)0;
		else if (last_ioread_offset > last_request_offset)
			last_ioread_offset = last_request_offset;
	} else
		last_ioread_offset = (off_t)0;
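	/*
	 * For example (hypothetical page numbers): if the previous read-ahead
	 * advanced rap->cl_maxra to page 9 and this request continues sequentially
	 * (its first page equals rap->cl_lastr or rap->cl_lastr + 1),
	 * last_ioread_offset is set to the start of page 10, i.e. the first byte the
	 * read-ahead pipe has not already covered, so prefetching below resumes from
	 * there instead of re-reading pages already in flight.
	 */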
	while (io_req_size && uio->uio_offset < filesize && retval == 0) {

		max_size = filesize - uio->uio_offset;

		if ((off_t)(io_req_size) < max_size)
			io_size = io_req_size;
		else
			io_size = max_size;

		if (!(flags & IO_NOCACHE)) {
			u_int32_t io_resid;
			u_int32_t io_requested;

			/*
			 * if we keep finding the pages we need already in the cache, then
			 * don't bother to call cluster_read_prefetch since it costs CPU cycles
			 * to determine that we have all the pages we need... once we miss in
			 * the cache and have issued an I/O, then we'll assume that we're likely
			 * to continue to miss in the cache and it's to our advantage to try and prefetch
			 */
			if (last_request_offset && last_ioread_offset && (size_of_prefetch = (last_request_offset - last_ioread_offset))) {
				if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) {
					/*
					 * we've already issued I/O for this request and
					 * there's still work to do and
					 * our prefetch stream is running dry, so issue a
					 * pre-fetch I/O... the I/O latency will overlap
					 * with the copying of the data
					 */
					if (size_of_prefetch > max_rd_size)
						size_of_prefetch = max_rd_size;

					size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);

					last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);

					if (last_ioread_offset > last_request_offset)
						last_ioread_offset = last_request_offset;
				}
			}
			/*
			 * limit the size of the copy we're about to do so that
			 * we can notice that our I/O pipe is running dry and
			 * get the next I/O issued before it does go dry
			 */
			if (last_ioread_offset && io_size > (max_io_size / 4))
				io_resid = (max_io_size / 4);
			else
				io_resid = io_size;
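			/*
			 * For example (hypothetical sizes): with max_io_size of 1MB and 800KB
			 * still to read, only 256KB is copied out of the cache per pass once a
			 * prefetch stream exists, so the check above runs again soon enough to
			 * issue the next read-ahead before the pipe empties.
			 */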
			io_requested = io_resid;

			retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_resid, 0, last_ioread_offset == 0 ? take_reference : 0);

			xsize = io_requested - io_resid;

			io_size -= xsize;
			io_req_size -= xsize;

			if (retval || io_resid)
				/*
				 * if we run into a real error or
				 * a page that is not in the cache
				 * we need to leave streaming mode
				 */
				break;

			if (rd_ahead_enabled && (io_size == 0 || last_ioread_offset == last_request_offset)) {
				/*
				 * we're already finished the I/O for this read request
				 * let's see if we should do a read-ahead
				 */
				cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
			}
			if (rap != NULL) {
				if (extent.e_addr < rap->cl_lastr)
					rap->cl_lastr = extent.e_addr;
			}
		}
		/*
		 * recompute max_size since cluster_copy_ubc_data_internal
		 * may have advanced uio->uio_offset
		 */
		max_size = filesize - uio->uio_offset;
		/*
		 * compute the size of the upl needed to encompass
		 * the requested read... limit each call to cluster_io
		 * to the maximum UPL size... cluster_io will clip if
		 * this exceeds the maximum io_size for the device,
		 * make sure to account for
		 * a starting offset that's not page aligned
		 */
		start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
		upl_f_offset = uio->uio_offset - (off_t)start_offset;

		if (io_size > max_rd_size)
			io_size = max_rd_size;

		upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

		if (flags & IO_NOCACHE) {
			if (upl_size > max_io_size)
				upl_size = max_io_size;
		} else {
			if (upl_size > max_io_size / 4)
				upl_size = max_io_size / 4;
		}
		pages_in_upl = upl_size / PAGE_SIZE;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START,
			     upl, (int)upl_f_offset, upl_size, start_offset, 0);

		kret = ubc_create_upl(vp,
				      UPL_FILE_IO | UPL_SET_LITE);
		if (kret != KERN_SUCCESS)
			panic("cluster_read_copy: failed to get pagelist");

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_END,
			     upl, (int)upl_f_offset, upl_size, start_offset, 0);

		/*
		 * scan from the beginning of the upl looking for the first
		 * non-valid page.... this will become the first page in
		 * the request we're going to make to 'cluster_io'... if all
		 * of the pages are valid, we won't call through to 'cluster_io'
		 */
		for (start_pg = 0; start_pg < pages_in_upl; start_pg++) {
			if (!upl_valid_page(pl, start_pg))
				break;
		}

		/*
		 * scan from the starting invalid page looking for a valid
		 * page before the end of the upl is reached, if we
		 * find one, then it will be the last page of the request to
		 * 'cluster_io'
		 */
		for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
			if (upl_valid_page(pl, last_pg))
				break;
		}
		iostate.io_completed = 0;
		iostate.io_issued = 0;
		iostate.io_error = 0;
		iostate.io_wanted = 0;

		if (start_pg < last_pg) {
			/*
			 * we found a range of 'invalid' pages that must be filled
			 * if the last page in this range is the last page of the file
			 * we may have to clip the size of it to keep from reading past
			 * the end of the last physical block associated with the file
			 */
			upl_offset = start_pg * PAGE_SIZE;
			io_size    = (last_pg - start_pg) * PAGE_SIZE;

			if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize)
				io_size = filesize - (upl_f_offset + upl_offset);

			/*
			 * issue an asynchronous read to cluster_io
			 */
			error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
					   io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg);
		}
		if (error == 0) {
			/*
			 * if the read completed successfully, or there was no I/O request
			 * issued, then copy the data into user land via 'cluster_copy_upl_data'
			 * we'll first add on any 'valid'
			 * pages that were present in the upl when we acquired it.
			 */
			for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
				if (!upl_valid_page(pl, uio_last))
					break;
			}
			if (uio_last < pages_in_upl) {
				/*
				 * there were some invalid pages beyond the valid pages
				 * that we didn't issue an I/O for, just release them
				 * unchanged now, so that any prefetch/readahead can use them
				 */
				ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
						    (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
			}
			/*
			 * compute size to transfer this round, if io_req_size is
			 * still non-zero after this attempt, we'll loop around and
			 * set up for another I/O.
			 */
			val_size = (uio_last * PAGE_SIZE) - start_offset;

			if (val_size > max_size)
				val_size = max_size;

			if (val_size > io_req_size)
				val_size = io_req_size;

			if ((uio->uio_offset + val_size) > last_ioread_offset)
				last_ioread_offset = uio->uio_offset + val_size;
			if ((size_of_prefetch = (last_request_offset - last_ioread_offset)) && prefetch_enabled) {

				if ((last_ioread_offset - (uio->uio_offset + val_size)) <= upl_size) {
					/*
					 * if there's still I/O left to do for this request, and...
					 * we're not in hard throttle mode, and...
					 * we're close to using up the previous prefetch, then issue a
					 * new pre-fetch I/O... the I/O latency will overlap
					 * with the copying of the data
					 */
					if (size_of_prefetch > max_rd_size)
						size_of_prefetch = max_rd_size;

					size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);

					last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);

					if (last_ioread_offset > last_request_offset)
						last_ioread_offset = last_request_offset;
				}
			} else if ((uio->uio_offset + val_size) == last_request_offset) {
				/*
				 * this transfer will finish this request, so...
				 * let's try to read ahead if we're in
				 * a sequential access pattern and we haven't
				 * explicitly disabled it
				 */
				if (rd_ahead_enabled)
					cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);

				if (rap != NULL) {
					if (extent.e_addr < rap->cl_lastr)
						rap->cl_lastr = extent.e_addr;
				}
			}
			if (iostate.io_issued > iostate.io_completed) {

				lck_mtx_lock(cl_mtxp);

				while (iostate.io_issued != iostate.io_completed) {
					KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
						     iostate.io_issued, iostate.io_completed, 0, 0, 0);

					iostate.io_wanted = 1;
					msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_copy", NULL);

					KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
						     iostate.io_issued, iostate.io_completed, 0, 0, 0);
				}
				lck_mtx_unlock(cl_mtxp);
			}
			if (iostate.io_error)
				error = iostate.io_error;
			else {
				u_int32_t io_requested;

				io_requested = val_size;

				retval = cluster_copy_upl_data(uio, upl, start_offset, (int *)&io_requested);

				io_req_size -= (val_size - io_requested);
			}
		}
		if (start_pg < last_pg) {
			/*
			 * compute the range of pages that we actually issued an I/O for
			 * and either commit them as valid if the I/O succeeded
			 * or abort them if the I/O failed or we're not supposed to
			 * keep them in the cache
			 */
			io_size = (last_pg - start_pg) * PAGE_SIZE;

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, upl, start_pg * PAGE_SIZE, io_size, error, 0);

			if (error || (flags & IO_NOCACHE))
				ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
						    UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
			else {
				int commit_flags = UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY;

				if (take_reference)
					commit_flags |= UPL_COMMIT_INACTIVATE;
				else
					commit_flags |= UPL_COMMIT_SPECULATE;

				ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, commit_flags);
			}
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, start_pg * PAGE_SIZE, io_size, error, 0);
		}
		if ((last_pg - start_pg) < pages_in_upl) {
			/*
			 * the set of pages that we issued an I/O for did not encompass
			 * the entire upl... so just release these without modifying
			 * their state
			 */
			if (error)
				ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
			else {

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
					     upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);

				/*
				 * handle any valid pages at the beginning of
				 * the upl... release these appropriately
				 */
				cluster_read_upl_release(upl, 0, start_pg, take_reference);

				/*
				 * handle any valid pages immediately after the
				 * pages we issued I/O for... release these appropriately
				 */
				cluster_read_upl_release(upl, last_pg, uio_last, take_reference);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, -1, -1, 0, 0);
			}
		}
		if (cluster_hard_throttle_on(vp, 1)) {
			rd_ahead_enabled = 0;
			prefetch_enabled = 0;

			max_rd_size = HARD_THROTTLE_MAXSIZE;
		} else {
			if (max_rd_size == HARD_THROTTLE_MAXSIZE) {
				/*
				 * coming out of throttled state
				 */
				if (policy != IOPOL_THROTTLE) {
					rd_ahead_enabled = 1;
					prefetch_enabled = 1;
				}
				max_rd_size = max_prefetch;
				last_ioread_offset = 0;
			}
		}
	}
	if (rap != NULL) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
			     (int)uio->uio_offset, io_req_size, rap->cl_lastr, retval, 0);

		lck_mtx_unlock(&rap->cl_lockr);
	} else {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
			     (int)uio->uio_offset, io_req_size, 0, retval, 0);
	}
	return (retval);
}
static int
cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
		    int flags, int (*callback)(buf_t, void *), void *callback_arg)
{
	upl_page_info_t	 *pl;
	vm_offset_t	 upl_offset, vector_upl_offset = 0;
	upl_size_t	 upl_size, vector_upl_size = 0;
	vm_size_t	 upl_needed_size;
	unsigned int	 pages_in_pl;
	int		 force_data_sync;
	int		 no_zero_fill = 0;
	struct clios	 iostate;
	user_addr_t	 iov_base;
	u_int32_t	 io_req_size;
	u_int32_t	 offset_in_file;
	u_int32_t	 offset_in_iovbase;
	u_int32_t	 devblocksize;
	u_int32_t	 mem_alignment_mask;
	u_int32_t	 max_upl_size;
	u_int32_t	 max_rd_size;
	u_int32_t	 max_rd_ahead;
	u_int32_t	 vector_upl_iosize = 0;
	int		 issueVectorUPL = 0, useVectorUPL = (uio->uio_iovcnt > 1);
	off_t		 v_upl_uio_offset = 0;
	int		 vector_upl_index = 0;
	upl_t		 vector_upl = NULL;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START,
		     (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);

	max_upl_size = cluster_max_io_size(vp->v_mount, CL_READ);

	max_rd_size = max_upl_size;
	max_rd_ahead = max_rd_size * IO_SCALE(vp, 2);

	io_flag = CL_COMMIT | CL_READ | CL_ASYNC | CL_NOZERO | CL_DIRECT_IO;
	if (flags & IO_PASSIVE)
		io_flag |= CL_PASSIVE;

	iostate.io_completed = 0;
	iostate.io_issued = 0;
	iostate.io_error = 0;
	iostate.io_wanted = 0;

	devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
	mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
		     (int)devblocksize, (int)mem_alignment_mask, 0, 0, 0);

	if (devblocksize == 1) {
		/*
		 * the AFP client advertises a devblocksize of 1
		 * however, its BLOCKMAP routine maps to physical
		 * blocks that are PAGE_SIZE in size...
		 * therefore we can't ask for I/Os that aren't page aligned
		 * or aren't multiples of PAGE_SIZE in size
		 * by setting devblocksize to PAGE_SIZE, we re-instate
		 * the old behavior we had before the mem_alignment_mask
		 * changes went in...
		 */
		devblocksize = PAGE_SIZE;
	}
	io_req_size = *read_length;
	iov_base = uio_curriovbase(uio);

	max_io_size = filesize - uio->uio_offset;

	if ((off_t)io_req_size > max_io_size)
		io_req_size = max_io_size;

	offset_in_file = (u_int32_t)uio->uio_offset & (devblocksize - 1);
	offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;

	if (offset_in_file || offset_in_iovbase) {
		/*
		 * one of the 2 important offsets is misaligned
		 * so fire an I/O through the cache for this entire vector
		 */
		misaligned = 1;
	}
	if (iov_base & (devblocksize - 1)) {
		/*
		 * the offset in memory must be on a device block boundary
		 * so that we can guarantee that we can generate an
		 * I/O that ends on a page boundary in cluster_io
		 */
		misaligned = 1;
	}
	/*
	 * When we get to this point, we know...
	 *  -- the offset into the file is on a devblocksize boundary
	 */
	while (io_req_size && retval == 0) {

		if (cluster_hard_throttle_on(vp, 1)) {
			max_rd_size = HARD_THROTTLE_MAXSIZE;
			max_rd_ahead = HARD_THROTTLE_MAXSIZE - 1;
		} else {
			max_rd_size = max_upl_size;
			max_rd_ahead = max_rd_size * IO_SCALE(vp, 2);
		}
		io_start = io_size = io_req_size;

		/*
		 * First look for pages already in the cache
		 * and move them to user space.
		 *
		 * cluster_copy_ubc_data returns the resid
		 * in io_size
		 */
		retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_size, 0, 0);

		/*
		 * calculate the number of bytes actually copied
		 * starting size - residual
		 */
		xsize = io_start - io_size;

		io_req_size -= xsize;

		if (useVectorUPL && (xsize || (iov_base & PAGE_MASK))) {
			/*
			 * We found something in the cache or we have an iov_base that's not
			 * page-aligned.
			 * Issue all I/O's that have been collected within this Vectored UPL.
			 */
			if (vector_upl_index) {
				retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
				reset_vector_run_state();
			}
			/*
			 * After this point, if we are using the Vector UPL path and the base is
			 * not page-aligned then the UPL with that base will be the first in the vector UPL.
			 */
		}
		/*
		 * check to see if we are finished with this request...
		 */
		if (io_req_size == 0 || misaligned) {
			/*
			 * see if there's another uio vector to
			 * process that's of type IO_DIRECT
			 *
			 * break out of while loop to get there
			 */
			break;
		}
		/*
		 * assume the request ends on a device block boundary
		 */
		io_min = devblocksize;

		/*
		 * we can handle I/O's in multiples of the device block size
		 * however, if io_size isn't a multiple of devblocksize we
		 * want to clip it back to the nearest page boundary since
		 * we are going to have to go through cluster_read_copy to
		 * deal with the 'overhang'... by clipping it to a PAGE_SIZE
		 * multiple, we avoid asking the drive for the same physical
		 * blocks twice.. once for the partial page at the end of the
		 * request and a 2nd time for the page we read into the cache
		 * (which overlaps the end of the direct read) in order to
		 * get at the overhang bytes
		 */
		if (io_size & (devblocksize - 1)) {
			/*
			 * request does NOT end on a device block boundary
			 * so clip it back to a PAGE_SIZE boundary
			 */
			io_size &= ~PAGE_MASK;
		}
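		/*
		 * For example (hypothetical sizes): a 10000-byte request against a
		 * 512-byte devblocksize ends 272 bytes into a device block, so io_size
		 * is clipped to 8192 (two whole 4K pages); the direct path reads those
		 * pages and the 1808-byte overhang is finished by cluster_read_copy,
		 * which would otherwise re-read the same final physical blocks.
		 */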
		if (retval || io_size < io_min) {
			/*
			 * either an error or we only have the tail left to
			 * complete via the copy path...
			 * we may have already spun some portion of this request
			 * off as async requests... we need to wait for the I/O
			 * to complete before returning
			 */
			goto wait_for_dreads;
		}
		if ((xsize = io_size) > max_rd_size)
			xsize = max_rd_size;

		ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, (int *)&io_size);

		if (io_size == 0) {
			/*
			 * a page must have just come into the cache
			 * since the first page in this range is no
			 * longer absent, go back and re-evaluate
			 */
			continue;
		}
		iov_base = uio_curriovbase(uio);

		upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
		upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
			     (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);

		if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0)) {
			no_zero_fill = 1;
			abort_flag = UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY;
		} else {
			no_zero_fill = 0;
			abort_flag = UPL_ABORT_FREE_ON_EMPTY;
		}
		for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
			upl_size = upl_needed_size;
			upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;

			if (no_zero_fill)
				upl_flags |= UPL_NOZEROFILL;
			if (force_data_sync)
				upl_flags |= UPL_FORCE_DATA_SYNC;

			kret = vm_map_create_upl(current_map(),
						 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
						 &upl_size, &upl, NULL, &pages_in_pl, &upl_flags);

			if (kret != KERN_SUCCESS) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
					     (int)upl_offset, upl_size, io_size, kret, 0);
				/*
				 * failed to get pagelist
				 *
				 * we may have already spun some portion of this request
				 * off as async requests... we need to wait for the I/O
				 * to complete before returning
				 */
				goto wait_for_dreads;
			}
			pages_in_pl = upl_size / PAGE_SIZE;
			pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

			for (i = 0; i < pages_in_pl; i++) {
				if (!upl_valid_page(pl, i))
					break;
			}
			if (i == pages_in_pl)
				break;

			ubc_upl_abort(upl, abort_flag);
		}
		if (force_data_sync >= 3) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
				     (int)upl_offset, upl_size, io_size, kret, 0);

			goto wait_for_dreads;
		}
4046 * Consider the possibility that upl_size wasn't satisfied.
4048 if (upl_size
< upl_needed_size
) {
4049 if (upl_size
&& upl_offset
== 0)
4055 ubc_upl_abort(upl
, abort_flag
);
4056 goto wait_for_dreads
;
4058 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 72)) | DBG_FUNC_END
,
4059 (int)upl_offset
, upl_size
, io_size
, kret
, 0);
4062 vm_offset_t end_off
= ((iov_base
+ io_size
) & PAGE_MASK
);
4066 * After this point, if we are using a vector UPL, then
4067 * either all the UPL elements end on a page boundary OR
4068 * this UPL is the last element because it does not end
4069 * on a page boundary.
4074 * request asynchronously so that we can overlap
4075 * the preparation of the next I/O
4076 * if there are already too many outstanding reads
4077 * wait until some have completed before issuing the next read
4079 if (iostate
.io_issued
> iostate
.io_completed
) {
4081 lck_mtx_lock(cl_mtxp
);
4083 while ((iostate
.io_issued
- iostate
.io_completed
) > max_rd_ahead
) {
4084 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 95)) | DBG_FUNC_START
,
4085 iostate
.io_issued
, iostate
.io_completed
, max_rd_ahead
, 0, 0);
4087 iostate
.io_wanted
= 1;
4088 msleep((caddr_t
)&iostate
.io_wanted
, cl_mtxp
, PRIBIO
+ 1, "cluster_read_direct", NULL
);
4090 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 95)) | DBG_FUNC_END
,
4091 iostate
.io_issued
, iostate
.io_completed
, max_rd_ahead
, 0, 0);
4093 lck_mtx_unlock(cl_mtxp
);
4095 if (iostate
.io_error
) {
4097 * one of the earlier reads we issued ran into a hard error
4098 * don't issue any more reads, cleanup the UPL
4099 * that was just created but not used, then
4100 * go wait for any other reads to complete before
4101 * returning the error to the caller
4103 ubc_upl_abort(upl
, abort_flag
);
4105 goto wait_for_dreads
;
4107 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 73)) | DBG_FUNC_START
,
4108 upl
, (int)upl_offset
, (int)uio
->uio_offset
, io_size
, 0);
4113 io_flag
&= ~CL_PRESERVE
;
4115 io_flag
|= CL_PRESERVE
;
4117 retval
= cluster_io(vp
, upl
, upl_offset
, uio
->uio_offset
, io_size
, io_flag
, (buf_t
)NULL
, &iostate
, callback
, callback_arg
);
4121 if(!vector_upl_index
) {
4122 vector_upl
= vector_upl_create(upl_offset
);
4123 v_upl_uio_offset
= uio
->uio_offset
;
4124 vector_upl_offset
= upl_offset
;
4127 vector_upl_set_subupl(vector_upl
,upl
, upl_size
);
4128 vector_upl_set_iostate(vector_upl
, upl
, vector_upl_size
, upl_size
);
4130 vector_upl_size
+= upl_size
;
4131 vector_upl_iosize
+= io_size
;
4133 if(issueVectorUPL
|| vector_upl_index
== MAX_VECTOR_UPL_ELEMENTS
|| vector_upl_size
>= MAX_VECTOR_UPL_SIZE
) {
4134 retval
= vector_cluster_io(vp
, vector_upl
, vector_upl_offset
, v_upl_uio_offset
, vector_upl_iosize
, io_flag
, (buf_t
)NULL
, &iostate
, callback
, callback_arg
);
4135 reset_vector_run_state();
4139 * update the uio structure
4141 uio_update(uio
, (user_size_t
)io_size
);
4143 io_req_size
-= io_size
;
4145 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 73)) | DBG_FUNC_END
,
4146 upl
, (int)uio
->uio_offset
, io_req_size
, retval
, 0);
4150 if (retval
== 0 && iostate
.io_error
== 0 && io_req_size
== 0 && uio
->uio_offset
< filesize
) {
4152 retval
= cluster_io_type(uio
, read_type
, read_length
, 0);
4154 if (retval
== 0 && *read_type
== IO_DIRECT
) {
4156 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 70)) | DBG_FUNC_NONE
,
4157 (int)uio
->uio_offset
, (int)filesize
, *read_type
, *read_length
, 0);
4165 if(retval
== 0 && iostate
.io_error
== 0 && useVectorUPL
&& vector_upl_index
) {
4166 retval
= vector_cluster_io(vp
, vector_upl
, vector_upl_offset
, v_upl_uio_offset
, vector_upl_iosize
, io_flag
, (buf_t
)NULL
, &iostate
, callback
, callback_arg
);
4167 reset_vector_run_state();
4170 * make sure all async reads that are part of this stream
4171 * have completed before we return
4173 if (iostate
.io_issued
> iostate
.io_completed
) {
4175 lck_mtx_lock(cl_mtxp
);
4177 while (iostate
.io_issued
!= iostate
.io_completed
) {
4178 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 95)) | DBG_FUNC_START
,
4179 iostate
.io_issued
, iostate
.io_completed
, 0, 0, 0);
4181 iostate
.io_wanted
= 1;
4182 msleep((caddr_t
)&iostate
.io_wanted
, cl_mtxp
, PRIBIO
+ 1, "cluster_read_direct", NULL
);
4184 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 95)) | DBG_FUNC_END
,
4185 iostate
.io_issued
, iostate
.io_completed
, 0, 0, 0);
4187 lck_mtx_unlock(cl_mtxp
);
4189 if (iostate
.io_error
)
4190 retval
= iostate
.io_error
;
4192 if (io_req_size
&& retval
== 0) {
4194 * we couldn't handle the tail of this request in DIRECT mode
4195 * so fire it through the copy path
4197 retval
= cluster_read_copy(vp
, uio
, io_req_size
, filesize
, flags
, callback
, callback_arg
);
4199 *read_type
= IO_UNKNOWN
;
4201 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 70)) | DBG_FUNC_END
,
4202 (int)uio
->uio_offset
, (int)uio_resid(uio
), io_req_size
, retval
, 0);
4209 cluster_read_contig(vnode_t vp
, struct uio
*uio
, off_t filesize
, int *read_type
, u_int32_t
*read_length
,
4210 int (*callback
)(buf_t
, void *), void *callback_arg
, int flags
)
4212 upl_page_info_t
*pl
;
4213 upl_t upl
[MAX_VECTS
];
4214 vm_offset_t upl_offset
;
4215 addr64_t dst_paddr
= 0;
4216 user_addr_t iov_base
;
4218 upl_size_t upl_size
;
4219 vm_size_t upl_needed_size
;
4220 mach_msg_type_number_t pages_in_pl
;
4223 struct clios iostate
;
4230 u_int32_t devblocksize
;
4231 u_int32_t mem_alignment_mask
;
4232 u_int32_t tail_size
= 0;
4235 if (flags
& IO_PASSIVE
)
4241 * When we enter this routine, we know
4242 * -- the read_length will not exceed the current iov_len
4243 * -- the target address is physically contiguous for read_length
4245 cluster_syncup(vp
, filesize
, callback
, callback_arg
);
4247 devblocksize
= (u_int32_t
)vp
->v_mount
->mnt_devblocksize
;
4248 mem_alignment_mask
= (u_int32_t
)vp
->v_mount
->mnt_alignmentmask
;
4250 iostate
.io_completed
= 0;
4251 iostate
.io_issued
= 0;
4252 iostate
.io_error
= 0;
4253 iostate
.io_wanted
= 0;
4256 io_size
= *read_length
;
4258 max_size
= filesize
- uio
->uio_offset
;
4260 if (io_size
> max_size
)
4263 iov_base
= uio_curriovbase(uio
);
4265 upl_offset
= (vm_offset_t
)((u_int32_t
)iov_base
& PAGE_MASK
);
4266 upl_needed_size
= upl_offset
+ io_size
;
4269 upl_size
= upl_needed_size
;
4270 upl_flags
= UPL_FILE_IO
| UPL_NO_SYNC
| UPL_CLEAN_IN_PLACE
| UPL_SET_INTERNAL
| UPL_SET_LITE
| UPL_SET_IO_WIRE
;
4273 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 92)) | DBG_FUNC_START
,
4274 (int)upl_offset
, (int)upl_size
, (int)iov_base
, io_size
, 0);
4276 kret
= vm_map_get_upl(current_map(),
4277 (vm_map_offset_t
)(iov_base
& ~((user_addr_t
)PAGE_MASK
)),
4278 &upl_size
, &upl
[cur_upl
], NULL
, &pages_in_pl
, &upl_flags
, 0);
4280 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 92)) | DBG_FUNC_END
,
4281 (int)upl_offset
, upl_size
, io_size
, kret
, 0);
4283 if (kret
!= KERN_SUCCESS
) {
4285 * failed to get pagelist
4288 goto wait_for_creads
;
4292 if (upl_size
< upl_needed_size
) {
4294 * The upl_size wasn't satisfied.
4297 goto wait_for_creads
;
4299 pl
= ubc_upl_pageinfo(upl
[cur_upl
]);
4301 dst_paddr
= ((addr64_t
)upl_phys_page(pl
, 0) << 12) + (addr64_t
)upl_offset
;
4303 while (((uio
->uio_offset
& (devblocksize
- 1)) || io_size
< devblocksize
) && io_size
) {
4304 u_int32_t head_size
;
4306 head_size
= devblocksize
- (u_int32_t
)(uio
->uio_offset
& (devblocksize
- 1));
4308 if (head_size
> io_size
)
4309 head_size
= io_size
;
4311 error
= cluster_align_phys_io(vp
, uio
, dst_paddr
, head_size
, CL_READ
, callback
, callback_arg
);
4314 goto wait_for_creads
;
4316 upl_offset
+= head_size
;
4317 dst_paddr
+= head_size
;
4318 io_size
-= head_size
;
4320 iov_base
+= head_size
;
4322 if ((u_int32_t
)iov_base
& mem_alignment_mask
) {
4324 * request doesn't set up on a memory boundary
4325 * the underlying DMA engine can handle...
4326 * return an error instead of going through
4327 * the slow copy path since the intent of this
4328 * path is direct I/O to device memory
4331 goto wait_for_creads
;
4334 tail_size
= io_size
& (devblocksize
- 1);
4336 io_size
-= tail_size
;
4338 while (io_size
&& error
== 0) {
4340 if (io_size
> MAX_IO_CONTIG_SIZE
)
4341 xsize
= MAX_IO_CONTIG_SIZE
;
4345 * request asynchronously so that we can overlap
4346 * the preparation of the next I/O... we'll do
4347 * the commit after all the I/O has completed
4348 * since its all issued against the same UPL
4349 * if there are already too many outstanding reads
4350 * wait until some have completed before issuing the next
4352 if (iostate
.io_issued
> iostate
.io_completed
) {
4353 lck_mtx_lock(cl_mtxp
);
4355 while ((iostate
.io_issued
- iostate
.io_completed
) > (MAX_IO_CONTIG_SIZE
* IO_SCALE(vp
, 2))) {
4356 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 95)) | DBG_FUNC_START
,
4357 iostate
.io_issued
, iostate
.io_completed
, MAX_IO_CONTIG_SIZE
* IO_SCALE(vp
, 2), 0, 0);
4359 iostate
.io_wanted
= 1;
4360 msleep((caddr_t
)&iostate
.io_wanted
, cl_mtxp
, PRIBIO
+ 1, "cluster_read_contig", NULL
);
4362 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 95)) | DBG_FUNC_END
,
4363 iostate
.io_issued
, iostate
.io_completed
, MAX_IO_CONTIG_SIZE
* IO_SCALE(vp
, 2), 0, 0);
4365 lck_mtx_unlock(cl_mtxp
);
4367 if (iostate
.io_error
) {
4369 * one of the earlier reads we issued ran into a hard error
4370 * don't issue any more reads...
4371 * go wait for any other reads to complete before
4372 * returning the error to the caller
4374 goto wait_for_creads
;
4376 error
= cluster_io(vp
, upl
[cur_upl
], upl_offset
, uio
->uio_offset
, xsize
,
4377 CL_READ
| CL_NOZERO
| CL_DEV_MEMORY
| CL_ASYNC
| bflag
,
4378 (buf_t
)NULL
, &iostate
, callback
, callback_arg
);
4380 * The cluster_io read was issued successfully,
4381 * update the uio structure
4384 uio_update(uio
, (user_size_t
)xsize
);
4387 upl_offset
+= xsize
;
4391 if (error
== 0 && iostate
.io_error
== 0 && tail_size
== 0 && num_upl
< MAX_VECTS
&& uio
->uio_offset
< filesize
) {
4393 error
= cluster_io_type(uio
, read_type
, read_length
, 0);
4395 if (error
== 0 && *read_type
== IO_CONTIG
) {
4400 *read_type
= IO_UNKNOWN
;
4404 * make sure all async reads that are part of this stream
4405 * have completed before we proceed
4407 if (iostate
.io_issued
> iostate
.io_completed
) {
4409 lck_mtx_lock(cl_mtxp
);
4411 while (iostate
.io_issued
!= iostate
.io_completed
) {
4412 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 95)) | DBG_FUNC_START
,
4413 iostate
.io_issued
, iostate
.io_completed
, 0, 0, 0);
4415 iostate
.io_wanted
= 1;
4416 msleep((caddr_t
)&iostate
.io_wanted
, cl_mtxp
, PRIBIO
+ 1, "cluster_read_contig", NULL
);
4418 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 95)) | DBG_FUNC_END
,
4419 iostate
.io_issued
, iostate
.io_completed
, 0, 0, 0);
4421 lck_mtx_unlock(cl_mtxp
);
4423 if (iostate
.io_error
)
4424 error
= iostate
.io_error
;
4426 if (error
== 0 && tail_size
)
4427 error
= cluster_align_phys_io(vp
, uio
, dst_paddr
, tail_size
, CL_READ
, callback
, callback_arg
);
4429 for (n
= 0; n
< num_upl
; n
++)
4431 * just release our hold on each physically contiguous
4432 * region without changing any state
4434 ubc_upl_abort(upl
[n
], 0);
4441 cluster_io_type(struct uio
*uio
, int *io_type
, u_int32_t
*io_length
, u_int32_t min_length
)
4443 user_size_t iov_len
;
4444 user_addr_t iov_base
= 0;
4446 upl_size_t upl_size
;
4451 * skip over any emtpy vectors
4453 uio_update(uio
, (user_size_t
)0);
4455 iov_len
= uio_curriovlen(uio
);
4457 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 94)) | DBG_FUNC_START
, uio
, (int)iov_len
, 0, 0, 0);
4460 iov_base
= uio_curriovbase(uio
);
4462 * make sure the size of the vector isn't too big...
4463 * internally, we want to handle all of the I/O in
4464 * chunk sizes that fit in a 32 bit int
4466 if (iov_len
> (user_size_t
)MAX_IO_REQUEST_SIZE
)
4467 upl_size
= MAX_IO_REQUEST_SIZE
;
4469 upl_size
= (u_int32_t
)iov_len
;
4471 upl_flags
= UPL_QUERY_OBJECT_TYPE
;
4473 if ((vm_map_get_upl(current_map(),
4474 (vm_map_offset_t
)(iov_base
& ~((user_addr_t
)PAGE_MASK
)),
4475 &upl_size
, &upl
, NULL
, NULL
, &upl_flags
, 0)) != KERN_SUCCESS
) {
4477 * the user app must have passed in an invalid address
4484 *io_length
= upl_size
;
4486 if (upl_flags
& UPL_PHYS_CONTIG
)
4487 *io_type
= IO_CONTIG
;
4488 else if (iov_len
>= min_length
)
4489 *io_type
= IO_DIRECT
;
4494 * nothing left to do for this uio
4497 *io_type
= IO_UNKNOWN
;
4499 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 94)) | DBG_FUNC_END
, iov_base
, *io_type
, *io_length
, retval
, 0);
/*
 * generate advisory I/O's in the largest chunks possible
 * the completed pages will be released into the VM cache
 */
int
advisory_read(vnode_t vp, off_t filesize, off_t f_offset, int resid)
{
	return advisory_read_ext(vp, filesize, f_offset, resid, NULL, NULL, CL_PASSIVE);
}
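
/*
 * Illustrative sketch (editorial, not part of the original source): one way a
 * file system's sequential read path might use advisory_read() to pre-warm the
 * cache ahead of the current request.  The helper name and the read-ahead
 * window sizes are hypothetical; only advisory_read() itself comes from this file.
 */
#if 0
static void
example_prewarm(vnode_t vp, off_t filesize, off_t cur_offset)
{
	off_t	next  = cur_offset + (1024 * 1024);	/* assumed 1MB look-ahead distance */
	int	resid = 512 * 1024;			/* assumed advisory span */

	if (next < filesize)
		(void) advisory_read(vp, filesize, next, resid);
}
#endif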
4516 advisory_read_ext(vnode_t vp
, off_t filesize
, off_t f_offset
, int resid
, int (*callback
)(buf_t
, void *), void *callback_arg
, int bflag
)
4518 upl_page_info_t
*pl
;
4520 vm_offset_t upl_offset
;
4533 uint32_t max_io_size
;
4536 if ( !UBCINFOEXISTS(vp
))
4542 max_io_size
= cluster_max_io_size(vp
->v_mount
, CL_READ
);
4544 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 60)) | DBG_FUNC_START
,
4545 (int)f_offset
, resid
, (int)filesize
, 0, 0);
4547 while (resid
&& f_offset
< filesize
&& retval
== 0) {
4549 * compute the size of the upl needed to encompass
4550 * the requested read... limit each call to cluster_io
4551 * to the maximum UPL size... cluster_io will clip if
4552 * this exceeds the maximum io_size for the device,
4553 * make sure to account for
4554 * a starting offset that's not page aligned
4556 start_offset
= (int)(f_offset
& PAGE_MASK_64
);
4557 upl_f_offset
= f_offset
- (off_t
)start_offset
;
4558 max_size
= filesize
- f_offset
;
4560 if (resid
< max_size
)
4565 upl_size
= (start_offset
+ io_size
+ (PAGE_SIZE
- 1)) & ~PAGE_MASK
;
4566 if ((uint32_t)upl_size
> max_io_size
)
4567 upl_size
= max_io_size
;
4571 * return the number of contiguously present pages in the cache
4572 * starting at upl_f_offset within the file
4574 ubc_range_op(vp
, upl_f_offset
, upl_f_offset
+ upl_size
, UPL_ROP_PRESENT
, &skip_range
);
4578 * skip over pages already present in the cache
4580 io_size
= skip_range
- start_offset
;
4582 f_offset
+= io_size
;
4585 if (skip_range
== upl_size
)
4588 * have to issue some real I/O
4589 * at this point, we know it's starting on a page boundary
4590 * because we've skipped over at least the first page in the request
4593 upl_f_offset
+= skip_range
;
4594 upl_size
-= skip_range
;
4596 pages_in_upl
= upl_size
/ PAGE_SIZE
;
4598 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 61)) | DBG_FUNC_START
,
4599 upl
, (int)upl_f_offset
, upl_size
, start_offset
, 0);
4601 kret
= ubc_create_upl(vp
,
4606 UPL_RET_ONLY_ABSENT
| UPL_SET_LITE
);
4607 if (kret
!= KERN_SUCCESS
)
4612 * before we start marching forward, we must make sure we end on
4613 * a present page, otherwise we will be working with a freed
4616 for (last_pg
= pages_in_upl
- 1; last_pg
>= 0; last_pg
--) {
4617 if (upl_page_present(pl
, last_pg
))
4620 pages_in_upl
= last_pg
+ 1;
4623 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 61)) | DBG_FUNC_END
,
4624 upl
, (int)upl_f_offset
, upl_size
, start_offset
, 0);
4627 for (last_pg
= 0; last_pg
< pages_in_upl
; ) {
4629 * scan from the beginning of the upl looking for the first
4630 * page that is present.... this will become the first page in
4631 * the request we're going to make to 'cluster_io'... if all
4632 * of the pages are absent, we won't call through to 'cluster_io'
4634 for (start_pg
= last_pg
; start_pg
< pages_in_upl
; start_pg
++) {
4635 if (upl_page_present(pl
, start_pg
))
4640 * scan from the starting present page looking for an absent
4641 * page before the end of the upl is reached, if we
4642 * find one, then it will terminate the range of pages being
4643 * presented to 'cluster_io'
4645 for (last_pg
= start_pg
; last_pg
< pages_in_upl
; last_pg
++) {
4646 if (!upl_page_present(pl
, last_pg
))
4650 if (last_pg
> start_pg
) {
4652 * we found a range of pages that must be filled
4653 * if the last page in this range is the last page of the file
4654 * we may have to clip the size of it to keep from reading past
4655 * the end of the last physical block associated with the file
4657 upl_offset
= start_pg
* PAGE_SIZE
;
4658 io_size
= (last_pg
- start_pg
) * PAGE_SIZE
;
4660 if ((off_t
)(upl_f_offset
+ upl_offset
+ io_size
) > filesize
)
4661 io_size
= filesize
- (upl_f_offset
+ upl_offset
);
4664 * issue an asynchronous read to cluster_io
4666 retval
= cluster_io(vp
, upl
, upl_offset
, upl_f_offset
+ upl_offset
, io_size
,
4667 CL_ASYNC
| CL_READ
| CL_COMMIT
| CL_AGE
| bflag
, (buf_t
)NULL
, (struct clios
*)NULL
, callback
, callback_arg
);
4673 ubc_upl_abort(upl
, 0);
4675 io_size
= upl_size
- start_offset
;
4677 if (io_size
> resid
)
4679 f_offset
+= io_size
;
4683 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 60)) | DBG_FUNC_END
,
4684 (int)f_offset
, resid
, retval
, 0, 0);
int
cluster_push(vnode_t vp, int flags)
{
	return cluster_push_ext(vp, flags, NULL, NULL);
}
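
/*
 * Illustrative sketch (editorial, not in the original source): a file system
 * fsync path would typically force any delayed-write clusters out with
 * IO_SYNC so that cluster_push_ext() waits for the writes before returning.
 * The helper name is hypothetical; cluster_push() and IO_SYNC are real.
 */
#if 0
static int
example_flush_on_fsync(vnode_t vp)
{
	/* push every delayed cluster and wait for the I/O to complete */
	return cluster_push(vp, IO_SYNC);
}
#endif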
4698 cluster_push_ext(vnode_t vp
, int flags
, int (*callback
)(buf_t
, void *), void *callback_arg
)
4701 int my_sparse_wait
= 0;
4702 struct cl_writebehind
*wbp
;
4704 if ( !UBCINFOEXISTS(vp
)) {
4705 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 53)) | DBG_FUNC_NONE
, vp
, flags
, 0, -1, 0);
4708 /* return if deferred write is set */
4709 if (((unsigned int)vfs_flags(vp
->v_mount
) & MNT_DEFWRITE
) && (flags
& IO_DEFWRITE
)) {
4712 if ((wbp
= cluster_get_wbp(vp
, CLW_RETURNLOCKED
)) == NULL
) {
4713 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 53)) | DBG_FUNC_NONE
, vp
, flags
, 0, -2, 0);
4716 if (wbp
->cl_number
== 0 && wbp
->cl_scmap
== NULL
) {
4717 lck_mtx_unlock(&wbp
->cl_lockw
);
4719 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 53)) | DBG_FUNC_NONE
, vp
, flags
, 0, -3, 0);
4722 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 53)) | DBG_FUNC_START
,
4723 wbp
->cl_scmap
, wbp
->cl_number
, flags
, 0, 0);
4726 * if we have an fsync in progress, we don't want to allow any additional
4727 * sync/fsync/close(s) to occur until it finishes.
4728 * note that its possible for writes to continue to occur to this file
4729 * while we're waiting and also once the fsync starts to clean if we're
4730 * in the sparse map case
4732 while (wbp
->cl_sparse_wait
) {
4733 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 97)) | DBG_FUNC_START
, vp
, 0, 0, 0, 0);
4735 msleep((caddr_t
)&wbp
->cl_sparse_wait
, &wbp
->cl_lockw
, PRIBIO
+ 1, "cluster_push_ext", NULL
);
4737 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 97)) | DBG_FUNC_END
, vp
, 0, 0, 0, 0);
4739 if (flags
& IO_SYNC
) {
4741 wbp
->cl_sparse_wait
= 1;
4744 * this is an fsync (or equivalent)... we must wait for any existing async
4745 * cleaning operations to complete before we evaulate the current state
4746 * and finish cleaning... this insures that all writes issued before this
4747 * fsync actually get cleaned to the disk before this fsync returns
4749 while (wbp
->cl_sparse_pushes
) {
4750 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 98)) | DBG_FUNC_START
, vp
, 0, 0, 0, 0);
4752 msleep((caddr_t
)&wbp
->cl_sparse_pushes
, &wbp
->cl_lockw
, PRIBIO
+ 1, "cluster_push_ext", NULL
);
4754 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 98)) | DBG_FUNC_END
, vp
, 0, 0, 0, 0);
4757 if (wbp
->cl_scmap
) {
4760 if (wbp
->cl_sparse_pushes
< SPARSE_PUSH_LIMIT
) {
4762 scmap
= wbp
->cl_scmap
;
4763 wbp
->cl_scmap
= NULL
;
4765 wbp
->cl_sparse_pushes
++;
4767 lck_mtx_unlock(&wbp
->cl_lockw
);
4769 sparse_cluster_push(&scmap
, vp
, ubc_getsize(vp
), PUSH_ALL
| IO_PASSIVE
, callback
, callback_arg
);
4771 lck_mtx_lock(&wbp
->cl_lockw
);
4773 wbp
->cl_sparse_pushes
--;
4775 if (wbp
->cl_sparse_wait
&& wbp
->cl_sparse_pushes
== 0)
4776 wakeup((caddr_t
)&wbp
->cl_sparse_pushes
);
4778 sparse_cluster_push(&(wbp
->cl_scmap
), vp
, ubc_getsize(vp
), PUSH_ALL
| IO_PASSIVE
, callback
, callback_arg
);
4782 retval
= cluster_try_push(wbp
, vp
, ubc_getsize(vp
), PUSH_ALL
| IO_PASSIVE
, callback
, callback_arg
);
4784 lck_mtx_unlock(&wbp
->cl_lockw
);
4786 if (flags
& IO_SYNC
)
4787 (void)vnode_waitforwrites(vp
, 0, 0, 0, "cluster_push");
4789 if (my_sparse_wait
) {
4791 * I'm the owner of the serialization token
4792 * clear it and wakeup anyone that is waiting
4795 lck_mtx_lock(&wbp
->cl_lockw
);
4797 wbp
->cl_sparse_wait
= 0;
4798 wakeup((caddr_t
)&wbp
->cl_sparse_wait
);
4800 lck_mtx_unlock(&wbp
->cl_lockw
);
4802 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 53)) | DBG_FUNC_END
,
4803 wbp
->cl_scmap
, wbp
->cl_number
, retval
, 0, 0);
__private_extern__ void
cluster_release(struct ubc_info *ubc)
{
	struct cl_writebehind *wbp;
	struct cl_readahead   *rap;

	if ((wbp = ubc->cl_wbehind)) {

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, wbp->cl_scmap, 0, 0, 0);

		if (wbp->cl_scmap)
			vfs_drt_control(&(wbp->cl_scmap), 0);
	} else {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, 0, 0, 0, 0);
	}

	rap = ubc->cl_rahead;

	if (wbp != NULL) {
		lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
		FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
	}
	if ((rap = ubc->cl_rahead)) {
		lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp);
		FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD);
	}
	ubc->cl_rahead  = NULL;
	ubc->cl_wbehind = NULL;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_END, ubc, rap, wbp, 0, 0);
}
4843 cluster_try_push(struct cl_writebehind
*wbp
, vnode_t vp
, off_t EOF
, int push_flag
, int (*callback
)(buf_t
, void *), void *callback_arg
)
4850 struct cl_wextent l_clusters
[MAX_CLUSTERS
];
4851 u_int max_cluster_pgcount
;
4854 max_cluster_pgcount
= MAX_CLUSTER_SIZE(vp
) / PAGE_SIZE
;
4856 * the write behind context exists and has
4857 * already been locked...
4859 if (wbp
->cl_number
== 0)
4861 * no clusters to push
4862 * return number of empty slots
4864 return (MAX_CLUSTERS
);
4867 * make a local 'sorted' copy of the clusters
4868 * and clear wbp->cl_number so that new clusters can
4871 for (cl_index
= 0; cl_index
< wbp
->cl_number
; cl_index
++) {
4872 for (min_index
= -1, cl_index1
= 0; cl_index1
< wbp
->cl_number
; cl_index1
++) {
4873 if (wbp
->cl_clusters
[cl_index1
].b_addr
== wbp
->cl_clusters
[cl_index1
].e_addr
)
4875 if (min_index
== -1)
4876 min_index
= cl_index1
;
4877 else if (wbp
->cl_clusters
[cl_index1
].b_addr
< wbp
->cl_clusters
[min_index
].b_addr
)
4878 min_index
= cl_index1
;
4880 if (min_index
== -1)
4883 l_clusters
[cl_index
].b_addr
= wbp
->cl_clusters
[min_index
].b_addr
;
4884 l_clusters
[cl_index
].e_addr
= wbp
->cl_clusters
[min_index
].e_addr
;
4885 l_clusters
[cl_index
].io_flags
= wbp
->cl_clusters
[min_index
].io_flags
;
4887 wbp
->cl_clusters
[min_index
].b_addr
= wbp
->cl_clusters
[min_index
].e_addr
;
4893 if ( (push_flag
& PUSH_DELAY
) && cl_len
== MAX_CLUSTERS
) {
4897 * determine if we appear to be writing the file sequentially
4898 * if not, by returning without having pushed any clusters
4899 * we will cause this vnode to be pushed into the sparse cluster mechanism
4900 * used for managing more random I/O patterns
4902 * we know that we've got all clusters currently in use and the next write doesn't fit into one of them...
4903 * that's why we're in try_push with PUSH_DELAY...
4905 * check to make sure that all the clusters except the last one are 'full'... and that each cluster
4906 * is adjacent to the next (i.e. we're looking for sequential writes) they were sorted above
4907 * so we can just make a simple pass through, up to, but not including the last one...
4908 * note that e_addr is not inclusive, so it will be equal to the b_addr of the next cluster if they
4911 * we let the last one be partial as long as it was adjacent to the previous one...
4912 * we need to do this to deal with multi-threaded servers that might write an I/O or 2 out
4913 * of order... if this occurs at the tail of the last cluster, we don't want to fall into the sparse cluster world...
4915 for (i
= 0; i
< MAX_CLUSTERS
- 1; i
++) {
4916 if ((l_clusters
[i
].e_addr
- l_clusters
[i
].b_addr
) != max_cluster_pgcount
)
4918 if (l_clusters
[i
].e_addr
!= l_clusters
[i
+1].b_addr
)
4922 for (cl_index
= 0; cl_index
< cl_len
; cl_index
++) {
4924 struct cl_extent cl
;
4927 * try to push each cluster in turn...
4929 if (l_clusters
[cl_index
].io_flags
& CLW_IONOCACHE
)
4934 if ((l_clusters
[cl_index
].io_flags
& CLW_IOPASSIVE
) || (push_flag
& IO_PASSIVE
))
4935 flags
|= IO_PASSIVE
;
4937 if (push_flag
& PUSH_SYNC
)
4940 cl
.b_addr
= l_clusters
[cl_index
].b_addr
;
4941 cl
.e_addr
= l_clusters
[cl_index
].e_addr
;
4943 cluster_push_now(vp
, &cl
, EOF
, flags
, callback
, callback_arg
);
4945 l_clusters
[cl_index
].b_addr
= 0;
4946 l_clusters
[cl_index
].e_addr
= 0;
4950 if ( !(push_flag
& PUSH_ALL
) )
4954 if (cl_len
> cl_pushed
) {
4956 * we didn't push all of the clusters, so
4957 * lets try to merge them back in to the vnode
4959 if ((MAX_CLUSTERS
- wbp
->cl_number
) < (cl_len
- cl_pushed
)) {
4961 * we picked up some new clusters while we were trying to
4962 * push the old ones... this can happen because I've dropped
4963 * the vnode lock... the sum of the
4964 * leftovers plus the new cluster count exceeds our ability
4965 * to represent them, so switch to the sparse cluster mechanism
4967 * collect the active public clusters...
4969 sparse_cluster_switch(wbp
, vp
, EOF
, callback
, callback_arg
);
4971 for (cl_index
= 0, cl_index1
= 0; cl_index
< cl_len
; cl_index
++) {
4972 if (l_clusters
[cl_index
].b_addr
== l_clusters
[cl_index
].e_addr
)
4974 wbp
->cl_clusters
[cl_index1
].b_addr
= l_clusters
[cl_index
].b_addr
;
4975 wbp
->cl_clusters
[cl_index1
].e_addr
= l_clusters
[cl_index
].e_addr
;
4976 wbp
->cl_clusters
[cl_index1
].io_flags
= l_clusters
[cl_index
].io_flags
;
4981 * update the cluster count
4983 wbp
->cl_number
= cl_index1
;
4986 * and collect the original clusters that were moved into the
4987 * local storage for sorting purposes
4989 sparse_cluster_switch(wbp
, vp
, EOF
, callback
, callback_arg
);
4993 * we've got room to merge the leftovers back in
4994 * just append them starting at the next 'hole'
4995 * represented by wbp->cl_number
4997 for (cl_index
= 0, cl_index1
= wbp
->cl_number
; cl_index
< cl_len
; cl_index
++) {
4998 if (l_clusters
[cl_index
].b_addr
== l_clusters
[cl_index
].e_addr
)
5001 wbp
->cl_clusters
[cl_index1
].b_addr
= l_clusters
[cl_index
].b_addr
;
5002 wbp
->cl_clusters
[cl_index1
].e_addr
= l_clusters
[cl_index
].e_addr
;
5003 wbp
->cl_clusters
[cl_index1
].io_flags
= l_clusters
[cl_index
].io_flags
;
5008 * update the cluster count
5010 wbp
->cl_number
= cl_index1
;
5013 return (MAX_CLUSTERS
- wbp
->cl_number
);
5019 cluster_push_now(vnode_t vp
, struct cl_extent
*cl
, off_t EOF
, int flags
, int (*callback
)(buf_t
, void *), void *callback_arg
)
5021 upl_page_info_t
*pl
;
5023 vm_offset_t upl_offset
;
5038 if (flags
& IO_PASSIVE
)
5043 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 51)) | DBG_FUNC_START
,
5044 (int)cl
->b_addr
, (int)cl
->e_addr
, (int)EOF
, flags
, 0);
5046 if ((pages_in_upl
= (int)(cl
->e_addr
- cl
->b_addr
)) == 0) {
5047 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 51)) | DBG_FUNC_END
, 1, 0, 0, 0, 0);
5051 upl_size
= pages_in_upl
* PAGE_SIZE
;
5052 upl_f_offset
= (off_t
)(cl
->b_addr
* PAGE_SIZE_64
);
5054 if (upl_f_offset
+ upl_size
>= EOF
) {
5056 if (upl_f_offset
>= EOF
) {
5058 * must have truncated the file and missed
5059 * clearing a dangling cluster (i.e. it's completely
5060 * beyond the new EOF
5062 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 51)) | DBG_FUNC_END
, 1, 1, 0, 0, 0);
5066 size
= EOF
- upl_f_offset
;
5068 upl_size
= (size
+ (PAGE_SIZE
- 1)) & ~PAGE_MASK
;
5069 pages_in_upl
= upl_size
/ PAGE_SIZE
;
5073 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 41)) | DBG_FUNC_START
, upl_size
, size
, 0, 0, 0);
5076 * by asking for UPL_COPYOUT_FROM and UPL_RET_ONLY_DIRTY, we get the following desirable behavior
5078 * - only pages that are currently dirty are returned... these are the ones we need to clean
5079 * - the hardware dirty bit is cleared when the page is gathered into the UPL... the software dirty bit is set
5080 * - if we have to abort the I/O for some reason, the software dirty bit is left set since we didn't clean the page
5081 * - when we commit the page, the software dirty bit is cleared... the hardware dirty bit is untouched so that if
5082 * someone dirties this page while the I/O is in progress, we don't lose track of the new state
5084 * when the I/O completes, we no longer ask for an explicit clear of the DIRTY state (either soft or hard)
5087 if ((vp
->v_flag
& VNOCACHE_DATA
) || (flags
& IO_NOCACHE
))
5088 upl_flags
= UPL_COPYOUT_FROM
| UPL_RET_ONLY_DIRTY
| UPL_SET_LITE
| UPL_WILL_BE_DUMPED
;
5090 upl_flags
= UPL_COPYOUT_FROM
| UPL_RET_ONLY_DIRTY
| UPL_SET_LITE
;
5092 kret
= ubc_create_upl(vp
,
5098 if (kret
!= KERN_SUCCESS
)
5099 panic("cluster_push: failed to get pagelist");
5101 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 41)) | DBG_FUNC_END
, upl
, upl_f_offset
, 0, 0, 0);
5104 * since we only asked for the dirty pages back
5105 * it's possible that we may only get a few or even none, so...
5106 * before we start marching forward, we must make sure we know
5107 * where the last present page is in the UPL, otherwise we could
5108 * end up working with a freed upl due to the FREE_ON_EMPTY semantics
5109 * employed by commit_range and abort_range.
5111 for (last_pg
= pages_in_upl
- 1; last_pg
>= 0; last_pg
--) {
5112 if (upl_page_present(pl
, last_pg
))
5115 pages_in_upl
= last_pg
+ 1;
5117 if (pages_in_upl
== 0) {
5118 ubc_upl_abort(upl
, 0);
5120 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 51)) | DBG_FUNC_END
, 1, 2, 0, 0, 0);
5124 for (last_pg
= 0; last_pg
< pages_in_upl
; ) {
5126 * find the next dirty page in the UPL
5127 * this will become the first page in the
5128 * next I/O to generate
5130 for (start_pg
= last_pg
; start_pg
< pages_in_upl
; start_pg
++) {
5131 if (upl_dirty_page(pl
, start_pg
))
5133 if (upl_page_present(pl
, start_pg
))
5135 * RET_ONLY_DIRTY will return non-dirty 'precious' pages
5136 * just release these unchanged since we're not going
5137 * to steal them or change their state
5139 ubc_upl_abort_range(upl
, start_pg
* PAGE_SIZE
, PAGE_SIZE
, UPL_ABORT_FREE_ON_EMPTY
);
5141 if (start_pg
>= pages_in_upl
)
5143 * done... no more dirty pages to push
5146 if (start_pg
> last_pg
)
5148 * skipped over some non-dirty pages
5150 size
-= ((start_pg
- last_pg
) * PAGE_SIZE
);
5153 * find a range of dirty pages to write
5155 for (last_pg
= start_pg
; last_pg
< pages_in_upl
; last_pg
++) {
5156 if (!upl_dirty_page(pl
, last_pg
))
5159 upl_offset
= start_pg
* PAGE_SIZE
;
5161 io_size
= min(size
, (last_pg
- start_pg
) * PAGE_SIZE
);
5163 io_flags
= CL_THROTTLE
| CL_COMMIT
| CL_AGE
| bflag
;
5165 if ( !(flags
& IO_SYNC
))
5166 io_flags
|= CL_ASYNC
;
5168 retval
= cluster_io(vp
, upl
, upl_offset
, upl_f_offset
+ upl_offset
, io_size
,
5169 io_flags
, (buf_t
)NULL
, (struct clios
*)NULL
, callback
, callback_arg
);
5171 if (error
== 0 && retval
)
5176 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 51)) | DBG_FUNC_END
, 1, 3, 0, 0, 0);
/*
 * sparse_cluster_switch is called with the write behind lock held
 */
static void
sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg)
{
	int	cl_index;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, vp, wbp->cl_scmap, 0, 0, 0);

	for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
		int	flags;
		struct cl_extent cl;

		for (cl.b_addr = wbp->cl_clusters[cl_index].b_addr; cl.b_addr < wbp->cl_clusters[cl_index].e_addr; cl.b_addr++) {

			if (ubc_page_op(vp, (off_t)(cl.b_addr * PAGE_SIZE_64), 0, NULL, &flags) == KERN_SUCCESS) {
				if (flags & UPL_POP_DIRTY) {
					cl.e_addr = cl.b_addr + 1;

					sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, EOF, callback, callback_arg);
				}
			}
		}
	}
	wbp->cl_number = 0;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, vp, wbp->cl_scmap, 0, 0, 0);
}
/*
 * sparse_cluster_push must be called with the write-behind lock held if the scmap is
 * still associated with the write-behind context... however, if the scmap has been disassociated
 * from the write-behind context (the cluster_push case), the wb lock is not held
 */
static void
sparse_cluster_push(void **scmap, vnode_t vp, off_t EOF, int push_flag, int (*callback)(buf_t, void *), void *callback_arg)
{
	struct cl_extent cl;
	off_t		offset;
	u_int		length;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_START, vp, (*scmap), 0, push_flag, 0);

	if (push_flag & PUSH_ALL)
		vfs_drt_control(scmap, 1);

	for (;;) {
		if (vfs_drt_get_cluster(scmap, &offset, &length) != KERN_SUCCESS)
			break;

		cl.b_addr = (daddr64_t)(offset / PAGE_SIZE_64);
		cl.e_addr = (daddr64_t)((offset + length) / PAGE_SIZE_64);

		cluster_push_now(vp, &cl, EOF, push_flag & IO_PASSIVE, callback, callback_arg);

		if ( !(push_flag & PUSH_ALL) )
			break;
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, vp, (*scmap), 0, 0, 0);
}
/*
 * sparse_cluster_add is called with the write behind lock held
 */
static void
sparse_cluster_add(void **scmap, vnode_t vp, struct cl_extent *cl, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg)
{
	u_int	new_dirty;
	u_int	length;
	off_t	offset;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_START, (*scmap), 0, cl->b_addr, (int)cl->e_addr, 0);

	offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
	length = ((u_int)(cl->e_addr - cl->b_addr)) * PAGE_SIZE;

	while (vfs_drt_mark_pages(scmap, offset, length, &new_dirty) != KERN_SUCCESS) {
		/*
		 * no room left in the map
		 * only a partial update was done
		 * push out some pages and try again
		 */
		sparse_cluster_push(scmap, vp, EOF, 0, callback, callback_arg);

		offset += (new_dirty * PAGE_SIZE_64);
		length -= (new_dirty * PAGE_SIZE);
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_END, vp, (*scmap), 0, 0, 0);
}
5277 cluster_align_phys_io(vnode_t vp
, struct uio
*uio
, addr64_t usr_paddr
, u_int32_t xsize
, int flags
, int (*callback
)(buf_t
, void *), void *callback_arg
)
5279 upl_page_info_t
*pl
;
5289 if (flags
& IO_PASSIVE
)
5294 upl_flags
= UPL_SET_LITE
;
5296 if ( !(flags
& CL_READ
) ) {
5298 * "write" operation: let the UPL subsystem know
5299 * that we intend to modify the buffer cache pages
5302 upl_flags
|= UPL_WILL_MODIFY
;
5305 * indicate that there is no need to pull the
5306 * mapping for this page... we're only going
5307 * to read from it, not modify it.
5309 upl_flags
|= UPL_FILE_IO
;
5311 kret
= ubc_create_upl(vp
,
5312 uio
->uio_offset
& ~PAGE_MASK_64
,
5318 if (kret
!= KERN_SUCCESS
)
5321 if (!upl_valid_page(pl
, 0)) {
5323 * issue a synchronous read to cluster_io
5325 error
= cluster_io(vp
, upl
, 0, uio
->uio_offset
& ~PAGE_MASK_64
, PAGE_SIZE
,
5326 CL_READ
| bflag
, (buf_t
)NULL
, (struct clios
*)NULL
, callback
, callback_arg
);
5328 ubc_upl_abort_range(upl
, 0, PAGE_SIZE
, UPL_ABORT_DUMP_PAGES
| UPL_ABORT_FREE_ON_EMPTY
);
5334 ubc_paddr
= ((addr64_t
)upl_phys_page(pl
, 0) << 12) + (addr64_t
)(uio
->uio_offset
& PAGE_MASK_64
);
5337 * NOTE: There is no prototype for the following in BSD. It, and the definitions
5338 * of the defines for cppvPsrc, cppvPsnk, cppvFsnk, and cppvFsrc will be found in
5339 * osfmk/ppc/mappings.h. They are not included here because there appears to be no
5340 * way to do so without exporting them to kexts as well.
5342 if (flags
& CL_READ
)
5343 // copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsnk); /* Copy physical to physical and flush the destination */
5344 copypv(ubc_paddr
, usr_paddr
, xsize
, 2 | 1 | 4); /* Copy physical to physical and flush the destination */
5346 // copypv(usr_paddr, ubc_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsrc); /* Copy physical to physical and flush the source */
5347 copypv(usr_paddr
, ubc_paddr
, xsize
, 2 | 1 | 8); /* Copy physical to physical and flush the source */
5349 if ( !(flags
& CL_READ
) || (upl_valid_page(pl
, 0) && upl_dirty_page(pl
, 0))) {
5351 * issue a synchronous write to cluster_io
5353 error
= cluster_io(vp
, upl
, 0, uio
->uio_offset
& ~PAGE_MASK_64
, PAGE_SIZE
,
5354 bflag
, (buf_t
)NULL
, (struct clios
*)NULL
, callback
, callback_arg
);
5357 uio_update(uio
, (user_size_t
)xsize
);
5360 abort_flags
= UPL_ABORT_FREE_ON_EMPTY
;
5362 abort_flags
= UPL_ABORT_FREE_ON_EMPTY
| UPL_ABORT_DUMP_PAGES
;
5364 ubc_upl_abort_range(upl
, 0, PAGE_SIZE
, abort_flags
);
5372 cluster_copy_upl_data(struct uio
*uio
, upl_t upl
, int upl_offset
, int *io_resid
)
5380 upl_page_info_t
*pl
;
5384 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 34)) | DBG_FUNC_START
,
5385 (int)uio
->uio_offset
, upl_offset
, xsize
, 0, 0);
5387 segflg
= uio
->uio_segflg
;
5391 case UIO_USERSPACE32
:
5392 case UIO_USERISPACE32
:
5393 uio
->uio_segflg
= UIO_PHYS_USERSPACE32
;
5397 case UIO_USERISPACE
:
5398 uio
->uio_segflg
= UIO_PHYS_USERSPACE
;
5401 case UIO_USERSPACE64
:
5402 case UIO_USERISPACE64
:
5403 uio
->uio_segflg
= UIO_PHYS_USERSPACE64
;
5407 uio
->uio_segflg
= UIO_PHYS_SYSSPACE
;
5411 pl
= ubc_upl_pageinfo(upl
);
5413 pg_index
= upl_offset
/ PAGE_SIZE
;
5414 pg_offset
= upl_offset
& PAGE_MASK
;
5415 csize
= min(PAGE_SIZE
- pg_offset
, xsize
);
5417 while (xsize
&& retval
== 0) {
5420 paddr
= ((addr64_t
)upl_phys_page(pl
, pg_index
) << 12) + pg_offset
;
5422 retval
= uiomove64(paddr
, csize
, uio
);
5427 csize
= min(PAGE_SIZE
, xsize
);
5431 uio
->uio_segflg
= segflg
;
5433 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 34)) | DBG_FUNC_END
,
5434 (int)uio
->uio_offset
, xsize
, retval
, segflg
, 0);
int
cluster_copy_ubc_data(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty)
{
	return (cluster_copy_ubc_data_internal(vp, uio, io_resid, mark_dirty, 1));
}
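
/*
 * Illustrative sketch (editorial, not in the original source): a file system
 * read path can try to satisfy a request straight from the unified buffer
 * cache before issuing any cluster I/O.  The helper name is hypothetical;
 * cluster_copy_ubc_data() and its residual-count contract are real.
 */
#if 0
static int
example_read_from_cache(vnode_t vp, struct uio *uio, int resid)
{
	int	io_resid = resid;
	int	error;

	/* on return, io_resid holds the bytes NOT satisfied from the cache */
	error = cluster_copy_ubc_data(vp, uio, &io_resid, 0);

	if (error == 0 && io_resid)
		;	/* fall back to cluster_read() for the remainder */

	return (error);
}
#endif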
5449 cluster_copy_ubc_data_internal(vnode_t vp
, struct uio
*uio
, int *io_resid
, int mark_dirty
, int take_reference
)
5456 memory_object_control_t control
;
5458 io_size
= *io_resid
;
5460 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 34)) | DBG_FUNC_START
,
5461 (int)uio
->uio_offset
, 0, io_size
, 0, 0);
5463 control
= ubc_getobject(vp
, UBC_FLAGS_NONE
);
5465 if (control
== MEMORY_OBJECT_CONTROL_NULL
) {
5466 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 34)) | DBG_FUNC_END
,
5467 (int)uio
->uio_offset
, io_size
, retval
, 3, 0);
5471 segflg
= uio
->uio_segflg
;
5475 case UIO_USERSPACE32
:
5476 case UIO_USERISPACE32
:
5477 uio
->uio_segflg
= UIO_PHYS_USERSPACE32
;
5480 case UIO_USERSPACE64
:
5481 case UIO_USERISPACE64
:
5482 uio
->uio_segflg
= UIO_PHYS_USERSPACE64
;
5486 case UIO_USERISPACE
:
5487 uio
->uio_segflg
= UIO_PHYS_USERSPACE
;
5491 uio
->uio_segflg
= UIO_PHYS_SYSSPACE
;
5495 if ( (io_size
= *io_resid
) ) {
5496 start_offset
= (int)(uio
->uio_offset
& PAGE_MASK_64
);
5497 xsize
= uio_resid(uio
);
5499 retval
= memory_object_control_uiomove(control
, uio
->uio_offset
- start_offset
, uio
,
5500 start_offset
, io_size
, mark_dirty
, take_reference
);
5501 xsize
-= uio_resid(uio
);
5504 uio
->uio_segflg
= segflg
;
5505 *io_resid
= io_size
;
5507 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW
, 34)) | DBG_FUNC_END
,
5508 (int)uio
->uio_offset
, io_size
, retval
, 0x80000000 | segflg
, 0);
int
is_file_clean(vnode_t vp, off_t filesize)
{
	off_t	f_offset;
	int	flags;
	int	total_dirty = 0;

	for (f_offset = 0; f_offset < filesize; f_offset += PAGE_SIZE_64) {
		if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) {
			if (flags & UPL_POP_DIRTY) {
				total_dirty++;
			}
		}
	}
	if (total_dirty)
		return(EINVAL);

	return (0);
}
/*
 * Dirty region tracking/clustering mechanism.
 *
 * This code (vfs_drt_*) provides a mechanism for tracking and clustering
 * dirty regions within a larger space (file).  It is primarily intended to
 * support clustering in large files with many dirty areas.
 *
 * The implementation assumes that the dirty regions are pages.
 *
 * To represent dirty pages within the file, we store bit vectors in a
 * variable-size circular hash.
 */

/*
 * Bitvector size.  This determines the number of pages we group in a
 * single hashtable entry.  Each hashtable entry is aligned to this
 * size within the file.
 */
#define DRT_BITVECTOR_PAGES		256

/*
 * File offset handling.
 *
 * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES;
 * the correct formula is  (~(DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1)
 */
#define DRT_ADDRESS_MASK		(~((1 << 20) - 1))
#define DRT_ALIGN_ADDRESS(addr)		((addr) & DRT_ADDRESS_MASK)

/*
 * Hashtable address field handling.
 *
 * The low-order bits of the hashtable address are used to conserve
 * space.
 *
 * DRT_HASH_COUNT_MASK must be large enough to store the range
 * 0-DRT_BITVECTOR_PAGES inclusive, as well as have one value
 * to indicate that the bucket is actually unoccupied.
 */
#define DRT_HASH_GET_ADDRESS(scm, i)	((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK)
#define DRT_HASH_SET_ADDRESS(scm, i, a)								\
	do {											\
		(scm)->scm_hashtable[(i)].dhe_control =						\
		    ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a); \
	} while (0)
#define DRT_HASH_COUNT_MASK		0x1ff
#define DRT_HASH_GET_COUNT(scm, i)	((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK)
#define DRT_HASH_SET_COUNT(scm, i, c)								\
	do {											\
		(scm)->scm_hashtable[(i)].dhe_control =						\
		    ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK); \
	} while (0)
#define DRT_HASH_CLEAR(scm, i)									\
	do {											\
		(scm)->scm_hashtable[(i)].dhe_control = 0;					\
	} while (0)
#define DRT_HASH_VACATE(scm, i)		DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK)
#define DRT_HASH_VACANT(scm, i)		(DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK)
#define DRT_HASH_COPY(oscm, oi, scm, i)								\
	do {											\
		(scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control; \
		DRT_BITVECTOR_COPY(oscm, oi, scm, i);						\
	} while (0)

/*
 * Hash table moduli.
 *
 * Since the hashtable entry's size is dependent on the size of
 * the bitvector, and since the hashtable size is constrained to
 * both being prime and fitting within the desired allocation
 * size, these values need to be manually determined.
 *
 * For DRT_BITVECTOR_SIZE = 256, the entry size is 40 bytes.
 *
 * The small hashtable allocation is 1024 bytes, so the modulus is 23.
 * The large hashtable allocation is 16384 bytes, so the modulus is 401.
 */
#define DRT_HASH_SMALL_MODULUS	23
#define DRT_HASH_LARGE_MODULUS	401

#define DRT_SMALL_ALLOCATION	1024	/* 104 bytes spare */
#define DRT_LARGE_ALLOCATION	16384	/* 344 bytes spare */

/* *** nothing below here has secret dependencies on DRT_BITVECTOR_PAGES *** */
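
/*
 * Editorial note (not in the original source) on where the moduli above come
 * from: each hashtable entry is 40 bytes (an 8-byte dhe_control plus a
 * 256-bit bitvector = 32 bytes), so the table itself consumes
 *
 *	 23 * 40 =   920 bytes  ->   1024 -   920 = 104 bytes spare
 *	401 * 40 = 16040 bytes  ->  16384 - 16040 = 344 bytes spare
 *
 * which is where the "104 bytes spare" / "344 bytes spare" figures in the
 * allocation defines come from; the small set of scm_* header fields lives
 * in that spare space.
 */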
/*
 * Hashtable bitvector handling.
 *
 * Bitvector fields are 32 bits long.
 */

#define DRT_HASH_SET_BIT(scm, i, bit)				\
	(scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] |= (1 << ((bit) % 32))

#define DRT_HASH_CLEAR_BIT(scm, i, bit)				\
	(scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] &= ~(1 << ((bit) % 32))

#define DRT_HASH_TEST_BIT(scm, i, bit)				\
	((scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] & (1 << ((bit) % 32)))

#define DRT_BITVECTOR_CLEAR(scm, i)				\
	bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))

#define DRT_BITVECTOR_COPY(oscm, oi, scm, i)			\
	bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0],	\
	      &(scm)->scm_hashtable[(i)].dhe_bitvector[0],	\
	      (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
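
/*
 * Worked example (editorial, not in the original source), assuming the usual
 * 4KB PAGE_SIZE: each hashtable entry covers
 * DRT_BITVECTOR_PAGES * PAGE_SIZE = 256 * 4096 = 1MB of the file, which is
 * why DRT_ADDRESS_MASK clears the low 20 bits.  A dirty page at file offset
 * 0x2537000 would be tracked as follows:
 *
 *	DRT_ALIGN_ADDRESS(0x2537000) = 0x2500000          (entry address)
 *	page index within the entry  = 0x37000 / PAGE_SIZE = 55
 *	DRT_HASH_SET_BIT(scm, i, 55) -> dhe_bitvector[55 / 32] |= 1 << (55 % 32)
 *	                                i.e. word 1, bit 23
 *
 * The entry index 'i' itself comes from DRT_HASH(scm, 0x2500000), i.e. the
 * aligned address modulo the current table modulus.
 */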
struct vfs_drt_hashentry {
	u_int64_t	dhe_control;
	u_int32_t	dhe_bitvector[DRT_BITVECTOR_PAGES / 32];
};

/*
 * Dirty Region Tracking structure.
 *
 * The hashtable is allocated entirely inside the DRT structure.
 *
 * The hash is a simple circular prime modulus arrangement, the structure
 * is resized from small to large if it overflows.
 */
struct vfs_drt_clustermap {
	u_int32_t		scm_magic;	/* sanity/detection */
#define DRT_SCM_MAGIC		0x12020003
	u_int32_t		scm_modulus;	/* current ring size */
	u_int32_t		scm_buckets;	/* number of occupied buckets */
	u_int32_t		scm_lastclean;	/* last entry we cleaned */
	u_int32_t		scm_iskips;	/* number of slot skips */

	struct vfs_drt_hashentry scm_hashtable[0];
};


#define DRT_HASH(scm, addr)		((addr) % (scm)->scm_modulus)
#define DRT_HASH_NEXT(scm, addr)	(((addr) + 1) % (scm)->scm_modulus)

/*
 * Debugging codes and arguments.
 */
#define DRT_DEBUG_EMPTYFREE	(FSDBG_CODE(DBG_FSRW, 82)) /* nil */
#define DRT_DEBUG_RETCLUSTER	(FSDBG_CODE(DBG_FSRW, 83)) /* offset, length */
#define DRT_DEBUG_ALLOC		(FSDBG_CODE(DBG_FSRW, 84)) /* copycount */
#define DRT_DEBUG_INSERT	(FSDBG_CODE(DBG_FSRW, 85)) /* offset, iskip */
#define DRT_DEBUG_MARK		(FSDBG_CODE(DBG_FSRW, 86)) /* offset, length,
							    * dirty */
							   /* 1 (clean, no map) */
							   /* 2 (map alloc fail) */
							   /* 3, resid (partial) */
#define DRT_DEBUG_6		(FSDBG_CODE(DBG_FSRW, 87))
#define DRT_DEBUG_SCMDATA	(FSDBG_CODE(DBG_FSRW, 88)) /* modulus, buckets,
							    * lastclean, iskips */
static kern_return_t	vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp);
static kern_return_t	vfs_drt_free_map(struct vfs_drt_clustermap *cmap);
static kern_return_t	vfs_drt_search_index(struct vfs_drt_clustermap *cmap,
	u_int64_t offset, int *indexp);
static kern_return_t	vfs_drt_get_index(struct vfs_drt_clustermap **cmapp,
	u_int64_t offset, int *indexp, int recursed);
static kern_return_t	vfs_drt_do_mark_pages(
	void		**cmapp,
	u_int64_t	offset,
	u_int		length,
	u_int		*setcountp,
	int		dirty);
static void		vfs_drt_trace(
	struct vfs_drt_clustermap *cmap,
	int code,
	int arg1,
	int arg2,
	int arg3,
	int arg4);
/*
 * Allocate and initialise a sparse cluster map.
 *
 * Will allocate a new map, resize or compact an existing map.
 *
 * XXX we should probably have at least one intermediate map size,
 * as the 1:16 ratio seems a bit drastic.
 */
static kern_return_t
vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp)
{
	struct vfs_drt_clustermap *cmap, *ocmap;
	kern_return_t	kret;
	u_int64_t	offset;
	u_int32_t	i;
	int		nsize, active_buckets, index, copycount;

	ocmap = NULL;
	if (cmapp != NULL)
		ocmap = *cmapp;

	/*
	 * Decide on the size of the new map.
	 */
	if (ocmap == NULL) {
		nsize = DRT_HASH_SMALL_MODULUS;
	} else {
		/* count the number of active buckets in the old map */
		active_buckets = 0;
		for (i = 0; i < ocmap->scm_modulus; i++) {
			if (!DRT_HASH_VACANT(ocmap, i) &&
			    (DRT_HASH_GET_COUNT(ocmap, i) != 0))
				active_buckets++;
		}
		/*
		 * If we're currently using the small allocation, check to
		 * see whether we should grow to the large one.
		 */
		if (ocmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
			/* if the ring is nearly full */
			if (active_buckets > (DRT_HASH_SMALL_MODULUS - 5)) {
				nsize = DRT_HASH_LARGE_MODULUS;
			} else {
				nsize = DRT_HASH_SMALL_MODULUS;
			}
		} else {
			/* already using the large modulus */
			nsize = DRT_HASH_LARGE_MODULUS;
			/*
			 * If the ring is completely full, there's
			 * nothing useful for us to do.  Behave as
			 * though we had compacted into the new
			 * array and return.
			 */
			if (active_buckets >= DRT_HASH_LARGE_MODULUS)
				return(KERN_SUCCESS);
		}
	}

	/*
	 * Allocate and initialise the new map.
	 */
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&cmap,
	    (nsize == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION);
	if (kret != KERN_SUCCESS)
		return(kret);
	cmap->scm_magic = DRT_SCM_MAGIC;
	cmap->scm_modulus = nsize;
	cmap->scm_buckets = 0;
	cmap->scm_lastclean = 0;
	cmap->scm_iskips = 0;
	for (i = 0; i < cmap->scm_modulus; i++) {
		DRT_HASH_CLEAR(cmap, i);
		DRT_HASH_VACATE(cmap, i);
		DRT_BITVECTOR_CLEAR(cmap, i);
	}

	/*
	 * If there's an old map, re-hash entries from it into the new map.
	 */
	copycount = 0;
	if (ocmap != NULL) {
		for (i = 0; i < ocmap->scm_modulus; i++) {
			/* skip empty buckets */
			if (DRT_HASH_VACANT(ocmap, i) ||
			    (DRT_HASH_GET_COUNT(ocmap, i) == 0))
				continue;
			/* get new index */
			offset = DRT_HASH_GET_ADDRESS(ocmap, i);
			kret = vfs_drt_get_index(&cmap, offset, &index, 1);
			if (kret != KERN_SUCCESS) {
				/* XXX need to bail out gracefully here */
				panic("vfs_drt: new cluster map mysteriously too small");
			}
			/* copy */
			DRT_HASH_COPY(ocmap, i, cmap, index);
			copycount++;
		}
	}

	/* log what we've done */
	vfs_drt_trace(cmap, DRT_DEBUG_ALLOC, copycount, 0, 0, 0);

	/*
	 * It's important to ensure that *cmapp always points to
	 * a valid map, so we must overwrite it before freeing
	 * the old map.
	 */
	*cmapp = cmap;

	/* we've got a new map, so free the old one */
	if (ocmap != NULL) {
		/* emit stats into trace buffer */
		vfs_drt_trace(ocmap, DRT_DEBUG_SCMDATA,
			      ocmap->scm_modulus,
			      ocmap->scm_buckets,
			      ocmap->scm_lastclean,
			      ocmap->scm_iskips);

		vfs_drt_free_map(ocmap);
	}
	return(KERN_SUCCESS);
}
/*
 * Free a sparse cluster map.
 */
static kern_return_t
vfs_drt_free_map(struct vfs_drt_clustermap *cmap)
{
	kmem_free(kernel_map, (vm_offset_t)cmap,
		  (cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION);
	return(KERN_SUCCESS);
}
/*
 * Find the hashtable slot currently occupied by an entry for the supplied offset.
 */
static kern_return_t
vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *indexp)
{
	int		index;
	u_int32_t	i;

	offset = DRT_ALIGN_ADDRESS(offset);
	index = DRT_HASH(cmap, offset);

	/* traverse the hashtable */
	for (i = 0; i < cmap->scm_modulus; i++) {

		/*
		 * If the slot is vacant, we can stop.
		 */
		if (DRT_HASH_VACANT(cmap, index))
			break;

		/*
		 * If the address matches our offset, we have success.
		 */
		if (DRT_HASH_GET_ADDRESS(cmap, index) == offset) {
			*indexp = index;
			return(KERN_SUCCESS);
		}

		/*
		 * Move to the next slot, try again.
		 */
		index = DRT_HASH_NEXT(cmap, index);
	}
	/*
	 * It's not there.
	 */
	return(KERN_FAILURE);
}
/*
 * Find the hashtable slot for the supplied offset.  If we haven't allocated
 * one yet, allocate one and populate the address field.  Note that it will
 * not have a nonzero page count and thus will still technically be free, so
 * in the case where we are called to clean pages, the slot will remain free.
 */
static kern_return_t
vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *indexp, int recursed)
{
	struct vfs_drt_clustermap *cmap;
	kern_return_t	kret;
	u_int32_t	index;
	u_int32_t	i;

	cmap = *cmapp;

	/* look for an existing entry */
	kret = vfs_drt_search_index(cmap, offset, indexp);
	if (kret == KERN_SUCCESS)
		return(kret);

	/* need to allocate an entry */
	offset = DRT_ALIGN_ADDRESS(offset);
	index = DRT_HASH(cmap, offset);

	/* scan from the index forwards looking for a vacant slot */
	for (i = 0; i < cmap->scm_modulus; i++) {
		/* slot vacant? */
		if (DRT_HASH_VACANT(cmap, index) || DRT_HASH_GET_COUNT(cmap, index) == 0) {
			cmap->scm_buckets++;
			if (index < cmap->scm_lastclean)
				cmap->scm_lastclean = index;
			DRT_HASH_SET_ADDRESS(cmap, index, offset);
			DRT_HASH_SET_COUNT(cmap, index, 0);
			DRT_BITVECTOR_CLEAR(cmap, index);
			*indexp = index;
			vfs_drt_trace(cmap, DRT_DEBUG_INSERT, (int)offset, i, 0, 0);
			return(KERN_SUCCESS);
		}
		cmap->scm_iskips += i;
		index = DRT_HASH_NEXT(cmap, index);
	}

	/*
	 * We haven't found a vacant slot, so the map is full.  If we're not
	 * already recursed, try reallocating/compacting it.
	 */
	if (recursed)
		return(KERN_FAILURE);
	kret = vfs_drt_alloc_map(cmapp);
	if (kret == KERN_SUCCESS) {
		/* now try to insert again */
		kret = vfs_drt_get_index(cmapp, offset, indexp, 1);
	}
	return(kret);
}
/*
 * Implementation of set dirty/clean.
 *
 * In the 'clean' case, not finding a map is OK.
 */
static kern_return_t
vfs_drt_do_mark_pages(
	void		**private,
	u_int64_t	offset,
	u_int		length,
	u_int		*setcountp,
	int		dirty)
{
	struct vfs_drt_clustermap *cmap, **cmapp;
	kern_return_t	kret;
	int		i, index, pgoff, pgcount, setcount, ecount;

	cmapp = (struct vfs_drt_clustermap **)private;
	cmap = *cmapp;

	vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_START, (int)offset, (int)length, dirty, 0);

	if (setcountp != NULL)
		*setcountp = 0;

	/* allocate a cluster map if we don't already have one */
	if (cmap == NULL) {
		/* no cluster map, nothing to clean */
		if (!dirty) {
			vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 1, 0, 0, 0);
			return(KERN_SUCCESS);
		}
		kret = vfs_drt_alloc_map(cmapp);
		if (kret != KERN_SUCCESS) {
			vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 2, 0, 0, 0);
			return(kret);
		}
	}
	setcount = 0;

	/*
	 * Iterate over the length of the region.
	 */
	while (length > 0) {
		/*
		 * Get the hashtable index for this offset.
		 *
		 * XXX this will add blank entries if we are clearing a range
		 * that hasn't been dirtied.
		 */
		kret = vfs_drt_get_index(cmapp, offset, &index, 0);
		cmap = *cmapp;	/* may have changed! */
		/* this may be a partial-success return */
		if (kret != KERN_SUCCESS) {
			if (setcountp != NULL)
				*setcountp = setcount;
			vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 3, (int)length, 0, 0);

			return(kret);
		}

		/*
		 * Work out how many pages we're modifying in this
		 * hashtable entry.
		 */
		pgoff = (offset - DRT_ALIGN_ADDRESS(offset)) / PAGE_SIZE;
		pgcount = min((length / PAGE_SIZE), (DRT_BITVECTOR_PAGES - pgoff));

		/*
		 * Iterate over pages, dirty/clearing as we go.
		 */
		ecount = DRT_HASH_GET_COUNT(cmap, index);
		for (i = 0; i < pgcount; i++) {
			if (dirty) {
				if (!DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
					DRT_HASH_SET_BIT(cmap, index, pgoff + i);
					ecount++;
					setcount++;
				}
			} else {
				if (DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
					DRT_HASH_CLEAR_BIT(cmap, index, pgoff + i);
					ecount--;
					setcount++;
				}
			}
		}
		DRT_HASH_SET_COUNT(cmap, index, ecount);

		offset += pgcount * PAGE_SIZE;
		length -= pgcount * PAGE_SIZE;
	}
	if (setcountp != NULL)
		*setcountp = setcount;

	vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 0, setcount, 0, 0);

	return(KERN_SUCCESS);
}
/*
 * Mark a set of pages as dirty/clean.
 *
 * This is a public interface.
 *
 * cmapp
 *	Pointer to storage suitable for holding a pointer.  Note that
 *	this must either be NULL or a value set by this function.
 *
 * size
 *	Current file size in bytes.
 *
 * offset
 *	Offset of the first page to be marked as dirty, in bytes.  Must be
 *	page-aligned.
 *
 * length
 *	Length of dirty region, in bytes.  Must be a multiple of PAGE_SIZE.
 *
 * setcountp
 *	Number of pages newly marked dirty by this call (optional).
 *
 * Returns KERN_SUCCESS if all the pages were successfully marked.
 */
static kern_return_t
vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp)
{
	/* XXX size unused, drop from interface */
	return(vfs_drt_do_mark_pages(cmapp, offset, length, setcountp, 1));
}

#if 0
static kern_return_t
vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length)
{
	return(vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0));
}
#endif
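
/*
 * Illustrative sketch (editorial, not in the original source): the
 * sparse-cluster code earlier in this file drives this interface as a simple
 * mark / harvest loop.  This mirrors what sparse_cluster_add() and
 * sparse_cluster_push() already do; the helper below is hypothetical.
 */
#if 0
static void
example_drt_flush(void **scmap, vnode_t vp, off_t EOF)
{
	off_t	offset;
	u_int	length;
	struct cl_extent cl;

	/* pull out one run of dirty pages at a time until the map drains */
	while (vfs_drt_get_cluster(scmap, &offset, &length) == KERN_SUCCESS) {
		cl.b_addr = (daddr64_t)(offset / PAGE_SIZE_64);
		cl.e_addr = (daddr64_t)((offset + length) / PAGE_SIZE_64);

		cluster_push_now(vp, &cl, EOF, 0, NULL, NULL);
	}
}
#endif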
/*
 * Get a cluster of dirty pages.
 *
 * This is a public interface.
 *
 * cmapp
 *	Pointer to storage managed by drt_mark_pages.  Note that this must
 *	be NULL or a value set by drt_mark_pages.
 *
 * offsetp
 *	Returns the byte offset into the file of the first page in the cluster.
 *
 * lengthp
 *	Returns the length in bytes of the cluster of dirty pages.
 *
 * Returns success if a cluster was found.  If KERN_FAILURE is returned, there
 * are no dirty pages meeting the minimum size criteria.  Private storage will
 * be released if there are no more dirty pages left in the map
 */
static kern_return_t
vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp)
{
	struct vfs_drt_clustermap *cmap;
	u_int64_t	offset;
	u_int		length;
	u_int32_t	j;
	int		index, i, fs, ls;

	/* sanity */
	if ((cmapp == NULL) || (*cmapp == NULL))
		return(KERN_FAILURE);
	cmap = *cmapp;

	/* walk the hashtable */
	for (offset = 0, j = 0; j < cmap->scm_modulus; offset += (DRT_BITVECTOR_PAGES * PAGE_SIZE), j++) {
		index = DRT_HASH(cmap, offset);

		if (DRT_HASH_VACANT(cmap, index) || (DRT_HASH_GET_COUNT(cmap, index) == 0))
			continue;

		/* scan the bitfield for a string of bits */
		fs = -1;

		for (i = 0; i < DRT_BITVECTOR_PAGES; i++) {
			if (DRT_HASH_TEST_BIT(cmap, index, i)) {
				fs = i;
				break;
			}
		}
		if (fs == -1) {
			/* didn't find any bits set */
			panic("vfs_drt: entry summary count > 0 but no bits set in map");
		}
		for (ls = 0; i < DRT_BITVECTOR_PAGES; i++, ls++) {
			if (!DRT_HASH_TEST_BIT(cmap, index, i))
				break;
		}

		/* compute offset and length, mark pages clean */
		offset = DRT_HASH_GET_ADDRESS(cmap, index) + (PAGE_SIZE * fs);
		length = ls * PAGE_SIZE;
		vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0);
		cmap->scm_lastclean = index;

		/* return successful */
		*offsetp = (off_t)offset;
		*lengthp = length;

		vfs_drt_trace(cmap, DRT_DEBUG_RETCLUSTER, (int)offset, (int)length, 0, 0);
		return(KERN_SUCCESS);
	}
	/*
	 * We didn't find anything... hashtable is empty
	 * emit stats into trace buffer and
	 * then free it
	 */
	vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
		      cmap->scm_modulus,
		      cmap->scm_buckets,
		      cmap->scm_lastclean,
		      cmap->scm_iskips);

	vfs_drt_free_map(cmap);
	*cmapp = NULL;

	return(KERN_FAILURE);
}
static kern_return_t
vfs_drt_control(void **cmapp, int op_type)
{
	struct vfs_drt_clustermap *cmap;

	/* sanity */
	if ((cmapp == NULL) || (*cmapp == NULL))
		return(KERN_FAILURE);
	cmap = *cmapp;

	switch (op_type) {
	case 0:
		/* emit stats into trace buffer */
		vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
			      cmap->scm_modulus,
			      cmap->scm_buckets,
			      cmap->scm_lastclean,
			      cmap->scm_iskips);

		vfs_drt_free_map(cmap);
		*cmapp = NULL;
		break;

	case 1:
		/* start the next cluster scan from the beginning of the map */
		cmap->scm_lastclean = 0;
		break;
	}
	return(KERN_SUCCESS);
}
/*
 * Emit a summary of the state of the clustermap into the trace buffer
 * along with some caller-provided data.
 */
#if KDEBUG
static void
vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, int code, int arg1, int arg2, int arg3, int arg4)
{
	KERNEL_DEBUG(code, arg1, arg2, arg3, arg4, 0);
}
#else
static void
vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, __unused int code,
	      __unused int arg1, __unused int arg2, __unused int arg3,
	      __unused int arg4)
{
}
#endif
/*
 * Perform basic sanity check on the hash entry summary count
 * vs. the actual bits set in the entry.
 */
static void
vfs_drt_sanity(struct vfs_drt_clustermap *cmap)
{
	int index, i;
	int bits_on;

	for (index = 0; index < cmap->scm_modulus; index++) {
		if (DRT_HASH_VACANT(cmap, index))
			continue;

		for (bits_on = 0, i = 0; i < DRT_BITVECTOR_PAGES; i++) {
			if (DRT_HASH_TEST_BIT(cmap, index, i))
				bits_on++;
		}
		if (bits_on != DRT_HASH_GET_COUNT(cmap, index))
			panic("bits_on = %d, index = %d\n", bits_on, index);
	}
}