/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.10 (Berkeley) 3/28/95
 */

#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/buf_internal.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/uio_internal.h>
#include <libkern/libkern.h>
#include <machine/machine_routines.h>

#include <sys/ubc_internal.h>
#include <vm/vnode_pager.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_map.h>
#include <mach/upl.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>
#include <libkern/OSAtomic.h>

#if 0
#undef KERNEL_DEBUG
#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
#endif


#define CL_READ		0x01
#define CL_WRITE	0x02
#define CL_ASYNC	0x04
#define CL_COMMIT	0x08
#define CL_PAGEOUT	0x10
#define CL_AGE		0x20
#define CL_NOZERO	0x40
#define CL_PAGEIN	0x80
#define CL_DEV_MEMORY	0x100
#define CL_PRESERVE	0x200
#define CL_THROTTLE	0x400
#define CL_KEEPCACHED	0x800
#define CL_DIRECT_IO	0x1000
#define CL_PASSIVE	0x2000
#define CL_IOSTREAMING	0x4000

#define MAX_VECTOR_UPL_ELEMENTS	8
#define MAX_VECTOR_UPL_SIZE	(2 * MAX_UPL_SIZE) * PAGE_SIZE

extern upl_t vector_upl_create(vm_offset_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_iostate(upl_t, upl_t, vm_offset_t, u_int32_t);

struct clios {
	u_int	io_completed;	/* amount of io that has currently completed */
	u_int	io_issued;	/* amount of io that was successfully issued */
	int	io_error;	/* error code of first error encountered */
	int	io_wanted;	/* someone is sleeping waiting for a change in state */
};

static lck_grp_t	*cl_mtx_grp;
static lck_attr_t	*cl_mtx_attr;
static lck_grp_attr_t	*cl_mtx_grp_attr;
static lck_mtx_t	*cl_mtxp;


#define	IO_UNKNOWN	0
#define	IO_DIRECT	1
#define IO_CONTIG	2
#define IO_COPY		3

#define	PUSH_DELAY	0x01
#define PUSH_ALL	0x02
#define PUSH_SYNC	0x04


static void cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset);
static void cluster_wait_IO(buf_t cbp_head, int async);
static void cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait);

static int cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length);

static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
		      int flags, buf_t real_bp, struct clios *iostate, int (*)(buf_t, void *), void *callback_arg);
static int cluster_iodone(buf_t bp, void *callback_arg);
static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags);
static int cluster_hard_throttle_on(vnode_t vp, uint32_t);

static void cluster_syncup(vnode_t vp, off_t newEOF, int (*)(buf_t, void *), void *callback_arg);

static void cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference);
static int cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference);

static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags,
			     int (*)(buf_t, void *), void *callback_arg);
static int cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
			       int flags, int (*)(buf_t, void *), void *callback_arg);
static int cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
			       int (*)(buf_t, void *), void *callback_arg, int flags);

static int cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF,
			      off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg);
static int cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF,
				int *write_type, u_int32_t *write_length, int flags, int (*)(buf_t, void *), void *callback_arg);
static int cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF,
				int *write_type, u_int32_t *write_length, int (*)(buf_t, void *), void *callback_arg, int bflag);

static int cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*)(buf_t, void *), void *callback_arg);

static int	cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
static void	cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra, int (*callback)(buf_t, void *), void *callback_arg, int bflag);

static int	cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg);

static int	cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int (*)(buf_t, void *), void *callback_arg);

static void	sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg);
static void	sparse_cluster_push(void **cmapp, vnode_t vp, off_t EOF, int push_flag, int (*)(buf_t, void *), void *callback_arg);
static void	sparse_cluster_add(void **cmapp, vnode_t vp, struct cl_extent *, off_t EOF, int (*)(buf_t, void *), void *callback_arg);

static kern_return_t vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp);
static kern_return_t vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp);
static kern_return_t vfs_drt_control(void **cmapp, int op_type);


/*
 * limit the internal I/O size so that we
 * can represent it in a 32 bit int
 */
#define MAX_IO_REQUEST_SIZE	(1024 * 1024 * 512)
#define MAX_IO_CONTIG_SIZE	(MAX_UPL_SIZE * PAGE_SIZE)
#define MAX_VECTS		16
#define MIN_DIRECT_WRITE_SIZE	(4 * PAGE_SIZE)

#define IO_SCALE(vp, base)		(vp->v_mount->mnt_ioscale * base)
#define MAX_CLUSTER_SIZE(vp)		(cluster_max_io_size(vp->v_mount, CL_WRITE))
#define MAX_PREFETCH(vp, io_size)	(io_size * IO_SCALE(vp, 3))


int speculative_reads_disabled = 0;

/*
 * throttle the number of async writes that
 * can be outstanding on a single vnode
 * before we issue a synchronous write
 */
#define HARD_THROTTLE_MAXCNT	0
#define HARD_THROTTLE_MAXSIZE	(32 * 1024)

int hard_throttle_on_root = 0;
struct timeval priority_IO_timestamp_for_root;

void
cluster_init(void) {
	/*
	 * allocate lock group attribute and group
	 */
	cl_mtx_grp_attr = lck_grp_attr_alloc_init();
	cl_mtx_grp = lck_grp_alloc_init("cluster I/O", cl_mtx_grp_attr);

	/*
	 * allocate the lock attribute
	 */
	cl_mtx_attr = lck_attr_alloc_init();

	/*
	 * allocate and initialize the mutex used to protect updates and waits
	 * on the cluster_io context
	 */
	cl_mtxp = lck_mtx_alloc_init(cl_mtx_grp, cl_mtx_attr);

	if (cl_mtxp == NULL)
		panic("cluster_init: failed to allocate cl_mtxp");
}


uint32_t
cluster_max_io_size(mount_t mp, int type)
{
	uint32_t	max_io_size;
	uint32_t	segcnt;
	uint32_t	maxcnt;

	switch (type) {

	case CL_READ:
		segcnt = mp->mnt_segreadcnt;
		maxcnt = mp->mnt_maxreadcnt;
		break;
	case CL_WRITE:
		segcnt = mp->mnt_segwritecnt;
		maxcnt = mp->mnt_maxwritecnt;
		break;
	default:
		segcnt = min(mp->mnt_segreadcnt, mp->mnt_segwritecnt);
		maxcnt = min(mp->mnt_maxreadcnt, mp->mnt_maxwritecnt);
		break;
	}
	if (segcnt > MAX_UPL_SIZE) {
		/*
		 * don't allow a size beyond the max UPL size we can create
		 */
		segcnt = MAX_UPL_SIZE;
	}
	max_io_size = min((segcnt * PAGE_SIZE), maxcnt);

	if (max_io_size < (MAX_UPL_TRANSFER * PAGE_SIZE)) {
		/*
		 * don't allow a size smaller than the old fixed limit
		 */
		max_io_size = (MAX_UPL_TRANSFER * PAGE_SIZE);
	} else {
		/*
		 * make sure the size specified is a multiple of PAGE_SIZE
		 */
		max_io_size &= ~PAGE_MASK;
	}
	return (max_io_size);
}




#define CLW_ALLOCATE		0x01
#define CLW_RETURNLOCKED	0x02
#define CLW_IONOCACHE		0x04
#define CLW_IOPASSIVE		0x08

/*
 * if the read ahead context doesn't yet exist,
 * allocate and initialize it...
 * the vnode lock serializes multiple callers
 * during the actual assignment... first one
 * to grab the lock wins... the other callers
 * will release the now unnecessary storage
 *
 * once the context is present, try to grab (but don't block on)
 * the lock associated with it... if someone
 * else currently owns it, then the read
 * will run without read-ahead.  this allows
 * multiple readers to run in parallel and
 * since there's only 1 read ahead context,
 * there's no real loss in only allowing 1
 * reader to have read-ahead enabled.
 */
static struct cl_readahead *
cluster_get_rap(vnode_t vp)
{
	struct ubc_info		*ubc;
	struct cl_readahead	*rap;

	ubc = vp->v_ubcinfo;

	if ((rap = ubc->cl_rahead) == NULL) {
		MALLOC_ZONE(rap, struct cl_readahead *, sizeof *rap, M_CLRDAHEAD, M_WAITOK);

		bzero(rap, sizeof *rap);
		rap->cl_lastr = -1;
		lck_mtx_init(&rap->cl_lockr, cl_mtx_grp, cl_mtx_attr);

		vnode_lock(vp);

		if (ubc->cl_rahead == NULL)
			ubc->cl_rahead = rap;
		else {
			lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp);
			FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD);
			rap = ubc->cl_rahead;
		}
		vnode_unlock(vp);
	}
	if (lck_mtx_try_lock(&rap->cl_lockr) == TRUE)
		return (rap);

	return ((struct cl_readahead *)NULL);
}


/*
 * if the write behind context doesn't yet exist,
 * and CLW_ALLOCATE is specified, allocate and initialize it...
 * the vnode lock serializes multiple callers
 * during the actual assignment... first one
 * to grab the lock wins... the other callers
 * will release the now unnecessary storage
 *
 * if CLW_RETURNLOCKED is set, grab (blocking if necessary)
 * the lock associated with the write behind context before
 * returning
 */

static struct cl_writebehind *
cluster_get_wbp(vnode_t vp, int flags)
{
	struct ubc_info		*ubc;
	struct cl_writebehind	*wbp;

	ubc = vp->v_ubcinfo;

	if ((wbp = ubc->cl_wbehind) == NULL) {

		if ( !(flags & CLW_ALLOCATE))
			return ((struct cl_writebehind *)NULL);

		MALLOC_ZONE(wbp, struct cl_writebehind *, sizeof *wbp, M_CLWRBEHIND, M_WAITOK);

		bzero(wbp, sizeof *wbp);
		lck_mtx_init(&wbp->cl_lockw, cl_mtx_grp, cl_mtx_attr);

		vnode_lock(vp);

		if (ubc->cl_wbehind == NULL)
			ubc->cl_wbehind = wbp;
		else {
			lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
			FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
			wbp = ubc->cl_wbehind;
		}
		vnode_unlock(vp);
	}
	if (flags & CLW_RETURNLOCKED)
		lck_mtx_lock(&wbp->cl_lockw);

	return (wbp);
}

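/*
 * push any pending write-behind clusters for this vnode synchronously;
 * quietly does nothing if no write-behind context has been allocated
 */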
static void
cluster_syncup(vnode_t vp, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg)
{
	struct cl_writebehind *wbp;

	if ((wbp = cluster_get_wbp(vp, 0)) != NULL) {

		if (wbp->cl_number) {
			lck_mtx_lock(&wbp->cl_lockw);

			cluster_try_push(wbp, vp, newEOF, PUSH_ALL | PUSH_SYNC, callback, callback_arg);

			lck_mtx_unlock(&wbp->cl_lockw);
		}
	}
}

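/*
 * decide whether I/O against this vnode should be throttled right now:
 * returns 1 if recent priority I/O to the root device demands it, or if
 * the calling thread's I/O policy is IOPOL_THROTTLE and this mount is
 * already being throttled; returns 0 otherwise
 */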
static int
cluster_hard_throttle_on(vnode_t vp, uint32_t hard_throttle)
{
	struct uthread	*ut;

	if (hard_throttle) {
		static struct timeval hard_throttle_maxelapsed = { 0, 200000 };

		if (vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV) {
			struct timeval elapsed;

			if (hard_throttle_on_root)
				return (1);

			microuptime(&elapsed);
			timevalsub(&elapsed, &priority_IO_timestamp_for_root);

			if (timevalcmp(&elapsed, &hard_throttle_maxelapsed, <))
				return (1);
		}
	}
	if (throttle_get_io_policy(&ut) == IOPOL_THROTTLE) {
		if (throttle_io_will_be_throttled(-1, vp->v_mount)) {
			return (1);
		}
	}
	return (0);
}

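/*
 * clean up a UPL range after an I/O error: commit it for physical (B_PHYS)
 * transfers, otherwise abort it with flags chosen from the type of I/O
 * (pagein / pageout / cached) and the error; returns the abort code used
 * (0 when the range was committed instead)
 */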
static int
cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags)
{
	int upl_abort_code = 0;
	int page_in  = 0;
	int page_out = 0;

	if (io_flags & B_PHYS)
		/*
		 * direct write of any flavor, or a direct read that wasn't aligned
		 */
		ubc_upl_commit_range(upl, upl_offset, abort_size, UPL_COMMIT_FREE_ON_EMPTY);
	else {
		if (io_flags & B_PAGEIO) {
			if (io_flags & B_READ)
				page_in  = 1;
			else
				page_out = 1;
		}
		if (io_flags & B_CACHE)
			/*
			 * leave pages in the cache unchanged on error
			 */
			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
		else if (page_out && (error != ENXIO))
			/*
			 * transient error... leave pages unchanged
			 */
			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
		else if (page_in)
			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
		else
			upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;

		ubc_upl_abort_range(upl, upl_offset, abort_size, upl_abort_code);
	}
	return (upl_abort_code);
}

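/*
 * completion handler for a cluster transaction: once every buf in the
 * chain is done, accumulate sizes and errors, zero-fill any tail beyond
 * EOF, commit or abort the UPL as requested, update and wake up any
 * clios waiter, and biodone the original buf if there is one
 */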
static int
cluster_iodone(buf_t bp, void *callback_arg)
{
	int	b_flags;
	int	error;
	int	total_size;
	int	total_resid;
	int	upl_offset;
	int	zero_offset;
	int	pg_offset = 0;
	int	commit_size = 0;
	int	upl_flags = 0;
	int	transaction_size = 0;
	upl_t	upl;
	buf_t	cbp;
	buf_t	cbp_head;
	buf_t	cbp_next;
	buf_t	real_bp;
	struct	clios *iostate;
	boolean_t	transaction_complete = FALSE;

	cbp_head = (buf_t)(bp->b_trans_head);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_START,
		     cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);

	for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
		/*
		 * all I/O requests that are part of this transaction
		 * have to complete before we can process it
		 */
		if ( !(cbp->b_flags & B_DONE)) {

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
				     cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0);

			return 0;
		}
		if (cbp->b_flags & B_EOT)
			transaction_complete = TRUE;
	}
	if (transaction_complete == FALSE) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
			     cbp_head, 0, 0, 0, 0);

		return 0;
	}
	error       = 0;
	total_size  = 0;
	total_resid = 0;

	cbp        = cbp_head;
	upl_offset = cbp->b_uploffset;
	upl        = cbp->b_upl;
	b_flags    = cbp->b_flags;
	real_bp    = cbp->b_real_bp;
	zero_offset= cbp->b_validend;
	iostate    = (struct clios *)cbp->b_iostate;

	if (real_bp)
		real_bp->b_dev = cbp->b_dev;

	while (cbp) {
		if ((cbp->b_flags & B_ERROR) && error == 0)
			error = cbp->b_error;

		total_resid += cbp->b_resid;
		total_size  += cbp->b_bcount;

		cbp_next = cbp->b_trans_next;

		if (cbp_next == NULL)
			/*
			 * compute the overall size of the transaction
			 * in case we created one that has 'holes' in it
			 * 'total_size' represents the amount of I/O we
			 * did, not the span of the transaction w/r to the UPL
			 */
			transaction_size = cbp->b_uploffset + cbp->b_bcount - upl_offset;

		if (cbp != cbp_head)
			free_io_buf(cbp);

		cbp = cbp_next;
	}
	if (error == 0 && total_resid)
		error = EIO;

	if (error == 0) {
		int	(*cliodone_func)(buf_t, void *) = (int (*)(buf_t, void *))(cbp_head->b_cliodone);

		if (cliodone_func != NULL) {
			cbp_head->b_bcount = transaction_size;

			error = (*cliodone_func)(cbp_head, callback_arg);
		}
	}
	if (zero_offset)
		cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp);

	free_io_buf(cbp_head);

	if (iostate) {
		int need_wakeup = 0;

		/*
		 * someone has issued multiple I/Os asynchronously
		 * and is waiting for them to complete (streaming)
		 */
		lck_mtx_lock_spin(cl_mtxp);

		if (error && iostate->io_error == 0)
			iostate->io_error = error;

		iostate->io_completed += total_size;

		if (iostate->io_wanted) {
			/*
			 * someone is waiting for the state of
			 * this io stream to change
			 */
			iostate->io_wanted = 0;
			need_wakeup = 1;
		}
		lck_mtx_unlock(cl_mtxp);

		if (need_wakeup)
			wakeup((caddr_t)&iostate->io_wanted);
	}

	if (b_flags & B_COMMIT_UPL) {

		pg_offset   = upl_offset & PAGE_MASK;
		commit_size = (pg_offset + transaction_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

		if (error)
			upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, commit_size, error, b_flags);
		else {
			upl_flags = UPL_COMMIT_FREE_ON_EMPTY;

			if ((b_flags & B_PHYS) && (b_flags & B_READ))
				upl_flags |= UPL_COMMIT_SET_DIRTY;

			if (b_flags & B_AGE)
				upl_flags |= UPL_COMMIT_INACTIVATE;

			ubc_upl_commit_range(upl, upl_offset - pg_offset, commit_size, upl_flags);
		}
	}
	if ((b_flags & B_NEED_IODONE) && real_bp) {
		if (error) {
			real_bp->b_flags |= B_ERROR;
			real_bp->b_error = error;
		}
		real_bp->b_resid = total_resid;

		buf_biodone(real_bp);
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
		     upl, upl_offset - pg_offset, commit_size, (error << 24) | upl_flags, 0);

	return (error);
}


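/*
 * if this vnode is currently subject to hard throttling, clamp *limit
 * to HARD_THROTTLE_MAXSIZE and return 1; otherwise leave it alone
 */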
uint32_t
cluster_hard_throttle_limit(vnode_t vp, uint32_t *limit, uint32_t hard_throttle)
{
	if (cluster_hard_throttle_on(vp, hard_throttle)) {
		*limit = HARD_THROTTLE_MAXSIZE;
		return 1;
	}
	return 0;
}

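/*
 * zero 'size' bytes of the UPL starting at 'upl_offset'; uses the buf's
 * mapped kernel address when one is present, otherwise zeroes the backing
 * pages through their physical addresses
 */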
void
cluster_zero(upl_t upl, upl_offset_t upl_offset, int size, buf_t bp)
{

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_START,
		     upl_offset, size, bp, 0, 0);

	if (bp == NULL || bp->b_datap == 0) {
		upl_page_info_t	*pl;
		addr64_t	zero_addr;

		pl = ubc_upl_pageinfo(upl);

		if (upl_device_page(pl) == TRUE) {
			zero_addr = ((addr64_t)upl_phys_page(pl, 0) << 12) + upl_offset;

			bzero_phys_nc(zero_addr, size);
		} else {
			while (size) {
				int	page_offset;
				int	page_index;
				int	zero_cnt;

				page_index  = upl_offset / PAGE_SIZE;
				page_offset = upl_offset & PAGE_MASK;

				zero_addr = ((addr64_t)upl_phys_page(pl, page_index) << 12) + page_offset;
				zero_cnt  = min(PAGE_SIZE - page_offset, size);

				bzero_phys(zero_addr, zero_cnt);

				size       -= zero_cnt;
				upl_offset += zero_cnt;
			}
		}
	} else
		bzero((caddr_t)((vm_offset_t)bp->b_datap + upl_offset), size);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_END,
		     upl_offset, size, 0, 0, 0);
}


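/*
 * transaction chain helpers: cluster_EOT marks the tail buf and records
 * any tail zero-fill offset, cluster_wait_IO waits for every buf in the
 * chain (and, for async I/O, its callback) to complete, and
 * cluster_complete_transaction runs cluster_iodone and propagates errors
 */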
static void
cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset)
{
	cbp_head->b_validend = zero_offset;
	cbp_tail->b_flags |= B_EOT;
}

static void
cluster_wait_IO(buf_t cbp_head, int async)
{
	buf_t	cbp;

	if (async) {
		/*
		 * async callback completion will not normally
		 * generate a wakeup upon I/O completion...
		 * by setting BL_WANTED, we will force a wakeup
		 * to occur as any outstanding I/Os complete...
		 * I/Os already completed will have BL_CALLDONE already
		 * set and we won't block in buf_biowait_callback..
		 * note that we're actually waiting for the bp to have
		 * completed the callback function... only then
		 * can we safely take back ownership of the bp
		 * need the main buf mutex in order to safely
		 * update b_lflags
		 */
		buf_list_lock();

		for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next)
			cbp->b_lflags |= BL_WANTED;

		buf_list_unlock();
	}
	for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
		if (async)
			buf_biowait_callback(cbp);
		else
			buf_biowait(cbp);
	}
}

static void
cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait)
{
	buf_t	cbp;
	int	error;

	/*
	 * cluster_complete_transaction will
	 * only be called if we've issued a complete chain in synchronous mode
	 * or, we've already done a cluster_wait_IO on an incomplete chain
	 */
	if (needwait) {
		for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next)
			buf_biowait(cbp);
	}
	error = cluster_iodone(*cbp_head, callback_arg);

	if ( !(flags & CL_ASYNC) && error && *retval == 0) {
		if (((flags & (CL_PAGEOUT | CL_KEEPCACHED)) != CL_PAGEOUT) || (error != ENXIO))
			*retval = error;
	}
	*cbp_head = (buf_t)NULL;
}

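/*
 * core of the cluster layer: map the file range onto device blocks with
 * VNOP_BLOCKMAP, carve the UPL into buf transactions bounded by the
 * mount's transfer and segment limits, zero-fill read 'holes', issue the
 * bufs via VNOP_STRATEGY, and commit/abort the UPL as directed by 'flags'
 */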
static int
cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
	   int flags, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
{
	buf_t	cbp;
	u_int	size;
	u_int	io_size;
	int	io_flags;
	int	bmap_flags;
	int	error = 0;
	int	retval = 0;
	buf_t	cbp_head = NULL;
	buf_t	cbp_tail = NULL;
	int	trans_count = 0;
	int	max_trans_count;
	u_int	pg_count;
	int	pg_offset;
	u_int	max_iosize;
	u_int	max_vectors;
	int	priv;
	int	zero_offset = 0;
	int	async_throttle = 0;
	mount_t	mp;
	vm_offset_t	upl_end_offset;
	boolean_t	need_EOT = FALSE;

	/*
	 * we currently don't support buffers larger than a page
	 */
	if (real_bp && non_rounded_size > PAGE_SIZE)
		panic("%s(): Called with real buffer of size %d bytes which "
		      "is greater than the maximum allowed size of "
		      "%d bytes (the system PAGE_SIZE).\n",
		      __FUNCTION__, non_rounded_size, PAGE_SIZE);

	mp = vp->v_mount;

	/*
	 * we don't want to do any funny rounding of the size for IO requests
	 * coming through the DIRECT or CONTIGUOUS paths...  those pages don't
	 * belong to us... we can't extend (nor do we need to) the I/O to fill
	 * out a page
	 */
	if (mp->mnt_devblocksize > 1 && !(flags & (CL_DEV_MEMORY | CL_DIRECT_IO))) {
		/*
		 * round the requested size up so that this I/O ends on a
		 * page boundary in case this is a 'write'... if the filesystem
		 * has blocks allocated to back the page beyond the EOF, we want to
		 * make sure to write out the zero's that are sitting beyond the EOF
		 * so that in case the filesystem doesn't explicitly zero this area
		 * if a hole is created via a lseek/write beyond the current EOF,
		 * it will return zeros when it's read back from the disk.  If the
		 * physical allocation doesn't extend for the whole page, we'll
		 * only write/read from the disk up to the end of this allocation
		 * via the extent info returned from the VNOP_BLOCKMAP call.
		 */
		pg_offset = upl_offset & PAGE_MASK;

		size = (((non_rounded_size + pg_offset) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - pg_offset;
	} else {
		/*
		 * anyone advertising a blocksize of 1 byte probably
		 * can't deal with us rounding up the request size
		 * AFP is one such filesystem/device
		 */
		size = non_rounded_size;
	}
	upl_end_offset = upl_offset + size;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START, (int)f_offset, size, upl_offset, flags, 0);

	/*
	 * Set the maximum transaction size to the maximum desired number of
	 * buffers.
	 */
	max_trans_count = 8;
	if (flags & CL_DEV_MEMORY)
		max_trans_count = 16;

	if (flags & CL_READ) {
		io_flags = B_READ;
		bmap_flags = VNODE_READ;

		max_iosize  = mp->mnt_maxreadcnt;
		max_vectors = mp->mnt_segreadcnt;
	} else {
		io_flags = B_WRITE;
		bmap_flags = VNODE_WRITE;

		max_iosize  = mp->mnt_maxwritecnt;
		max_vectors = mp->mnt_segwritecnt;
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_NONE, max_iosize, max_vectors, mp->mnt_devblocksize, 0, 0);

	/*
	 * make sure the maximum iosize is a
	 * multiple of the page size
	 */
	max_iosize &= ~PAGE_MASK;

	/*
	 * Ensure the maximum iosize is sensible.
	 */
	if (!max_iosize)
		max_iosize = PAGE_SIZE;

	if (flags & CL_THROTTLE) {
		if ( !(flags & CL_PAGEOUT) && cluster_hard_throttle_on(vp, 1)) {
			if (max_iosize > HARD_THROTTLE_MAXSIZE)
				max_iosize = HARD_THROTTLE_MAXSIZE;
			async_throttle = HARD_THROTTLE_MAXCNT;
		} else {
			if ( (flags & CL_DEV_MEMORY) )
				async_throttle = IO_SCALE(vp, VNODE_ASYNC_THROTTLE);
			else {
				u_int max_cluster;
				u_int max_cluster_size;
				u_int max_prefetch;

				max_cluster_size = MAX_CLUSTER_SIZE(vp);
				max_prefetch = MAX_PREFETCH(vp, cluster_max_io_size(vp->v_mount, CL_READ));

				if (max_iosize > max_cluster_size)
					max_cluster = max_cluster_size;
				else
					max_cluster = max_iosize;

				if (size < max_cluster)
					max_cluster = size;

				async_throttle = min(IO_SCALE(vp, VNODE_ASYNC_THROTTLE), (max_prefetch / max_cluster) - 1);
			}
		}
	}
	if (flags & CL_AGE)
		io_flags |= B_AGE;
	if (flags & (CL_PAGEIN | CL_PAGEOUT))
		io_flags |= B_PAGEIO;
	if (flags & (CL_IOSTREAMING))
		io_flags |= B_IOSTREAMING;
	if (flags & CL_COMMIT)
		io_flags |= B_COMMIT_UPL;
	if (flags & CL_PRESERVE)
		io_flags |= B_PHYS;
	if (flags & CL_KEEPCACHED)
		io_flags |= B_CACHE;
	if (flags & CL_PASSIVE)
		io_flags |= B_PASSIVE;
	if (vp->v_flag & VSYSTEM)
		io_flags |= B_META;

	if ((flags & CL_READ) && ((upl_offset + non_rounded_size) & PAGE_MASK) && (!(flags & CL_NOZERO))) {
		/*
		 * then we are going to end up
		 * with a page that we can't complete (the file size wasn't a multiple
		 * of PAGE_SIZE and we're trying to read to the end of the file
		 * so we'll go ahead and zero out the portion of the page we can't
		 * read in from the file
		 */
		zero_offset = upl_offset + non_rounded_size;
	}
	while (size) {
		daddr64_t blkno;
		daddr64_t lblkno;
		u_int	io_size_wanted;
		size_t	io_size_tmp;

		if (size > max_iosize)
			io_size = max_iosize;
		else
			io_size = size;

		io_size_wanted = io_size;
		io_size_tmp = (size_t)io_size;

		if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &io_size_tmp, NULL, bmap_flags, NULL)))
			break;

		if (io_size_tmp > io_size_wanted)
			io_size = io_size_wanted;
		else
			io_size = (u_int)io_size_tmp;

		if (real_bp && (real_bp->b_blkno == real_bp->b_lblkno))
			real_bp->b_blkno = blkno;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 24)) | DBG_FUNC_NONE,
			     (int)f_offset, (int)(blkno>>32), (int)blkno, io_size, 0);

		if (io_size == 0) {
			/*
			 * vnop_blockmap didn't return an error... however, it did
			 * return an extent size of 0 which means we can't
			 * make forward progress on this I/O... a hole in the
			 * file would be returned as a blkno of -1 with a non-zero io_size
			 * a real extent is returned with a blkno != -1 and a non-zero io_size
			 */
			error = EINVAL;
			break;
		}
		if ( !(flags & CL_READ) && blkno == -1) {
			off_t	e_offset;
			int	pageout_flags;

			if (upl_get_internal_vectorupl(upl))
				panic("Vector UPLs should not take this code-path\n");
			/*
			 * we're writing into a 'hole'
			 */
			if (flags & CL_PAGEOUT) {
				/*
				 * if we got here via cluster_pageout
				 * then just error the request and return
				 * the 'hole' should already have been covered
				 */
				error = EINVAL;
				break;
			}
			/*
			 * we can get here if the cluster code happens to
			 * pick up a page that was dirtied via mmap vs
			 * a 'write' and the page targets a 'hole'...
			 * i.e. the writes to the cluster were sparse
			 * and the file was being written for the first time
			 *
			 * we can also get here if the filesystem supports
			 * 'holes' that are less than PAGE_SIZE.... because
			 * we can't know if the range in the page that covers
			 * the 'hole' has been dirtied via an mmap or not,
			 * we have to assume the worst and try to push the
			 * entire page to storage.
			 *
			 * Try paging out the page individually before
			 * giving up entirely and dumping it (the pageout
			 * path will ensure that the zero extent accounting
			 * has been taken care of before we get back into cluster_io)
			 *
			 * go direct to vnode_pageout so that we don't have to
			 * unbusy the page from the UPL... we used to do this
			 * so that we could call ubc_sync_range, but that results
			 * in a potential deadlock if someone else races us to acquire
			 * that page and wins and in addition needs one of the pages
			 * we're continuing to hold in the UPL
			 */
			pageout_flags = UPL_MSYNC | UPL_VNODE_PAGER | UPL_NESTED_PAGEOUT;

			if ( !(flags & CL_ASYNC))
				pageout_flags |= UPL_IOSYNC;
			if ( !(flags & CL_COMMIT))
				pageout_flags |= UPL_NOCOMMIT;

			if (cbp_head) {
				buf_t last_cbp;

				/*
				 * first we have to wait for the current outstanding I/Os
				 * to complete... EOT hasn't been set yet on this transaction
				 * so the pages won't be released just because all of the current
				 * I/O linked to this transaction has completed...
				 */
				cluster_wait_IO(cbp_head, (flags & CL_ASYNC));

				/*
				 * we've got a transaction that
				 * includes the page we're about to push out through vnode_pageout...
				 * find the last bp in the list which will be the one that
				 * includes the head of this page and round its iosize down
				 * to a page boundary...
				 */
				for (last_cbp = cbp = cbp_head; cbp->b_trans_next; cbp = cbp->b_trans_next)
					last_cbp = cbp;

				cbp->b_bcount &= ~PAGE_MASK;

				if (cbp->b_bcount == 0) {
					/*
					 * this buf no longer has any I/O associated with it
					 */
					free_io_buf(cbp);

					if (cbp == cbp_head) {
						/*
						 * the buf we just freed was the only buf in
						 * this transaction... so there's no I/O to do
						 */
						cbp_head = NULL;
					} else {
						/*
						 * remove the buf we just freed from
						 * the transaction list
						 */
						last_cbp->b_trans_next = NULL;
						cbp_tail = last_cbp;
					}
				}
				if (cbp_head) {
					/*
					 * there was more to the current transaction
					 * than just the page we are pushing out via vnode_pageout...
					 * mark it as finished and complete it... we've already
					 * waited for the I/Os to complete above in the call to cluster_wait_IO
					 */
					cluster_EOT(cbp_head, cbp_tail, 0);

					cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);

					trans_count = 0;
				}
			}
			if (vnode_pageout(vp, upl, trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) {
				error = EINVAL;
				break;
			}
			e_offset = round_page_64(f_offset + 1);
			io_size = e_offset - f_offset;

			f_offset   += io_size;
			upl_offset += io_size;

			if (size >= io_size)
				size -= io_size;
			else
				size = 0;
			/*
			 * keep track of how much of the original request
			 * that we've actually completed... non_rounded_size
			 * may go negative due to us rounding the request
			 * to a page size multiple (i.e. size > non_rounded_size)
			 */
			non_rounded_size -= io_size;

			if (non_rounded_size <= 0) {
				/*
				 * we've transferred all of the data in the original
				 * request, but we were unable to complete the tail
				 * of the last page because the file didn't have
				 * an allocation to back that portion... this is ok.
				 */
				size = 0;
			}
			continue;
		}
		lblkno = (daddr64_t)(f_offset / PAGE_SIZE_64);
		/*
		 * we have now figured out how much I/O we can do - this is in 'io_size'
		 * pg_offset is the starting point in the first page for the I/O
		 * pg_count is the number of full and partial pages that 'io_size' encompasses
		 */
		pg_offset = upl_offset & PAGE_MASK;

		if (flags & CL_DEV_MEMORY) {
			/*
			 * treat physical requests as one 'giant' page
			 */
			pg_count = 1;
		} else
			pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;

		if ((flags & CL_READ) && blkno == -1) {
			vm_offset_t commit_offset;
			int	bytes_to_zero;
			int	complete_transaction_now = 0;

			/*
			 * if we're reading and blkno == -1, then we've got a
			 * 'hole' in the file that we need to deal with by zeroing
			 * out the affected area in the upl
			 */
			if (io_size >= (u_int)non_rounded_size) {
				/*
				 * if this upl contains the EOF and it is not a multiple of PAGE_SIZE
				 * then 'zero_offset' will be non-zero
				 * if the 'hole' returned by vnop_blockmap extends all the way to the eof
				 * (indicated by the io_size finishing off the I/O request for this UPL)
				 * then we're not going to issue an I/O for the
				 * last page in this upl... we need to zero both the hole and the tail
				 * of the page beyond the EOF, since the delayed zero-fill won't kick in
				 */
				bytes_to_zero = non_rounded_size;
				if (!(flags & CL_NOZERO))
					bytes_to_zero = (((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset;

				zero_offset = 0;
			} else
				bytes_to_zero = io_size;

			pg_count = 0;

			cluster_zero(upl, upl_offset, bytes_to_zero, real_bp);

			if (cbp_head) {
				int	pg_resid;

				/*
				 * if there is a current I/O chain pending
				 * then the first page of the group we just zero'd
				 * will be handled by the I/O completion if the zero
				 * fill started in the middle of the page
				 */
				commit_offset = (upl_offset + (PAGE_SIZE - 1)) & ~PAGE_MASK;

				pg_resid = commit_offset - upl_offset;

				if (bytes_to_zero >= pg_resid) {
					/*
					 * the last page of the current I/O
					 * has been completed...
					 * compute the number of fully zero'd
					 * pages that are beyond it
					 * plus the last page if it's partial
					 * and we have no more I/O to issue...
					 * otherwise a partial page is left
					 * to begin the next I/O
					 */
					if ((int)io_size >= non_rounded_size)
						pg_count = (bytes_to_zero - pg_resid + (PAGE_SIZE - 1)) / PAGE_SIZE;
					else
						pg_count = (bytes_to_zero - pg_resid) / PAGE_SIZE;

					complete_transaction_now = 1;
				}
			} else {
				/*
				 * no pending I/O to deal with
				 * so, commit all of the fully zero'd pages
				 * plus the last page if it's partial
				 * and we have no more I/O to issue...
				 * otherwise a partial page is left
				 * to begin the next I/O
				 */
				if ((int)io_size >= non_rounded_size)
					pg_count = (pg_offset + bytes_to_zero + (PAGE_SIZE - 1)) / PAGE_SIZE;
				else
					pg_count = (pg_offset + bytes_to_zero) / PAGE_SIZE;

				commit_offset = upl_offset & ~PAGE_MASK;
			}
			if ( (flags & CL_COMMIT) && pg_count) {
				ubc_upl_commit_range(upl, commit_offset, pg_count * PAGE_SIZE,
						     UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
			}
			upl_offset += io_size;
			f_offset   += io_size;
			size       -= io_size;

			/*
			 * keep track of how much of the original request
			 * that we've actually completed... non_rounded_size
			 * may go negative due to us rounding the request
			 * to a page size multiple (i.e. size > non_rounded_size)
			 */
			non_rounded_size -= io_size;

			if (non_rounded_size <= 0) {
				/*
				 * we've transferred all of the data in the original
				 * request, but we were unable to complete the tail
				 * of the last page because the file didn't have
				 * an allocation to back that portion... this is ok.
				 */
				size = 0;
			}
			if (cbp_head && (complete_transaction_now || size == 0)) {
				cluster_wait_IO(cbp_head, (flags & CL_ASYNC));

				cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0);

				cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);

				trans_count = 0;
			}
			continue;
		}
		if (pg_count > max_vectors) {
			if (((pg_count - max_vectors) * PAGE_SIZE) > io_size) {
				io_size = PAGE_SIZE - pg_offset;
				pg_count = 1;
			} else {
				io_size -= (pg_count - max_vectors) * PAGE_SIZE;
				pg_count = max_vectors;
			}
		}
		/*
		 * If the transaction is going to reach the maximum number of
		 * desired elements, truncate the i/o to the nearest page so
		 * that the actual i/o is initiated after this buffer is
		 * created and added to the i/o chain.
		 *
		 * I/O directed to physically contiguous memory
		 * doesn't have a requirement to make sure we 'fill' a page
		 */
		if ( !(flags & CL_DEV_MEMORY) && trans_count >= max_trans_count &&
		    ((upl_offset + io_size) & PAGE_MASK)) {
			vm_offset_t aligned_ofs;

			aligned_ofs = (upl_offset + io_size) & ~PAGE_MASK;
			/*
			 * If the io_size does not actually finish off even a
			 * single page we have to keep adding buffers to the
			 * transaction despite having reached the desired limit.
			 *
			 * Eventually we get here with the page being finished
			 * off (and exceeded) and then we truncate the size of
			 * this i/o request so that it is page aligned so that
			 * we can finally issue the i/o on the transaction.
			 */
			if (aligned_ofs > upl_offset) {
				io_size = aligned_ofs - upl_offset;
				pg_count--;
			}
		}

		if ( !(mp->mnt_kern_flag & MNTK_VIRTUALDEV))
			/*
			 * if we're not targeting a virtual device i.e. a disk image
			 * it's safe to dip into the reserve pool since real devices
			 * can complete this I/O request without requiring additional
			 * bufs from the alloc_io_buf pool
			 */
			priv = 1;
		else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT))
			/*
			 * Throttle the speculative IO
			 */
			priv = 0;
		else
			priv = 1;

		cbp = alloc_io_buf(vp, priv);

		if (flags & CL_PAGEOUT) {
			u_int i;

			for (i = 0; i < pg_count; i++) {
				if (buf_invalblkno(vp, lblkno + i, 0) == EBUSY)
					panic("BUSY bp found in cluster_io");
			}
		}
		if (flags & CL_ASYNC) {
			if (buf_setcallback(cbp, (void *)cluster_iodone, callback_arg))
				panic("buf_setcallback failed\n");
		}
		cbp->b_cliodone = (void *)callback;
		cbp->b_flags |= io_flags;

		cbp->b_lblkno = lblkno;
		cbp->b_blkno  = blkno;
		cbp->b_bcount = io_size;

		if (buf_setupl(cbp, upl, upl_offset))
			panic("buf_setupl failed\n");

		cbp->b_trans_next = (buf_t)NULL;

		if ((cbp->b_iostate = (void *)iostate))
			/*
			 * caller wants to track the state of this
			 * io... bump the amount issued against this stream
			 */
			iostate->io_issued += io_size;

		if (flags & CL_READ) {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 26)) | DBG_FUNC_NONE,
				     (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
		}
		else {
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 27)) | DBG_FUNC_NONE,
				     (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
		}

		if (cbp_head) {
			cbp_tail->b_trans_next = cbp;
			cbp_tail = cbp;
		} else {
			cbp_head = cbp;
			cbp_tail = cbp;

			if ( (cbp_head->b_real_bp = real_bp) ) {
				cbp_head->b_flags |= B_NEED_IODONE;
				real_bp = (buf_t)NULL;
			}
		}
		*(buf_t *)(&cbp->b_trans_head) = cbp_head;

		trans_count++;

		upl_offset += io_size;
		f_offset   += io_size;
		size       -= io_size;
		/*
		 * keep track of how much of the original request
		 * that we've actually completed... non_rounded_size
		 * may go negative due to us rounding the request
		 * to a page size multiple (i.e. size > non_rounded_size)
		 */
		non_rounded_size -= io_size;

		if (non_rounded_size <= 0) {
			/*
			 * we've transferred all of the data in the original
			 * request, but we were unable to complete the tail
			 * of the last page because the file didn't have
			 * an allocation to back that portion... this is ok.
			 */
			size = 0;
		}
		if (size == 0) {
			/*
			 * we have no more I/O to issue, so go
			 * finish the final transaction
			 */
			need_EOT = TRUE;
		} else if ( ((flags & CL_DEV_MEMORY) || (upl_offset & PAGE_MASK) == 0) &&
			    ((flags & CL_ASYNC) || trans_count > max_trans_count) ) {
			/*
			 * I/O directed to physically contiguous memory...
			 * which doesn't have a requirement to make sure we 'fill' a page
			 * or...
			 * the current I/O we've prepared fully
			 * completes the last page in this request
			 * and ...
			 * it's either an ASYNC request or
			 * we've already accumulated more than 8 I/O's into
			 * this transaction so mark it as complete so that
			 * it can finish asynchronously or via the cluster_complete_transaction
			 * below if the request is synchronous
			 */
			need_EOT = TRUE;
		}
		if (need_EOT == TRUE)
			cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0);

		if (flags & CL_THROTTLE)
			(void)vnode_waitforwrites(vp, async_throttle, 0, 0, "cluster_io");

		if ( !(io_flags & B_READ))
			vnode_startwrite(vp);

		(void) VNOP_STRATEGY(cbp);

		if (need_EOT == TRUE) {
			if ( !(flags & CL_ASYNC))
				cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 1);

			need_EOT = FALSE;
			trans_count = 0;
			cbp_head = NULL;
		}
	}
	if (error) {
		int abort_size;

		io_size = 0;

		if (cbp_head) {
			/*
			 * first wait until all of the outstanding I/O
			 * for this partial transaction has completed
			 */
			cluster_wait_IO(cbp_head, (flags & CL_ASYNC));

			/*
			 * Rewind the upl offset to the beginning of the
			 * transaction.
			 */
			upl_offset = cbp_head->b_uploffset;

			for (cbp = cbp_head; cbp;) {
				buf_t	cbp_next;

				size += cbp->b_bcount;
				io_size += cbp->b_bcount;

				cbp_next = cbp->b_trans_next;
				free_io_buf(cbp);
				cbp = cbp_next;
			}
		}
		if (iostate) {
			int need_wakeup = 0;

			/*
			 * update the error condition for this stream
			 * since we never really issued the io
			 * just go ahead and adjust it back
			 */
			lck_mtx_lock_spin(cl_mtxp);

			if (iostate->io_error == 0)
				iostate->io_error = error;
			iostate->io_issued -= io_size;

			if (iostate->io_wanted) {
				/*
				 * someone is waiting for the state of
				 * this io stream to change
				 */
				iostate->io_wanted = 0;
				need_wakeup = 1;
			}
			lck_mtx_unlock(cl_mtxp);

			if (need_wakeup)
				wakeup((caddr_t)&iostate->io_wanted);
		}
		if (flags & CL_COMMIT) {
			int	upl_flags;

			pg_offset  = upl_offset & PAGE_MASK;
			abort_size = (upl_end_offset - upl_offset + PAGE_MASK) & ~PAGE_MASK;

			upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, abort_size, error, io_flags);

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE,
				     upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0);
		}
		if (retval == 0)
			retval = error;
	} else if (cbp_head)
		panic("%s(): cbp_head is not NULL.\n", __FUNCTION__);

	if (real_bp) {
		/*
		 * can get here if we either encountered an error
		 * or we completely zero-filled the request and
		 * no I/O was issued
		 */
		if (error) {
			real_bp->b_flags |= B_ERROR;
			real_bp->b_error = error;
		}
		buf_biodone(real_bp);
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_END, (int)f_offset, size, upl_offset, retval, 0);

	return (retval);
}

#define reset_vector_run_state()	\
	issueVectorUPL = vector_upl_offset = vector_upl_index = vector_upl_iosize = vector_upl_size = 0;

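/*
 * issue an accumulated vector UPL through cluster_io; for reads, CL_PRESERVE
 * is cleared when the vectored range is page aligned (no zero-fill needed)
 * and set otherwise
 */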
static int
vector_cluster_io(vnode_t vp, upl_t vector_upl, vm_offset_t vector_upl_offset, off_t v_upl_uio_offset, int vector_upl_iosize,
		  int io_flag, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
{
	vector_upl_set_pagelist(vector_upl);

	if (io_flag & CL_READ) {
		if (vector_upl_offset == 0 && ((vector_upl_iosize & PAGE_MASK) == 0))
			io_flag &= ~CL_PRESERVE; /* don't zero fill */
		else
			io_flag |= CL_PRESERVE; /* zero fill */
	}
	return (cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, real_bp, iostate, callback, callback_arg));

}

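/*
 * start an advisory read of up to 'size' bytes at 'f_offset', clipped to
 * the end of the file; returns the number of pages covered by the prefetch
 */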
static int
cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
{
	int	pages_in_prefetch;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START,
		     (int)f_offset, size, (int)filesize, 0, 0);

	if (f_offset >= filesize) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
			     (int)f_offset, 0, 0, 0, 0);
		return (0);
	}
	if ((off_t)size > (filesize - f_offset))
		size = filesize - f_offset;
	pages_in_prefetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;

	advisory_read_ext(vp, filesize, f_offset, size, callback, callback_arg, bflag);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
		     (int)f_offset + size, pages_in_prefetch, 0, 1, 0);

	return (pages_in_prefetch);
}



static void
cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *rap, int (*callback)(buf_t, void *), void *callback_arg,
		   int bflag)
{
	daddr64_t	r_addr;
	off_t		f_offset;
	int		size_of_prefetch;
	u_int		max_prefetch;


	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START,
		     (int)extent->b_addr, (int)extent->e_addr, (int)rap->cl_lastr, 0, 0);

	if (extent->b_addr == rap->cl_lastr && extent->b_addr == extent->e_addr) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
			     rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 0, 0);
		return;
	}
	if (rap->cl_lastr == -1 || (extent->b_addr != rap->cl_lastr && extent->b_addr != (rap->cl_lastr + 1))) {
		rap->cl_ralen = 0;
		rap->cl_maxra = 0;

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
			     rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 1, 0);

		return;
	}
	max_prefetch = MAX_PREFETCH(vp, cluster_max_io_size(vp->v_mount, CL_READ));

	if (extent->e_addr < rap->cl_maxra) {
		if ((rap->cl_maxra - extent->e_addr) > ((max_prefetch / PAGE_SIZE) / 4)) {

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
				     rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 2, 0);
			return;
		}
	}
	r_addr = max(extent->e_addr, rap->cl_maxra) + 1;
	f_offset = (off_t)(r_addr * PAGE_SIZE_64);

	size_of_prefetch = 0;

	ubc_range_op(vp, f_offset, f_offset + PAGE_SIZE_64, UPL_ROP_PRESENT, &size_of_prefetch);

	if (size_of_prefetch) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
			     rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 3, 0);
		return;
	}
	if (f_offset < filesize) {
		daddr64_t read_size;

		rap->cl_ralen = rap->cl_ralen ? min(max_prefetch / PAGE_SIZE, rap->cl_ralen << 1) : 1;

		read_size = (extent->e_addr + 1) - extent->b_addr;

		if (read_size > rap->cl_ralen) {
			if (read_size > max_prefetch / PAGE_SIZE)
				rap->cl_ralen = max_prefetch / PAGE_SIZE;
			else
				rap->cl_ralen = read_size;
		}
		size_of_prefetch = cluster_read_prefetch(vp, f_offset, rap->cl_ralen * PAGE_SIZE, filesize, callback, callback_arg, bflag);

		if (size_of_prefetch)
			rap->cl_maxra = (r_addr + size_of_prefetch) - 1;
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
		     rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 4, 0);
}


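/*
 * cluster_pageout / cluster_pageout_ext: push a UPL of dirty pages on
 * behalf of the pager; the request is validated and clipped against the
 * file size and then handed to cluster_io with pageout (and, for real
 * devices, throttle) flags
 */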
9bccf70c 1628int
b0d623f7 1629cluster_pageout(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
91447636 1630 int size, off_t filesize, int flags)
2d21ac55
A
1631{
1632 return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
1633
1634}
1635
1636
1637int
b0d623f7 1638cluster_pageout_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2d21ac55 1639 int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
1c79356b
A
1640{
1641 int io_size;
55e303ae 1642 int rounded_size;
1c79356b 1643 off_t max_size;
55e303ae
A
1644 int local_flags;
1645
1646 if (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV)
1647 /*
1648 * if we know we're issuing this I/O to a virtual device (i.e. disk image)
1649 * then we don't want to enforce this throttle... if we do, we can
1650 * potentially deadlock since we're stalling the pageout thread at a time
1651 * when the disk image might need additional memory (which won't be available
1652 * if the pageout thread can't run)... instead we'll just depend on the throttle
1653 * that the pageout thread now has in place to deal with external files
1654 */
1655 local_flags = CL_PAGEOUT;
1656 else
1657 local_flags = CL_PAGEOUT | CL_THROTTLE;
1c79356b
A
1658
1659 if ((flags & UPL_IOSYNC) == 0)
1660 local_flags |= CL_ASYNC;
1661 if ((flags & UPL_NOCOMMIT) == 0)
1662 local_flags |= CL_COMMIT;
91447636
A
1663 if ((flags & UPL_KEEPCACHED))
1664 local_flags |= CL_KEEPCACHED;
1c79356b 1665
1c79356b
A
1666
1667 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 52)) | DBG_FUNC_NONE,
1668 (int)f_offset, size, (int)filesize, local_flags, 0);
1669
1670 /*
1671 * If they didn't specify any I/O, then we are done...
1672 * we can't issue an abort because we don't know how
1673 * big the upl really is
1674 */
1675 if (size <= 0)
1676 return (EINVAL);
1677
1678 if (vp->v_mount->mnt_flag & MNT_RDONLY) {
1679 if (local_flags & CL_COMMIT)
9bccf70c 1680 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
1c79356b
A
1681 return (EROFS);
1682 }
1683 /*
 1685 * can't page-out from a negative offset
1685 * or if we're starting beyond the EOF
1686 * or if the file offset isn't page aligned
1687 * or the size requested isn't a multiple of PAGE_SIZE
1688 */
1689 if (f_offset < 0 || f_offset >= filesize ||
1690 (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) {
0b4e3aa0
A
1691 if (local_flags & CL_COMMIT)
1692 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
1c79356b
A
1693 return (EINVAL);
1694 }
1695 max_size = filesize - f_offset;
1696
1697 if (size < max_size)
1698 io_size = size;
1699 else
9bccf70c 1700 io_size = max_size;
1c79356b 1701
55e303ae 1702 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1c79356b 1703
55e303ae 1704 if (size > rounded_size) {
0b4e3aa0 1705 if (local_flags & CL_COMMIT)
55e303ae 1706 ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size,
1c79356b
A
1707 UPL_ABORT_FREE_ON_EMPTY);
1708 }
91447636 1709 return (cluster_io(vp, upl, upl_offset, f_offset, io_size,
2d21ac55 1710 local_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg));
1c79356b
A
1711}
1712
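
/*
 * Minimal usage sketch, assuming a hypothetical filesystem: a VNOP_PAGEOUT
 * handler ordinarily just hands its UPL to cluster_pageout() and lets the
 * cluster layer clip the request to EOF, round to page size and issue the
 * I/O... 'myfs_filesize' and the argument plumbing are illustrative only
 */
static int
myfs_pageout_sketch(vnode_t vp, upl_t upl, upl_offset_t upl_offset,
	off_t f_offset, int size, int flags)
{
	off_t filesize = myfs_filesize(vp);	/* hypothetical helper */

	return (cluster_pageout(vp, upl, upl_offset, f_offset,
		size, filesize, flags));
}
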
2d21ac55 1713
9bccf70c 1714int
b0d623f7 1715cluster_pagein(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
91447636 1716 int size, off_t filesize, int flags)
2d21ac55
A
1717{
1718 return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
1719}
1720
1721
1722int
b0d623f7 1723cluster_pagein_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2d21ac55 1724 int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
1c79356b
A
1725{
1726 u_int io_size;
9bccf70c 1727 int rounded_size;
1c79356b
A
1728 off_t max_size;
1729 int retval;
1730 int local_flags = 0;
1c79356b 1731
9bccf70c
A
1732 if (upl == NULL || size < 0)
1733 panic("cluster_pagein: NULL upl passed in");
1c79356b 1734
9bccf70c
A
1735 if ((flags & UPL_IOSYNC) == 0)
1736 local_flags |= CL_ASYNC;
1c79356b 1737 if ((flags & UPL_NOCOMMIT) == 0)
9bccf70c 1738 local_flags |= CL_COMMIT;
b0d623f7
A
1739 if (flags & UPL_IOSTREAMING)
1740 local_flags |= CL_IOSTREAMING;
9bccf70c 1741
1c79356b
A
1742
1743 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 56)) | DBG_FUNC_NONE,
1744 (int)f_offset, size, (int)filesize, local_flags, 0);
1745
1746 /*
1747 * can't page-in from a negative offset
1748 * or if we're starting beyond the EOF
1749 * or if the file offset isn't page aligned
1750 * or the size requested isn't a multiple of PAGE_SIZE
1751 */
1752 if (f_offset < 0 || f_offset >= filesize ||
9bccf70c
A
1753 (f_offset & PAGE_MASK_64) || (size & PAGE_MASK) || (upl_offset & PAGE_MASK)) {
1754 if (local_flags & CL_COMMIT)
1755 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
1c79356b
A
1756 return (EINVAL);
1757 }
1758 max_size = filesize - f_offset;
1759
1760 if (size < max_size)
1761 io_size = size;
1762 else
9bccf70c 1763 io_size = max_size;
1c79356b 1764
9bccf70c 1765 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1c79356b 1766
9bccf70c
A
1767 if (size > rounded_size && (local_flags & CL_COMMIT))
1768 ubc_upl_abort_range(upl, upl_offset + rounded_size,
55e303ae 1769 size - rounded_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
9bccf70c 1770
91447636 1771 retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
2d21ac55 1772 local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
1c79356b 1773
1c79356b
A
1774 return (retval);
1775}
1776
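
/*
 * Minimal usage sketch, assuming a hypothetical filesystem: a VNOP_PAGEIN
 * handler can forward straight to cluster_pagein()... on a bad offset or
 * size the routine aborts the UPL itself (unless UPL_NOCOMMIT was passed),
 * so the caller only has to propagate the return value...
 * 'myfs_filesize' is a hypothetical helper
 */
static int
myfs_pagein_sketch(vnode_t vp, upl_t upl, upl_offset_t upl_offset,
	off_t f_offset, int size, int flags)
{
	return (cluster_pagein(vp, upl, upl_offset, f_offset,
		size, myfs_filesize(vp), flags));
}
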
2d21ac55 1777
9bccf70c 1778int
91447636 1779cluster_bp(buf_t bp)
2d21ac55
A
1780{
1781 return cluster_bp_ext(bp, NULL, NULL);
1782}
1783
1784
1785int
1786cluster_bp_ext(buf_t bp, int (*callback)(buf_t, void *), void *callback_arg)
1c79356b
A
1787{
1788 off_t f_offset;
1789 int flags;
1790
9bccf70c 1791 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 19)) | DBG_FUNC_START,
b0d623f7 1792 bp, (int)bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
9bccf70c 1793
1c79356b 1794 if (bp->b_flags & B_READ)
9bccf70c 1795 flags = CL_ASYNC | CL_READ;
1c79356b 1796 else
9bccf70c 1797 flags = CL_ASYNC;
2d21ac55
A
1798 if (bp->b_flags & B_PASSIVE)
1799 flags |= CL_PASSIVE;
1c79356b
A
1800
1801 f_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno);
1802
2d21ac55 1803 return (cluster_io(bp->b_vp, bp->b_upl, 0, f_offset, bp->b_bcount, flags, bp, (struct clios *)NULL, callback, callback_arg));
1c79356b
A
1804}
1805
2d21ac55
A
1806
1807
9bccf70c 1808int
91447636 1809cluster_write(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff, int xflags)
1c79356b 1810{
2d21ac55
A
1811 return cluster_write_ext(vp, uio, oldEOF, newEOF, headOff, tailOff, xflags, NULL, NULL);
1812}
1813
1814
1815int
1816cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff,
1817 int xflags, int (*callback)(buf_t, void *), void *callback_arg)
1818{
1819 user_ssize_t cur_resid;
1820 int retval = 0;
1821 int flags;
1822 int zflags;
1823 int bflag;
1824 int write_type = IO_COPY;
1825 u_int32_t write_length;
1c79356b 1826
91447636
A
1827 flags = xflags;
1828
2d21ac55 1829 if (flags & IO_PASSIVE)
b0d623f7 1830 bflag = CL_PASSIVE;
2d21ac55 1831 else
b0d623f7 1832 bflag = 0;
2d21ac55 1833
91447636
A
1834 if (vp->v_flag & VNOCACHE_DATA)
1835 flags |= IO_NOCACHE;
1836
2d21ac55 1837 if (uio == NULL) {
91447636 1838 /*
2d21ac55
A
1839 * no user data...
1840 * this call is being made to zero-fill some range in the file
91447636 1841 */
2d21ac55 1842 retval = cluster_write_copy(vp, NULL, (u_int32_t)0, oldEOF, newEOF, headOff, tailOff, flags, callback, callback_arg);
91447636 1843
2d21ac55 1844 return(retval);
91447636 1845 }
2d21ac55
A
1846 /*
1847 * do a write through the cache if one of the following is true....
 1848 * NOCACHE is not true, or
1849 * the uio request doesn't target USERSPACE
1850 * otherwise, find out if we want the direct or contig variant for
1851 * the first vector in the uio request
1852 */
1853 if ( (flags & IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg) )
1854 retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE);
1855
1856 if ( (flags & (IO_TAILZEROFILL | IO_HEADZEROFILL)) && write_type == IO_DIRECT)
1857 /*
1858 * must go through the cached variant in this case
0b4e3aa0 1859 */
2d21ac55 1860 write_type = IO_COPY;
0b4e3aa0 1861
2d21ac55
A
1862 while ((cur_resid = uio_resid(uio)) && uio->uio_offset < newEOF && retval == 0) {
1863
1864 switch (write_type) {
91447636 1865
2d21ac55 1866 case IO_COPY:
91447636 1867 /*
2d21ac55
A
1868 * make sure the uio_resid isn't too big...
1869 * internally, we want to handle all of the I/O in
1870 * chunk sizes that fit in a 32 bit int
91447636 1871 */
2d21ac55 1872 if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) {
91447636 1873 /*
2d21ac55
A
1874 * we're going to have to call cluster_write_copy
1875 * more than once...
1876 *
1877 * only want the last call to cluster_write_copy to
1878 * have the IO_TAILZEROFILL flag set and only the
1879 * first call should have IO_HEADZEROFILL
91447636 1880 */
2d21ac55
A
1881 zflags = flags & ~IO_TAILZEROFILL;
1882 flags &= ~IO_HEADZEROFILL;
91447636 1883
2d21ac55
A
1884 write_length = MAX_IO_REQUEST_SIZE;
1885 } else {
1886 /*
1887 * last call to cluster_write_copy
91447636 1888 */
2d21ac55
A
1889 zflags = flags;
1890
1891 write_length = (u_int32_t)cur_resid;
1892 }
1893 retval = cluster_write_copy(vp, uio, write_length, oldEOF, newEOF, headOff, tailOff, zflags, callback, callback_arg);
1894 break;
91447636 1895
2d21ac55
A
1896 case IO_CONTIG:
1897 zflags = flags & ~(IO_TAILZEROFILL | IO_HEADZEROFILL);
91447636 1898
2d21ac55
A
1899 if (flags & IO_HEADZEROFILL) {
1900 /*
1901 * only do this once per request
91447636 1902 */
2d21ac55 1903 flags &= ~IO_HEADZEROFILL;
91447636 1904
2d21ac55
A
1905 retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, uio->uio_offset,
1906 headOff, (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
1907 if (retval)
1908 break;
91447636 1909 }
2d21ac55
A
1910 retval = cluster_write_contig(vp, uio, newEOF, &write_type, &write_length, callback, callback_arg, bflag);
1911
1912 if (retval == 0 && (flags & IO_TAILZEROFILL) && uio_resid(uio) == 0) {
1913 /*
1914 * we're done with the data from the user specified buffer(s)
1915 * and we've been requested to zero fill at the tail
1916 * treat this as an IO_HEADZEROFILL which doesn't require a uio
1917 * by rearranging the args and passing in IO_HEADZEROFILL
91447636 1918 */
2d21ac55
A
1919 retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, tailOff, uio->uio_offset,
1920 (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
1921 }
1922 break;
91447636 1923
2d21ac55
A
1924 case IO_DIRECT:
1925 /*
1926 * cluster_write_direct is never called with IO_TAILZEROFILL || IO_HEADZEROFILL
1927 */
1928 retval = cluster_write_direct(vp, uio, oldEOF, newEOF, &write_type, &write_length, flags, callback, callback_arg);
1929 break;
91447636 1930
2d21ac55
A
1931 case IO_UNKNOWN:
1932 retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE);
1933 break;
1934 }
b0d623f7
A
1935 /*
1936 * in case we end up calling cluster_write_copy (from cluster_write_direct)
1937 * multiple times to service a multi-vector request that is not aligned properly
1938 * we need to update the oldEOF so that we
1939 * don't zero-fill the head of a page if we've successfully written
1940 * data to that area... 'cluster_write_copy' will zero-fill the head of a
1941 * page that is beyond the oldEOF if the write is unaligned... we only
1942 * want that to happen for the very first page of the cluster_write,
1943 * NOT the first page of each vector making up a multi-vector write.
1944 */
1945 if (uio->uio_offset > oldEOF)
1946 oldEOF = uio->uio_offset;
2d21ac55
A
1947 }
1948 return (retval);
1c79356b
A
1949}
1950
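
/*
 * Minimal usage sketch, assuming a hypothetical filesystem's VNOP_WRITE:
 * oldEOF/newEOF bracket the file size before and after the write, and
 * headOff plus IO_HEADZEROFILL ask cluster_write to zero-fill any gap
 * between the old EOF and the start of the new data (filesystems such as
 * HFS that can't represent holes rely on this)... the 'myfs_filesize'
 * helper, flag handling and locking are illustrative only
 */
static int
myfs_write_sketch(vnode_t vp, struct uio *uio, int ioflag)
{
	off_t	oldEOF = myfs_filesize(vp);		/* hypothetical helper */
	off_t	newEOF = MAX(oldEOF, uio->uio_offset + uio_resid(uio));
	off_t	zero_head_off = oldEOF;			/* zero-fill from the old EOF... */
	off_t	zero_tail_off = 0;			/* ...no tail zero-fill */
	int	flags = (ioflag & IO_SYNC) ? IO_SYNC : 0;

	if (uio->uio_offset > oldEOF)
		flags |= IO_HEADZEROFILL;

	return (cluster_write(vp, uio, oldEOF, newEOF,
		zero_head_off, zero_tail_off, flags));
}
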
b4c24cb9 1951
9bccf70c 1952static int
2d21ac55
A
1953cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
1954 int flags, int (*callback)(buf_t, void *), void *callback_arg)
1c79356b
A
1955{
1956 upl_t upl;
1957 upl_page_info_t *pl;
1c79356b 1958 vm_offset_t upl_offset;
b0d623f7 1959 vm_offset_t vector_upl_offset = 0;
2d21ac55
A
1960 u_int32_t io_req_size;
1961 u_int32_t offset_in_file;
1962 u_int32_t offset_in_iovbase;
b0d623f7
A
1963 u_int32_t io_size;
1964 int io_flag = 0;
1965 upl_size_t upl_size, vector_upl_size = 0;
2d21ac55
A
1966 vm_size_t upl_needed_size;
1967 mach_msg_type_number_t pages_in_pl;
1c79356b
A
1968 int upl_flags;
1969 kern_return_t kret;
2d21ac55 1970 mach_msg_type_number_t i;
1c79356b 1971 int force_data_sync;
2d21ac55
A
1972 int retval = 0;
1973 int first_IO = 1;
d7e50217 1974 struct clios iostate;
2d21ac55
A
1975 user_addr_t iov_base;
1976 u_int32_t mem_alignment_mask;
1977 u_int32_t devblocksize;
b0d623f7 1978 u_int32_t max_upl_size;
cf7d32b8 1979
b0d623f7
A
1980 u_int32_t vector_upl_iosize = 0;
1981 int issueVectorUPL = 0,useVectorUPL = (uio->uio_iovcnt > 1);
1982 off_t v_upl_uio_offset = 0;
1983 int vector_upl_index=0;
1984 upl_t vector_upl = NULL;
cf7d32b8 1985
1c79356b
A
1986
1987 /*
1988 * When we enter this routine, we know
1c79356b
A
1989 * -- the resid will not exceed iov_len
1990 */
2d21ac55
A
1991 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
1992 (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
91447636 1993
b0d623f7
A
1994 max_upl_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
1995
1996 io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE | CL_DIRECT_IO;
1997
1998 if (flags & IO_PASSIVE)
1999 io_flag |= CL_PASSIVE;
2000
d7e50217
A
2001 iostate.io_completed = 0;
2002 iostate.io_issued = 0;
2003 iostate.io_error = 0;
2004 iostate.io_wanted = 0;
2005
2d21ac55
A
2006 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
2007 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
2008
2009 if (devblocksize == 1) {
2010 /*
2011 * the AFP client advertises a devblocksize of 1
2012 * however, its BLOCKMAP routine maps to physical
2013 * blocks that are PAGE_SIZE in size...
2014 * therefore we can't ask for I/Os that aren't page aligned
2015 * or aren't multiples of PAGE_SIZE in size
2016 * by setting devblocksize to PAGE_SIZE, we re-instate
2017 * the old behavior we had before the mem_alignment_mask
2018 * changes went in...
2019 */
2020 devblocksize = PAGE_SIZE;
2021 }
2022
2023next_dwrite:
2024 io_req_size = *write_length;
2025 iov_base = uio_curriovbase(uio);
cc9f6e38 2026
2d21ac55
A
2027 offset_in_file = (u_int32_t)uio->uio_offset & PAGE_MASK;
2028 offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;
1c79356b 2029
2d21ac55
A
2030 if (offset_in_file || offset_in_iovbase) {
2031 /*
2032 * one of the 2 important offsets is misaligned
2033 * so fire an I/O through the cache for this entire vector
2034 */
2035 goto wait_for_dwrites;
2036 }
2037 if (iov_base & (devblocksize - 1)) {
2038 /*
2039 * the offset in memory must be on a device block boundary
2040 * so that we can guarantee that we can generate an
2041 * I/O that ends on a page boundary in cluster_io
2042 */
2043 goto wait_for_dwrites;
2044 }
1c79356b 2045
2d21ac55
A
2046 while (io_req_size >= PAGE_SIZE && uio->uio_offset < newEOF && retval == 0) {
2047
2048 if (first_IO) {
2049 cluster_syncup(vp, newEOF, callback, callback_arg);
2050 first_IO = 0;
2051 }
2052 io_size = io_req_size & ~PAGE_MASK;
cc9f6e38
A
2053 iov_base = uio_curriovbase(uio);
2054
cf7d32b8
A
2055 if (io_size > max_upl_size)
2056 io_size = max_upl_size;
2d21ac55 2057
b0d623f7
A
2058 if(useVectorUPL && (iov_base & PAGE_MASK)) {
2059 /*
2060 * We have an iov_base that's not page-aligned.
2061 * Issue all I/O's that have been collected within
2062 * this Vectored UPL.
2063 */
2064 if(vector_upl_index) {
2065 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2066 reset_vector_run_state();
2067 }
2068
2069 /*
2070 * After this point, if we are using the Vector UPL path and the base is
2071 * not page-aligned then the UPL with that base will be the first in the vector UPL.
2072 */
2073 }
2074
2d21ac55 2075 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
d7e50217
A
2076 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
2077
2078 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
cc9f6e38 2079 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
d7e50217
A
2080
2081 for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
2082 pages_in_pl = 0;
2083 upl_size = upl_needed_size;
2084 upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
55e303ae 2085 UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
d7e50217
A
2086
2087 kret = vm_map_get_upl(current_map(),
cc9f6e38 2088 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
d7e50217
A
2089 &upl_size,
2090 &upl,
2091 NULL,
2092 &pages_in_pl,
2093 &upl_flags,
2094 force_data_sync);
2095
2096 if (kret != KERN_SUCCESS) {
2097 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
2098 0, 0, 0, kret, 0);
d7e50217 2099 /*
2d21ac55 2100 * failed to get pagelist
d7e50217
A
2101 *
2102 * we may have already spun some portion of this request
2103 * off as async requests... we need to wait for the I/O
2104 * to complete before returning
2105 */
2d21ac55 2106 goto wait_for_dwrites;
d7e50217
A
2107 }
2108 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2109 pages_in_pl = upl_size / PAGE_SIZE;
1c79356b 2110
d7e50217
A
2111 for (i = 0; i < pages_in_pl; i++) {
2112 if (!upl_valid_page(pl, i))
2113 break;
2114 }
2115 if (i == pages_in_pl)
2116 break;
1c79356b 2117
d7e50217
A
2118 /*
2119 * didn't get all the pages back that we
2120 * needed... release this upl and try again
2121 */
2d21ac55 2122 ubc_upl_abort(upl, 0);
1c79356b 2123 }
d7e50217
A
2124 if (force_data_sync >= 3) {
2125 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
2126 i, pages_in_pl, upl_size, kret, 0);
d7e50217
A
2127 /*
2128 * for some reason, we couldn't acquire a hold on all
2129 * the pages needed in the user's address space
2130 *
2131 * we may have already spun some portion of this request
2132 * off as async requests... we need to wait for the I/O
2133 * to complete before returning
2134 */
2d21ac55 2135 goto wait_for_dwrites;
1c79356b 2136 }
0b4e3aa0 2137
d7e50217
A
2138 /*
2139 * Consider the possibility that upl_size wasn't satisfied.
2140 */
2d21ac55
A
2141 if (upl_size < upl_needed_size) {
2142 if (upl_size && upl_offset == 0)
2143 io_size = upl_size;
2144 else
2145 io_size = 0;
2146 }
d7e50217 2147 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
cc9f6e38 2148 (int)upl_offset, upl_size, (int)iov_base, io_size, 0);
1c79356b 2149
d7e50217 2150 if (io_size == 0) {
2d21ac55 2151 ubc_upl_abort(upl, 0);
d7e50217
A
2152 /*
2153 * we may have already spun some portion of this request
2154 * off as async requests... we need to wait for the I/O
2155 * to complete before returning
2156 */
2d21ac55 2157 goto wait_for_dwrites;
d7e50217 2158 }
b0d623f7
A
2159
2160 if(useVectorUPL) {
2161 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
2162 if(end_off)
2163 issueVectorUPL = 1;
2164 /*
2165 * After this point, if we are using a vector UPL, then
2166 * either all the UPL elements end on a page boundary OR
2167 * this UPL is the last element because it does not end
2168 * on a page boundary.
2169 */
2170 }
2d21ac55 2171
d7e50217
A
2172 /*
2173 * Now look for pages already in the cache
2174 * and throw them away.
55e303ae
A
2175 * uio->uio_offset is page aligned within the file
2176 * io_size is a multiple of PAGE_SIZE
d7e50217 2177 */
55e303ae 2178 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + io_size, UPL_ROP_DUMP, NULL);
1c79356b 2179
d7e50217
A
2180 /*
2181 * we want push out these writes asynchronously so that we can overlap
2182 * the preparation of the next I/O
2183 * if there are already too many outstanding writes
2184 * wait until some complete before issuing the next
2185 */
b0d623f7 2186 if (iostate.io_issued > iostate.io_completed) {
91447636 2187
b0d623f7 2188 lck_mtx_lock(cl_mtxp);
cf7d32b8 2189
b0d623f7 2190 while ((iostate.io_issued - iostate.io_completed) > (max_upl_size * IO_SCALE(vp, 2))) {
cf7d32b8 2191
b0d623f7
A
2192 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
2193 iostate.io_issued, iostate.io_completed, max_upl_size * IO_SCALE(vp, 2), 0, 0);
cf7d32b8 2194
b0d623f7
A
2195 iostate.io_wanted = 1;
2196 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_direct", NULL);
91447636 2197
b0d623f7
A
2198 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
2199 iostate.io_issued, iostate.io_completed, max_upl_size * IO_SCALE(vp, 2), 0, 0);
2200 }
2201 lck_mtx_unlock(cl_mtxp);
2202 }
d7e50217
A
2203 if (iostate.io_error) {
2204 /*
2205 * one of the earlier writes we issued ran into a hard error
2206 * don't issue any more writes, cleanup the UPL
2207 * that was just created but not used, then
2208 * go wait for all writes that are part of this stream
2209 * to complete before returning the error to the caller
2210 */
2d21ac55 2211 ubc_upl_abort(upl, 0);
1c79356b 2212
2d21ac55 2213 goto wait_for_dwrites;
d7e50217 2214 }
1c79356b 2215
d7e50217
A
2216 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
2217 (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);
1c79356b 2218
b0d623f7
A
2219 if(!useVectorUPL)
2220 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
2d21ac55 2221 io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
7b1edb79 2222
b0d623f7
A
2223 else {
2224 if(!vector_upl_index) {
2225 vector_upl = vector_upl_create(upl_offset);
2226 v_upl_uio_offset = uio->uio_offset;
2227 vector_upl_offset = upl_offset;
2228 }
2229
2230 vector_upl_set_subupl(vector_upl,upl,upl_size);
2231 vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
2232 vector_upl_index++;
2233 vector_upl_iosize += io_size;
2234 vector_upl_size += upl_size;
2235
2236 if(issueVectorUPL || vector_upl_index == MAX_VECTOR_UPL_ELEMENTS || vector_upl_size >= MAX_VECTOR_UPL_SIZE) {
2237 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2238 reset_vector_run_state();
2239 }
2240 }
2241
2d21ac55
A
2242 /*
2243 * update the uio structure to
2244 * reflect the I/O that we just issued
2245 */
cc9f6e38 2246 uio_update(uio, (user_size_t)io_size);
1c79356b 2247
b0d623f7
A
2248 /*
2249 * in case we end up calling through to cluster_write_copy to finish
2250 * the tail of this request, we need to update the oldEOF so that we
2251 * don't zero-fill the head of a page if we've successfully written
2252 * data to that area... 'cluster_write_copy' will zero-fill the head of a
2253 * page that is beyond the oldEOF if the write is unaligned... we only
2254 * want that to happen for the very first page of the cluster_write,
2255 * NOT the first page of each vector making up a multi-vector write.
2256 */
2257 if (uio->uio_offset > oldEOF)
2258 oldEOF = uio->uio_offset;
2259
2d21ac55
A
2260 io_req_size -= io_size;
2261
d7e50217 2262 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
2d21ac55 2263 (int)upl_offset, (int)uio->uio_offset, io_req_size, retval, 0);
1c79356b
A
2264
2265 } /* end while */
2266
2d21ac55 2267 if (retval == 0 && iostate.io_error == 0 && io_req_size == 0) {
91447636 2268
2d21ac55
A
2269 retval = cluster_io_type(uio, write_type, write_length, MIN_DIRECT_WRITE_SIZE);
2270
2271 if (retval == 0 && *write_type == IO_DIRECT) {
2272
2273 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_NONE,
2274 (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
2275
2276 goto next_dwrite;
2277 }
2278 }
2279
2280wait_for_dwrites:
b0d623f7
A
2281
2282 if(retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
2283 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2284 reset_vector_run_state();
2285 }
2286
2287 if (iostate.io_issued > iostate.io_completed) {
2d21ac55
A
2288 /*
2289 * make sure all async writes issued as part of this stream
2290 * have completed before we return
2291 */
2292 lck_mtx_lock(cl_mtxp);
91447636 2293
2d21ac55 2294 while (iostate.io_issued != iostate.io_completed) {
cf7d32b8 2295 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
b0d623f7 2296 iostate.io_issued, iostate.io_completed, 0, 0, 0);
cf7d32b8 2297
2d21ac55
A
2298 iostate.io_wanted = 1;
2299 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_direct", NULL);
cf7d32b8 2300
b0d623f7
A
2301 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
2302 iostate.io_issued, iostate.io_completed, 0, 0, 0);
2d21ac55
A
2303 }
2304 lck_mtx_unlock(cl_mtxp);
2305 }
d7e50217 2306 if (iostate.io_error)
2d21ac55
A
2307 retval = iostate.io_error;
2308
2309 if (io_req_size && retval == 0) {
2310 /*
2311 * we couldn't handle the tail of this request in DIRECT mode
2312 * so fire it through the copy path
2313 *
2314 * note that flags will never have IO_HEADZEROFILL or IO_TAILZEROFILL set
2315 * so we can just pass 0 in for the headOff and tailOff
2316 */
b0d623f7
A
2317 if (uio->uio_offset > oldEOF)
2318 oldEOF = uio->uio_offset;
2319
2d21ac55 2320 retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg);
1c79356b 2321
2d21ac55
A
2322 *write_type = IO_UNKNOWN;
2323 }
1c79356b 2324 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
2d21ac55 2325 (int)uio->uio_offset, io_req_size, retval, 4, 0);
1c79356b 2326
2d21ac55 2327 return (retval);
1c79356b
A
2328}
2329
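
/*
 * Illustrative sketch of the alignment screen cluster_write_direct() applies
 * before attempting direct I/O: the file offset must be page aligned, the
 * user buffer must satisfy the mount's alignment mask, and the buffer must
 * sit on a device block boundary... anything else falls back to the cached
 * (copy) path... not called anywhere
 */
static boolean_t
cluster_dwrite_aligned_sketch(off_t f_offset, user_addr_t iov_base,
	u_int32_t mem_alignment_mask, u_int32_t devblocksize)
{
	if ((u_int32_t)f_offset & PAGE_MASK)
		return (FALSE);
	if ((u_int32_t)iov_base & mem_alignment_mask)
		return (FALSE);
	if (iov_base & (devblocksize - 1))
		return (FALSE);
	return (TRUE);
}
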
b4c24cb9 2330
9bccf70c 2331static int
2d21ac55
A
2332cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, u_int32_t *write_length,
2333 int (*callback)(buf_t, void *), void *callback_arg, int bflag)
0b4e3aa0 2334{
b4c24cb9 2335 upl_page_info_t *pl;
2d21ac55
A
2336 addr64_t src_paddr = 0;
2337 upl_t upl[MAX_VECTS];
0b4e3aa0 2338 vm_offset_t upl_offset;
2d21ac55
A
2339 u_int32_t tail_size = 0;
2340 u_int32_t io_size;
2341 u_int32_t xsize;
b0d623f7 2342 upl_size_t upl_size;
2d21ac55
A
2343 vm_size_t upl_needed_size;
2344 mach_msg_type_number_t pages_in_pl;
0b4e3aa0
A
2345 int upl_flags;
2346 kern_return_t kret;
2d21ac55 2347 struct clios iostate;
0b4e3aa0 2348 int error = 0;
2d21ac55
A
2349 int cur_upl = 0;
2350 int num_upl = 0;
2351 int n;
cc9f6e38 2352 user_addr_t iov_base;
2d21ac55
A
2353 u_int32_t devblocksize;
2354 u_int32_t mem_alignment_mask;
0b4e3aa0
A
2355
2356 /*
2357 * When we enter this routine, we know
2d21ac55
A
2358 * -- the io_req_size will not exceed iov_len
2359 * -- the target address is physically contiguous
0b4e3aa0 2360 */
2d21ac55 2361 cluster_syncup(vp, newEOF, callback, callback_arg);
0b4e3aa0 2362
2d21ac55
A
2363 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
2364 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
91447636 2365
2d21ac55
A
2366 iostate.io_completed = 0;
2367 iostate.io_issued = 0;
2368 iostate.io_error = 0;
2369 iostate.io_wanted = 0;
2370
2371next_cwrite:
2372 io_size = *write_length;
91447636 2373
cc9f6e38
A
2374 iov_base = uio_curriovbase(uio);
2375
2d21ac55 2376 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
0b4e3aa0
A
2377 upl_needed_size = upl_offset + io_size;
2378
2379 pages_in_pl = 0;
2380 upl_size = upl_needed_size;
9bccf70c 2381 upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
55e303ae 2382 UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
0b4e3aa0
A
2383
2384 kret = vm_map_get_upl(current_map(),
cc9f6e38 2385 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
2d21ac55 2386 &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
0b4e3aa0 2387
b4c24cb9
A
2388 if (kret != KERN_SUCCESS) {
2389 /*
2d21ac55 2390 * failed to get pagelist
b4c24cb9 2391 */
2d21ac55
A
2392 error = EINVAL;
2393 goto wait_for_cwrites;
b4c24cb9 2394 }
2d21ac55
A
2395 num_upl++;
2396
0b4e3aa0
A
2397 /*
2398 * Consider the possibility that upl_size wasn't satisfied.
0b4e3aa0 2399 */
b4c24cb9 2400 if (upl_size < upl_needed_size) {
2d21ac55
A
2401 /*
2402 * This is a failure in the physical memory case.
2403 */
2404 error = EINVAL;
2405 goto wait_for_cwrites;
b4c24cb9 2406 }
2d21ac55 2407 pl = ubc_upl_pageinfo(upl[cur_upl]);
0b4e3aa0 2408
cc9f6e38 2409 src_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)upl_offset;
0b4e3aa0 2410
b4c24cb9 2411 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
2d21ac55 2412 u_int32_t head_size;
0b4e3aa0 2413
2d21ac55 2414 head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
0b4e3aa0 2415
b4c24cb9
A
2416 if (head_size > io_size)
2417 head_size = io_size;
2418
2d21ac55 2419 error = cluster_align_phys_io(vp, uio, src_paddr, head_size, 0, callback, callback_arg);
b4c24cb9 2420
2d21ac55
A
2421 if (error)
2422 goto wait_for_cwrites;
b4c24cb9 2423
b4c24cb9
A
2424 upl_offset += head_size;
2425 src_paddr += head_size;
2426 io_size -= head_size;
2d21ac55
A
2427
2428 iov_base += head_size;
2429 }
2430 if ((u_int32_t)iov_base & mem_alignment_mask) {
2431 /*
 2432 * request isn't aligned on a memory boundary
2433 * the underlying DMA engine can handle...
2434 * return an error instead of going through
2435 * the slow copy path since the intent of this
2436 * path is direct I/O from device memory
2437 */
2438 error = EINVAL;
2439 goto wait_for_cwrites;
0b4e3aa0 2440 }
2d21ac55 2441
b4c24cb9
A
2442 tail_size = io_size & (devblocksize - 1);
2443 io_size -= tail_size;
2444
2d21ac55
A
2445 while (io_size && error == 0) {
2446
2447 if (io_size > MAX_IO_CONTIG_SIZE)
2448 xsize = MAX_IO_CONTIG_SIZE;
2449 else
2450 xsize = io_size;
2451 /*
2452 * request asynchronously so that we can overlap
2453 * the preparation of the next I/O... we'll do
2454 * the commit after all the I/O has completed
 2455 * since it's all issued against the same UPL
2456 * if there are already too many outstanding writes
2457 * wait until some have completed before issuing the next
b4c24cb9 2458 */
b0d623f7 2459 if (iostate.io_issued > iostate.io_completed) {
2d21ac55
A
2460 lck_mtx_lock(cl_mtxp);
2461
b0d623f7 2462 while ((iostate.io_issued - iostate.io_completed) > (MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2))) {
cf7d32b8 2463
b0d623f7
A
2464 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
2465 iostate.io_issued, iostate.io_completed, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), 0, 0);
cf7d32b8 2466
2d21ac55
A
2467 iostate.io_wanted = 1;
2468 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_contig", NULL);
cf7d32b8 2469
b0d623f7
A
2470 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
2471 iostate.io_issued, iostate.io_completed, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), 0, 0);
2d21ac55
A
2472 }
2473 lck_mtx_unlock(cl_mtxp);
2474 }
2475 if (iostate.io_error) {
2476 /*
2477 * one of the earlier writes we issued ran into a hard error
2478 * don't issue any more writes...
2479 * go wait for all writes that are part of this stream
2480 * to complete before returning the error to the caller
2481 */
2482 goto wait_for_cwrites;
2483 }
b4c24cb9 2484 /*
2d21ac55 2485 * issue an asynchronous write to cluster_io
b4c24cb9 2486 */
2d21ac55
A
2487 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset,
2488 xsize, CL_DEV_MEMORY | CL_ASYNC | bflag, (buf_t)NULL, (struct clios *)&iostate, callback, callback_arg);
cc9f6e38 2489
2d21ac55
A
2490 if (error == 0) {
2491 /*
2492 * The cluster_io write completed successfully,
2493 * update the uio structure
2494 */
2495 uio_update(uio, (user_size_t)xsize);
b4c24cb9 2496
2d21ac55
A
2497 upl_offset += xsize;
2498 src_paddr += xsize;
2499 io_size -= xsize;
2500 }
b4c24cb9 2501 }
cf7d32b8 2502 if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS) {
2d21ac55
A
2503
2504 error = cluster_io_type(uio, write_type, write_length, 0);
2505
2506 if (error == 0 && *write_type == IO_CONTIG) {
2507 cur_upl++;
2508 goto next_cwrite;
2509 }
2510 } else
2511 *write_type = IO_UNKNOWN;
2512
2513wait_for_cwrites:
b4c24cb9 2514 /*
2d21ac55
A
2515 * make sure all async writes that are part of this stream
2516 * have completed before we proceed
2517 */
b0d623f7
A
2518 if (iostate.io_issued > iostate.io_completed) {
2519
2520 lck_mtx_lock(cl_mtxp);
cf7d32b8 2521
b0d623f7
A
2522 while (iostate.io_issued != iostate.io_completed) {
2523 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
2524 iostate.io_issued, iostate.io_completed, 0, 0, 0);
cf7d32b8 2525
b0d623f7
A
2526 iostate.io_wanted = 1;
2527 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_contig", NULL);
2d21ac55 2528
b0d623f7
A
2529 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
2530 iostate.io_issued, iostate.io_completed, 0, 0, 0);
2531 }
2532 lck_mtx_unlock(cl_mtxp);
2533 }
2d21ac55
A
2534 if (iostate.io_error)
2535 error = iostate.io_error;
2536
2537 if (error == 0 && tail_size)
2538 error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg);
2539
2540 for (n = 0; n < num_upl; n++)
2541 /*
2542 * just release our hold on each physically contiguous
2543 * region without changing any state
2544 */
2545 ubc_upl_abort(upl[n], 0);
0b4e3aa0
A
2546
2547 return (error);
2548}
2549
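
/*
 * Simplified arithmetic sketch (not called anywhere) of how
 * cluster_write_contig() above carves a request into an unaligned head, a
 * device-block-aligned middle and an unaligned tail... the head and tail go
 * through cluster_align_phys_io(), only the middle is issued directly...
 * the real loop above also handles requests smaller than a device block
 */
static void
cluster_contig_split_sketch(off_t f_offset, u_int32_t io_size,
	u_int32_t devblocksize, u_int32_t *head, u_int32_t *middle, u_int32_t *tail)
{
	*head = 0;
	if (f_offset & (devblocksize - 1)) {
		/* bytes needed to reach the next device block boundary */
		*head = devblocksize - (u_int32_t)(f_offset & (devblocksize - 1));
		if (*head > io_size)
			*head = io_size;
		io_size -= *head;
	}
	*tail = io_size & (devblocksize - 1);
	*middle = io_size - *tail;
}
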
b4c24cb9 2550
b0d623f7
A
2551/*
2552 * need to avoid a race between an msync of a range of pages dirtied via mmap
2553 * vs a filesystem such as HFS deciding to write a 'hole' to disk via cluster_write's
2554 * zerofill mechanism before it has seen the VNOP_PAGEOUTs for the pages being msync'd
2555 *
2556 * we should never force-zero-fill pages that are already valid in the cache...
2557 * the entire page contains valid data (either from disk, zero-filled or dirtied
2558 * via an mmap) so we can only do damage by trying to zero-fill
2559 *
2560 */
2561static int
2562cluster_zero_range(upl_t upl, upl_page_info_t *pl, int flags, int io_offset, off_t zero_off, off_t upl_f_offset, int bytes_to_zero)
2563{
2564 int zero_pg_index;
2565 boolean_t need_cluster_zero = TRUE;
2566
2567 if ((flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
2568
2569 bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64));
2570 zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64);
2571
2572 if (upl_valid_page(pl, zero_pg_index)) {
2573 /*
2574 * never force zero valid pages - dirty or clean
2575 * we'll leave these in the UPL for cluster_write_copy to deal with
2576 */
2577 need_cluster_zero = FALSE;
2578 }
2579 }
2580 if (need_cluster_zero == TRUE)
2581 cluster_zero(upl, io_offset, bytes_to_zero, NULL);
2582
2583 return (bytes_to_zero);
2584}
2585
2586
9bccf70c 2587static int
2d21ac55
A
2588cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, off_t headOff,
2589 off_t tailOff, int flags, int (*callback)(buf_t, void *), void *callback_arg)
1c79356b
A
2590{
2591 upl_page_info_t *pl;
2592 upl_t upl;
91447636 2593 vm_offset_t upl_offset = 0;
2d21ac55 2594 vm_size_t upl_size;
1c79356b
A
2595 off_t upl_f_offset;
2596 int pages_in_upl;
2597 int start_offset;
2598 int xfer_resid;
2599 int io_size;
1c79356b
A
2600 int io_offset;
2601 int bytes_to_zero;
2602 int bytes_to_move;
2603 kern_return_t kret;
2604 int retval = 0;
91447636 2605 int io_resid;
1c79356b
A
2606 long long total_size;
2607 long long zero_cnt;
2608 off_t zero_off;
2609 long long zero_cnt1;
2610 off_t zero_off1;
91447636 2611 struct cl_extent cl;
91447636 2612 struct cl_writebehind *wbp;
2d21ac55 2613 int bflag;
b0d623f7
A
2614 u_int max_cluster_pgcount;
2615 u_int max_io_size;
1c79356b
A
2616
2617 if (uio) {
2618 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
2d21ac55 2619 (int)uio->uio_offset, io_req_size, (int)oldEOF, (int)newEOF, 0);
1c79356b 2620
2d21ac55 2621 io_resid = io_req_size;
1c79356b
A
2622 } else {
2623 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
2624 0, 0, (int)oldEOF, (int)newEOF, 0);
2625
91447636 2626 io_resid = 0;
1c79356b 2627 }
b0d623f7
A
2628 if (flags & IO_PASSIVE)
2629 bflag = CL_PASSIVE;
2630 else
2631 bflag = 0;
2632
1c79356b
A
2633 zero_cnt = 0;
2634 zero_cnt1 = 0;
91447636
A
2635 zero_off = 0;
2636 zero_off1 = 0;
1c79356b 2637
cf7d32b8
A
2638 max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
2639 max_io_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
2640
1c79356b
A
2641 if (flags & IO_HEADZEROFILL) {
2642 /*
2643 * some filesystems (HFS is one) don't support unallocated holes within a file...
2644 * so we zero fill the intervening space between the old EOF and the offset
2645 * where the next chunk of real data begins.... ftruncate will also use this
2646 * routine to zero fill to the new EOF when growing a file... in this case, the
2647 * uio structure will not be provided
2648 */
2649 if (uio) {
2650 if (headOff < uio->uio_offset) {
2651 zero_cnt = uio->uio_offset - headOff;
2652 zero_off = headOff;
2653 }
2654 } else if (headOff < newEOF) {
2655 zero_cnt = newEOF - headOff;
2656 zero_off = headOff;
2657 }
b0d623f7
A
2658 } else {
2659 if (uio && uio->uio_offset > oldEOF) {
2660 zero_off = uio->uio_offset & ~PAGE_MASK_64;
2661
2662 if (zero_off >= oldEOF) {
2663 zero_cnt = uio->uio_offset - zero_off;
2664
2665 flags |= IO_HEADZEROFILL;
2666 }
2667 }
1c79356b
A
2668 }
2669 if (flags & IO_TAILZEROFILL) {
2670 if (uio) {
2d21ac55 2671 zero_off1 = uio->uio_offset + io_req_size;
1c79356b
A
2672
2673 if (zero_off1 < tailOff)
2674 zero_cnt1 = tailOff - zero_off1;
2675 }
b0d623f7
A
2676 } else {
2677 if (uio && newEOF > oldEOF) {
2678 zero_off1 = uio->uio_offset + io_req_size;
2679
2680 if (zero_off1 == newEOF && (zero_off1 & PAGE_MASK_64)) {
2681 zero_cnt1 = PAGE_SIZE_64 - (zero_off1 & PAGE_MASK_64);
2682
2683 flags |= IO_TAILZEROFILL;
2684 }
2685 }
1c79356b 2686 }
55e303ae 2687 if (zero_cnt == 0 && uio == (struct uio *) 0) {
91447636
A
2688 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
2689 retval, 0, 0, 0, 0);
2690 return (0);
55e303ae 2691 }
1c79356b 2692
91447636 2693 while ((total_size = (io_resid + zero_cnt + zero_cnt1)) && retval == 0) {
1c79356b
A
2694 /*
2695 * for this iteration of the loop, figure out where our starting point is
2696 */
2697 if (zero_cnt) {
2698 start_offset = (int)(zero_off & PAGE_MASK_64);
2699 upl_f_offset = zero_off - start_offset;
91447636 2700 } else if (io_resid) {
1c79356b
A
2701 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
2702 upl_f_offset = uio->uio_offset - start_offset;
2703 } else {
2704 start_offset = (int)(zero_off1 & PAGE_MASK_64);
2705 upl_f_offset = zero_off1 - start_offset;
2706 }
2707 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE,
2708 (int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0);
2709
cf7d32b8
A
2710 if (total_size > max_io_size)
2711 total_size = max_io_size;
1c79356b 2712
91447636 2713 cl.b_addr = (daddr64_t)(upl_f_offset / PAGE_SIZE_64);
55e303ae 2714
2d21ac55 2715 if (uio && ((flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0)) {
55e303ae 2716 /*
91447636 2717 * assumption... total_size <= io_resid
55e303ae
A
2718 * because IO_HEADZEROFILL and IO_TAILZEROFILL not set
2719 */
cf7d32b8 2720 if ((start_offset + total_size) > max_io_size)
b7266188 2721 total_size = max_io_size - start_offset;
55e303ae
A
2722 xfer_resid = total_size;
2723
2d21ac55 2724 retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1);
b0d623f7 2725
55e303ae
A
2726 if (retval)
2727 break;
2728
2d21ac55 2729 io_resid -= (total_size - xfer_resid);
55e303ae
A
2730 total_size = xfer_resid;
2731 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
2732 upl_f_offset = uio->uio_offset - start_offset;
2733
2734 if (total_size == 0) {
2735 if (start_offset) {
2736 /*
2737 * the write did not finish on a page boundary
2738 * which will leave upl_f_offset pointing to the
2739 * beginning of the last page written instead of
2740 * the page beyond it... bump it in this case
2741 * so that the cluster code records the last page
2742 * written as dirty
2743 */
2744 upl_f_offset += PAGE_SIZE_64;
2745 }
2746 upl_size = 0;
2747
2748 goto check_cluster;
2749 }
2750 }
1c79356b
A
2751 /*
2752 * compute the size of the upl needed to encompass
2753 * the requested write... limit each call to cluster_io
0b4e3aa0
A
2754 * to the maximum UPL size... cluster_io will clip if
2755 * this exceeds the maximum io_size for the device,
2756 * make sure to account for
1c79356b
A
2757 * a starting offset that's not page aligned
2758 */
2759 upl_size = (start_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
2760
cf7d32b8
A
2761 if (upl_size > max_io_size)
2762 upl_size = max_io_size;
1c79356b
A
2763
2764 pages_in_upl = upl_size / PAGE_SIZE;
2765 io_size = upl_size - start_offset;
2766
2767 if ((long long)io_size > total_size)
2768 io_size = total_size;
2769
55e303ae
A
2770 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);
2771
1c79356b 2772
91447636
A
2773 /*
2774 * Gather the pages from the buffer cache.
2775 * The UPL_WILL_MODIFY flag lets the UPL subsystem know
2776 * that we intend to modify these pages.
2777 */
0b4e3aa0 2778 kret = ubc_create_upl(vp,
91447636
A
2779 upl_f_offset,
2780 upl_size,
2781 &upl,
2782 &pl,
b0d623f7 2783 UPL_SET_LITE | (( uio!=NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 0 : UPL_WILL_MODIFY));
1c79356b 2784 if (kret != KERN_SUCCESS)
2d21ac55 2785 panic("cluster_write_copy: failed to get pagelist");
1c79356b 2786
55e303ae 2787 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END,
b0d623f7 2788 upl, (int)upl_f_offset, start_offset, 0, 0);
1c79356b 2789
b0d623f7 2790 if (start_offset && upl_f_offset < oldEOF && !upl_valid_page(pl, 0)) {
0b4e3aa0 2791 int read_size;
1c79356b 2792
0b4e3aa0 2793 /*
1c79356b
A
2794 * we're starting in the middle of the first page of the upl
2795 * and the page isn't currently valid, so we're going to have
2796 * to read it in first... this is a synchronous operation
2797 */
2798 read_size = PAGE_SIZE;
2799
b0d623f7
A
2800 if ((upl_f_offset + read_size) > oldEOF)
2801 read_size = oldEOF - upl_f_offset;
9bccf70c 2802
91447636 2803 retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
2d21ac55 2804 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
1c79356b 2805 if (retval) {
0b4e3aa0 2806 /*
1c79356b
A
2807 * we had an error during the read which causes us to abort
2808 * the current cluster_write request... before we do, we need
2809 * to release the rest of the pages in the upl without modifying
 2810 * their state and mark the failed page in error
2811 */
935ed37a 2812 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY);
91447636
A
2813
2814 if (upl_size > PAGE_SIZE)
2815 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
1c79356b
A
2816
2817 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
b0d623f7 2818 upl, 0, 0, retval, 0);
1c79356b
A
2819 break;
2820 }
2821 }
2822 if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
2823 /*
2824 * the last offset we're writing to in this upl does not end on a page
2825 * boundary... if it's not beyond the old EOF, then we'll also need to
2826 * pre-read this page in if it isn't already valid
2827 */
2828 upl_offset = upl_size - PAGE_SIZE;
2829
2830 if ((upl_f_offset + start_offset + io_size) < oldEOF &&
2831 !upl_valid_page(pl, upl_offset / PAGE_SIZE)) {
2832 int read_size;
2833
2834 read_size = PAGE_SIZE;
2835
b0d623f7
A
2836 if ((off_t)(upl_f_offset + upl_offset + read_size) > oldEOF)
2837 read_size = oldEOF - (upl_f_offset + upl_offset);
9bccf70c 2838
91447636 2839 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
2d21ac55 2840 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
1c79356b 2841 if (retval) {
0b4e3aa0 2842 /*
1c79356b 2843 * we had an error during the read which causes us to abort
0b4e3aa0
A
2844 * the current cluster_write request... before we do, we
2845 * need to release the rest of the pages in the upl without
 2846 * modifying their state and mark the failed page in error
1c79356b 2847 */
935ed37a 2848 ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY);
91447636
A
2849
2850 if (upl_size > PAGE_SIZE)
2851 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
1c79356b
A
2852
2853 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
b0d623f7 2854 upl, 0, 0, retval, 0);
1c79356b
A
2855 break;
2856 }
2857 }
2858 }
1c79356b
A
2859 xfer_resid = io_size;
2860 io_offset = start_offset;
2861
2862 while (zero_cnt && xfer_resid) {
2863
2864 if (zero_cnt < (long long)xfer_resid)
2865 bytes_to_zero = zero_cnt;
2866 else
2867 bytes_to_zero = xfer_resid;
2868
b0d623f7 2869 bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off, upl_f_offset, bytes_to_zero);
9bccf70c 2870
1c79356b
A
2871 xfer_resid -= bytes_to_zero;
2872 zero_cnt -= bytes_to_zero;
2873 zero_off += bytes_to_zero;
2874 io_offset += bytes_to_zero;
2875 }
91447636 2876 if (xfer_resid && io_resid) {
2d21ac55
A
2877 u_int32_t io_requested;
2878
91447636 2879 bytes_to_move = min(io_resid, xfer_resid);
2d21ac55 2880 io_requested = bytes_to_move;
1c79356b 2881
2d21ac55 2882 retval = cluster_copy_upl_data(uio, upl, io_offset, (int *)&io_requested);
9bccf70c 2883
1c79356b 2884 if (retval) {
9bccf70c 2885 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
1c79356b
A
2886
2887 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
b0d623f7 2888 upl, 0, 0, retval, 0);
1c79356b 2889 } else {
2d21ac55 2890 io_resid -= bytes_to_move;
1c79356b
A
2891 xfer_resid -= bytes_to_move;
2892 io_offset += bytes_to_move;
2893 }
2894 }
2895 while (xfer_resid && zero_cnt1 && retval == 0) {
2896
2897 if (zero_cnt1 < (long long)xfer_resid)
2898 bytes_to_zero = zero_cnt1;
2899 else
2900 bytes_to_zero = xfer_resid;
2901
b0d623f7
A
2902 bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off1, upl_f_offset, bytes_to_zero);
2903
1c79356b
A
2904 xfer_resid -= bytes_to_zero;
2905 zero_cnt1 -= bytes_to_zero;
2906 zero_off1 += bytes_to_zero;
2907 io_offset += bytes_to_zero;
2908 }
1c79356b 2909 if (retval == 0) {
9bccf70c 2910 int cl_index;
2d21ac55 2911 int ret_cluster_try_push;
1c79356b
A
2912
2913 io_size += start_offset;
2914
2d21ac55 2915 if ((upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
1c79356b
A
2916 /*
2917 * if we're extending the file with this write
2918 * we'll zero fill the rest of the page so that
2919 * if the file gets extended again in such a way as to leave a
 2920 * hole starting at this EOF, we'll have zeros in the correct spot
2921 */
55e303ae 2922 cluster_zero(upl, io_size, upl_size - io_size, NULL);
1c79356b 2923 }
935ed37a
A
2924 /*
2925 * release the upl now if we hold one since...
2926 * 1) pages in it may be present in the sparse cluster map
2927 * and may span 2 separate buckets there... if they do and
2928 * we happen to have to flush a bucket to make room and it intersects
2929 * this upl, a deadlock may result on page BUSY
2930 * 2) we're delaying the I/O... from this point forward we're just updating
2931 * the cluster state... no need to hold the pages, so commit them
2932 * 3) IO_SYNC is set...
 2933 * because we had to ask for a UPL that provides currently non-present pages, the
2934 * UPL has been automatically set to clear the dirty flags (both software and hardware)
2935 * upon committing it... this is not the behavior we want since it's possible for
2936 * pages currently present as part of a mapped file to be dirtied while the I/O is in flight.
2937 * we'll pick these pages back up later with the correct behavior specified.
2938 * 4) we don't want to hold pages busy in a UPL and then block on the cluster lock... if a flush
2939 * of this vnode is in progress, we will deadlock if the pages being flushed intersect the pages
2940 * we hold since the flushing context is holding the cluster lock.
2941 */
2942 ubc_upl_commit_range(upl, 0, upl_size,
2943 UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
2944check_cluster:
2945 /*
2946 * calculate the last logical block number
2947 * that this delayed I/O encompassed
2948 */
2949 cl.e_addr = (daddr64_t)((upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64);
2950
b0d623f7 2951 if (flags & IO_SYNC) {
9bccf70c
A
2952 /*
 2953 * if the IO_SYNC flag is set then we need to
2954 * bypass any clusters and immediately issue
2955 * the I/O
2956 */
2957 goto issue_io;
b0d623f7 2958 }
91447636
A
2959 /*
2960 * take the lock to protect our accesses
2961 * of the writebehind and sparse cluster state
2962 */
2963 wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);
2964
91447636 2965 if (wbp->cl_scmap) {
55e303ae 2966
91447636 2967 if ( !(flags & IO_NOCACHE)) {
55e303ae
A
2968 /*
2969 * we've fallen into the sparse
2970 * cluster method of delaying dirty pages
55e303ae 2971 */
b0d623f7 2972 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);
91447636
A
2973
2974 lck_mtx_unlock(&wbp->cl_lockw);
55e303ae
A
2975
2976 continue;
2977 }
2978 /*
2979 * must have done cached writes that fell into
2980 * the sparse cluster mechanism... we've switched
2981 * to uncached writes on the file, so go ahead
2982 * and push whatever's in the sparse map
2983 * and switch back to normal clustering
55e303ae 2984 */
91447636 2985 wbp->cl_number = 0;
935ed37a 2986
b0d623f7 2987 sparse_cluster_push(&(wbp->cl_scmap), vp, newEOF, PUSH_ALL, callback, callback_arg);
55e303ae
A
2988 /*
2989 * no clusters of either type present at this point
2990 * so just go directly to start_new_cluster since
2991 * we know we need to delay this I/O since we've
2992 * already released the pages back into the cache
2993 * to avoid the deadlock with sparse_cluster_push
2994 */
2995 goto start_new_cluster;
2996 }
91447636 2997 if (wbp->cl_number == 0)
9bccf70c
A
2998 /*
2999 * no clusters currently present
3000 */
3001 goto start_new_cluster;
1c79356b 3002
91447636 3003 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
1c79356b 3004 /*
55e303ae
A
3005 * check each cluster that we currently hold
3006 * try to merge some or all of this write into
3007 * one or more of the existing clusters... if
3008 * any portion of the write remains, start a
3009 * new cluster
1c79356b 3010 */
91447636 3011 if (cl.b_addr >= wbp->cl_clusters[cl_index].b_addr) {
9bccf70c
A
3012 /*
3013 * the current write starts at or after the current cluster
3014 */
cf7d32b8 3015 if (cl.e_addr <= (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
1c79356b
A
3016 /*
3017 * we have a write that fits entirely
3018 * within the existing cluster limits
3019 */
91447636 3020 if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr)
1c79356b 3021 /*
9bccf70c 3022 * update our idea of where the cluster ends
1c79356b 3023 */
91447636 3024 wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
9bccf70c 3025 break;
1c79356b 3026 }
cf7d32b8 3027 if (cl.b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
1c79356b
A
3028 /*
3029 * we have a write that starts in the middle of the current cluster
55e303ae
A
3030 * but extends beyond the cluster's limit... we know this because
3031 * of the previous checks
3032 * we'll extend the current cluster to the max
91447636 3033 * and update the b_addr for the current write to reflect that
55e303ae
A
3034 * the head of it was absorbed into this cluster...
3035 * note that we'll always have a leftover tail in this case since
 3036 * full absorption would have occurred in the clause above
1c79356b 3037 */
cf7d32b8 3038 wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount;
55e303ae 3039
91447636 3040 cl.b_addr = wbp->cl_clusters[cl_index].e_addr;
1c79356b
A
3041 }
3042 /*
55e303ae
A
3043 * we come here for the case where the current write starts
3044 * beyond the limit of the existing cluster or we have a leftover
 3045 * tail after a partial absorption
9bccf70c
A
3046 *
3047 * in either case, we'll check the remaining clusters before
3048 * starting a new one
1c79356b 3049 */
9bccf70c 3050 } else {
1c79356b 3051 /*
55e303ae 3052 * the current write starts in front of the cluster we're currently considering
1c79356b 3053 */
cf7d32b8 3054 if ((wbp->cl_clusters[cl_index].e_addr - cl.b_addr) <= max_cluster_pgcount) {
1c79356b 3055 /*
55e303ae
A
3056 * we can just merge the new request into
3057 * this cluster and leave it in the cache
3058 * since the resulting cluster is still
3059 * less than the maximum allowable size
1c79356b 3060 */
91447636 3061 wbp->cl_clusters[cl_index].b_addr = cl.b_addr;
1c79356b 3062
91447636 3063 if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr) {
9bccf70c
A
3064 /*
3065 * the current write completely
55e303ae 3066 * envelops the existing cluster and since
cf7d32b8 3067 * each write is limited to at most max_cluster_pgcount pages
55e303ae
A
3068 * we can just use the start and last blocknos of the write
3069 * to generate the cluster limits
9bccf70c 3070 */
91447636 3071 wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
9bccf70c
A
3072 }
3073 break;
1c79356b 3074 }
9bccf70c 3075
1c79356b 3076 /*
9bccf70c
A
3077 * if we were to combine this write with the current cluster
3078 * we would exceed the cluster size limit.... so,
3079 * let's see if there's any overlap of the new I/O with
55e303ae
A
3080 * the cluster we're currently considering... in fact, we'll
 3081 * stretch the cluster out to its full limit and see if we
3082 * get an intersection with the current write
9bccf70c 3083 *
1c79356b 3084 */
cf7d32b8 3085 if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount) {
1c79356b 3086 /*
55e303ae
A
3087 * the current write extends into the proposed cluster
 3088 * clip the length of the current write after first combining its
3089 * tail with the newly shaped cluster
1c79356b 3090 */
cf7d32b8 3091 wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount;
55e303ae 3092
91447636 3093 cl.e_addr = wbp->cl_clusters[cl_index].b_addr;
55e303ae 3094 }
9bccf70c
A
3095 /*
3096 * if we get here, there was no way to merge
55e303ae
A
3097 * any portion of this write with this cluster
3098 * or we could only merge part of it which
3099 * will leave a tail...
9bccf70c
A
3100 * we'll check the remaining clusters before starting a new one
3101 */
1c79356b 3102 }
9bccf70c 3103 }
91447636 3104 if (cl_index < wbp->cl_number)
9bccf70c 3105 /*
55e303ae
A
3106 * we found an existing cluster(s) that we
3107 * could entirely merge this I/O into
9bccf70c
A
3108 */
3109 goto delay_io;
3110
2d21ac55 3111 if (wbp->cl_number < MAX_CLUSTERS)
9bccf70c
A
3112 /*
3113 * we didn't find an existing cluster to
3114 * merge into, but there's room to start
1c79356b
A
3115 * a new one
3116 */
9bccf70c 3117 goto start_new_cluster;
1c79356b 3118
9bccf70c
A
3119 /*
 3120 * no existing cluster to merge with and no
3121 * room to start a new one... we'll try
55e303ae
A
3122 * pushing one of the existing ones... if none of
3123 * them are able to be pushed, we'll switch
3124 * to the sparse cluster mechanism
91447636 3125 * cluster_try_push updates cl_number to the
55e303ae
A
3126 * number of remaining clusters... and
3127 * returns the number of currently unused clusters
9bccf70c 3128 */
2d21ac55
A
3129 ret_cluster_try_push = 0;
3130
3131 /*
3132 * if writes are not deferred, call cluster push immediately
3133 */
91447636 3134 if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE)) {
91447636 3135
2d21ac55 3136 ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, callback, callback_arg);
91447636 3137 }
9bccf70c 3138
2d21ac55
A
3139 /*
3140 * execute following regardless of writes being deferred or not
3141 */
91447636 3142 if (ret_cluster_try_push == 0) {
55e303ae
A
3143 /*
3144 * no more room in the normal cluster mechanism
3145 * so let's switch to the more expansive but expensive
3146 * sparse mechanism....
55e303ae 3147 */
2d21ac55 3148 sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg);
b0d623f7 3149 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);
91447636
A
3150
3151 lck_mtx_unlock(&wbp->cl_lockw);
55e303ae
A
3152
3153 continue;
9bccf70c 3154 }
55e303ae
A
3155 /*
3156 * we pushed one cluster successfully, so we must be sequentially writing this file
3157 * otherwise, we would have failed and fallen into the sparse cluster support
2d21ac55
A
3158 * so let's take the opportunity to push out additional clusters...
3159 * this will give us better I/O locality if we're in a copy loop
 3160 * (i.e. we won't jump back and forth between the read and write points)
55e303ae 3161 */
91447636 3162 if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE)) {
2d21ac55
A
3163 while (wbp->cl_number)
3164 cluster_try_push(wbp, vp, newEOF, 0, callback, callback_arg);
91447636 3165 }
55e303ae 3166
9bccf70c 3167start_new_cluster:
91447636
A
3168 wbp->cl_clusters[wbp->cl_number].b_addr = cl.b_addr;
3169 wbp->cl_clusters[wbp->cl_number].e_addr = cl.e_addr;
9bccf70c 3170
2d21ac55
A
3171 wbp->cl_clusters[wbp->cl_number].io_flags = 0;
3172
91447636 3173 if (flags & IO_NOCACHE)
2d21ac55
A
3174 wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IONOCACHE;
3175
3176 if (bflag & CL_PASSIVE)
3177 wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IOPASSIVE;
3178
91447636 3179 wbp->cl_number++;
55e303ae 3180delay_io:
91447636
A
3181 lck_mtx_unlock(&wbp->cl_lockw);
3182
9bccf70c
A
3183 continue;
3184issue_io:
3185 /*
935ed37a 3186 * we don't hold the lock at this point
91447636 3187 *
935ed37a 3188 * we've already dropped the current upl, so pick it back up with COPYOUT_FROM set
91447636 3189 * so that we correctly deal with a change in state of the hardware modify bit...
2d21ac55
A
3190 * we do this via cluster_push_now... by passing along the IO_SYNC flag, we force
3191 * cluster_push_now to wait until all the I/Os have completed... cluster_push_now is also
91447636 3192 * responsible for generating the correct sized I/O(s)
9bccf70c 3193 */
2d21ac55 3194 retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg);
1c79356b
A
3195 }
3196 }
2d21ac55 3197 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, retval, 0, io_resid, 0, 0);
1c79356b
A
3198
3199 return (retval);
3200}
3201
2d21ac55
A
3202
3203
9bccf70c 3204int
91447636 3205cluster_read(vnode_t vp, struct uio *uio, off_t filesize, int xflags)
1c79356b 3206{
2d21ac55
A
3207 return cluster_read_ext(vp, uio, filesize, xflags, NULL, NULL);
3208}
3209
3210
3211int
3212cluster_read_ext(vnode_t vp, struct uio *uio, off_t filesize, int xflags, int (*callback)(buf_t, void *), void *callback_arg)
3213{
3214 int retval = 0;
3215 int flags;
3216 user_ssize_t cur_resid;
3217 u_int32_t io_size;
3218 u_int32_t read_length = 0;
3219 int read_type = IO_COPY;
1c79356b 3220
91447636 3221 flags = xflags;
1c79356b 3222
91447636
A
3223 if (vp->v_flag & VNOCACHE_DATA)
3224 flags |= IO_NOCACHE;
2d21ac55 3225 if ((vp->v_flag & VRAOFF) || speculative_reads_disabled)
91447636
A
3226 flags |= IO_RAOFF;
3227
2d21ac55
A
3228 /*
3229 * do a read through the cache if one of the following is true....
3230 * NOCACHE is not true
3231 * the uio request doesn't target USERSPACE
3232 * otherwise, find out if we want the direct or contig variant for
3233 * the first vector in the uio request
3234 */
3235 if ( (flags & IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg) )
3236 retval = cluster_io_type(uio, &read_type, &read_length, 0);
cc9f6e38 3237
2d21ac55 3238 while ((cur_resid = uio_resid(uio)) && uio->uio_offset < filesize && retval == 0) {
91447636 3239
2d21ac55
A
3240 switch (read_type) {
3241
3242 case IO_COPY:
91447636 3243 /*
2d21ac55
A
3244 * make sure the uio_resid isn't too big...
3245 * internally, we want to handle all of the I/O in
3246 * chunk sizes that fit in a 32 bit int
91447636 3247 */
2d21ac55
A
3248 if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE))
3249 io_size = MAX_IO_REQUEST_SIZE;
3250 else
3251 io_size = (u_int32_t)cur_resid;
91447636 3252
2d21ac55
A
3253 retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
3254 break;
1c79356b 3255
2d21ac55
A
3256 case IO_DIRECT:
3257 retval = cluster_read_direct(vp, uio, filesize, &read_type, &read_length, flags, callback, callback_arg);
3258 break;
91447636 3259
2d21ac55
A
3260 case IO_CONTIG:
3261 retval = cluster_read_contig(vp, uio, filesize, &read_type, &read_length, callback, callback_arg, flags);
3262 break;
3263
3264 case IO_UNKNOWN:
3265 retval = cluster_io_type(uio, &read_type, &read_length, 0);
3266 break;
3267 }
3268 }
3269 return (retval);
3270}
91447636 3271
91447636 3272
91447636 3273
2d21ac55 3274static void
b0d623f7 3275cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference)
2d21ac55
A
3276{
3277 int range;
3278 int abort_flags = UPL_ABORT_FREE_ON_EMPTY;
1c79356b 3279
2d21ac55 3280 if ((range = last_pg - start_pg)) {
b0d623f7 3281 if (take_reference)
2d21ac55
A
3282 abort_flags |= UPL_ABORT_REFERENCE;
3283
3284 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, range * PAGE_SIZE, abort_flags);
3285 }
1c79356b
A
3286}
3287
2d21ac55 3288
9bccf70c 3289static int
2d21ac55 3290cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
1c79356b
A
3291{
3292 upl_page_info_t *pl;
3293 upl_t upl;
3294 vm_offset_t upl_offset;
b0d623f7 3295 u_int32_t upl_size;
1c79356b
A
3296 off_t upl_f_offset;
3297 int start_offset;
3298 int start_pg;
3299 int last_pg;
91447636 3300 int uio_last = 0;
1c79356b
A
3301 int pages_in_upl;
3302 off_t max_size;
55e303ae
A
3303 off_t last_ioread_offset;
3304 off_t last_request_offset;
1c79356b 3305 kern_return_t kret;
1c79356b
A
3306 int error = 0;
3307 int retval = 0;
2d21ac55
A
3308 u_int32_t size_of_prefetch;
3309 u_int32_t xsize;
3310 u_int32_t io_size;
cf7d32b8 3311 u_int32_t max_rd_size;
b0d623f7
A
3312 u_int32_t max_io_size;
3313 u_int32_t max_prefetch;
55e303ae
A
3314 u_int rd_ahead_enabled = 1;
3315 u_int prefetch_enabled = 1;
91447636
A
3316 struct cl_readahead * rap;
3317 struct clios iostate;
3318 struct cl_extent extent;
2d21ac55
A
3319 int bflag;
3320 int take_reference = 1;
3321 struct uthread *ut;
3322 int policy = IOPOL_DEFAULT;
3323
b0d623f7
A
3324
3325 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START,
3326 (int)uio->uio_offset, io_req_size, (int)filesize, flags, 0);
3327
2d21ac55
A
3328 policy = current_proc()->p_iopol_disk;
3329
3330 ut = get_bsdthread_info(current_thread());
3331
3332 if (ut->uu_iopol_disk != IOPOL_DEFAULT)
3333 policy = ut->uu_iopol_disk;
3334
b0d623f7 3335 if (policy == IOPOL_THROTTLE || (flags & IO_NOCACHE))
2d21ac55
A
3336 take_reference = 0;
3337
3338 if (flags & IO_PASSIVE)
cf7d32b8 3339 bflag = CL_PASSIVE;
2d21ac55 3340 else
b0d623f7 3341 bflag = 0;
cf7d32b8 3342
b0d623f7
A
3343 max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
3344 max_prefetch = MAX_PREFETCH(vp, max_io_size);
3345 max_rd_size = max_prefetch;
55e303ae 3346
2d21ac55 3347 last_request_offset = uio->uio_offset + io_req_size;
55e303ae 3348
b0d623f7
A
3349 if (last_request_offset > filesize)
3350 last_request_offset = filesize;
3351
2d21ac55 3352 if ((flags & (IO_RAOFF|IO_NOCACHE)) || ((last_request_offset & ~PAGE_MASK_64) == (uio->uio_offset & ~PAGE_MASK_64))) {
55e303ae 3353 rd_ahead_enabled = 0;
91447636
A
3354 rap = NULL;
3355 } else {
b0d623f7 3356 if (cluster_hard_throttle_on(vp, 1)) {
91447636
A
3357 rd_ahead_enabled = 0;
3358 prefetch_enabled = 0;
55e303ae 3359
91447636 3360 max_rd_size = HARD_THROTTLE_MAXSIZE;
b0d623f7
A
3361 } else if (policy == IOPOL_THROTTLE) {
3362 rd_ahead_enabled = 0;
3363 prefetch_enabled = 0;
91447636
A
3364 }
3365 if ((rap = cluster_get_rap(vp)) == NULL)
3366 rd_ahead_enabled = 0;
b0d623f7
A
3367 else {
3368 extent.b_addr = uio->uio_offset / PAGE_SIZE_64;
3369 extent.e_addr = (last_request_offset - 1) / PAGE_SIZE_64;
3370 }
55e303ae 3371 }
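	/*
	 * when read-ahead is a possibility, 'extent' describes this request in
	 * units of whole pages... as an illustrative example (assuming 4K pages),
	 * a read covering bytes [5000, 13000) yields extent.b_addr == 1 and
	 * extent.e_addr == 3, i.e. the first and last page indices touched
	 */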
91447636 3372 if (rap != NULL && rap->cl_ralen && (rap->cl_lastr == extent.b_addr || (rap->cl_lastr + 1) == extent.b_addr)) {
55e303ae
A
3373 /*
3374 * determine if we already have a read-ahead in the pipe courtesy of the
 3375 * last read system call that was issued...
 3376 * if so, pick up its extent to determine where we should start
 3377 * with respect to any read-ahead that might be necessary to
 3378 * garner all the data needed to complete this read system call
3379 */
91447636 3380 last_ioread_offset = (rap->cl_maxra * PAGE_SIZE_64) + PAGE_SIZE_64;
1c79356b 3381
55e303ae
A
3382 if (last_ioread_offset < uio->uio_offset)
3383 last_ioread_offset = (off_t)0;
3384 else if (last_ioread_offset > last_request_offset)
3385 last_ioread_offset = last_request_offset;
3386 } else
3387 last_ioread_offset = (off_t)0;
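	/*
	 * at this point last_ioread_offset is either 0 (no prior read-ahead we
	 * can credit) or the offset just past the data the previous read-ahead
	 * already has in flight, clipped to this request... the copy loop below
	 * uses it to decide when the prefetch stream needs to be replenished
	 */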
1c79356b 3388
2d21ac55 3389 while (io_req_size && uio->uio_offset < filesize && retval == 0) {
b0d623f7
A
3390
3391 max_size = filesize - uio->uio_offset;
1c79356b 3392
2d21ac55
A
3393 if ((off_t)(io_req_size) < max_size)
3394 io_size = io_req_size;
1c79356b
A
3395 else
3396 io_size = max_size;
9bccf70c 3397
91447636 3398 if (!(flags & IO_NOCACHE)) {
1c79356b 3399
55e303ae 3400 while (io_size) {
2d21ac55
A
3401 u_int32_t io_resid;
3402 u_int32_t io_requested;
1c79356b 3403
55e303ae
A
3404 /*
3405 * if we keep finding the pages we need already in the cache, then
2d21ac55 3406 * don't bother to call cluster_read_prefetch since it costs CPU cycles
55e303ae
A
3407 * to determine that we have all the pages we need... once we miss in
 3408 * the cache and have issued an I/O, then we'll assume that we're likely
3409 * to continue to miss in the cache and it's to our advantage to try and prefetch
3410 */
3411 if (last_request_offset && last_ioread_offset && (size_of_prefetch = (last_request_offset - last_ioread_offset))) {
3412 if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) {
3413 /*
3414 * we've already issued I/O for this request and
3415 * there's still work to do and
3416 * our prefetch stream is running dry, so issue a
3417 * pre-fetch I/O... the I/O latency will overlap
3418 * with the copying of the data
3419 */
3420 if (size_of_prefetch > max_rd_size)
3421 size_of_prefetch = max_rd_size;
1c79356b 3422
2d21ac55 3423 size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
1c79356b 3424
55e303ae
A
3425 last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
3426
3427 if (last_ioread_offset > last_request_offset)
3428 last_ioread_offset = last_request_offset;
3429 }
3430 }
3431 /*
3432 * limit the size of the copy we're about to do so that
3433 * we can notice that our I/O pipe is running dry and
3434 * get the next I/O issued before it does go dry
3435 */
cf7d32b8
A
3436 if (last_ioread_offset && io_size > (max_io_size / 4))
3437 io_resid = (max_io_size / 4);
55e303ae
A
3438 else
3439 io_resid = io_size;
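	/*
	 * concrete illustration: if max_io_size were 1 MB (the actual value is
	 * device dependent), the copy out of the cache would proceed in 256K
	 * slices, giving the loop a chance to top off the prefetch stream
	 * between slices rather than only after the whole request is copied
	 */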
1c79356b 3440
55e303ae 3441 io_requested = io_resid;
1c79356b 3442
b0d623f7 3443 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_resid, 0, last_ioread_offset == 0 ? take_reference : 0);
2d21ac55
A
3444
3445 xsize = io_requested - io_resid;
1c79356b 3446
2d21ac55
A
3447 io_size -= xsize;
3448 io_req_size -= xsize;
1c79356b 3449
55e303ae
A
3450 if (retval || io_resid)
3451 /*
3452 * if we run into a real error or
3453 * a page that is not in the cache
3454 * we need to leave streaming mode
3455 */
3456 break;
3457
b0d623f7 3458 if (rd_ahead_enabled && (io_size == 0 || last_ioread_offset == last_request_offset)) {
55e303ae
A
3459 /*
 3460 * we've already finished the I/O for this read request
3461 * let's see if we should do a read-ahead
3462 */
2d21ac55 3463 cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
55e303ae 3464 }
1c79356b 3465 }
1c79356b
A
3466 if (retval)
3467 break;
1c79356b 3468 if (io_size == 0) {
91447636
A
3469 if (rap != NULL) {
3470 if (extent.e_addr < rap->cl_lastr)
3471 rap->cl_maxra = 0;
3472 rap->cl_lastr = extent.e_addr;
3473 }
1c79356b
A
3474 break;
3475 }
b0d623f7
A
3476 /*
3477 * recompute max_size since cluster_copy_ubc_data_internal
3478 * may have advanced uio->uio_offset
3479 */
3480 max_size = filesize - uio->uio_offset;
1c79356b 3481 }
b0d623f7
A
3482 /*
3483 * compute the size of the upl needed to encompass
3484 * the requested read... limit each call to cluster_io
3485 * to the maximum UPL size... cluster_io will clip if
3486 * this exceeds the maximum io_size for the device,
3487 * make sure to account for
3488 * a starting offset that's not page aligned
3489 */
3490 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
3491 upl_f_offset = uio->uio_offset - (off_t)start_offset;
3492
55e303ae
A
3493 if (io_size > max_rd_size)
3494 io_size = max_rd_size;
3495
1c79356b 3496 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
55e303ae 3497
2d21ac55 3498 if (flags & IO_NOCACHE) {
cf7d32b8
A
3499 if (upl_size > max_io_size)
3500 upl_size = max_io_size;
2d21ac55 3501 } else {
cf7d32b8
A
3502 if (upl_size > max_io_size / 4)
3503 upl_size = max_io_size / 4;
2d21ac55 3504 }
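	/*
	 * illustrative example (assuming 4K pages): start_offset == 512 with
	 * io_size == 6000 covers bytes 512..6511 of the upl, so upl_size rounds
	 * up to 8192 (2 pages)... it may then be clipped to max_io_size in the
	 * NOCACHE case, or to a quarter of that in the cached case
	 */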
1c79356b
A
3505 pages_in_upl = upl_size / PAGE_SIZE;
3506
3507 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START,
b0d623f7 3508 upl, (int)upl_f_offset, upl_size, start_offset, 0);
1c79356b 3509
0b4e3aa0 3510 kret = ubc_create_upl(vp,
91447636
A
3511 upl_f_offset,
3512 upl_size,
3513 &upl,
3514 &pl,
2d21ac55 3515 UPL_FILE_IO | UPL_SET_LITE);
1c79356b 3516 if (kret != KERN_SUCCESS)
2d21ac55 3517 panic("cluster_read_copy: failed to get pagelist");
1c79356b 3518
1c79356b 3519 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_END,
b0d623f7 3520 upl, (int)upl_f_offset, upl_size, start_offset, 0);
1c79356b
A
3521
3522 /*
3523 * scan from the beginning of the upl looking for the first
3524 * non-valid page.... this will become the first page in
3525 * the request we're going to make to 'cluster_io'... if all
3526 * of the pages are valid, we won't call through to 'cluster_io'
3527 */
3528 for (start_pg = 0; start_pg < pages_in_upl; start_pg++) {
3529 if (!upl_valid_page(pl, start_pg))
3530 break;
3531 }
3532
3533 /*
3534 * scan from the starting invalid page looking for a valid
3535 * page before the end of the upl is reached, if we
3536 * find one, then it will be the last page of the request to
3537 * 'cluster_io'
3538 */
3539 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
3540 if (upl_valid_page(pl, last_pg))
3541 break;
3542 }
55e303ae
A
3543 iostate.io_completed = 0;
3544 iostate.io_issued = 0;
3545 iostate.io_error = 0;
3546 iostate.io_wanted = 0;
1c79356b
A
3547
3548 if (start_pg < last_pg) {
3549 /*
3550 * we found a range of 'invalid' pages that must be filled
3551 * if the last page in this range is the last page of the file
3552 * we may have to clip the size of it to keep from reading past
3553 * the end of the last physical block associated with the file
3554 */
3555 upl_offset = start_pg * PAGE_SIZE;
3556 io_size = (last_pg - start_pg) * PAGE_SIZE;
3557
b0d623f7 3558 if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize)
1c79356b 3559 io_size = filesize - (upl_f_offset + upl_offset);
9bccf70c 3560
1c79356b 3561 /*
55e303ae 3562 * issue an asynchronous read to cluster_io
1c79356b
A
3563 */
3564
3565 error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
2d21ac55 3566 io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg);
1c79356b
A
3567 }
3568 if (error == 0) {
3569 /*
3570 * if the read completed successfully, or there was no I/O request
55e303ae
A
 3571 * issued, then copy the data into user land via 'cluster_copy_upl_data'
3572 * we'll first add on any 'valid'
1c79356b
A
3573 * pages that were present in the upl when we acquired it.
3574 */
3575 u_int val_size;
1c79356b
A
3576
3577 for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
3578 if (!upl_valid_page(pl, uio_last))
3579 break;
3580 }
2d21ac55
A
3581 if (uio_last < pages_in_upl) {
3582 /*
3583 * there were some invalid pages beyond the valid pages
3584 * that we didn't issue an I/O for, just release them
 3585 * unchanged now, so that any prefetch/readahead can
3586 * include them
3587 */
3588 ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
3589 (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
3590 }
3591
1c79356b 3592 /*
2d21ac55 3593 * compute size to transfer this round, if io_req_size is
55e303ae 3594 * still non-zero after this attempt, we'll loop around and
1c79356b
A
3595 * set up for another I/O.
3596 */
3597 val_size = (uio_last * PAGE_SIZE) - start_offset;
3598
55e303ae 3599 if (val_size > max_size)
1c79356b
A
3600 val_size = max_size;
3601
2d21ac55
A
3602 if (val_size > io_req_size)
3603 val_size = io_req_size;
1c79356b 3604
2d21ac55 3605 if ((uio->uio_offset + val_size) > last_ioread_offset)
55e303ae 3606 last_ioread_offset = uio->uio_offset + val_size;
1c79356b 3607
55e303ae 3608 if ((size_of_prefetch = (last_request_offset - last_ioread_offset)) && prefetch_enabled) {
1c79356b 3609
2d21ac55
A
3610 if ((last_ioread_offset - (uio->uio_offset + val_size)) <= upl_size) {
3611 /*
3612 * if there's still I/O left to do for this request, and...
3613 * we're not in hard throttle mode, and...
3614 * we're close to using up the previous prefetch, then issue a
3615 * new pre-fetch I/O... the I/O latency will overlap
3616 * with the copying of the data
3617 */
3618 if (size_of_prefetch > max_rd_size)
3619 size_of_prefetch = max_rd_size;
3620
3621 size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
3622
3623 last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
55e303ae 3624
2d21ac55
A
3625 if (last_ioread_offset > last_request_offset)
3626 last_ioread_offset = last_request_offset;
3627 }
1c79356b 3628
55e303ae
A
3629 } else if ((uio->uio_offset + val_size) == last_request_offset) {
3630 /*
3631 * this transfer will finish this request, so...
3632 * let's try to read ahead if we're in
3633 * a sequential access pattern and we haven't
3634 * explicitly disabled it
3635 */
3636 if (rd_ahead_enabled)
2d21ac55 3637 cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
91447636
A
3638
3639 if (rap != NULL) {
3640 if (extent.e_addr < rap->cl_lastr)
3641 rap->cl_maxra = 0;
3642 rap->cl_lastr = extent.e_addr;
3643 }
9bccf70c 3644 }
b0d623f7 3645 if (iostate.io_issued > iostate.io_completed) {
91447636 3646
b0d623f7 3647 lck_mtx_lock(cl_mtxp);
cf7d32b8 3648
b0d623f7
A
3649 while (iostate.io_issued != iostate.io_completed) {
3650 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
3651 iostate.io_issued, iostate.io_completed, 0, 0, 0);
cf7d32b8 3652
b0d623f7
A
3653 iostate.io_wanted = 1;
3654 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_copy", NULL);
91447636 3655
b0d623f7
A
3656 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
3657 iostate.io_issued, iostate.io_completed, 0, 0, 0);
3658 }
3659 lck_mtx_unlock(cl_mtxp);
3660 }
55e303ae
A
3661 if (iostate.io_error)
3662 error = iostate.io_error;
2d21ac55
A
3663 else {
3664 u_int32_t io_requested;
3665
3666 io_requested = val_size;
3667
3668 retval = cluster_copy_upl_data(uio, upl, start_offset, (int *)&io_requested);
3669
3670 io_req_size -= (val_size - io_requested);
3671 }
1c79356b
A
3672 }
3673 if (start_pg < last_pg) {
3674 /*
3675 * compute the range of pages that we actually issued an I/O for
3676 * and either commit them as valid if the I/O succeeded
2d21ac55
A
3677 * or abort them if the I/O failed or we're not supposed to
3678 * keep them in the cache
1c79356b
A
3679 */
3680 io_size = (last_pg - start_pg) * PAGE_SIZE;
3681
b0d623f7 3682 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, upl, start_pg * PAGE_SIZE, io_size, error, 0);
1c79356b 3683
91447636 3684 if (error || (flags & IO_NOCACHE))
0b4e3aa0 3685 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
2d21ac55 3686 UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
b0d623f7
A
3687 else {
3688 int commit_flags = UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY;
3689
3690 if (take_reference)
3691 commit_flags |= UPL_COMMIT_INACTIVATE;
3692 else
3693 commit_flags |= UPL_COMMIT_SPECULATE;
1c79356b 3694
b0d623f7
A
3695 ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, commit_flags);
3696 }
3697 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, start_pg * PAGE_SIZE, io_size, error, 0);
1c79356b
A
3698 }
3699 if ((last_pg - start_pg) < pages_in_upl) {
1c79356b
A
3700 /*
3701 * the set of pages that we issued an I/O for did not encompass
3702 * the entire upl... so just release these without modifying
55e303ae 3703 * their state
1c79356b
A
3704 */
3705 if (error)
9bccf70c 3706 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
1c79356b 3707 else {
1c79356b 3708
2d21ac55 3709 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
b0d623f7 3710 upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);
2d21ac55
A
3711
3712 /*
3713 * handle any valid pages at the beginning of
3714 * the upl... release these appropriately
3715 */
b0d623f7 3716 cluster_read_upl_release(upl, 0, start_pg, take_reference);
2d21ac55
A
3717
3718 /*
3719 * handle any valid pages immediately after the
 3720 * pages we issued I/O for... release these appropriately
3721 */
b0d623f7 3722 cluster_read_upl_release(upl, last_pg, uio_last, take_reference);
2d21ac55 3723
b0d623f7 3724 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, -1, -1, 0, 0);
1c79356b
A
3725 }
3726 }
3727 if (retval == 0)
3728 retval = error;
91447636 3729
2d21ac55 3730 if (io_req_size) {
b0d623f7 3731 if (cluster_hard_throttle_on(vp, 1)) {
91447636
A
3732 rd_ahead_enabled = 0;
3733 prefetch_enabled = 0;
3734
3735 max_rd_size = HARD_THROTTLE_MAXSIZE;
3736 } else {
2d21ac55
A
3737 if (max_rd_size == HARD_THROTTLE_MAXSIZE) {
3738 /*
3739 * coming out of throttled state
3740 */
b0d623f7
A
3741 if (policy != IOPOL_THROTTLE) {
3742 if (rap != NULL)
3743 rd_ahead_enabled = 1;
3744 prefetch_enabled = 1;
3745 }
cf7d32b8 3746 max_rd_size = max_prefetch;
2d21ac55
A
3747 last_ioread_offset = 0;
3748 }
91447636
A
3749 }
3750 }
3751 }
3752 if (rap != NULL) {
3753 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
2d21ac55 3754 (int)uio->uio_offset, io_req_size, rap->cl_lastr, retval, 0);
91447636
A
3755
3756 lck_mtx_unlock(&rap->cl_lockr);
3757 } else {
3758 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
2d21ac55 3759 (int)uio->uio_offset, io_req_size, 0, retval, 0);
1c79356b
A
3760 }
3761
3762 return (retval);
3763}
3764
b4c24cb9 3765
9bccf70c 3766static int
2d21ac55
A
3767cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
3768 int flags, int (*callback)(buf_t, void *), void *callback_arg)
1c79356b
A
3769{
3770 upl_t upl;
3771 upl_page_info_t *pl;
2d21ac55 3772 off_t max_io_size;
b0d623f7
A
3773 vm_offset_t upl_offset, vector_upl_offset = 0;
3774 upl_size_t upl_size, vector_upl_size = 0;
2d21ac55
A
3775 vm_size_t upl_needed_size;
3776 unsigned int pages_in_pl;
1c79356b
A
3777 int upl_flags;
3778 kern_return_t kret;
2d21ac55 3779 unsigned int i;
1c79356b 3780 int force_data_sync;
1c79356b 3781 int retval = 0;
91447636 3782 int no_zero_fill = 0;
2d21ac55
A
3783 int io_flag = 0;
3784 int misaligned = 0;
d7e50217 3785 struct clios iostate;
2d21ac55
A
3786 user_addr_t iov_base;
3787 u_int32_t io_req_size;
3788 u_int32_t offset_in_file;
3789 u_int32_t offset_in_iovbase;
3790 u_int32_t io_size;
3791 u_int32_t io_min;
3792 u_int32_t xsize;
3793 u_int32_t devblocksize;
3794 u_int32_t mem_alignment_mask;
b0d623f7
A
3795 u_int32_t max_upl_size;
3796 u_int32_t max_rd_size;
3797 u_int32_t max_rd_ahead;
cf7d32b8 3798
b0d623f7
A
3799 u_int32_t vector_upl_iosize = 0;
3800 int issueVectorUPL = 0,useVectorUPL = (uio->uio_iovcnt > 1);
3801 off_t v_upl_uio_offset = 0;
3802 int vector_upl_index=0;
3803 upl_t vector_upl = NULL;
cf7d32b8 3804
b0d623f7
A
3805 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START,
3806 (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
cf7d32b8 3807
b0d623f7 3808 max_upl_size = cluster_max_io_size(vp->v_mount, CL_READ);
2d21ac55 3809
b0d623f7
A
3810 max_rd_size = max_upl_size;
3811 max_rd_ahead = max_rd_size * IO_SCALE(vp, 2);
1c79356b 3812
b0d623f7
A
3813 io_flag = CL_COMMIT | CL_READ | CL_ASYNC | CL_NOZERO | CL_DIRECT_IO;
3814 if (flags & IO_PASSIVE)
3815 io_flag |= CL_PASSIVE;
1c79356b 3816
d7e50217
A
3817 iostate.io_completed = 0;
3818 iostate.io_issued = 0;
3819 iostate.io_error = 0;
3820 iostate.io_wanted = 0;
3821
2d21ac55
A
3822 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
3823 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
3824
3825 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
3826 (int)devblocksize, (int)mem_alignment_mask, 0, 0, 0);
3827
3828 if (devblocksize == 1) {
3829 /*
3830 * the AFP client advertises a devblocksize of 1
3831 * however, its BLOCKMAP routine maps to physical
3832 * blocks that are PAGE_SIZE in size...
3833 * therefore we can't ask for I/Os that aren't page aligned
3834 * or aren't multiples of PAGE_SIZE in size
3835 * by setting devblocksize to PAGE_SIZE, we re-instate
3836 * the old behavior we had before the mem_alignment_mask
3837 * changes went in...
3838 */
3839 devblocksize = PAGE_SIZE;
3840 }
3841next_dread:
3842 io_req_size = *read_length;
3843 iov_base = uio_curriovbase(uio);
3844
3845 max_io_size = filesize - uio->uio_offset;
3846
3847 if ((off_t)io_req_size > max_io_size)
3848 io_req_size = max_io_size;
3849
3850 offset_in_file = (u_int32_t)uio->uio_offset & (devblocksize - 1);
3851 offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;
3852
3853 if (offset_in_file || offset_in_iovbase) {
3854 /*
3855 * one of the 2 important offsets is misaligned
3856 * so fire an I/O through the cache for this entire vector
3857 */
3858 misaligned = 1;
3859 }
3860 if (iov_base & (devblocksize - 1)) {
3861 /*
3862 * the offset in memory must be on a device block boundary
3863 * so that we can guarantee that we can generate an
3864 * I/O that ends on a page boundary in cluster_io
3865 */
3866 misaligned = 1;
3867 }
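	/*
	 * example of the misaligned case (assuming a 512 byte devblocksize):
	 * a read starting at file offset 1000 leaves offset_in_file == 488,
	 * so this entire vector will be diverted through cluster_read_copy
	 * instead of being issued as direct I/O
	 */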
3868 /*
3869 * When we get to this point, we know...
3870 * -- the offset into the file is on a devblocksize boundary
3871 */
3872
3873 while (io_req_size && retval == 0) {
3874 u_int32_t io_start;
1c79356b 3875
b0d623f7 3876 if (cluster_hard_throttle_on(vp, 1)) {
91447636
A
3877 max_rd_size = HARD_THROTTLE_MAXSIZE;
3878 max_rd_ahead = HARD_THROTTLE_MAXSIZE - 1;
3879 } else {
cf7d32b8 3880 max_rd_size = max_upl_size;
b0d623f7 3881 max_rd_ahead = max_rd_size * IO_SCALE(vp, 2);
91447636 3882 }
2d21ac55 3883 io_start = io_size = io_req_size;
1c79356b 3884
d7e50217
A
3885 /*
3886 * First look for pages already in the cache
3887 * and move them to user space.
2d21ac55
A
3888 *
3889 * cluster_copy_ubc_data returns the resid
3890 * in io_size
d7e50217 3891 */
2d21ac55 3892 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_size, 0, 0);
1c79356b 3893
2d21ac55
A
3894 /*
3895 * calculate the number of bytes actually copied
3896 * starting size - residual
3897 */
3898 xsize = io_start - io_size;
3899
3900 io_req_size -= xsize;
3901
b0d623f7
A
3902 if(useVectorUPL && (xsize || (iov_base & PAGE_MASK))) {
3903 /*
3904 * We found something in the cache or we have an iov_base that's not
3905 * page-aligned.
3906 *
3907 * Issue all I/O's that have been collected within this Vectored UPL.
3908 */
3909 if(vector_upl_index) {
3910 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3911 reset_vector_run_state();
3912 }
3913
3914 if(xsize)
3915 useVectorUPL = 0;
3916
3917 /*
3918 * After this point, if we are using the Vector UPL path and the base is
3919 * not page-aligned then the UPL with that base will be the first in the vector UPL.
3920 */
3921 }
3922
2d21ac55
A
3923 /*
3924 * check to see if we are finished with this request...
3925 */
3926 if (io_req_size == 0 || misaligned) {
3927 /*
3928 * see if there's another uio vector to
3929 * process that's of type IO_DIRECT
3930 *
3931 * break out of while loop to get there
d7e50217 3932 */
2d21ac55 3933 break;
0b4e3aa0 3934 }
d7e50217 3935 /*
2d21ac55 3936 * assume the request ends on a device block boundary
d7e50217 3937 */
2d21ac55
A
3938 io_min = devblocksize;
3939
3940 /*
3941 * we can handle I/O's in multiples of the device block size
3942 * however, if io_size isn't a multiple of devblocksize we
3943 * want to clip it back to the nearest page boundary since
3944 * we are going to have to go through cluster_read_copy to
3945 * deal with the 'overhang'... by clipping it to a PAGE_SIZE
3946 * multiple, we avoid asking the drive for the same physical
3947 * blocks twice.. once for the partial page at the end of the
3948 * request and a 2nd time for the page we read into the cache
3949 * (which overlaps the end of the direct read) in order to
3950 * get at the overhang bytes
3951 */
3952 if (io_size & (devblocksize - 1)) {
3953 /*
3954 * request does NOT end on a device block boundary
3955 * so clip it back to a PAGE_SIZE boundary
3956 */
3957 io_size &= ~PAGE_MASK;
3958 io_min = PAGE_SIZE;
3959 }
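	/*
	 * worked example (assuming 4K pages and a 512 byte devblocksize):
	 * an io_size of 10000 is not a devblocksize multiple, so it is clipped
	 * to 8192... the 1808 byte overhang is left for the copy path, which
	 * avoids reading the same physical blocks twice
	 */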
3960 if (retval || io_size < io_min) {
3961 /*
3962 * either an error or we only have the tail left to
3963 * complete via the copy path...
d7e50217
A
3964 * we may have already spun some portion of this request
3965 * off as async requests... we need to wait for the I/O
3966 * to complete before returning
3967 */
2d21ac55 3968 goto wait_for_dreads;
d7e50217 3969 }
2d21ac55
A
3970 if ((xsize = io_size) > max_rd_size)
3971 xsize = max_rd_size;
55e303ae 3972
d7e50217 3973 io_size = 0;
1c79356b 3974
2d21ac55 3975 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, (int *)&io_size);
55e303ae 3976
2d21ac55 3977 if (io_size == 0) {
d7e50217 3978 /*
2d21ac55
A
3979 * a page must have just come into the cache
3980 * since the first page in this range is no
3981 * longer absent, go back and re-evaluate
d7e50217 3982 */
2d21ac55
A
3983 continue;
3984 }
cc9f6e38 3985 iov_base = uio_curriovbase(uio);
1c79356b 3986
2d21ac55 3987 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
d7e50217 3988 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
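	/*
	 * e.g. (assuming 4K pages) a user buffer whose low 12 bits are 0x123
	 * with io_size == 8192 gives upl_offset == 0x123 and rounds
	 * upl_needed_size up to 12288, since the transfer touches 3 pages
	 */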
1c79356b 3989
d7e50217 3990 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
cc9f6e38 3991 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
1c79356b 3992
0b4c1975 3993 if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0))
91447636 3994 no_zero_fill = 1;
0b4c1975 3995 else
91447636 3996 no_zero_fill = 0;
0b4c1975 3997
d7e50217
A
3998 for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
3999 pages_in_pl = 0;
4000 upl_size = upl_needed_size;
55e303ae 4001 upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
1c79356b 4002
91447636
A
4003 if (no_zero_fill)
4004 upl_flags |= UPL_NOZEROFILL;
4005 if (force_data_sync)
4006 upl_flags |= UPL_FORCE_DATA_SYNC;
4007
91447636 4008 kret = vm_map_create_upl(current_map(),
cc9f6e38 4009 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
91447636 4010 &upl_size, &upl, NULL, &pages_in_pl, &upl_flags);
1c79356b 4011
d7e50217
A
4012 if (kret != KERN_SUCCESS) {
4013 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
4014 (int)upl_offset, upl_size, io_size, kret, 0);
d7e50217 4015 /*
2d21ac55 4016 * failed to get pagelist
d7e50217
A
4017 *
4018 * we may have already spun some portion of this request
4019 * off as async requests... we need to wait for the I/O
4020 * to complete before returning
4021 */
2d21ac55 4022 goto wait_for_dreads;
d7e50217
A
4023 }
4024 pages_in_pl = upl_size / PAGE_SIZE;
4025 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
1c79356b 4026
d7e50217 4027 for (i = 0; i < pages_in_pl; i++) {
0b4c1975 4028 if (!upl_page_present(pl, i))
d7e50217
A
4029 break;
4030 }
4031 if (i == pages_in_pl)
4032 break;
0b4e3aa0 4033
0b4c1975 4034 ubc_upl_abort(upl, 0);
1c79356b 4035 }
d7e50217
A
4036 if (force_data_sync >= 3) {
4037 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
4038 (int)upl_offset, upl_size, io_size, kret, 0);
1c79356b 4039
2d21ac55 4040 goto wait_for_dreads;
d7e50217
A
4041 }
4042 /*
4043 * Consider the possibility that upl_size wasn't satisfied.
4044 */
2d21ac55
A
4045 if (upl_size < upl_needed_size) {
4046 if (upl_size && upl_offset == 0)
4047 io_size = upl_size;
4048 else
4049 io_size = 0;
4050 }
d7e50217 4051 if (io_size == 0) {
0b4c1975 4052 ubc_upl_abort(upl, 0);
2d21ac55 4053 goto wait_for_dreads;
d7e50217
A
4054 }
4055 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
4056 (int)upl_offset, upl_size, io_size, kret, 0);
1c79356b 4057
b0d623f7
A
4058 if(useVectorUPL) {
4059 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
4060 if(end_off)
4061 issueVectorUPL = 1;
4062 /*
4063 * After this point, if we are using a vector UPL, then
4064 * either all the UPL elements end on a page boundary OR
4065 * this UPL is the last element because it does not end
4066 * on a page boundary.
4067 */
4068 }
4069
d7e50217
A
4070 /*
4071 * request asynchronously so that we can overlap
4072 * the preparation of the next I/O
4073 * if there are already too many outstanding reads
4074 * wait until some have completed before issuing the next read
4075 */
b0d623f7 4076 if (iostate.io_issued > iostate.io_completed) {
91447636 4077
b0d623f7 4078 lck_mtx_lock(cl_mtxp);
cf7d32b8 4079
b0d623f7
A
4080 while ((iostate.io_issued - iostate.io_completed) > max_rd_ahead) {
4081 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
4082 iostate.io_issued, iostate.io_completed, max_rd_ahead, 0, 0);
cf7d32b8 4083
b0d623f7
A
4084 iostate.io_wanted = 1;
4085 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_direct", NULL);
4086
4087 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
4088 iostate.io_issued, iostate.io_completed, max_rd_ahead, 0, 0);
4089 }
4090 lck_mtx_unlock(cl_mtxp);
4091 }
d7e50217
A
4092 if (iostate.io_error) {
4093 /*
4094 * one of the earlier reads we issued ran into a hard error
4095 * don't issue any more reads, cleanup the UPL
4096 * that was just created but not used, then
4097 * go wait for any other reads to complete before
4098 * returning the error to the caller
4099 */
0b4c1975 4100 ubc_upl_abort(upl, 0);
1c79356b 4101
2d21ac55 4102 goto wait_for_dreads;
d7e50217
A
4103 }
4104 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START,
b0d623f7 4105 upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
1c79356b 4106
2d21ac55 4107
b0d623f7
A
4108 if(!useVectorUPL) {
4109 if (no_zero_fill)
4110 io_flag &= ~CL_PRESERVE;
4111 else
4112 io_flag |= CL_PRESERVE;
4113
4114 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4115
4116 } else {
1c79356b 4117
b0d623f7
A
4118 if(!vector_upl_index) {
4119 vector_upl = vector_upl_create(upl_offset);
4120 v_upl_uio_offset = uio->uio_offset;
4121 vector_upl_offset = upl_offset;
4122 }
4123
4124 vector_upl_set_subupl(vector_upl,upl, upl_size);
4125 vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
4126 vector_upl_index++;
4127 vector_upl_size += upl_size;
4128 vector_upl_iosize += io_size;
4129
4130 if(issueVectorUPL || vector_upl_index == MAX_VECTOR_UPL_ELEMENTS || vector_upl_size >= MAX_VECTOR_UPL_SIZE) {
4131 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4132 reset_vector_run_state();
4133 }
4134 }
d7e50217
A
4135 /*
4136 * update the uio structure
4137 */
cc9f6e38 4138 uio_update(uio, (user_size_t)io_size);
1c79356b 4139
2d21ac55
A
4140 io_req_size -= io_size;
4141
d7e50217 4142 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END,
b0d623f7 4143 upl, (int)uio->uio_offset, io_req_size, retval, 0);
1c79356b
A
4144
4145 } /* end while */
4146
2d21ac55 4147 if (retval == 0 && iostate.io_error == 0 && io_req_size == 0 && uio->uio_offset < filesize) {
91447636 4148
2d21ac55
A
4149 retval = cluster_io_type(uio, read_type, read_length, 0);
4150
4151 if (retval == 0 && *read_type == IO_DIRECT) {
4152
4153 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
4154 (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
4155
4156 goto next_dread;
4157 }
4158 }
4159
4160wait_for_dreads:
b0d623f7
A
4161
4162 if(retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
4163 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4164 reset_vector_run_state();
4165 }
4166 /*
4167 * make sure all async reads that are part of this stream
4168 * have completed before we return
4169 */
4170 if (iostate.io_issued > iostate.io_completed) {
4171
2d21ac55
A
4172 lck_mtx_lock(cl_mtxp);
4173
4174 while (iostate.io_issued != iostate.io_completed) {
b0d623f7
A
4175 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
4176 iostate.io_issued, iostate.io_completed, 0, 0, 0);
cf7d32b8 4177
2d21ac55
A
4178 iostate.io_wanted = 1;
4179 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_direct", NULL);
cf7d32b8 4180
b0d623f7
A
4181 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
4182 iostate.io_issued, iostate.io_completed, 0, 0, 0);
2d21ac55
A
4183 }
4184 lck_mtx_unlock(cl_mtxp);
4185 }
d7e50217 4186 if (iostate.io_error)
2d21ac55
A
4187 retval = iostate.io_error;
4188
4189 if (io_req_size && retval == 0) {
4190 /*
4191 * we couldn't handle the tail of this request in DIRECT mode
4192 * so fire it through the copy path
4193 */
4194 retval = cluster_read_copy(vp, uio, io_req_size, filesize, flags, callback, callback_arg);
1c79356b 4195
2d21ac55
A
4196 *read_type = IO_UNKNOWN;
4197 }
1c79356b 4198 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
2d21ac55 4199 (int)uio->uio_offset, (int)uio_resid(uio), io_req_size, retval, 0);
1c79356b
A
4200
4201 return (retval);
4202}
4203
4204
9bccf70c 4205static int
2d21ac55
A
4206cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
4207 int (*callback)(buf_t, void *), void *callback_arg, int flags)
0b4e3aa0 4208{
b4c24cb9 4209 upl_page_info_t *pl;
2d21ac55 4210 upl_t upl[MAX_VECTS];
0b4e3aa0 4211 vm_offset_t upl_offset;
2d21ac55 4212 addr64_t dst_paddr = 0;
cc9f6e38 4213 user_addr_t iov_base;
2d21ac55 4214 off_t max_size;
b0d623f7 4215 upl_size_t upl_size;
2d21ac55
A
4216 vm_size_t upl_needed_size;
4217 mach_msg_type_number_t pages_in_pl;
0b4e3aa0
A
4218 int upl_flags;
4219 kern_return_t kret;
b4c24cb9 4220 struct clios iostate;
2d21ac55
A
4221 int error= 0;
4222 int cur_upl = 0;
4223 int num_upl = 0;
4224 int n;
4225 u_int32_t xsize;
4226 u_int32_t io_size;
4227 u_int32_t devblocksize;
4228 u_int32_t mem_alignment_mask;
4229 u_int32_t tail_size = 0;
4230 int bflag;
4231
4232 if (flags & IO_PASSIVE)
b0d623f7 4233 bflag = CL_PASSIVE;
2d21ac55 4234 else
b0d623f7 4235 bflag = 0;
0b4e3aa0
A
4236
4237 /*
4238 * When we enter this routine, we know
2d21ac55
A
4239 * -- the read_length will not exceed the current iov_len
4240 * -- the target address is physically contiguous for read_length
0b4e3aa0 4241 */
2d21ac55 4242 cluster_syncup(vp, filesize, callback, callback_arg);
0b4e3aa0 4243
2d21ac55
A
4244 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
4245 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
91447636 4246
2d21ac55
A
4247 iostate.io_completed = 0;
4248 iostate.io_issued = 0;
4249 iostate.io_error = 0;
4250 iostate.io_wanted = 0;
4251
4252next_cread:
4253 io_size = *read_length;
0b4e3aa0
A
4254
4255 max_size = filesize - uio->uio_offset;
4256
2d21ac55 4257 if (io_size > max_size)
b4c24cb9 4258 io_size = max_size;
0b4e3aa0 4259
2d21ac55
A
4260 iov_base = uio_curriovbase(uio);
4261
4262 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
0b4e3aa0
A
4263 upl_needed_size = upl_offset + io_size;
4264
4265 pages_in_pl = 0;
4266 upl_size = upl_needed_size;
55e303ae 4267 upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
0b4e3aa0 4268
2d21ac55
A
4269
4270 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_START,
4271 (int)upl_offset, (int)upl_size, (int)iov_base, io_size, 0);
4272
0b4e3aa0 4273 kret = vm_map_get_upl(current_map(),
cc9f6e38 4274 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
2d21ac55
A
4275 &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
4276
4277 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_END,
4278 (int)upl_offset, upl_size, io_size, kret, 0);
0b4e3aa0 4279
b4c24cb9
A
4280 if (kret != KERN_SUCCESS) {
4281 /*
2d21ac55 4282 * failed to get pagelist
b4c24cb9 4283 */
2d21ac55
A
4284 error = EINVAL;
4285 goto wait_for_creads;
b4c24cb9 4286 }
2d21ac55
A
4287 num_upl++;
4288
b4c24cb9
A
4289 if (upl_size < upl_needed_size) {
4290 /*
4291 * The upl_size wasn't satisfied.
4292 */
2d21ac55
A
4293 error = EINVAL;
4294 goto wait_for_creads;
b4c24cb9 4295 }
2d21ac55 4296 pl = ubc_upl_pageinfo(upl[cur_upl]);
b4c24cb9 4297
cc9f6e38 4298 dst_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)upl_offset;
0b4e3aa0 4299
b4c24cb9 4300 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
2d21ac55 4301 u_int32_t head_size;
b4c24cb9 4302
2d21ac55 4303 head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
b4c24cb9
A
4304
4305 if (head_size > io_size)
4306 head_size = io_size;
4307
2d21ac55 4308 error = cluster_align_phys_io(vp, uio, dst_paddr, head_size, CL_READ, callback, callback_arg);
b4c24cb9 4309
2d21ac55
A
4310 if (error)
4311 goto wait_for_creads;
b4c24cb9 4312
b4c24cb9
A
4313 upl_offset += head_size;
4314 dst_paddr += head_size;
4315 io_size -= head_size;
2d21ac55
A
4316
4317 iov_base += head_size;
4318 }
4319 if ((u_int32_t)iov_base & mem_alignment_mask) {
4320 /*
 4321 * request isn't aligned to a memory boundary that
4322 * the underlying DMA engine can handle...
4323 * return an error instead of going through
4324 * the slow copy path since the intent of this
4325 * path is direct I/O to device memory
4326 */
4327 error = EINVAL;
4328 goto wait_for_creads;
b4c24cb9 4329 }
2d21ac55 4330
b4c24cb9 4331 tail_size = io_size & (devblocksize - 1);
b4c24cb9 4332
2d21ac55 4333 io_size -= tail_size;
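	/*
	 * any part of the request that doesn't end on a devblocksize boundary
	 * is carved off here as tail_size and transferred at the very end via
	 * cluster_align_phys_io, once all of the full sized device block I/Os
	 * issued below have completed
	 */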
b4c24cb9
A
4334
4335 while (io_size && error == 0) {
b4c24cb9 4336
2d21ac55
A
4337 if (io_size > MAX_IO_CONTIG_SIZE)
4338 xsize = MAX_IO_CONTIG_SIZE;
b4c24cb9
A
4339 else
4340 xsize = io_size;
4341 /*
4342 * request asynchronously so that we can overlap
4343 * the preparation of the next I/O... we'll do
4344 * the commit after all the I/O has completed
4345 * since its all issued against the same UPL
4346 * if there are already too many outstanding reads
d7e50217 4347 * wait until some have completed before issuing the next
b4c24cb9 4348 */
b0d623f7 4349 if (iostate.io_issued > iostate.io_completed) {
2d21ac55 4350 lck_mtx_lock(cl_mtxp);
b4c24cb9 4351
b0d623f7
A
4352 while ((iostate.io_issued - iostate.io_completed) > (MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2))) {
4353 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
4354 iostate.io_issued, iostate.io_completed, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), 0, 0);
cf7d32b8 4355
2d21ac55
A
4356 iostate.io_wanted = 1;
4357 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_contig", NULL);
cf7d32b8 4358
b0d623f7
A
4359 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
4360 iostate.io_issued, iostate.io_completed, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), 0, 0);
2d21ac55
A
4361 }
4362 lck_mtx_unlock(cl_mtxp);
4363 }
4364 if (iostate.io_error) {
4365 /*
4366 * one of the earlier reads we issued ran into a hard error
4367 * don't issue any more reads...
4368 * go wait for any other reads to complete before
4369 * returning the error to the caller
4370 */
4371 goto wait_for_creads;
4372 }
4373 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, xsize,
4374 CL_READ | CL_NOZERO | CL_DEV_MEMORY | CL_ASYNC | bflag,
4375 (buf_t)NULL, &iostate, callback, callback_arg);
b4c24cb9
A
4376 /*
4377 * The cluster_io read was issued successfully,
4378 * update the uio structure
4379 */
4380 if (error == 0) {
cc9f6e38
A
4381 uio_update(uio, (user_size_t)xsize);
4382
4383 dst_paddr += xsize;
4384 upl_offset += xsize;
4385 io_size -= xsize;
b4c24cb9
A
4386 }
4387 }
2d21ac55
A
4388 if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS && uio->uio_offset < filesize) {
4389
4390 error = cluster_io_type(uio, read_type, read_length, 0);
4391
4392 if (error == 0 && *read_type == IO_CONTIG) {
4393 cur_upl++;
4394 goto next_cread;
4395 }
4396 } else
4397 *read_type = IO_UNKNOWN;
4398
4399wait_for_creads:
0b4e3aa0 4400 /*
d7e50217
A
4401 * make sure all async reads that are part of this stream
4402 * have completed before we proceed
0b4e3aa0 4403 */
b0d623f7 4404 if (iostate.io_issued > iostate.io_completed) {
91447636 4405
b0d623f7 4406 lck_mtx_lock(cl_mtxp);
cf7d32b8 4407
b0d623f7
A
4408 while (iostate.io_issued != iostate.io_completed) {
4409 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
4410 iostate.io_issued, iostate.io_completed, 0, 0, 0);
cf7d32b8 4411
b0d623f7
A
4412 iostate.io_wanted = 1;
4413 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_contig", NULL);
91447636 4414
b0d623f7
A
4415 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
4416 iostate.io_issued, iostate.io_completed, 0, 0, 0);
4417 }
4418 lck_mtx_unlock(cl_mtxp);
4419 }
91447636 4420 if (iostate.io_error)
b4c24cb9 4421 error = iostate.io_error;
91447636 4422
b4c24cb9 4423 if (error == 0 && tail_size)
2d21ac55 4424 error = cluster_align_phys_io(vp, uio, dst_paddr, tail_size, CL_READ, callback, callback_arg);
0b4e3aa0 4425
2d21ac55
A
4426 for (n = 0; n < num_upl; n++)
4427 /*
4428 * just release our hold on each physically contiguous
4429 * region without changing any state
4430 */
4431 ubc_upl_abort(upl[n], 0);
0b4e3aa0
A
4432
4433 return (error);
4434}
1c79356b 4435
b4c24cb9 4436
2d21ac55
A
4437static int
4438cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length)
4439{
4440 user_size_t iov_len;
4441 user_addr_t iov_base = 0;
4442 upl_t upl;
b0d623f7 4443 upl_size_t upl_size;
2d21ac55
A
4444 int upl_flags;
4445 int retval = 0;
4446
4447 /*
 4448 * skip over any empty vectors
4449 */
4450 uio_update(uio, (user_size_t)0);
4451
4452 iov_len = uio_curriovlen(uio);
4453
b0d623f7 4454 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_START, uio, (int)iov_len, 0, 0, 0);
2d21ac55
A
4455
4456 if (iov_len) {
4457 iov_base = uio_curriovbase(uio);
4458 /*
4459 * make sure the size of the vector isn't too big...
4460 * internally, we want to handle all of the I/O in
4461 * chunk sizes that fit in a 32 bit int
4462 */
4463 if (iov_len > (user_size_t)MAX_IO_REQUEST_SIZE)
4464 upl_size = MAX_IO_REQUEST_SIZE;
4465 else
4466 upl_size = (u_int32_t)iov_len;
4467
4468 upl_flags = UPL_QUERY_OBJECT_TYPE;
4469
4470 if ((vm_map_get_upl(current_map(),
4471 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
4472 &upl_size, &upl, NULL, NULL, &upl_flags, 0)) != KERN_SUCCESS) {
4473 /*
4474 * the user app must have passed in an invalid address
4475 */
4476 retval = EFAULT;
4477 }
4478 if (upl_size == 0)
4479 retval = EFAULT;
4480
4481 *io_length = upl_size;
4482
4483 if (upl_flags & UPL_PHYS_CONTIG)
4484 *io_type = IO_CONTIG;
4485 else if (iov_len >= min_length)
4486 *io_type = IO_DIRECT;
4487 else
4488 *io_type = IO_COPY;
4489 } else {
4490 /*
4491 * nothing left to do for this uio
4492 */
4493 *io_length = 0;
4494 *io_type = IO_UNKNOWN;
4495 }
b0d623f7 4496 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_END, iov_base, *io_type, *io_length, retval, 0);
2d21ac55
A
4497
4498 return (retval);
4499}
4500
4501
1c79356b
A
4502/*
4503 * generate advisory I/O's in the largest chunks possible
4504 * the completed pages will be released into the VM cache
4505 */
9bccf70c 4506int
91447636 4507advisory_read(vnode_t vp, off_t filesize, off_t f_offset, int resid)
2d21ac55
A
4508{
4509 return advisory_read_ext(vp, filesize, f_offset, resid, NULL, NULL, CL_PASSIVE);
4510}
4511
4512int
4513advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
1c79356b 4514{
1c79356b
A
4515 upl_page_info_t *pl;
4516 upl_t upl;
4517 vm_offset_t upl_offset;
b0d623f7 4518 int upl_size;
1c79356b
A
4519 off_t upl_f_offset;
4520 int start_offset;
4521 int start_pg;
4522 int last_pg;
4523 int pages_in_upl;
4524 off_t max_size;
4525 int io_size;
4526 kern_return_t kret;
4527 int retval = 0;
9bccf70c 4528 int issued_io;
55e303ae 4529 int skip_range;
b0d623f7
A
4530 uint32_t max_io_size;
4531
4532
91447636 4533 if ( !UBCINFOEXISTS(vp))
1c79356b
A
4534 return(EINVAL);
4535
ca66cea6
A
4536 if (resid < 0)
4537 return(EINVAL);
4538
cf7d32b8 4539 max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
b0d623f7 4540
1c79356b 4541 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_START,
b0d623f7 4542 (int)f_offset, resid, (int)filesize, 0, 0);
1c79356b
A
4543
4544 while (resid && f_offset < filesize && retval == 0) {
4545 /*
4546 * compute the size of the upl needed to encompass
4547 * the requested read... limit each call to cluster_io
0b4e3aa0
A
4548 * to the maximum UPL size... cluster_io will clip if
4549 * this exceeds the maximum io_size for the device,
4550 * make sure to account for
1c79356b
A
4551 * a starting offset that's not page aligned
4552 */
4553 start_offset = (int)(f_offset & PAGE_MASK_64);
4554 upl_f_offset = f_offset - (off_t)start_offset;
4555 max_size = filesize - f_offset;
4556
4557 if (resid < max_size)
4558 io_size = resid;
4559 else
4560 io_size = max_size;
4561
4562 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
cf7d32b8
A
4563 if ((uint32_t)upl_size > max_io_size)
4564 upl_size = max_io_size;
55e303ae
A
4565
4566 skip_range = 0;
4567 /*
4568 * return the number of contiguously present pages in the cache
4569 * starting at upl_f_offset within the file
4570 */
4571 ubc_range_op(vp, upl_f_offset, upl_f_offset + upl_size, UPL_ROP_PRESENT, &skip_range);
4572
4573 if (skip_range) {
4574 /*
4575 * skip over pages already present in the cache
4576 */
4577 io_size = skip_range - start_offset;
4578
4579 f_offset += io_size;
4580 resid -= io_size;
4581
4582 if (skip_range == upl_size)
4583 continue;
4584 /*
4585 * have to issue some real I/O
4586 * at this point, we know it's starting on a page boundary
4587 * because we've skipped over at least the first page in the request
4588 */
4589 start_offset = 0;
4590 upl_f_offset += skip_range;
4591 upl_size -= skip_range;
4592 }
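	/*
	 * worked example (assuming 4K pages): with start_offset == 512 and
	 * ubc_range_op reporting skip_range == 8192, the first 7680 bytes of
	 * this request are already resident, so f_offset advances to the page
	 * boundary following the cached run and the upl shrinks accordingly
	 */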
1c79356b
A
4593 pages_in_upl = upl_size / PAGE_SIZE;
4594
55e303ae 4595 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_START,
b0d623f7 4596 upl, (int)upl_f_offset, upl_size, start_offset, 0);
55e303ae 4597
0b4e3aa0 4598 kret = ubc_create_upl(vp,
91447636
A
4599 upl_f_offset,
4600 upl_size,
4601 &upl,
4602 &pl,
4603 UPL_RET_ONLY_ABSENT | UPL_SET_LITE);
1c79356b 4604 if (kret != KERN_SUCCESS)
9bccf70c
A
4605 return(retval);
4606 issued_io = 0;
1c79356b
A
4607
4608 /*
9bccf70c
A
4609 * before we start marching forward, we must make sure we end on
4610 * a present page, otherwise we will be working with a freed
4611 * upl
1c79356b 4612 */
9bccf70c
A
4613 for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
4614 if (upl_page_present(pl, last_pg))
4615 break;
1c79356b 4616 }
9bccf70c 4617 pages_in_upl = last_pg + 1;
1c79356b 4618
1c79356b 4619
55e303ae 4620 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_END,
b0d623f7 4621 upl, (int)upl_f_offset, upl_size, start_offset, 0);
9bccf70c
A
4622
4623
4624 for (last_pg = 0; last_pg < pages_in_upl; ) {
1c79356b 4625 /*
9bccf70c
A
4626 * scan from the beginning of the upl looking for the first
4627 * page that is present.... this will become the first page in
4628 * the request we're going to make to 'cluster_io'... if all
4629 * of the pages are absent, we won't call through to 'cluster_io'
1c79356b 4630 */
9bccf70c
A
4631 for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
4632 if (upl_page_present(pl, start_pg))
4633 break;
1c79356b 4634 }
1c79356b 4635
1c79356b 4636 /*
9bccf70c
A
4637 * scan from the starting present page looking for an absent
4638 * page before the end of the upl is reached, if we
4639 * find one, then it will terminate the range of pages being
4640 * presented to 'cluster_io'
1c79356b 4641 */
9bccf70c
A
4642 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
4643 if (!upl_page_present(pl, last_pg))
4644 break;
4645 }
4646
4647 if (last_pg > start_pg) {
4648 /*
4649 * we found a range of pages that must be filled
4650 * if the last page in this range is the last page of the file
4651 * we may have to clip the size of it to keep from reading past
4652 * the end of the last physical block associated with the file
4653 */
4654 upl_offset = start_pg * PAGE_SIZE;
4655 io_size = (last_pg - start_pg) * PAGE_SIZE;
4656
b0d623f7 4657 if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize)
9bccf70c
A
4658 io_size = filesize - (upl_f_offset + upl_offset);
4659
4660 /*
4661 * issue an asynchronous read to cluster_io
4662 */
91447636 4663 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
2d21ac55 4664 CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
1c79356b 4665
9bccf70c
A
4666 issued_io = 1;
4667 }
1c79356b 4668 }
9bccf70c
A
4669 if (issued_io == 0)
4670 ubc_upl_abort(upl, 0);
4671
4672 io_size = upl_size - start_offset;
1c79356b
A
4673
4674 if (io_size > resid)
4675 io_size = resid;
4676 f_offset += io_size;
4677 resid -= io_size;
4678 }
9bccf70c 4679
1c79356b
A
4680 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_END,
4681 (int)f_offset, resid, retval, 0, 0);
4682
4683 return(retval);
4684}
4685
4686
9bccf70c 4687int
91447636 4688cluster_push(vnode_t vp, int flags)
2d21ac55
A
4689{
4690 return cluster_push_ext(vp, flags, NULL, NULL);
4691}
4692
4693
4694int
4695cluster_push_ext(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg)
9bccf70c 4696{
91447636 4697 int retval;
b0d623f7 4698 int my_sparse_wait = 0;
91447636 4699 struct cl_writebehind *wbp;
9bccf70c 4700
91447636 4701 if ( !UBCINFOEXISTS(vp)) {
b0d623f7 4702 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, vp, flags, 0, -1, 0);
91447636
A
4703 return (0);
4704 }
4705 /* return if deferred write is set */
4706 if (((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE) && (flags & IO_DEFWRITE)) {
4707 return (0);
4708 }
4709 if ((wbp = cluster_get_wbp(vp, CLW_RETURNLOCKED)) == NULL) {
b0d623f7 4710 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, vp, flags, 0, -2, 0);
91447636
A
4711 return (0);
4712 }
4713 if (wbp->cl_number == 0 && wbp->cl_scmap == NULL) {
4714 lck_mtx_unlock(&wbp->cl_lockw);
9bccf70c 4715
b0d623f7 4716 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, vp, flags, 0, -3, 0);
91447636
A
4717 return(0);
4718 }
9bccf70c 4719 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_START,
b0d623f7
A
4720 wbp->cl_scmap, wbp->cl_number, flags, 0, 0);
4721
4722 /*
4723 * if we have an fsync in progress, we don't want to allow any additional
4724 * sync/fsync/close(s) to occur until it finishes.
 4725 * note that it's possible for writes to continue to occur to this file
4726 * while we're waiting and also once the fsync starts to clean if we're
4727 * in the sparse map case
4728 */
4729 while (wbp->cl_sparse_wait) {
4730 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START, vp, 0, 0, 0, 0);
4731
4732 msleep((caddr_t)&wbp->cl_sparse_wait, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);
4733
4734 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END, vp, 0, 0, 0, 0);
4735 }
4736 if (flags & IO_SYNC) {
4737 my_sparse_wait = 1;
4738 wbp->cl_sparse_wait = 1;
9bccf70c 4739
b0d623f7
A
4740 /*
4741 * this is an fsync (or equivalent)... we must wait for any existing async
 4742 * cleaning operations to complete before we evaluate the current state
 4743 * and finish cleaning... this ensures that all writes issued before this
4744 * fsync actually get cleaned to the disk before this fsync returns
4745 */
4746 while (wbp->cl_sparse_pushes) {
4747 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_START, vp, 0, 0, 0, 0);
4748
4749 msleep((caddr_t)&wbp->cl_sparse_pushes, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);
4750
4751 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_END, vp, 0, 0, 0, 0);
4752 }
4753 }
91447636 4754 if (wbp->cl_scmap) {
b0d623f7
A
4755 void *scmap;
4756
4757 if (wbp->cl_sparse_pushes < SPARSE_PUSH_LIMIT) {
4758
4759 scmap = wbp->cl_scmap;
4760 wbp->cl_scmap = NULL;
4761
4762 wbp->cl_sparse_pushes++;
4763
4764 lck_mtx_unlock(&wbp->cl_lockw);
4765
4766 sparse_cluster_push(&scmap, vp, ubc_getsize(vp), PUSH_ALL | IO_PASSIVE, callback, callback_arg);
4767
4768 lck_mtx_lock(&wbp->cl_lockw);
9bccf70c 4769
b0d623f7
A
4770 wbp->cl_sparse_pushes--;
4771
4772 if (wbp->cl_sparse_wait && wbp->cl_sparse_pushes == 0)
4773 wakeup((caddr_t)&wbp->cl_sparse_pushes);
4774 } else {
4775 sparse_cluster_push(&(wbp->cl_scmap), vp, ubc_getsize(vp), PUSH_ALL | IO_PASSIVE, callback, callback_arg);
4776 }
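	/*
	 * SPARSE_PUSH_LIMIT bounds how many sparse pushes may run with
	 * cl_lockw dropped... each such push detaches wbp->cl_scmap and pushes
	 * it outside the lock, while pushes beyond the limit operate on
	 * wbp->cl_scmap directly with the lock held
	 */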
55e303ae 4777 retval = 1;
b0d623f7 4778 } else {
2d21ac55 4779 retval = cluster_try_push(wbp, vp, ubc_getsize(vp), PUSH_ALL | IO_PASSIVE, callback, callback_arg);
b0d623f7 4780 }
91447636
A
4781 lck_mtx_unlock(&wbp->cl_lockw);
4782
4783 if (flags & IO_SYNC)
2d21ac55 4784 (void)vnode_waitforwrites(vp, 0, 0, 0, "cluster_push");
9bccf70c 4785
b0d623f7
A
4786 if (my_sparse_wait) {
4787 /*
4788 * I'm the owner of the serialization token
4789 * clear it and wakeup anyone that is waiting
4790 * for me to finish
4791 */
4792 lck_mtx_lock(&wbp->cl_lockw);
4793
4794 wbp->cl_sparse_wait = 0;
4795 wakeup((caddr_t)&wbp->cl_sparse_wait);
4796
4797 lck_mtx_unlock(&wbp->cl_lockw);
4798 }
55e303ae 4799 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_END,
b0d623f7 4800 wbp->cl_scmap, wbp->cl_number, retval, 0, 0);
9bccf70c 4801
55e303ae
A
4802 return (retval);
4803}
9bccf70c 4804
9bccf70c 4805
91447636
A
4806__private_extern__ void
4807cluster_release(struct ubc_info *ubc)
55e303ae 4808{
91447636
A
4809 struct cl_writebehind *wbp;
4810 struct cl_readahead *rap;
4811
4812 if ((wbp = ubc->cl_wbehind)) {
9bccf70c 4813
b0d623f7 4814 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, wbp->cl_scmap, 0, 0, 0);
91447636
A
4815
4816 if (wbp->cl_scmap)
4817 vfs_drt_control(&(wbp->cl_scmap), 0);
4818 } else {
b0d623f7 4819 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, 0, 0, 0, 0);
91447636 4820 }
9bccf70c 4821
91447636 4822 rap = ubc->cl_rahead;
55e303ae 4823
91447636
A
4824 if (wbp != NULL) {
4825 lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
4826 FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
4827 }
4828 if ((rap = ubc->cl_rahead)) {
4829 lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp);
4830 FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD);
55e303ae 4831 }
91447636
A
4832 ubc->cl_rahead = NULL;
4833 ubc->cl_wbehind = NULL;
4834
b0d623f7 4835 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_END, ubc, rap, wbp, 0, 0);
91447636
A
4836}
4837
4838
9bccf70c 4839static int
2d21ac55 4840cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_flag, int (*callback)(buf_t, void *), void *callback_arg)
9bccf70c
A
4841{
4842 int cl_index;
4843 int cl_index1;
4844 int min_index;
4845 int cl_len;
55e303ae 4846 int cl_pushed = 0;
91447636 4847 struct cl_wextent l_clusters[MAX_CLUSTERS];
b0d623f7
A
4848 u_int max_cluster_pgcount;
4849
4850
4851 max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
9bccf70c 4852 /*
91447636
A
4853 * the write behind context exists and has
4854 * already been locked...
2d21ac55
A
4855 */
4856 if (wbp->cl_number == 0)
4857 /*
4858 * no clusters to push
4859 * return number of empty slots
4860 */
4861 return (MAX_CLUSTERS);
4862
4863 /*
9bccf70c 4864 * make a local 'sorted' copy of the clusters
91447636 4865 * and clear wbp->cl_number so that new clusters can
9bccf70c
A
4866 * be developed
4867 */
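	/*
	 * e.g. (illustrative) clusters recorded in write order as
	 * [96,100) [0,32) [64,96) [32,64) come out of the selection
	 * pass below ordered by b_addr: [0,32) [32,64) [64,96) [96,100)
	 */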
91447636
A
4868 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
4869 for (min_index = -1, cl_index1 = 0; cl_index1 < wbp->cl_number; cl_index1++) {
4870 if (wbp->cl_clusters[cl_index1].b_addr == wbp->cl_clusters[cl_index1].e_addr)
9bccf70c
A
4871 continue;
4872 if (min_index == -1)
4873 min_index = cl_index1;
91447636 4874 else if (wbp->cl_clusters[cl_index1].b_addr < wbp->cl_clusters[min_index].b_addr)
9bccf70c
A
4875 min_index = cl_index1;
4876 }
4877 if (min_index == -1)
4878 break;
b0d623f7 4879
91447636
A
4880 l_clusters[cl_index].b_addr = wbp->cl_clusters[min_index].b_addr;
4881 l_clusters[cl_index].e_addr = wbp->cl_clusters[min_index].e_addr;
2d21ac55 4882 l_clusters[cl_index].io_flags = wbp->cl_clusters[min_index].io_flags;
9bccf70c 4883
91447636 4884 wbp->cl_clusters[min_index].b_addr = wbp->cl_clusters[min_index].e_addr;
9bccf70c 4885 }
91447636
A
4886 wbp->cl_number = 0;
4887
4888 cl_len = cl_index;
9bccf70c 4889
2d21ac55 4890 if ( (push_flag & PUSH_DELAY) && cl_len == MAX_CLUSTERS ) {
55e303ae
A
4891 int i;
4892
4893 /*
4894 * determine if we appear to be writing the file sequentially
4895 * if not, by returning without having pushed any clusters
4896 * we will cause this vnode to be pushed into the sparse cluster mechanism
4897 * used for managing more random I/O patterns
4898 *
4899 * we know that we've got all clusters currently in use and the next write doesn't fit into one of them...
2d21ac55 4900 * that's why we're in try_push with PUSH_DELAY...
55e303ae
A
4901 *
4902 * check to make sure that all the clusters except the last one are 'full'... and that each cluster
 4903 * is adjacent to the next (i.e. we're looking for sequential writes)... they were sorted above
91447636
A
4904 * so we can just make a simple pass through, up to, but not including the last one...
4905 * note that e_addr is not inclusive, so it will be equal to the b_addr of the next cluster if they
55e303ae
A
4906 * are sequential
4907 *
4908 * we let the last one be partial as long as it was adjacent to the previous one...
4909 * we need to do this to deal with multi-threaded servers that might write an I/O or 2 out
4910 * of order... if this occurs at the tail of the last cluster, we don't want to fall into the sparse cluster world...
4911 */
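		/*
		 * e.g. (illustrative, assuming max_cluster_pgcount == 32): a run of
		 * [0,32) [32,64) [64,96) ... passes the checks below since every
		 * cluster but the last is full and each e_addr equals the next b_addr,
		 * whereas a gap such as [0,32) [40,72) ... bails out to 'dont_try'
		 */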
4912 for (i = 0; i < MAX_CLUSTERS - 1; i++) {
cf7d32b8 4913 if ((l_clusters[i].e_addr - l_clusters[i].b_addr) != max_cluster_pgcount)
55e303ae 4914 goto dont_try;
91447636 4915 if (l_clusters[i].e_addr != l_clusters[i+1].b_addr)
55e303ae
A
4916 goto dont_try;
4917 }
4918 }
4919 for (cl_index = 0; cl_index < cl_len; cl_index++) {
2d21ac55
A
4920 int flags;
4921 struct cl_extent cl;
91447636 4922
9bccf70c 4923 /*
91447636 4924 * try to push each cluster in turn...
9bccf70c 4925 */
2d21ac55 4926 if (l_clusters[cl_index].io_flags & CLW_IONOCACHE)
91447636
A
4927 flags = IO_NOCACHE;
4928 else
4929 flags = 0;
2d21ac55
A
4930
4931 if ((l_clusters[cl_index].io_flags & CLW_IOPASSIVE) || (push_flag & IO_PASSIVE))
4932 flags |= IO_PASSIVE;
4933
4934 if (push_flag & PUSH_SYNC)
4935 flags |= IO_SYNC;
4936
91447636
A
4937 cl.b_addr = l_clusters[cl_index].b_addr;
4938 cl.e_addr = l_clusters[cl_index].e_addr;
9bccf70c 4939
2d21ac55 4940 cluster_push_now(vp, &cl, EOF, flags, callback, callback_arg);
9bccf70c 4941
91447636
A
4942 l_clusters[cl_index].b_addr = 0;
4943 l_clusters[cl_index].e_addr = 0;
4944
4945 cl_pushed++;
4946
2d21ac55 4947 if ( !(push_flag & PUSH_ALL) )
91447636 4948 break;
9bccf70c 4949 }
55e303ae 4950dont_try:
9bccf70c
A
4951 if (cl_len > cl_pushed) {
4952 /*
4953 * we didn't push all of the clusters, so
4954 * lets try to merge them back in to the vnode
4955 */
91447636 4956 if ((MAX_CLUSTERS - wbp->cl_number) < (cl_len - cl_pushed)) {
9bccf70c
A
4957 /*
4958 * we picked up some new clusters while we were trying to
91447636
A
4959 * push the old ones... this can happen because I've dropped
4960 * the vnode lock... the sum of the
9bccf70c 4961 * leftovers plus the new cluster count exceeds our ability
55e303ae 4962 * to represent them, so switch to the sparse cluster mechanism
91447636
A
4963 *
4964 * collect the active public clusters...
9bccf70c 4965 */
2d21ac55 4966 sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg);
55e303ae
A
4967
4968 for (cl_index = 0, cl_index1 = 0; cl_index < cl_len; cl_index++) {
91447636 4969 if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr)
9bccf70c 4970 continue;
91447636
A
4971 wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
4972 wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
2d21ac55 4973 wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
9bccf70c 4974
55e303ae 4975 cl_index1++;
9bccf70c 4976 }
55e303ae
A
4977 /*
4978 * update the cluster count
4979 */
91447636 4980 wbp->cl_number = cl_index1;
55e303ae
A
4981
4982 /*
4983 * and collect the original clusters that were moved into the
4984 * local storage for sorting purposes
4985 */
2d21ac55 4986 sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg);
55e303ae 4987
9bccf70c
A
4988 } else {
4989 /*
4990 * we've got room to merge the leftovers back in
4991 * just append them starting at the next 'hole'
91447636 4992 * represented by wbp->cl_number
9bccf70c 4993 */
91447636
A
4994 for (cl_index = 0, cl_index1 = wbp->cl_number; cl_index < cl_len; cl_index++) {
4995 if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr)
9bccf70c
A
4996 continue;
4997
91447636
A
4998 wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
4999 wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
2d21ac55 5000 wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
9bccf70c 5001
9bccf70c
A
5002 cl_index1++;
5003 }
5004 /*
5005 * update the cluster count
5006 */
91447636 5007 wbp->cl_number = cl_index1;
9bccf70c
A
5008 }
5009 }
2d21ac55 5010 return (MAX_CLUSTERS - wbp->cl_number);
9bccf70c
A
5011}
5012
5013
5014
5015static int
2d21ac55 5016cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags, int (*callback)(buf_t, void *), void *callback_arg)
1c79356b 5017{
1c79356b
A
5018 upl_page_info_t *pl;
5019 upl_t upl;
5020 vm_offset_t upl_offset;
5021 int upl_size;
5022 off_t upl_f_offset;
5023 int pages_in_upl;
5024 int start_pg;
5025 int last_pg;
5026 int io_size;
5027 int io_flags;
55e303ae 5028 int upl_flags;
2d21ac55 5029 int bflag;
1c79356b 5030 int size;
91447636
A
5031 int error = 0;
5032 int retval;
1c79356b
A
5033 kern_return_t kret;
5034
2d21ac55
A
5035 if (flags & IO_PASSIVE)
5036 bflag = CL_PASSIVE;
5037 else
5038 bflag = 0;
1c79356b 5039
9bccf70c 5040 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_START,
91447636 5041 (int)cl->b_addr, (int)cl->e_addr, (int)EOF, flags, 0);
9bccf70c 5042
91447636 5043 if ((pages_in_upl = (int)(cl->e_addr - cl->b_addr)) == 0) {
9bccf70c 5044 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 0, 0, 0, 0);
1c79356b 5045
91447636 5046 return (0);
9bccf70c 5047 }
1c79356b 5048 upl_size = pages_in_upl * PAGE_SIZE;
91447636 5049 upl_f_offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
1c79356b 5050
9bccf70c
A
5051 if (upl_f_offset + upl_size >= EOF) {
5052
5053 if (upl_f_offset >= EOF) {
5054 /*
5055 * must have truncated the file and missed
5056 * clearing a dangling cluster (i.e. it's completely
 5057 * beyond the new EOF)
5058 */
5059 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 1, 0, 0, 0);
5060
91447636 5061 return(0);
9bccf70c
A
5062 }
5063 size = EOF - upl_f_offset;
1c79356b 5064
55e303ae 5065 upl_size = (size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
9bccf70c 5066 pages_in_upl = upl_size / PAGE_SIZE;
55e303ae 5067 } else
9bccf70c 5068 size = upl_size;
55e303ae
A
5069
5070 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, size, 0, 0, 0);
5071
91447636
A
5072 /*
5073 * by asking for UPL_COPYOUT_FROM and UPL_RET_ONLY_DIRTY, we get the following desirable behavior
5074 *
5075 * - only pages that are currently dirty are returned... these are the ones we need to clean
5076 * - the hardware dirty bit is cleared when the page is gathered into the UPL... the software dirty bit is set
5077 * - if we have to abort the I/O for some reason, the software dirty bit is left set since we didn't clean the page
5078 * - when we commit the page, the software dirty bit is cleared... the hardware dirty bit is untouched so that if
5079 * someone dirties this page while the I/O is in progress, we don't lose track of the new state
5080 *
5081 * when the I/O completes, we no longer ask for an explicit clear of the DIRTY state (either soft or hard)
5082 */
5083
5084 if ((vp->v_flag & VNOCACHE_DATA) || (flags & IO_NOCACHE))
55e303ae
A
5085 upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE | UPL_WILL_BE_DUMPED;
5086 else
5087 upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE;
5088
0b4e3aa0
A
5089 kret = ubc_create_upl(vp,
5090 upl_f_offset,
5091 upl_size,
5092 &upl,
9bccf70c 5093 &pl,
55e303ae 5094 upl_flags);
1c79356b
A
5095 if (kret != KERN_SUCCESS)
5096 panic("cluster_push: failed to get pagelist");
5097
b0d623f7 5098 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, upl, upl_f_offset, 0, 0, 0);
9bccf70c 5099
55e303ae
A
5100 /*
5101 * since we only asked for the dirty pages back
5102 * it's possible that we may only get a few or even none, so...
5103 * before we start marching forward, we must make sure we know
5104 * where the last present page is in the UPL, otherwise we could
5105 * end up working with a freed upl due to the FREE_ON_EMPTY semantics
5106 * employed by commit_range and abort_range.
5107 */
5108 for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
5109 if (upl_page_present(pl, last_pg))
5110 break;
9bccf70c 5111 }
55e303ae 5112 pages_in_upl = last_pg + 1;
1c79356b 5113
55e303ae
A
5114 if (pages_in_upl == 0) {
5115 ubc_upl_abort(upl, 0);
1c79356b 5116
55e303ae 5117 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 2, 0, 0, 0);
91447636 5118 return(0);
55e303ae
A
5119 }
5120
5121 for (last_pg = 0; last_pg < pages_in_upl; ) {
5122 /*
5123 * find the next dirty page in the UPL
5124 * this will become the first page in the
5125 * next I/O to generate
5126 */
1c79356b 5127 for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
55e303ae 5128 if (upl_dirty_page(pl, start_pg))
1c79356b 5129 break;
55e303ae
A
5130 if (upl_page_present(pl, start_pg))
5131 /*
5132 * RET_ONLY_DIRTY will return non-dirty 'precious' pages
5133 * just release these unchanged since we're not going
5134 * to steal them or change their state
5135 */
5136 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
1c79356b 5137 }
55e303ae
A
5138 if (start_pg >= pages_in_upl)
5139 /*
5140 * done... no more dirty pages to push
5141 */
5142 break;
5143 if (start_pg > last_pg)
5144 /*
5145 * skipped over some non-dirty pages
5146 */
5147 size -= ((start_pg - last_pg) * PAGE_SIZE);
1c79356b 5148
55e303ae
A
5149 /*
5150 * find a range of dirty pages to write
5151 */
1c79356b 5152 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
55e303ae 5153 if (!upl_dirty_page(pl, last_pg))
1c79356b
A
5154 break;
5155 }
5156 upl_offset = start_pg * PAGE_SIZE;
5157
5158 io_size = min(size, (last_pg - start_pg) * PAGE_SIZE);
5159
2d21ac55 5160 io_flags = CL_THROTTLE | CL_COMMIT | CL_AGE | bflag;
91447636
A
5161
5162 if ( !(flags & IO_SYNC))
5163 io_flags |= CL_ASYNC;
5164
5165 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
2d21ac55 5166 io_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
1c79356b 5167
91447636
A
5168 if (error == 0 && retval)
5169 error = retval;
1c79356b
A
5170
5171 size -= io_size;
5172 }
9bccf70c
A
5173 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 3, 0, 0, 0);
5174
91447636 5175 return(error);
1c79356b 5176}
b4c24cb9
A
5177
5178
91447636
A
5179/*
5180 * sparse_cluster_switch is called with the write behind lock held
5181 */
5182static void
2d21ac55 5183sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg)
b4c24cb9 5184{
91447636 5185 int cl_index;
b4c24cb9 5186
b0d623f7 5187 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, vp, wbp->cl_scmap, 0, 0, 0);
91447636
A
5188
5189 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
5190 int flags;
5191 struct cl_extent cl;
5192
5193 for (cl.b_addr = wbp->cl_clusters[cl_index].b_addr; cl.b_addr < wbp->cl_clusters[cl_index].e_addr; cl.b_addr++) {
b4c24cb9 5194
2d21ac55 5195 if (ubc_page_op(vp, (off_t)(cl.b_addr * PAGE_SIZE_64), 0, NULL, &flags) == KERN_SUCCESS) {
91447636
A
5196 if (flags & UPL_POP_DIRTY) {
5197 cl.e_addr = cl.b_addr + 1;
b4c24cb9 5198
b0d623f7 5199 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, EOF, callback, callback_arg);
91447636 5200 }
55e303ae
A
5201 }
5202 }
5203 }
91447636
A
5204 wbp->cl_number = 0;
5205
b0d623f7 5206 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, vp, wbp->cl_scmap, 0, 0, 0);
55e303ae
A
5207}
5208
5209
91447636 5210/*
b0d623f7
A
5211 * sparse_cluster_push must be called with the write-behind lock held if the scmap is
5212 * still associated with the write-behind context... however, if the scmap has been disassociated
5213 * from the write-behind context (the cluster_push case), the wb lock is not held
91447636
A
5214 */
5215static void
b0d623f7 5216sparse_cluster_push(void **scmap, vnode_t vp, off_t EOF, int push_flag, int (*callback)(buf_t, void *), void *callback_arg)
55e303ae 5217{
91447636
A
5218 struct cl_extent cl;
5219 off_t offset;
5220 u_int length;
55e303ae 5221
b0d623f7 5222 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_START, vp, (*scmap), 0, push_flag, 0);
55e303ae 5223
2d21ac55 5224 if (push_flag & PUSH_ALL)
b0d623f7 5225 vfs_drt_control(scmap, 1);
55e303ae
A
5226
5227 for (;;) {
b0d623f7 5228 if (vfs_drt_get_cluster(scmap, &offset, &length) != KERN_SUCCESS)
55e303ae 5229 break;
55e303ae 5230
91447636
A
5231 cl.b_addr = (daddr64_t)(offset / PAGE_SIZE_64);
5232 cl.e_addr = (daddr64_t)((offset + length) / PAGE_SIZE_64);
5233
2d21ac55
A
5234 cluster_push_now(vp, &cl, EOF, push_flag & IO_PASSIVE, callback, callback_arg);
5235
2d21ac55 5236 if ( !(push_flag & PUSH_ALL) )
55e303ae
A
5237 break;
5238 }
b0d623f7 5239 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, vp, (*scmap), 0, 0, 0);
55e303ae
A
5240}
5241
5242
91447636
A
5243/*
5244 * sparse_cluster_add is called with the write behind lock held
5245 */
5246static void
b0d623f7 5247sparse_cluster_add(void **scmap, vnode_t vp, struct cl_extent *cl, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg)
55e303ae 5248{
91447636
A
5249 u_int new_dirty;
5250 u_int length;
5251 off_t offset;
55e303ae 5252
b0d623f7 5253 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_START, (*scmap), 0, cl->b_addr, (int)cl->e_addr, 0);
55e303ae 5254
91447636
A
5255 offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
5256 length = ((u_int)(cl->e_addr - cl->b_addr)) * PAGE_SIZE;
55e303ae 5257
b0d623f7 5258 while (vfs_drt_mark_pages(scmap, offset, length, &new_dirty) != KERN_SUCCESS) {
55e303ae
A
5259 /*
5260 * no room left in the map
5261 * only a partial update was done
5262 * push out some pages and try again
5263 */
b0d623f7 5264 sparse_cluster_push(scmap, vp, EOF, 0, callback, callback_arg);
55e303ae
A
5265
5266 offset += (new_dirty * PAGE_SIZE_64);
5267 length -= (new_dirty * PAGE_SIZE);
5268 }
b0d623f7 5269 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_END, vp, (*scmap), 0, 0, 0);
55e303ae
A
5270}
5271
5272
5273static int
2d21ac55 5274cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
55e303ae 5275{
55e303ae
A
5276 upl_page_info_t *pl;
5277 upl_t upl;
5278 addr64_t ubc_paddr;
5279 kern_return_t kret;
5280 int error = 0;
91447636
A
5281 int did_read = 0;
5282 int abort_flags;
5283 int upl_flags;
2d21ac55
A
5284 int bflag;
5285
5286 if (flags & IO_PASSIVE)
5287 bflag = CL_PASSIVE;
5288 else
5289 bflag = 0;
55e303ae 5290
91447636 5291 upl_flags = UPL_SET_LITE;
2d21ac55
A
5292
5293 if ( !(flags & CL_READ) ) {
91447636
A
5294 /*
5295 * "write" operation: let the UPL subsystem know
5296 * that we intend to modify the buffer cache pages
5297 * we're gathering.
5298 */
5299 upl_flags |= UPL_WILL_MODIFY;
2d21ac55
A
5300 } else {
5301 /*
5302 * indicate that there is no need to pull the
5303 * mapping for this page... we're only going
5304 * to read from it, not modify it.
5305 */
5306 upl_flags |= UPL_FILE_IO;
91447636 5307 }
55e303ae
A
5308 kret = ubc_create_upl(vp,
5309 uio->uio_offset & ~PAGE_MASK_64,
5310 PAGE_SIZE,
5311 &upl,
5312 &pl,
91447636 5313 upl_flags);
55e303ae
A
5314
5315 if (kret != KERN_SUCCESS)
5316 return(EINVAL);
5317
5318 if (!upl_valid_page(pl, 0)) {
5319 /*
5320 * issue a synchronous read to cluster_io
5321 */
91447636 5322 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
2d21ac55 5323 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
55e303ae 5324 if (error) {
b4c24cb9
A
5325 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
5326
5327 return(error);
5328 }
91447636 5329 did_read = 1;
b4c24cb9 5330 }
55e303ae 5331 ubc_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)(uio->uio_offset & PAGE_MASK_64);
b4c24cb9 5332
55e303ae
A
5333/*
5334 * NOTE: There is no prototype for the following in BSD. It, and the definitions
5335 * of the defines for cppvPsrc, cppvPsnk, cppvFsnk, and cppvFsrc will be found in
5336 * osfmk/ppc/mappings.h. They are not included here because there appears to be no
5337 * way to do so without exporting them to kexts as well.
5338 */
de355530 5339 if (flags & CL_READ)
55e303ae
A
5340// copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsnk); /* Copy physical to physical and flush the destination */
5341 copypv(ubc_paddr, usr_paddr, xsize, 2 | 1 | 4); /* Copy physical to physical and flush the destination */
de355530 5342 else
4a249263
A
5343// copypv(usr_paddr, ubc_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsrc); /* Copy physical to physical and flush the source */
5344 copypv(usr_paddr, ubc_paddr, xsize, 2 | 1 | 8); /* Copy physical to physical and flush the source */
55e303ae
A
5345
5346 if ( !(flags & CL_READ) || (upl_valid_page(pl, 0) && upl_dirty_page(pl, 0))) {
5347 /*
5348 * issue a synchronous write to cluster_io
5349 */
91447636 5350 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
2d21ac55 5351 bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
de355530 5352 }
2d21ac55 5353 if (error == 0)
cc9f6e38
A
5354 uio_update(uio, (user_size_t)xsize);
5355
91447636
A
5356 if (did_read)
5357 abort_flags = UPL_ABORT_FREE_ON_EMPTY;
5358 else
5359 abort_flags = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
5360
5361 ubc_upl_abort_range(upl, 0, PAGE_SIZE, abort_flags);
55e303ae
A
5362
5363 return (error);
5364}
5365
5366
5367
5368int
2d21ac55 5369cluster_copy_upl_data(struct uio *uio, upl_t upl, int upl_offset, int *io_resid)
55e303ae
A
5370{
5371 int pg_offset;
5372 int pg_index;
5373 int csize;
5374 int segflg;
5375 int retval = 0;
2d21ac55 5376 int xsize;
55e303ae 5377 upl_page_info_t *pl;
55e303ae 5378
2d21ac55
A
5379 xsize = *io_resid;
5380
55e303ae 5381 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
2d21ac55 5382 (int)uio->uio_offset, upl_offset, xsize, 0, 0);
55e303ae
A
5383
5384 segflg = uio->uio_segflg;
5385
5386 switch(segflg) {
5387
91447636
A
5388 case UIO_USERSPACE32:
5389 case UIO_USERISPACE32:
5390 uio->uio_segflg = UIO_PHYS_USERSPACE32;
5391 break;
5392
55e303ae
A
5393 case UIO_USERSPACE:
5394 case UIO_USERISPACE:
5395 uio->uio_segflg = UIO_PHYS_USERSPACE;
5396 break;
5397
91447636
A
5398 case UIO_USERSPACE64:
5399 case UIO_USERISPACE64:
5400 uio->uio_segflg = UIO_PHYS_USERSPACE64;
5401 break;
5402
55e303ae
A
5403 case UIO_SYSSPACE:
5404 uio->uio_segflg = UIO_PHYS_SYSSPACE;
5405 break;
91447636 5406
55e303ae
A
5407 }
5408 pl = ubc_upl_pageinfo(upl);
5409
5410 pg_index = upl_offset / PAGE_SIZE;
5411 pg_offset = upl_offset & PAGE_MASK;
5412 csize = min(PAGE_SIZE - pg_offset, xsize);
5413
5414 while (xsize && retval == 0) {
5415 addr64_t paddr;
5416
5417 paddr = ((addr64_t)upl_phys_page(pl, pg_index) << 12) + pg_offset;
de355530 5418
55e303ae
A
5419 retval = uiomove64(paddr, csize, uio);
5420
5421 pg_index += 1;
5422 pg_offset = 0;
5423 xsize -= csize;
5424 csize = min(PAGE_SIZE, xsize);
5425 }
2d21ac55
A
5426 *io_resid = xsize;
5427
55e303ae
A
5428 uio->uio_segflg = segflg;
5429
55e303ae 5430 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
2d21ac55 5431 (int)uio->uio_offset, xsize, retval, segflg, 0);
55e303ae
A
5432
5433 return (retval);
5434}
5435
5436
5437int
91447636 5438cluster_copy_ubc_data(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty)
2d21ac55
A
5439{
5440
5441 return (cluster_copy_ubc_data_internal(vp, uio, io_resid, mark_dirty, 1));
5442}
5443
5444
5445static int
5446cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference)
55e303ae
A
5447{
5448 int segflg;
5449 int io_size;
5450 int xsize;
5451 int start_offset;
55e303ae
A
5452 int retval = 0;
5453 memory_object_control_t control;
55e303ae 5454
2d21ac55 5455 io_size = *io_resid;
55e303ae
A
5456
5457 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
2d21ac55 5458 (int)uio->uio_offset, 0, io_size, 0, 0);
55e303ae
A
5459
5460 control = ubc_getobject(vp, UBC_FLAGS_NONE);
2d21ac55 5461
55e303ae
A
5462 if (control == MEMORY_OBJECT_CONTROL_NULL) {
5463 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
2d21ac55 5464 (int)uio->uio_offset, io_size, retval, 3, 0);
55e303ae
A
5465
5466 return(0);
5467 }
55e303ae
A
5468 segflg = uio->uio_segflg;
5469
5470 switch(segflg) {
5471
91447636
A
5472 case UIO_USERSPACE32:
5473 case UIO_USERISPACE32:
5474 uio->uio_segflg = UIO_PHYS_USERSPACE32;
5475 break;
5476
5477 case UIO_USERSPACE64:
5478 case UIO_USERISPACE64:
5479 uio->uio_segflg = UIO_PHYS_USERSPACE64;
5480 break;
5481
55e303ae
A
5482 case UIO_USERSPACE:
5483 case UIO_USERISPACE:
5484 uio->uio_segflg = UIO_PHYS_USERSPACE;
5485 break;
5486
5487 case UIO_SYSSPACE:
5488 uio->uio_segflg = UIO_PHYS_SYSSPACE;
5489 break;
5490 }
55e303ae 5491
91447636
A
5492 if ( (io_size = *io_resid) ) {
5493 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
5494 xsize = uio_resid(uio);
55e303ae 5495
2d21ac55
A
5496 retval = memory_object_control_uiomove(control, uio->uio_offset - start_offset, uio,
5497 start_offset, io_size, mark_dirty, take_reference);
91447636
A
5498 xsize -= uio_resid(uio);
5499 io_size -= xsize;
55e303ae
A
5500 }
5501 uio->uio_segflg = segflg;
5502 *io_resid = io_size;
5503
55e303ae 5504 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
2d21ac55 5505 (int)uio->uio_offset, io_size, retval, 0x80000000 | segflg, 0);
55e303ae
A
5506
5507 return(retval);
5508}
5509
5510
5511int
91447636 5512is_file_clean(vnode_t vp, off_t filesize)
55e303ae
A
5513{
5514 off_t f_offset;
5515 int flags;
5516 int total_dirty = 0;
5517
5518 for (f_offset = 0; f_offset < filesize; f_offset += PAGE_SIZE_64) {
2d21ac55 5519 if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) {
55e303ae
A
5520 if (flags & UPL_POP_DIRTY) {
5521 total_dirty++;
5522 }
5523 }
5524 }
5525 if (total_dirty)
5526 return(EINVAL);
5527
5528 return (0);
5529}
5530
5531
5532
5533/*
5534 * Dirty region tracking/clustering mechanism.
5535 *
5536 * This code (vfs_drt_*) provides a mechanism for tracking and clustering
5537 * dirty regions within a larger space (file). It is primarily intended to
5538 * support clustering in large files with many dirty areas.
5539 *
5540 * The implementation assumes that the dirty regions are pages.
5541 *
5542 * To represent dirty pages within the file, we store bit vectors in a
5543 * variable-size circular hash.
5544 */
5545
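/*
 * Worked example (illustrative, using the macros defined below and assuming
 * PAGE_SIZE == 4096): a dirty page at byte offset 0x51C000 is recorded as
 *
 *	key  = DRT_ALIGN_ADDRESS(0x51C000)  = 0x500000	(1MB-aligned window)
 *	slot = DRT_HASH(cmap, key)          = key % scm_modulus
 *	bit  = (0x51C000 - key) / PAGE_SIZE = 28	(0..DRT_BITVECTOR_PAGES - 1)
 *
 * i.e. each hashtable entry covers a 1MB window of the file, with one bit
 * per page within that window.
 */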
5546/*
5547 * Bitvector size. This determines the number of pages we group in a
5548 * single hashtable entry. Each hashtable entry is aligned to this
5549 * size within the file.
5550 */
5551#define DRT_BITVECTOR_PAGES 256
5552
5553/*
5554 * File offset handling.
5555 *
5556 * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES;
 5557 * the correct formula is ~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1)
5558 */
5559#define DRT_ADDRESS_MASK (~((1 << 20) - 1))
5560#define DRT_ALIGN_ADDRESS(addr) ((addr) & DRT_ADDRESS_MASK)
5561
5562/*
5563 * Hashtable address field handling.
5564 *
 5565 * The low-order bits of the hashtable address field (dhe_control) are used
 5566 * to store the entry's page count, which conserves space.
5567 *
5568 * DRT_HASH_COUNT_MASK must be large enough to store the range
5569 * 0-DRT_BITVECTOR_PAGES inclusive, as well as have one value
5570 * to indicate that the bucket is actually unoccupied.
5571 */
5572#define DRT_HASH_GET_ADDRESS(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK)
5573#define DRT_HASH_SET_ADDRESS(scm, i, a) \
5574 do { \
5575 (scm)->scm_hashtable[(i)].dhe_control = \
5576 ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a); \
5577 } while (0)
5578#define DRT_HASH_COUNT_MASK 0x1ff
5579#define DRT_HASH_GET_COUNT(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK)
5580#define DRT_HASH_SET_COUNT(scm, i, c) \
5581 do { \
5582 (scm)->scm_hashtable[(i)].dhe_control = \
5583 ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK); \
5584 } while (0)
5585#define DRT_HASH_CLEAR(scm, i) \
5586 do { \
5587 (scm)->scm_hashtable[(i)].dhe_control = 0; \
5588 } while (0)
5589#define DRT_HASH_VACATE(scm, i) DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK)
5590#define DRT_HASH_VACANT(scm, i) (DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK)
5591#define DRT_HASH_COPY(oscm, oi, scm, i) \
5592 do { \
5593 (scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control; \
5594 DRT_BITVECTOR_COPY(oscm, oi, scm, i); \
5595 } while(0);
5596
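/*
 * Note (illustrative): DRT_HASH_COUNT_MASK is 0x1ff (511), which is wide
 * enough for page counts 0..DRT_BITVECTOR_PAGES (256) while leaving the
 * value 511 itself free to act as the 'vacant' sentinel stored by
 * DRT_HASH_VACATE and tested by DRT_HASH_VACANT; the count bits (0-8)
 * never collide with the address bits kept by DRT_ADDRESS_MASK (bit 20 up).
 */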
5597
5598/*
5599 * Hash table moduli.
5600 *
5601 * Since the hashtable entry's size is dependent on the size of
5602 * the bitvector, and since the hashtable size is constrained to
5603 * both being prime and fitting within the desired allocation
5604 * size, these values need to be manually determined.
5605 *
 5606 * For DRT_BITVECTOR_PAGES = 256, the entry size is 40 bytes.
5607 *
5608 * The small hashtable allocation is 1024 bytes, so the modulus is 23.
5609 * The large hashtable allocation is 16384 bytes, so the modulus is 401.
5610 */
5611#define DRT_HASH_SMALL_MODULUS 23
5612#define DRT_HASH_LARGE_MODULUS 401
5613
b7266188
A
5614/*
5615 * Physical memory required before the large hash modulus is permitted.
5616 *
 5617 * On small memory systems, the large hash modulus can lead to physical
5618 * memory starvation, so we avoid using it there.
5619 */
5620#define DRT_HASH_LARGE_MEMORY_REQUIRED (1024LL * 1024LL * 1024LL) /* 1GiB */
5621
55e303ae
A
5622#define DRT_SMALL_ALLOCATION 1024 /* 104 bytes spare */
5623#define DRT_LARGE_ALLOCATION 16384 /* 344 bytes spare */
5624
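/*
 * Sanity arithmetic (illustrative), using the 40-byte entry size noted above:
 *
 *	 23 * 40 =   920 bytes;  1024 -   920 = 104 bytes spare
 *	401 * 40 = 16040 bytes; 16384 - 16040 = 344 bytes spare
 *
 * the spare space accommodates the vfs_drt_clustermap header that precedes
 * the hashtable within the same allocation.
 */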
5625/* *** nothing below here has secret dependencies on DRT_BITVECTOR_PAGES *** */
5626
5627/*
5628 * Hashtable bitvector handling.
5629 *
5630 * Bitvector fields are 32 bits long.
5631 */
5632
5633#define DRT_HASH_SET_BIT(scm, i, bit) \
5634 (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] |= (1 << ((bit) % 32))
5635
5636#define DRT_HASH_CLEAR_BIT(scm, i, bit) \
5637 (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] &= ~(1 << ((bit) % 32))
5638
5639#define DRT_HASH_TEST_BIT(scm, i, bit) \
5640 ((scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] & (1 << ((bit) % 32)))
5641
5642#define DRT_BITVECTOR_CLEAR(scm, i) \
5643 bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
5644
5645#define DRT_BITVECTOR_COPY(oscm, oi, scm, i) \
5646 bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0], \
5647 &(scm)->scm_hashtable[(i)].dhe_bitvector[0], \
5648 (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
5649
5650
5651
5652/*
5653 * Hashtable entry.
5654 */
5655struct vfs_drt_hashentry {
5656 u_int64_t dhe_control;
5657 u_int32_t dhe_bitvector[DRT_BITVECTOR_PAGES / 32];
5658};
5659
5660/*
5661 * Dirty Region Tracking structure.
5662 *
5663 * The hashtable is allocated entirely inside the DRT structure.
5664 *
5665 * The hash is a simple circular prime modulus arrangement, the structure
5666 * is resized from small to large if it overflows.
5667 */
5668
5669struct vfs_drt_clustermap {
5670 u_int32_t scm_magic; /* sanity/detection */
5671#define DRT_SCM_MAGIC 0x12020003
5672 u_int32_t scm_modulus; /* current ring size */
5673 u_int32_t scm_buckets; /* number of occupied buckets */
5674 u_int32_t scm_lastclean; /* last entry we cleaned */
5675 u_int32_t scm_iskips; /* number of slot skips */
5676
5677 struct vfs_drt_hashentry scm_hashtable[0];
5678};
5679
5680
5681#define DRT_HASH(scm, addr) ((addr) % (scm)->scm_modulus)
5682#define DRT_HASH_NEXT(scm, addr) (((addr) + 1) % (scm)->scm_modulus)
5683
5684/*
5685 * Debugging codes and arguments.
5686 */
5687#define DRT_DEBUG_EMPTYFREE (FSDBG_CODE(DBG_FSRW, 82)) /* nil */
5688#define DRT_DEBUG_RETCLUSTER (FSDBG_CODE(DBG_FSRW, 83)) /* offset, length */
5689#define DRT_DEBUG_ALLOC (FSDBG_CODE(DBG_FSRW, 84)) /* copycount */
5690#define DRT_DEBUG_INSERT (FSDBG_CODE(DBG_FSRW, 85)) /* offset, iskip */
5691#define DRT_DEBUG_MARK (FSDBG_CODE(DBG_FSRW, 86)) /* offset, length,
5692 * dirty */
5693 /* 0, setcount */
5694 /* 1 (clean, no map) */
5695 /* 2 (map alloc fail) */
5696 /* 3, resid (partial) */
5697#define DRT_DEBUG_6 (FSDBG_CODE(DBG_FSRW, 87))
5698#define DRT_DEBUG_SCMDATA (FSDBG_CODE(DBG_FSRW, 88)) /* modulus, buckets,
5699 * lastclean, iskips */
5700
5701
55e303ae
A
5702static kern_return_t vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp);
5703static kern_return_t vfs_drt_free_map(struct vfs_drt_clustermap *cmap);
5704static kern_return_t vfs_drt_search_index(struct vfs_drt_clustermap *cmap,
5705 u_int64_t offset, int *indexp);
5706static kern_return_t vfs_drt_get_index(struct vfs_drt_clustermap **cmapp,
5707 u_int64_t offset,
5708 int *indexp,
5709 int recursed);
5710static kern_return_t vfs_drt_do_mark_pages(
5711 void **cmapp,
5712 u_int64_t offset,
5713 u_int length,
2d21ac55 5714 u_int *setcountp,
55e303ae
A
5715 int dirty);
5716static void vfs_drt_trace(
5717 struct vfs_drt_clustermap *cmap,
5718 int code,
5719 int arg1,
5720 int arg2,
5721 int arg3,
5722 int arg4);
5723
5724
5725/*
5726 * Allocate and initialise a sparse cluster map.
5727 *
5728 * Will allocate a new map, resize or compact an existing map.
5729 *
5730 * XXX we should probably have at least one intermediate map size,
5731 * as the 1:16 ratio seems a bit drastic.
5732 */
5733static kern_return_t
5734vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp)
5735{
5736 struct vfs_drt_clustermap *cmap, *ocmap;
5737 kern_return_t kret;
5738 u_int64_t offset;
2d21ac55
A
5739 u_int32_t i;
5740 int nsize, active_buckets, index, copycount;
55e303ae
A
5741
5742 ocmap = NULL;
5743 if (cmapp != NULL)
5744 ocmap = *cmapp;
5745
5746 /*
5747 * Decide on the size of the new map.
5748 */
5749 if (ocmap == NULL) {
5750 nsize = DRT_HASH_SMALL_MODULUS;
5751 } else {
5752 /* count the number of active buckets in the old map */
5753 active_buckets = 0;
5754 for (i = 0; i < ocmap->scm_modulus; i++) {
5755 if (!DRT_HASH_VACANT(ocmap, i) &&
5756 (DRT_HASH_GET_COUNT(ocmap, i) != 0))
5757 active_buckets++;
5758 }
5759 /*
5760 * If we're currently using the small allocation, check to
5761 * see whether we should grow to the large one.
5762 */
5763 if (ocmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
b7266188
A
5764 /*
5765 * If the ring is nearly full and we are allowed to
5766 * use the large modulus, upgrade.
5767 */
5768 if ((active_buckets > (DRT_HASH_SMALL_MODULUS - 5)) &&
5769 (max_mem >= DRT_HASH_LARGE_MEMORY_REQUIRED)) {
55e303ae
A
5770 nsize = DRT_HASH_LARGE_MODULUS;
5771 } else {
5772 nsize = DRT_HASH_SMALL_MODULUS;
5773 }
5774 } else {
5775 /* already using the large modulus */
5776 nsize = DRT_HASH_LARGE_MODULUS;
5777 /*
5778 * If the ring is completely full, there's
5779 * nothing useful for us to do. Behave as
5780 * though we had compacted into the new
5781 * array and return.
5782 */
5783 if (active_buckets >= DRT_HASH_LARGE_MODULUS)
5784 return(KERN_SUCCESS);
5785 }
5786 }
5787
5788 /*
5789 * Allocate and initialise the new map.
5790 */
5791
5792 kret = kmem_alloc(kernel_map, (vm_offset_t *)&cmap,
5793 (nsize == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION);
5794 if (kret != KERN_SUCCESS)
5795 return(kret);
5796 cmap->scm_magic = DRT_SCM_MAGIC;
5797 cmap->scm_modulus = nsize;
5798 cmap->scm_buckets = 0;
5799 cmap->scm_lastclean = 0;
5800 cmap->scm_iskips = 0;
5801 for (i = 0; i < cmap->scm_modulus; i++) {
5802 DRT_HASH_CLEAR(cmap, i);
5803 DRT_HASH_VACATE(cmap, i);
5804 DRT_BITVECTOR_CLEAR(cmap, i);
5805 }
5806
5807 /*
5808 * If there's an old map, re-hash entries from it into the new map.
5809 */
5810 copycount = 0;
5811 if (ocmap != NULL) {
5812 for (i = 0; i < ocmap->scm_modulus; i++) {
5813 /* skip empty buckets */
5814 if (DRT_HASH_VACANT(ocmap, i) ||
5815 (DRT_HASH_GET_COUNT(ocmap, i) == 0))
5816 continue;
5817 /* get new index */
5818 offset = DRT_HASH_GET_ADDRESS(ocmap, i);
5819 kret = vfs_drt_get_index(&cmap, offset, &index, 1);
5820 if (kret != KERN_SUCCESS) {
5821 /* XXX need to bail out gracefully here */
5822 panic("vfs_drt: new cluster map mysteriously too small");
2d21ac55 5823 index = 0;
55e303ae
A
5824 }
5825 /* copy */
5826 DRT_HASH_COPY(ocmap, i, cmap, index);
5827 copycount++;
5828 }
5829 }
5830
5831 /* log what we've done */
5832 vfs_drt_trace(cmap, DRT_DEBUG_ALLOC, copycount, 0, 0, 0);
5833
5834 /*
5835 * It's important to ensure that *cmapp always points to
5836 * a valid map, so we must overwrite it before freeing
5837 * the old map.
5838 */
5839 *cmapp = cmap;
5840 if (ocmap != NULL) {
5841 /* emit stats into trace buffer */
5842 vfs_drt_trace(ocmap, DRT_DEBUG_SCMDATA,
5843 ocmap->scm_modulus,
5844 ocmap->scm_buckets,
5845 ocmap->scm_lastclean,
5846 ocmap->scm_iskips);
5847
5848 vfs_drt_free_map(ocmap);
5849 }
5850 return(KERN_SUCCESS);
5851}
5852
5853
5854/*
5855 * Free a sparse cluster map.
5856 */
5857static kern_return_t
5858vfs_drt_free_map(struct vfs_drt_clustermap *cmap)
5859{
55e303ae
A
5860 kmem_free(kernel_map, (vm_offset_t)cmap,
5861 (cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION);
5862 return(KERN_SUCCESS);
5863}
5864
5865
5866/*
5867 * Find the hashtable slot currently occupied by an entry for the supplied offset.
5868 */
5869static kern_return_t
5870vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *indexp)
5871{
2d21ac55
A
5872 int index;
5873 u_int32_t i;
55e303ae
A
5874
5875 offset = DRT_ALIGN_ADDRESS(offset);
5876 index = DRT_HASH(cmap, offset);
5877
5878 /* traverse the hashtable */
5879 for (i = 0; i < cmap->scm_modulus; i++) {
5880
5881 /*
5882 * If the slot is vacant, we can stop.
5883 */
5884 if (DRT_HASH_VACANT(cmap, index))
5885 break;
5886
5887 /*
5888 * If the address matches our offset, we have success.
5889 */
5890 if (DRT_HASH_GET_ADDRESS(cmap, index) == offset) {
5891 *indexp = index;
5892 return(KERN_SUCCESS);
5893 }
5894
5895 /*
5896 * Move to the next slot, try again.
5897 */
5898 index = DRT_HASH_NEXT(cmap, index);
5899 }
5900 /*
5901 * It's not there.
5902 */
5903 return(KERN_FAILURE);
5904}
5905
5906/*
5907 * Find the hashtable slot for the supplied offset. If we haven't allocated
5908 * one yet, allocate one and populate the address field. Note that it will
5909 * not have a nonzero page count and thus will still technically be free, so
5910 * in the case where we are called to clean pages, the slot will remain free.
5911 */
5912static kern_return_t
5913vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *indexp, int recursed)
5914{
5915 struct vfs_drt_clustermap *cmap;
5916 kern_return_t kret;
2d21ac55
A
5917 u_int32_t index;
5918 u_int32_t i;
55e303ae
A
5919
5920 cmap = *cmapp;
5921
5922 /* look for an existing entry */
5923 kret = vfs_drt_search_index(cmap, offset, indexp);
5924 if (kret == KERN_SUCCESS)
5925 return(kret);
5926
5927 /* need to allocate an entry */
5928 offset = DRT_ALIGN_ADDRESS(offset);
5929 index = DRT_HASH(cmap, offset);
5930
5931 /* scan from the index forwards looking for a vacant slot */
5932 for (i = 0; i < cmap->scm_modulus; i++) {
5933 /* slot vacant? */
5934 if (DRT_HASH_VACANT(cmap, index) || DRT_HASH_GET_COUNT(cmap,index) == 0) {
5935 cmap->scm_buckets++;
5936 if (index < cmap->scm_lastclean)
5937 cmap->scm_lastclean = index;
5938 DRT_HASH_SET_ADDRESS(cmap, index, offset);
5939 DRT_HASH_SET_COUNT(cmap, index, 0);
5940 DRT_BITVECTOR_CLEAR(cmap, index);
5941 *indexp = index;
5942 vfs_drt_trace(cmap, DRT_DEBUG_INSERT, (int)offset, i, 0, 0);
5943 return(KERN_SUCCESS);
5944 }
5945 cmap->scm_iskips += i;
5946 index = DRT_HASH_NEXT(cmap, index);
5947 }
5948
5949 /*
5950 * We haven't found a vacant slot, so the map is full. If we're not
5951 * already recursed, try reallocating/compacting it.
5952 */
5953 if (recursed)
5954 return(KERN_FAILURE);
5955 kret = vfs_drt_alloc_map(cmapp);
5956 if (kret == KERN_SUCCESS) {
5957 /* now try to insert again */
5958 kret = vfs_drt_get_index(cmapp, offset, indexp, 1);
5959 }
5960 return(kret);
5961}
5962
5963/*
5964 * Implementation of set dirty/clean.
5965 *
5966 * In the 'clean' case, not finding a map is OK.
5967 */
5968static kern_return_t
5969vfs_drt_do_mark_pages(
5970 void **private,
5971 u_int64_t offset,
5972 u_int length,
2d21ac55 5973 u_int *setcountp,
55e303ae
A
5974 int dirty)
5975{
5976 struct vfs_drt_clustermap *cmap, **cmapp;
5977 kern_return_t kret;
5978 int i, index, pgoff, pgcount, setcount, ecount;
5979
5980 cmapp = (struct vfs_drt_clustermap **)private;
5981 cmap = *cmapp;
5982
5983 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_START, (int)offset, (int)length, dirty, 0);
5984
5985 if (setcountp != NULL)
5986 *setcountp = 0;
5987
5988 /* allocate a cluster map if we don't already have one */
5989 if (cmap == NULL) {
5990 /* no cluster map, nothing to clean */
5991 if (!dirty) {
5992 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 1, 0, 0, 0);
5993 return(KERN_SUCCESS);
5994 }
5995 kret = vfs_drt_alloc_map(cmapp);
5996 if (kret != KERN_SUCCESS) {
5997 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 2, 0, 0, 0);
5998 return(kret);
5999 }
6000 }
6001 setcount = 0;
6002
6003 /*
6004 * Iterate over the length of the region.
6005 */
6006 while (length > 0) {
6007 /*
6008 * Get the hashtable index for this offset.
6009 *
6010 * XXX this will add blank entries if we are clearing a range
6011 * that hasn't been dirtied.
6012 */
6013 kret = vfs_drt_get_index(cmapp, offset, &index, 0);
6014 cmap = *cmapp; /* may have changed! */
6015 /* this may be a partial-success return */
6016 if (kret != KERN_SUCCESS) {
6017 if (setcountp != NULL)
6018 *setcountp = setcount;
6019 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 3, (int)length, 0, 0);
6020
6021 return(kret);
6022 }
6023
6024 /*
6025 * Work out how many pages we're modifying in this
6026 * hashtable entry.
6027 */
6028 pgoff = (offset - DRT_ALIGN_ADDRESS(offset)) / PAGE_SIZE;
6029 pgcount = min((length / PAGE_SIZE), (DRT_BITVECTOR_PAGES - pgoff));
6030
6031 /*
6032 * Iterate over pages, dirty/clearing as we go.
6033 */
6034 ecount = DRT_HASH_GET_COUNT(cmap, index);
6035 for (i = 0; i < pgcount; i++) {
6036 if (dirty) {
6037 if (!DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
6038 DRT_HASH_SET_BIT(cmap, index, pgoff + i);
6039 ecount++;
6040 setcount++;
6041 }
6042 } else {
6043 if (DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
6044 DRT_HASH_CLEAR_BIT(cmap, index, pgoff + i);
6045 ecount--;
6046 setcount++;
6047 }
6048 }
6049 }
6050 DRT_HASH_SET_COUNT(cmap, index, ecount);
91447636 6051
55e303ae
A
6052 offset += pgcount * PAGE_SIZE;
6053 length -= pgcount * PAGE_SIZE;
6054 }
6055 if (setcountp != NULL)
6056 *setcountp = setcount;
6057
6058 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 0, setcount, 0, 0);
6059
6060 return(KERN_SUCCESS);
6061}
6062
6063/*
6064 * Mark a set of pages as dirty/clean.
6065 *
6066 * This is a public interface.
6067 *
6068 * cmapp
6069 * Pointer to storage suitable for holding a pointer. Note that
6070 * this must either be NULL or a value set by this function.
6071 *
6072 * size
6073 * Current file size in bytes.
6074 *
6075 * offset
6076 * Offset of the first page to be marked as dirty, in bytes. Must be
6077 * page-aligned.
6078 *
6079 * length
6080 * Length of dirty region, in bytes. Must be a multiple of PAGE_SIZE.
6081 *
6082 * setcountp
6083 * Number of pages newly marked dirty by this call (optional).
6084 *
6085 * Returns KERN_SUCCESS if all the pages were successfully marked.
6086 */
6087static kern_return_t
2d21ac55 6088vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp)
55e303ae
A
6089{
6090 /* XXX size unused, drop from interface */
6091 return(vfs_drt_do_mark_pages(cmapp, offset, length, setcountp, 1));
6092}
6093
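/*
 * Typical call pattern (an illustrative sketch mirroring sparse_cluster_add
 * above, not a new interface):
 *
 *	u_int new_dirty;
 *
 *	while (vfs_drt_mark_pages(scmap, offset, length, &new_dirty) != KERN_SUCCESS) {
 *		(only a partial update was done... push some pages, then retry)
 *		offset += (new_dirty * PAGE_SIZE_64);
 *		length -= (new_dirty * PAGE_SIZE);
 *	}
 */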
91447636 6094#if 0
55e303ae
A
6095static kern_return_t
6096vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length)
6097{
6098 return(vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0));
6099}
91447636 6100#endif
55e303ae
A
6101
6102/*
6103 * Get a cluster of dirty pages.
6104 *
6105 * This is a public interface.
6106 *
6107 * cmapp
6108 * Pointer to storage managed by drt_mark_pages. Note that this must
6109 * be NULL or a value set by drt_mark_pages.
6110 *
6111 * offsetp
6112 * Returns the byte offset into the file of the first page in the cluster.
6113 *
6114 * lengthp
6115 * Returns the length in bytes of the cluster of dirty pages.
6116 *
6117 * Returns success if a cluster was found. If KERN_FAILURE is returned, there
 6118 * are no dirty pages meeting the minimum size criteria. Private storage will
 6119 * be released if there are no more dirty pages left in the map.
6120 *
6121 */
6122static kern_return_t
6123vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp)
6124{
6125 struct vfs_drt_clustermap *cmap;
6126 u_int64_t offset;
6127 u_int length;
2d21ac55
A
6128 u_int32_t j;
6129 int index, i, fs, ls;
55e303ae
A
6130
6131 /* sanity */
6132 if ((cmapp == NULL) || (*cmapp == NULL))
6133 return(KERN_FAILURE);
6134 cmap = *cmapp;
6135
6136 /* walk the hashtable */
6137 for (offset = 0, j = 0; j < cmap->scm_modulus; offset += (DRT_BITVECTOR_PAGES * PAGE_SIZE), j++) {
6138 index = DRT_HASH(cmap, offset);
6139
6140 if (DRT_HASH_VACANT(cmap, index) || (DRT_HASH_GET_COUNT(cmap, index) == 0))
6141 continue;
6142
6143 /* scan the bitfield for a string of bits */
6144 fs = -1;
6145
6146 for (i = 0; i < DRT_BITVECTOR_PAGES; i++) {
6147 if (DRT_HASH_TEST_BIT(cmap, index, i)) {
6148 fs = i;
6149 break;
6150 }
6151 }
6152 if (fs == -1) {
6153 /* didn't find any bits set */
6154 panic("vfs_drt: entry summary count > 0 but no bits set in map");
6155 }
6156 for (ls = 0; i < DRT_BITVECTOR_PAGES; i++, ls++) {
6157 if (!DRT_HASH_TEST_BIT(cmap, index, i))
6158 break;
6159 }
6160
6161 /* compute offset and length, mark pages clean */
6162 offset = DRT_HASH_GET_ADDRESS(cmap, index) + (PAGE_SIZE * fs);
6163 length = ls * PAGE_SIZE;
6164 vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0);
6165 cmap->scm_lastclean = index;
6166
6167 /* return successful */
6168 *offsetp = (off_t)offset;
6169 *lengthp = length;
6170
6171 vfs_drt_trace(cmap, DRT_DEBUG_RETCLUSTER, (int)offset, (int)length, 0, 0);
6172 return(KERN_SUCCESS);
6173 }
6174 /*
6175 * We didn't find anything... hashtable is empty
6176 * emit stats into trace buffer and
6177 * then free it
6178 */
6179 vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
6180 cmap->scm_modulus,
6181 cmap->scm_buckets,
6182 cmap->scm_lastclean,
6183 cmap->scm_iskips);
6184
6185 vfs_drt_free_map(cmap);
6186 *cmapp = NULL;
6187
6188 return(KERN_FAILURE);
6189}
6190
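/*
 * Typical drain loop (an illustrative sketch mirroring sparse_cluster_push
 * above, not a new interface):
 *
 *	off_t offset;
 *	u_int length;
 *
 *	while (vfs_drt_get_cluster(scmap, &offset, &length) == KERN_SUCCESS) {
 *		(issue the write for the range [offset, offset + length))
 *	}
 *
 * once the last dirty page has been returned, the map is freed and *scmap
 * is set to NULL, so the caller must not reuse the old map pointer.
 */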
6191
6192static kern_return_t
6193vfs_drt_control(void **cmapp, int op_type)
6194{
6195 struct vfs_drt_clustermap *cmap;
6196
6197 /* sanity */
6198 if ((cmapp == NULL) || (*cmapp == NULL))
6199 return(KERN_FAILURE);
6200 cmap = *cmapp;
6201
6202 switch (op_type) {
6203 case 0:
6204 /* emit stats into trace buffer */
6205 vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
6206 cmap->scm_modulus,
6207 cmap->scm_buckets,
6208 cmap->scm_lastclean,
6209 cmap->scm_iskips);
6210
6211 vfs_drt_free_map(cmap);
6212 *cmapp = NULL;
6213 break;
6214
6215 case 1:
6216 cmap->scm_lastclean = 0;
6217 break;
6218 }
6219 return(KERN_SUCCESS);
6220}
6221
6222
6223
6224/*
6225 * Emit a summary of the state of the clustermap into the trace buffer
6226 * along with some caller-provided data.
6227 */
91447636 6228#if KDEBUG
55e303ae 6229static void
91447636 6230vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, int code, int arg1, int arg2, int arg3, int arg4)
55e303ae
A
6231{
6232 KERNEL_DEBUG(code, arg1, arg2, arg3, arg4, 0);
6233}
91447636
A
6234#else
6235static void
6236vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, __unused int code,
6237 __unused int arg1, __unused int arg2, __unused int arg3,
6238 __unused int arg4)
6239{
6240}
6241#endif
55e303ae 6242
91447636 6243#if 0
55e303ae
A
6244/*
6245 * Perform basic sanity check on the hash entry summary count
6246 * vs. the actual bits set in the entry.
6247 */
6248static void
6249vfs_drt_sanity(struct vfs_drt_clustermap *cmap)
6250{
6251 int index, i;
6252 int bits_on;
6253
6254 for (index = 0; index < cmap->scm_modulus; index++) {
6255 if (DRT_HASH_VACANT(cmap, index))
6256 continue;
6257
6258 for (bits_on = 0, i = 0; i < DRT_BITVECTOR_PAGES; i++) {
6259 if (DRT_HASH_TEST_BIT(cmap, index, i))
6260 bits_on++;
6261 }
6262 if (bits_on != DRT_HASH_GET_COUNT(cmap, index))
6263 panic("bits_on = %d, index = %d\n", bits_on, index);
6264 }
b4c24cb9 6265}
91447636 6266#endif