bsd/vfs/vfs_cluster.c (xnu-1504.15.3)
1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)vfs_cluster.c 8.10 (Berkeley) 3/28/95
62 */
63
64 #include <sys/param.h>
65 #include <sys/proc_internal.h>
66 #include <sys/buf_internal.h>
67 #include <sys/mount_internal.h>
68 #include <sys/vnode_internal.h>
69 #include <sys/trace.h>
70 #include <sys/malloc.h>
71 #include <sys/time.h>
72 #include <sys/kernel.h>
73 #include <sys/resourcevar.h>
74 #include <sys/uio_internal.h>
75 #include <libkern/libkern.h>
76 #include <machine/machine_routines.h>
77
78 #include <sys/ubc_internal.h>
79 #include <vm/vnode_pager.h>
80
81 #include <mach/mach_types.h>
82 #include <mach/memory_object_types.h>
83 #include <mach/vm_map.h>
84 #include <mach/upl.h>
85
86 #include <vm/vm_kern.h>
87 #include <vm/vm_map.h>
88 #include <vm/vm_pageout.h>
89
90 #include <sys/kdebug.h>
91 #include <libkern/OSAtomic.h>
92
93 #if 0
94 #undef KERNEL_DEBUG
95 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
96 #endif
97
98
99 #define CL_READ 0x01
100 #define CL_WRITE 0x02
101 #define CL_ASYNC 0x04
102 #define CL_COMMIT 0x08
103 #define CL_PAGEOUT 0x10
104 #define CL_AGE 0x20
105 #define CL_NOZERO 0x40
106 #define CL_PAGEIN 0x80
107 #define CL_DEV_MEMORY 0x100
108 #define CL_PRESERVE 0x200
109 #define CL_THROTTLE 0x400
110 #define CL_KEEPCACHED 0x800
111 #define CL_DIRECT_IO 0x1000
112 #define CL_PASSIVE 0x2000
113 #define CL_IOSTREAMING 0x4000
114
115 #define MAX_VECTOR_UPL_ELEMENTS 8
116 #define MAX_VECTOR_UPL_SIZE ((2 * MAX_UPL_SIZE) * PAGE_SIZE)
117
118 extern upl_t vector_upl_create(vm_offset_t);
119 extern boolean_t vector_upl_is_valid(upl_t);
120 extern boolean_t vector_upl_set_subupl(upl_t,upl_t, u_int32_t);
121 extern void vector_upl_set_pagelist(upl_t);
122 extern void vector_upl_set_iostate(upl_t, upl_t, vm_offset_t, u_int32_t);
123
124 struct clios {
125 u_int io_completed; /* amount of io that has currently completed */
126 u_int io_issued; /* amount of io that was successfully issued */
127 int io_error; /* error code of first error encountered */
128 int io_wanted; /* someone is sleeping waiting for a change in state */
129 };
130
131 static lck_grp_t *cl_mtx_grp;
132 static lck_attr_t *cl_mtx_attr;
133 static lck_grp_attr_t *cl_mtx_grp_attr;
134 static lck_mtx_t *cl_mtxp;
135 static lck_mtx_t *cl_transaction_mtxp;
136
137
138 #define IO_UNKNOWN 0
139 #define IO_DIRECT 1
140 #define IO_CONTIG 2
141 #define IO_COPY 3
142
143 #define PUSH_DELAY 0x01
144 #define PUSH_ALL 0x02
145 #define PUSH_SYNC 0x04
146
147
148 static void cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset);
149 static void cluster_wait_IO(buf_t cbp_head, int async);
150 static void cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait);
151
152 static int cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length);
153
154 static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
155 int flags, buf_t real_bp, struct clios *iostate, int (*)(buf_t, void *), void *callback_arg);
156 static int cluster_iodone(buf_t bp, void *callback_arg);
157 static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags);
158 static int cluster_hard_throttle_on(vnode_t vp, uint32_t);
159
160 static void cluster_syncup(vnode_t vp, off_t newEOF, int (*)(buf_t, void *), void *callback_arg);
161
162 static void cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference);
163 static int cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference);
164
165 static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags,
166 int (*)(buf_t, void *), void *callback_arg);
167 static int cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
168 int flags, int (*)(buf_t, void *), void *callback_arg);
169 static int cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
170 int (*)(buf_t, void *), void *callback_arg, int flags);
171
172 static int cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF,
173 off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg);
174 static int cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF,
175 int *write_type, u_int32_t *write_length, int flags, int (*)(buf_t, void *), void *callback_arg);
176 static int cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF,
177 int *write_type, u_int32_t *write_length, int (*)(buf_t, void *), void *callback_arg, int bflag);
178
179 static int cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*)(buf_t, void *), void *callback_arg);
180
181 static int cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
182 static void cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
183
184 static int cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg);
185
186 static int cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int (*)(buf_t, void *), void *callback_arg);
187
188 static void sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg);
189 static void sparse_cluster_push(void **cmapp, vnode_t vp, off_t EOF, int push_flag, int (*)(buf_t, void *), void *callback_arg);
190 static void sparse_cluster_add(void **cmapp, vnode_t vp, struct cl_extent *, off_t EOF, int (*)(buf_t, void *), void *callback_arg);
191
192 static kern_return_t vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp);
193 static kern_return_t vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp);
194 static kern_return_t vfs_drt_control(void **cmapp, int op_type);
195
196
197 /*
198 * limit the internal I/O size so that we
199 * can represent it in a 32 bit int
200 */
201 #define MAX_IO_REQUEST_SIZE (1024 * 1024 * 512)
202 #define MAX_IO_CONTIG_SIZE (MAX_UPL_SIZE * PAGE_SIZE)
203 #define MAX_VECTS 16
204 #define MIN_DIRECT_WRITE_SIZE (4 * PAGE_SIZE)
205
206 #define IO_SCALE(vp, base) (vp->v_mount->mnt_ioscale * base)
207 #define MAX_CLUSTER_SIZE(vp) (cluster_max_io_size(vp->v_mount, CL_WRITE))
208 #define MAX_PREFETCH(vp, io_size) (io_size * IO_SCALE(vp, 3))
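/*
 * Worked example (added for exposition; the values below are hypothetical):
 * with mnt_ioscale == 1 and a 1MB cluster_max_io_size for reads,
 * MAX_PREFETCH(vp, io_size) evaluates to 1MB * 1 * 3 = 3MB, i.e. the
 * read-ahead window is allowed to grow to three maximal cluster reads.
 */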
209
210
211 int speculative_reads_disabled = 0;
212
213 /*
214 * throttle the number of async writes that
215 * can be outstanding on a single vnode
216 * before we issue a synchronous write
217 */
218 #define HARD_THROTTLE_MAXCNT 0
219 #define HARD_THROTTLE_MAXSIZE (32 * 1024)
220
221 int hard_throttle_on_root = 0;
222 struct timeval priority_IO_timestamp_for_root;
223
224
225 void
226 cluster_init(void) {
227 /*
228 * allocate lock group attribute and group
229 */
230 cl_mtx_grp_attr = lck_grp_attr_alloc_init();
231 cl_mtx_grp = lck_grp_alloc_init("cluster I/O", cl_mtx_grp_attr);
232
233 /*
234 * allocate the lock attribute
235 */
236 cl_mtx_attr = lck_attr_alloc_init();
237
238 /*
239 * allocate and initialize mutexes used to protect updates and waits
240 * on the cluster_io context
241 */
242 cl_mtxp = lck_mtx_alloc_init(cl_mtx_grp, cl_mtx_attr);
243
244 if (cl_mtxp == NULL)
245 panic("cluster_init: failed to allocate cl_mtxp");
246
247 cl_transaction_mtxp = lck_mtx_alloc_init(cl_mtx_grp, cl_mtx_attr);
248
249 if (cl_transaction_mtxp == NULL)
250 panic("cluster_init: failed to allocate cl_transaction_mtxp");
251 }
252
253
254 uint32_t
255 cluster_max_io_size(mount_t mp, int type)
256 {
257 uint32_t max_io_size;
258 uint32_t segcnt;
259 uint32_t maxcnt;
260
261 switch(type) {
262
263 case CL_READ:
264 segcnt = mp->mnt_segreadcnt;
265 maxcnt = mp->mnt_maxreadcnt;
266 break;
267 case CL_WRITE:
268 segcnt = mp->mnt_segwritecnt;
269 maxcnt = mp->mnt_maxwritecnt;
270 break;
271 default:
272 segcnt = min(mp->mnt_segreadcnt, mp->mnt_segwritecnt);
273 maxcnt = min(mp->mnt_maxreadcnt, mp->mnt_maxwritecnt);
274 break;
275 }
276 if (segcnt > MAX_UPL_SIZE) {
277 /*
278 * don't allow a size beyond the max UPL size we can create
279 */
280 segcnt = MAX_UPL_SIZE;
281 }
282 max_io_size = min((segcnt * PAGE_SIZE), maxcnt);
283
284 if (max_io_size < (MAX_UPL_TRANSFER * PAGE_SIZE)) {
285 /*
286 * don't allow a size smaller than the old fixed limit
287 */
288 max_io_size = (MAX_UPL_TRANSFER * PAGE_SIZE);
289 } else {
290 /*
291 * make sure the size specified is a multiple of PAGE_SIZE
292 */
293 max_io_size &= ~PAGE_MASK;
294 }
295 return (max_io_size);
296 }
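/*
 * Illustrative usage sketch (added for exposition; not part of the original
 * source): callers on the read path typically clip each I/O pass to this
 * per-mount limit before building a UPL, along the lines of the hypothetical
 * helper below.
 */
#if 0
static u_int32_t
example_clip_read_size(vnode_t vp, off_t resid)
{
	u_int32_t max_io_size;

	/* per-mount ceiling for a single cluster read */
	max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);

	if (resid < (off_t)max_io_size)
		return ((u_int32_t)resid);

	return (max_io_size);
}
#endif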
297
298
299
300
301 #define CLW_ALLOCATE 0x01
302 #define CLW_RETURNLOCKED 0x02
303 #define CLW_IONOCACHE 0x04
304 #define CLW_IOPASSIVE 0x08
305
306 /*
307 * if the read ahead context doesn't yet exist,
308 * allocate and initialize it...
309 * the vnode lock serializes multiple callers
310 * during the actual assignment... first one
311 * to grab the lock wins... the other callers
312 * will release the now unnecessary storage
313 *
314 * once the context is present, try to grab (but don't block on)
315 * the lock associated with it... if someone
316 * else currently owns it, then the read
317 * will run without read-ahead. this allows
318 * multiple readers to run in parallel and
319 * since there's only 1 read ahead context,
320 * there's no real loss in only allowing 1
321 * reader to have read-ahead enabled.
322 */
323 static struct cl_readahead *
324 cluster_get_rap(vnode_t vp)
325 {
326 struct ubc_info *ubc;
327 struct cl_readahead *rap;
328
329 ubc = vp->v_ubcinfo;
330
331 if ((rap = ubc->cl_rahead) == NULL) {
332 MALLOC_ZONE(rap, struct cl_readahead *, sizeof *rap, M_CLRDAHEAD, M_WAITOK);
333
334 bzero(rap, sizeof *rap);
335 rap->cl_lastr = -1;
336 lck_mtx_init(&rap->cl_lockr, cl_mtx_grp, cl_mtx_attr);
337
338 vnode_lock(vp);
339
340 if (ubc->cl_rahead == NULL)
341 ubc->cl_rahead = rap;
342 else {
343 lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp);
344 FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD);
345 rap = ubc->cl_rahead;
346 }
347 vnode_unlock(vp);
348 }
349 if (lck_mtx_try_lock(&rap->cl_lockr) == TRUE)
350 return(rap);
351
352 return ((struct cl_readahead *)NULL);
353 }
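/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): a read path that wants read-ahead grabs the context with
 * cluster_get_rap() and, when the try-lock succeeds (non-NULL return),
 * must drop cl_lockr once it has finished updating the read-ahead state.
 */
#if 0
static void
example_with_readahead(vnode_t vp, struct cl_extent *extent, off_t filesize)
{
	struct cl_readahead *rap;

	if ((rap = cluster_get_rap(vp)) != NULL) {
		cluster_read_ahead(vp, extent, filesize, rap, NULL, NULL, 0);
		rap->cl_lastr = extent->e_addr;

		lck_mtx_unlock(&rap->cl_lockr);
	}
}
#endif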
354
355
356 /*
357 * if the write behind context doesn't yet exist,
358 * and CLW_ALLOCATE is specified, allocate and initialize it...
359 * the vnode lock serializes multiple callers
360 * during the actual assignment... first one
361 * to grab the lock wins... the other callers
362 * will release the now unnecessary storage
363 *
364 * if CLW_RETURNLOCKED is set, grab (blocking if necessary)
365 * the lock associated with the write behind context before
366 * returning
367 */
368
369 static struct cl_writebehind *
370 cluster_get_wbp(vnode_t vp, int flags)
371 {
372 struct ubc_info *ubc;
373 struct cl_writebehind *wbp;
374
375 ubc = vp->v_ubcinfo;
376
377 if ((wbp = ubc->cl_wbehind) == NULL) {
378
379 if ( !(flags & CLW_ALLOCATE))
380 return ((struct cl_writebehind *)NULL);
381
382 MALLOC_ZONE(wbp, struct cl_writebehind *, sizeof *wbp, M_CLWRBEHIND, M_WAITOK);
383
384 bzero(wbp, sizeof *wbp);
385 lck_mtx_init(&wbp->cl_lockw, cl_mtx_grp, cl_mtx_attr);
386
387 vnode_lock(vp);
388
389 if (ubc->cl_wbehind == NULL)
390 ubc->cl_wbehind = wbp;
391 else {
392 lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
393 FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
394 wbp = ubc->cl_wbehind;
395 }
396 vnode_unlock(vp);
397 }
398 if (flags & CLW_RETURNLOCKED)
399 lck_mtx_lock(&wbp->cl_lockw);
400
401 return (wbp);
402 }
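/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): a write path that needs the write-behind context allocates it
 * on demand and asks for it back already locked, then drops cl_lockw when
 * it has finished manipulating the delayed-write state.
 */
#if 0
static void
example_with_writebehind(vnode_t vp)
{
	struct cl_writebehind *wbp;

	wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);

	/* ... update or push the delayed-write clusters ... */

	lck_mtx_unlock(&wbp->cl_lockw);
}
#endif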
403
404
405 static void
406 cluster_syncup(vnode_t vp, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg)
407 {
408 struct cl_writebehind *wbp;
409
410 if ((wbp = cluster_get_wbp(vp, 0)) != NULL) {
411
412 if (wbp->cl_number) {
413 lck_mtx_lock(&wbp->cl_lockw);
414
415 cluster_try_push(wbp, vp, newEOF, PUSH_ALL | PUSH_SYNC, callback, callback_arg);
416
417 lck_mtx_unlock(&wbp->cl_lockw);
418 }
419 }
420 }
421
422
423 static int
424 cluster_hard_throttle_on(vnode_t vp, uint32_t hard_throttle)
425 {
426 struct uthread *ut;
427
428 if (hard_throttle) {
429 static struct timeval hard_throttle_maxelapsed = { 0, 200000 };
430
431 if (vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV) {
432 struct timeval elapsed;
433
434 if (hard_throttle_on_root)
435 return(1);
436
437 microuptime(&elapsed);
438 timevalsub(&elapsed, &priority_IO_timestamp_for_root);
439
440 if (timevalcmp(&elapsed, &hard_throttle_maxelapsed, <))
441 return(1);
442 }
443 }
444 if (throttle_get_io_policy(&ut) == IOPOL_THROTTLE) {
445 if (throttle_io_will_be_throttled(-1, vp->v_mount)) {
446 return(1);
447 }
448 }
449 return(0);
450 }
451
452
453 static int
454 cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags)
455 {
456 int upl_abort_code = 0;
457 int page_in = 0;
458 int page_out = 0;
459
460 if (io_flags & B_PHYS)
461 /*
462 * direct write of any flavor, or a direct read that wasn't aligned
463 */
464 ubc_upl_commit_range(upl, upl_offset, abort_size, UPL_COMMIT_FREE_ON_EMPTY);
465 else {
466 if (io_flags & B_PAGEIO) {
467 if (io_flags & B_READ)
468 page_in = 1;
469 else
470 page_out = 1;
471 }
472 if (io_flags & B_CACHE)
473 /*
474 * leave pages in the cache unchanged on error
475 */
476 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
477 else if (page_out && (error != ENXIO))
478 /*
479 * transient error... leave pages unchanged
480 */
481 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
482 else if (page_in)
483 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
484 else
485 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
486
487 ubc_upl_abort_range(upl, upl_offset, abort_size, upl_abort_code);
488 }
489 return (upl_abort_code);
490 }
491
492
493 static int
494 cluster_iodone(buf_t bp, void *callback_arg)
495 {
496 int b_flags;
497 int error;
498 int total_size;
499 int total_resid;
500 int upl_offset;
501 int zero_offset;
502 int pg_offset = 0;
503 int commit_size = 0;
504 int upl_flags = 0;
505 int transaction_size = 0;
506 upl_t upl;
507 buf_t cbp;
508 buf_t cbp_head;
509 buf_t cbp_next;
510 buf_t real_bp;
511 struct clios *iostate;
512 boolean_t transaction_complete = FALSE;
513
514 cbp_head = (buf_t)(bp->b_trans_head);
515
516 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_START,
517 cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
518
519 if (cbp_head->b_trans_next || !(cbp_head->b_flags & B_EOT)) {
520
521 lck_mtx_lock_spin(cl_transaction_mtxp);
522
523 bp->b_flags |= B_TDONE;
524
525 for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
526 /*
527 * all I/O requests that are part of this transaction
528 * have to complete before we can process it
529 */
530 if ( !(cbp->b_flags & B_TDONE)) {
531
532 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
533 cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0);
534
535 lck_mtx_unlock(cl_transaction_mtxp);
536 return 0;
537 }
538 if (cbp->b_flags & B_EOT)
539 transaction_complete = TRUE;
540 }
541 lck_mtx_unlock(cl_transaction_mtxp);
542
543 if (transaction_complete == FALSE) {
544 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
545 cbp_head, 0, 0, 0, 0);
546
547 return 0;
548 }
549 }
550 error = 0;
551 total_size = 0;
552 total_resid = 0;
553
554 cbp = cbp_head;
555 upl_offset = cbp->b_uploffset;
556 upl = cbp->b_upl;
557 b_flags = cbp->b_flags;
558 real_bp = cbp->b_real_bp;
559 zero_offset= cbp->b_validend;
560 iostate = (struct clios *)cbp->b_iostate;
561
562 if (real_bp)
563 real_bp->b_dev = cbp->b_dev;
564
565 while (cbp) {
566 if ((cbp->b_flags & B_ERROR) && error == 0)
567 error = cbp->b_error;
568
569 total_resid += cbp->b_resid;
570 total_size += cbp->b_bcount;
571
572 cbp_next = cbp->b_trans_next;
573
574 if (cbp_next == NULL)
575 /*
576 * compute the overall size of the transaction
577 * in case we created one that has 'holes' in it
578 * 'total_size' represents the amount of I/O we
579 * did, not the span of the transaction w/r to the UPL
580 */
581 transaction_size = cbp->b_uploffset + cbp->b_bcount - upl_offset;
582
583 if (cbp != cbp_head)
584 free_io_buf(cbp);
585
586 cbp = cbp_next;
587 }
588 if (error == 0 && total_resid)
589 error = EIO;
590
591 if (error == 0) {
592 int (*cliodone_func)(buf_t, void *) = (int (*)(buf_t, void *))(cbp_head->b_cliodone);
593
594 if (cliodone_func != NULL) {
595 cbp_head->b_bcount = transaction_size;
596
597 error = (*cliodone_func)(cbp_head, callback_arg);
598 }
599 }
600 if (zero_offset)
601 cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp);
602
603 free_io_buf(cbp_head);
604
605 if (iostate) {
606 int need_wakeup = 0;
607
608 /*
609 * someone has issued multiple I/Os asynchronously
610 * and is waiting for them to complete (streaming)
611 */
612 lck_mtx_lock_spin(cl_mtxp);
613
614 if (error && iostate->io_error == 0)
615 iostate->io_error = error;
616
617 iostate->io_completed += total_size;
618
619 if (iostate->io_wanted) {
620 /*
621 * someone is waiting for the state of
622 * this io stream to change
623 */
624 iostate->io_wanted = 0;
625 need_wakeup = 1;
626 }
627 lck_mtx_unlock(cl_mtxp);
628
629 if (need_wakeup)
630 wakeup((caddr_t)&iostate->io_wanted);
631 }
632
633 if (b_flags & B_COMMIT_UPL) {
634
635 pg_offset = upl_offset & PAGE_MASK;
636 commit_size = (pg_offset + transaction_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
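/*
 * Worked example (added for exposition; assumes a 4KB PAGE_SIZE and
 * hypothetical offsets): if the transaction started at upl_offset 0x1200
 * and transaction_size is 0x2e00, then pg_offset is 0x200 and commit_size
 * is (0x200 + 0x2e00 + 0xfff) & ~0xfff = 0x3000, so the commit/abort below
 * covers the three pages from (upl_offset - pg_offset) = 0x1000 to 0x4000.
 */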
637
638 if (error)
639 upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, commit_size, error, b_flags);
640 else {
641 upl_flags = UPL_COMMIT_FREE_ON_EMPTY;
642
643 if ((b_flags & B_PHYS) && (b_flags & B_READ))
644 upl_flags |= UPL_COMMIT_SET_DIRTY;
645
646 if (b_flags & B_AGE)
647 upl_flags |= UPL_COMMIT_INACTIVATE;
648
649 ubc_upl_commit_range(upl, upl_offset - pg_offset, commit_size, upl_flags);
650 }
651 }
652 if ((b_flags & B_NEED_IODONE) && real_bp) {
653 if (error) {
654 real_bp->b_flags |= B_ERROR;
655 real_bp->b_error = error;
656 }
657 real_bp->b_resid = total_resid;
658
659 buf_biodone(real_bp);
660 }
661 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
662 upl, upl_offset - pg_offset, commit_size, (error << 24) | upl_flags, 0);
663
664 return (error);
665 }
666
667
668 uint32_t
669 cluster_hard_throttle_limit(vnode_t vp, uint32_t *limit, uint32_t hard_throttle)
670 {
671 if (cluster_hard_throttle_on(vp, hard_throttle)) {
672 *limit = HARD_THROTTLE_MAXSIZE;
673 return 1;
674 }
675 return 0;
676 }
677
678
679 void
680 cluster_zero(upl_t upl, upl_offset_t upl_offset, int size, buf_t bp)
681 {
682
683 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_START,
684 upl_offset, size, bp, 0, 0);
685
686 if (bp == NULL || bp->b_datap == 0) {
687 upl_page_info_t *pl;
688 addr64_t zero_addr;
689
690 pl = ubc_upl_pageinfo(upl);
691
692 if (upl_device_page(pl) == TRUE) {
693 zero_addr = ((addr64_t)upl_phys_page(pl, 0) << 12) + upl_offset;
694
695 bzero_phys_nc(zero_addr, size);
696 } else {
697 while (size) {
698 int page_offset;
699 int page_index;
700 int zero_cnt;
701
702 page_index = upl_offset / PAGE_SIZE;
703 page_offset = upl_offset & PAGE_MASK;
704
705 zero_addr = ((addr64_t)upl_phys_page(pl, page_index) << 12) + page_offset;
706 zero_cnt = min(PAGE_SIZE - page_offset, size);
707
708 bzero_phys(zero_addr, zero_cnt);
709
710 size -= zero_cnt;
711 upl_offset += zero_cnt;
712 }
713 }
714 } else
715 bzero((caddr_t)((vm_offset_t)bp->b_datap + upl_offset), size);
716
717 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_END,
718 upl_offset, size, 0, 0, 0);
719 }
720
721
722 static void
723 cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset)
724 {
725 cbp_head->b_validend = zero_offset;
726 cbp_tail->b_flags |= B_EOT;
727 }
728
729 static void
730 cluster_wait_IO(buf_t cbp_head, int async)
731 {
732 buf_t cbp;
733
734 if (async) {
735 /*
736 * async callback completion will not normally
737 * generate a wakeup upon I/O completion...
738 * by setting BL_WANTED, we will force a wakeup
739 * to occur as any outstanding I/Os complete...
740 * I/Os already completed will have BL_CALLDONE already
741 * set and we won't block in buf_biowait_callback..
742 * note that we're actually waiting for the bp to have
743 * completed the callback function... only then
744 * can we safely take back ownership of the bp...
745 * need the main buf mutex in order to safely
746 * update b_lflags
747 */
748 buf_list_lock();
749
750 for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next)
751 cbp->b_lflags |= BL_WANTED;
752
753 buf_list_unlock();
754 }
755 for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
756 if (async)
757 buf_biowait_callback(cbp);
758 else
759 buf_biowait(cbp);
760 }
761 }
762
763 static void
764 cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait)
765 {
766 buf_t cbp;
767 int error;
768
769 /*
770 * cluster_complete_transaction will
771 * only be called if we've issued a complete chain in synchronous mode
772 * or, we've already done a cluster_wait_IO on an incomplete chain
773 */
774 if (needwait) {
775 for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next)
776 buf_biowait(cbp);
777 }
778 /*
779 * we've already waited on all of the I/Os in this transaction,
780 * so mark all of the buf_t's in this transaction as B_TDONE
781 * so that cluster_iodone sees the transaction as completed
782 */
783 for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next)
784 cbp->b_flags |= B_TDONE;
785
786 error = cluster_iodone(*cbp_head, callback_arg);
787
788 if ( !(flags & CL_ASYNC) && error && *retval == 0) {
789 if (((flags & (CL_PAGEOUT | CL_KEEPCACHED)) != CL_PAGEOUT) || (error != ENXIO))
790 *retval = error;
791 }
792 *cbp_head = (buf_t)NULL;
793 }
794
795
796 static int
797 cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
798 int flags, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
799 {
800 buf_t cbp;
801 u_int size;
802 u_int io_size;
803 int io_flags;
804 int bmap_flags;
805 int error = 0;
806 int retval = 0;
807 buf_t cbp_head = NULL;
808 buf_t cbp_tail = NULL;
809 int trans_count = 0;
810 int max_trans_count;
811 u_int pg_count;
812 int pg_offset;
813 u_int max_iosize;
814 u_int max_vectors;
815 int priv;
816 int zero_offset = 0;
817 int async_throttle = 0;
818 mount_t mp;
819 vm_offset_t upl_end_offset;
820 boolean_t need_EOT = FALSE;
821
822 /*
823 * we currently don't support buffers larger than a page
824 */
825 if (real_bp && non_rounded_size > PAGE_SIZE)
826 panic("%s(): Called with real buffer of size %d bytes which "
827 "is greater than the maximum allowed size of "
828 "%d bytes (the system PAGE_SIZE).\n",
829 __FUNCTION__, non_rounded_size, PAGE_SIZE);
830
831 mp = vp->v_mount;
832
833 /*
834 * we don't want to do any funny rounding of the size for IO requests
835 * coming through the DIRECT or CONTIGUOUS paths... those pages don't
836 * belong to us... we can't extend (nor do we need to) the I/O to fill
837 * out a page
838 */
839 if (mp->mnt_devblocksize > 1 && !(flags & (CL_DEV_MEMORY | CL_DIRECT_IO))) {
840 /*
841 * round the requested size up so that this I/O ends on a
842 * page boundary in case this is a 'write'... if the filesystem
843 * has blocks allocated to back the page beyond the EOF, we want to
844 * make sure to write out the zero's that are sitting beyond the EOF
845 * so that in case the filesystem doesn't explicitly zero this area
846 * if a hole is created via a lseek/write beyond the current EOF,
847 * it will return zeros when it's read back from the disk. If the
848 * physical allocation doesn't extend for the whole page, we'll
849 * only write/read from the disk up to the end of this allocation
850 * via the extent info returned from the VNOP_BLOCKMAP call.
851 */
852 pg_offset = upl_offset & PAGE_MASK;
853
854 size = (((non_rounded_size + pg_offset) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - pg_offset;
855 } else {
856 /*
857 * anyone advertising a blocksize of 1 byte probably
858 * can't deal with us rounding up the request size
859 * AFP is one such filesystem/device
860 */
861 size = non_rounded_size;
862 }
863 upl_end_offset = upl_offset + size;
864
865 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START, (int)f_offset, size, upl_offset, flags, 0);
866
867 /*
868 * Set the maximum transaction size to the maximum desired number of
869 * buffers.
870 */
871 max_trans_count = 8;
872 if (flags & CL_DEV_MEMORY)
873 max_trans_count = 16;
874
875 if (flags & CL_READ) {
876 io_flags = B_READ;
877 bmap_flags = VNODE_READ;
878
879 max_iosize = mp->mnt_maxreadcnt;
880 max_vectors = mp->mnt_segreadcnt;
881 } else {
882 io_flags = B_WRITE;
883 bmap_flags = VNODE_WRITE;
884
885 max_iosize = mp->mnt_maxwritecnt;
886 max_vectors = mp->mnt_segwritecnt;
887 }
888 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_NONE, max_iosize, max_vectors, mp->mnt_devblocksize, 0, 0);
889
890 /*
891 * make sure the maximum iosize is a
892 * multiple of the page size
893 */
894 max_iosize &= ~PAGE_MASK;
895
896 /*
897 * Ensure the maximum iosize is sensible.
898 */
899 if (!max_iosize)
900 max_iosize = PAGE_SIZE;
901
902 if (flags & CL_THROTTLE) {
903 if ( !(flags & CL_PAGEOUT) && cluster_hard_throttle_on(vp, 1)) {
904 if (max_iosize > HARD_THROTTLE_MAXSIZE)
905 max_iosize = HARD_THROTTLE_MAXSIZE;
906 async_throttle = HARD_THROTTLE_MAXCNT;
907 } else {
908 if ( (flags & CL_DEV_MEMORY) )
909 async_throttle = IO_SCALE(vp, VNODE_ASYNC_THROTTLE);
910 else {
911 u_int max_cluster;
912 u_int max_cluster_size;
913 u_int max_prefetch;
914
915 max_cluster_size = MAX_CLUSTER_SIZE(vp);
916 max_prefetch = MAX_PREFETCH(vp, cluster_max_io_size(vp->v_mount, CL_READ));
917
918 if (max_iosize > max_cluster_size)
919 max_cluster = max_cluster_size;
920 else
921 max_cluster = max_iosize;
922
923 if (size < max_cluster)
924 max_cluster = size;
925
926 async_throttle = min(IO_SCALE(vp, VNODE_ASYNC_THROTTLE), (max_prefetch / max_cluster) - 1);
927 }
928 }
929 }
930 if (flags & CL_AGE)
931 io_flags |= B_AGE;
932 if (flags & (CL_PAGEIN | CL_PAGEOUT))
933 io_flags |= B_PAGEIO;
934 if (flags & (CL_IOSTREAMING))
935 io_flags |= B_IOSTREAMING;
936 if (flags & CL_COMMIT)
937 io_flags |= B_COMMIT_UPL;
938 if (flags & CL_PRESERVE)
939 io_flags |= B_PHYS;
940 if (flags & CL_KEEPCACHED)
941 io_flags |= B_CACHE;
942 if (flags & CL_PASSIVE)
943 io_flags |= B_PASSIVE;
944 if (vp->v_flag & VSYSTEM)
945 io_flags |= B_META;
946
947 if ((flags & CL_READ) && ((upl_offset + non_rounded_size) & PAGE_MASK) && (!(flags & CL_NOZERO))) {
948 /*
949 * we are going to end up with a page that we can't
950 * complete (the file size wasn't a multiple of PAGE_SIZE and
951 * we're trying to read to the end of the file), so go ahead
952 * and zero out the portion of the page we can't
953 * read in from the file
954 */
955 zero_offset = upl_offset + non_rounded_size;
956 }
957 while (size) {
958 daddr64_t blkno;
959 daddr64_t lblkno;
960 u_int io_size_wanted;
961 size_t io_size_tmp;
962
963 if (size > max_iosize)
964 io_size = max_iosize;
965 else
966 io_size = size;
967
968 io_size_wanted = io_size;
969 io_size_tmp = (size_t)io_size;
970
971 if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &io_size_tmp, NULL, bmap_flags, NULL)))
972 break;
973
974 if (io_size_tmp > io_size_wanted)
975 io_size = io_size_wanted;
976 else
977 io_size = (u_int)io_size_tmp;
978
979 if (real_bp && (real_bp->b_blkno == real_bp->b_lblkno))
980 real_bp->b_blkno = blkno;
981
982 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 24)) | DBG_FUNC_NONE,
983 (int)f_offset, (int)(blkno>>32), (int)blkno, io_size, 0);
984
985 if (io_size == 0) {
986 /*
987 * vnop_blockmap didn't return an error... however, it did
988 * return an extent size of 0 which means we can't
989 * make forward progress on this I/O... a hole in the
990 * file would be returned as a blkno of -1 with a non-zero io_size
991 * a real extent is returned with a blkno != -1 and a non-zero io_size
992 */
993 error = EINVAL;
994 break;
995 }
996 if ( !(flags & CL_READ) && blkno == -1) {
997 off_t e_offset;
998 int pageout_flags;
999
1000 if(upl_get_internal_vectorupl(upl))
1001 panic("Vector UPLs should not take this code-path\n");
1002 /*
1003 * we're writing into a 'hole'
1004 */
1005 if (flags & CL_PAGEOUT) {
1006 /*
1007 * if we got here via cluster_pageout
1008 * then just error the request and return
1009 * the 'hole' should already have been covered
1010 */
1011 error = EINVAL;
1012 break;
1013 }
1014 /*
1015 * we can get here if the cluster code happens to
1016 * pick up a page that was dirtied via mmap vs
1017 * a 'write' and the page targets a 'hole'...
1018 * i.e. the writes to the cluster were sparse
1019 * and the file was being written for the first time
1020 *
1021 * we can also get here if the filesystem supports
1022 * 'holes' that are less than PAGE_SIZE.... because
1023 * we can't know if the range in the page that covers
1024 * the 'hole' has been dirtied via an mmap or not,
1025 * we have to assume the worst and try to push the
1026 * entire page to storage.
1027 *
1028 * Try paging out the page individually before
1029 * giving up entirely and dumping it (the pageout
1030 * path will ensure that the zero extent accounting
1031 * has been taken care of before we get back into cluster_io)
1032 *
1033 * go direct to vnode_pageout so that we don't have to
1034 * unbusy the page from the UPL... we used to do this
1035 * so that we could call ubc_sync_range, but that results
1036 * in a potential deadlock if someone else races us to acquire
1037 * that page and wins and in addition needs one of the pages
1038 * we're continuing to hold in the UPL
1039 */
1040 pageout_flags = UPL_MSYNC | UPL_VNODE_PAGER | UPL_NESTED_PAGEOUT;
1041
1042 if ( !(flags & CL_ASYNC))
1043 pageout_flags |= UPL_IOSYNC;
1044 if ( !(flags & CL_COMMIT))
1045 pageout_flags |= UPL_NOCOMMIT;
1046
1047 if (cbp_head) {
1048 buf_t last_cbp;
1049
1050 /*
1051 * first we have to wait for the current outstanding I/Os
1052 * to complete... EOT hasn't been set yet on this transaction
1053 * so the pages won't be released just because all of the current
1054 * I/O linked to this transaction has completed...
1055 */
1056 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
1057
1058 /*
1059 * we've got a transaction that
1060 * includes the page we're about to push out through vnode_pageout...
1061 * find the last bp in the list which will be the one that
1062 * includes the head of this page and round its iosize down
1063 * to a page boundary...
1064 */
1065 for (last_cbp = cbp = cbp_head; cbp->b_trans_next; cbp = cbp->b_trans_next)
1066 last_cbp = cbp;
1067
1068 cbp->b_bcount &= ~PAGE_MASK;
1069
1070 if (cbp->b_bcount == 0) {
1071 /*
1072 * this buf no longer has any I/O associated with it
1073 */
1074 free_io_buf(cbp);
1075
1076 if (cbp == cbp_head) {
1077 /*
1078 * the buf we just freed was the only buf in
1079 * this transaction... so there's no I/O to do
1080 */
1081 cbp_head = NULL;
1082 } else {
1083 /*
1084 * remove the buf we just freed from
1085 * the transaction list
1086 */
1087 last_cbp->b_trans_next = NULL;
1088 cbp_tail = last_cbp;
1089 }
1090 }
1091 if (cbp_head) {
1092 /*
1093 * there was more to the current transaction
1094 * than just the page we are pushing out via vnode_pageout...
1095 * mark it as finished and complete it... we've already
1096 * waited for the I/Os to complete above in the call to cluster_wait_IO
1097 */
1098 cluster_EOT(cbp_head, cbp_tail, 0);
1099
1100 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
1101
1102 trans_count = 0;
1103 }
1104 }
1105 if (vnode_pageout(vp, upl, trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) {
1106 error = EINVAL;
1107 break;
1108 }
1109 e_offset = round_page_64(f_offset + 1);
1110 io_size = e_offset - f_offset;
1111
1112 f_offset += io_size;
1113 upl_offset += io_size;
1114
1115 if (size >= io_size)
1116 size -= io_size;
1117 else
1118 size = 0;
1119 /*
1120 * keep track of how much of the original request
1121 * that we've actually completed... non_rounded_size
1122 * may go negative due to us rounding the request
1123 * to a page size multiple (i.e. size > non_rounded_size)
1124 */
1125 non_rounded_size -= io_size;
1126
1127 if (non_rounded_size <= 0) {
1128 /*
1129 * we've transferred all of the data in the original
1130 * request, but we were unable to complete the tail
1131 * of the last page because the file didn't have
1132 * an allocation to back that portion... this is ok.
1133 */
1134 size = 0;
1135 }
1136 continue;
1137 }
1138 lblkno = (daddr64_t)(f_offset / PAGE_SIZE_64);
1139 /*
1140 * we have now figured out how much I/O we can do - this is in 'io_size'
1141 * pg_offset is the starting point in the first page for the I/O
1142 * pg_count is the number of full and partial pages that 'io_size' encompasses
1143 */
1144 pg_offset = upl_offset & PAGE_MASK;
1145
1146 if (flags & CL_DEV_MEMORY) {
1147 /*
1148 * treat physical requests as one 'giant' page
1149 */
1150 pg_count = 1;
1151 } else
1152 pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;
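/*
 * Worked example (added for exposition; assumes a 4KB PAGE_SIZE and
 * hypothetical offsets): with upl_offset = 0x1800 and io_size = 0x2c00,
 * pg_offset is 0x800 and pg_count = (0x2c00 + 0x800 + 0xfff) / 0x1000 = 4,
 * i.e. a partial leading page, two full pages and a partial trailing page.
 */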
1153
1154 if ((flags & CL_READ) && blkno == -1) {
1155 vm_offset_t commit_offset;
1156 int bytes_to_zero;
1157 int complete_transaction_now = 0;
1158
1159 /*
1160 * if we're reading and blkno == -1, then we've got a
1161 * 'hole' in the file that we need to deal with by zeroing
1162 * out the affected area in the upl
1163 */
1164 if (io_size >= (u_int)non_rounded_size) {
1165 /*
1166 * if this upl contains the EOF and it is not a multiple of PAGE_SIZE
1167 * then 'zero_offset' will be non-zero
1168 * if the 'hole' returned by vnop_blockmap extends all the way to the eof
1169 * (indicated by the io_size finishing off the I/O request for this UPL)
1170 * then we're not going to issue an I/O for the
1171 * last page in this upl... we need to zero both the hole and the tail
1172 * of the page beyond the EOF, since the delayed zero-fill won't kick in
1173 */
1174 bytes_to_zero = non_rounded_size;
1175 if (!(flags & CL_NOZERO))
1176 bytes_to_zero = (((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset;
1177
1178 zero_offset = 0;
1179 } else
1180 bytes_to_zero = io_size;
1181
1182 pg_count = 0;
1183
1184 cluster_zero(upl, upl_offset, bytes_to_zero, real_bp);
1185
1186 if (cbp_head) {
1187 int pg_resid;
1188
1189 /*
1190 * if there is a current I/O chain pending
1191 * then the first page of the group we just zero'd
1192 * will be handled by the I/O completion if the zero
1193 * fill started in the middle of the page
1194 */
1195 commit_offset = (upl_offset + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1196
1197 pg_resid = commit_offset - upl_offset;
1198
1199 if (bytes_to_zero >= pg_resid) {
1200 /*
1201 * the last page of the current I/O
1202 * has been completed...
1203 * compute the number of fully zero'd
1204 * pages that are beyond it
1205 * plus the last page if it's partial
1206 * and we have no more I/O to issue...
1207 * otherwise a partial page is left
1208 * to begin the next I/O
1209 */
1210 if ((int)io_size >= non_rounded_size)
1211 pg_count = (bytes_to_zero - pg_resid + (PAGE_SIZE - 1)) / PAGE_SIZE;
1212 else
1213 pg_count = (bytes_to_zero - pg_resid) / PAGE_SIZE;
1214
1215 complete_transaction_now = 1;
1216 }
1217 } else {
1218 /*
1219 * no pending I/O to deal with
1220 * so, commit all of the fully zero'd pages
1221 * plus the last page if it's partial
1222 * and we have no more I/O to issue...
1223 * otherwise a partial page is left
1224 * to begin the next I/O
1225 */
1226 if ((int)io_size >= non_rounded_size)
1227 pg_count = (pg_offset + bytes_to_zero + (PAGE_SIZE - 1)) / PAGE_SIZE;
1228 else
1229 pg_count = (pg_offset + bytes_to_zero) / PAGE_SIZE;
1230
1231 commit_offset = upl_offset & ~PAGE_MASK;
1232 }
1233 if ( (flags & CL_COMMIT) && pg_count) {
1234 ubc_upl_commit_range(upl, commit_offset, pg_count * PAGE_SIZE,
1235 UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
1236 }
1237 upl_offset += io_size;
1238 f_offset += io_size;
1239 size -= io_size;
1240
1241 /*
1242 * keep track of how much of the original request
1243 * that we've actually completed... non_rounded_size
1244 * may go negative due to us rounding the request
1245 * to a page size multiple (i.e. size > non_rounded_size)
1246 */
1247 non_rounded_size -= io_size;
1248
1249 if (non_rounded_size <= 0) {
1250 /*
1251 * we've transferred all of the data in the original
1252 * request, but we were unable to complete the tail
1253 * of the last page because the file didn't have
1254 * an allocation to back that portion... this is ok.
1255 */
1256 size = 0;
1257 }
1258 if (cbp_head && (complete_transaction_now || size == 0)) {
1259 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
1260
1261 cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0);
1262
1263 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
1264
1265 trans_count = 0;
1266 }
1267 continue;
1268 }
1269 if (pg_count > max_vectors) {
1270 if (((pg_count - max_vectors) * PAGE_SIZE) > io_size) {
1271 io_size = PAGE_SIZE - pg_offset;
1272 pg_count = 1;
1273 } else {
1274 io_size -= (pg_count - max_vectors) * PAGE_SIZE;
1275 pg_count = max_vectors;
1276 }
1277 }
1278 /*
1279 * If the transaction is going to reach the maximum number of
1280 * desired elements, truncate the i/o to the nearest page so
1281 * that the actual i/o is initiated after this buffer is
1282 * created and added to the i/o chain.
1283 *
1284 * I/O directed to physically contiguous memory
1285 * doesn't have a requirement to make sure we 'fill' a page
1286 */
1287 if ( !(flags & CL_DEV_MEMORY) && trans_count >= max_trans_count &&
1288 ((upl_offset + io_size) & PAGE_MASK)) {
1289 vm_offset_t aligned_ofs;
1290
1291 aligned_ofs = (upl_offset + io_size) & ~PAGE_MASK;
1292 /*
1293 * If the io_size does not actually finish off even a
1294 * single page we have to keep adding buffers to the
1295 * transaction despite having reached the desired limit.
1296 *
1297 * Eventually we get here with the page being finished
1298 * off (and exceeded) and then we truncate the size of
1299 * this i/o request so that it is page aligned so that
1300 * we can finally issue the i/o on the transaction.
1301 */
1302 if (aligned_ofs > upl_offset) {
1303 io_size = aligned_ofs - upl_offset;
1304 pg_count--;
1305 }
1306 }
1307
1308 if ( !(mp->mnt_kern_flag & MNTK_VIRTUALDEV))
1309 /*
1310 * if we're not targeting a virtual device i.e. a disk image
1311 * it's safe to dip into the reserve pool since real devices
1312 * can complete this I/O request without requiring additional
1313 * bufs from the alloc_io_buf pool
1314 */
1315 priv = 1;
1316 else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT))
1317 /*
1318 * Throttle the speculative IO
1319 */
1320 priv = 0;
1321 else
1322 priv = 1;
1323
1324 cbp = alloc_io_buf(vp, priv);
1325
1326 if (flags & CL_PAGEOUT) {
1327 u_int i;
1328
1329 for (i = 0; i < pg_count; i++) {
1330 if (buf_invalblkno(vp, lblkno + i, 0) == EBUSY)
1331 panic("BUSY bp found in cluster_io");
1332 }
1333 }
1334 if (flags & CL_ASYNC) {
1335 if (buf_setcallback(cbp, (void *)cluster_iodone, callback_arg))
1336 panic("buf_setcallback failed\n");
1337 }
1338 cbp->b_cliodone = (void *)callback;
1339 cbp->b_flags |= io_flags;
1340
1341 cbp->b_lblkno = lblkno;
1342 cbp->b_blkno = blkno;
1343 cbp->b_bcount = io_size;
1344
1345 if (buf_setupl(cbp, upl, upl_offset))
1346 panic("buf_setupl failed\n");
1347
1348 cbp->b_trans_next = (buf_t)NULL;
1349
1350 if ((cbp->b_iostate = (void *)iostate))
1351 /*
1352 * caller wants to track the state of this
1353 * io... bump the amount issued against this stream
1354 */
1355 iostate->io_issued += io_size;
1356
1357 if (flags & CL_READ) {
1358 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 26)) | DBG_FUNC_NONE,
1359 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
1360 }
1361 else {
1362 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 27)) | DBG_FUNC_NONE,
1363 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
1364 }
1365
1366 if (cbp_head) {
1367 cbp_tail->b_trans_next = cbp;
1368 cbp_tail = cbp;
1369 } else {
1370 cbp_head = cbp;
1371 cbp_tail = cbp;
1372
1373 if ( (cbp_head->b_real_bp = real_bp) ) {
1374 cbp_head->b_flags |= B_NEED_IODONE;
1375 real_bp = (buf_t)NULL;
1376 }
1377 }
1378 *(buf_t *)(&cbp->b_trans_head) = cbp_head;
1379
1380 trans_count++;
1381
1382 upl_offset += io_size;
1383 f_offset += io_size;
1384 size -= io_size;
1385 /*
1386 * keep track of how much of the original request
1387 * that we've actually completed... non_rounded_size
1388 * may go negative due to us rounding the request
1389 * to a page size multiple (i.e. size > non_rounded_size)
1390 */
1391 non_rounded_size -= io_size;
1392
1393 if (non_rounded_size <= 0) {
1394 /*
1395 * we've transferred all of the data in the original
1396 * request, but we were unable to complete the tail
1397 * of the last page because the file didn't have
1398 * an allocation to back that portion... this is ok.
1399 */
1400 size = 0;
1401 }
1402 if (size == 0) {
1403 /*
1404 * we have no more I/O to issue, so go
1405 * finish the final transaction
1406 */
1407 need_EOT = TRUE;
1408 } else if ( ((flags & CL_DEV_MEMORY) || (upl_offset & PAGE_MASK) == 0) &&
1409 ((flags & CL_ASYNC) || trans_count > max_trans_count) ) {
1410 /*
1411 * I/O directed to physically contiguous memory...
1412 * which doesn't have a requirement to make sure we 'fill' a page
1413 * or...
1414 * the current I/O we've prepared fully
1415 * completes the last page in this request
1416 * and ...
1417 * it's either an ASYNC request or
1418 * we've already accumulated more than 8 I/O's into
1419 * this transaction so mark it as complete so that
1420 * it can finish asynchronously or via the cluster_complete_transaction
1421 * below if the request is synchronous
1422 */
1423 need_EOT = TRUE;
1424 }
1425 if (need_EOT == TRUE)
1426 cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0);
1427
1428 if (flags & CL_THROTTLE)
1429 (void)vnode_waitforwrites(vp, async_throttle, 0, 0, "cluster_io");
1430
1431 if ( !(io_flags & B_READ))
1432 vnode_startwrite(vp);
1433
1434 (void) VNOP_STRATEGY(cbp);
1435
1436 if (need_EOT == TRUE) {
1437 if ( !(flags & CL_ASYNC))
1438 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 1);
1439
1440 need_EOT = FALSE;
1441 trans_count = 0;
1442 cbp_head = NULL;
1443 }
1444 }
1445 if (error) {
1446 int abort_size;
1447
1448 io_size = 0;
1449
1450 if (cbp_head) {
1451 /*
1452 * first wait until all of the outstanding I/O
1453 * for this partial transaction has completed
1454 */
1455 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
1456
1457 /*
1458 * Rewind the upl offset to the beginning of the
1459 * transaction.
1460 */
1461 upl_offset = cbp_head->b_uploffset;
1462
1463 for (cbp = cbp_head; cbp;) {
1464 buf_t cbp_next;
1465
1466 size += cbp->b_bcount;
1467 io_size += cbp->b_bcount;
1468
1469 cbp_next = cbp->b_trans_next;
1470 free_io_buf(cbp);
1471 cbp = cbp_next;
1472 }
1473 }
1474 if (iostate) {
1475 int need_wakeup = 0;
1476
1477 /*
1478 * update the error condition for this stream
1479 * since we never really issued the io
1480 * just go ahead and adjust it back
1481 */
1482 lck_mtx_lock_spin(cl_mtxp);
1483
1484 if (iostate->io_error == 0)
1485 iostate->io_error = error;
1486 iostate->io_issued -= io_size;
1487
1488 if (iostate->io_wanted) {
1489 /*
1490 * someone is waiting for the state of
1491 * this io stream to change
1492 */
1493 iostate->io_wanted = 0;
1494 need_wakeup = 1;
1495 }
1496 lck_mtx_unlock(cl_mtxp);
1497
1498 if (need_wakeup)
1499 wakeup((caddr_t)&iostate->io_wanted);
1500 }
1501 if (flags & CL_COMMIT) {
1502 int upl_flags;
1503
1504 pg_offset = upl_offset & PAGE_MASK;
1505 abort_size = (upl_end_offset - upl_offset + PAGE_MASK) & ~PAGE_MASK;
1506
1507 upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, abort_size, error, io_flags);
1508
1509 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE,
1510 upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0);
1511 }
1512 if (retval == 0)
1513 retval = error;
1514 } else if (cbp_head)
1515 panic("%s(): cbp_head is not NULL.\n", __FUNCTION__);
1516
1517 if (real_bp) {
1518 /*
1519 * can get here if we either encountered an error
1520 * or we completely zero-filled the request and
1521 * no I/O was issued
1522 */
1523 if (error) {
1524 real_bp->b_flags |= B_ERROR;
1525 real_bp->b_error = error;
1526 }
1527 buf_biodone(real_bp);
1528 }
1529 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_END, (int)f_offset, size, upl_offset, retval, 0);
1530
1531 return (retval);
1532 }
1533
1534 #define reset_vector_run_state() \
1535 issueVectorUPL = vector_upl_offset = vector_upl_index = vector_upl_iosize = vector_upl_size = 0;
1536
1537 static int
1538 vector_cluster_io(vnode_t vp, upl_t vector_upl, vm_offset_t vector_upl_offset, off_t v_upl_uio_offset, int vector_upl_iosize,
1539 int io_flag, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
1540 {
1541 vector_upl_set_pagelist(vector_upl);
1542
1543 if(io_flag & CL_READ) {
1544 if(vector_upl_offset == 0 && ((vector_upl_iosize & PAGE_MASK)==0))
1545 io_flag &= ~CL_PRESERVE; /*don't zero fill*/
1546 else
1547 io_flag |= CL_PRESERVE; /*zero fill*/
1548 }
1549 return (cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, real_bp, iostate, callback, callback_arg));
1550
1551 }
1552
1553 static int
1554 cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
1555 {
1556 int pages_in_prefetch;
1557
1558 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START,
1559 (int)f_offset, size, (int)filesize, 0, 0);
1560
1561 if (f_offset >= filesize) {
1562 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
1563 (int)f_offset, 0, 0, 0, 0);
1564 return(0);
1565 }
1566 if ((off_t)size > (filesize - f_offset))
1567 size = filesize - f_offset;
1568 pages_in_prefetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
1569
1570 advisory_read_ext(vp, filesize, f_offset, size, callback, callback_arg, bflag);
1571
1572 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
1573 (int)f_offset + size, pages_in_prefetch, 0, 1, 0);
1574
1575 return (pages_in_prefetch);
1576 }
1577
1578
1579
1580 static void
1581 cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *rap, int (*callback)(buf_t, void *), void *callback_arg,
1582 int bflag)
1583 {
1584 daddr64_t r_addr;
1585 off_t f_offset;
1586 int size_of_prefetch;
1587 u_int max_prefetch;
1588
1589
1590 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START,
1591 (int)extent->b_addr, (int)extent->e_addr, (int)rap->cl_lastr, 0, 0);
1592
1593 if (extent->b_addr == rap->cl_lastr && extent->b_addr == extent->e_addr) {
1594 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
1595 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 0, 0);
1596 return;
1597 }
1598 if (rap->cl_lastr == -1 || (extent->b_addr != rap->cl_lastr && extent->b_addr != (rap->cl_lastr + 1))) {
1599 rap->cl_ralen = 0;
1600 rap->cl_maxra = 0;
1601
1602 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
1603 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 1, 0);
1604
1605 return;
1606 }
1607 max_prefetch = MAX_PREFETCH(vp, cluster_max_io_size(vp->v_mount, CL_READ));
1608
1609 if (extent->e_addr < rap->cl_maxra) {
1610 if ((rap->cl_maxra - extent->e_addr) > ((max_prefetch / PAGE_SIZE) / 4)) {
1611
1612 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
1613 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 2, 0);
1614 return;
1615 }
1616 }
1617 r_addr = max(extent->e_addr, rap->cl_maxra) + 1;
1618 f_offset = (off_t)(r_addr * PAGE_SIZE_64);
1619
1620 size_of_prefetch = 0;
1621
1622 ubc_range_op(vp, f_offset, f_offset + PAGE_SIZE_64, UPL_ROP_PRESENT, &size_of_prefetch);
1623
1624 if (size_of_prefetch) {
1625 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
1626 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 3, 0);
1627 return;
1628 }
1629 if (f_offset < filesize) {
1630 daddr64_t read_size;
1631
1632 rap->cl_ralen = rap->cl_ralen ? min(max_prefetch / PAGE_SIZE, rap->cl_ralen << 1) : 1;
1633
1634 read_size = (extent->e_addr + 1) - extent->b_addr;
1635
1636 if (read_size > rap->cl_ralen) {
1637 if (read_size > max_prefetch / PAGE_SIZE)
1638 rap->cl_ralen = max_prefetch / PAGE_SIZE;
1639 else
1640 rap->cl_ralen = read_size;
1641 }
1642 size_of_prefetch = cluster_read_prefetch(vp, f_offset, rap->cl_ralen * PAGE_SIZE, filesize, callback, callback_arg, bflag);
1643
1644 if (size_of_prefetch)
1645 rap->cl_maxra = (r_addr + size_of_prefetch) - 1;
1646 }
1647 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
1648 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 4, 0);
1649 }
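/*
 * Worked example (added for exposition; the request pattern is hypothetical):
 * for a small, strictly sequential reader the window above starts at 1 page
 * and doubles on each successive read (1, 2, 4, ...) until it is capped at
 * max_prefetch / PAGE_SIZE; cl_maxra remembers the last page already
 * prefetched so the same range is not issued twice.
 */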
1650
1651
1652 int
1653 cluster_pageout(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
1654 int size, off_t filesize, int flags)
1655 {
1656 return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
1657
1658 }
1659
1660
1661 int
1662 cluster_pageout_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
1663 int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
1664 {
1665 int io_size;
1666 int rounded_size;
1667 off_t max_size;
1668 int local_flags;
1669
1670 if (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV)
1671 /*
1672 * if we know we're issuing this I/O to a virtual device (i.e. disk image)
1673 * then we don't want to enforce this throttle... if we do, we can
1674 * potentially deadlock since we're stalling the pageout thread at a time
1675 * when the disk image might need additional memory (which won't be available
1676 * if the pageout thread can't run)... instead we'll just depend on the throttle
1677 * that the pageout thread now has in place to deal with external files
1678 */
1679 local_flags = CL_PAGEOUT;
1680 else
1681 local_flags = CL_PAGEOUT | CL_THROTTLE;
1682
1683 if ((flags & UPL_IOSYNC) == 0)
1684 local_flags |= CL_ASYNC;
1685 if ((flags & UPL_NOCOMMIT) == 0)
1686 local_flags |= CL_COMMIT;
1687 if ((flags & UPL_KEEPCACHED))
1688 local_flags |= CL_KEEPCACHED;
1689
1690
1691 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 52)) | DBG_FUNC_NONE,
1692 (int)f_offset, size, (int)filesize, local_flags, 0);
1693
1694 /*
1695 * If they didn't specify any I/O, then we are done...
1696 * we can't issue an abort because we don't know how
1697 * big the upl really is
1698 */
1699 if (size <= 0)
1700 return (EINVAL);
1701
1702 if (vp->v_mount->mnt_flag & MNT_RDONLY) {
1703 if (local_flags & CL_COMMIT)
1704 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
1705 return (EROFS);
1706 }
1707 /*
1708 * can't page-out to a negative offset
1709 * or if we're starting beyond the EOF
1710 * or if the file offset isn't page aligned
1711 * or the size requested isn't a multiple of PAGE_SIZE
1712 */
1713 if (f_offset < 0 || f_offset >= filesize ||
1714 (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) {
1715 if (local_flags & CL_COMMIT)
1716 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
1717 return (EINVAL);
1718 }
1719 max_size = filesize - f_offset;
1720
1721 if (size < max_size)
1722 io_size = size;
1723 else
1724 io_size = max_size;
1725
1726 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1727
1728 if (size > rounded_size) {
1729 if (local_flags & CL_COMMIT)
1730 ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size,
1731 UPL_ABORT_FREE_ON_EMPTY);
1732 }
1733 return (cluster_io(vp, upl, upl_offset, f_offset, io_size,
1734 local_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg));
1735 }
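/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): a filesystem's VNOP_PAGEOUT handler typically funnels straight
 * into cluster_pageout; example_get_filesize() below is a hypothetical
 * stand-in for however that filesystem obtains its current EOF.
 */
#if 0
static int
example_vnop_pageout(struct vnop_pageout_args *ap)
{
	return (cluster_pageout(ap->a_vp, ap->a_pl, ap->a_pl_offset,
	    ap->a_f_offset, (int)ap->a_size,
	    example_get_filesize(ap->a_vp), ap->a_flags));
}
#endif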
1736
1737
1738 int
1739 cluster_pagein(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
1740 int size, off_t filesize, int flags)
1741 {
1742 return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
1743 }
1744
1745
1746 int
1747 cluster_pagein_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
1748 int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
1749 {
1750 u_int io_size;
1751 int rounded_size;
1752 off_t max_size;
1753 int retval;
1754 int local_flags = 0;
1755
1756 if (upl == NULL || size < 0)
1757 panic("cluster_pagein: NULL upl passed in");
1758
1759 if ((flags & UPL_IOSYNC) == 0)
1760 local_flags |= CL_ASYNC;
1761 if ((flags & UPL_NOCOMMIT) == 0)
1762 local_flags |= CL_COMMIT;
1763 if (flags & UPL_IOSTREAMING)
1764 local_flags |= CL_IOSTREAMING;
1765
1766
1767 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 56)) | DBG_FUNC_NONE,
1768 (int)f_offset, size, (int)filesize, local_flags, 0);
1769
1770 /*
1771 * can't page-in from a negative offset
1772 * or if we're starting beyond the EOF
1773 * or if the file offset isn't page aligned
1774 * or the size requested isn't a multiple of PAGE_SIZE
1775 */
1776 if (f_offset < 0 || f_offset >= filesize ||
1777 (f_offset & PAGE_MASK_64) || (size & PAGE_MASK) || (upl_offset & PAGE_MASK)) {
1778 if (local_flags & CL_COMMIT)
1779 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
1780 return (EINVAL);
1781 }
1782 max_size = filesize - f_offset;
1783
1784 if (size < max_size)
1785 io_size = size;
1786 else
1787 io_size = max_size;
1788
1789 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1790
1791 if (size > rounded_size && (local_flags & CL_COMMIT))
1792 ubc_upl_abort_range(upl, upl_offset + rounded_size,
1793 size - rounded_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
1794
1795 retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
1796 local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
1797
1798 return (retval);
1799 }
1800
1801
1802 int
1803 cluster_bp(buf_t bp)
1804 {
1805 return cluster_bp_ext(bp, NULL, NULL);
1806 }
1807
1808
1809 int
1810 cluster_bp_ext(buf_t bp, int (*callback)(buf_t, void *), void *callback_arg)
1811 {
1812 off_t f_offset;
1813 int flags;
1814
1815 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 19)) | DBG_FUNC_START,
1816 bp, (int)bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
1817
1818 if (bp->b_flags & B_READ)
1819 flags = CL_ASYNC | CL_READ;
1820 else
1821 flags = CL_ASYNC;
1822 if (bp->b_flags & B_PASSIVE)
1823 flags |= CL_PASSIVE;
1824
1825 f_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno);
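	/*
	 * translate the buf's logical block number into a byte offset within
	 * the file so that a traditional struct buf request can be handed to
	 * cluster_io just like any other cluster transfer
	 */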
1826
1827 return (cluster_io(bp->b_vp, bp->b_upl, 0, f_offset, bp->b_bcount, flags, bp, (struct clios *)NULL, callback, callback_arg));
1828 }
1829
1830
1831
1832 int
1833 cluster_write(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff, int xflags)
1834 {
1835 return cluster_write_ext(vp, uio, oldEOF, newEOF, headOff, tailOff, xflags, NULL, NULL);
1836 }
1837
1838
1839 int
1840 cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff,
1841 int xflags, int (*callback)(buf_t, void *), void *callback_arg)
1842 {
1843 user_ssize_t cur_resid;
1844 int retval = 0;
1845 int flags;
1846 int zflags;
1847 int bflag;
1848 int write_type = IO_COPY;
1849 u_int32_t write_length;
1850
1851 flags = xflags;
1852
1853 if (flags & IO_PASSIVE)
1854 bflag = CL_PASSIVE;
1855 else
1856 bflag = 0;
1857
1858 if (vp->v_flag & VNOCACHE_DATA)
1859 flags |= IO_NOCACHE;
1860
1861 if (uio == NULL) {
1862 /*
1863 * no user data...
1864 * this call is being made to zero-fill some range in the file
1865 */
1866 retval = cluster_write_copy(vp, NULL, (u_int32_t)0, oldEOF, newEOF, headOff, tailOff, flags, callback, callback_arg);
1867
1868 return(retval);
1869 }
1870 /*
1871 * do a write through the cache if one of the following is true....
1872 * NOCACHE is not true or
1873 * the uio request doesn't target USERSPACE
1874 * otherwise, find out if we want the direct or contig variant for
1875 * the first vector in the uio request
1876 */
1877 if ( (flags & IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg) )
1878 retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE);
1879
1880 if ( (flags & (IO_TAILZEROFILL | IO_HEADZEROFILL)) && write_type == IO_DIRECT)
1881 /*
1882 * must go through the cached variant in this case
1883 */
1884 write_type = IO_COPY;
1885
1886 while ((cur_resid = uio_resid(uio)) && uio->uio_offset < newEOF && retval == 0) {
1887
1888 switch (write_type) {
1889
1890 case IO_COPY:
1891 /*
1892 * make sure the uio_resid isn't too big...
1893 * internally, we want to handle all of the I/O in
1894 * chunk sizes that fit in a 32 bit int
1895 */
1896 if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) {
1897 /*
1898 * we're going to have to call cluster_write_copy
1899 * more than once...
1900 *
1901 * only want the last call to cluster_write_copy to
1902 * have the IO_TAILZEROFILL flag set and only the
1903 * first call should have IO_HEADZEROFILL
1904 */
1905 zflags = flags & ~IO_TAILZEROFILL;
1906 flags &= ~IO_HEADZEROFILL;
1907
1908 write_length = MAX_IO_REQUEST_SIZE;
1909 } else {
1910 /*
1911 * last call to cluster_write_copy
1912 */
1913 zflags = flags;
1914
1915 write_length = (u_int32_t)cur_resid;
1916 }
1917 retval = cluster_write_copy(vp, uio, write_length, oldEOF, newEOF, headOff, tailOff, zflags, callback, callback_arg);
1918 break;
1919
1920 case IO_CONTIG:
1921 zflags = flags & ~(IO_TAILZEROFILL | IO_HEADZEROFILL);
1922
1923 if (flags & IO_HEADZEROFILL) {
1924 /*
1925 * only do this once per request
1926 */
1927 flags &= ~IO_HEADZEROFILL;
1928
1929 retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, uio->uio_offset,
1930 headOff, (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
1931 if (retval)
1932 break;
1933 }
1934 retval = cluster_write_contig(vp, uio, newEOF, &write_type, &write_length, callback, callback_arg, bflag);
1935
1936 if (retval == 0 && (flags & IO_TAILZEROFILL) && uio_resid(uio) == 0) {
1937 /*
1938 * we're done with the data from the user specified buffer(s)
1939 * and we've been requested to zero fill at the tail
1940 * treat this as an IO_HEADZEROFILL which doesn't require a uio
1941 * by rearranging the args and passing in IO_HEADZEROFILL
1942 */
1943 retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, tailOff, uio->uio_offset,
1944 (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
1945 }
1946 break;
1947
1948 case IO_DIRECT:
1949 /*
1950 * cluster_write_direct is never called with IO_TAILZEROFILL || IO_HEADZEROFILL
1951 */
1952 retval = cluster_write_direct(vp, uio, oldEOF, newEOF, &write_type, &write_length, flags, callback, callback_arg);
1953 break;
1954
1955 case IO_UNKNOWN:
1956 retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE);
1957 break;
1958 }
1959 /*
1960 * in case we end up calling cluster_write_copy (from cluster_write_direct)
1961 * multiple times to service a multi-vector request that is not aligned properly
1962 * we need to update the oldEOF so that we
1963 * don't zero-fill the head of a page if we've successfully written
1964 * data to that area... 'cluster_write_copy' will zero-fill the head of a
1965 * page that is beyond the oldEOF if the write is unaligned... we only
1966 * want that to happen for the very first page of the cluster_write,
1967 * NOT the first page of each vector making up a multi-vector write.
1968 */
1969 if (uio->uio_offset > oldEOF)
1970 oldEOF = uio->uio_offset;
1971 }
1972 return (retval);
1973 }
1974
1975
1976 static int
1977 cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
1978 int flags, int (*callback)(buf_t, void *), void *callback_arg)
1979 {
1980 upl_t upl;
1981 upl_page_info_t *pl;
1982 vm_offset_t upl_offset;
1983 vm_offset_t vector_upl_offset = 0;
1984 u_int32_t io_req_size;
1985 u_int32_t offset_in_file;
1986 u_int32_t offset_in_iovbase;
1987 u_int32_t io_size;
1988 int io_flag = 0;
1989 upl_size_t upl_size, vector_upl_size = 0;
1990 vm_size_t upl_needed_size;
1991 mach_msg_type_number_t pages_in_pl;
1992 int upl_flags;
1993 kern_return_t kret;
1994 mach_msg_type_number_t i;
1995 int force_data_sync;
1996 int retval = 0;
1997 int first_IO = 1;
1998 struct clios iostate;
1999 user_addr_t iov_base;
2000 u_int32_t mem_alignment_mask;
2001 u_int32_t devblocksize;
2002 u_int32_t max_upl_size;
2003
2004 u_int32_t vector_upl_iosize = 0;
2005 int issueVectorUPL = 0, useVectorUPL = (uio->uio_iovcnt > 1);
2006 off_t v_upl_uio_offset = 0;
2007 int vector_upl_index = 0;
2008 upl_t vector_upl = NULL;
2009
2010
2011 /*
2012 * When we enter this routine, we know
2013 * -- the resid will not exceed iov_len
2014 */
2015 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
2016 (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
2017
2018 max_upl_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
2019
2020 io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE | CL_DIRECT_IO;
2021
2022 if (flags & IO_PASSIVE)
2023 io_flag |= CL_PASSIVE;
2024
2025 iostate.io_completed = 0;
2026 iostate.io_issued = 0;
2027 iostate.io_error = 0;
2028 iostate.io_wanted = 0;
2029
2030 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
2031 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
2032
2033 if (devblocksize == 1) {
2034 /*
2035 * the AFP client advertises a devblocksize of 1
2036 * however, its BLOCKMAP routine maps to physical
2037 * blocks that are PAGE_SIZE in size...
2038 * therefore we can't ask for I/Os that aren't page aligned
2039 * or aren't multiples of PAGE_SIZE in size
2040 * by setting devblocksize to PAGE_SIZE, we re-instate
2041 * the old behavior we had before the mem_alignment_mask
2042 * changes went in...
2043 */
2044 devblocksize = PAGE_SIZE;
2045 }
2046
2047 next_dwrite:
2048 io_req_size = *write_length;
2049 iov_base = uio_curriovbase(uio);
2050
2051 offset_in_file = (u_int32_t)uio->uio_offset & PAGE_MASK;
2052 offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;
2053
2054 if (offset_in_file || offset_in_iovbase) {
2055 /*
2056 * one of the 2 important offsets is misaligned
2057 * so fire an I/O through the cache for this entire vector
2058 */
2059 goto wait_for_dwrites;
2060 }
2061 if (iov_base & (devblocksize - 1)) {
2062 /*
2063 * the offset in memory must be on a device block boundary
2064 * so that we can guarantee that we can generate an
2065 * I/O that ends on a page boundary in cluster_io
2066 */
2067 goto wait_for_dwrites;
2068 }
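	/*
	 * at this point the direct-path alignment checks have all passed: the
	 * file offset is page aligned and the user buffer satisfies both the
	 * mount's alignment mask and device block alignment, so the loop below
	 * can wire the user's pages into a UPL and issue I/O straight from
	 * them without copying through the cache
	 */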
2069
2070 while (io_req_size >= PAGE_SIZE && uio->uio_offset < newEOF && retval == 0) {
2071
2072 if (first_IO) {
2073 cluster_syncup(vp, newEOF, callback, callback_arg);
2074 first_IO = 0;
2075 }
2076 io_size = io_req_size & ~PAGE_MASK;
2077 iov_base = uio_curriovbase(uio);
2078
2079 if (io_size > max_upl_size)
2080 io_size = max_upl_size;
2081
2082 if(useVectorUPL && (iov_base & PAGE_MASK)) {
2083 /*
2084 * We have an iov_base that's not page-aligned.
2085 * Issue all I/O's that have been collected within
2086 * this Vectored UPL.
2087 */
2088 if(vector_upl_index) {
2089 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2090 reset_vector_run_state();
2091 }
2092
2093 /*
2094 * After this point, if we are using the Vector UPL path and the base is
2095 * not page-aligned then the UPL with that base will be the first in the vector UPL.
2096 */
2097 }
2098
2099 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
2100 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
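		/*
		 * e.g. (assuming 4K pages) a 16K write whose buffer starts 512 bytes
		 * into a page gives upl_offset = 512 and upl_needed_size = 20K...
		 * rounding (upl_offset + io_size) up to a page multiple guarantees
		 * the UPL covers every page the transfer touches
		 */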
2101
2102 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
2103 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
2104
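		/*
		 * try up to three times to obtain a UPL in which every page is
		 * resident and valid... each retry passes a larger force_data_sync
		 * value to vm_map_get_upl, asking the VM layer to work harder at
		 * bringing the backing pages up to date before we give up
		 */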
2105 for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
2106 pages_in_pl = 0;
2107 upl_size = upl_needed_size;
2108 upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
2109 UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
2110
2111 kret = vm_map_get_upl(current_map(),
2112 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
2113 &upl_size,
2114 &upl,
2115 NULL,
2116 &pages_in_pl,
2117 &upl_flags,
2118 force_data_sync);
2119
2120 if (kret != KERN_SUCCESS) {
2121 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
2122 0, 0, 0, kret, 0);
2123 /*
2124 * failed to get pagelist
2125 *
2126 * we may have already spun some portion of this request
2127 * off as async requests... we need to wait for the I/O
2128 * to complete before returning
2129 */
2130 goto wait_for_dwrites;
2131 }
2132 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2133 pages_in_pl = upl_size / PAGE_SIZE;
2134
2135 for (i = 0; i < pages_in_pl; i++) {
2136 if (!upl_valid_page(pl, i))
2137 break;
2138 }
2139 if (i == pages_in_pl)
2140 break;
2141
2142 /*
2143 * didn't get all the pages back that we
2144 * needed... release this upl and try again
2145 */
2146 ubc_upl_abort(upl, 0);
2147 }
2148 if (force_data_sync >= 3) {
2149 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
2150 i, pages_in_pl, upl_size, kret, 0);
2151 /*
2152 * for some reason, we couldn't acquire a hold on all
2153 * the pages needed in the user's address space
2154 *
2155 * we may have already spun some portion of this request
2156 * off as async requests... we need to wait for the I/O
2157 * to complete before returning
2158 */
2159 goto wait_for_dwrites;
2160 }
2161
2162 /*
2163 * Consider the possibility that upl_size wasn't satisfied.
2164 */
2165 if (upl_size < upl_needed_size) {
2166 if (upl_size && upl_offset == 0)
2167 io_size = upl_size;
2168 else
2169 io_size = 0;
2170 }
2171 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
2172 (int)upl_offset, upl_size, (int)iov_base, io_size, 0);
2173
2174 if (io_size == 0) {
2175 ubc_upl_abort(upl, 0);
2176 /*
2177 * we may have already spun some portion of this request
2178 * off as async requests... we need to wait for the I/O
2179 * to complete before returning
2180 */
2181 goto wait_for_dwrites;
2182 }
2183
2184 if(useVectorUPL) {
2185 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
2186 if(end_off)
2187 issueVectorUPL = 1;
2188 /*
2189 * After this point, if we are using a vector UPL, then
2190 * either all the UPL elements end on a page boundary OR
2191 * this UPL is the last element because it does not end
2192 * on a page boundary.
2193 */
2194 }
2195
2196 /*
2197 * Now look for pages already in the cache
2198 * and throw them away.
2199 * uio->uio_offset is page aligned within the file
2200 * io_size is a multiple of PAGE_SIZE
2201 */
2202 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + io_size, UPL_ROP_DUMP, NULL);
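		/*
		 * note: dumping any cached pages for this range keeps the page cache
		 * coherent with the data about to be written directly to the device...
		 * a later cached read would otherwise risk seeing stale pages
		 */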
2203
2204 /*
2205 * we want to push out these writes asynchronously so that we can overlap
2206 * the preparation of the next I/O
2207 * if there are already too many outstanding writes
2208 * wait until some complete before issuing the next
2209 */
2210 if (iostate.io_issued > iostate.io_completed) {
2211
2212 lck_mtx_lock(cl_mtxp);
2213
2214 while ((iostate.io_issued - iostate.io_completed) > (max_upl_size * IO_SCALE(vp, 2))) {
2215
2216 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
2217 iostate.io_issued, iostate.io_completed, max_upl_size * IO_SCALE(vp, 2), 0, 0);
2218
2219 iostate.io_wanted = 1;
2220 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_direct", NULL);
2221
2222 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
2223 iostate.io_issued, iostate.io_completed, max_upl_size * IO_SCALE(vp, 2), 0, 0);
2224 }
2225 lck_mtx_unlock(cl_mtxp);
2226 }
2227 if (iostate.io_error) {
2228 /*
2229 * one of the earlier writes we issued ran into a hard error
2230 * don't issue any more writes, cleanup the UPL
2231 * that was just created but not used, then
2232 * go wait for all writes that are part of this stream
2233 * to complete before returning the error to the caller
2234 */
2235 ubc_upl_abort(upl, 0);
2236
2237 goto wait_for_dwrites;
2238 }
2239
2240 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
2241 (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);
2242
2243 if(!useVectorUPL)
2244 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
2245 io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2246
2247 else {
2248 if(!vector_upl_index) {
2249 vector_upl = vector_upl_create(upl_offset);
2250 v_upl_uio_offset = uio->uio_offset;
2251 vector_upl_offset = upl_offset;
2252 }
2253
2254 vector_upl_set_subupl(vector_upl,upl,upl_size);
2255 vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
2256 vector_upl_index++;
2257 vector_upl_iosize += io_size;
2258 vector_upl_size += upl_size;
2259
2260 if(issueVectorUPL || vector_upl_index == MAX_VECTOR_UPL_ELEMENTS || vector_upl_size >= MAX_VECTOR_UPL_SIZE) {
2261 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2262 reset_vector_run_state();
2263 }
2264 }
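		/*
		 * to summarize the vectored path: each sub-UPL is appended to the
		 * current vector UPL, and the whole batch is issued as a single
		 * vector_cluster_io once an element ends mid-page (issueVectorUPL),
		 * the element count reaches MAX_VECTOR_UPL_ELEMENTS, or the
		 * accumulated size reaches MAX_VECTOR_UPL_SIZE
		 */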
2265
2266 /*
2267 * update the uio structure to
2268 * reflect the I/O that we just issued
2269 */
2270 uio_update(uio, (user_size_t)io_size);
2271
2272 /*
2273 * in case we end up calling through to cluster_write_copy to finish
2274 * the tail of this request, we need to update the oldEOF so that we
2275 * don't zero-fill the head of a page if we've successfully written
2276 * data to that area... 'cluster_write_copy' will zero-fill the head of a
2277 * page that is beyond the oldEOF if the write is unaligned... we only
2278 * want that to happen for the very first page of the cluster_write,
2279 * NOT the first page of each vector making up a multi-vector write.
2280 */
2281 if (uio->uio_offset > oldEOF)
2282 oldEOF = uio->uio_offset;
2283
2284 io_req_size -= io_size;
2285
2286 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
2287 (int)upl_offset, (int)uio->uio_offset, io_req_size, retval, 0);
2288
2289 } /* end while */
2290
2291 if (retval == 0 && iostate.io_error == 0 && io_req_size == 0) {
2292
2293 retval = cluster_io_type(uio, write_type, write_length, MIN_DIRECT_WRITE_SIZE);
2294
2295 if (retval == 0 && *write_type == IO_DIRECT) {
2296
2297 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_NONE,
2298 (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
2299
2300 goto next_dwrite;
2301 }
2302 }
2303
2304 wait_for_dwrites:
2305
2306 if(retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
2307 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2308 reset_vector_run_state();
2309 }
2310
2311 if (iostate.io_issued > iostate.io_completed) {
2312 /*
2313 * make sure all async writes issued as part of this stream
2314 * have completed before we return
2315 */
2316 lck_mtx_lock(cl_mtxp);
2317
2318 while (iostate.io_issued != iostate.io_completed) {
2319 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
2320 iostate.io_issued, iostate.io_completed, 0, 0, 0);
2321
2322 iostate.io_wanted = 1;
2323 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_direct", NULL);
2324
2325 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
2326 iostate.io_issued, iostate.io_completed, 0, 0, 0);
2327 }
2328 lck_mtx_unlock(cl_mtxp);
2329 }
2330 if (iostate.io_error)
2331 retval = iostate.io_error;
2332
2333 if (io_req_size && retval == 0) {
2334 /*
2335 * we couldn't handle the tail of this request in DIRECT mode
2336 * so fire it through the copy path
2337 *
2338 * note that flags will never have IO_HEADZEROFILL or IO_TAILZEROFILL set
2339 * so we can just pass 0 in for the headOff and tailOff
2340 */
2341 if (uio->uio_offset > oldEOF)
2342 oldEOF = uio->uio_offset;
2343
2344 retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg);
2345
2346 *write_type = IO_UNKNOWN;
2347 }
2348 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
2349 (int)uio->uio_offset, io_req_size, retval, 4, 0);
2350
2351 return (retval);
2352 }
2353
2354
2355 static int
2356 cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, u_int32_t *write_length,
2357 int (*callback)(buf_t, void *), void *callback_arg, int bflag)
2358 {
2359 upl_page_info_t *pl;
2360 addr64_t src_paddr = 0;
2361 upl_t upl[MAX_VECTS];
2362 vm_offset_t upl_offset;
2363 u_int32_t tail_size = 0;
2364 u_int32_t io_size;
2365 u_int32_t xsize;
2366 upl_size_t upl_size;
2367 vm_size_t upl_needed_size;
2368 mach_msg_type_number_t pages_in_pl;
2369 int upl_flags;
2370 kern_return_t kret;
2371 struct clios iostate;
2372 int error = 0;
2373 int cur_upl = 0;
2374 int num_upl = 0;
2375 int n;
2376 user_addr_t iov_base;
2377 u_int32_t devblocksize;
2378 u_int32_t mem_alignment_mask;
2379
2380 /*
2381 * When we enter this routine, we know
2382 * -- the io_req_size will not exceed iov_len
2383 * -- the target address is physically contiguous
2384 */
2385 cluster_syncup(vp, newEOF, callback, callback_arg);
2386
2387 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
2388 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
2389
2390 iostate.io_completed = 0;
2391 iostate.io_issued = 0;
2392 iostate.io_error = 0;
2393 iostate.io_wanted = 0;
2394
2395 next_cwrite:
2396 io_size = *write_length;
2397
2398 iov_base = uio_curriovbase(uio);
2399
2400 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
2401 upl_needed_size = upl_offset + io_size;
2402
2403 pages_in_pl = 0;
2404 upl_size = upl_needed_size;
2405 upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
2406 UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
2407
2408 kret = vm_map_get_upl(current_map(),
2409 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
2410 &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
2411
2412 if (kret != KERN_SUCCESS) {
2413 /*
2414 * failed to get pagelist
2415 */
2416 error = EINVAL;
2417 goto wait_for_cwrites;
2418 }
2419 num_upl++;
2420
2421 /*
2422 * Consider the possibility that upl_size wasn't satisfied.
2423 */
2424 if (upl_size < upl_needed_size) {
2425 /*
2426 * This is a failure in the physical memory case.
2427 */
2428 error = EINVAL;
2429 goto wait_for_cwrites;
2430 }
2431 pl = ubc_upl_pageinfo(upl[cur_upl]);
2432
2433 src_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)upl_offset;
2434
2435 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
2436 u_int32_t head_size;
2437
2438 head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
2439
2440 if (head_size > io_size)
2441 head_size = io_size;
2442
2443 error = cluster_align_phys_io(vp, uio, src_paddr, head_size, 0, callback, callback_arg);
2444
2445 if (error)
2446 goto wait_for_cwrites;
2447
2448 upl_offset += head_size;
2449 src_paddr += head_size;
2450 io_size -= head_size;
2451
2452 iov_base += head_size;
2453 }
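	/*
	 * the misaligned head (if any) has now been written one device block
	 * at a time via cluster_align_phys_io, leaving uio->uio_offset on a
	 * device block boundary... any sub-block remainder at the end of the
	 * request is split off as tail_size below and handled the same way
	 * once the main loop has finished
	 */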
2454 if ((u_int32_t)iov_base & mem_alignment_mask) {
2455 /*
2456 * the request isn't aligned on a memory boundary
2457 * that the underlying DMA engine can handle...
2458 * return an error instead of going through
2459 * the slow copy path since the intent of this
2460 * path is direct I/O from device memory
2461 */
2462 error = EINVAL;
2463 goto wait_for_cwrites;
2464 }
2465
2466 tail_size = io_size & (devblocksize - 1);
2467 io_size -= tail_size;
2468
2469 while (io_size && error == 0) {
2470
2471 if (io_size > MAX_IO_CONTIG_SIZE)
2472 xsize = MAX_IO_CONTIG_SIZE;
2473 else
2474 xsize = io_size;
2475 /*
2476 * request asynchronously so that we can overlap
2477 * the preparation of the next I/O... we'll do
2478 * the commit after all the I/O has completed
2479 * since it's all issued against the same UPL
2480 * if there are already too many outstanding writes
2481 * wait until some have completed before issuing the next
2482 */
2483 if (iostate.io_issued > iostate.io_completed) {
2484 lck_mtx_lock(cl_mtxp);
2485
2486 while ((iostate.io_issued - iostate.io_completed) > (MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2))) {
2487
2488 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
2489 iostate.io_issued, iostate.io_completed, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), 0, 0);
2490
2491 iostate.io_wanted = 1;
2492 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_contig", NULL);
2493
2494 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
2495 iostate.io_issued, iostate.io_completed, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), 0, 0);
2496 }
2497 lck_mtx_unlock(cl_mtxp);
2498 }
2499 if (iostate.io_error) {
2500 /*
2501 * one of the earlier writes we issued ran into a hard error
2502 * don't issue any more writes...
2503 * go wait for all writes that are part of this stream
2504 * to complete before returning the error to the caller
2505 */
2506 goto wait_for_cwrites;
2507 }
2508 /*
2509 * issue an asynchronous write to cluster_io
2510 */
2511 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset,
2512 xsize, CL_DEV_MEMORY | CL_ASYNC | bflag, (buf_t)NULL, (struct clios *)&iostate, callback, callback_arg);
2513
2514 if (error == 0) {
2515 /*
2516 * The cluster_io write completed successfully,
2517 * update the uio structure
2518 */
2519 uio_update(uio, (user_size_t)xsize);
2520
2521 upl_offset += xsize;
2522 src_paddr += xsize;
2523 io_size -= xsize;
2524 }
2525 }
2526 if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS) {
2527
2528 error = cluster_io_type(uio, write_type, write_length, 0);
2529
2530 if (error == 0 && *write_type == IO_CONTIG) {
2531 cur_upl++;
2532 goto next_cwrite;
2533 }
2534 } else
2535 *write_type = IO_UNKNOWN;
2536
2537 wait_for_cwrites:
2538 /*
2539 * make sure all async writes that are part of this stream
2540 * have completed before we proceed
2541 */
2542 if (iostate.io_issued > iostate.io_completed) {
2543
2544 lck_mtx_lock(cl_mtxp);
2545
2546 while (iostate.io_issued != iostate.io_completed) {
2547 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
2548 iostate.io_issued, iostate.io_completed, 0, 0, 0);
2549
2550 iostate.io_wanted = 1;
2551 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_contig", NULL);
2552
2553 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
2554 iostate.io_issued, iostate.io_completed, 0, 0, 0);
2555 }
2556 lck_mtx_unlock(cl_mtxp);
2557 }
2558 if (iostate.io_error)
2559 error = iostate.io_error;
2560
2561 if (error == 0 && tail_size)
2562 error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg);
2563
2564 for (n = 0; n < num_upl; n++)
2565 /*
2566 * just release our hold on each physically contiguous
2567 * region without changing any state
2568 */
2569 ubc_upl_abort(upl[n], 0);
2570
2571 return (error);
2572 }
2573
2574
2575 /*
2576 * need to avoid a race between an msync of a range of pages dirtied via mmap
2577 * vs a filesystem such as HFS deciding to write a 'hole' to disk via cluster_write's
2578 * zerofill mechanism before it has seen the VNOP_PAGEOUTs for the pages being msync'd
2579 *
2580 * we should never force-zero-fill pages that are already valid in the cache...
2581 * the entire page contains valid data (either from disk, zero-filled or dirtied
2582 * via an mmap) so we can only do damage by trying to zero-fill
2583 *
2584 */
2585 static int
2586 cluster_zero_range(upl_t upl, upl_page_info_t *pl, int flags, int io_offset, off_t zero_off, off_t upl_f_offset, int bytes_to_zero)
2587 {
2588 int zero_pg_index;
2589 boolean_t need_cluster_zero = TRUE;
2590
2591 if ((flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
2592
2593 bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64));
2594 zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64);
2595
2596 if (upl_valid_page(pl, zero_pg_index)) {
2597 /*
2598 * never force zero valid pages - dirty or clean
2599 * we'll leave these in the UPL for cluster_write_copy to deal with
2600 */
2601 need_cluster_zero = FALSE;
2602 }
2603 }
2604 if (need_cluster_zero == TRUE)
2605 cluster_zero(upl, io_offset, bytes_to_zero, NULL);
2606
2607 return (bytes_to_zero);
2608 }
2609
2610
2611 static int
2612 cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, off_t headOff,
2613 off_t tailOff, int flags, int (*callback)(buf_t, void *), void *callback_arg)
2614 {
2615 upl_page_info_t *pl;
2616 upl_t upl;
2617 vm_offset_t upl_offset = 0;
2618 vm_size_t upl_size;
2619 off_t upl_f_offset;
2620 int pages_in_upl;
2621 int start_offset;
2622 int xfer_resid;
2623 int io_size;
2624 int io_offset;
2625 int bytes_to_zero;
2626 int bytes_to_move;
2627 kern_return_t kret;
2628 int retval = 0;
2629 int io_resid;
2630 long long total_size;
2631 long long zero_cnt;
2632 off_t zero_off;
2633 long long zero_cnt1;
2634 off_t zero_off1;
2635 struct cl_extent cl;
2636 struct cl_writebehind *wbp;
2637 int bflag;
2638 u_int max_cluster_pgcount;
2639 u_int max_io_size;
2640
2641 if (uio) {
2642 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
2643 (int)uio->uio_offset, io_req_size, (int)oldEOF, (int)newEOF, 0);
2644
2645 io_resid = io_req_size;
2646 } else {
2647 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
2648 0, 0, (int)oldEOF, (int)newEOF, 0);
2649
2650 io_resid = 0;
2651 }
2652 if (flags & IO_PASSIVE)
2653 bflag = CL_PASSIVE;
2654 else
2655 bflag = 0;
2656
2657 zero_cnt = 0;
2658 zero_cnt1 = 0;
2659 zero_off = 0;
2660 zero_off1 = 0;
2661
2662 max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
2663 max_io_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
2664
2665 if (flags & IO_HEADZEROFILL) {
2666 /*
2667 * some filesystems (HFS is one) don't support unallocated holes within a file...
2668 * so we zero fill the intervening space between the old EOF and the offset
2669 * where the next chunk of real data begins.... ftruncate will also use this
2670 * routine to zero fill to the new EOF when growing a file... in this case, the
2671 * uio structure will not be provided
2672 */
2673 if (uio) {
2674 if (headOff < uio->uio_offset) {
2675 zero_cnt = uio->uio_offset - headOff;
2676 zero_off = headOff;
2677 }
2678 } else if (headOff < newEOF) {
2679 zero_cnt = newEOF - headOff;
2680 zero_off = headOff;
2681 }
2682 } else {
2683 if (uio && uio->uio_offset > oldEOF) {
2684 zero_off = uio->uio_offset & ~PAGE_MASK_64;
2685
2686 if (zero_off >= oldEOF) {
2687 zero_cnt = uio->uio_offset - zero_off;
2688
2689 flags |= IO_HEADZEROFILL;
2690 }
2691 }
2692 }
2693 if (flags & IO_TAILZEROFILL) {
2694 if (uio) {
2695 zero_off1 = uio->uio_offset + io_req_size;
2696
2697 if (zero_off1 < tailOff)
2698 zero_cnt1 = tailOff - zero_off1;
2699 }
2700 } else {
2701 if (uio && newEOF > oldEOF) {
2702 zero_off1 = uio->uio_offset + io_req_size;
2703
2704 if (zero_off1 == newEOF && (zero_off1 & PAGE_MASK_64)) {
2705 zero_cnt1 = PAGE_SIZE_64 - (zero_off1 & PAGE_MASK_64);
2706
2707 flags |= IO_TAILZEROFILL;
2708 }
2709 }
2710 }
2711 if (zero_cnt == 0 && uio == (struct uio *) 0) {
2712 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
2713 retval, 0, 0, 0, 0);
2714 return (0);
2715 }
2716
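	/*
	 * at this point zero_cnt/zero_off describe any head range that needs
	 * zero-filling and zero_cnt1/zero_off1 any tail range... each pass of
	 * the loop below builds a UPL and works through it in that order:
	 * zero the head, copy the user's data, then zero the tail
	 */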
2717 while ((total_size = (io_resid + zero_cnt + zero_cnt1)) && retval == 0) {
2718 /*
2719 * for this iteration of the loop, figure out where our starting point is
2720 */
2721 if (zero_cnt) {
2722 start_offset = (int)(zero_off & PAGE_MASK_64);
2723 upl_f_offset = zero_off - start_offset;
2724 } else if (io_resid) {
2725 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
2726 upl_f_offset = uio->uio_offset - start_offset;
2727 } else {
2728 start_offset = (int)(zero_off1 & PAGE_MASK_64);
2729 upl_f_offset = zero_off1 - start_offset;
2730 }
2731 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE,
2732 (int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0);
2733
2734 if (total_size > max_io_size)
2735 total_size = max_io_size;
2736
2737 cl.b_addr = (daddr64_t)(upl_f_offset / PAGE_SIZE_64);
2738
2739 if (uio && ((flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0)) {
2740 /*
2741 * assumption... total_size <= io_resid
2742 * because IO_HEADZEROFILL and IO_TAILZEROFILL are not set
2743 */
2744 if ((start_offset + total_size) > max_io_size)
2745 total_size = max_io_size - start_offset;
2746 xfer_resid = total_size;
2747
2748 retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1);
2749
2750 if (retval)
2751 break;
2752
2753 io_resid -= (total_size - xfer_resid);
2754 total_size = xfer_resid;
2755 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
2756 upl_f_offset = uio->uio_offset - start_offset;
2757
2758 if (total_size == 0) {
2759 if (start_offset) {
2760 /*
2761 * the write did not finish on a page boundary
2762 * which will leave upl_f_offset pointing to the
2763 * beginning of the last page written instead of
2764 * the page beyond it... bump it in this case
2765 * so that the cluster code records the last page
2766 * written as dirty
2767 */
2768 upl_f_offset += PAGE_SIZE_64;
2769 }
2770 upl_size = 0;
2771
2772 goto check_cluster;
2773 }
2774 }
2775 /*
2776 * compute the size of the upl needed to encompass
2777 * the requested write... limit each call to cluster_io
2778 * to the maximum UPL size... cluster_io will clip if
2779 * this exceeds the maximum io_size for the device...
2780 * make sure to account for
2781 * a starting offset that's not page aligned
2782 */
2783 upl_size = (start_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
2784
2785 if (upl_size > max_io_size)
2786 upl_size = max_io_size;
2787
2788 pages_in_upl = upl_size / PAGE_SIZE;
2789 io_size = upl_size - start_offset;
2790
2791 if ((long long)io_size > total_size)
2792 io_size = total_size;
2793
2794 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);
2795
2796
2797 /*
2798 * Gather the pages from the buffer cache.
2799 * The UPL_WILL_MODIFY flag lets the UPL subsystem know
2800 * that we intend to modify these pages.
2801 */
2802 kret = ubc_create_upl(vp,
2803 upl_f_offset,
2804 upl_size,
2805 &upl,
2806 &pl,
2807 UPL_SET_LITE | (( uio!=NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 0 : UPL_WILL_MODIFY));
2808 if (kret != KERN_SUCCESS)
2809 panic("cluster_write_copy: failed to get pagelist");
2810
2811 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END,
2812 upl, (int)upl_f_offset, start_offset, 0, 0);
2813
2814 if (start_offset && upl_f_offset < oldEOF && !upl_valid_page(pl, 0)) {
2815 int read_size;
2816
2817 /*
2818 * we're starting in the middle of the first page of the upl
2819 * and the page isn't currently valid, so we're going to have
2820 * to read it in first... this is a synchronous operation
2821 */
2822 read_size = PAGE_SIZE;
2823
2824 if ((upl_f_offset + read_size) > oldEOF)
2825 read_size = oldEOF - upl_f_offset;
2826
2827 retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
2828 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
2829 if (retval) {
2830 /*
2831 * we had an error during the read which causes us to abort
2832 * the current cluster_write request... before we do, we need
2833 * to release the rest of the pages in the upl without modifying
2834 * their state and mark the failed page in error
2835 */
2836 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY);
2837
2838 if (upl_size > PAGE_SIZE)
2839 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
2840
2841 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
2842 upl, 0, 0, retval, 0);
2843 break;
2844 }
2845 }
2846 if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
2847 /*
2848 * the last offset we're writing to in this upl does not end on a page
2849 * boundary... if it's not beyond the old EOF, then we'll also need to
2850 * pre-read this page in if it isn't already valid
2851 */
2852 upl_offset = upl_size - PAGE_SIZE;
2853
2854 if ((upl_f_offset + start_offset + io_size) < oldEOF &&
2855 !upl_valid_page(pl, upl_offset / PAGE_SIZE)) {
2856 int read_size;
2857
2858 read_size = PAGE_SIZE;
2859
2860 if ((off_t)(upl_f_offset + upl_offset + read_size) > oldEOF)
2861 read_size = oldEOF - (upl_f_offset + upl_offset);
2862
2863 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
2864 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
2865 if (retval) {
2866 /*
2867 * we had an error during the read which causes us to abort
2868 * the current cluster_write request... before we do, we
2869 * need to release the rest of the pages in the upl without
2870 * modifying their state and mark the failed page in error
2871 */
2872 ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY);
2873
2874 if (upl_size > PAGE_SIZE)
2875 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
2876
2877 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
2878 upl, 0, 0, retval, 0);
2879 break;
2880 }
2881 }
2882 }
2883 xfer_resid = io_size;
2884 io_offset = start_offset;
2885
2886 while (zero_cnt && xfer_resid) {
2887
2888 if (zero_cnt < (long long)xfer_resid)
2889 bytes_to_zero = zero_cnt;
2890 else
2891 bytes_to_zero = xfer_resid;
2892
2893 bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off, upl_f_offset, bytes_to_zero);
2894
2895 xfer_resid -= bytes_to_zero;
2896 zero_cnt -= bytes_to_zero;
2897 zero_off += bytes_to_zero;
2898 io_offset += bytes_to_zero;
2899 }
2900 if (xfer_resid && io_resid) {
2901 u_int32_t io_requested;
2902
2903 bytes_to_move = min(io_resid, xfer_resid);
2904 io_requested = bytes_to_move;
2905
2906 retval = cluster_copy_upl_data(uio, upl, io_offset, (int *)&io_requested);
2907
2908 if (retval) {
2909 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
2910
2911 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
2912 upl, 0, 0, retval, 0);
2913 } else {
2914 io_resid -= bytes_to_move;
2915 xfer_resid -= bytes_to_move;
2916 io_offset += bytes_to_move;
2917 }
2918 }
2919 while (xfer_resid && zero_cnt1 && retval == 0) {
2920
2921 if (zero_cnt1 < (long long)xfer_resid)
2922 bytes_to_zero = zero_cnt1;
2923 else
2924 bytes_to_zero = xfer_resid;
2925
2926 bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off1, upl_f_offset, bytes_to_zero);
2927
2928 xfer_resid -= bytes_to_zero;
2929 zero_cnt1 -= bytes_to_zero;
2930 zero_off1 += bytes_to_zero;
2931 io_offset += bytes_to_zero;
2932 }
2933 if (retval == 0) {
2934 int cl_index;
2935 int ret_cluster_try_push;
2936
2937 io_size += start_offset;
2938
2939 if ((upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
2940 /*
2941 * if we're extending the file with this write
2942 * we'll zero fill the rest of the page so that
2943 * if the file gets extended again in such a way as to leave a
2944 * hole starting at this EOF, we'll have zero's in the correct spot
2945 */
2946 cluster_zero(upl, io_size, upl_size - io_size, NULL);
2947 }
2948 /*
2949 * release the upl now if we hold one since...
2950 * 1) pages in it may be present in the sparse cluster map
2951 * and may span 2 separate buckets there... if they do and
2952 * we happen to have to flush a bucket to make room and it intersects
2953 * this upl, a deadlock may result on page BUSY
2954 * 2) we're delaying the I/O... from this point forward we're just updating
2955 * the cluster state... no need to hold the pages, so commit them
2956 * 3) IO_SYNC is set...
2957 * because we had to ask for a UPL that provides currently non-present pages, the
2958 * UPL has been automatically set to clear the dirty flags (both software and hardware)
2959 * upon committing it... this is not the behavior we want since it's possible for
2960 * pages currently present as part of a mapped file to be dirtied while the I/O is in flight.
2961 * we'll pick these pages back up later with the correct behavior specified.
2962 * 4) we don't want to hold pages busy in a UPL and then block on the cluster lock... if a flush
2963 * of this vnode is in progress, we will deadlock if the pages being flushed intersect the pages
2964 * we hold since the flushing context is holding the cluster lock.
2965 */
2966 ubc_upl_commit_range(upl, 0, upl_size,
2967 UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
2968 check_cluster:
2969 /*
2970 * calculate the last logical block number
2971 * that this delayed I/O encompassed
2972 */
2973 cl.e_addr = (daddr64_t)((upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64);
2974
2975 if (flags & IO_SYNC) {
2976 /*
2977 * if the IO_SYNC flag is set then we need to
2978 * bypass any clusters and immediately issue
2979 * the I/O
2980 */
2981 goto issue_io;
2982 }
2983 /*
2984 * take the lock to protect our accesses
2985 * of the writebehind and sparse cluster state
2986 */
2987 wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);
2988
2989 if (wbp->cl_scmap) {
2990
2991 if ( !(flags & IO_NOCACHE)) {
2992 /*
2993 * we've fallen into the sparse
2994 * cluster method of delaying dirty pages
2995 */
2996 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);
2997
2998 lck_mtx_unlock(&wbp->cl_lockw);
2999
3000 continue;
3001 }
3002 /*
3003 * must have done cached writes that fell into
3004 * the sparse cluster mechanism... we've switched
3005 * to uncached writes on the file, so go ahead
3006 * and push whatever's in the sparse map
3007 * and switch back to normal clustering
3008 */
3009 wbp->cl_number = 0;
3010
3011 sparse_cluster_push(&(wbp->cl_scmap), vp, newEOF, PUSH_ALL, callback, callback_arg);
3012 /*
3013 * no clusters of either type present at this point
3014 * so just go directly to start_new_cluster since
3015 * we know we need to delay this I/O since we've
3016 * already released the pages back into the cache
3017 * to avoid the deadlock with sparse_cluster_push
3018 */
3019 goto start_new_cluster;
3020 }
3021 if (wbp->cl_number == 0)
3022 /*
3023 * no clusters currently present
3024 */
3025 goto start_new_cluster;
3026
3027 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
3028 /*
3029 * check each cluster that we currently hold
3030 * try to merge some or all of this write into
3031 * one or more of the existing clusters... if
3032 * any portion of the write remains, start a
3033 * new cluster
3034 */
3035 if (cl.b_addr >= wbp->cl_clusters[cl_index].b_addr) {
3036 /*
3037 * the current write starts at or after the current cluster
3038 */
3039 if (cl.e_addr <= (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
3040 /*
3041 * we have a write that fits entirely
3042 * within the existing cluster limits
3043 */
3044 if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr)
3045 /*
3046 * update our idea of where the cluster ends
3047 */
3048 wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
3049 break;
3050 }
3051 if (cl.b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
3052 /*
3053 * we have a write that starts in the middle of the current cluster
3054 * but extends beyond the cluster's limit... we know this because
3055 * of the previous checks
3056 * we'll extend the current cluster to the max
3057 * and update the b_addr for the current write to reflect that
3058 * the head of it was absorbed into this cluster...
3059 * note that we'll always have a leftover tail in this case since
3060 * full absorption would have occurred in the clause above
3061 */
3062 wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount;
3063
3064 cl.b_addr = wbp->cl_clusters[cl_index].e_addr;
3065 }
3066 /*
3067 * we come here for the case where the current write starts
3068 * beyond the limit of the existing cluster or we have a leftover
3069 * tail after a partial absorption
3070 *
3071 * in either case, we'll check the remaining clusters before
3072 * starting a new one
3073 */
3074 } else {
3075 /*
3076 * the current write starts in front of the cluster we're currently considering
3077 */
3078 if ((wbp->cl_clusters[cl_index].e_addr - cl.b_addr) <= max_cluster_pgcount) {
3079 /*
3080 * we can just merge the new request into
3081 * this cluster and leave it in the cache
3082 * since the resulting cluster is still
3083 * less than the maximum allowable size
3084 */
3085 wbp->cl_clusters[cl_index].b_addr = cl.b_addr;
3086
3087 if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr) {
3088 /*
3089 * the current write completely
3090 * envelops the existing cluster and since
3091 * each write is limited to at most max_cluster_pgcount pages
3092 * we can just use the start and last blocknos of the write
3093 * to generate the cluster limits
3094 */
3095 wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
3096 }
3097 break;
3098 }
3099
3100 /*
3101 * if we were to combine this write with the current cluster
3102 * we would exceed the cluster size limit.... so,
3103 * let's see if there's any overlap of the new I/O with
3104 * the cluster we're currently considering... in fact, we'll
3105 * stretch the cluster out to its full limit and see if we
3106 * get an intersection with the current write
3107 *
3108 */
3109 if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount) {
3110 /*
3111 * the current write extends into the proposed cluster
3112 * clip the length of the current write after first combining its
3113 * tail with the newly shaped cluster
3114 */
3115 wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount;
3116
3117 cl.e_addr = wbp->cl_clusters[cl_index].b_addr;
3118 }
3119 /*
3120 * if we get here, there was no way to merge
3121 * any portion of this write with this cluster
3122 * or we could only merge part of it which
3123 * will leave a tail...
3124 * we'll check the remaining clusters before starting a new one
3125 */
3126 }
3127 }
3128 if (cl_index < wbp->cl_number)
3129 /*
3130 * we found an existing cluster(s) that we
3131 * could entirely merge this I/O into
3132 */
3133 goto delay_io;
3134
3135 if (wbp->cl_number < MAX_CLUSTERS)
3136 /*
3137 * we didn't find an existing cluster to
3138 * merge into, but there's room to start
3139 * a new one
3140 */
3141 goto start_new_cluster;
3142
3143 /*
3144 * no existing cluster to merge with and no
3145 * room to start a new one... we'll try
3146 * pushing one of the existing ones... if none of
3147 * them are able to be pushed, we'll switch
3148 * to the sparse cluster mechanism
3149 * cluster_try_push updates cl_number to the
3150 * number of remaining clusters... and
3151 * returns the number of currently unused clusters
3152 */
3153 ret_cluster_try_push = 0;
3154
3155 /*
3156 * if writes are not deferred, call cluster push immediately
3157 */
3158 if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE)) {
3159
3160 ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, callback, callback_arg);
3161 }
3162
3163 /*
3164 * execute following regardless of writes being deferred or not
3165 */
3166 if (ret_cluster_try_push == 0) {
3167 /*
3168 * no more room in the normal cluster mechanism
3169 * so let's switch to the more expansive but expensive
3170 * sparse mechanism....
3171 */
3172 sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg);
3173 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);
3174
3175 lck_mtx_unlock(&wbp->cl_lockw);
3176
3177 continue;
3178 }
3179 /*
3180 * we pushed one cluster successfully, so we must be sequentially writing this file
3181 * otherwise, we would have failed and fallen into the sparse cluster support
3182 * so let's take the opportunity to push out additional clusters...
3183 * this will give us better I/O locality if we're in a copy loop
3184 * (i.e. we won't jump back and forth between the read and write points)
3185 */
3186 if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE)) {
3187 while (wbp->cl_number)
3188 cluster_try_push(wbp, vp, newEOF, 0, callback, callback_arg);
3189 }
3190
3191 start_new_cluster:
3192 wbp->cl_clusters[wbp->cl_number].b_addr = cl.b_addr;
3193 wbp->cl_clusters[wbp->cl_number].e_addr = cl.e_addr;
3194
3195 wbp->cl_clusters[wbp->cl_number].io_flags = 0;
3196
3197 if (flags & IO_NOCACHE)
3198 wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IONOCACHE;
3199
3200 if (bflag & CL_PASSIVE)
3201 wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IOPASSIVE;
3202
3203 wbp->cl_number++;
3204 delay_io:
3205 lck_mtx_unlock(&wbp->cl_lockw);
3206
3207 continue;
3208 issue_io:
3209 /*
3210 * we don't hold the lock at this point
3211 *
3212 * we've already dropped the current upl, so pick it back up with COPYOUT_FROM set
3213 * so that we correctly deal with a change in state of the hardware modify bit...
3214 * we do this via cluster_push_now... by passing along the IO_SYNC flag, we force
3215 * cluster_push_now to wait until all the I/Os have completed... cluster_push_now is also
3216 * responsible for generating the correct sized I/O(s)
3217 */
3218 retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg);
3219 }
3220 }
3221 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, retval, 0, io_resid, 0, 0);
3222
3223 return (retval);
3224 }
3225
3226
3227
3228 int
3229 cluster_read(vnode_t vp, struct uio *uio, off_t filesize, int xflags)
3230 {
3231 return cluster_read_ext(vp, uio, filesize, xflags, NULL, NULL);
3232 }
3233
3234
3235 int
3236 cluster_read_ext(vnode_t vp, struct uio *uio, off_t filesize, int xflags, int (*callback)(buf_t, void *), void *callback_arg)
3237 {
3238 int retval = 0;
3239 int flags;
3240 user_ssize_t cur_resid;
3241 u_int32_t io_size;
3242 u_int32_t read_length = 0;
3243 int read_type = IO_COPY;
3244
3245 flags = xflags;
3246
3247 if (vp->v_flag & VNOCACHE_DATA)
3248 flags |= IO_NOCACHE;
3249 if ((vp->v_flag & VRAOFF) || speculative_reads_disabled)
3250 flags |= IO_RAOFF;
3251
3252 /*
3253 * do a read through the cache if one of the following is true....
3254 * NOCACHE is not true
3255 * the uio request doesn't target USERSPACE
3256 * otherwise, find out if we want the direct or contig variant for
3257 * the first vector in the uio request
3258 */
3259 if ( (flags & IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg) )
3260 retval = cluster_io_type(uio, &read_type, &read_length, 0);
3261
3262 while ((cur_resid = uio_resid(uio)) && uio->uio_offset < filesize && retval == 0) {
3263
3264 switch (read_type) {
3265
3266 case IO_COPY:
3267 /*
3268 * make sure the uio_resid isn't too big...
3269 * internally, we want to handle all of the I/O in
3270 * chunk sizes that fit in a 32 bit int
3271 */
3272 if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE))
3273 io_size = MAX_IO_REQUEST_SIZE;
3274 else
3275 io_size = (u_int32_t)cur_resid;
3276
3277 retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
3278 break;
3279
3280 case IO_DIRECT:
3281 retval = cluster_read_direct(vp, uio, filesize, &read_type, &read_length, flags, callback, callback_arg);
3282 break;
3283
3284 case IO_CONTIG:
3285 retval = cluster_read_contig(vp, uio, filesize, &read_type, &read_length, callback, callback_arg, flags);
3286 break;
3287
3288 case IO_UNKNOWN:
3289 retval = cluster_io_type(uio, &read_type, &read_length, 0);
3290 break;
3291 }
3292 }
3293 return (retval);
3294 }
3295
3296
3297
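/*
 * helper to release (abort) the pages in [start_pg, last_pg) of the upl
 * without modifying their contents... when take_reference is non-zero,
 * UPL_ABORT_REFERENCE is added, presumably so the pages are treated as
 * recently referenced and stay resident a while longer
 */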
3298 static void
3299 cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference)
3300 {
3301 int range;
3302 int abort_flags = UPL_ABORT_FREE_ON_EMPTY;
3303
3304 if ((range = last_pg - start_pg)) {
3305 if (take_reference)
3306 abort_flags |= UPL_ABORT_REFERENCE;
3307
3308 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, range * PAGE_SIZE, abort_flags);
3309 }
3310 }
3311
3312
3313 static int
3314 cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
3315 {
3316 upl_page_info_t *pl;
3317 upl_t upl;
3318 vm_offset_t upl_offset;
3319 u_int32_t upl_size;
3320 off_t upl_f_offset;
3321 int start_offset;
3322 int start_pg;
3323 int last_pg;
3324 int uio_last = 0;
3325 int pages_in_upl;
3326 off_t max_size;
3327 off_t last_ioread_offset;
3328 off_t last_request_offset;
3329 kern_return_t kret;
3330 int error = 0;
3331 int retval = 0;
3332 u_int32_t size_of_prefetch;
3333 u_int32_t xsize;
3334 u_int32_t io_size;
3335 u_int32_t max_rd_size;
3336 u_int32_t max_io_size;
3337 u_int32_t max_prefetch;
3338 u_int rd_ahead_enabled = 1;
3339 u_int prefetch_enabled = 1;
3340 struct cl_readahead * rap;
3341 struct clios iostate;
3342 struct cl_extent extent;
3343 int bflag;
3344 int take_reference = 1;
3345 struct uthread *ut;
3346 int policy = IOPOL_DEFAULT;
3347
3348
3349 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START,
3350 (int)uio->uio_offset, io_req_size, (int)filesize, flags, 0);
3351
3352 policy = current_proc()->p_iopol_disk;
3353
3354 ut = get_bsdthread_info(current_thread());
3355
3356 if (ut->uu_iopol_disk != IOPOL_DEFAULT)
3357 policy = ut->uu_iopol_disk;
3358
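	/*
	 * throttled or uncached readers skip taking the extra reference on the
	 * pages they touch... the idea being that they shouldn't promote their
	 * pages ahead of those belonging to normal cached readers
	 */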
3359 if (policy == IOPOL_THROTTLE || (flags & IO_NOCACHE))
3360 take_reference = 0;
3361
3362 if (flags & IO_PASSIVE)
3363 bflag = CL_PASSIVE;
3364 else
3365 bflag = 0;
3366
3367 max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
3368 max_prefetch = MAX_PREFETCH(vp, max_io_size);
3369 max_rd_size = max_prefetch;
3370
3371 last_request_offset = uio->uio_offset + io_req_size;
3372
3373 if (last_request_offset > filesize)
3374 last_request_offset = filesize;
3375
3376 if ((flags & (IO_RAOFF|IO_NOCACHE)) || ((last_request_offset & ~PAGE_MASK_64) == (uio->uio_offset & ~PAGE_MASK_64))) {
3377 rd_ahead_enabled = 0;
3378 rap = NULL;
3379 } else {
3380 if (cluster_hard_throttle_on(vp, 1)) {
3381 rd_ahead_enabled = 0;
3382 prefetch_enabled = 0;
3383
3384 max_rd_size = HARD_THROTTLE_MAXSIZE;
3385 } else if (policy == IOPOL_THROTTLE) {
3386 rd_ahead_enabled = 0;
3387 prefetch_enabled = 0;
3388 }
3389 if ((rap = cluster_get_rap(vp)) == NULL)
3390 rd_ahead_enabled = 0;
3391 else {
3392 extent.b_addr = uio->uio_offset / PAGE_SIZE_64;
3393 extent.e_addr = (last_request_offset - 1) / PAGE_SIZE_64;
3394 }
3395 }
3396 if (rap != NULL && rap->cl_ralen && (rap->cl_lastr == extent.b_addr || (rap->cl_lastr + 1) == extent.b_addr)) {
3397 /*
3398 * determine if we already have a read-ahead in the pipe courtesy of the
3399 * last read system call that was issued...
3400 * if so, pick up its extent to determine where we should start
3401 * with respect to any read-ahead that might be necessary to
3402 * garner all the data needed to complete this read system call
3403 */
3404 last_ioread_offset = (rap->cl_maxra * PAGE_SIZE_64) + PAGE_SIZE_64;
3405
3406 if (last_ioread_offset < uio->uio_offset)
3407 last_ioread_offset = (off_t)0;
3408 else if (last_ioread_offset > last_request_offset)
3409 last_ioread_offset = last_request_offset;
3410 } else
3411 last_ioread_offset = (off_t)0;
3412
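	/*
	 * last_ioread_offset tracks how far ahead of the copy position read I/O
	 * has already been issued... inside the loop, whenever that lead shrinks
	 * to max_rd_size or less with data still outstanding, another prefetch
	 * is issued so the copy out of the cache can overlap the disk reads
	 */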
3413 while (io_req_size && uio->uio_offset < filesize && retval == 0) {
3414
3415 max_size = filesize - uio->uio_offset;
3416
3417 if ((off_t)(io_req_size) < max_size)
3418 io_size = io_req_size;
3419 else
3420 io_size = max_size;
3421
3422 if (!(flags & IO_NOCACHE)) {
3423
3424 while (io_size) {
3425 u_int32_t io_resid;
3426 u_int32_t io_requested;
3427
3428 /*
3429 * if we keep finding the pages we need already in the cache, then
3430 * don't bother to call cluster_read_prefetch since it costs CPU cycles
3431 * to determine that we have all the pages we need... once we miss in
3432 * the cache and have issued an I/O, then we'll assume that we're likely
3433 * to continue to miss in the cache and it's to our advantage to try and prefetch
3434 */
3435 if (last_request_offset && last_ioread_offset && (size_of_prefetch = (last_request_offset - last_ioread_offset))) {
3436 if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) {
3437 /*
3438 * we've already issued I/O for this request and
3439 * there's still work to do and
3440 * our prefetch stream is running dry, so issue a
3441 * pre-fetch I/O... the I/O latency will overlap
3442 * with the copying of the data
3443 */
3444 if (size_of_prefetch > max_rd_size)
3445 size_of_prefetch = max_rd_size;
3446
3447 size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
3448
3449 last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
3450
3451 if (last_ioread_offset > last_request_offset)
3452 last_ioread_offset = last_request_offset;
3453 }
3454 }
3455 /*
3456 * limit the size of the copy we're about to do so that
3457 * we can notice that our I/O pipe is running dry and
3458 * get the next I/O issued before it does go dry
3459 */
3460 if (last_ioread_offset && io_size > (max_io_size / 4))
3461 io_resid = (max_io_size / 4);
3462 else
3463 io_resid = io_size;
3464
3465 io_requested = io_resid;
3466
3467 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_resid, 0, last_ioread_offset == 0 ? take_reference : 0);
3468
3469 xsize = io_requested - io_resid;
3470
3471 io_size -= xsize;
3472 io_req_size -= xsize;
3473
3474 if (retval || io_resid)
3475 /*
3476 * if we run into a real error or
3477 * a page that is not in the cache
3478 * we need to leave streaming mode
3479 */
3480 break;
3481
3482 if (rd_ahead_enabled && (io_size == 0 || last_ioread_offset == last_request_offset)) {
3483 /*
3484 * we've already finished the I/O for this read request
3485 * let's see if we should do a read-ahead
3486 */
3487 cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
3488 }
3489 }
3490 if (retval)
3491 break;
3492 if (io_size == 0) {
3493 if (rap != NULL) {
3494 if (extent.e_addr < rap->cl_lastr)
3495 rap->cl_maxra = 0;
3496 rap->cl_lastr = extent.e_addr;
3497 }
3498 break;
3499 }
3500 /*
3501 * recompute max_size since cluster_copy_ubc_data_internal
3502 * may have advanced uio->uio_offset
3503 */
3504 max_size = filesize - uio->uio_offset;
3505 }
3506 /*
3507 * compute the size of the upl needed to encompass
3508 * the requested read... limit each call to cluster_io
3509 * to the maximum UPL size... cluster_io will clip if
3510 * this exceeds the maximum io_size for the device...
3511 * make sure to account for
3512 * a starting offset that's not page aligned
3513 */
3514 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
3515 upl_f_offset = uio->uio_offset - (off_t)start_offset;
3516
3517 if (io_size > max_rd_size)
3518 io_size = max_rd_size;
3519
3520 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
3521
3522 if (flags & IO_NOCACHE) {
3523 if (upl_size > max_io_size)
3524 upl_size = max_io_size;
3525 } else {
3526 if (upl_size > max_io_size / 4)
3527 upl_size = max_io_size / 4;
3528 }
3529 pages_in_upl = upl_size / PAGE_SIZE;
3530
3531 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START,
3532 upl, (int)upl_f_offset, upl_size, start_offset, 0);
3533
3534 kret = ubc_create_upl(vp,
3535 upl_f_offset,
3536 upl_size,
3537 &upl,
3538 &pl,
3539 UPL_FILE_IO | UPL_SET_LITE);
3540 if (kret != KERN_SUCCESS)
3541 panic("cluster_read_copy: failed to get pagelist");
3542
3543 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_END,
3544 upl, (int)upl_f_offset, upl_size, start_offset, 0);
3545
3546 /*
3547 * scan from the beginning of the upl looking for the first
3548 * non-valid page.... this will become the first page in
3549 * the request we're going to make to 'cluster_io'... if all
3550 * of the pages are valid, we won't call through to 'cluster_io'
3551 */
3552 for (start_pg = 0; start_pg < pages_in_upl; start_pg++) {
3553 if (!upl_valid_page(pl, start_pg))
3554 break;
3555 }
3556
3557 /*
3558 * scan from the starting invalid page looking for a valid
3559 * page before the end of the upl is reached, if we
3560 * find one, then it will be the last page of the request to
3561 * 'cluster_io'
3562 */
3563 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
3564 if (upl_valid_page(pl, last_pg))
3565 break;
3566 }
3567 iostate.io_completed = 0;
3568 iostate.io_issued = 0;
3569 iostate.io_error = 0;
3570 iostate.io_wanted = 0;
3571
3572 if (start_pg < last_pg) {
3573 /*
3574 * we found a range of 'invalid' pages that must be filled
3575 * if the last page in this range is the last page of the file
3576 * we may have to clip the size of it to keep from reading past
3577 * the end of the last physical block associated with the file
3578 */
3579 upl_offset = start_pg * PAGE_SIZE;
3580 io_size = (last_pg - start_pg) * PAGE_SIZE;
3581
3582 if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize)
3583 io_size = filesize - (upl_f_offset + upl_offset);
3584
3585 /*
3586 * issue an asynchronous read to cluster_io
3587 */
3588
3589 error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
3590 io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg);
3591 }
3592 if (error == 0) {
3593 /*
3594 * if the read completed successfully, or there was no I/O request
3595 * issued, then copy the data into user land via 'cluster_copy_upl_data'...
3596 * we'll first add on any 'valid'
3597 * pages that were present in the upl when we acquired it.
3598 */
3599 u_int val_size;
3600
3601 for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
3602 if (!upl_valid_page(pl, uio_last))
3603 break;
3604 }
3605 if (uio_last < pages_in_upl) {
3606 /*
3607 * there were some invalid pages beyond the valid pages
3608 * that we didn't issue an I/O for, just release them
3609 * unchanged now, so that any prefetch/readahead can
3610 * include them
3611 */
3612 ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
3613 (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
3614 }
3615
3616 /*
3617 * compute size to transfer this round, if io_req_size is
3618 * still non-zero after this attempt, we'll loop around and
3619 * set up for another I/O.
3620 */
3621 val_size = (uio_last * PAGE_SIZE) - start_offset;
3622
3623 if (val_size > max_size)
3624 val_size = max_size;
3625
3626 if (val_size > io_req_size)
3627 val_size = io_req_size;
3628
3629 if ((uio->uio_offset + val_size) > last_ioread_offset)
3630 last_ioread_offset = uio->uio_offset + val_size;
3631
3632 if ((size_of_prefetch = (last_request_offset - last_ioread_offset)) && prefetch_enabled) {
3633
3634 if ((last_ioread_offset - (uio->uio_offset + val_size)) <= upl_size) {
3635 /*
3636 * if there's still I/O left to do for this request, and...
3637 * we're not in hard throttle mode, and...
3638 * we're close to using up the previous prefetch, then issue a
3639 * new pre-fetch I/O... the I/O latency will overlap
3640 * with the copying of the data
3641 */
3642 if (size_of_prefetch > max_rd_size)
3643 size_of_prefetch = max_rd_size;
3644
3645 size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
3646
3647 last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
3648
3649 if (last_ioread_offset > last_request_offset)
3650 last_ioread_offset = last_request_offset;
3651 }
3652
3653 } else if ((uio->uio_offset + val_size) == last_request_offset) {
3654 /*
3655 * this transfer will finish this request, so...
3656 * let's try to read ahead if we're in
3657 * a sequential access pattern and we haven't
3658 * explicitly disabled it
3659 */
3660 if (rd_ahead_enabled)
3661 cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
3662
3663 if (rap != NULL) {
3664 if (extent.e_addr < rap->cl_lastr)
3665 rap->cl_maxra = 0;
3666 rap->cl_lastr = extent.e_addr;
3667 }
3668 }
3669 if (iostate.io_issued > iostate.io_completed) {
3670
3671 lck_mtx_lock(cl_mtxp);
3672
3673 while (iostate.io_issued != iostate.io_completed) {
3674 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
3675 iostate.io_issued, iostate.io_completed, 0, 0, 0);
3676
3677 iostate.io_wanted = 1;
3678 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_copy", NULL);
3679
3680 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
3681 iostate.io_issued, iostate.io_completed, 0, 0, 0);
3682 }
3683 lck_mtx_unlock(cl_mtxp);
3684 }
3685 if (iostate.io_error)
3686 error = iostate.io_error;
3687 else {
3688 u_int32_t io_requested;
3689
3690 io_requested = val_size;
3691
3692 retval = cluster_copy_upl_data(uio, upl, start_offset, (int *)&io_requested);
3693
3694 io_req_size -= (val_size - io_requested);
3695 }
3696 }
3697 if (start_pg < last_pg) {
3698 /*
3699 * compute the range of pages that we actually issued an I/O for
3700 * and either commit them as valid if the I/O succeeded
3701 * or abort them if the I/O failed or we're not supposed to
3702 * keep them in the cache
3703 */
3704 io_size = (last_pg - start_pg) * PAGE_SIZE;
3705
3706 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, upl, start_pg * PAGE_SIZE, io_size, error, 0);
3707
3708 if (error || (flags & IO_NOCACHE))
3709 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
3710 UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
3711 else {
3712 int commit_flags = UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY;
3713
3714 if (take_reference)
3715 commit_flags |= UPL_COMMIT_INACTIVATE;
3716 else
3717 commit_flags |= UPL_COMMIT_SPECULATE;
3718
3719 ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, commit_flags);
3720 }
3721 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, start_pg * PAGE_SIZE, io_size, error, 0);
3722 }
3723 if ((last_pg - start_pg) < pages_in_upl) {
3724 /*
3725 * the set of pages that we issued an I/O for did not encompass
3726 * the entire upl... so just release these without modifying
3727 * their state
3728 */
3729 if (error)
3730 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
3731 else {
3732
3733 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
3734 upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);
3735
3736 /*
3737 * handle any valid pages at the beginning of
3738 * the upl... release these appropriately
3739 */
3740 cluster_read_upl_release(upl, 0, start_pg, take_reference);
3741
3742 /*
3743 * handle any valid pages immediately after the
3744 * pages we issued I/O for... release these appropriately
3745 */
3746 cluster_read_upl_release(upl, last_pg, uio_last, take_reference);
3747
3748 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, -1, -1, 0, 0);
3749 }
3750 }
3751 if (retval == 0)
3752 retval = error;
3753
3754 if (io_req_size) {
3755 if (cluster_hard_throttle_on(vp, 1)) {
3756 rd_ahead_enabled = 0;
3757 prefetch_enabled = 0;
3758
3759 max_rd_size = HARD_THROTTLE_MAXSIZE;
3760 } else {
3761 if (max_rd_size == HARD_THROTTLE_MAXSIZE) {
3762 /*
3763 * coming out of throttled state
3764 */
3765 if (policy != IOPOL_THROTTLE) {
3766 if (rap != NULL)
3767 rd_ahead_enabled = 1;
3768 prefetch_enabled = 1;
3769 }
3770 max_rd_size = max_prefetch;
3771 last_ioread_offset = 0;
3772 }
3773 }
3774 }
3775 }
3776 if (rap != NULL) {
3777 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
3778 (int)uio->uio_offset, io_req_size, rap->cl_lastr, retval, 0);
3779
3780 lck_mtx_unlock(&rap->cl_lockr);
3781 } else {
3782 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
3783 (int)uio->uio_offset, io_req_size, 0, retval, 0);
3784 }
3785
3786 return (retval);
3787 }
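/*
 * A minimal sketch of the UPL sizing arithmetic used by cluster_read_copy
 * above: fold a non-page-aligned file offset into a page-aligned UPL span.
 * The helper name is hypothetical and purely illustrative; it assumes only
 * the PAGE_SIZE / PAGE_MASK / PAGE_MASK_64 definitions already provided by
 * the headers included at the top of this file.
 */
static u_int32_t
example_upl_span_for_read(off_t uio_offset, u_int32_t io_size, off_t *upl_f_offset)
{
	int       start_offset;
	u_int32_t upl_size;

	/* offset of the request within its first page */
	start_offset = (int)(uio_offset & PAGE_MASK_64);
	/* the UPL must begin on the page boundary at or below the request */
	*upl_f_offset = uio_offset - (off_t)start_offset;
	/* round the span up to whole pages so the trailing partial page is covered */
	upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

	return (upl_size);
}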
3788
3789
3790 static int
3791 cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
3792 int flags, int (*callback)(buf_t, void *), void *callback_arg)
3793 {
3794 upl_t upl;
3795 upl_page_info_t *pl;
3796 off_t max_io_size;
3797 vm_offset_t upl_offset, vector_upl_offset = 0;
3798 upl_size_t upl_size, vector_upl_size = 0;
3799 vm_size_t upl_needed_size;
3800 unsigned int pages_in_pl;
3801 int upl_flags;
3802 kern_return_t kret;
3803 unsigned int i;
3804 int force_data_sync;
3805 int retval = 0;
3806 int no_zero_fill = 0;
3807 int io_flag = 0;
3808 int misaligned = 0;
3809 struct clios iostate;
3810 user_addr_t iov_base;
3811 u_int32_t io_req_size;
3812 u_int32_t offset_in_file;
3813 u_int32_t offset_in_iovbase;
3814 u_int32_t io_size;
3815 u_int32_t io_min;
3816 u_int32_t xsize;
3817 u_int32_t devblocksize;
3818 u_int32_t mem_alignment_mask;
3819 u_int32_t max_upl_size;
3820 u_int32_t max_rd_size;
3821 u_int32_t max_rd_ahead;
3822
3823 u_int32_t vector_upl_iosize = 0;
3824 int issueVectorUPL = 0,useVectorUPL = (uio->uio_iovcnt > 1);
3825 off_t v_upl_uio_offset = 0;
3826 int vector_upl_index=0;
3827 upl_t vector_upl = NULL;
3828
3829 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START,
3830 (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
3831
3832 max_upl_size = cluster_max_io_size(vp->v_mount, CL_READ);
3833
3834 max_rd_size = max_upl_size;
3835 max_rd_ahead = max_rd_size * IO_SCALE(vp, 2);
3836
3837 io_flag = CL_COMMIT | CL_READ | CL_ASYNC | CL_NOZERO | CL_DIRECT_IO;
3838 if (flags & IO_PASSIVE)
3839 io_flag |= CL_PASSIVE;
3840
3841 iostate.io_completed = 0;
3842 iostate.io_issued = 0;
3843 iostate.io_error = 0;
3844 iostate.io_wanted = 0;
3845
3846 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
3847 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
3848
3849 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
3850 (int)devblocksize, (int)mem_alignment_mask, 0, 0, 0);
3851
3852 if (devblocksize == 1) {
3853 /*
3854 * the AFP client advertises a devblocksize of 1
3855 * however, its BLOCKMAP routine maps to physical
3856 * blocks that are PAGE_SIZE in size...
3857 * therefore we can't ask for I/Os that aren't page aligned
3858 * or aren't multiples of PAGE_SIZE in size
3859 * by setting devblocksize to PAGE_SIZE, we re-instate
3860 * the old behavior we had before the mem_alignment_mask
3861 * changes went in...
3862 */
3863 devblocksize = PAGE_SIZE;
3864 }
3865 next_dread:
3866 io_req_size = *read_length;
3867 iov_base = uio_curriovbase(uio);
3868
3869 max_io_size = filesize - uio->uio_offset;
3870
3871 if ((off_t)io_req_size > max_io_size)
3872 io_req_size = max_io_size;
3873
3874 offset_in_file = (u_int32_t)uio->uio_offset & (devblocksize - 1);
3875 offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;
3876
3877 if (offset_in_file || offset_in_iovbase) {
3878 /*
3879 * one of the 2 important offsets is misaligned
3880 * so fire an I/O through the cache for this entire vector
3881 */
3882 misaligned = 1;
3883 }
3884 if (iov_base & (devblocksize - 1)) {
3885 /*
3886 * the offset in memory must be on a device block boundary
3887 * so that we can guarantee that we can generate an
3888 * I/O that ends on a page boundary in cluster_io
3889 */
3890 misaligned = 1;
3891 }
3892 /*
3893 * When we get to this point, we know...
3894 * -- the offset into the file is on a devblocksize boundary
3895 */
3896
3897 while (io_req_size && retval == 0) {
3898 u_int32_t io_start;
3899
3900 if (cluster_hard_throttle_on(vp, 1)) {
3901 max_rd_size = HARD_THROTTLE_MAXSIZE;
3902 max_rd_ahead = HARD_THROTTLE_MAXSIZE - 1;
3903 } else {
3904 max_rd_size = max_upl_size;
3905 max_rd_ahead = max_rd_size * IO_SCALE(vp, 2);
3906 }
3907 io_start = io_size = io_req_size;
3908
3909 /*
3910 * First look for pages already in the cache
3911 * and move them to user space.
3912 *
3913 * cluster_copy_ubc_data returns the resid
3914 * in io_size
3915 */
3916 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_size, 0, 0);
3917
3918 /*
3919 * calculate the number of bytes actually copied
3920 * starting size - residual
3921 */
3922 xsize = io_start - io_size;
3923
3924 io_req_size -= xsize;
3925
3926 if(useVectorUPL && (xsize || (iov_base & PAGE_MASK))) {
3927 /*
3928 * We found something in the cache or we have an iov_base that's not
3929 * page-aligned.
3930 *
3931 * Issue all I/O's that have been collected within this Vectored UPL.
3932 */
3933 if(vector_upl_index) {
3934 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3935 reset_vector_run_state();
3936 }
3937
3938 if(xsize)
3939 useVectorUPL = 0;
3940
3941 /*
3942 * After this point, if we are using the Vector UPL path and the base is
3943 * not page-aligned then the UPL with that base will be the first in the vector UPL.
3944 */
3945 }
3946
3947 /*
3948 * check to see if we are finished with this request...
3949 */
3950 if (io_req_size == 0 || misaligned) {
3951 /*
3952 * see if there's another uio vector to
3953 * process that's of type IO_DIRECT
3954 *
3955 * break out of while loop to get there
3956 */
3957 break;
3958 }
3959 /*
3960 * assume the request ends on a device block boundary
3961 */
3962 io_min = devblocksize;
3963
3964 /*
3965 * we can handle I/O's in multiples of the device block size
3966 * however, if io_size isn't a multiple of devblocksize we
3967 * want to clip it back to the nearest page boundary since
3968 * we are going to have to go through cluster_read_copy to
3969 * deal with the 'overhang'... by clipping it to a PAGE_SIZE
3970 * multiple, we avoid asking the drive for the same physical
3971 * blocks twice... once for the partial page at the end of the
3972 * request and a 2nd time for the page we read into the cache
3973 * (which overlaps the end of the direct read) in order to
3974 * get at the overhang bytes
3975 */
3976 if (io_size & (devblocksize - 1)) {
3977 /*
3978 * request does NOT end on a device block boundary
3979 * so clip it back to a PAGE_SIZE boundary
3980 */
3981 io_size &= ~PAGE_MASK;
3982 io_min = PAGE_SIZE;
3983 }
3984 if (retval || io_size < io_min) {
3985 /*
3986 * either an error or we only have the tail left to
3987 * complete via the copy path...
3988 * we may have already spun some portion of this request
3989 * off as async requests... we need to wait for the I/O
3990 * to complete before returning
3991 */
3992 goto wait_for_dreads;
3993 }
3994 if ((xsize = io_size) > max_rd_size)
3995 xsize = max_rd_size;
3996
3997 io_size = 0;
3998
3999 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, (int *)&io_size);
4000
4001 if (io_size == 0) {
4002 /*
4003 * a page must have just come into the cache
4004 * since the first page in this range is no
4005 * longer absent, go back and re-evaluate
4006 */
4007 continue;
4008 }
4009 iov_base = uio_curriovbase(uio);
4010
4011 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
4012 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
4013
4014 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
4015 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
4016
4017 if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0))
4018 no_zero_fill = 1;
4019 else
4020 no_zero_fill = 0;
4021
4022 for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
4023 pages_in_pl = 0;
4024 upl_size = upl_needed_size;
4025 upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
4026
4027 if (no_zero_fill)
4028 upl_flags |= UPL_NOZEROFILL;
4029 if (force_data_sync)
4030 upl_flags |= UPL_FORCE_DATA_SYNC;
4031
4032 kret = vm_map_create_upl(current_map(),
4033 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
4034 &upl_size, &upl, NULL, &pages_in_pl, &upl_flags);
4035
4036 if (kret != KERN_SUCCESS) {
4037 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
4038 (int)upl_offset, upl_size, io_size, kret, 0);
4039 /*
4040 * failed to get pagelist
4041 *
4042 * we may have already spun some portion of this request
4043 * off as async requests... we need to wait for the I/O
4044 * to complete before returning
4045 */
4046 goto wait_for_dreads;
4047 }
4048 pages_in_pl = upl_size / PAGE_SIZE;
4049 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
4050
4051 for (i = 0; i < pages_in_pl; i++) {
4052 if (!upl_page_present(pl, i))
4053 break;
4054 }
4055 if (i == pages_in_pl)
4056 break;
4057
4058 ubc_upl_abort(upl, 0);
4059 }
4060 if (force_data_sync >= 3) {
4061 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
4062 (int)upl_offset, upl_size, io_size, kret, 0);
4063
4064 goto wait_for_dreads;
4065 }
4066 /*
4067 * Consider the possibility that upl_size wasn't satisfied.
4068 */
4069 if (upl_size < upl_needed_size) {
4070 if (upl_size && upl_offset == 0)
4071 io_size = upl_size;
4072 else
4073 io_size = 0;
4074 }
4075 if (io_size == 0) {
4076 ubc_upl_abort(upl, 0);
4077 goto wait_for_dreads;
4078 }
4079 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
4080 (int)upl_offset, upl_size, io_size, kret, 0);
4081
4082 if(useVectorUPL) {
4083 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
4084 if(end_off)
4085 issueVectorUPL = 1;
4086 /*
4087 * After this point, if we are using a vector UPL, then
4088 * either all the UPL elements end on a page boundary OR
4089 * this UPL is the last element because it does not end
4090 * on a page boundary.
4091 */
4092 }
4093
4094 /*
4095 * request asynchronously so that we can overlap
4096 * the preparation of the next I/O
4097 * if there are already too many outstanding reads
4098 * wait until some have completed before issuing the next read
4099 */
4100 if (iostate.io_issued > iostate.io_completed) {
4101
4102 lck_mtx_lock(cl_mtxp);
4103
4104 while ((iostate.io_issued - iostate.io_completed) > max_rd_ahead) {
4105 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
4106 iostate.io_issued, iostate.io_completed, max_rd_ahead, 0, 0);
4107
4108 iostate.io_wanted = 1;
4109 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_direct", NULL);
4110
4111 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
4112 iostate.io_issued, iostate.io_completed, max_rd_ahead, 0, 0);
4113 }
4114 lck_mtx_unlock(cl_mtxp);
4115 }
4116 if (iostate.io_error) {
4117 /*
4118 * one of the earlier reads we issued ran into a hard error
4119 * don't issue any more reads, clean up the UPL
4120 * that was just created but not used, then
4121 * go wait for any other reads to complete before
4122 * returning the error to the caller
4123 */
4124 ubc_upl_abort(upl, 0);
4125
4126 goto wait_for_dreads;
4127 }
4128 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START,
4129 upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
4130
4131
4132 if(!useVectorUPL) {
4133 if (no_zero_fill)
4134 io_flag &= ~CL_PRESERVE;
4135 else
4136 io_flag |= CL_PRESERVE;
4137
4138 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4139
4140 } else {
4141
4142 if(!vector_upl_index) {
4143 vector_upl = vector_upl_create(upl_offset);
4144 v_upl_uio_offset = uio->uio_offset;
4145 vector_upl_offset = upl_offset;
4146 }
4147
4148 vector_upl_set_subupl(vector_upl,upl, upl_size);
4149 vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
4150 vector_upl_index++;
4151 vector_upl_size += upl_size;
4152 vector_upl_iosize += io_size;
4153
4154 if(issueVectorUPL || vector_upl_index == MAX_VECTOR_UPL_ELEMENTS || vector_upl_size >= MAX_VECTOR_UPL_SIZE) {
4155 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4156 reset_vector_run_state();
4157 }
4158 }
4159 /*
4160 * update the uio structure
4161 */
4162 uio_update(uio, (user_size_t)io_size);
4163
4164 io_req_size -= io_size;
4165
4166 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END,
4167 upl, (int)uio->uio_offset, io_req_size, retval, 0);
4168
4169 } /* end while */
4170
4171 if (retval == 0 && iostate.io_error == 0 && io_req_size == 0 && uio->uio_offset < filesize) {
4172
4173 retval = cluster_io_type(uio, read_type, read_length, 0);
4174
4175 if (retval == 0 && *read_type == IO_DIRECT) {
4176
4177 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
4178 (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
4179
4180 goto next_dread;
4181 }
4182 }
4183
4184 wait_for_dreads:
4185
4186 if(retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
4187 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4188 reset_vector_run_state();
4189 }
4190 /*
4191 * make sure all async reads that are part of this stream
4192 * have completed before we return
4193 */
4194 if (iostate.io_issued > iostate.io_completed) {
4195
4196 lck_mtx_lock(cl_mtxp);
4197
4198 while (iostate.io_issued != iostate.io_completed) {
4199 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
4200 iostate.io_issued, iostate.io_completed, 0, 0, 0);
4201
4202 iostate.io_wanted = 1;
4203 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_direct", NULL);
4204
4205 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
4206 iostate.io_issued, iostate.io_completed, 0, 0, 0);
4207 }
4208 lck_mtx_unlock(cl_mtxp);
4209 }
4210 if (iostate.io_error)
4211 retval = iostate.io_error;
4212
4213 if (io_req_size && retval == 0) {
4214 /*
4215 * we couldn't handle the tail of this request in DIRECT mode
4216 * so fire it through the copy path
4217 */
4218 retval = cluster_read_copy(vp, uio, io_req_size, filesize, flags, callback, callback_arg);
4219
4220 *read_type = IO_UNKNOWN;
4221 }
4222 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
4223 (int)uio->uio_offset, (int)uio_resid(uio), io_req_size, retval, 0);
4224
4225 return (retval);
4226 }
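/*
 * A minimal sketch of the alignment screen applied near the top of
 * cluster_read_direct: a request can stay on the direct path only if the
 * file offset sits on a device block boundary and the user buffer satisfies
 * both the device's memory alignment mask and the device block size.
 * The helper name is hypothetical and purely illustrative.
 */
static int
example_dio_is_aligned(off_t uio_offset, user_addr_t iov_base,
	u_int32_t devblocksize, u_int32_t mem_alignment_mask)
{
	if ((u_int32_t)uio_offset & (devblocksize - 1))
		return (0);	/* file offset not on a device block boundary */
	if ((u_int32_t)iov_base & mem_alignment_mask)
		return (0);	/* user buffer misaligned for the DMA engine */
	if (iov_base & (devblocksize - 1))
		return (0);	/* buffer must also sit on a device block boundary */

	return (1);
}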
4227
4228
4229 static int
4230 cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
4231 int (*callback)(buf_t, void *), void *callback_arg, int flags)
4232 {
4233 upl_page_info_t *pl;
4234 upl_t upl[MAX_VECTS];
4235 vm_offset_t upl_offset;
4236 addr64_t dst_paddr = 0;
4237 user_addr_t iov_base;
4238 off_t max_size;
4239 upl_size_t upl_size;
4240 vm_size_t upl_needed_size;
4241 mach_msg_type_number_t pages_in_pl;
4242 int upl_flags;
4243 kern_return_t kret;
4244 struct clios iostate;
4245 int error= 0;
4246 int cur_upl = 0;
4247 int num_upl = 0;
4248 int n;
4249 u_int32_t xsize;
4250 u_int32_t io_size;
4251 u_int32_t devblocksize;
4252 u_int32_t mem_alignment_mask;
4253 u_int32_t tail_size = 0;
4254 int bflag;
4255
4256 if (flags & IO_PASSIVE)
4257 bflag = CL_PASSIVE;
4258 else
4259 bflag = 0;
4260
4261 /*
4262 * When we enter this routine, we know
4263 * -- the read_length will not exceed the current iov_len
4264 * -- the target address is physically contiguous for read_length
4265 */
4266 cluster_syncup(vp, filesize, callback, callback_arg);
4267
4268 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
4269 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
4270
4271 iostate.io_completed = 0;
4272 iostate.io_issued = 0;
4273 iostate.io_error = 0;
4274 iostate.io_wanted = 0;
4275
4276 next_cread:
4277 io_size = *read_length;
4278
4279 max_size = filesize - uio->uio_offset;
4280
4281 if (io_size > max_size)
4282 io_size = max_size;
4283
4284 iov_base = uio_curriovbase(uio);
4285
4286 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
4287 upl_needed_size = upl_offset + io_size;
4288
4289 pages_in_pl = 0;
4290 upl_size = upl_needed_size;
4291 upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
4292
4293
4294 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_START,
4295 (int)upl_offset, (int)upl_size, (int)iov_base, io_size, 0);
4296
4297 kret = vm_map_get_upl(current_map(),
4298 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
4299 &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
4300
4301 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_END,
4302 (int)upl_offset, upl_size, io_size, kret, 0);
4303
4304 if (kret != KERN_SUCCESS) {
4305 /*
4306 * failed to get pagelist
4307 */
4308 error = EINVAL;
4309 goto wait_for_creads;
4310 }
4311 num_upl++;
4312
4313 if (upl_size < upl_needed_size) {
4314 /*
4315 * The upl_size wasn't satisfied.
4316 */
4317 error = EINVAL;
4318 goto wait_for_creads;
4319 }
4320 pl = ubc_upl_pageinfo(upl[cur_upl]);
4321
4322 dst_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)upl_offset;
4323
4324 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
4325 u_int32_t head_size;
4326
4327 head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
4328
4329 if (head_size > io_size)
4330 head_size = io_size;
4331
4332 error = cluster_align_phys_io(vp, uio, dst_paddr, head_size, CL_READ, callback, callback_arg);
4333
4334 if (error)
4335 goto wait_for_creads;
4336
4337 upl_offset += head_size;
4338 dst_paddr += head_size;
4339 io_size -= head_size;
4340
4341 iov_base += head_size;
4342 }
4343 if ((u_int32_t)iov_base & mem_alignment_mask) {
4344 /*
4345 * request isn't aligned on a memory boundary
4346 * that the underlying DMA engine can handle...
4347 * return an error instead of going through
4348 * the slow copy path since the intent of this
4349 * path is direct I/O to device memory
4350 */
4351 error = EINVAL;
4352 goto wait_for_creads;
4353 }
4354
4355 tail_size = io_size & (devblocksize - 1);
4356
4357 io_size -= tail_size;
4358
4359 while (io_size && error == 0) {
4360
4361 if (io_size > MAX_IO_CONTIG_SIZE)
4362 xsize = MAX_IO_CONTIG_SIZE;
4363 else
4364 xsize = io_size;
4365 /*
4366 * request asynchronously so that we can overlap
4367 * the preparation of the next I/O... we'll do
4368 * the commit after all the I/O has completed
4369 * since it's all issued against the same UPL
4370 * if there are already too many outstanding reads
4371 * wait until some have completed before issuing the next
4372 */
4373 if (iostate.io_issued > iostate.io_completed) {
4374 lck_mtx_lock(cl_mtxp);
4375
4376 while ((iostate.io_issued - iostate.io_completed) > (MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2))) {
4377 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
4378 iostate.io_issued, iostate.io_completed, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), 0, 0);
4379
4380 iostate.io_wanted = 1;
4381 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_contig", NULL);
4382
4383 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
4384 iostate.io_issued, iostate.io_completed, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), 0, 0);
4385 }
4386 lck_mtx_unlock(cl_mtxp);
4387 }
4388 if (iostate.io_error) {
4389 /*
4390 * one of the earlier reads we issued ran into a hard error
4391 * don't issue any more reads...
4392 * go wait for any other reads to complete before
4393 * returning the error to the caller
4394 */
4395 goto wait_for_creads;
4396 }
4397 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, xsize,
4398 CL_READ | CL_NOZERO | CL_DEV_MEMORY | CL_ASYNC | bflag,
4399 (buf_t)NULL, &iostate, callback, callback_arg);
4400 /*
4401 * The cluster_io read was issued successfully,
4402 * update the uio structure
4403 */
4404 if (error == 0) {
4405 uio_update(uio, (user_size_t)xsize);
4406
4407 dst_paddr += xsize;
4408 upl_offset += xsize;
4409 io_size -= xsize;
4410 }
4411 }
4412 if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS && uio->uio_offset < filesize) {
4413
4414 error = cluster_io_type(uio, read_type, read_length, 0);
4415
4416 if (error == 0 && *read_type == IO_CONTIG) {
4417 cur_upl++;
4418 goto next_cread;
4419 }
4420 } else
4421 *read_type = IO_UNKNOWN;
4422
4423 wait_for_creads:
4424 /*
4425 * make sure all async reads that are part of this stream
4426 * have completed before we proceed
4427 */
4428 if (iostate.io_issued > iostate.io_completed) {
4429
4430 lck_mtx_lock(cl_mtxp);
4431
4432 while (iostate.io_issued != iostate.io_completed) {
4433 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
4434 iostate.io_issued, iostate.io_completed, 0, 0, 0);
4435
4436 iostate.io_wanted = 1;
4437 msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_contig", NULL);
4438
4439 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
4440 iostate.io_issued, iostate.io_completed, 0, 0, 0);
4441 }
4442 lck_mtx_unlock(cl_mtxp);
4443 }
4444 if (iostate.io_error)
4445 error = iostate.io_error;
4446
4447 if (error == 0 && tail_size)
4448 error = cluster_align_phys_io(vp, uio, dst_paddr, tail_size, CL_READ, callback, callback_arg);
4449
4450 for (n = 0; n < num_upl; n++)
4451 /*
4452 * just release our hold on each physically contiguous
4453 * region without changing any state
4454 */
4455 ubc_upl_abort(upl[n], 0);
4456
4457 return (error);
4458 }
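/*
 * A minimal sketch of how cluster_read_contig carves a request into an
 * unaligned head, a device-block aligned body, and an unaligned tail; the
 * head and tail are serviced by cluster_align_phys_io while the body goes
 * straight through cluster_io.  The struct and helper names are hypothetical
 * and purely illustrative.
 */
struct example_contig_split {
	u_int32_t head_size;	/* bytes before the first device block boundary */
	u_int32_t body_size;	/* device-block aligned middle of the request */
	u_int32_t tail_size;	/* bytes left over after the last full block */
};

static void
example_contig_split(off_t uio_offset, u_int32_t io_size, u_int32_t devblocksize,
	struct example_contig_split *sp)
{
	sp->head_size = 0;

	if (uio_offset & (devblocksize - 1)) {
		sp->head_size = devblocksize - (u_int32_t)(uio_offset & (devblocksize - 1));
		if (sp->head_size > io_size)
			sp->head_size = io_size;
	}
	io_size -= sp->head_size;

	sp->tail_size = io_size & (devblocksize - 1);
	sp->body_size = io_size - sp->tail_size;
}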
4459
4460
4461 static int
4462 cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length)
4463 {
4464 user_size_t iov_len;
4465 user_addr_t iov_base = 0;
4466 upl_t upl;
4467 upl_size_t upl_size;
4468 int upl_flags;
4469 int retval = 0;
4470
4471 /*
4472 * skip over any empty vectors
4473 */
4474 uio_update(uio, (user_size_t)0);
4475
4476 iov_len = uio_curriovlen(uio);
4477
4478 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_START, uio, (int)iov_len, 0, 0, 0);
4479
4480 if (iov_len) {
4481 iov_base = uio_curriovbase(uio);
4482 /*
4483 * make sure the size of the vector isn't too big...
4484 * internally, we want to handle all of the I/O in
4485 * chunk sizes that fit in a 32 bit int
4486 */
4487 if (iov_len > (user_size_t)MAX_IO_REQUEST_SIZE)
4488 upl_size = MAX_IO_REQUEST_SIZE;
4489 else
4490 upl_size = (u_int32_t)iov_len;
4491
4492 upl_flags = UPL_QUERY_OBJECT_TYPE;
4493
4494 if ((vm_map_get_upl(current_map(),
4495 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
4496 &upl_size, &upl, NULL, NULL, &upl_flags, 0)) != KERN_SUCCESS) {
4497 /*
4498 * the user app must have passed in an invalid address
4499 */
4500 retval = EFAULT;
4501 }
4502 if (upl_size == 0)
4503 retval = EFAULT;
4504
4505 *io_length = upl_size;
4506
4507 if (upl_flags & UPL_PHYS_CONTIG)
4508 *io_type = IO_CONTIG;
4509 else if (iov_len >= min_length)
4510 *io_type = IO_DIRECT;
4511 else
4512 *io_type = IO_COPY;
4513 } else {
4514 /*
4515 * nothing left to do for this uio
4516 */
4517 *io_length = 0;
4518 *io_type = IO_UNKNOWN;
4519 }
4520 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_END, iov_base, *io_type, *io_length, retval, 0);
4521
4522 return (retval);
4523 }
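/*
 * A minimal sketch of the vector-length clamp cluster_io_type applies above:
 * each pass is limited to MAX_IO_REQUEST_SIZE bytes so that every internal
 * chunk fits comfortably in a 32 bit count.  The helper name is hypothetical
 * and purely illustrative.
 */
static u_int32_t
example_clamp_iov_len(user_size_t iov_len)
{
	if (iov_len > (user_size_t)MAX_IO_REQUEST_SIZE)
		return (MAX_IO_REQUEST_SIZE);

	return ((u_int32_t)iov_len);
}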
4524
4525
4526 /*
4527 * generate advisory I/O's in the largest chunks possible
4528 * the completed pages will be released into the VM cache
4529 */
4530 int
4531 advisory_read(vnode_t vp, off_t filesize, off_t f_offset, int resid)
4532 {
4533 return advisory_read_ext(vp, filesize, f_offset, resid, NULL, NULL, CL_PASSIVE);
4534 }
4535
4536 int
4537 advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
4538 {
4539 upl_page_info_t *pl;
4540 upl_t upl;
4541 vm_offset_t upl_offset;
4542 int upl_size;
4543 off_t upl_f_offset;
4544 int start_offset;
4545 int start_pg;
4546 int last_pg;
4547 int pages_in_upl;
4548 off_t max_size;
4549 int io_size;
4550 kern_return_t kret;
4551 int retval = 0;
4552 int issued_io;
4553 int skip_range;
4554 uint32_t max_io_size;
4555
4556
4557 if ( !UBCINFOEXISTS(vp))
4558 return(EINVAL);
4559
4560 if (resid < 0)
4561 return(EINVAL);
4562
4563 max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
4564
4565 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_START,
4566 (int)f_offset, resid, (int)filesize, 0, 0);
4567
4568 while (resid && f_offset < filesize && retval == 0) {
4569 /*
4570 * compute the size of the upl needed to encompass
4571 * the requested read... limit each call to cluster_io
4572 * to the maximum UPL size... cluster_io will clip if
4573 * this exceeds the maximum io_size for the device...
4574 * make sure to account for
4575 * a starting offset that's not page aligned
4576 */
4577 start_offset = (int)(f_offset & PAGE_MASK_64);
4578 upl_f_offset = f_offset - (off_t)start_offset;
4579 max_size = filesize - f_offset;
4580
4581 if (resid < max_size)
4582 io_size = resid;
4583 else
4584 io_size = max_size;
4585
4586 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
4587 if ((uint32_t)upl_size > max_io_size)
4588 upl_size = max_io_size;
4589
4590 skip_range = 0;
4591 /*
4592 * return the number of contiguously present pages in the cache
4593 * starting at upl_f_offset within the file
4594 */
4595 ubc_range_op(vp, upl_f_offset, upl_f_offset + upl_size, UPL_ROP_PRESENT, &skip_range);
4596
4597 if (skip_range) {
4598 /*
4599 * skip over pages already present in the cache
4600 */
4601 io_size = skip_range - start_offset;
4602
4603 f_offset += io_size;
4604 resid -= io_size;
4605
4606 if (skip_range == upl_size)
4607 continue;
4608 /*
4609 * have to issue some real I/O
4610 * at this point, we know it's starting on a page boundary
4611 * because we've skipped over at least the first page in the request
4612 */
4613 start_offset = 0;
4614 upl_f_offset += skip_range;
4615 upl_size -= skip_range;
4616 }
4617 pages_in_upl = upl_size / PAGE_SIZE;
4618
4619 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_START,
4620 upl, (int)upl_f_offset, upl_size, start_offset, 0);
4621
4622 kret = ubc_create_upl(vp,
4623 upl_f_offset,
4624 upl_size,
4625 &upl,
4626 &pl,
4627 UPL_RET_ONLY_ABSENT | UPL_SET_LITE);
4628 if (kret != KERN_SUCCESS)
4629 return(retval);
4630 issued_io = 0;
4631
4632 /*
4633 * before we start marching forward, we must make sure we end on
4634 * a present page, otherwise we will be working with a freed
4635 * upl
4636 */
4637 for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
4638 if (upl_page_present(pl, last_pg))
4639 break;
4640 }
4641 pages_in_upl = last_pg + 1;
4642
4643
4644 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_END,
4645 upl, (int)upl_f_offset, upl_size, start_offset, 0);
4646
4647
4648 for (last_pg = 0; last_pg < pages_in_upl; ) {
4649 /*
4650 * scan from the beginning of the upl looking for the first
4651 * page that is present.... this will become the first page in
4652 * the request we're going to make to 'cluster_io'... if all
4653 * of the pages are absent, we won't call through to 'cluster_io'
4654 */
4655 for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
4656 if (upl_page_present(pl, start_pg))
4657 break;
4658 }
4659
4660 /*
4661 * scan from the starting present page looking for an absent
4662 * page before the end of the upl is reached, if we
4663 * find one, then it will terminate the range of pages being
4664 * presented to 'cluster_io'
4665 */
4666 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
4667 if (!upl_page_present(pl, last_pg))
4668 break;
4669 }
4670
4671 if (last_pg > start_pg) {
4672 /*
4673 * we found a range of pages that must be filled
4674 * if the last page in this range is the last page of the file
4675 * we may have to clip the size of it to keep from reading past
4676 * the end of the last physical block associated with the file
4677 */
4678 upl_offset = start_pg * PAGE_SIZE;
4679 io_size = (last_pg - start_pg) * PAGE_SIZE;
4680
4681 if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize)
4682 io_size = filesize - (upl_f_offset + upl_offset);
4683
4684 /*
4685 * issue an asynchronous read to cluster_io
4686 */
4687 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
4688 CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
4689
4690 issued_io = 1;
4691 }
4692 }
4693 if (issued_io == 0)
4694 ubc_upl_abort(upl, 0);
4695
4696 io_size = upl_size - start_offset;
4697
4698 if (io_size > resid)
4699 io_size = resid;
4700 f_offset += io_size;
4701 resid -= io_size;
4702 }
4703
4704 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_END,
4705 (int)f_offset, resid, retval, 0, 0);
4706
4707 return(retval);
4708 }
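/*
 * A minimal sketch of the page-run scan advisory_read_ext performs above
 * (cluster_read_copy does the same walk with the valid/invalid sense
 * reversed): starting at *cursor, find the next run of pages marked present
 * in the UPL's page list... that run is what gets handed to cluster_io.
 * The helper name is hypothetical and purely illustrative.
 */
static int
example_next_present_run(upl_page_info_t *pl, int pages_in_upl, int *cursor, int *run_len)
{
	int start_pg, last_pg;

	/* skip pages that aren't present in this UPL */
	for (start_pg = *cursor; start_pg < pages_in_upl; start_pg++) {
		if (upl_page_present(pl, start_pg))
			break;
	}
	/* extend the run until a page that isn't present (or the end of the upl) */
	for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
		if (!upl_page_present(pl, last_pg))
			break;
	}
	*cursor  = last_pg;
	*run_len = last_pg - start_pg;

	return (start_pg);
}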
4709
4710
4711 int
4712 cluster_push(vnode_t vp, int flags)
4713 {
4714 return cluster_push_ext(vp, flags, NULL, NULL);
4715 }
4716
4717
4718 int
4719 cluster_push_ext(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg)
4720 {
4721 int retval;
4722 int my_sparse_wait = 0;
4723 struct cl_writebehind *wbp;
4724
4725 if ( !UBCINFOEXISTS(vp)) {
4726 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, vp, flags, 0, -1, 0);
4727 return (0);
4728 }
4729 /* return if deferred write is set */
4730 if (((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE) && (flags & IO_DEFWRITE)) {
4731 return (0);
4732 }
4733 if ((wbp = cluster_get_wbp(vp, CLW_RETURNLOCKED)) == NULL) {
4734 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, vp, flags, 0, -2, 0);
4735 return (0);
4736 }
4737 if (wbp->cl_number == 0 && wbp->cl_scmap == NULL) {
4738 lck_mtx_unlock(&wbp->cl_lockw);
4739
4740 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, vp, flags, 0, -3, 0);
4741 return(0);
4742 }
4743 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_START,
4744 wbp->cl_scmap, wbp->cl_number, flags, 0, 0);
4745
4746 /*
4747 * if we have an fsync in progress, we don't want to allow any additional
4748 * sync/fsync/close(s) to occur until it finishes.
4749 * note that it's possible for writes to continue to occur to this file
4750 * while we're waiting and also once the fsync starts to clean if we're
4751 * in the sparse map case
4752 */
4753 while (wbp->cl_sparse_wait) {
4754 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START, vp, 0, 0, 0, 0);
4755
4756 msleep((caddr_t)&wbp->cl_sparse_wait, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);
4757
4758 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END, vp, 0, 0, 0, 0);
4759 }
4760 if (flags & IO_SYNC) {
4761 my_sparse_wait = 1;
4762 wbp->cl_sparse_wait = 1;
4763
4764 /*
4765 * this is an fsync (or equivalent)... we must wait for any existing async
4766 * cleaning operations to complete before we evaluate the current state
4767 * and finish cleaning... this ensures that all writes issued before this
4768 * fsync actually get cleaned to the disk before this fsync returns
4769 */
4770 while (wbp->cl_sparse_pushes) {
4771 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_START, vp, 0, 0, 0, 0);
4772
4773 msleep((caddr_t)&wbp->cl_sparse_pushes, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);
4774
4775 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_END, vp, 0, 0, 0, 0);
4776 }
4777 }
4778 if (wbp->cl_scmap) {
4779 void *scmap;
4780
4781 if (wbp->cl_sparse_pushes < SPARSE_PUSH_LIMIT) {
4782
4783 scmap = wbp->cl_scmap;
4784 wbp->cl_scmap = NULL;
4785
4786 wbp->cl_sparse_pushes++;
4787
4788 lck_mtx_unlock(&wbp->cl_lockw);
4789
4790 sparse_cluster_push(&scmap, vp, ubc_getsize(vp), PUSH_ALL | IO_PASSIVE, callback, callback_arg);
4791
4792 lck_mtx_lock(&wbp->cl_lockw);
4793
4794 wbp->cl_sparse_pushes--;
4795
4796 if (wbp->cl_sparse_wait && wbp->cl_sparse_pushes == 0)
4797 wakeup((caddr_t)&wbp->cl_sparse_pushes);
4798 } else {
4799 sparse_cluster_push(&(wbp->cl_scmap), vp, ubc_getsize(vp), PUSH_ALL | IO_PASSIVE, callback, callback_arg);
4800 }
4801 retval = 1;
4802 } else {
4803 retval = cluster_try_push(wbp, vp, ubc_getsize(vp), PUSH_ALL | IO_PASSIVE, callback, callback_arg);
4804 }
4805 lck_mtx_unlock(&wbp->cl_lockw);
4806
4807 if (flags & IO_SYNC)
4808 (void)vnode_waitforwrites(vp, 0, 0, 0, "cluster_push");
4809
4810 if (my_sparse_wait) {
4811 /*
4812 * I'm the owner of the serialization token
4813 * clear it and wakeup anyone that is waiting
4814 * for me to finish
4815 */
4816 lck_mtx_lock(&wbp->cl_lockw);
4817
4818 wbp->cl_sparse_wait = 0;
4819 wakeup((caddr_t)&wbp->cl_sparse_wait);
4820
4821 lck_mtx_unlock(&wbp->cl_lockw);
4822 }
4823 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_END,
4824 wbp->cl_scmap, wbp->cl_number, retval, 0, 0);
4825
4826 return (retval);
4827 }
4828
4829
4830 __private_extern__ void
4831 cluster_release(struct ubc_info *ubc)
4832 {
4833 struct cl_writebehind *wbp;
4834 struct cl_readahead *rap;
4835
4836 if ((wbp = ubc->cl_wbehind)) {
4837
4838 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, wbp->cl_scmap, 0, 0, 0);
4839
4840 if (wbp->cl_scmap)
4841 vfs_drt_control(&(wbp->cl_scmap), 0);
4842 } else {
4843 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, 0, 0, 0, 0);
4844 }
4845
4846 rap = ubc->cl_rahead;
4847
4848 if (wbp != NULL) {
4849 lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
4850 FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
4851 }
4852 if ((rap = ubc->cl_rahead)) {
4853 lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp);
4854 FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD);
4855 }
4856 ubc->cl_rahead = NULL;
4857 ubc->cl_wbehind = NULL;
4858
4859 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_END, ubc, rap, wbp, 0, 0);
4860 }
4861
4862
4863 static int
4864 cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_flag, int (*callback)(buf_t, void *), void *callback_arg)
4865 {
4866 int cl_index;
4867 int cl_index1;
4868 int min_index;
4869 int cl_len;
4870 int cl_pushed = 0;
4871 struct cl_wextent l_clusters[MAX_CLUSTERS];
4872 u_int max_cluster_pgcount;
4873
4874
4875 max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
4876 /*
4877 * the write behind context exists and has
4878 * already been locked...
4879 */
4880 if (wbp->cl_number == 0)
4881 /*
4882 * no clusters to push
4883 * return number of empty slots
4884 */
4885 return (MAX_CLUSTERS);
4886
4887 /*
4888 * make a local 'sorted' copy of the clusters
4889 * and clear wbp->cl_number so that new clusters can
4890 * be developed
4891 */
4892 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
4893 for (min_index = -1, cl_index1 = 0; cl_index1 < wbp->cl_number; cl_index1++) {
4894 if (wbp->cl_clusters[cl_index1].b_addr == wbp->cl_clusters[cl_index1].e_addr)
4895 continue;
4896 if (min_index == -1)
4897 min_index = cl_index1;
4898 else if (wbp->cl_clusters[cl_index1].b_addr < wbp->cl_clusters[min_index].b_addr)
4899 min_index = cl_index1;
4900 }
4901 if (min_index == -1)
4902 break;
4903
4904 l_clusters[cl_index].b_addr = wbp->cl_clusters[min_index].b_addr;
4905 l_clusters[cl_index].e_addr = wbp->cl_clusters[min_index].e_addr;
4906 l_clusters[cl_index].io_flags = wbp->cl_clusters[min_index].io_flags;
4907
4908 wbp->cl_clusters[min_index].b_addr = wbp->cl_clusters[min_index].e_addr;
4909 }
4910 wbp->cl_number = 0;
4911
4912 cl_len = cl_index;
4913
4914 if ( (push_flag & PUSH_DELAY) && cl_len == MAX_CLUSTERS ) {
4915 int i;
4916
4917 /*
4918 * determine if we appear to be writing the file sequentially
4919 * if not, by returning without having pushed any clusters
4920 * we will cause this vnode to be pushed into the sparse cluster mechanism
4921 * used for managing more random I/O patterns
4922 *
4923 * we know that we've got all clusters currently in use and the next write doesn't fit into one of them...
4924 * that's why we're in try_push with PUSH_DELAY...
4925 *
4926 * check to make sure that all the clusters except the last one are 'full'... and that each cluster
4927 * is adjacent to the next (i.e. we're looking for sequential writes)... they were sorted above
4928 * so we can just make a simple pass through, up to, but not including the last one...
4929 * note that e_addr is not inclusive, so it will be equal to the b_addr of the next cluster if they
4930 * are sequential
4931 *
4932 * we let the last one be partial as long as it was adjacent to the previous one...
4933 * we need to do this to deal with multi-threaded servers that might write an I/O or 2 out
4934 * of order... if this occurs at the tail of the last cluster, we don't want to fall into the sparse cluster world...
4935 */
4936 for (i = 0; i < MAX_CLUSTERS - 1; i++) {
4937 if ((l_clusters[i].e_addr - l_clusters[i].b_addr) != max_cluster_pgcount)
4938 goto dont_try;
4939 if (l_clusters[i].e_addr != l_clusters[i+1].b_addr)
4940 goto dont_try;
4941 }
4942 }
4943 for (cl_index = 0; cl_index < cl_len; cl_index++) {
4944 int flags;
4945 struct cl_extent cl;
4946
4947 /*
4948 * try to push each cluster in turn...
4949 */
4950 if (l_clusters[cl_index].io_flags & CLW_IONOCACHE)
4951 flags = IO_NOCACHE;
4952 else
4953 flags = 0;
4954
4955 if ((l_clusters[cl_index].io_flags & CLW_IOPASSIVE) || (push_flag & IO_PASSIVE))
4956 flags |= IO_PASSIVE;
4957
4958 if (push_flag & PUSH_SYNC)
4959 flags |= IO_SYNC;
4960
4961 cl.b_addr = l_clusters[cl_index].b_addr;
4962 cl.e_addr = l_clusters[cl_index].e_addr;
4963
4964 cluster_push_now(vp, &cl, EOF, flags, callback, callback_arg);
4965
4966 l_clusters[cl_index].b_addr = 0;
4967 l_clusters[cl_index].e_addr = 0;
4968
4969 cl_pushed++;
4970
4971 if ( !(push_flag & PUSH_ALL) )
4972 break;
4973 }
4974 dont_try:
4975 if (cl_len > cl_pushed) {
4976 /*
4977 * we didn't push all of the clusters, so
4978 * let's try to merge them back into the vnode
4979 */
4980 if ((MAX_CLUSTERS - wbp->cl_number) < (cl_len - cl_pushed)) {
4981 /*
4982 * we picked up some new clusters while we were trying to
4983 * push the old ones... this can happen because I've dropped
4984 * the vnode lock... the sum of the
4985 * leftovers plus the new cluster count exceeds our ability
4986 * to represent them, so switch to the sparse cluster mechanism
4987 *
4988 * collect the active public clusters...
4989 */
4990 sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg);
4991
4992 for (cl_index = 0, cl_index1 = 0; cl_index < cl_len; cl_index++) {
4993 if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr)
4994 continue;
4995 wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
4996 wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
4997 wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
4998
4999 cl_index1++;
5000 }
5001 /*
5002 * update the cluster count
5003 */
5004 wbp->cl_number = cl_index1;
5005
5006 /*
5007 * and collect the original clusters that were moved into the
5008 * local storage for sorting purposes
5009 */
5010 sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg);
5011
5012 } else {
5013 /*
5014 * we've got room to merge the leftovers back in
5015 * just append them starting at the next 'hole'
5016 * represented by wbp->cl_number
5017 */
5018 for (cl_index = 0, cl_index1 = wbp->cl_number; cl_index < cl_len; cl_index++) {
5019 if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr)
5020 continue;
5021
5022 wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
5023 wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
5024 wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
5025
5026 cl_index1++;
5027 }
5028 /*
5029 * update the cluster count
5030 */
5031 wbp->cl_number = cl_index1;
5032 }
5033 }
5034 return (MAX_CLUSTERS - wbp->cl_number);
5035 }
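/*
 * A minimal sketch of the sequential-write heuristic cluster_try_push applies
 * under PUSH_DELAY: every cluster except the last must be full, and each
 * cluster's e_addr (which is exclusive) must equal the b_addr of the one that
 * follows it.  The helper name is hypothetical and purely illustrative.
 */
static int
example_clusters_look_sequential(struct cl_wextent *lc, int count, u_int max_cluster_pgcount)
{
	int i;

	for (i = 0; i < count - 1; i++) {
		if ((lc[i].e_addr - lc[i].b_addr) != max_cluster_pgcount)
			return (0);	/* a non-final cluster isn't full */
		if (lc[i].e_addr != lc[i + 1].b_addr)
			return (0);	/* a gap between adjacent clusters */
	}
	return (1);
}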
5036
5037
5038
5039 static int
5040 cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags, int (*callback)(buf_t, void *), void *callback_arg)
5041 {
5042 upl_page_info_t *pl;
5043 upl_t upl;
5044 vm_offset_t upl_offset;
5045 int upl_size;
5046 off_t upl_f_offset;
5047 int pages_in_upl;
5048 int start_pg;
5049 int last_pg;
5050 int io_size;
5051 int io_flags;
5052 int upl_flags;
5053 int bflag;
5054 int size;
5055 int error = 0;
5056 int retval;
5057 kern_return_t kret;
5058
5059 if (flags & IO_PASSIVE)
5060 bflag = CL_PASSIVE;
5061 else
5062 bflag = 0;
5063
5064 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_START,
5065 (int)cl->b_addr, (int)cl->e_addr, (int)EOF, flags, 0);
5066
5067 if ((pages_in_upl = (int)(cl->e_addr - cl->b_addr)) == 0) {
5068 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 0, 0, 0, 0);
5069
5070 return (0);
5071 }
5072 upl_size = pages_in_upl * PAGE_SIZE;
5073 upl_f_offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
5074
5075 if (upl_f_offset + upl_size >= EOF) {
5076
5077 if (upl_f_offset >= EOF) {
5078 /*
5079 * must have truncated the file and missed
5080 * clearing a dangling cluster (i.e. it's completely
5081 * beyond the new EOF)
5082 */
5083 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 1, 0, 0, 0);
5084
5085 return(0);
5086 }
5087 size = EOF - upl_f_offset;
5088
5089 upl_size = (size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
5090 pages_in_upl = upl_size / PAGE_SIZE;
5091 } else
5092 size = upl_size;
5093
5094 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, size, 0, 0, 0);
5095
5096 /*
5097 * by asking for UPL_COPYOUT_FROM and UPL_RET_ONLY_DIRTY, we get the following desirable behavior
5098 *
5099 * - only pages that are currently dirty are returned... these are the ones we need to clean
5100 * - the hardware dirty bit is cleared when the page is gathered into the UPL... the software dirty bit is set
5101 * - if we have to abort the I/O for some reason, the software dirty bit is left set since we didn't clean the page
5102 * - when we commit the page, the software dirty bit is cleared... the hardware dirty bit is untouched so that if
5103 * someone dirties this page while the I/O is in progress, we don't lose track of the new state
5104 *
5105 * when the I/O completes, we no longer ask for an explicit clear of the DIRTY state (either soft or hard)
5106 */
5107
5108 if ((vp->v_flag & VNOCACHE_DATA) || (flags & IO_NOCACHE))
5109 upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE | UPL_WILL_BE_DUMPED;
5110 else
5111 upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE;
5112
5113 kret = ubc_create_upl(vp,
5114 upl_f_offset,
5115 upl_size,
5116 &upl,
5117 &pl,
5118 upl_flags);
5119 if (kret != KERN_SUCCESS)
5120 panic("cluster_push: failed to get pagelist");
5121
5122 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, upl, upl_f_offset, 0, 0, 0);
5123
5124 /*
5125 * since we only asked for the dirty pages back
5126 * it's possible that we may only get a few or even none, so...
5127 * before we start marching forward, we must make sure we know
5128 * where the last present page is in the UPL, otherwise we could
5129 * end up working with a freed upl due to the FREE_ON_EMPTY semantics
5130 * employed by commit_range and abort_range.
5131 */
5132 for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
5133 if (upl_page_present(pl, last_pg))
5134 break;
5135 }
5136 pages_in_upl = last_pg + 1;
5137
5138 if (pages_in_upl == 0) {
5139 ubc_upl_abort(upl, 0);
5140
5141 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 2, 0, 0, 0);
5142 return(0);
5143 }
5144
5145 for (last_pg = 0; last_pg < pages_in_upl; ) {
5146 /*
5147 * find the next dirty page in the UPL
5148 * this will become the first page in the
5149 * next I/O to generate
5150 */
5151 for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
5152 if (upl_dirty_page(pl, start_pg))
5153 break;
5154 if (upl_page_present(pl, start_pg))
5155 /*
5156 * RET_ONLY_DIRTY will return non-dirty 'precious' pages
5157 * just release these unchanged since we're not going
5158 * to steal them or change their state
5159 */
5160 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
5161 }
5162 if (start_pg >= pages_in_upl)
5163 /*
5164 * done... no more dirty pages to push
5165 */
5166 break;
5167 if (start_pg > last_pg)
5168 /*
5169 * skipped over some non-dirty pages
5170 */
5171 size -= ((start_pg - last_pg) * PAGE_SIZE);
5172
5173 /*
5174 * find a range of dirty pages to write
5175 */
5176 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
5177 if (!upl_dirty_page(pl, last_pg))
5178 break;
5179 }
5180 upl_offset = start_pg * PAGE_SIZE;
5181
5182 io_size = min(size, (last_pg - start_pg) * PAGE_SIZE);
5183
5184 io_flags = CL_THROTTLE | CL_COMMIT | CL_AGE | bflag;
5185
5186 if ( !(flags & IO_SYNC))
5187 io_flags |= CL_ASYNC;
5188
5189 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
5190 io_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
5191
5192 if (error == 0 && retval)
5193 error = retval;
5194
5195 size -= io_size;
5196 }
5197 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 3, 0, 0, 0);
5198
5199 return(error);
5200 }
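/*
 * A minimal sketch of the EOF clipping cluster_push_now performs before
 * creating its UPL: when a cluster extends past the end of the file, only the
 * bytes up to EOF are pushed and the UPL span is rounded back up to whole
 * pages... a return of 0 means the cluster lies entirely beyond EOF and
 * nothing is pushed.  The helper name is hypothetical and purely illustrative.
 */
static int
example_clip_push_to_eof(off_t upl_f_offset, int upl_size, off_t EOF, int *size)
{
	if (upl_f_offset >= EOF)
		return (0);		/* dangling cluster beyond the new EOF */

	if (upl_f_offset + upl_size >= EOF) {
		*size    = (int)(EOF - upl_f_offset);
		upl_size = (*size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
	} else
		*size = upl_size;

	return (upl_size);
}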
5201
5202
5203 /*
5204 * sparse_cluster_switch is called with the write behind lock held
5205 */
5206 static void
5207 sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg)
5208 {
5209 int cl_index;
5210
5211 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, vp, wbp->cl_scmap, 0, 0, 0);
5212
5213 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
5214 int flags;
5215 struct cl_extent cl;
5216
5217 for (cl.b_addr = wbp->cl_clusters[cl_index].b_addr; cl.b_addr < wbp->cl_clusters[cl_index].e_addr; cl.b_addr++) {
5218
5219 if (ubc_page_op(vp, (off_t)(cl.b_addr * PAGE_SIZE_64), 0, NULL, &flags) == KERN_SUCCESS) {
5220 if (flags & UPL_POP_DIRTY) {
5221 cl.e_addr = cl.b_addr + 1;
5222
5223 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, EOF, callback, callback_arg);
5224 }
5225 }
5226 }
5227 }
5228 wbp->cl_number = 0;
5229
5230 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, vp, wbp->cl_scmap, 0, 0, 0);
5231 }
5232
5233
5234 /*
5235 * sparse_cluster_push must be called with the write-behind lock held if the scmap is
5236 * still associated with the write-behind context... however, if the scmap has been disassociated
5237 * from the write-behind context (the cluster_push case), the wb lock is not held
5238 */
5239 static void
5240 sparse_cluster_push(void **scmap, vnode_t vp, off_t EOF, int push_flag, int (*callback)(buf_t, void *), void *callback_arg)
5241 {
5242 struct cl_extent cl;
5243 off_t offset;
5244 u_int length;
5245
5246 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_START, vp, (*scmap), 0, push_flag, 0);
5247
5248 if (push_flag & PUSH_ALL)
5249 vfs_drt_control(scmap, 1);
5250
5251 for (;;) {
5252 if (vfs_drt_get_cluster(scmap, &offset, &length) != KERN_SUCCESS)
5253 break;
5254
5255 cl.b_addr = (daddr64_t)(offset / PAGE_SIZE_64);
5256 cl.e_addr = (daddr64_t)((offset + length) / PAGE_SIZE_64);
5257
5258 cluster_push_now(vp, &cl, EOF, push_flag & IO_PASSIVE, callback, callback_arg);
5259
5260 if ( !(push_flag & PUSH_ALL) )
5261 break;
5262 }
5263 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, vp, (*scmap), 0, 0, 0);
5264 }
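/*
 * A minimal sketch of the byte-range to page-extent conversion
 * sparse_cluster_push performs on each dirty range it pulls out of the sparse
 * map: b_addr and e_addr are page numbers, and e_addr is exclusive.  The
 * helper name is hypothetical and purely illustrative.
 */
static void
example_range_to_extent(off_t offset, u_int length, struct cl_extent *cl)
{
	cl->b_addr = (daddr64_t)(offset / PAGE_SIZE_64);
	cl->e_addr = (daddr64_t)((offset + length) / PAGE_SIZE_64);
}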
5265
5266
5267 /*
5268 * sparse_cluster_add is called with the write behind lock held
5269 */
5270 static void
5271 sparse_cluster_add(void **scmap, vnode_t vp, struct cl_extent *cl, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg)
5272 {
5273 u_int new_dirty;
5274 u_int length;
5275 off_t offset;
5276
5277 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_START, (*scmap), 0, cl->b_addr, (int)cl->e_addr, 0);
5278
5279 offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
5280 length = ((u_int)(cl->e_addr - cl->b_addr)) * PAGE_SIZE;
5281
5282 while (vfs_drt_mark_pages(scmap, offset, length, &new_dirty) != KERN_SUCCESS) {
5283 /*
5284 * no room left in the map
5285 * only a partial update was done
5286 * push out some pages and try again
5287 */
5288 sparse_cluster_push(scmap, vp, EOF, 0, callback, callback_arg);
5289
5290 offset += (new_dirty * PAGE_SIZE_64);
5291 length -= (new_dirty * PAGE_SIZE);
5292 }
5293 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_END, vp, (*scmap), 0, 0, 0);
5294 }
5295
5296
5297 static int
5298 cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
5299 {
5300 upl_page_info_t *pl;
5301 upl_t upl;
5302 addr64_t ubc_paddr;
5303 kern_return_t kret;
5304 int error = 0;
5305 int did_read = 0;
5306 int abort_flags;
5307 int upl_flags;
5308 int bflag;
5309
5310 if (flags & IO_PASSIVE)
5311 bflag = CL_PASSIVE;
5312 else
5313 bflag = 0;
5314
5315 upl_flags = UPL_SET_LITE;
5316
5317 if ( !(flags & CL_READ) ) {
5318 /*
5319 * "write" operation: let the UPL subsystem know
5320 * that we intend to modify the buffer cache pages
5321 * we're gathering.
5322 */
5323 upl_flags |= UPL_WILL_MODIFY;
5324 } else {
5325 /*
5326 * indicate that there is no need to pull the
5327 * mapping for this page... we're only going
5328 * to read from it, not modify it.
5329 */
5330 upl_flags |= UPL_FILE_IO;
5331 }
5332 kret = ubc_create_upl(vp,
5333 uio->uio_offset & ~PAGE_MASK_64,
5334 PAGE_SIZE,
5335 &upl,
5336 &pl,
5337 upl_flags);
5338
5339 if (kret != KERN_SUCCESS)
5340 return(EINVAL);
5341
5342 if (!upl_valid_page(pl, 0)) {
5343 /*
5344 * issue a synchronous read to cluster_io
5345 */
5346 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
5347 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
5348 if (error) {
5349 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
5350
5351 return(error);
5352 }
5353 did_read = 1;
5354 }
5355 ubc_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)(uio->uio_offset & PAGE_MASK_64);
5356
5357 /*
5358 * NOTE: There is no prototype for the following in BSD. It, and the
5359 * definitions of cppvPsrc, cppvPsnk, cppvFsnk, and cppvFsrc, can be found in
5360 * osfmk/ppc/mappings.h. They are not included here because there appears to be no
5361 * way to do so without exporting them to kexts as well.
5362 */
5363 if (flags & CL_READ)
5364 // copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsnk); /* Copy physical to physical and flush the destination */
5365 copypv(ubc_paddr, usr_paddr, xsize, 2 | 1 | 4); /* Copy physical to physical and flush the destination */
5366 else
5367 // copypv(usr_paddr, ubc_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsrc); /* Copy physical to physical and flush the source */
5368 copypv(usr_paddr, ubc_paddr, xsize, 2 | 1 | 8); /* Copy physical to physical and flush the source */
5369
5370 if ( !(flags & CL_READ) || (upl_valid_page(pl, 0) && upl_dirty_page(pl, 0))) {
5371 /*
5372 * issue a synchronous write to cluster_io
5373 */
5374 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
5375 bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
5376 }
5377 if (error == 0)
5378 uio_update(uio, (user_size_t)xsize);
5379
5380 if (did_read)
5381 abort_flags = UPL_ABORT_FREE_ON_EMPTY;
5382 else
5383 abort_flags = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
5384
5385 ubc_upl_abort_range(upl, 0, PAGE_SIZE, abort_flags);
5386
5387 return (error);
5388 }
5389
5390
5391
5392 int
5393 cluster_copy_upl_data(struct uio *uio, upl_t upl, int upl_offset, int *io_resid)
5394 {
5395 int pg_offset;
5396 int pg_index;
5397 int csize;
5398 int segflg;
5399 int retval = 0;
5400 int xsize;
5401 upl_page_info_t *pl;
5402
5403 xsize = *io_resid;
5404
5405 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
5406 (int)uio->uio_offset, upl_offset, xsize, 0, 0);
5407
5408 segflg = uio->uio_segflg;
5409
5410 switch(segflg) {
5411
5412 case UIO_USERSPACE32:
5413 case UIO_USERISPACE32:
5414 uio->uio_segflg = UIO_PHYS_USERSPACE32;
5415 break;
5416
5417 case UIO_USERSPACE:
5418 case UIO_USERISPACE:
5419 uio->uio_segflg = UIO_PHYS_USERSPACE;
5420 break;
5421
5422 case UIO_USERSPACE64:
5423 case UIO_USERISPACE64:
5424 uio->uio_segflg = UIO_PHYS_USERSPACE64;
5425 break;
5426
5427 case UIO_SYSSPACE:
5428 uio->uio_segflg = UIO_PHYS_SYSSPACE;
5429 break;
5430
5431 }
5432 pl = ubc_upl_pageinfo(upl);
5433
5434 pg_index = upl_offset / PAGE_SIZE;
5435 pg_offset = upl_offset & PAGE_MASK;
5436 csize = min(PAGE_SIZE - pg_offset, xsize);
5437
5438 while (xsize && retval == 0) {
5439 addr64_t paddr;
5440
5441 paddr = ((addr64_t)upl_phys_page(pl, pg_index) << 12) + pg_offset;
5442
5443 retval = uiomove64(paddr, csize, uio);
5444
5445 pg_index += 1;
5446 pg_offset = 0;
5447 xsize -= csize;
5448 csize = min(PAGE_SIZE, xsize);
5449 }
5450 *io_resid = xsize;
5451
5452 uio->uio_segflg = segflg;
5453
5454 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
5455 (int)uio->uio_offset, xsize, retval, segflg, 0);
5456
5457 return (retval);
5458 }
5459
5460
5461 int
5462 cluster_copy_ubc_data(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty)
5463 {
5464
5465 return (cluster_copy_ubc_data_internal(vp, uio, io_resid, mark_dirty, 1));
5466 }
5467
5468
5469 static int
5470 cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference)
5471 {
5472 int segflg;
5473 int io_size;
5474 int xsize;
5475 int start_offset;
5476 int retval = 0;
5477 memory_object_control_t control;
5478
5479 io_size = *io_resid;
5480
5481 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
5482 (int)uio->uio_offset, 0, io_size, 0, 0);
5483
5484 control = ubc_getobject(vp, UBC_FLAGS_NONE);
5485
5486 if (control == MEMORY_OBJECT_CONTROL_NULL) {
5487 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
5488 (int)uio->uio_offset, io_size, retval, 3, 0);
5489
5490 return(0);
5491 }
5492 segflg = uio->uio_segflg;
5493
5494 switch(segflg) {
5495
5496 case UIO_USERSPACE32:
5497 case UIO_USERISPACE32:
5498 uio->uio_segflg = UIO_PHYS_USERSPACE32;
5499 break;
5500
5501 case UIO_USERSPACE64:
5502 case UIO_USERISPACE64:
5503 uio->uio_segflg = UIO_PHYS_USERSPACE64;
5504 break;
5505
5506 case UIO_USERSPACE:
5507 case UIO_USERISPACE:
5508 uio->uio_segflg = UIO_PHYS_USERSPACE;
5509 break;
5510
5511 case UIO_SYSSPACE:
5512 uio->uio_segflg = UIO_PHYS_SYSSPACE;
5513 break;
5514 }
5515
5516 if ( (io_size = *io_resid) ) {
5517 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
5518 xsize = uio_resid(uio);
5519
5520 retval = memory_object_control_uiomove(control, uio->uio_offset - start_offset, uio,
5521 start_offset, io_size, mark_dirty, take_reference);
5522 xsize -= uio_resid(uio);
5523 io_size -= xsize;
5524 }
5525 uio->uio_segflg = segflg;
5526 *io_resid = io_size;
5527
5528 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
5529 (int)uio->uio_offset, io_size, retval, 0x80000000 | segflg, 0);
5530
5531 return(retval);
5532 }
5533
5534
5535 int
5536 is_file_clean(vnode_t vp, off_t filesize)
5537 {
5538 off_t f_offset;
5539 int flags;
5540 int total_dirty = 0;
5541
5542 for (f_offset = 0; f_offset < filesize; f_offset += PAGE_SIZE_64) {
5543 if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) {
5544 if (flags & UPL_POP_DIRTY) {
5545 total_dirty++;
5546 }
5547 }
5548 }
5549 if (total_dirty)
5550 return(EINVAL);
5551
5552 return (0);
5553 }
5554
5555
5556
5557 /*
5558 * Dirty region tracking/clustering mechanism.
5559 *
5560 * This code (vfs_drt_*) provides a mechanism for tracking and clustering
5561 * dirty regions within a larger space (file). It is primarily intended to
5562 * support clustering in large files with many dirty areas.
5563 *
5564 * The implementation assumes that the dirty regions are pages.
5565 *
5566 * To represent dirty pages within the file, we store bit vectors in a
5567 * variable-size circular hash.
5568 */
5569
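/*
 * Editorial sketch (not part of the original source, never compiled): the
 * typical way the vfs_drt_* interfaces below are driven, modelled on
 * sparse_cluster_add() and sparse_cluster_push() above.  The function name,
 * offsets and lengths are illustrative only.
 */
#if 0
static void
vfs_drt_usage_sketch(void)
{
	void	*scmap = NULL;	/* opaque map pointer, owned by the vfs_drt code */
	u_int	 new_dirty;
	off_t	 offset;
	u_int	 length;

	/*
	 * Mark two pages at offset 0 dirty; the map is allocated on first use.
	 * On KERN_FAILURE only 'new_dirty' pages were recorded (map full) and
	 * the caller is expected to push some clusters out and retry the
	 * remainder, as sparse_cluster_add() does.
	 */
	(void) vfs_drt_mark_pages(&scmap, 0, 2 * PAGE_SIZE, &new_dirty);

	/*
	 * Drain: each call returns one run of dirty pages and marks it clean.
	 * Once the map is empty it is freed, scmap is set to NULL and
	 * KERN_FAILURE is returned.
	 */
	while (vfs_drt_get_cluster(&scmap, &offset, &length) == KERN_SUCCESS) {
		/* issue I/O for the byte range [offset, offset + length) */
	}
}
#endif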
5570 /*
5571 * Bitvector size. This determines the number of pages we group in a
5572 * single hashtable entry. Each hashtable entry is aligned to this
5573 * size within the file.
5574 */
5575 #define DRT_BITVECTOR_PAGES 256
5576
5577 /*
5578 * File offset handling.
5579 *
5580 * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES;
5581 * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES;
5582 * the correct formula is (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1))
5583 #define DRT_ADDRESS_MASK (~((1 << 20) - 1))
5584 #define DRT_ALIGN_ADDRESS(addr) ((addr) & DRT_ADDRESS_MASK)
5585
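/*
 * Editorial example (not part of the original source): with
 * DRT_BITVECTOR_PAGES = 256 and the 4KB PAGE_SIZE implied by the
 * (1 << 20) mask above, each hashtable entry covers a 1MB (0x100000 byte)
 * window of the file, and offsets are aligned down to that window before
 * being hashed or stored, e.g.
 *
 *	DRT_ALIGN_ADDRESS(0x123456) == 0x100000
 *	DRT_ALIGN_ADDRESS(0x0fffff) == 0x000000
 */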
5586 /*
5587 * Hashtable address field handling.
5588 *
5589 * The low-order bits of the hashtable address are used to conserve
5590 * space.
5591 *
5592 * DRT_HASH_COUNT_MASK must be large enough to store the range
5593 * 0-DRT_BITVECTOR_PAGES inclusive, as well as have one value
5594 * to indicate that the bucket is actually unoccupied.
5595 */
5596 #define DRT_HASH_GET_ADDRESS(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK)
5597 #define DRT_HASH_SET_ADDRESS(scm, i, a) \
5598 do { \
5599 (scm)->scm_hashtable[(i)].dhe_control = \
5600 ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a); \
5601 } while (0)
5602 #define DRT_HASH_COUNT_MASK 0x1ff
5603 #define DRT_HASH_GET_COUNT(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK)
5604 #define DRT_HASH_SET_COUNT(scm, i, c) \
5605 do { \
5606 (scm)->scm_hashtable[(i)].dhe_control = \
5607 ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK); \
5608 } while (0)
5609 #define DRT_HASH_CLEAR(scm, i) \
5610 do { \
5611 (scm)->scm_hashtable[(i)].dhe_control = 0; \
5612 } while (0)
5613 #define DRT_HASH_VACATE(scm, i) DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK)
5614 #define DRT_HASH_VACANT(scm, i) (DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK)
5615 #define DRT_HASH_COPY(oscm, oi, scm, i) \
5616 do { \
5617 (scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control; \
5618 DRT_BITVECTOR_COPY(oscm, oi, scm, i); \
5619 } while (0)
5620
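/*
 * Editorial example (not part of the original source): dhe_control packs the
 * 1MB-aligned file offset of the entry into the high bits (bit 20 and up) and
 * the dirty-page count into the low 9 bits; bits 9-19 are unused.  A count
 * field of DRT_HASH_COUNT_MASK (0x1ff, larger than any real count of 0..256)
 * marks the bucket as vacant.  Starting from a cleared entry, after
 *
 *	DRT_HASH_SET_ADDRESS(scm, i, 0x345678);
 *	DRT_HASH_SET_COUNT(scm, i, 5);
 *
 * the control word is 0x300005, DRT_HASH_GET_ADDRESS() returns 0x300000 and
 * DRT_HASH_GET_COUNT() returns 5, while DRT_HASH_VACATE() would reset the
 * count field to 0x1ff.
 */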
5621
5622 /*
5623 * Hash table moduli.
5624 *
5625 * Since the hashtable entry's size is dependent on the size of
5626 * the bitvector, and since the hashtable size is constrained to
5627 * both being prime and fitting within the desired allocation
5628 * size, these values need to be manually determined.
5629 *
5630 * For DRT_BITVECTOR_PAGES = 256, the entry size is 40 bytes.
5631 *
5632 * The small hashtable allocation is 1024 bytes, so the modulus is 23.
5633 * The large hashtable allocation is 16384 bytes, so the modulus is 401.
5634 */
5635 #define DRT_HASH_SMALL_MODULUS 23
5636 #define DRT_HASH_LARGE_MODULUS 401
5637
5638 /*
5639 * Physical memory required before the large hash modulus is permitted.
5640 *
5641 * On small memory systems, the large hash modulus can lead to physical
5642 * memory starvation, so we avoid using it there.
5643 */
5644 #define DRT_HASH_LARGE_MEMORY_REQUIRED (1024LL * 1024LL * 1024LL) /* 1GiB */
5645
5646 #define DRT_SMALL_ALLOCATION 1024 /* 104 bytes spare */
5647 #define DRT_LARGE_ALLOCATION 16384 /* 344 bytes spare */
5648
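/*
 * Editorial note (not part of the original source), checking the sizing
 * arithmetic above: each vfs_drt_hashentry is an 8-byte control word plus a
 * (256 / 32) * 4 = 32 byte bitvector, i.e. 40 bytes.  The small table uses
 * 23 * 40 = 920 bytes of the 1024-byte allocation (104 bytes spare) and the
 * large table uses 401 * 40 = 16040 bytes of the 16384-byte allocation
 * (344 bytes spare); the clustermap header also fits within the remainder.
 */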
5649 /* *** nothing below here has secret dependencies on DRT_BITVECTOR_PAGES *** */
5650
5651 /*
5652 * Hashtable bitvector handling.
5653 *
5654 * Bitvector fields are 32 bits long.
5655 */
5656
5657 #define DRT_HASH_SET_BIT(scm, i, bit) \
5658 (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] |= (1 << ((bit) % 32))
5659
5660 #define DRT_HASH_CLEAR_BIT(scm, i, bit) \
5661 (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] &= ~(1 << ((bit) % 32))
5662
5663 #define DRT_HASH_TEST_BIT(scm, i, bit) \
5664 ((scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] & (1 << ((bit) % 32)))
5665
5666 #define DRT_BITVECTOR_CLEAR(scm, i) \
5667 bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
5668
5669 #define DRT_BITVECTOR_COPY(oscm, oi, scm, i) \
5670 bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0], \
5671 &(scm)->scm_hashtable[(i)].dhe_bitvector[0], \
5672 (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
5673
5674
5675
5676 /*
5677 * Hashtable entry.
5678 */
5679 struct vfs_drt_hashentry {
5680 u_int64_t dhe_control;
5681 u_int32_t dhe_bitvector[DRT_BITVECTOR_PAGES / 32];
5682 };
5683
5684 /*
5685 * Dirty Region Tracking structure.
5686 *
5687 * The hashtable is allocated entirely inside the DRT structure.
5688 *
5689 * The hash is a simple circular prime modulus arrangement, the structure
5690 * is resized from small to large if it overflows.
5691 */
5692
5693 struct vfs_drt_clustermap {
5694 u_int32_t scm_magic; /* sanity/detection */
5695 #define DRT_SCM_MAGIC 0x12020003
5696 u_int32_t scm_modulus; /* current ring size */
5697 u_int32_t scm_buckets; /* number of occupied buckets */
5698 u_int32_t scm_lastclean; /* last entry we cleaned */
5699 u_int32_t scm_iskips; /* number of slot skips */
5700
5701 struct vfs_drt_hashentry scm_hashtable[0];
5702 };
5703
5704
5705 #define DRT_HASH(scm, addr) ((addr) % (scm)->scm_modulus)
5706 #define DRT_HASH_NEXT(scm, addr) (((addr) + 1) % (scm)->scm_modulus)
5707
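/*
 * Editorial example (not part of the original source): the hash key is the
 * 1MB-aligned byte offset itself, reduced modulo the (prime) table size, and
 * collisions are resolved by linear probing with DRT_HASH_NEXT(), as in
 * vfs_drt_search_index() and vfs_drt_get_index() below.  With the small
 * modulus of 23, for example, the window starting at offset 0x300000
 * (3 * 2^20 = 3145728) hashes to bucket 3145728 % 23 == 18.
 */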
5708 /*
5709 * Debugging codes and arguments.
5710 */
5711 #define DRT_DEBUG_EMPTYFREE (FSDBG_CODE(DBG_FSRW, 82)) /* nil */
5712 #define DRT_DEBUG_RETCLUSTER (FSDBG_CODE(DBG_FSRW, 83)) /* offset, length */
5713 #define DRT_DEBUG_ALLOC (FSDBG_CODE(DBG_FSRW, 84)) /* copycount */
5714 #define DRT_DEBUG_INSERT (FSDBG_CODE(DBG_FSRW, 85)) /* offset, iskip */
5715 #define DRT_DEBUG_MARK (FSDBG_CODE(DBG_FSRW, 86)) /* offset, length,
5716 * dirty */
5717 /* 0, setcount */
5718 /* 1 (clean, no map) */
5719 /* 2 (map alloc fail) */
5720 /* 3, resid (partial) */
5721 #define DRT_DEBUG_6 (FSDBG_CODE(DBG_FSRW, 87))
5722 #define DRT_DEBUG_SCMDATA (FSDBG_CODE(DBG_FSRW, 88)) /* modulus, buckets,
5723 * lastclean, iskips */
5724
5725
5726 static kern_return_t vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp);
5727 static kern_return_t vfs_drt_free_map(struct vfs_drt_clustermap *cmap);
5728 static kern_return_t vfs_drt_search_index(struct vfs_drt_clustermap *cmap,
5729 u_int64_t offset, int *indexp);
5730 static kern_return_t vfs_drt_get_index(struct vfs_drt_clustermap **cmapp,
5731 u_int64_t offset,
5732 int *indexp,
5733 int recursed);
5734 static kern_return_t vfs_drt_do_mark_pages(
5735 void **cmapp,
5736 u_int64_t offset,
5737 u_int length,
5738 u_int *setcountp,
5739 int dirty);
5740 static void vfs_drt_trace(
5741 struct vfs_drt_clustermap *cmap,
5742 int code,
5743 int arg1,
5744 int arg2,
5745 int arg3,
5746 int arg4);
5747
5748
5749 /*
5750 * Allocate and initialise a sparse cluster map.
5751 *
5752 * Will allocate a new map, resize or compact an existing map.
5753 *
5754 * XXX we should probably have at least one intermediate map size,
5755 * as the 1:16 ratio seems a bit drastic.
5756 */
5757 static kern_return_t
5758 vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp)
5759 {
5760 struct vfs_drt_clustermap *cmap, *ocmap;
5761 kern_return_t kret;
5762 u_int64_t offset;
5763 u_int32_t i;
5764 int nsize, active_buckets, index, copycount;
5765
5766 ocmap = NULL;
5767 if (cmapp != NULL)
5768 ocmap = *cmapp;
5769
5770 /*
5771 * Decide on the size of the new map.
5772 */
5773 if (ocmap == NULL) {
5774 nsize = DRT_HASH_SMALL_MODULUS;
5775 } else {
5776 /* count the number of active buckets in the old map */
5777 active_buckets = 0;
5778 for (i = 0; i < ocmap->scm_modulus; i++) {
5779 if (!DRT_HASH_VACANT(ocmap, i) &&
5780 (DRT_HASH_GET_COUNT(ocmap, i) != 0))
5781 active_buckets++;
5782 }
5783 /*
5784 * If we're currently using the small allocation, check to
5785 * see whether we should grow to the large one.
5786 */
5787 if (ocmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
5788 /*
5789 * If the ring is nearly full and we are allowed to
5790 * use the large modulus, upgrade.
5791 */
5792 if ((active_buckets > (DRT_HASH_SMALL_MODULUS - 5)) &&
5793 (max_mem >= DRT_HASH_LARGE_MEMORY_REQUIRED)) {
5794 nsize = DRT_HASH_LARGE_MODULUS;
5795 } else {
5796 nsize = DRT_HASH_SMALL_MODULUS;
5797 }
5798 } else {
5799 /* already using the large modulus */
5800 nsize = DRT_HASH_LARGE_MODULUS;
5801 /*
5802 * If the ring is completely full, there's
5803 * nothing useful for us to do. Behave as
5804 * though we had compacted into the new
5805 * array and return.
5806 */
5807 if (active_buckets >= DRT_HASH_LARGE_MODULUS)
5808 return(KERN_SUCCESS);
5809 }
5810 }
5811
5812 /*
5813 * Allocate and initialise the new map.
5814 */
5815
5816 kret = kmem_alloc(kernel_map, (vm_offset_t *)&cmap,
5817 (nsize == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION);
5818 if (kret != KERN_SUCCESS)
5819 return(kret);
5820 cmap->scm_magic = DRT_SCM_MAGIC;
5821 cmap->scm_modulus = nsize;
5822 cmap->scm_buckets = 0;
5823 cmap->scm_lastclean = 0;
5824 cmap->scm_iskips = 0;
5825 for (i = 0; i < cmap->scm_modulus; i++) {
5826 DRT_HASH_CLEAR(cmap, i);
5827 DRT_HASH_VACATE(cmap, i);
5828 DRT_BITVECTOR_CLEAR(cmap, i);
5829 }
5830
5831 /*
5832 * If there's an old map, re-hash entries from it into the new map.
5833 */
5834 copycount = 0;
5835 if (ocmap != NULL) {
5836 for (i = 0; i < ocmap->scm_modulus; i++) {
5837 /* skip empty buckets */
5838 if (DRT_HASH_VACANT(ocmap, i) ||
5839 (DRT_HASH_GET_COUNT(ocmap, i) == 0))
5840 continue;
5841 /* get new index */
5842 offset = DRT_HASH_GET_ADDRESS(ocmap, i);
5843 kret = vfs_drt_get_index(&cmap, offset, &index, 1);
5844 if (kret != KERN_SUCCESS) {
5845 /* XXX need to bail out gracefully here */
5846 panic("vfs_drt: new cluster map mysteriously too small");
5847 index = 0;
5848 }
5849 /* copy */
5850 DRT_HASH_COPY(ocmap, i, cmap, index);
5851 copycount++;
5852 }
5853 }
5854
5855 /* log what we've done */
5856 vfs_drt_trace(cmap, DRT_DEBUG_ALLOC, copycount, 0, 0, 0);
5857
5858 /*
5859 * It's important to ensure that *cmapp always points to
5860 * a valid map, so we must overwrite it before freeing
5861 * the old map.
5862 */
5863 *cmapp = cmap;
5864 if (ocmap != NULL) {
5865 /* emit stats into trace buffer */
5866 vfs_drt_trace(ocmap, DRT_DEBUG_SCMDATA,
5867 ocmap->scm_modulus,
5868 ocmap->scm_buckets,
5869 ocmap->scm_lastclean,
5870 ocmap->scm_iskips);
5871
5872 vfs_drt_free_map(ocmap);
5873 }
5874 return(KERN_SUCCESS);
5875 }
5876
5877
5878 /*
5879 * Free a sparse cluster map.
5880 */
5881 static kern_return_t
5882 vfs_drt_free_map(struct vfs_drt_clustermap *cmap)
5883 {
5884 kmem_free(kernel_map, (vm_offset_t)cmap,
5885 (cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION);
5886 return(KERN_SUCCESS);
5887 }
5888
5889
5890 /*
5891 * Find the hashtable slot currently occupied by an entry for the supplied offset.
5892 */
5893 static kern_return_t
5894 vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *indexp)
5895 {
5896 int index;
5897 u_int32_t i;
5898
5899 offset = DRT_ALIGN_ADDRESS(offset);
5900 index = DRT_HASH(cmap, offset);
5901
5902 /* traverse the hashtable */
5903 for (i = 0; i < cmap->scm_modulus; i++) {
5904
5905 /*
5906 * If the slot is vacant, we can stop.
5907 */
5908 if (DRT_HASH_VACANT(cmap, index))
5909 break;
5910
5911 /*
5912 * If the address matches our offset, we have success.
5913 */
5914 if (DRT_HASH_GET_ADDRESS(cmap, index) == offset) {
5915 *indexp = index;
5916 return(KERN_SUCCESS);
5917 }
5918
5919 /*
5920 * Move to the next slot, try again.
5921 */
5922 index = DRT_HASH_NEXT(cmap, index);
5923 }
5924 /*
5925 * It's not there.
5926 */
5927 return(KERN_FAILURE);
5928 }
5929
5930 /*
5931 * Find the hashtable slot for the supplied offset. If we haven't allocated
5932 * one yet, allocate one and populate the address field. Note that the new
5933 * entry will have a zero page count and thus still technically be free, so
5934 * in the case where we are called to clean pages, the slot will remain free.
5935 */
5936 static kern_return_t
5937 vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *indexp, int recursed)
5938 {
5939 struct vfs_drt_clustermap *cmap;
5940 kern_return_t kret;
5941 u_int32_t index;
5942 u_int32_t i;
5943
5944 cmap = *cmapp;
5945
5946 /* look for an existing entry */
5947 kret = vfs_drt_search_index(cmap, offset, indexp);
5948 if (kret == KERN_SUCCESS)
5949 return(kret);
5950
5951 /* need to allocate an entry */
5952 offset = DRT_ALIGN_ADDRESS(offset);
5953 index = DRT_HASH(cmap, offset);
5954
5955 /* scan from the index forwards looking for a vacant slot */
5956 for (i = 0; i < cmap->scm_modulus; i++) {
5957 /* slot vacant? */
5958 if (DRT_HASH_VACANT(cmap, index) || DRT_HASH_GET_COUNT(cmap,index) == 0) {
5959 cmap->scm_buckets++;
5960 if (index < cmap->scm_lastclean)
5961 cmap->scm_lastclean = index;
5962 DRT_HASH_SET_ADDRESS(cmap, index, offset);
5963 DRT_HASH_SET_COUNT(cmap, index, 0);
5964 DRT_BITVECTOR_CLEAR(cmap, index);
5965 *indexp = index;
5966 vfs_drt_trace(cmap, DRT_DEBUG_INSERT, (int)offset, i, 0, 0);
5967 return(KERN_SUCCESS);
5968 }
5969 cmap->scm_iskips += i;
5970 index = DRT_HASH_NEXT(cmap, index);
5971 }
5972
5973 /*
5974 * We haven't found a vacant slot, so the map is full. If we're not
5975 * already recursed, try reallocating/compacting it.
5976 */
5977 if (recursed)
5978 return(KERN_FAILURE);
5979 kret = vfs_drt_alloc_map(cmapp);
5980 if (kret == KERN_SUCCESS) {
5981 /* now try to insert again */
5982 kret = vfs_drt_get_index(cmapp, offset, indexp, 1);
5983 }
5984 return(kret);
5985 }
5986
5987 /*
5988 * Implementation of set dirty/clean.
5989 *
5990 * In the 'clean' case, not finding a map is OK.
5991 */
5992 static kern_return_t
5993 vfs_drt_do_mark_pages(
5994 void **private,
5995 u_int64_t offset,
5996 u_int length,
5997 u_int *setcountp,
5998 int dirty)
5999 {
6000 struct vfs_drt_clustermap *cmap, **cmapp;
6001 kern_return_t kret;
6002 int i, index, pgoff, pgcount, setcount, ecount;
6003
6004 cmapp = (struct vfs_drt_clustermap **)private;
6005 cmap = *cmapp;
6006
6007 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_START, (int)offset, (int)length, dirty, 0);
6008
6009 if (setcountp != NULL)
6010 *setcountp = 0;
6011
6012 /* allocate a cluster map if we don't already have one */
6013 if (cmap == NULL) {
6014 /* no cluster map, nothing to clean */
6015 if (!dirty) {
6016 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 1, 0, 0, 0);
6017 return(KERN_SUCCESS);
6018 }
6019 kret = vfs_drt_alloc_map(cmapp);
6020 if (kret != KERN_SUCCESS) {
6021 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 2, 0, 0, 0);
6022 return(kret);
6023 }
6024 }
6025 setcount = 0;
6026
6027 /*
6028 * Iterate over the length of the region.
6029 */
6030 while (length > 0) {
6031 /*
6032 * Get the hashtable index for this offset.
6033 *
6034 * XXX this will add blank entries if we are clearing a range
6035 * that hasn't been dirtied.
6036 */
6037 kret = vfs_drt_get_index(cmapp, offset, &index, 0);
6038 cmap = *cmapp; /* may have changed! */
6039 /* this may be a partial-success return */
6040 if (kret != KERN_SUCCESS) {
6041 if (setcountp != NULL)
6042 *setcountp = setcount;
6043 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 3, (int)length, 0, 0);
6044
6045 return(kret);
6046 }
6047
6048 /*
6049 * Work out how many pages we're modifying in this
6050 * hashtable entry.
6051 */
6052 pgoff = (offset - DRT_ALIGN_ADDRESS(offset)) / PAGE_SIZE;
6053 pgcount = min((length / PAGE_SIZE), (DRT_BITVECTOR_PAGES - pgoff));
6054
6055 /*
6056 * Iterate over pages, dirty/clearing as we go.
6057 */
6058 ecount = DRT_HASH_GET_COUNT(cmap, index);
6059 for (i = 0; i < pgcount; i++) {
6060 if (dirty) {
6061 if (!DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
6062 DRT_HASH_SET_BIT(cmap, index, pgoff + i);
6063 ecount++;
6064 setcount++;
6065 }
6066 } else {
6067 if (DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
6068 DRT_HASH_CLEAR_BIT(cmap, index, pgoff + i);
6069 ecount--;
6070 setcount++;
6071 }
6072 }
6073 }
6074 DRT_HASH_SET_COUNT(cmap, index, ecount);
6075
6076 offset += pgcount * PAGE_SIZE;
6077 length -= pgcount * PAGE_SIZE;
6078 }
6079 if (setcountp != NULL)
6080 *setcountp = setcount;
6081
6082 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 0, setcount, 0, 0);
6083
6084 return(KERN_SUCCESS);
6085 }
6086
6087 /*
6088 * Mark a set of pages as dirty/clean.
6089 *
6090 * This is a public interface.
6091 *
6092 * cmapp
6093 * Pointer to storage suitable for holding a pointer. Note that
6094 * this must either be NULL or a value set by this function.
6095 *
6096 * size
6097 * Current file size in bytes.
6098 *
6099 * offset
6100 * Offset of the first page to be marked as dirty, in bytes. Must be
6101 * page-aligned.
6102 *
6103 * length
6104 * Length of dirty region, in bytes. Must be a multiple of PAGE_SIZE.
6105 *
6106 * setcountp
6107 * Number of pages newly marked dirty by this call (optional).
6108 *
6109 * Returns KERN_SUCCESS if all the pages were successfully marked.
6110 */
6111 static kern_return_t
6112 vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp)
6113 {
6114 /* XXX size unused, drop from interface */
6115 return(vfs_drt_do_mark_pages(cmapp, offset, length, setcountp, 1));
6116 }
6117
6118 #if 0
6119 static kern_return_t
6120 vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length)
6121 {
6122 return(vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0));
6123 }
6124 #endif
6125
6126 /*
6127 * Get a cluster of dirty pages.
6128 *
6129 * This is a public interface.
6130 *
6131 * cmapp
6132 * Pointer to storage managed by drt_mark_pages. Note that this must
6133 * be NULL or a value set by drt_mark_pages.
6134 *
6135 * offsetp
6136 * Returns the byte offset into the file of the first page in the cluster.
6137 *
6138 * lengthp
6139 * Returns the length in bytes of the cluster of dirty pages.
6140 *
6141 * Returns success if a cluster was found. If KERN_FAILURE is returned, there
6142 * are no dirty pages meeting the minimum size criteria. Private storage will
6143 * be released if there are no more dirty pages left in the map.
6144 *
6145 */
6146 static kern_return_t
6147 vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp)
6148 {
6149 struct vfs_drt_clustermap *cmap;
6150 u_int64_t offset;
6151 u_int length;
6152 u_int32_t j;
6153 int index, i, fs, ls;
6154
6155 /* sanity */
6156 if ((cmapp == NULL) || (*cmapp == NULL))
6157 return(KERN_FAILURE);
6158 cmap = *cmapp;
6159
6160 /* walk the hashtable */
6161 for (offset = 0, j = 0; j < cmap->scm_modulus; offset += (DRT_BITVECTOR_PAGES * PAGE_SIZE), j++) {
6162 index = DRT_HASH(cmap, offset);
6163
6164 if (DRT_HASH_VACANT(cmap, index) || (DRT_HASH_GET_COUNT(cmap, index) == 0))
6165 continue;
6166
6167 /* scan the bitfield for a string of bits */
6168 fs = -1;
6169
6170 for (i = 0; i < DRT_BITVECTOR_PAGES; i++) {
6171 if (DRT_HASH_TEST_BIT(cmap, index, i)) {
6172 fs = i;
6173 break;
6174 }
6175 }
6176 if (fs == -1) {
6177 /* didn't find any bits set */
6178 panic("vfs_drt: entry summary count > 0 but no bits set in map");
6179 }
6180 for (ls = 0; i < DRT_BITVECTOR_PAGES; i++, ls++) {
6181 if (!DRT_HASH_TEST_BIT(cmap, index, i))
6182 break;
6183 }
6184
6185 /* compute offset and length, mark pages clean */
6186 offset = DRT_HASH_GET_ADDRESS(cmap, index) + (PAGE_SIZE * fs);
6187 length = ls * PAGE_SIZE;
6188 vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0);
6189 cmap->scm_lastclean = index;
6190
6191 /* return successful */
6192 *offsetp = (off_t)offset;
6193 *lengthp = length;
6194
6195 vfs_drt_trace(cmap, DRT_DEBUG_RETCLUSTER, (int)offset, (int)length, 0, 0);
6196 return(KERN_SUCCESS);
6197 }
6198 /*
6199 * We didn't find anything... the hashtable is empty:
6200 * emit stats into the trace buffer and
6201 * then free it
6202 */
6203 vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
6204 cmap->scm_modulus,
6205 cmap->scm_buckets,
6206 cmap->scm_lastclean,
6207 cmap->scm_iskips);
6208
6209 vfs_drt_free_map(cmap);
6210 *cmapp = NULL;
6211
6212 return(KERN_FAILURE);
6213 }
6214
6215
6216 static kern_return_t
6217 vfs_drt_control(void **cmapp, int op_type)
6218 {
6219 struct vfs_drt_clustermap *cmap;
6220
6221 /* sanity */
6222 if ((cmapp == NULL) || (*cmapp == NULL))
6223 return(KERN_FAILURE);
6224 cmap = *cmapp;
6225
6226 switch (op_type) {
6227 case 0:
6228 /* emit stats into trace buffer */
6229 vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
6230 cmap->scm_modulus,
6231 cmap->scm_buckets,
6232 cmap->scm_lastclean,
6233 cmap->scm_iskips);
6234
6235 vfs_drt_free_map(cmap);
6236 *cmapp = NULL;
6237 break;
6238
6239 case 1:
6240 cmap->scm_lastclean = 0;
6241 break;
6242 }
6243 return(KERN_SUCCESS);
6244 }
6245
6246
6247
6248 /*
6249 * Emit a summary of the state of the clustermap into the trace buffer
6250 * along with some caller-provided data.
6251 */
6252 #if KDEBUG
6253 static void
6254 vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, int code, int arg1, int arg2, int arg3, int arg4)
6255 {
6256 KERNEL_DEBUG(code, arg1, arg2, arg3, arg4, 0);
6257 }
6258 #else
6259 static void
6260 vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, __unused int code,
6261 __unused int arg1, __unused int arg2, __unused int arg3,
6262 __unused int arg4)
6263 {
6264 }
6265 #endif
6266
6267 #if 0
6268 /*
6269 * Perform basic sanity check on the hash entry summary count
6270 * vs. the actual bits set in the entry.
6271 */
6272 static void
6273 vfs_drt_sanity(struct vfs_drt_clustermap *cmap)
6274 {
6275 int index, i;
6276 int bits_on;
6277
6278 for (index = 0; index < cmap->scm_modulus; index++) {
6279 if (DRT_HASH_VACANT(cmap, index))
6280 continue;
6281
6282 for (bits_on = 0, i = 0; i < DRT_BITVECTOR_PAGES; i++) {
6283 if (DRT_HASH_TEST_BIT(cmap, index, i))
6284 bits_on++;
6285 }
6286 if (bits_on != DRT_HASH_GET_COUNT(cmap, index))
6287 panic("bits_on = %d, index = %d\n", bits_on, index);
6288 }
6289 }
6290 #endif