apple/xnu (xnu-3789.31.2) - bsd/vfs/vfs_cluster.c
1 /*
2 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)vfs_cluster.c 8.10 (Berkeley) 3/28/95
62 */
63
64 #include <sys/param.h>
65 #include <sys/proc_internal.h>
66 #include <sys/buf_internal.h>
67 #include <sys/mount_internal.h>
68 #include <sys/vnode_internal.h>
69 #include <sys/trace.h>
70 #include <sys/malloc.h>
71 #include <sys/time.h>
72 #include <sys/kernel.h>
73 #include <sys/resourcevar.h>
74 #include <miscfs/specfs/specdev.h>
75 #include <sys/uio_internal.h>
76 #include <libkern/libkern.h>
77 #include <machine/machine_routines.h>
78
79 #include <sys/ubc_internal.h>
80 #include <vm/vnode_pager.h>
81
82 #include <mach/mach_types.h>
83 #include <mach/memory_object_types.h>
84 #include <mach/vm_map.h>
85 #include <mach/upl.h>
86 #include <kern/task.h>
87 #include <kern/policy_internal.h>
88
89 #include <vm/vm_kern.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_pageout.h>
92 #include <vm/vm_fault.h>
93
94 #include <sys/kdebug.h>
95 #include <libkern/OSAtomic.h>
96
97 #include <sys/sdt.h>
98
99 #include <stdbool.h>
100
101 #if 0
102 #undef KERNEL_DEBUG
103 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
104 #endif
105
106
107 #define CL_READ 0x01
108 #define CL_WRITE 0x02
109 #define CL_ASYNC 0x04
110 #define CL_COMMIT 0x08
111 #define CL_PAGEOUT 0x10
112 #define CL_AGE 0x20
113 #define CL_NOZERO 0x40
114 #define CL_PAGEIN 0x80
115 #define CL_DEV_MEMORY 0x100
116 #define CL_PRESERVE 0x200
117 #define CL_THROTTLE 0x400
118 #define CL_KEEPCACHED 0x800
119 #define CL_DIRECT_IO 0x1000
120 #define CL_PASSIVE 0x2000
121 #define CL_IOSTREAMING 0x4000
122 #define CL_CLOSE 0x8000
123 #define CL_ENCRYPTED 0x10000
124 #define CL_RAW_ENCRYPTED 0x20000
125 #define CL_NOCACHE 0x40000
126
127 #define MAX_VECTOR_UPL_ELEMENTS 8
128 #define MAX_VECTOR_UPL_SIZE (2 * MAX_UPL_SIZE_BYTES)
129
130 #define CLUSTER_IO_WAITING ((buf_t)1)
131
132 extern upl_t vector_upl_create(vm_offset_t);
133 extern boolean_t vector_upl_is_valid(upl_t);
134 extern boolean_t vector_upl_set_subupl(upl_t,upl_t, u_int32_t);
135 extern void vector_upl_set_pagelist(upl_t);
136 extern void vector_upl_set_iostate(upl_t, upl_t, vm_offset_t, u_int32_t);
137
138 struct clios {
139 lck_mtx_t io_mtxp;
140 u_int io_completed; /* amount of io that has currently completed */
141 u_int io_issued; /* amount of io that was successfully issued */
142 int io_error; /* error code of first error encountered */
143 int io_wanted; /* someone is sleeping waiting for a change in state */
144 };
145
146 struct cl_direct_read_lock {
147 LIST_ENTRY(cl_direct_read_lock) chain;
148 int32_t ref_count;
149 vnode_t vp;
150 lck_rw_t rw_lock;
151 };
152
153 #define CL_DIRECT_READ_LOCK_BUCKETS 61
154
155 static LIST_HEAD(cl_direct_read_locks, cl_direct_read_lock)
156 cl_direct_read_locks[CL_DIRECT_READ_LOCK_BUCKETS];
157
158 static lck_spin_t cl_direct_read_spin_lock;
159
160 static lck_grp_t *cl_mtx_grp;
161 static lck_attr_t *cl_mtx_attr;
162 static lck_grp_attr_t *cl_mtx_grp_attr;
163 static lck_mtx_t *cl_transaction_mtxp;
164
165 #define IO_UNKNOWN 0
166 #define IO_DIRECT 1
167 #define IO_CONTIG 2
168 #define IO_COPY 3
169
170 #define PUSH_DELAY 0x01
171 #define PUSH_ALL 0x02
172 #define PUSH_SYNC 0x04
173
174
175 static void cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset);
176 static void cluster_wait_IO(buf_t cbp_head, int async);
177 static void cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait);
178
179 static int cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length);
180
181 static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
182 int flags, buf_t real_bp, struct clios *iostate, int (*)(buf_t, void *), void *callback_arg);
183 static int cluster_iodone(buf_t bp, void *callback_arg);
184 static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags, vnode_t vp);
185 static int cluster_is_throttled(vnode_t vp);
186
187 static void cluster_iostate_wait(struct clios *iostate, u_int target, const char *wait_name);
188
189 static void cluster_syncup(vnode_t vp, off_t newEOF, int (*)(buf_t, void *), void *callback_arg, int flags);
190
191 static void cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference);
192 static int cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference);
193
194 static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags,
195 int (*)(buf_t, void *), void *callback_arg);
196 static int cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
197 int flags, int (*)(buf_t, void *), void *callback_arg);
198 static int cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
199 int (*)(buf_t, void *), void *callback_arg, int flags);
200
201 static int cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF,
202 off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg);
203 static int cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF,
204 int *write_type, u_int32_t *write_length, int flags, int (*)(buf_t, void *), void *callback_arg);
205 static int cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF,
206 int *write_type, u_int32_t *write_length, int (*)(buf_t, void *), void *callback_arg, int bflag);
207
208 static int cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*)(buf_t, void *), void *callback_arg);
209
210 static int cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
211 static void cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
212
213 static int cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg);
214
215 static int cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int flags, int (*)(buf_t, void *), void *callback_arg);
216
217 static void sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg);
218 static void sparse_cluster_push(void **cmapp, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*)(buf_t, void *), void *callback_arg);
219 static void sparse_cluster_add(void **cmapp, vnode_t vp, struct cl_extent *, off_t EOF, int (*)(buf_t, void *), void *callback_arg);
220
221 static kern_return_t vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp);
222 static kern_return_t vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp);
223 static kern_return_t vfs_drt_control(void **cmapp, int op_type);
224
225
226 /*
227 * For throttled IO to check whether
228 * a block is cached by the boot cache,
229 * so that it can avoid delaying the IO.
230 *
231 * bootcache_contains_block is initially
232 * NULL. The BootCache will set it while
233 * the cache is active and clear it when
234 * the cache is jettisoned.
235 *
236 * Returns 0 if the block is not
237 * contained in the cache, 1 if it is
238 * contained.
239 *
240 * The function pointer remains valid
241 * after the cache has been evicted even
242 * if bootcache_contains_block has been
243 * cleared.
244 *
245 * See rdar://9974130 The new throttling mechanism breaks the boot cache for throttled IOs
246 */
247 int (*bootcache_contains_block)(dev_t device, u_int64_t blkno) = NULL;
248
249
250 /*
251 * limit the internal I/O size so that we
252 * can represent it in a 32 bit int
253 */
254 #define MAX_IO_REQUEST_SIZE (1024 * 1024 * 512)
255 #define MAX_IO_CONTIG_SIZE MAX_UPL_SIZE_BYTES
256 #define MAX_VECTS 16
257 /*
258 * The MIN_DIRECT_WRITE_SIZE governs how much I/O should be issued before we consider
259 * allowing the caller to bypass the buffer cache. For small I/Os (less than 16k),
260 * we have not historically allowed the write to bypass the UBC.
261 */
262 #define MIN_DIRECT_WRITE_SIZE (16384)
263
264 #define WRITE_THROTTLE 6
265 #define WRITE_THROTTLE_SSD 2
266 #define WRITE_BEHIND 1
267 #define WRITE_BEHIND_SSD 1
268
269 #define PREFETCH 3
270 #define PREFETCH_SSD 2
271 uint32_t speculative_prefetch_max = (MAX_UPL_SIZE_BYTES * 3); /* maximum bytes in a speculative read-ahead */
272 uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a speculative read-ahead on SSDs */
273
274
275 #define IO_SCALE(vp, base) (vp->v_mount->mnt_ioscale * (base))
276 #define MAX_CLUSTER_SIZE(vp) (cluster_max_io_size(vp->v_mount, CL_WRITE))
277 #define MAX_PREFETCH(vp, size, is_ssd) (size * IO_SCALE(vp, ((is_ssd && !ignore_is_ssd) ? PREFETCH_SSD : PREFETCH)))
278
279 int ignore_is_ssd = 0;
280 int speculative_reads_disabled = 0;
281
282 /*
283 * throttle the number of async writes that
284 * can be outstanding on a single vnode
285 * before we issue a synchronous write
286 */
287 #define THROTTLE_MAXCNT 0
288
289 uint32_t throttle_max_iosize = (128 * 1024);
290
291 #define THROTTLE_MAX_IOSIZE (throttle_max_iosize)
292
293 SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_max_iosize, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_max_iosize, 0, "");
294
295
296 void
297 cluster_init(void) {
298 /*
299 * allocate lock group attribute and group
300 */
301 cl_mtx_grp_attr = lck_grp_attr_alloc_init();
302 cl_mtx_grp = lck_grp_alloc_init("cluster I/O", cl_mtx_grp_attr);
303
304 /*
305 * allocate the lock attribute
306 */
307 cl_mtx_attr = lck_attr_alloc_init();
308
309 cl_transaction_mtxp = lck_mtx_alloc_init(cl_mtx_grp, cl_mtx_attr);
310
311 if (cl_transaction_mtxp == NULL)
312 panic("cluster_init: failed to allocate cl_transaction_mtxp");
313
314 lck_spin_init(&cl_direct_read_spin_lock, cl_mtx_grp, cl_mtx_attr);
315
316 for (int i = 0; i < CL_DIRECT_READ_LOCK_BUCKETS; ++i)
317 LIST_INIT(&cl_direct_read_locks[i]);
318 }
319
320
321 uint32_t
322 cluster_max_io_size(mount_t mp, int type)
323 {
324 uint32_t max_io_size;
325 uint32_t segcnt;
326 uint32_t maxcnt;
327
328 switch(type) {
329
330 case CL_READ:
331 segcnt = mp->mnt_segreadcnt;
332 maxcnt = mp->mnt_maxreadcnt;
333 break;
334 case CL_WRITE:
335 segcnt = mp->mnt_segwritecnt;
336 maxcnt = mp->mnt_maxwritecnt;
337 break;
338 default:
339 segcnt = min(mp->mnt_segreadcnt, mp->mnt_segwritecnt);
340 maxcnt = min(mp->mnt_maxreadcnt, mp->mnt_maxwritecnt);
341 break;
342 }
343 if (segcnt > (MAX_UPL_SIZE_BYTES >> PAGE_SHIFT)) {
344 /*
345 * don't allow a size beyond the max UPL size we can create
346 */
347 segcnt = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
348 }
349 max_io_size = min((segcnt * PAGE_SIZE), maxcnt);
350
351 if (max_io_size < MAX_UPL_TRANSFER_BYTES) {
352 /*
353 * don't allow a size smaller than the old fixed limit
354 */
355 max_io_size = MAX_UPL_TRANSFER_BYTES;
356 } else {
357 /*
358 * make sure the size specified is a multiple of PAGE_SIZE
359 */
360 max_io_size &= ~PAGE_MASK;
361 }
362 return (max_io_size);
363 }
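/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * source): callers in this file size their UPL-based transfers against
 * cluster_max_io_size().  A hypothetical read path might clamp its request
 * like this; the variable names are assumptions.
 *
 *	u_int32_t max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
 *
 *	if (io_req_size > max_io_size)
 *		io_req_size = max_io_size;	// never build a request larger than the device can take
 */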
364
365
366
367
368 #define CLW_ALLOCATE 0x01
369 #define CLW_RETURNLOCKED 0x02
370 #define CLW_IONOCACHE 0x04
371 #define CLW_IOPASSIVE 0x08
372
373 /*
374 * if the read ahead context doesn't yet exist,
375 * allocate and initialize it...
376 * the vnode lock serializes multiple callers
377 * during the actual assignment... first one
378 * to grab the lock wins... the other callers
379 * will release the now unnecessary storage
380 *
381 * once the context is present, try to grab (but don't block on)
382 * the lock associated with it... if someone
383 * else currently owns it, then the read
384 * will run without read-ahead. this allows
385 * multiple readers to run in parallel and
386 * since there's only 1 read ahead context,
387 * there's no real loss in only allowing 1
388 * reader to have read-ahead enabled.
389 */
390 static struct cl_readahead *
391 cluster_get_rap(vnode_t vp)
392 {
393 struct ubc_info *ubc;
394 struct cl_readahead *rap;
395
396 ubc = vp->v_ubcinfo;
397
398 if ((rap = ubc->cl_rahead) == NULL) {
399 MALLOC_ZONE(rap, struct cl_readahead *, sizeof *rap, M_CLRDAHEAD, M_WAITOK);
400
401 bzero(rap, sizeof *rap);
402 rap->cl_lastr = -1;
403 lck_mtx_init(&rap->cl_lockr, cl_mtx_grp, cl_mtx_attr);
404
405 vnode_lock(vp);
406
407 if (ubc->cl_rahead == NULL)
408 ubc->cl_rahead = rap;
409 else {
410 lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp);
411 FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD);
412 rap = ubc->cl_rahead;
413 }
414 vnode_unlock(vp);
415 }
416 if (lck_mtx_try_lock(&rap->cl_lockr) == TRUE)
417 return(rap);
418
419 return ((struct cl_readahead *)NULL);
420 }
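/*
 * Illustrative usage sketch (editorial addition): a reader that wants
 * read-ahead calls cluster_get_rap() and, because cl_lockr is held only on
 * a non-NULL return, must drop that lock when it is done.  The surrounding
 * code is hypothetical.
 *
 *	struct cl_readahead *rap;
 *
 *	if ((rap = cluster_get_rap(vp)) != NULL) {
 *		// the read-ahead context is ours for this read
 *		...
 *		lck_mtx_unlock(&rap->cl_lockr);
 *	} else {
 *		// someone else owns read-ahead; run the read without it
 *	}
 */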
421
422
423 /*
424 * if the write behind context doesn't yet exist,
425 * and CLW_ALLOCATE is specified, allocate and initialize it...
426 * the vnode lock serializes multiple callers
427 * during the actual assignment... first one
428 * to grab the lock wins... the other callers
429 * will release the now unnecessary storage
430 *
431 * if CLW_RETURNLOCKED is set, grab (blocking if necessary)
432 * the lock associated with the write behind context before
433 * returning
434 */
435
436 static struct cl_writebehind *
437 cluster_get_wbp(vnode_t vp, int flags)
438 {
439 struct ubc_info *ubc;
440 struct cl_writebehind *wbp;
441
442 ubc = vp->v_ubcinfo;
443
444 if ((wbp = ubc->cl_wbehind) == NULL) {
445
446 if ( !(flags & CLW_ALLOCATE))
447 return ((struct cl_writebehind *)NULL);
448
449 MALLOC_ZONE(wbp, struct cl_writebehind *, sizeof *wbp, M_CLWRBEHIND, M_WAITOK);
450
451 bzero(wbp, sizeof *wbp);
452 lck_mtx_init(&wbp->cl_lockw, cl_mtx_grp, cl_mtx_attr);
453
454 vnode_lock(vp);
455
456 if (ubc->cl_wbehind == NULL)
457 ubc->cl_wbehind = wbp;
458 else {
459 lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
460 FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
461 wbp = ubc->cl_wbehind;
462 }
463 vnode_unlock(vp);
464 }
465 if (flags & CLW_RETURNLOCKED)
466 lck_mtx_lock(&wbp->cl_lockw);
467
468 return (wbp);
469 }
470
471
472 static void
473 cluster_syncup(vnode_t vp, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg, int flags)
474 {
475 struct cl_writebehind *wbp;
476
477 if ((wbp = cluster_get_wbp(vp, 0)) != NULL) {
478
479 if (wbp->cl_number) {
480 lck_mtx_lock(&wbp->cl_lockw);
481
482 cluster_try_push(wbp, vp, newEOF, PUSH_ALL | flags, 0, callback, callback_arg);
483
484 lck_mtx_unlock(&wbp->cl_lockw);
485 }
486 }
487 }
488
489
490 static int
491 cluster_io_present_in_BC(vnode_t vp, off_t f_offset)
492 {
493 daddr64_t blkno;
494 size_t io_size;
495 int (*bootcache_check_fn)(dev_t device, u_int64_t blkno) = bootcache_contains_block;
496
497 if (bootcache_check_fn) {
498 if (VNOP_BLOCKMAP(vp, f_offset, PAGE_SIZE, &blkno, &io_size, NULL, VNODE_READ, NULL))
499 return(0);
500
501 if (io_size == 0)
502 return (0);
503
504 if (bootcache_check_fn(vp->v_mount->mnt_devvp->v_rdev, blkno))
505 return(1);
506 }
507 return(0);
508 }
509
510
511 static int
512 cluster_is_throttled(vnode_t vp)
513 {
514 return (throttle_io_will_be_throttled(-1, vp->v_mount));
515 }
516
517
518 static void
519 cluster_iostate_wait(struct clios *iostate, u_int target, const char *wait_name)
520 {
521
522 lck_mtx_lock(&iostate->io_mtxp);
523
524 while ((iostate->io_issued - iostate->io_completed) > target) {
525
526 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
527 iostate->io_issued, iostate->io_completed, target, 0, 0);
528
529 iostate->io_wanted = 1;
530 msleep((caddr_t)&iostate->io_wanted, &iostate->io_mtxp, PRIBIO + 1, wait_name, NULL);
531
532 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
533 iostate->io_issued, iostate->io_completed, target, 0, 0);
534 }
535 lck_mtx_unlock(&iostate->io_mtxp);
536 }
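/*
 * Illustrative sketch (editorial addition): the direct/contiguous I/O paths
 * use a stack-allocated struct clios to track a stream of async cluster_io()
 * calls; cluster_io() bumps io_issued, cluster_iodone() bumps io_completed,
 * and the issuer drains the stream with cluster_iostate_wait().  Exact call
 * sites vary; this only shows the general shape, and the wait name string is
 * an assumption.
 *
 *	struct clios iostate;
 *
 *	iostate.io_completed = 0;
 *	iostate.io_issued = 0;
 *	iostate.io_error = 0;
 *	iostate.io_wanted = 0;
 *	lck_mtx_init(&iostate.io_mtxp, cl_mtx_grp, cl_mtx_attr);
 *
 *	... issue one or more async cluster_io() calls passing &iostate ...
 *
 *	cluster_iostate_wait(&iostate, 0, "example_wait");	// wait for all issued I/O to complete
 *	lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp);
 */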
537
538 static void cluster_handle_associated_upl(struct clios *iostate, upl_t upl,
539 upl_offset_t upl_offset, upl_size_t size)
540 {
541 if (!size)
542 return;
543
544 upl_t associated_upl = upl_associated_upl(upl);
545
546 if (!associated_upl)
547 return;
548
549 #if 0
550 printf("1: %d %d\n", upl_offset, upl_offset + size);
551 #endif
552
553 /*
554 * The associated UPL is page aligned to file offsets whereas the
555 * UPL it's attached to has different alignment requirements. The
556 * upl_offset that we have refers to @upl. The code that follows
557 * has to deal with the first and last pages in this transaction
558 * which might straddle pages in the associated UPL. To keep
559 * track of these pages, we use the mark bits: if the mark bit is
560 * set, we know another transaction has completed its part of that
561 * page and so we can unlock that page here.
562 *
563 * The following illustrates what we have to deal with:
564 *
565 * MEM u <------------ 1 PAGE ------------> e
566 * +-------------+----------------------+-----------------
567 * | |######################|#################
568 * +-------------+----------------------+-----------------
569 * FILE | <--- a ---> o <------------ 1 PAGE ------------>
570 *
571 * So here we show a write to offset @o. The data that is to be
572 * written is in a buffer that is not page aligned; it has offset
573 * @a in the page. The upl that carries the data starts in memory
574 * at @u. The associated upl starts in the file at offset @o. A
575 * transaction will always end on a page boundary (like @e above)
576 * except for the very last transaction in the group. We cannot
577 * unlock the page at @o in the associated upl until both the
578 * transaction ending at @e and the following transaction (that
579 * starts at @e) have completed.
580 */
581
582 /*
583 * We record whether or not the two UPLs are aligned as the mark
584 * bit in the first page of @upl.
585 */
586 upl_page_info_t *pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
587 bool is_unaligned = upl_page_get_mark(pl, 0);
588
589 if (is_unaligned) {
590 upl_page_info_t *assoc_pl = UPL_GET_INTERNAL_PAGE_LIST(associated_upl);
591
592 upl_offset_t upl_end = upl_offset + size;
593 assert(upl_end >= PAGE_SIZE);
594
595 upl_size_t assoc_upl_size = upl_get_size(associated_upl);
596
597 /*
598 * In the very first transaction in the group, upl_offset will
599 * not be page aligned, but after that it will be and in that
600 * case we want the preceding page in the associated UPL hence
601 * the minus one.
602 */
603 assert(upl_offset);
604 if (upl_offset)
605 upl_offset = trunc_page_32(upl_offset - 1);
606
607 lck_mtx_lock_spin(&iostate->io_mtxp);
608
609 // Look at the first page...
610 if (upl_offset
611 && !upl_page_get_mark(assoc_pl, upl_offset >> PAGE_SHIFT)) {
612 /*
613 * The first page isn't marked so let another transaction
614 * completion handle it.
615 */
616 upl_page_set_mark(assoc_pl, upl_offset >> PAGE_SHIFT, true);
617 upl_offset += PAGE_SIZE;
618 }
619
620 // And now the last page...
621
622 /*
623 * This needs to be > rather than >= because if it's equal, it
624 * means there's another transaction that is sharing the last
625 * page.
626 */
627 if (upl_end > assoc_upl_size)
628 upl_end = assoc_upl_size;
629 else {
630 upl_end = trunc_page_32(upl_end);
631 const int last_pg = (upl_end >> PAGE_SHIFT) - 1;
632
633 if (!upl_page_get_mark(assoc_pl, last_pg)) {
634 /*
635 * The last page isn't marked so mark the page and let another
636 * transaction completion handle it.
637 */
638 upl_page_set_mark(assoc_pl, last_pg, true);
639 upl_end -= PAGE_SIZE;
640 }
641 }
642
643 lck_mtx_unlock(&iostate->io_mtxp);
644
645 #if 0
646 printf("2: %d %d\n", upl_offset, upl_end);
647 #endif
648
649 if (upl_end <= upl_offset)
650 return;
651
652 size = upl_end - upl_offset;
653 } else {
654 assert(!(upl_offset & PAGE_MASK));
655 assert(!(size & PAGE_MASK));
656 }
657
658 boolean_t empty;
659
660 /*
661 * We can unlock these pages now and as this is for a
662 * direct/uncached write, we want to dump the pages too.
663 */
664 kern_return_t kr = upl_abort_range(associated_upl, upl_offset, size,
665 UPL_ABORT_DUMP_PAGES, &empty);
666
667 assert(!kr);
668
669 if (!kr && empty) {
670 upl_set_associated_upl(upl, NULL);
671 upl_deallocate(associated_upl);
672 }
673 }
674
675 static int
676 cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags, vnode_t vp)
677 {
678 int upl_abort_code = 0;
679 int page_in = 0;
680 int page_out = 0;
681
682 if ((io_flags & (B_PHYS | B_CACHE)) == (B_PHYS | B_CACHE))
683 /*
684 * direct write of any flavor, or a direct read that wasn't aligned
685 */
686 ubc_upl_commit_range(upl, upl_offset, abort_size, UPL_COMMIT_FREE_ON_EMPTY);
687 else {
688 if (io_flags & B_PAGEIO) {
689 if (io_flags & B_READ)
690 page_in = 1;
691 else
692 page_out = 1;
693 }
694 if (io_flags & B_CACHE)
695 /*
696 * leave pages in the cache unchanged on error
697 */
698 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
699 else if (page_out && ((error != ENXIO) || vnode_isswap(vp)))
700 /*
701 * transient error... leave pages unchanged
702 */
703 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
704 else if (page_in)
705 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
706 else
707 upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
708
709 ubc_upl_abort_range(upl, upl_offset, abort_size, upl_abort_code);
710 }
711 return (upl_abort_code);
712 }
713
714
715 static int
716 cluster_iodone(buf_t bp, void *callback_arg)
717 {
718 int b_flags;
719 int error;
720 int total_size;
721 int total_resid;
722 int upl_offset;
723 int zero_offset;
724 int pg_offset = 0;
725 int commit_size = 0;
726 int upl_flags = 0;
727 int transaction_size = 0;
728 upl_t upl;
729 buf_t cbp;
730 buf_t cbp_head;
731 buf_t cbp_next;
732 buf_t real_bp;
733 vnode_t vp;
734 struct clios *iostate;
735 boolean_t transaction_complete = FALSE;
736
737 __IGNORE_WCASTALIGN(cbp_head = (buf_t)(bp->b_trans_head));
738
739 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_START,
740 cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
741
742 if (cbp_head->b_trans_next || !(cbp_head->b_flags & B_EOT)) {
743 lck_mtx_lock_spin(cl_transaction_mtxp);
744
745 bp->b_flags |= B_TDONE;
746
747 for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
748 /*
749 * all I/O requests that are part of this transaction
750 * have to complete before we can process it
751 */
752 if ( !(cbp->b_flags & B_TDONE)) {
753
754 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
755 cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0);
756
757 lck_mtx_unlock(cl_transaction_mtxp);
758
759 return 0;
760 }
761
762 if (cbp->b_trans_next == CLUSTER_IO_WAITING) {
763 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
764 cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0);
765
766 lck_mtx_unlock(cl_transaction_mtxp);
767 wakeup(cbp);
768
769 return 0;
770 }
771
772 if (cbp->b_flags & B_EOT)
773 transaction_complete = TRUE;
774 }
775 lck_mtx_unlock(cl_transaction_mtxp);
776
777 if (transaction_complete == FALSE) {
778 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
779 cbp_head, 0, 0, 0, 0);
780 return 0;
781 }
782 }
783 error = 0;
784 total_size = 0;
785 total_resid = 0;
786
787 cbp = cbp_head;
788 vp = cbp->b_vp;
789 upl_offset = cbp->b_uploffset;
790 upl = cbp->b_upl;
791 b_flags = cbp->b_flags;
792 real_bp = cbp->b_real_bp;
793 zero_offset = cbp->b_validend;
794 iostate = (struct clios *)cbp->b_iostate;
795
796 if (real_bp)
797 real_bp->b_dev = cbp->b_dev;
798
799 while (cbp) {
800 if ((cbp->b_flags & B_ERROR) && error == 0)
801 error = cbp->b_error;
802
803 total_resid += cbp->b_resid;
804 total_size += cbp->b_bcount;
805
806 cbp_next = cbp->b_trans_next;
807
808 if (cbp_next == NULL)
809 /*
810 * compute the overall size of the transaction
811 * in case we created one that has 'holes' in it
812 * 'total_size' represents the amount of I/O we
813 * did, not the span of the transaction with respect to the UPL
814 */
815 transaction_size = cbp->b_uploffset + cbp->b_bcount - upl_offset;
816
817 if (cbp != cbp_head)
818 free_io_buf(cbp);
819
820 cbp = cbp_next;
821 }
822
823 if (ISSET(b_flags, B_COMMIT_UPL)) {
824 cluster_handle_associated_upl(iostate,
825 cbp_head->b_upl,
826 upl_offset,
827 transaction_size);
828 }
829
830 if (error == 0 && total_resid)
831 error = EIO;
832
833 if (error == 0) {
834 int (*cliodone_func)(buf_t, void *) = (int (*)(buf_t, void *))(cbp_head->b_cliodone);
835
836 if (cliodone_func != NULL) {
837 cbp_head->b_bcount = transaction_size;
838
839 error = (*cliodone_func)(cbp_head, callback_arg);
840 }
841 }
842 if (zero_offset)
843 cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp);
844
845 free_io_buf(cbp_head);
846
847 if (iostate) {
848 int need_wakeup = 0;
849
850 /*
851 * someone has issued multiple I/Os asynchronously
852 * and is waiting for them to complete (streaming)
853 */
854 lck_mtx_lock_spin(&iostate->io_mtxp);
855
856 if (error && iostate->io_error == 0)
857 iostate->io_error = error;
858
859 iostate->io_completed += total_size;
860
861 if (iostate->io_wanted) {
862 /*
863 * someone is waiting for the state of
864 * this io stream to change
865 */
866 iostate->io_wanted = 0;
867 need_wakeup = 1;
868 }
869 lck_mtx_unlock(&iostate->io_mtxp);
870
871 if (need_wakeup)
872 wakeup((caddr_t)&iostate->io_wanted);
873 }
874
875 if (b_flags & B_COMMIT_UPL) {
876 pg_offset = upl_offset & PAGE_MASK;
877 commit_size = (pg_offset + transaction_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
878
879 if (error)
880 upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, commit_size, error, b_flags, vp);
881 else {
882 upl_flags = UPL_COMMIT_FREE_ON_EMPTY;
883
884 if ((b_flags & B_PHYS) && (b_flags & B_READ))
885 upl_flags |= UPL_COMMIT_SET_DIRTY;
886
887 if (b_flags & B_AGE)
888 upl_flags |= UPL_COMMIT_INACTIVATE;
889
890 ubc_upl_commit_range(upl, upl_offset - pg_offset, commit_size, upl_flags);
891 }
892 }
893 if (real_bp) {
894 if (error) {
895 real_bp->b_flags |= B_ERROR;
896 real_bp->b_error = error;
897 }
898 real_bp->b_resid = total_resid;
899
900 buf_biodone(real_bp);
901 }
902 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
903 upl, upl_offset - pg_offset, commit_size, (error << 24) | upl_flags, 0);
904
905 return (error);
906 }
907
908
909 uint32_t
910 cluster_throttle_io_limit(vnode_t vp, uint32_t *limit)
911 {
912 if (cluster_is_throttled(vp)) {
913 *limit = THROTTLE_MAX_IOSIZE;
914 return 1;
915 }
916 return 0;
917 }
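/*
 * Illustrative usage sketch (editorial addition): a filesystem issuing its
 * own large transfers can ask whether the vnode is currently throttled and,
 * if so, cap each I/O at the returned limit.  Variable names are
 * hypothetical.
 *
 *	uint32_t limit;
 *
 *	if (cluster_throttle_io_limit(vp, &limit))
 *		io_size = min(io_size, limit);	// vnode is throttled; keep individual I/Os small
 */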
918
919
920 void
921 cluster_zero(upl_t upl, upl_offset_t upl_offset, int size, buf_t bp)
922 {
923
924 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_START,
925 upl_offset, size, bp, 0, 0);
926
927 if (bp == NULL || bp->b_datap == 0) {
928 upl_page_info_t *pl;
929 addr64_t zero_addr;
930
931 pl = ubc_upl_pageinfo(upl);
932
933 if (upl_device_page(pl) == TRUE) {
934 zero_addr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + upl_offset;
935
936 bzero_phys_nc(zero_addr, size);
937 } else {
938 while (size) {
939 int page_offset;
940 int page_index;
941 int zero_cnt;
942
943 page_index = upl_offset / PAGE_SIZE;
944 page_offset = upl_offset & PAGE_MASK;
945
946 zero_addr = ((addr64_t)upl_phys_page(pl, page_index) << PAGE_SHIFT) + page_offset;
947 zero_cnt = min(PAGE_SIZE - page_offset, size);
948
949 bzero_phys(zero_addr, zero_cnt);
950
951 size -= zero_cnt;
952 upl_offset += zero_cnt;
953 }
954 }
955 } else
956 bzero((caddr_t)((vm_offset_t)bp->b_datap + upl_offset), size);
957
958 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_END,
959 upl_offset, size, 0, 0, 0);
960 }
961
962
963 static void
964 cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset)
965 {
966 cbp_head->b_validend = zero_offset;
967 cbp_tail->b_flags |= B_EOT;
968 }
969
970 static void
971 cluster_wait_IO(buf_t cbp_head, int async)
972 {
973 buf_t cbp;
974
975 if (async) {
976 /*
977 * Async callback completion will not normally generate a
978 * wakeup upon I/O completion. To get woken up, we set
979 * b_trans_next (which is safe for us to modify) on the last
980 * buffer to CLUSTER_IO_WAITING so that cluster_iodone knows
981 * to wake us up when all buffers that are part of this transaction
982 * are completed. This is done under the umbrella of
983 * cl_transaction_mtxp which is also taken in cluster_iodone.
984 */
985 bool done = true;
986 buf_t last = NULL;
987
988 lck_mtx_lock_spin(cl_transaction_mtxp);
989
990 for (cbp = cbp_head; cbp; last = cbp, cbp = cbp->b_trans_next) {
991 if (!ISSET(cbp->b_flags, B_TDONE))
992 done = false;
993 }
994
995 if (!done) {
996 last->b_trans_next = CLUSTER_IO_WAITING;
997
998 DTRACE_IO1(wait__start, buf_t, last);
999 do {
1000 msleep(last, cl_transaction_mtxp, PSPIN | (PRIBIO+1), "cluster_wait_IO", NULL);
1001
1002 /*
1003 * We should only have been woken up if all the
1004 * buffers are completed, but just in case...
1005 */
1006 done = true;
1007 for (cbp = cbp_head; cbp != CLUSTER_IO_WAITING; cbp = cbp->b_trans_next) {
1008 if (!ISSET(cbp->b_flags, B_TDONE)) {
1009 done = false;
1010 break;
1011 }
1012 }
1013 } while (!done);
1014 DTRACE_IO1(wait__done, buf_t, last);
1015
1016 last->b_trans_next = NULL;
1017 }
1018
1019 lck_mtx_unlock(cl_transaction_mtxp);
1020 } else { // !async
1021 for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next)
1022 buf_biowait(cbp);
1023 }
1024 }
1025
1026 static void
1027 cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait)
1028 {
1029 buf_t cbp;
1030 int error;
1031 boolean_t isswapout = FALSE;
1032
1033 /*
1034 * cluster_complete_transaction will
1035 * only be called if we've issued a complete chain in synchronous mode
1036 * or, we've already done a cluster_wait_IO on an incomplete chain
1037 */
1038 if (needwait) {
1039 for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next)
1040 buf_biowait(cbp);
1041 }
1042 /*
1043 * we've already waited on all of the I/Os in this transaction,
1044 * so mark all of the buf_t's in this transaction as B_TDONE
1045 * so that cluster_iodone sees the transaction as completed
1046 */
1047 for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next)
1048 cbp->b_flags |= B_TDONE;
1049 cbp = *cbp_head;
1050
1051 if ((flags & (CL_ASYNC | CL_PAGEOUT)) == CL_PAGEOUT && vnode_isswap(cbp->b_vp))
1052 isswapout = TRUE;
1053
1054 error = cluster_iodone(cbp, callback_arg);
1055
1056 if ( !(flags & CL_ASYNC) && error && *retval == 0) {
1057 if (((flags & (CL_PAGEOUT | CL_KEEPCACHED)) != CL_PAGEOUT) || (error != ENXIO))
1058 *retval = error;
1059 else if (isswapout == TRUE)
1060 *retval = error;
1061 }
1062 *cbp_head = (buf_t)NULL;
1063 }
1064
1065
1066 static int
1067 cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
1068 int flags, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
1069 {
1070 buf_t cbp;
1071 u_int size;
1072 u_int io_size;
1073 int io_flags;
1074 int bmap_flags;
1075 int error = 0;
1076 int retval = 0;
1077 buf_t cbp_head = NULL;
1078 buf_t cbp_tail = NULL;
1079 int trans_count = 0;
1080 int max_trans_count;
1081 u_int pg_count;
1082 int pg_offset;
1083 u_int max_iosize;
1084 u_int max_vectors;
1085 int priv;
1086 int zero_offset = 0;
1087 int async_throttle = 0;
1088 mount_t mp;
1089 vm_offset_t upl_end_offset;
1090 boolean_t need_EOT = FALSE;
1091
1092 /*
1093 * we currently don't support buffers larger than a page
1094 */
1095 if (real_bp && non_rounded_size > PAGE_SIZE)
1096 panic("%s(): Called with real buffer of size %d bytes which "
1097 "is greater than the maximum allowed size of "
1098 "%d bytes (the system PAGE_SIZE).\n",
1099 __FUNCTION__, non_rounded_size, PAGE_SIZE);
1100
1101 mp = vp->v_mount;
1102
1103 /*
1104 * we don't want to do any funny rounding of the size for IO requests
1105 * coming through the DIRECT or CONTIGUOUS paths... those pages don't
1106 * belong to us... we can't extend (nor do we need to) the I/O to fill
1107 * out a page
1108 */
1109 if (mp->mnt_devblocksize > 1 && !(flags & (CL_DEV_MEMORY | CL_DIRECT_IO))) {
1110 /*
1111 * round the requested size up so that this I/O ends on a
1112 * page boundary in case this is a 'write'... if the filesystem
1113 * has blocks allocated to back the page beyond the EOF, we want to
1114 * make sure to write out the zero's that are sitting beyond the EOF
1115 * so that in case the filesystem doesn't explicitly zero this area
1116 * if a hole is created via a lseek/write beyond the current EOF,
1117 * it will return zeros when it's read back from the disk. If the
1118 * physical allocation doesn't extend for the whole page, we'll
1119 * only write/read from the disk up to the end of this allocation
1120 * via the extent info returned from the VNOP_BLOCKMAP call.
1121 */
1122 pg_offset = upl_offset & PAGE_MASK;
1123
1124 size = (((non_rounded_size + pg_offset) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - pg_offset;
1125 } else {
1126 /*
1127 * anyone advertising a blocksize of 1 byte probably
1128 * can't deal with us rounding up the request size...
1129 * AFP is one such filesystem/device
1130 */
1131 size = non_rounded_size;
1132 }
1133 upl_end_offset = upl_offset + size;
1134
1135 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START, (int)f_offset, size, upl_offset, flags, 0);
1136
1137 /*
1138 * Set the maximum transaction size to the maximum desired number of
1139 * buffers.
1140 */
1141 max_trans_count = 8;
1142 if (flags & CL_DEV_MEMORY)
1143 max_trans_count = 16;
1144
1145 if (flags & CL_READ) {
1146 io_flags = B_READ;
1147 bmap_flags = VNODE_READ;
1148
1149 max_iosize = mp->mnt_maxreadcnt;
1150 max_vectors = mp->mnt_segreadcnt;
1151 } else {
1152 io_flags = B_WRITE;
1153 bmap_flags = VNODE_WRITE;
1154
1155 max_iosize = mp->mnt_maxwritecnt;
1156 max_vectors = mp->mnt_segwritecnt;
1157 }
1158 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_NONE, max_iosize, max_vectors, mp->mnt_devblocksize, 0, 0);
1159
1160 /*
1161 * make sure the maximum iosize is a
1162 * multiple of the page size
1163 */
1164 max_iosize &= ~PAGE_MASK;
1165
1166 /*
1167 * Ensure the maximum iosize is sensible.
1168 */
1169 if (!max_iosize)
1170 max_iosize = PAGE_SIZE;
1171
1172 if (flags & CL_THROTTLE) {
1173 if ( !(flags & CL_PAGEOUT) && cluster_is_throttled(vp)) {
1174 if (max_iosize > THROTTLE_MAX_IOSIZE)
1175 max_iosize = THROTTLE_MAX_IOSIZE;
1176 async_throttle = THROTTLE_MAXCNT;
1177 } else {
1178 if ( (flags & CL_DEV_MEMORY) )
1179 async_throttle = IO_SCALE(vp, VNODE_ASYNC_THROTTLE);
1180 else {
1181 u_int max_cluster;
1182 u_int max_cluster_size;
1183 u_int scale;
1184
1185 if (vp->v_mount->mnt_minsaturationbytecount) {
1186 max_cluster_size = vp->v_mount->mnt_minsaturationbytecount;
1187
1188 scale = 1;
1189 } else {
1190 max_cluster_size = MAX_CLUSTER_SIZE(vp);
1191
1192 if ((vp->v_mount->mnt_kern_flag & MNTK_SSD) && !ignore_is_ssd)
1193 scale = WRITE_THROTTLE_SSD;
1194 else
1195 scale = WRITE_THROTTLE;
1196 }
1197 if (max_iosize > max_cluster_size)
1198 max_cluster = max_cluster_size;
1199 else
1200 max_cluster = max_iosize;
1201
1202 if (size < max_cluster)
1203 max_cluster = size;
1204
1205 if (flags & CL_CLOSE)
1206 scale += MAX_CLUSTERS;
1207
1208 async_throttle = min(IO_SCALE(vp, VNODE_ASYNC_THROTTLE), ((scale * max_cluster_size) / max_cluster) - 1);
1209 }
1210 }
1211 }
1212 if (flags & CL_AGE)
1213 io_flags |= B_AGE;
1214 if (flags & (CL_PAGEIN | CL_PAGEOUT))
1215 io_flags |= B_PAGEIO;
1216 if (flags & (CL_IOSTREAMING))
1217 io_flags |= B_IOSTREAMING;
1218 if (flags & CL_COMMIT)
1219 io_flags |= B_COMMIT_UPL;
1220 if (flags & CL_DIRECT_IO)
1221 io_flags |= B_PHYS;
1222 if (flags & (CL_PRESERVE | CL_KEEPCACHED))
1223 io_flags |= B_CACHE;
1224 if (flags & CL_PASSIVE)
1225 io_flags |= B_PASSIVE;
1226 if (flags & CL_ENCRYPTED)
1227 io_flags |= B_ENCRYPTED_IO;
1228
1229 if (vp->v_flag & VSYSTEM)
1230 io_flags |= B_META;
1231
1232 if ((flags & CL_READ) && ((upl_offset + non_rounded_size) & PAGE_MASK) && (!(flags & CL_NOZERO))) {
1233 /*
1234 * then we are going to end up
1235 * with a page that we can't complete (the file size wasn't a multiple
1236 * of PAGE_SIZE and we're trying to read to the end of the file),
1237 * so we'll go ahead and zero out the portion of the page we can't
1238 * read in from the file
1239 */
1240 zero_offset = upl_offset + non_rounded_size;
1241 } else if (!ISSET(flags, CL_READ) && ISSET(flags, CL_DIRECT_IO)) {
1242 assert(ISSET(flags, CL_COMMIT));
1243
1244 // For a direct/uncached write, we need to lock pages...
1245
1246 upl_t cached_upl;
1247
1248 /*
1249 * Create a UPL to lock the pages in the cache whilst the
1250 * write is in progress.
1251 */
1252 ubc_create_upl(vp, f_offset, non_rounded_size, &cached_upl,
1253 NULL, UPL_SET_LITE);
1254
1255 /*
1256 * Attach this UPL to the other UPL so that we can find it
1257 * later.
1258 */
1259 upl_set_associated_upl(upl, cached_upl);
1260
1261 if (upl_offset & PAGE_MASK) {
1262 /*
1263 * The two UPLs are not aligned, so mark the first page in
1264 * @upl so that cluster_handle_associated_upl can handle
1265 * it accordingly.
1266 */
1267 upl_page_info_t *pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
1268 upl_page_set_mark(pl, 0, true);
1269 }
1270 }
1271
1272 while (size) {
1273 daddr64_t blkno;
1274 daddr64_t lblkno;
1275 u_int io_size_wanted;
1276 size_t io_size_tmp;
1277
1278 if (size > max_iosize)
1279 io_size = max_iosize;
1280 else
1281 io_size = size;
1282
1283 io_size_wanted = io_size;
1284 io_size_tmp = (size_t)io_size;
1285
1286 if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &io_size_tmp, NULL, bmap_flags, NULL)))
1287 break;
1288
1289 if (io_size_tmp > io_size_wanted)
1290 io_size = io_size_wanted;
1291 else
1292 io_size = (u_int)io_size_tmp;
1293
1294 if (real_bp && (real_bp->b_blkno == real_bp->b_lblkno))
1295 real_bp->b_blkno = blkno;
1296
1297 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 24)) | DBG_FUNC_NONE,
1298 (int)f_offset, (int)(blkno>>32), (int)blkno, io_size, 0);
1299
1300 if (io_size == 0) {
1301 /*
1302 * vnop_blockmap didn't return an error... however, it did
1303 * return an extent size of 0 which means we can't
1304 * make forward progress on this I/O... a hole in the
1305 * file would be returned as a blkno of -1 with a non-zero io_size
1306 * a real extent is returned with a blkno != -1 and a non-zero io_size
1307 */
1308 error = EINVAL;
1309 break;
1310 }
1311 if ( !(flags & CL_READ) && blkno == -1) {
1312 off_t e_offset;
1313 int pageout_flags;
1314
1315 if (upl_get_internal_vectorupl(upl))
1316 panic("Vector UPLs should not take this code-path\n");
1317 /*
1318 * we're writing into a 'hole'
1319 */
1320 if (flags & CL_PAGEOUT) {
1321 /*
1322 * if we got here via cluster_pageout
1323 * then just error the request and return
1324 * the 'hole' should already have been covered
1325 */
1326 error = EINVAL;
1327 break;
1328 }
1329 /*
1330 * we can get here if the cluster code happens to
1331 * pick up a page that was dirtied via mmap vs
1332 * a 'write' and the page targets a 'hole'...
1333 * i.e. the writes to the cluster were sparse
1334 * and the file was being written for the first time
1335 *
1336 * we can also get here if the filesystem supports
1337 * 'holes' that are less than PAGE_SIZE... because
1338 * we can't know if the range in the page that covers
1339 * the 'hole' has been dirtied via an mmap or not,
1340 * we have to assume the worst and try to push the
1341 * entire page to storage.
1342 *
1343 * Try paging out the page individually before
1344 * giving up entirely and dumping it (the pageout
1345 * path will ensure that the zero extent accounting
1346 * has been taken care of before we get back into cluster_io)
1347 *
1348 * go direct to vnode_pageout so that we don't have to
1349 * unbusy the page from the UPL... we used to do this
1350 * so that we could call ubc_msync, but that results
1351 * in a potential deadlock if someone else races us to acquire
1352 * that page and wins and in addition needs one of the pages
1353 * we're continuing to hold in the UPL
1354 */
1355 pageout_flags = UPL_MSYNC | UPL_VNODE_PAGER | UPL_NESTED_PAGEOUT;
1356
1357 if ( !(flags & CL_ASYNC))
1358 pageout_flags |= UPL_IOSYNC;
1359 if ( !(flags & CL_COMMIT))
1360 pageout_flags |= UPL_NOCOMMIT;
1361
1362 if (cbp_head) {
1363 buf_t last_cbp;
1364
1365 /*
1366 * first we have to wait for the current outstanding I/Os
1367 * to complete... EOT hasn't been set yet on this transaction
1368 * so the pages won't be released just because all of the current
1369 * I/O linked to this transaction has completed...
1370 */
1371 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
1372
1373 /*
1374 * we've got a transaction that
1375 * includes the page we're about to push out through vnode_pageout...
1376 * find the last bp in the list which will be the one that
1377 * includes the head of this page and round its iosize down
1378 * to a page boundary...
1379 */
1380 for (last_cbp = cbp = cbp_head; cbp->b_trans_next; cbp = cbp->b_trans_next)
1381 last_cbp = cbp;
1382
1383 cbp->b_bcount &= ~PAGE_MASK;
1384
1385 if (cbp->b_bcount == 0) {
1386 /*
1387 * this buf no longer has any I/O associated with it
1388 */
1389 free_io_buf(cbp);
1390
1391 if (cbp == cbp_head) {
1392 /*
1393 * the buf we just freed was the only buf in
1394 * this transaction... so there's no I/O to do
1395 */
1396 cbp_head = NULL;
1397 } else {
1398 /*
1399 * remove the buf we just freed from
1400 * the transaction list
1401 */
1402 last_cbp->b_trans_next = NULL;
1403 cbp_tail = last_cbp;
1404 }
1405 }
1406 if (cbp_head) {
1407 /*
1408 * there was more to the current transaction
1409 * than just the page we are pushing out via vnode_pageout...
1410 * mark it as finished and complete it... we've already
1411 * waited for the I/Os to complete above in the call to cluster_wait_IO
1412 */
1413 cluster_EOT(cbp_head, cbp_tail, 0);
1414
1415 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
1416
1417 trans_count = 0;
1418 }
1419 }
1420 if (vnode_pageout(vp, upl, trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) {
1421 error = EINVAL;
1422 }
1423 e_offset = round_page_64(f_offset + 1);
1424 io_size = e_offset - f_offset;
1425
1426 f_offset += io_size;
1427 upl_offset += io_size;
1428
1429 if (size >= io_size)
1430 size -= io_size;
1431 else
1432 size = 0;
1433 /*
1434 * keep track of how much of the original request
1435 * that we've actually completed... non_rounded_size
1436 * may go negative due to us rounding the request
1437 * to a page size multiple (i.e. size > non_rounded_size)
1438 */
1439 non_rounded_size -= io_size;
1440
1441 if (non_rounded_size <= 0) {
1442 /*
1443 * we've transferred all of the data in the original
1444 * request, but we were unable to complete the tail
1445 * of the last page because the file didn't have
1446 * an allocation to back that portion... this is ok.
1447 */
1448 size = 0;
1449 }
1450 if (error) {
1451 if (size == 0)
1452 flags &= ~CL_COMMIT;
1453 break;
1454 }
1455 continue;
1456 }
1457 lblkno = (daddr64_t)(f_offset / 0x1000);
1458 /*
1459 * we have now figured out how much I/O we can do - this is in 'io_size'
1460 * pg_offset is the starting point in the first page for the I/O
1461 * pg_count is the number of full and partial pages that 'io_size' encompasses
1462 */
1463 pg_offset = upl_offset & PAGE_MASK;
1464
1465 if (flags & CL_DEV_MEMORY) {
1466 /*
1467 * treat physical requests as one 'giant' page
1468 */
1469 pg_count = 1;
1470 } else
1471 pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;
1472
1473 if ((flags & CL_READ) && blkno == -1) {
1474 vm_offset_t commit_offset;
1475 int bytes_to_zero;
1476 int complete_transaction_now = 0;
1477
1478 /*
1479 * if we're reading and blkno == -1, then we've got a
1480 * 'hole' in the file that we need to deal with by zeroing
1481 * out the affected area in the upl
1482 */
1483 if (io_size >= (u_int)non_rounded_size) {
1484 /*
1485 * if this upl contains the EOF and it is not a multiple of PAGE_SIZE
1486 * then 'zero_offset' will be non-zero
1487 * if the 'hole' returned by vnop_blockmap extends all the way to the eof
1488 * (indicated by the io_size finishing off the I/O request for this UPL)
1489 * then we're not going to issue an I/O for the
1490 * last page in this upl... we need to zero both the hole and the tail
1491 * of the page beyond the EOF, since the delayed zero-fill won't kick in
1492 */
1493 bytes_to_zero = non_rounded_size;
1494 if (!(flags & CL_NOZERO))
1495 bytes_to_zero = (((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset;
1496
1497 zero_offset = 0;
1498 } else
1499 bytes_to_zero = io_size;
1500
1501 pg_count = 0;
1502
1503 cluster_zero(upl, upl_offset, bytes_to_zero, real_bp);
1504
1505 if (cbp_head) {
1506 int pg_resid;
1507
1508 /*
1509 * if there is a current I/O chain pending
1510 * then the first page of the group we just zero'd
1511 * will be handled by the I/O completion if the zero
1512 * fill started in the middle of the page
1513 */
1514 commit_offset = (upl_offset + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1515
1516 pg_resid = commit_offset - upl_offset;
1517
1518 if (bytes_to_zero >= pg_resid) {
1519 /*
1520 * the last page of the current I/O
1521 * has been completed...
1522 * compute the number of fully zero'd
1523 * pages that are beyond it
1524 * plus the last page if it's partial
1525 * and we have no more I/O to issue...
1526 * otherwise a partial page is left
1527 * to begin the next I/O
1528 */
1529 if ((int)io_size >= non_rounded_size)
1530 pg_count = (bytes_to_zero - pg_resid + (PAGE_SIZE - 1)) / PAGE_SIZE;
1531 else
1532 pg_count = (bytes_to_zero - pg_resid) / PAGE_SIZE;
1533
1534 complete_transaction_now = 1;
1535 }
1536 } else {
1537 /*
1538 * no pending I/O to deal with
1539 * so, commit all of the fully zero'd pages
1540 * plus the last page if it's partial
1541 * and we have no more I/O to issue...
1542 * otherwise a partial page is left
1543 * to begin the next I/O
1544 */
1545 if ((int)io_size >= non_rounded_size)
1546 pg_count = (pg_offset + bytes_to_zero + (PAGE_SIZE - 1)) / PAGE_SIZE;
1547 else
1548 pg_count = (pg_offset + bytes_to_zero) / PAGE_SIZE;
1549
1550 commit_offset = upl_offset & ~PAGE_MASK;
1551 }
1552
1553 // Associated UPL is currently only used in the direct write path
1554 assert(!upl_associated_upl(upl));
1555
1556 if ( (flags & CL_COMMIT) && pg_count) {
1557 ubc_upl_commit_range(upl, commit_offset, pg_count * PAGE_SIZE,
1558 UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
1559 }
1560 upl_offset += io_size;
1561 f_offset += io_size;
1562 size -= io_size;
1563
1564 /*
1565 * keep track of how much of the original request
1566 * that we've actually completed... non_rounded_size
1567 * may go negative due to us rounding the request
1568 * to a page size multiple (i.e. size > non_rounded_size)
1569 */
1570 non_rounded_size -= io_size;
1571
1572 if (non_rounded_size <= 0) {
1573 /*
1574 * we've transferred all of the data in the original
1575 * request, but we were unable to complete the tail
1576 * of the last page because the file didn't have
1577 * an allocation to back that portion... this is ok.
1578 */
1579 size = 0;
1580 }
1581 if (cbp_head && (complete_transaction_now || size == 0)) {
1582 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
1583
1584 cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0);
1585
1586 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
1587
1588 trans_count = 0;
1589 }
1590 continue;
1591 }
1592 if (pg_count > max_vectors) {
1593 if (((pg_count - max_vectors) * PAGE_SIZE) > io_size) {
1594 io_size = PAGE_SIZE - pg_offset;
1595 pg_count = 1;
1596 } else {
1597 io_size -= (pg_count - max_vectors) * PAGE_SIZE;
1598 pg_count = max_vectors;
1599 }
1600 }
1601 /*
1602 * If the transaction is going to reach the maximum number of
1603 * desired elements, truncate the i/o to the nearest page so
1604 * that the actual i/o is initiated after this buffer is
1605 * created and added to the i/o chain.
1606 *
1607 * I/O directed to physically contiguous memory
1608 * doesn't have a requirement to make sure we 'fill' a page
1609 */
1610 if ( !(flags & CL_DEV_MEMORY) && trans_count >= max_trans_count &&
1611 ((upl_offset + io_size) & PAGE_MASK)) {
1612 vm_offset_t aligned_ofs;
1613
1614 aligned_ofs = (upl_offset + io_size) & ~PAGE_MASK;
1615 /*
1616 * If the io_size does not actually finish off even a
1617 * single page we have to keep adding buffers to the
1618 * transaction despite having reached the desired limit.
1619 *
1620 * Eventually we get here with the page being finished
1621 * off (and exceeded) and then we truncate the size of
1622 * this i/o request so that it is page aligned so that
1623 * we can finally issue the i/o on the transaction.
1624 */
1625 if (aligned_ofs > upl_offset) {
1626 io_size = aligned_ofs - upl_offset;
1627 pg_count--;
1628 }
1629 }
1630
1631 if ( !(mp->mnt_kern_flag & MNTK_VIRTUALDEV))
1632 /*
1633 * if we're not targeting a virtual device i.e. a disk image
1634 * it's safe to dip into the reserve pool since real devices
1635 * can complete this I/O request without requiring additional
1636 * bufs from the alloc_io_buf pool
1637 */
1638 priv = 1;
1639 else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT))
1640 /*
1641 * Throttle the speculative IO
1642 */
1643 priv = 0;
1644 else
1645 priv = 1;
1646
1647 cbp = alloc_io_buf(vp, priv);
1648
1649 if (flags & CL_PAGEOUT) {
1650 u_int i;
1651
1652 /*
1653 * since blocks are in offsets of 0x1000, scale
1654 * iteration to (PAGE_SIZE * pg_count) of blks.
1655 */
1656 for (i = 0; i < (PAGE_SIZE * pg_count)/0x1000; i++) {
1657 if (buf_invalblkno(vp, lblkno + i, 0) == EBUSY)
1658 panic("BUSY bp found in cluster_io");
1659 }
1660 }
1661 if (flags & CL_ASYNC) {
1662 if (buf_setcallback(cbp, (void *)cluster_iodone, callback_arg))
1663 panic("buf_setcallback failed\n");
1664 }
1665 cbp->b_cliodone = (void *)callback;
1666 cbp->b_flags |= io_flags;
1667 if (flags & CL_NOCACHE)
1668 cbp->b_attr.ba_flags |= BA_NOCACHE;
1669
1670 cbp->b_lblkno = lblkno;
1671 cbp->b_blkno = blkno;
1672 cbp->b_bcount = io_size;
1673
1674 if (buf_setupl(cbp, upl, upl_offset))
1675 panic("buf_setupl failed\n");
1676 #if CONFIG_IOSCHED
1677 upl_set_blkno(upl, upl_offset, io_size, blkno);
1678 #endif
1679 cbp->b_trans_next = (buf_t)NULL;
1680
1681 if ((cbp->b_iostate = (void *)iostate))
1682 /*
1683 * caller wants to track the state of this
1684 * io... bump the amount issued against this stream
1685 */
1686 iostate->io_issued += io_size;
1687
1688 if (flags & CL_READ) {
1689 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 26)) | DBG_FUNC_NONE,
1690 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
1691 }
1692 else {
1693 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 27)) | DBG_FUNC_NONE,
1694 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
1695 }
1696
1697 if (cbp_head) {
1698 cbp_tail->b_trans_next = cbp;
1699 cbp_tail = cbp;
1700 } else {
1701 cbp_head = cbp;
1702 cbp_tail = cbp;
1703
1704 if ( (cbp_head->b_real_bp = real_bp) )
1705 real_bp = (buf_t)NULL;
1706 }
1707 *(buf_t *)(&cbp->b_trans_head) = cbp_head;
1708
1709 trans_count++;
1710
1711 upl_offset += io_size;
1712 f_offset += io_size;
1713 size -= io_size;
1714 /*
1715 * keep track of how much of the original request
1716 * that we've actually completed... non_rounded_size
1717 * may go negative due to us rounding the request
1718 * to a page size multiple (i.e. size > non_rounded_size)
1719 */
1720 non_rounded_size -= io_size;
1721
1722 if (non_rounded_size <= 0) {
1723 /*
1724 * we've transferred all of the data in the original
1725 * request, but we were unable to complete the tail
1726 * of the last page because the file didn't have
1727 * an allocation to back that portion... this is ok.
1728 */
1729 size = 0;
1730 }
1731 if (size == 0) {
1732 /*
1733 * we have no more I/O to issue, so go
1734 * finish the final transaction
1735 */
1736 need_EOT = TRUE;
1737 } else if ( ((flags & CL_DEV_MEMORY) || (upl_offset & PAGE_MASK) == 0) &&
1738 ((flags & CL_ASYNC) || trans_count > max_trans_count) ) {
1739 /*
1740 * I/O directed to physically contiguous memory...
1741 * which doesn't have a requirement to make sure we 'fill' a page
1742 * or...
1743 * the current I/O we've prepared fully
1744 * completes the last page in this request
1745 * and ...
1746 * it's either an ASYNC request or
1747 * we've already accumulated more than 8 I/O's into
1748 * this transaction so mark it as complete so that
1749 * it can finish asynchronously or via the cluster_complete_transaction
1750 * below if the request is synchronous
1751 */
1752 need_EOT = TRUE;
1753 }
1754 if (need_EOT == TRUE)
1755 cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0);
1756
1757 if (flags & CL_THROTTLE)
1758 (void)vnode_waitforwrites(vp, async_throttle, 0, 0, "cluster_io");
1759
1760 if ( !(io_flags & B_READ))
1761 vnode_startwrite(vp);
1762
1763 if (flags & CL_RAW_ENCRYPTED) {
1764 /*
1765 * User requested raw encrypted bytes.
1766 * Twiddle the bit in the ba_flags for the buffer
1767 */
1768 cbp->b_attr.ba_flags |= BA_RAW_ENCRYPTED_IO;
1769 }
1770
1771 (void) VNOP_STRATEGY(cbp);
1772
1773 if (need_EOT == TRUE) {
1774 if ( !(flags & CL_ASYNC))
1775 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 1);
1776
1777 need_EOT = FALSE;
1778 trans_count = 0;
1779 cbp_head = NULL;
1780 }
1781 }
1782 if (error) {
1783 int abort_size;
1784
1785 io_size = 0;
1786
1787 if (cbp_head) {
1788 /*
1789 * Wait until all of the outstanding I/O
1790 * for this partial transaction has completed
1791 */
1792 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
1793
1794 /*
1795 * Rewind the upl offset to the beginning of the
1796 * transaction.
1797 */
1798 upl_offset = cbp_head->b_uploffset;
1799 }
1800
1801 if (ISSET(flags, CL_COMMIT)) {
1802 cluster_handle_associated_upl(iostate, upl, upl_offset,
1803 upl_end_offset - upl_offset);
1804 }
1805
1806 // Free all the IO buffers in this transaction
1807 for (cbp = cbp_head; cbp;) {
1808 buf_t cbp_next;
1809
1810 size += cbp->b_bcount;
1811 io_size += cbp->b_bcount;
1812
1813 cbp_next = cbp->b_trans_next;
1814 free_io_buf(cbp);
1815 cbp = cbp_next;
1816 }
1817
1818 if (iostate) {
1819 int need_wakeup = 0;
1820
1821 /*
1822 * update the error condition for this stream
1823 * since we never really issued the io
1824 * just go ahead and adjust it back
1825 */
1826 lck_mtx_lock_spin(&iostate->io_mtxp);
1827
1828 if (iostate->io_error == 0)
1829 iostate->io_error = error;
1830 iostate->io_issued -= io_size;
1831
1832 if (iostate->io_wanted) {
1833 /*
1834 * someone is waiting for the state of
1835 * this io stream to change
1836 */
1837 iostate->io_wanted = 0;
1838 need_wakeup = 1;
1839 }
1840 lck_mtx_unlock(&iostate->io_mtxp);
1841
1842 if (need_wakeup)
1843 wakeup((caddr_t)&iostate->io_wanted);
1844 }
1845
1846 if (flags & CL_COMMIT) {
1847 int upl_flags;
1848
1849 pg_offset = upl_offset & PAGE_MASK;
1850 abort_size = (upl_end_offset - upl_offset + PAGE_MASK) & ~PAGE_MASK;
1851
1852 upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, abort_size, error, io_flags, vp);
1853
1854 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE,
1855 upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0);
1856 }
1857 if (retval == 0)
1858 retval = error;
1859 } else if (cbp_head)
1860 panic("%s(): cbp_head is not NULL.\n", __FUNCTION__);
1861
1862 if (real_bp) {
1863 /*
1864 * can get here if we either encountered an error
1865 * or we completely zero-filled the request and
1866 * no I/O was issued
1867 */
1868 if (error) {
1869 real_bp->b_flags |= B_ERROR;
1870 real_bp->b_error = error;
1871 }
1872 buf_biodone(real_bp);
1873 }
1874 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_END, (int)f_offset, size, upl_offset, retval, 0);
1875
1876 return (retval);
1877 }
1878
1879 #define reset_vector_run_state() \
1880 issueVectorUPL = vector_upl_offset = vector_upl_index = vector_upl_iosize = vector_upl_size = 0;
1881
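/*
 * reset_vector_run_state() clears the per-run accumulation state
 * (issueVectorUPL, vector_upl_offset, vector_upl_index,
 * vector_upl_iosize, vector_upl_size) used while sub-UPLs are being
 * gathered into a vector UPL... vector_cluster_io() below is where a
 * fully accumulated vector UPL is handed to cluster_io() as a single
 * request
 */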
1882 static int
1883 vector_cluster_io(vnode_t vp, upl_t vector_upl, vm_offset_t vector_upl_offset, off_t v_upl_uio_offset, int vector_upl_iosize,
1884 int io_flag, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
1885 {
1886 vector_upl_set_pagelist(vector_upl);
1887
1888 if(io_flag & CL_READ) {
1889 if(vector_upl_offset == 0 && ((vector_upl_iosize & PAGE_MASK)==0))
1890 io_flag &= ~CL_PRESERVE; /*don't zero fill*/
1891 else
1892 io_flag |= CL_PRESERVE; /*zero fill*/
1893 }
1894 return (cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, real_bp, iostate, callback, callback_arg));
1895
1896 }
1897
1898 static int
1899 cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
1900 {
1901 int pages_in_prefetch;
1902
1903 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START,
1904 (int)f_offset, size, (int)filesize, 0, 0);
1905
1906 if (f_offset >= filesize) {
1907 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
1908 (int)f_offset, 0, 0, 0, 0);
1909 return(0);
1910 }
1911 if ((off_t)size > (filesize - f_offset))
1912 size = filesize - f_offset;
1913 pages_in_prefetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
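 /*
  * e.g. (assuming the common 4 KB PAGE_SIZE): a 10000 byte request that
  * fits below EOF yields pages_in_prefetch = (10000 + 4095) / 4096 = 3,
  * which is what gets returned as the number of pages prefetched
  */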
1914
1915 advisory_read_ext(vp, filesize, f_offset, size, callback, callback_arg, bflag);
1916
1917 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
1918 (int)f_offset + size, pages_in_prefetch, 0, 1, 0);
1919
1920 return (pages_in_prefetch);
1921 }
1922
1923
1924
1925 static void
1926 cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *rap, int (*callback)(buf_t, void *), void *callback_arg,
1927 int bflag)
1928 {
1929 daddr64_t r_addr;
1930 off_t f_offset;
1931 int size_of_prefetch;
1932 u_int max_prefetch;
1933
1934
1935 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START,
1936 (int)extent->b_addr, (int)extent->e_addr, (int)rap->cl_lastr, 0, 0);
1937
1938 if (extent->b_addr == rap->cl_lastr && extent->b_addr == extent->e_addr) {
1939 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
1940 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 0, 0);
1941 return;
1942 }
1943 if (rap->cl_lastr == -1 || (extent->b_addr != rap->cl_lastr && extent->b_addr != (rap->cl_lastr + 1))) {
1944 rap->cl_ralen = 0;
1945 rap->cl_maxra = 0;
1946
1947 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
1948 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 1, 0);
1949
1950 return;
1951 }
1952 max_prefetch = MAX_PREFETCH(vp, cluster_max_io_size(vp->v_mount, CL_READ), (vp->v_mount->mnt_kern_flag & MNTK_SSD));
1953
1954 if (max_prefetch > speculative_prefetch_max)
1955 max_prefetch = speculative_prefetch_max;
1956
1957 if (max_prefetch <= PAGE_SIZE) {
1958 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
1959 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 6, 0);
1960 return;
1961 }
1962 if (extent->e_addr < rap->cl_maxra && rap->cl_ralen >= 4) {
1963 if ((rap->cl_maxra - extent->e_addr) > (rap->cl_ralen / 4)) {
1964
1965 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
1966 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 2, 0);
1967 return;
1968 }
1969 }
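 /*
  * pick the next read-ahead window... start one page past whichever is
  * further along: the current read (extent->e_addr) or the last page
  * already prefetched (rap->cl_maxra)... if that page is already
  * resident (the ubc_range_op check below), there's nothing to do...
  * otherwise cl_ralen roughly doubles on each sequential pass, capped
  * at max_prefetch / PAGE_SIZE, before cluster_read_prefetch is issued
  */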
1970 r_addr = max(extent->e_addr, rap->cl_maxra) + 1;
1971 f_offset = (off_t)(r_addr * PAGE_SIZE_64);
1972
1973 size_of_prefetch = 0;
1974
1975 ubc_range_op(vp, f_offset, f_offset + PAGE_SIZE_64, UPL_ROP_PRESENT, &size_of_prefetch);
1976
1977 if (size_of_prefetch) {
1978 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
1979 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 3, 0);
1980 return;
1981 }
1982 if (f_offset < filesize) {
1983 daddr64_t read_size;
1984
1985 rap->cl_ralen = rap->cl_ralen ? min(max_prefetch / PAGE_SIZE, rap->cl_ralen << 1) : 1;
1986
1987 read_size = (extent->e_addr + 1) - extent->b_addr;
1988
1989 if (read_size > rap->cl_ralen) {
1990 if (read_size > max_prefetch / PAGE_SIZE)
1991 rap->cl_ralen = max_prefetch / PAGE_SIZE;
1992 else
1993 rap->cl_ralen = read_size;
1994 }
1995 size_of_prefetch = cluster_read_prefetch(vp, f_offset, rap->cl_ralen * PAGE_SIZE, filesize, callback, callback_arg, bflag);
1996
1997 if (size_of_prefetch)
1998 rap->cl_maxra = (r_addr + size_of_prefetch) - 1;
1999 }
2000 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
2001 rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 4, 0);
2002 }
2003
2004
2005 int
2006 cluster_pageout(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2007 int size, off_t filesize, int flags)
2008 {
2009 return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
2010
2011 }
2012
2013
2014 int
2015 cluster_pageout_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2016 int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
2017 {
2018 int io_size;
2019 int rounded_size;
2020 off_t max_size;
2021 int local_flags;
2022
2023 local_flags = CL_PAGEOUT | CL_THROTTLE;
2024
2025 if ((flags & UPL_IOSYNC) == 0)
2026 local_flags |= CL_ASYNC;
2027 if ((flags & UPL_NOCOMMIT) == 0)
2028 local_flags |= CL_COMMIT;
2029 if ((flags & UPL_KEEPCACHED))
2030 local_flags |= CL_KEEPCACHED;
2031 if (flags & UPL_PAGING_ENCRYPTED)
2032 local_flags |= CL_ENCRYPTED;
2033
2034
2035 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 52)) | DBG_FUNC_NONE,
2036 (int)f_offset, size, (int)filesize, local_flags, 0);
2037
2038 /*
2039 * If they didn't specify any I/O, then we are done...
2040 * we can't issue an abort because we don't know how
2041 * big the upl really is
2042 */
2043 if (size <= 0)
2044 return (EINVAL);
2045
2046 if (vp->v_mount->mnt_flag & MNT_RDONLY) {
2047 if (local_flags & CL_COMMIT)
2048 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
2049 return (EROFS);
2050 }
2051 /*
2052 * can't page-out from a negative offset
2053 * or if we're starting beyond the EOF
2054 * or if the file offset isn't page aligned
2055 * or the size requested isn't a multiple of PAGE_SIZE
2056 */
2057 if (f_offset < 0 || f_offset >= filesize ||
2058 (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) {
2059 if (local_flags & CL_COMMIT)
2060 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
2061 return (EINVAL);
2062 }
2063 max_size = filesize - f_offset;
2064
2065 if (size < max_size)
2066 io_size = size;
2067 else
2068 io_size = max_size;
2069
2070 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
2071
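 /*
  * worked example (assuming a 4 KB PAGE_SIZE): if the caller passes
  * size = 12288 but only 5000 bytes remain before EOF, then
  * io_size = 5000 and rounded_size = 8192, so the third page of the
  * upl (offsets 8192..12287) is aborted below and only the first two
  * pages are actually written
  */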
2072 if (size > rounded_size) {
2073 if (local_flags & CL_COMMIT)
2074 ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size,
2075 UPL_ABORT_FREE_ON_EMPTY);
2076 }
2077 return (cluster_io(vp, upl, upl_offset, f_offset, io_size,
2078 local_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg));
2079 }
2080
2081
2082 int
2083 cluster_pagein(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2084 int size, off_t filesize, int flags)
2085 {
2086 return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
2087 }
2088
2089
2090 int
2091 cluster_pagein_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
2092 int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
2093 {
2094 u_int io_size;
2095 int rounded_size;
2096 off_t max_size;
2097 int retval;
2098 int local_flags = 0;
2099
2100 if (upl == NULL || size < 0)
2101 panic("cluster_pagein: NULL upl passed in");
2102
2103 if ((flags & UPL_IOSYNC) == 0)
2104 local_flags |= CL_ASYNC;
2105 if ((flags & UPL_NOCOMMIT) == 0)
2106 local_flags |= CL_COMMIT;
2107 if (flags & UPL_IOSTREAMING)
2108 local_flags |= CL_IOSTREAMING;
2109 if (flags & UPL_PAGING_ENCRYPTED)
2110 local_flags |= CL_ENCRYPTED;
2111
2112
2113 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 56)) | DBG_FUNC_NONE,
2114 (int)f_offset, size, (int)filesize, local_flags, 0);
2115
2116 /*
2117 * can't page-in from a negative offset
2118 * or if we're starting beyond the EOF
2119 * or if the file offset isn't page aligned
2120 * or the size requested isn't a multiple of PAGE_SIZE
2121 */
2122 if (f_offset < 0 || f_offset >= filesize ||
2123 (f_offset & PAGE_MASK_64) || (size & PAGE_MASK) || (upl_offset & PAGE_MASK)) {
2124 if (local_flags & CL_COMMIT)
2125 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
2126 return (EINVAL);
2127 }
2128 max_size = filesize - f_offset;
2129
2130 if (size < max_size)
2131 io_size = size;
2132 else
2133 io_size = max_size;
2134
2135 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
2136
2137 if (size > rounded_size && (local_flags & CL_COMMIT))
2138 ubc_upl_abort_range(upl, upl_offset + rounded_size,
2139 size - rounded_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
2140
2141 retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
2142 local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
2143
2144 return (retval);
2145 }
2146
2147
2148 int
2149 cluster_bp(buf_t bp)
2150 {
2151 return cluster_bp_ext(bp, NULL, NULL);
2152 }
2153
2154
2155 int
2156 cluster_bp_ext(buf_t bp, int (*callback)(buf_t, void *), void *callback_arg)
2157 {
2158 off_t f_offset;
2159 int flags;
2160
2161 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 19)) | DBG_FUNC_START,
2162 bp, (int)bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
2163
2164 if (bp->b_flags & B_READ)
2165 flags = CL_ASYNC | CL_READ;
2166 else
2167 flags = CL_ASYNC;
2168 if (bp->b_flags & B_PASSIVE)
2169 flags |= CL_PASSIVE;
2170
2171 f_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno);
2172
2173 return (cluster_io(bp->b_vp, bp->b_upl, 0, f_offset, bp->b_bcount, flags, bp, (struct clios *)NULL, callback, callback_arg));
2174 }
2175
2176
2177
2178 int
2179 cluster_write(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff, int xflags)
2180 {
2181 return cluster_write_ext(vp, uio, oldEOF, newEOF, headOff, tailOff, xflags, NULL, NULL);
2182 }
2183
2184
2185 int
2186 cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff,
2187 int xflags, int (*callback)(buf_t, void *), void *callback_arg)
2188 {
2189 user_ssize_t cur_resid;
2190 int retval = 0;
2191 int flags;
2192 int zflags;
2193 int bflag;
2194 int write_type = IO_COPY;
2195 u_int32_t write_length;
2196
2197 flags = xflags;
2198
2199 if (flags & IO_PASSIVE)
2200 bflag = CL_PASSIVE;
2201 else
2202 bflag = 0;
2203
2204 if (vp->v_flag & VNOCACHE_DATA){
2205 flags |= IO_NOCACHE;
2206 bflag |= CL_NOCACHE;
2207 }
2208 if (uio == NULL) {
2209 /*
2210 * no user data...
2211 * this call is being made to zero-fill some range in the file
2212 */
2213 retval = cluster_write_copy(vp, NULL, (u_int32_t)0, oldEOF, newEOF, headOff, tailOff, flags, callback, callback_arg);
2214
2215 return(retval);
2216 }
2217 /*
2218 * do a write through the cache if one of the following is true....
2219 * NOCACHE is not true or NODIRECT is true
2220 * the uio request doesn't target USERSPACE
2221 * otherwise, find out if we want the direct or contig variant for
2222 * the first vector in the uio request
2223 */
2224 if ( ((flags & (IO_NOCACHE | IO_NODIRECT)) == IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg) )
2225 retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE);
2226
2227 if ( (flags & (IO_TAILZEROFILL | IO_HEADZEROFILL)) && write_type == IO_DIRECT)
2228 /*
2229 * must go through the cached variant in this case
2230 */
2231 write_type = IO_COPY;
2232
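 /*
  * the loop below is a small state machine driven by write_type...
  * IO_COPY goes through the cache via cluster_write_copy, IO_CONTIG
  * and IO_DIRECT hand the uio to cluster_write_contig or
  * cluster_write_direct, and IO_UNKNOWN re-classifies the next uio
  * vector with cluster_io_type so that a mixed request can switch
  * paths between vectors
  */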
2233 while ((cur_resid = uio_resid(uio)) && uio->uio_offset < newEOF && retval == 0) {
2234
2235 switch (write_type) {
2236
2237 case IO_COPY:
2238 /*
2239 * make sure the uio_resid isn't too big...
2240 * internally, we want to handle all of the I/O in
2241 * chunk sizes that fit in a 32 bit int
2242 */
2243 if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) {
2244 /*
2245 * we're going to have to call cluster_write_copy
2246 * more than once...
2247 *
2248 * only want the last call to cluster_write_copy to
2249 * have the IO_TAILZEROFILL flag set and only the
2250 * first call should have IO_HEADZEROFILL
2251 */
2252 zflags = flags & ~IO_TAILZEROFILL;
2253 flags &= ~IO_HEADZEROFILL;
2254
2255 write_length = MAX_IO_REQUEST_SIZE;
2256 } else {
2257 /*
2258 * last call to cluster_write_copy
2259 */
2260 zflags = flags;
2261
2262 write_length = (u_int32_t)cur_resid;
2263 }
2264 retval = cluster_write_copy(vp, uio, write_length, oldEOF, newEOF, headOff, tailOff, zflags, callback, callback_arg);
2265 break;
2266
2267 case IO_CONTIG:
2268 zflags = flags & ~(IO_TAILZEROFILL | IO_HEADZEROFILL);
2269
2270 if (flags & IO_HEADZEROFILL) {
2271 /*
2272 * only do this once per request
2273 */
2274 flags &= ~IO_HEADZEROFILL;
2275
2276 retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, uio->uio_offset,
2277 headOff, (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
2278 if (retval)
2279 break;
2280 }
2281 retval = cluster_write_contig(vp, uio, newEOF, &write_type, &write_length, callback, callback_arg, bflag);
2282
2283 if (retval == 0 && (flags & IO_TAILZEROFILL) && uio_resid(uio) == 0) {
2284 /*
2285 * we're done with the data from the user specified buffer(s)
2286 * and we've been requested to zero fill at the tail
2287 * treat this as an IO_HEADZEROFILL which doesn't require a uio
2288 * by rearranging the args and passing in IO_HEADZEROFILL
2289 */
2290 retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, tailOff, uio->uio_offset,
2291 (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg);
2292 }
2293 break;
2294
2295 case IO_DIRECT:
2296 /*
2297 * cluster_write_direct is never called with IO_TAILZEROFILL || IO_HEADZEROFILL
2298 */
2299 retval = cluster_write_direct(vp, uio, oldEOF, newEOF, &write_type, &write_length, flags, callback, callback_arg);
2300 break;
2301
2302 case IO_UNKNOWN:
2303 retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE);
2304 break;
2305 }
2306 /*
2307 * in case we end up calling cluster_write_copy (from cluster_write_direct)
2308 * multiple times to service a multi-vector request that is not aligned properly
2309 * we need to update the oldEOF so that we
2310 * don't zero-fill the head of a page if we've successfully written
2311 * data to that area... 'cluster_write_copy' will zero-fill the head of a
2312 * page that is beyond the oldEOF if the write is unaligned... we only
2313 * want that to happen for the very first page of the cluster_write,
2314 * NOT the first page of each vector making up a multi-vector write.
2315 */
2316 if (uio->uio_offset > oldEOF)
2317 oldEOF = uio->uio_offset;
2318 }
2319 return (retval);
2320 }
2321
2322
2323 static int
2324 cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
2325 int flags, int (*callback)(buf_t, void *), void *callback_arg)
2326 {
2327 upl_t upl;
2328 upl_page_info_t *pl;
2329 vm_offset_t upl_offset;
2330 vm_offset_t vector_upl_offset = 0;
2331 u_int32_t io_req_size;
2332 u_int32_t offset_in_file;
2333 u_int32_t offset_in_iovbase;
2334 u_int32_t io_size;
2335 int io_flag = 0;
2336 upl_size_t upl_size, vector_upl_size = 0;
2337 vm_size_t upl_needed_size;
2338 mach_msg_type_number_t pages_in_pl;
2339 upl_control_flags_t upl_flags;
2340 kern_return_t kret;
2341 mach_msg_type_number_t i;
2342 int force_data_sync;
2343 int retval = 0;
2344 int first_IO = 1;
2345 struct clios iostate;
2346 user_addr_t iov_base;
2347 u_int32_t mem_alignment_mask;
2348 u_int32_t devblocksize;
2349 u_int32_t max_io_size;
2350 u_int32_t max_upl_size;
2351 u_int32_t max_vector_size;
2352 u_int32_t bytes_outstanding_limit;
2353 boolean_t io_throttled = FALSE;
2354
2355 u_int32_t vector_upl_iosize = 0;
2356 int issueVectorUPL = 0,useVectorUPL = (uio->uio_iovcnt > 1);
2357 off_t v_upl_uio_offset = 0;
2358 int vector_upl_index=0;
2359 upl_t vector_upl = NULL;
2360
2361
2362 /*
2363 * When we enter this routine, we know
2364 * -- the resid will not exceed iov_len
2365 */
2366 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
2367 (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
2368
2369 max_upl_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
2370
2371 io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE | CL_DIRECT_IO;
2372
2373 if (flags & IO_PASSIVE)
2374 io_flag |= CL_PASSIVE;
2375
2376 if (flags & IO_NOCACHE)
2377 io_flag |= CL_NOCACHE;
2378
2379 if (flags & IO_SKIP_ENCRYPTION)
2380 io_flag |= CL_ENCRYPTED;
2381
2382 iostate.io_completed = 0;
2383 iostate.io_issued = 0;
2384 iostate.io_error = 0;
2385 iostate.io_wanted = 0;
2386
2387 lck_mtx_init(&iostate.io_mtxp, cl_mtx_grp, cl_mtx_attr);
2388
2389 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
2390 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
2391
2392 if (devblocksize == 1) {
2393 /*
2394 * the AFP client advertises a devblocksize of 1
2395 * however, its BLOCKMAP routine maps to physical
2396 * blocks that are PAGE_SIZE in size...
2397 * therefore we can't ask for I/Os that aren't page aligned
2398 * or aren't multiples of PAGE_SIZE in size
2399 * by setting devblocksize to PAGE_SIZE, we reinstate
2400 * the old behavior we had before the mem_alignment_mask
2401 * changes went in...
2402 */
2403 devblocksize = PAGE_SIZE;
2404 }
2405
2406 next_dwrite:
2407 io_req_size = *write_length;
2408 iov_base = uio_curriovbase(uio);
2409
2410 offset_in_file = (u_int32_t)uio->uio_offset & PAGE_MASK;
2411 offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;
2412
2413 if (offset_in_file || offset_in_iovbase) {
2414 /*
2415 * one of the 2 important offsets is misaligned
2416 * so fire an I/O through the cache for this entire vector
2417 */
2418 goto wait_for_dwrites;
2419 }
2420 if (iov_base & (devblocksize - 1)) {
2421 /*
2422 * the offset in memory must be on a device block boundary
2423 * so that we can guarantee that we can generate an
2424 * I/O that ends on a page boundary in cluster_io
2425 */
2426 goto wait_for_dwrites;
2427 }
2428
2429 task_update_logical_writes(current_task(), (io_req_size & ~PAGE_MASK), TASK_WRITE_IMMEDIATE, vp);
2430 while (io_req_size >= PAGE_SIZE && uio->uio_offset < newEOF && retval == 0) {
2431 int throttle_type;
2432
2433 if ( (throttle_type = cluster_is_throttled(vp)) ) {
2434 /*
2435 * we're in the throttle window, at the very least
2436 * we want to limit the size of the I/O we're about
2437 * to issue
2438 */
2439 if ( (flags & IO_RETURN_ON_THROTTLE) && throttle_type == THROTTLE_NOW) {
2440 /*
2441 * we're in the throttle window and at least 1 I/O
2442 * has already been issued by a throttleable thread
2443 * in this window, so return with EAGAIN to indicate
2444 * to the FS issuing the cluster_write call that it
2445 * should now throttle after dropping any locks
2446 */
2447 throttle_info_update_by_mount(vp->v_mount);
2448
2449 io_throttled = TRUE;
2450 goto wait_for_dwrites;
2451 }
2452 max_vector_size = THROTTLE_MAX_IOSIZE;
2453 max_io_size = THROTTLE_MAX_IOSIZE;
2454 } else {
2455 max_vector_size = MAX_VECTOR_UPL_SIZE;
2456 max_io_size = max_upl_size;
2457 }
2458
2459 if (first_IO) {
2460 cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);
2461 first_IO = 0;
2462 }
2463 io_size = io_req_size & ~PAGE_MASK;
2464 iov_base = uio_curriovbase(uio);
2465
2466 if (io_size > max_io_size)
2467 io_size = max_io_size;
2468
2469 if(useVectorUPL && (iov_base & PAGE_MASK)) {
2470 /*
2471 * We have an iov_base that's not page-aligned.
2472 * Issue all I/O's that have been collected within
2473 * this Vectored UPL.
2474 */
2475 if(vector_upl_index) {
2476 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2477 reset_vector_run_state();
2478 }
2479
2480 /*
2481 * After this point, if we are using the Vector UPL path and the base is
2482 * not page-aligned then the UPL with that base will be the first in the vector UPL.
2483 */
2484 }
2485
2486 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
2487 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
2488
2489 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
2490 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
2491
2492 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
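 /*
  * wire the pages backing this vector... up to 3 attempts are made
  * with an increasing force_data_sync argument to vm_map_get_upl...
  * if any page in the returned pagelist isn't valid, the upl is
  * aborted and we retry... after 3 failures we give up on the direct
  * path for this request and fall through to wait_for_dwrites
  */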
2493 for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
2494 pages_in_pl = 0;
2495 upl_size = upl_needed_size;
2496 upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
2497 UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE
2498 | UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE);
2499
2500 kret = vm_map_get_upl(map,
2501 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
2502 &upl_size,
2503 &upl,
2504 NULL,
2505 &pages_in_pl,
2506 &upl_flags,
2507 force_data_sync);
2508
2509 if (kret != KERN_SUCCESS) {
2510 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
2511 0, 0, 0, kret, 0);
2512 /*
2513 * failed to get pagelist
2514 *
2515 * we may have already spun some portion of this request
2516 * off as async requests... we need to wait for the I/O
2517 * to complete before returning
2518 */
2519 goto wait_for_dwrites;
2520 }
2521 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2522 pages_in_pl = upl_size / PAGE_SIZE;
2523
2524 for (i = 0; i < pages_in_pl; i++) {
2525 if (!upl_valid_page(pl, i))
2526 break;
2527 }
2528 if (i == pages_in_pl)
2529 break;
2530
2531 /*
2532 * didn't get all the pages back that we
2533 * needed... release this upl and try again
2534 */
2535 ubc_upl_abort(upl, 0);
2536 }
2537 if (force_data_sync >= 3) {
2538 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
2539 i, pages_in_pl, upl_size, kret, 0);
2540 /*
2541 * for some reason, we couldn't acquire a hold on all
2542 * the pages needed in the user's address space
2543 *
2544 * we may have already spun some portion of this request
2545 * off as async requests... we need to wait for the I/O
2546 * to complete before returning
2547 */
2548 goto wait_for_dwrites;
2549 }
2550
2551 /*
2552 * Consider the possibility that upl_size wasn't satisfied.
2553 */
2554 if (upl_size < upl_needed_size) {
2555 if (upl_size && upl_offset == 0)
2556 io_size = upl_size;
2557 else
2558 io_size = 0;
2559 }
2560 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
2561 (int)upl_offset, upl_size, (int)iov_base, io_size, 0);
2562
2563 if (io_size == 0) {
2564 ubc_upl_abort(upl, 0);
2565 /*
2566 * we may have already spun some portion of this request
2567 * off as async requests... we need to wait for the I/O
2568 * to complete before returning
2569 */
2570 goto wait_for_dwrites;
2571 }
2572
2573 if(useVectorUPL) {
2574 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
2575 if(end_off)
2576 issueVectorUPL = 1;
2577 /*
2578 * After this point, if we are using a vector UPL, then
2579 * either all the UPL elements end on a page boundary OR
2580 * this UPL is the last element because it does not end
2581 * on a page boundary.
2582 */
2583 }
2584
2585 /*
2586 * we want to push out these writes asynchronously so that we can overlap
2587 * the preparation of the next I/O
2588 * if there are already too many outstanding writes
2589 * wait until some complete before issuing the next
2590 */
2591 if (vp->v_mount->mnt_minsaturationbytecount)
2592 bytes_outstanding_limit = vp->v_mount->mnt_minsaturationbytecount;
2593 else
2594 bytes_outstanding_limit = max_upl_size * IO_SCALE(vp, 2);
2595
2596 cluster_iostate_wait(&iostate, bytes_outstanding_limit, "cluster_write_direct");
2597
2598 if (iostate.io_error) {
2599 /*
2600 * one of the earlier writes we issued ran into a hard error
2601 * don't issue any more writes, cleanup the UPL
2602 * that was just created but not used, then
2603 * go wait for all writes that are part of this stream
2604 * to complete before returning the error to the caller
2605 */
2606 ubc_upl_abort(upl, 0);
2607
2608 goto wait_for_dwrites;
2609 }
2610
2611 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
2612 (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);
2613
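 /*
  * single-iovec requests go straight to cluster_io... for multi-iovec
  * requests the sub-UPL is appended to a vector UPL instead, and the
  * whole vector is issued once a sub-UPL fails to end on a page
  * boundary (issueVectorUPL), MAX_VECTOR_UPL_ELEMENTS is reached, or
  * the accumulated size hits max_vector_size
  */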
2614 if(!useVectorUPL)
2615 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
2616 io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2617
2618 else {
2619 if(!vector_upl_index) {
2620 vector_upl = vector_upl_create(upl_offset);
2621 v_upl_uio_offset = uio->uio_offset;
2622 vector_upl_offset = upl_offset;
2623 }
2624
2625 vector_upl_set_subupl(vector_upl,upl,upl_size);
2626 vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
2627 vector_upl_index++;
2628 vector_upl_iosize += io_size;
2629 vector_upl_size += upl_size;
2630
2631 if(issueVectorUPL || vector_upl_index == MAX_VECTOR_UPL_ELEMENTS || vector_upl_size >= max_vector_size) {
2632 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2633 reset_vector_run_state();
2634 }
2635 }
2636
2637 /*
2638 * update the uio structure to
2639 * reflect the I/O that we just issued
2640 */
2641 uio_update(uio, (user_size_t)io_size);
2642
2643 /*
2644 * in case we end up calling through to cluster_write_copy to finish
2645 * the tail of this request, we need to update the oldEOF so that we
2646 * don't zero-fill the head of a page if we've successfully written
2647 * data to that area... 'cluster_write_copy' will zero-fill the head of a
2648 * page that is beyond the oldEOF if the write is unaligned... we only
2649 * want that to happen for the very first page of the cluster_write,
2650 * NOT the first page of each vector making up a multi-vector write.
2651 */
2652 if (uio->uio_offset > oldEOF)
2653 oldEOF = uio->uio_offset;
2654
2655 io_req_size -= io_size;
2656
2657 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
2658 (int)upl_offset, (int)uio->uio_offset, io_req_size, retval, 0);
2659
2660 } /* end while */
2661
2662 if (retval == 0 && iostate.io_error == 0 && io_req_size == 0) {
2663
2664 retval = cluster_io_type(uio, write_type, write_length, MIN_DIRECT_WRITE_SIZE);
2665
2666 if (retval == 0 && *write_type == IO_DIRECT) {
2667
2668 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_NONE,
2669 (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0);
2670
2671 goto next_dwrite;
2672 }
2673 }
2674
2675 wait_for_dwrites:
2676
2677 if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
2678 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2679 reset_vector_run_state();
2680 }
2681 /*
2682 * make sure all async writes issued as part of this stream
2683 * have completed before we return
2684 */
2685 cluster_iostate_wait(&iostate, 0, "cluster_write_direct");
2686
2687 if (iostate.io_error)
2688 retval = iostate.io_error;
2689
2690 lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp);
2691
2692 if (io_throttled == TRUE && retval == 0)
2693 retval = EAGAIN;
2694
2695 if (io_req_size && retval == 0) {
2696 /*
2697 * we couldn't handle the tail of this request in DIRECT mode
2698 * so fire it through the copy path
2699 *
2700 * note that flags will never have IO_HEADZEROFILL or IO_TAILZEROFILL set
2701 * so we can just pass 0 in for the headOff and tailOff
2702 */
2703 if (uio->uio_offset > oldEOF)
2704 oldEOF = uio->uio_offset;
2705
2706 retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg);
2707
2708 *write_type = IO_UNKNOWN;
2709 }
2710 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
2711 (int)uio->uio_offset, io_req_size, retval, 4, 0);
2712
2713 return (retval);
2714 }
2715
2716
2717 static int
2718 cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, u_int32_t *write_length,
2719 int (*callback)(buf_t, void *), void *callback_arg, int bflag)
2720 {
2721 upl_page_info_t *pl;
2722 addr64_t src_paddr = 0;
2723 upl_t upl[MAX_VECTS];
2724 vm_offset_t upl_offset;
2725 u_int32_t tail_size = 0;
2726 u_int32_t io_size;
2727 u_int32_t xsize;
2728 upl_size_t upl_size;
2729 vm_size_t upl_needed_size;
2730 mach_msg_type_number_t pages_in_pl;
2731 upl_control_flags_t upl_flags;
2732 kern_return_t kret;
2733 struct clios iostate;
2734 int error = 0;
2735 int cur_upl = 0;
2736 int num_upl = 0;
2737 int n;
2738 user_addr_t iov_base;
2739 u_int32_t devblocksize;
2740 u_int32_t mem_alignment_mask;
2741
2742 /*
2743 * When we enter this routine, we know
2744 * -- the io_req_size will not exceed iov_len
2745 * -- the target address is physically contiguous
2746 */
2747 cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);
2748
2749 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
2750 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
2751
2752 iostate.io_completed = 0;
2753 iostate.io_issued = 0;
2754 iostate.io_error = 0;
2755 iostate.io_wanted = 0;
2756
2757 lck_mtx_init(&iostate.io_mtxp, cl_mtx_grp, cl_mtx_attr);
2758
2759 next_cwrite:
2760 io_size = *write_length;
2761
2762 iov_base = uio_curriovbase(uio);
2763
2764 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
2765 upl_needed_size = upl_offset + io_size;
2766
2767 pages_in_pl = 0;
2768 upl_size = upl_needed_size;
2769 upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
2770 UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE
2771 | UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE);
2772
2773 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
2774 kret = vm_map_get_upl(map,
2775 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
2776 &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
2777
2778 if (kret != KERN_SUCCESS) {
2779 /*
2780 * failed to get pagelist
2781 */
2782 error = EINVAL;
2783 goto wait_for_cwrites;
2784 }
2785 num_upl++;
2786
2787 /*
2788 * Consider the possibility that upl_size wasn't satisfied.
2789 */
2790 if (upl_size < upl_needed_size) {
2791 /*
2792 * This is a failure in the physical memory case.
2793 */
2794 error = EINVAL;
2795 goto wait_for_cwrites;
2796 }
2797 pl = ubc_upl_pageinfo(upl[cur_upl]);
2798
2799 src_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)upl_offset;
2800
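 /*
  * deal with alignment relative to the device block size... any head
  * fragment (and, after the main loop, any tail fragment) is written
  * via cluster_align_phys_io, while the device-block-aligned middle
  * is issued asynchronously in chunks of at most MAX_IO_CONTIG_SIZE
  * against the physically contiguous buffer
  */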
2801 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
2802 u_int32_t head_size;
2803
2804 head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
2805
2806 if (head_size > io_size)
2807 head_size = io_size;
2808
2809 error = cluster_align_phys_io(vp, uio, src_paddr, head_size, 0, callback, callback_arg);
2810
2811 if (error)
2812 goto wait_for_cwrites;
2813
2814 upl_offset += head_size;
2815 src_paddr += head_size;
2816 io_size -= head_size;
2817
2818 iov_base += head_size;
2819 }
2820 if ((u_int32_t)iov_base & mem_alignment_mask) {
2821 /*
2822 * request isn't aligned on a memory boundary
2823 * that the underlying DMA engine can handle...
2824 * return an error instead of going through
2825 * the slow copy path since the intent of this
2826 * path is direct I/O from device memory
2827 */
2828 error = EINVAL;
2829 goto wait_for_cwrites;
2830 }
2831
2832 tail_size = io_size & (devblocksize - 1);
2833 io_size -= tail_size;
2834
2835 while (io_size && error == 0) {
2836
2837 if (io_size > MAX_IO_CONTIG_SIZE)
2838 xsize = MAX_IO_CONTIG_SIZE;
2839 else
2840 xsize = io_size;
2841 /*
2842 * request asynchronously so that we can overlap
2843 * the preparation of the next I/O... we'll do
2844 * the commit after all the I/O has completed
2845 * since it's all issued against the same UPL
2846 * if there are already too many outstanding writes
2847 * wait until some have completed before issuing the next
2848 */
2849 cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_write_contig");
2850
2851 if (iostate.io_error) {
2852 /*
2853 * one of the earlier writes we issued ran into a hard error
2854 * don't issue any more writes...
2855 * go wait for all writes that are part of this stream
2856 * to complete before returning the error to the caller
2857 */
2858 goto wait_for_cwrites;
2859 }
2860 /*
2861 * issue an asynchronous write to cluster_io
2862 */
2863 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset,
2864 xsize, CL_DEV_MEMORY | CL_ASYNC | bflag, (buf_t)NULL, (struct clios *)&iostate, callback, callback_arg);
2865
2866 if (error == 0) {
2867 /*
2868 * The cluster_io write completed successfully,
2869 * update the uio structure
2870 */
2871 uio_update(uio, (user_size_t)xsize);
2872
2873 upl_offset += xsize;
2874 src_paddr += xsize;
2875 io_size -= xsize;
2876 }
2877 }
2878 if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS) {
2879
2880 error = cluster_io_type(uio, write_type, write_length, 0);
2881
2882 if (error == 0 && *write_type == IO_CONTIG) {
2883 cur_upl++;
2884 goto next_cwrite;
2885 }
2886 } else
2887 *write_type = IO_UNKNOWN;
2888
2889 wait_for_cwrites:
2890 /*
2891 * make sure all async writes that are part of this stream
2892 * have completed before we proceed
2893 */
2894 cluster_iostate_wait(&iostate, 0, "cluster_write_contig");
2895
2896 if (iostate.io_error)
2897 error = iostate.io_error;
2898
2899 lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp);
2900
2901 if (error == 0 && tail_size)
2902 error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg);
2903
2904 for (n = 0; n < num_upl; n++)
2905 /*
2906 * just release our hold on each physically contiguous
2907 * region without changing any state
2908 */
2909 ubc_upl_abort(upl[n], 0);
2910
2911 return (error);
2912 }
2913
2914
2915 /*
2916 * need to avoid a race between an msync of a range of pages dirtied via mmap
2917 * vs a filesystem such as HFS deciding to write a 'hole' to disk via cluster_write's
2918 * zerofill mechanism before it has seen the VNOP_PAGEOUTs for the pages being msync'd
2919 *
2920 * we should never force-zero-fill pages that are already valid in the cache...
2921 * the entire page contains valid data (either from disk, zero-filled or dirtied
2922 * via an mmap) so we can only do damage by trying to zero-fill
2923 *
2924 */
2925 static int
2926 cluster_zero_range(upl_t upl, upl_page_info_t *pl, int flags, int io_offset, off_t zero_off, off_t upl_f_offset, int bytes_to_zero)
2927 {
2928 int zero_pg_index;
2929 boolean_t need_cluster_zero = TRUE;
2930
2931 if ((flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
2932
2933 bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64));
2934 zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64);
2935
2936 if (upl_valid_page(pl, zero_pg_index)) {
2937 /*
2938 * never force zero valid pages - dirty or clean
2939 * we'll leave these in the UPL for cluster_write_copy to deal with
2940 */
2941 need_cluster_zero = FALSE;
2942 }
2943 }
2944 if (need_cluster_zero == TRUE)
2945 cluster_zero(upl, io_offset, bytes_to_zero, NULL);
2946
2947 return (bytes_to_zero);
2948 }
2949
2950
2951 static int
2952 cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, off_t headOff,
2953 off_t tailOff, int flags, int (*callback)(buf_t, void *), void *callback_arg)
2954 {
2955 upl_page_info_t *pl;
2956 upl_t upl;
2957 vm_offset_t upl_offset = 0;
2958 vm_size_t upl_size;
2959 off_t upl_f_offset;
2960 int pages_in_upl;
2961 int start_offset;
2962 int xfer_resid;
2963 int io_size;
2964 int io_offset;
2965 int bytes_to_zero;
2966 int bytes_to_move;
2967 kern_return_t kret;
2968 int retval = 0;
2969 int io_resid;
2970 long long total_size;
2971 long long zero_cnt;
2972 off_t zero_off;
2973 long long zero_cnt1;
2974 off_t zero_off1;
2975 off_t write_off = 0;
2976 int write_cnt = 0;
2977 boolean_t first_pass = FALSE;
2978 struct cl_extent cl;
2979 struct cl_writebehind *wbp;
2980 int bflag;
2981 u_int max_cluster_pgcount;
2982 u_int max_io_size;
2983
2984 if (uio) {
2985 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
2986 (int)uio->uio_offset, io_req_size, (int)oldEOF, (int)newEOF, 0);
2987
2988 io_resid = io_req_size;
2989 } else {
2990 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
2991 0, 0, (int)oldEOF, (int)newEOF, 0);
2992
2993 io_resid = 0;
2994 }
2995 if (flags & IO_PASSIVE)
2996 bflag = CL_PASSIVE;
2997 else
2998 bflag = 0;
2999 if (flags & IO_NOCACHE)
3000 bflag |= CL_NOCACHE;
3001
3002 if (flags & IO_SKIP_ENCRYPTION)
3003 bflag |= CL_ENCRYPTED;
3004
3005 zero_cnt = 0;
3006 zero_cnt1 = 0;
3007 zero_off = 0;
3008 zero_off1 = 0;
3009
3010 max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
3011 max_io_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
3012
3013 if (flags & IO_HEADZEROFILL) {
3014 /*
3015 * some filesystems (HFS is one) don't support unallocated holes within a file...
3016 * so we zero fill the intervening space between the old EOF and the offset
3017 * where the next chunk of real data begins.... ftruncate will also use this
3018 * routine to zero fill to the new EOF when growing a file... in this case, the
3019 * uio structure will not be provided
3020 */
3021 if (uio) {
3022 if (headOff < uio->uio_offset) {
3023 zero_cnt = uio->uio_offset - headOff;
3024 zero_off = headOff;
3025 }
3026 } else if (headOff < newEOF) {
3027 zero_cnt = newEOF - headOff;
3028 zero_off = headOff;
3029 }
3030 } else {
3031 if (uio && uio->uio_offset > oldEOF) {
3032 zero_off = uio->uio_offset & ~PAGE_MASK_64;
3033
3034 if (zero_off >= oldEOF) {
3035 zero_cnt = uio->uio_offset - zero_off;
3036
3037 flags |= IO_HEADZEROFILL;
3038 }
3039 }
3040 }
3041 if (flags & IO_TAILZEROFILL) {
3042 if (uio) {
3043 zero_off1 = uio->uio_offset + io_req_size;
3044
3045 if (zero_off1 < tailOff)
3046 zero_cnt1 = tailOff - zero_off1;
3047 }
3048 } else {
3049 if (uio && newEOF > oldEOF) {
3050 zero_off1 = uio->uio_offset + io_req_size;
3051
3052 if (zero_off1 == newEOF && (zero_off1 & PAGE_MASK_64)) {
3053 zero_cnt1 = PAGE_SIZE_64 - (zero_off1 & PAGE_MASK_64);
3054
3055 flags |= IO_TAILZEROFILL;
3056 }
3057 }
3058 }
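 /*
  * example of the head zero-fill setup above (assuming a 4 KB
  * PAGE_SIZE): with oldEOF = 5000 and a write starting at
  * uio_offset = 9000, zero_off = 8192 which is past oldEOF, so
  * zero_cnt = 808 and bytes 8192..8999 (the head of the newly touched
  * page) get zeroed before the user data is copied in
  */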
3059 if (zero_cnt == 0 && uio == (struct uio *) 0) {
3060 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
3061 retval, 0, 0, 0, 0);
3062 return (0);
3063 }
3064 if (uio) {
3065 write_off = uio->uio_offset;
3066 write_cnt = uio_resid(uio);
3067 /*
3068 * delay updating the sequential write info
3069 * in the control block until we've obtained
3070 * the lock for it
3071 */
3072 first_pass = TRUE;
3073 }
3074 while ((total_size = (io_resid + zero_cnt + zero_cnt1)) && retval == 0) {
3075 /*
3076 * for this iteration of the loop, figure out where our starting point is
3077 */
3078 if (zero_cnt) {
3079 start_offset = (int)(zero_off & PAGE_MASK_64);
3080 upl_f_offset = zero_off - start_offset;
3081 } else if (io_resid) {
3082 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
3083 upl_f_offset = uio->uio_offset - start_offset;
3084 } else {
3085 start_offset = (int)(zero_off1 & PAGE_MASK_64);
3086 upl_f_offset = zero_off1 - start_offset;
3087 }
3088 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE,
3089 (int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0);
3090
3091 if (total_size > max_io_size)
3092 total_size = max_io_size;
3093
3094 cl.b_addr = (daddr64_t)(upl_f_offset / PAGE_SIZE_64);
3095
3096 if (uio && ((flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0)) {
3097 /*
3098 * assumption... total_size <= io_resid
3099 * because IO_HEADZEROFILL and IO_TAILZEROFILL are not set
3100 */
3101 if ((start_offset + total_size) > max_io_size)
3102 total_size = max_io_size - start_offset;
3103 xfer_resid = total_size;
3104
3105 retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1);
3106
3107 if (retval)
3108 break;
3109
3110 io_resid -= (total_size - xfer_resid);
3111 total_size = xfer_resid;
3112 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
3113 upl_f_offset = uio->uio_offset - start_offset;
3114
3115 if (total_size == 0) {
3116 if (start_offset) {
3117 /*
3118 * the write did not finish on a page boundary
3119 * which will leave upl_f_offset pointing to the
3120 * beginning of the last page written instead of
3121 * the page beyond it... bump it in this case
3122 * so that the cluster code records the last page
3123 * written as dirty
3124 */
3125 upl_f_offset += PAGE_SIZE_64;
3126 }
3127 upl_size = 0;
3128
3129 goto check_cluster;
3130 }
3131 }
3132 /*
3133 * compute the size of the upl needed to encompass
3134 * the requested write... limit each call to cluster_io
3135 * to the maximum UPL size... cluster_io will clip if
3136 * this exceeds the maximum io_size for the device,
3137 * make sure to account for
3138 * a starting offset that's not page aligned
3139 */
3140 upl_size = (start_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
3141
3142 if (upl_size > max_io_size)
3143 upl_size = max_io_size;
3144
3145 pages_in_upl = upl_size / PAGE_SIZE;
3146 io_size = upl_size - start_offset;
3147
3148 if ((long long)io_size > total_size)
3149 io_size = total_size;
3150
3151 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);
3152
3153
3154 /*
3155 * Gather the pages from the buffer cache.
3156 * The UPL_WILL_MODIFY flag lets the UPL subsystem know
3157 * that we intend to modify these pages.
3158 */
3159 kret = ubc_create_upl(vp,
3160 upl_f_offset,
3161 upl_size,
3162 &upl,
3163 &pl,
3164 UPL_SET_LITE | (( uio!=NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 0 : UPL_WILL_MODIFY));
3165 if (kret != KERN_SUCCESS)
3166 panic("cluster_write_copy: failed to get pagelist");
3167
3168 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END,
3169 upl, (int)upl_f_offset, start_offset, 0, 0);
3170
3171 if (start_offset && upl_f_offset < oldEOF && !upl_valid_page(pl, 0)) {
3172 int read_size;
3173
3174 /*
3175 * we're starting in the middle of the first page of the upl
3176 * and the page isn't currently valid, so we're going to have
3177 * to read it in first... this is a synchronous operation
3178 */
3179 read_size = PAGE_SIZE;
3180
3181 if ((upl_f_offset + read_size) > oldEOF)
3182 read_size = oldEOF - upl_f_offset;
3183
3184 retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
3185 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
3186 if (retval) {
3187 /*
3188 * we had an error during the read which causes us to abort
3189 * the current cluster_write request... before we do, we need
3190 * to release the rest of the pages in the upl without modifying
3191 * their state and mark the failed page in error
3192 */
3193 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY);
3194
3195 if (upl_size > PAGE_SIZE)
3196 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
3197
3198 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
3199 upl, 0, 0, retval, 0);
3200 break;
3201 }
3202 }
3203 if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
3204 /*
3205 * the last offset we're writing to in this upl does not end on a page
3206 * boundary... if it's not beyond the old EOF, then we'll also need to
3207 * pre-read this page in if it isn't already valid
3208 */
3209 upl_offset = upl_size - PAGE_SIZE;
3210
3211 if ((upl_f_offset + start_offset + io_size) < oldEOF &&
3212 !upl_valid_page(pl, upl_offset / PAGE_SIZE)) {
3213 int read_size;
3214
3215 read_size = PAGE_SIZE;
3216
3217 if ((off_t)(upl_f_offset + upl_offset + read_size) > oldEOF)
3218 read_size = oldEOF - (upl_f_offset + upl_offset);
3219
3220 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
3221 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
3222 if (retval) {
3223 /*
3224 * we had an error during the read which causes us to abort
3225 * the current cluster_write request... before we do, we
3226 * need to release the rest of the pages in the upl without
3227 * modifying their state and mark the failed page in error
3228 */
3229 ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY);
3230
3231 if (upl_size > PAGE_SIZE)
3232 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
3233
3234 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
3235 upl, 0, 0, retval, 0);
3236 break;
3237 }
3238 }
3239 }
3240 xfer_resid = io_size;
3241 io_offset = start_offset;
3242
3243 while (zero_cnt && xfer_resid) {
3244
3245 if (zero_cnt < (long long)xfer_resid)
3246 bytes_to_zero = zero_cnt;
3247 else
3248 bytes_to_zero = xfer_resid;
3249
3250 bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off, upl_f_offset, bytes_to_zero);
3251
3252 xfer_resid -= bytes_to_zero;
3253 zero_cnt -= bytes_to_zero;
3254 zero_off += bytes_to_zero;
3255 io_offset += bytes_to_zero;
3256 }
3257 if (xfer_resid && io_resid) {
3258 u_int32_t io_requested;
3259
3260 bytes_to_move = min(io_resid, xfer_resid);
3261 io_requested = bytes_to_move;
3262
3263 retval = cluster_copy_upl_data(uio, upl, io_offset, (int *)&io_requested);
3264
3265 if (retval) {
3266 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
3267
3268 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
3269 upl, 0, 0, retval, 0);
3270 } else {
3271 io_resid -= bytes_to_move;
3272 xfer_resid -= bytes_to_move;
3273 io_offset += bytes_to_move;
3274 }
3275 }
3276 while (xfer_resid && zero_cnt1 && retval == 0) {
3277
3278 if (zero_cnt1 < (long long)xfer_resid)
3279 bytes_to_zero = zero_cnt1;
3280 else
3281 bytes_to_zero = xfer_resid;
3282
3283 bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off1, upl_f_offset, bytes_to_zero);
3284
3285 xfer_resid -= bytes_to_zero;
3286 zero_cnt1 -= bytes_to_zero;
3287 zero_off1 += bytes_to_zero;
3288 io_offset += bytes_to_zero;
3289 }
3290 if (retval == 0) {
3291 int cl_index;
3292 int ret_cluster_try_push;
3293
3294 io_size += start_offset;
3295
3296 if ((upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
3297 /*
3298 * if we're extending the file with this write
3299 * we'll zero fill the rest of the page so that
3300 * if the file gets extended again in such a way as to leave a
3301 * hole starting at this EOF, we'll have zeros in the correct spot
3302 */
3303 cluster_zero(upl, io_size, upl_size - io_size, NULL);
3304 }
3305 /*
3306 * release the upl now if we hold one since...
3307 * 1) pages in it may be present in the sparse cluster map
3308 * and may span 2 separate buckets there... if they do and
3309 * we happen to have to flush a bucket to make room and it intersects
3310 * this upl, a deadlock may result on page BUSY
3311 * 2) we're delaying the I/O... from this point forward we're just updating
3312 * the cluster state... no need to hold the pages, so commit them
3313 * 3) IO_SYNC is set...
3314 * because we had to ask for a UPL that provides currently non-present pages, the
3315 * UPL has been automatically set to clear the dirty flags (both software and hardware)
3316 * upon committing it... this is not the behavior we want since it's possible for
3317 * pages currently present as part of a mapped file to be dirtied while the I/O is in flight.
3318 * we'll pick these pages back up later with the correct behavior specified.
3319 * 4) we don't want to hold pages busy in a UPL and then block on the cluster lock... if a flush
3320 * of this vnode is in progress, we will deadlock if the pages being flushed intersect the pages
3321 * we hold since the flushing context is holding the cluster lock.
3322 */
3323 ubc_upl_commit_range(upl, 0, upl_size,
3324 UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
3325 check_cluster:
3326 /*
3327 * calculate the last logical block number
3328 * that this delayed I/O encompassed
3329 */
3330 cl.e_addr = (daddr64_t)((upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64);
3331
3332 if (flags & IO_SYNC) {
3333 /*
3334 * if the IO_SYNC flag is set then we need to
3335 * bypass any clusters and immediately issue
3336 * the I/O
3337 */
3338 goto issue_io;
3339 }
3340 /*
3341 * take the lock to protect our accesses
3342 * of the writebehind and sparse cluster state
3343 */
3344 wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);
3345
3346 if (wbp->cl_scmap) {
3347
3348 if ( !(flags & IO_NOCACHE)) {
3349 /*
3350 * we've fallen into the sparse
3351 * cluster method of delaying dirty pages
3352 */
3353 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);
3354
3355 lck_mtx_unlock(&wbp->cl_lockw);
3356
3357 continue;
3358 }
3359 /*
3360 * must have done cached writes that fell into
3361 * the sparse cluster mechanism... we've switched
3362 * to uncached writes on the file, so go ahead
3363 * and push whatever's in the sparse map
3364 * and switch back to normal clustering
3365 */
3366 wbp->cl_number = 0;
3367
3368 sparse_cluster_push(&(wbp->cl_scmap), vp, newEOF, PUSH_ALL, 0, callback, callback_arg);
3369 /*
3370 * no clusters of either type present at this point
3371 * so just go directly to start_new_cluster since
3372 * we know we need to delay this I/O since we've
3373 * already released the pages back into the cache
3374 * to avoid the deadlock with sparse_cluster_push
3375 */
3376 goto start_new_cluster;
3377 }
3378 if (first_pass) {
3379 if (write_off == wbp->cl_last_write)
3380 wbp->cl_seq_written += write_cnt;
3381 else
3382 wbp->cl_seq_written = write_cnt;
3383
3384 wbp->cl_last_write = write_off + write_cnt;
3385
3386 first_pass = FALSE;
3387 }
3388 if (wbp->cl_number == 0)
3389 /*
3390 * no clusters currently present
3391 */
3392 goto start_new_cluster;
3393
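 /*
  * try to fold this write into one of the existing write-behind
  * clusters... in outline: if it lies entirely within a cluster's
  * max_cluster_pgcount window we just extend that cluster's e_addr...
  * if it starts inside the window but runs past it, the cluster is
  * grown to its limit and the absorbed head is clipped off the write...
  * if it starts in front of a cluster and the combined span still fits,
  * the cluster's b_addr is pulled back... anything left over falls
  * through to start a new cluster or, when all MAX_CLUSTERS slots are
  * in use, to a push or the sparse-cluster mechanism
  */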
3394 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
3395 /*
3396 * check each cluster that we currently hold
3397 * try to merge some or all of this write into
3398 * one or more of the existing clusters... if
3399 * any portion of the write remains, start a
3400 * new cluster
3401 */
3402 if (cl.b_addr >= wbp->cl_clusters[cl_index].b_addr) {
3403 /*
3404 * the current write starts at or after the current cluster
3405 */
3406 if (cl.e_addr <= (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
3407 /*
3408 * we have a write that fits entirely
3409 * within the existing cluster limits
3410 */
3411 if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr)
3412 /*
3413 * update our idea of where the cluster ends
3414 */
3415 wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
3416 break;
3417 }
3418 if (cl.b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
3419 /*
3420 * we have a write that starts in the middle of the current cluster
3421 * but extends beyond the cluster's limit... we know this because
3422 * of the previous checks
3423 * we'll extend the current cluster to the max
3424 * and update the b_addr for the current write to reflect that
3425 * the head of it was absorbed into this cluster...
3426 * note that we'll always have a leftover tail in this case since
3427 * full absorption would have occurred in the clause above
3428 */
3429 wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount;
3430
3431 cl.b_addr = wbp->cl_clusters[cl_index].e_addr;
3432 }
3433 /*
3434 * we come here for the case where the current write starts
3435 * beyond the limit of the existing cluster or we have a leftover
3436 * tail after a partial absorption
3437 *
3438 * in either case, we'll check the remaining clusters before
3439 * starting a new one
3440 */
3441 } else {
3442 /*
3443 * the current write starts in front of the cluster we're currently considering
3444 */
3445 if ((wbp->cl_clusters[cl_index].e_addr - cl.b_addr) <= max_cluster_pgcount) {
3446 /*
3447 * we can just merge the new request into
3448 * this cluster and leave it in the cache
3449 * since the resulting cluster is still
3450 * less than the maximum allowable size
3451 */
3452 wbp->cl_clusters[cl_index].b_addr = cl.b_addr;
3453
3454 if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr) {
3455 /*
3456 * the current write completely
3457 * envelops the existing cluster and since
3458 * each write is limited to at most max_cluster_pgcount pages
3459 * we can just use the start and last blocknos of the write
3460 * to generate the cluster limits
3461 */
3462 wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
3463 }
3464 break;
3465 }
3466
3467 /*
3468 * if we were to combine this write with the current cluster
3469 * we would exceed the cluster size limit.... so,
3470 * let's see if there's any overlap of the new I/O with
3471 * the cluster we're currently considering... in fact, we'll
3472 * stretch the cluster out to its full limit and see if we
3473 * get an intersection with the current write
3474 *
3475 */
3476 if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount) {
3477 /*
3478 * the current write extends into the proposed cluster
3479 * clip the length of the current write after first combining its
3480 * tail with the newly shaped cluster
3481 */
3482 wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount;
3483
3484 cl.e_addr = wbp->cl_clusters[cl_index].b_addr;
3485 }
3486 /*
3487 * if we get here, there was no way to merge
3488 * any portion of this write with this cluster
3489 * or we could only merge part of it which
3490 * will leave a tail...
3491 * we'll check the remaining clusters before starting a new one
3492 */
3493 }
3494 }
3495 if (cl_index < wbp->cl_number)
3496 /*
3497 * we found an existing cluster(s) that we
3498 * could entirely merge this I/O into
3499 */
3500 goto delay_io;
3501
3502 if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE) &&
3503 wbp->cl_number == MAX_CLUSTERS &&
3504 wbp->cl_seq_written >= (MAX_CLUSTERS * (max_cluster_pgcount * PAGE_SIZE))) {
3505 uint32_t n;
3506
3507 if (vp->v_mount->mnt_minsaturationbytecount) {
3508 n = vp->v_mount->mnt_minsaturationbytecount / MAX_CLUSTER_SIZE(vp);
3509
3510 if (n > MAX_CLUSTERS)
3511 n = MAX_CLUSTERS;
3512 } else
3513 n = 0;
3514
3515 if (n == 0) {
3516 if (vp->v_mount->mnt_kern_flag & MNTK_SSD)
3517 n = WRITE_BEHIND_SSD;
3518 else
3519 n = WRITE_BEHIND;
3520 }
3521 while (n--)
3522 cluster_try_push(wbp, vp, newEOF, 0, 0, callback, callback_arg);
3523 }
3524 if (wbp->cl_number < MAX_CLUSTERS) {
3525 /*
3526 * we didn't find an existing cluster to
3527 * merge into, but there's room to start
3528 * a new one
3529 */
3530 goto start_new_cluster;
3531 }
3532 /*
3533 * no existing cluster to merge with and no
3534 * room to start a new one... we'll try
3535 * pushing one of the existing ones... if none of
3536 * them are able to be pushed, we'll switch
3537 * to the sparse cluster mechanism
3538 * cluster_try_push updates cl_number to the
3539 * number of remaining clusters... and
3540 * returns the number of currently unused clusters
3541 */
3542 ret_cluster_try_push = 0;
3543
3544 /*
3545 * if writes are not deferred, call cluster push immediately
3546 */
3547 if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE)) {
3548
3549 ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, 0, callback, callback_arg);
3550 }
3551
3552 /*
3553 * execute following regardless of writes being deferred or not
3554 */
3555 if (ret_cluster_try_push == 0) {
3556 /*
3557 * no more room in the normal cluster mechanism
3558 * so let's switch to the more expansive but expensive
3559 * sparse mechanism....
3560 */
3561 sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg);
3562 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);
3563
3564 lck_mtx_unlock(&wbp->cl_lockw);
3565
3566 continue;
3567 }
3568 start_new_cluster:
3569 wbp->cl_clusters[wbp->cl_number].b_addr = cl.b_addr;
3570 wbp->cl_clusters[wbp->cl_number].e_addr = cl.e_addr;
3571
3572 wbp->cl_clusters[wbp->cl_number].io_flags = 0;
3573
3574 if (flags & IO_NOCACHE)
3575 wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IONOCACHE;
3576
3577 if (bflag & CL_PASSIVE)
3578 wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IOPASSIVE;
3579
3580 wbp->cl_number++;
3581 delay_io:
3582 lck_mtx_unlock(&wbp->cl_lockw);
3583
3584 continue;
3585 issue_io:
3586 /*
3587 * we don't hold the lock at this point
3588 *
3589 * we've already dropped the current upl, so pick it back up with COPYOUT_FROM set
3590 * so that we correctly deal with a change in state of the hardware modify bit...
3591 * we do this via cluster_push_now... by passing along the IO_SYNC flag, we force
3592 * cluster_push_now to wait until all the I/Os have completed... cluster_push_now is also
3593 * responsible for generating the correct sized I/O(s)
3594 */
3595 retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg);
3596 }
3597 }
3598 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, retval, 0, io_resid, 0, 0);
3599
3600 return (retval);
3601 }
3602
3603
3604
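/*
 * cluster_read is the cached-read entry point exported to filesystems; it
 * simply wraps cluster_read_ext with no completion callback.
 *
 * Illustrative call from a filesystem's read vnop (not taken from this file):
 *
 *	error = cluster_read(vp, uio, file_size, io_flags);
 *
 * where file_size is the current EOF and io_flags may carry IO_NOCACHE,
 * IO_RAOFF, etc.
 */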
3605 int
3606 cluster_read(vnode_t vp, struct uio *uio, off_t filesize, int xflags)
3607 {
3608 return cluster_read_ext(vp, uio, filesize, xflags, NULL, NULL);
3609 }
3610
3611
3612 int
3613 cluster_read_ext(vnode_t vp, struct uio *uio, off_t filesize, int xflags, int (*callback)(buf_t, void *), void *callback_arg)
3614 {
3615 int retval = 0;
3616 int flags;
3617 user_ssize_t cur_resid;
3618 u_int32_t io_size;
3619 u_int32_t read_length = 0;
3620 int read_type = IO_COPY;
3621
3622 flags = xflags;
3623
3624 if (vp->v_flag & VNOCACHE_DATA)
3625 flags |= IO_NOCACHE;
3626 if ((vp->v_flag & VRAOFF) || speculative_reads_disabled)
3627 flags |= IO_RAOFF;
3628
3629 if (flags & IO_SKIP_ENCRYPTION)
3630 flags |= IO_ENCRYPTED;
3631
3632 /*
3633 * do a read through the cache if one of the following is true....
3634 * NOCACHE is not true
3635 * the uio request doesn't target USERSPACE
3636 * Alternatively, if IO_ENCRYPTED is set, then we want to bypass the cache as well.
3637 * Reading encrypted data from a CP filesystem should never result in the data touching
3638 * the UBC.
3639 *
3640 * otherwise, find out if we want the direct or contig variant for
3641 * the first vector in the uio request
3642 */
3643 if ( ((flags & IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) || (flags & IO_ENCRYPTED) ) {
3644
3645 retval = cluster_io_type(uio, &read_type, &read_length, 0);
3646 }
3647
3648 while ((cur_resid = uio_resid(uio)) && uio->uio_offset < filesize && retval == 0) {
3649
3650 switch (read_type) {
3651
3652 case IO_COPY:
3653 /*
3654 * make sure the uio_resid isn't too big...
3655 * internally, we want to handle all of the I/O in
3656 * chunk sizes that fit in a 32 bit int
3657 */
3658 if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE))
3659 io_size = MAX_IO_REQUEST_SIZE;
3660 else
3661 io_size = (u_int32_t)cur_resid;
3662
3663 retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
3664 break;
3665
3666 case IO_DIRECT:
3667 retval = cluster_read_direct(vp, uio, filesize, &read_type, &read_length, flags, callback, callback_arg);
3668 break;
3669
3670 case IO_CONTIG:
3671 retval = cluster_read_contig(vp, uio, filesize, &read_type, &read_length, callback, callback_arg, flags);
3672 break;
3673
3674 case IO_UNKNOWN:
3675 retval = cluster_io_type(uio, &read_type, &read_length, 0);
3676 break;
3677 }
3678 }
3679 return (retval);
3680 }
3681
3682
3683
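/*
 * cluster_read_upl_release aborts the given page range of the upl without
 * modifying its contents; when take_reference is set, UPL_ABORT_REFERENCE
 * is added so the VM treats the pages as recently referenced when they are
 * released.
 */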
3684 static void
3685 cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference)
3686 {
3687 int range;
3688 int abort_flags = UPL_ABORT_FREE_ON_EMPTY;
3689
3690 if ((range = last_pg - start_pg)) {
3691 if (take_reference)
3692 abort_flags |= UPL_ABORT_REFERENCE;
3693
3694 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, range * PAGE_SIZE, abort_flags);
3695 }
3696 }
3697
3698
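/*
 * cluster_read_copy implements the cached (buffered) read path: it walks the
 * request in UPL-sized chunks, copies any pages already valid in the UBC via
 * cluster_copy_ubc_data_internal / cluster_copy_upl_data, issues asynchronous
 * cluster_io reads for the invalid pages, and drives the read-ahead and
 * prefetch machinery as the copy progresses.
 */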
3699 static int
3700 cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
3701 {
3702 upl_page_info_t *pl;
3703 upl_t upl;
3704 vm_offset_t upl_offset;
3705 u_int32_t upl_size;
3706 off_t upl_f_offset;
3707 int start_offset;
3708 int start_pg;
3709 int last_pg;
3710 int uio_last = 0;
3711 int pages_in_upl;
3712 off_t max_size;
3713 off_t last_ioread_offset;
3714 off_t last_request_offset;
3715 kern_return_t kret;
3716 int error = 0;
3717 int retval = 0;
3718 u_int32_t size_of_prefetch;
3719 u_int32_t xsize;
3720 u_int32_t io_size;
3721 u_int32_t max_rd_size;
3722 u_int32_t max_io_size;
3723 u_int32_t max_prefetch;
3724 u_int rd_ahead_enabled = 1;
3725 u_int prefetch_enabled = 1;
3726 struct cl_readahead * rap;
3727 struct clios iostate;
3728 struct cl_extent extent;
3729 int bflag;
3730 int take_reference = 1;
3731 int policy = IOPOL_DEFAULT;
3732 boolean_t iolock_inited = FALSE;
3733
3734 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START,
3735 (int)uio->uio_offset, io_req_size, (int)filesize, flags, 0);
3736
3737 if (flags & IO_ENCRYPTED) {
3738 panic ("encrypted blocks will hit UBC!");
3739 }
3740
3741 policy = throttle_get_io_policy(NULL);
3742
3743 if (policy == THROTTLE_LEVEL_TIER3 || policy == THROTTLE_LEVEL_TIER2 || (flags & IO_NOCACHE))
3744 take_reference = 0;
3745
3746 if (flags & IO_PASSIVE)
3747 bflag = CL_PASSIVE;
3748 else
3749 bflag = 0;
3750
3751 if (flags & IO_NOCACHE)
3752 bflag |= CL_NOCACHE;
3753
3754 if (flags & IO_SKIP_ENCRYPTION)
3755 bflag |= CL_ENCRYPTED;
3756
3757 max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
3758 max_prefetch = MAX_PREFETCH(vp, max_io_size, (vp->v_mount->mnt_kern_flag & MNTK_SSD));
3759 max_rd_size = max_prefetch;
3760
3761 last_request_offset = uio->uio_offset + io_req_size;
3762
3763 if (last_request_offset > filesize)
3764 last_request_offset = filesize;
3765
3766 if ((flags & (IO_RAOFF|IO_NOCACHE)) || ((last_request_offset & ~PAGE_MASK_64) == (uio->uio_offset & ~PAGE_MASK_64))) {
3767 rd_ahead_enabled = 0;
3768 rap = NULL;
3769 } else {
3770 if (cluster_is_throttled(vp)) {
3771 /*
3772 * we're in the throttle window, at the very least
3773 * we want to limit the size of the I/O we're about
3774 * to issue
3775 */
3776 rd_ahead_enabled = 0;
3777 prefetch_enabled = 0;
3778
3779 max_rd_size = THROTTLE_MAX_IOSIZE;
3780 }
3781 if ((rap = cluster_get_rap(vp)) == NULL)
3782 rd_ahead_enabled = 0;
3783 else {
3784 extent.b_addr = uio->uio_offset / PAGE_SIZE_64;
3785 extent.e_addr = (last_request_offset - 1) / PAGE_SIZE_64;
3786 }
3787 }
3788 if (rap != NULL && rap->cl_ralen && (rap->cl_lastr == extent.b_addr || (rap->cl_lastr + 1) == extent.b_addr)) {
3789 /*
3790 * determine if we already have a read-ahead in the pipe courtesy of the
3791 * last read system call that was issued...
3792 * if so, pick up its extent to determine where we should start
3793 * with respect to any read-ahead that might be necessary to
3794 * garner all the data needed to complete this read system call
3795 */
3796 last_ioread_offset = (rap->cl_maxra * PAGE_SIZE_64) + PAGE_SIZE_64;
3797
3798 if (last_ioread_offset < uio->uio_offset)
3799 last_ioread_offset = (off_t)0;
3800 else if (last_ioread_offset > last_request_offset)
3801 last_ioread_offset = last_request_offset;
3802 } else
3803 last_ioread_offset = (off_t)0;
3804
3805 while (io_req_size && uio->uio_offset < filesize && retval == 0) {
3806
3807 max_size = filesize - uio->uio_offset;
3808
3809 if ((off_t)(io_req_size) < max_size)
3810 io_size = io_req_size;
3811 else
3812 io_size = max_size;
3813
3814 if (!(flags & IO_NOCACHE)) {
3815
3816 while (io_size) {
3817 u_int32_t io_resid;
3818 u_int32_t io_requested;
3819
3820 /*
3821 * if we keep finding the pages we need already in the cache, then
3822 * don't bother to call cluster_read_prefetch since it costs CPU cycles
3823 * to determine that we have all the pages we need... once we miss in
3824 * the cache and have issued an I/O, then we'll assume that we're likely
3825 * to continue to miss in the cache and it's to our advantage to try and prefetch
3826 */
3827 if (last_request_offset && last_ioread_offset && (size_of_prefetch = (last_request_offset - last_ioread_offset))) {
3828 if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) {
3829 /*
3830 * we've already issued I/O for this request and
3831 * there's still work to do and
3832 * our prefetch stream is running dry, so issue a
3833 * pre-fetch I/O... the I/O latency will overlap
3834 * with the copying of the data
3835 */
3836 if (size_of_prefetch > max_rd_size)
3837 size_of_prefetch = max_rd_size;
3838
3839 size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
3840
3841 last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
3842
3843 if (last_ioread_offset > last_request_offset)
3844 last_ioread_offset = last_request_offset;
3845 }
3846 }
3847 /*
3848 * limit the size of the copy we're about to do so that
3849 * we can notice that our I/O pipe is running dry and
3850 * get the next I/O issued before it does go dry
3851 */
3852 if (last_ioread_offset && io_size > (max_io_size / 4))
3853 io_resid = (max_io_size / 4);
3854 else
3855 io_resid = io_size;
3856
3857 io_requested = io_resid;
3858
3859 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_resid, 0, take_reference);
3860
3861 xsize = io_requested - io_resid;
3862
3863 io_size -= xsize;
3864 io_req_size -= xsize;
3865
3866 if (retval || io_resid)
3867 /*
3868 * if we run into a real error or
3869 * a page that is not in the cache
3870 * we need to leave streaming mode
3871 */
3872 break;
3873
3874 if (rd_ahead_enabled && (io_size == 0 || last_ioread_offset == last_request_offset)) {
3875 /*
3876 * we're already finished the I/O for this read request
3877 * let's see if we should do a read-ahead
3878 */
3879 cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
3880 }
3881 }
3882 if (retval)
3883 break;
3884 if (io_size == 0) {
3885 if (rap != NULL) {
3886 if (extent.e_addr < rap->cl_lastr)
3887 rap->cl_maxra = 0;
3888 rap->cl_lastr = extent.e_addr;
3889 }
3890 break;
3891 }
3892 /*
3893 * recompute max_size since cluster_copy_ubc_data_internal
3894 * may have advanced uio->uio_offset
3895 */
3896 max_size = filesize - uio->uio_offset;
3897 }
3898
3899 iostate.io_completed = 0;
3900 iostate.io_issued = 0;
3901 iostate.io_error = 0;
3902 iostate.io_wanted = 0;
3903
3904 if ( (flags & IO_RETURN_ON_THROTTLE) ) {
3905 if (cluster_is_throttled(vp) == THROTTLE_NOW) {
3906 if ( !cluster_io_present_in_BC(vp, uio->uio_offset)) {
3907 /*
3908 * we're in the throttle window and at least 1 I/O
3909 * has already been issued by a throttleable thread
3910 * in this window, so return with EAGAIN to indicate
3911 * to the FS issuing the cluster_read call that it
3912 * should now throttle after dropping any locks
3913 */
3914 throttle_info_update_by_mount(vp->v_mount);
3915
3916 retval = EAGAIN;
3917 break;
3918 }
3919 }
3920 }
3921
3922 /*
3923 * compute the size of the upl needed to encompass
3924 * the requested read... limit each call to cluster_io
3925 * to the maximum UPL size... cluster_io will clip if
3926 * this exceeds the maximum io_size for the device,
3927 * make sure to account for
3928 * a starting offset that's not page aligned
3929 */
3930 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
3931 upl_f_offset = uio->uio_offset - (off_t)start_offset;
3932
3933 if (io_size > max_rd_size)
3934 io_size = max_rd_size;
3935
3936 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
3937
3938 if (flags & IO_NOCACHE) {
3939 if (upl_size > max_io_size)
3940 upl_size = max_io_size;
3941 } else {
3942 if (upl_size > max_io_size / 4) {
3943 upl_size = max_io_size / 4;
3944 upl_size &= ~PAGE_MASK;
3945
3946 if (upl_size == 0)
3947 upl_size = PAGE_SIZE;
3948 }
3949 }
3950 pages_in_upl = upl_size / PAGE_SIZE;
3951
3952 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START,
3953 upl, (int)upl_f_offset, upl_size, start_offset, 0);
3954
3955 kret = ubc_create_upl(vp,
3956 upl_f_offset,
3957 upl_size,
3958 &upl,
3959 &pl,
3960 UPL_FILE_IO | UPL_SET_LITE);
3961 if (kret != KERN_SUCCESS)
3962 panic("cluster_read_copy: failed to get pagelist");
3963
3964 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_END,
3965 upl, (int)upl_f_offset, upl_size, start_offset, 0);
3966
3967 /*
3968 * scan from the beginning of the upl looking for the first
3969 * non-valid page.... this will become the first page in
3970 * the request we're going to make to 'cluster_io'... if all
3971 * of the pages are valid, we won't call through to 'cluster_io'
3972 */
3973 for (start_pg = 0; start_pg < pages_in_upl; start_pg++) {
3974 if (!upl_valid_page(pl, start_pg))
3975 break;
3976 }
3977
3978 /*
3979 * scan from the starting invalid page looking for a valid
3980 * page before the end of the upl is reached, if we
3981 * find one, then it will be the last page of the request to
3982 * 'cluster_io'
3983 */
3984 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
3985 if (upl_valid_page(pl, last_pg))
3986 break;
3987 }
3988
3989 if (start_pg < last_pg) {
3990 /*
3991 * we found a range of 'invalid' pages that must be filled
3992 * if the last page in this range is the last page of the file
3993 * we may have to clip the size of it to keep from reading past
3994 * the end of the last physical block associated with the file
3995 */
3996 if (iolock_inited == FALSE) {
3997 lck_mtx_init(&iostate.io_mtxp, cl_mtx_grp, cl_mtx_attr);
3998
3999 iolock_inited = TRUE;
4000 }
4001 upl_offset = start_pg * PAGE_SIZE;
4002 io_size = (last_pg - start_pg) * PAGE_SIZE;
4003
4004 if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize)
4005 io_size = filesize - (upl_f_offset + upl_offset);
4006
4007 /*
4008 * issue an asynchronous read to cluster_io
4009 */
4010
4011 error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
4012 io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg);
4013
4014 if (rap) {
4015 if (extent.e_addr < rap->cl_maxra) {
4016 /*
4017 * we've just issued a read for a block that should have been
4018 * in the cache courtesy of the read-ahead engine... something
4019 * has gone wrong with the pipeline, so reset the read-ahead
4020 * logic which will cause us to restart from scratch
4021 */
4022 rap->cl_maxra = 0;
4023 }
4024 }
4025 }
4026 if (error == 0) {
4027 /*
4028 * if the read completed successfully, or there was no I/O request
4029 * issued, then copy the data into user land via 'cluster_copy_upl_data'
4030 * we'll first add on any 'valid'
4031 * pages that were present in the upl when we acquired it.
4032 */
4033 u_int val_size;
4034
4035 for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
4036 if (!upl_valid_page(pl, uio_last))
4037 break;
4038 }
4039 if (uio_last < pages_in_upl) {
4040 /*
4041 * there were some invalid pages beyond the valid pages
4042 * that we didn't issue an I/O for, just release them
4043 * unchanged now, so that any prefetch/read-ahead can
4044 * include them
4045 */
4046 ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
4047 (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
4048 }
4049
4050 /*
4051 * compute size to transfer this round, if io_req_size is
4052 * still non-zero after this attempt, we'll loop around and
4053 * set up for another I/O.
4054 */
4055 val_size = (uio_last * PAGE_SIZE) - start_offset;
4056
4057 if (val_size > max_size)
4058 val_size = max_size;
4059
4060 if (val_size > io_req_size)
4061 val_size = io_req_size;
4062
4063 if ((uio->uio_offset + val_size) > last_ioread_offset)
4064 last_ioread_offset = uio->uio_offset + val_size;
4065
4066 if ((size_of_prefetch = (last_request_offset - last_ioread_offset)) && prefetch_enabled) {
4067
4068 if ((last_ioread_offset - (uio->uio_offset + val_size)) <= upl_size) {
4069 /*
4070 * if there's still I/O left to do for this request, and...
4071 * we're not in hard throttle mode, and...
4072 * we're close to using up the previous prefetch, then issue a
4073 * new pre-fetch I/O... the I/O latency will overlap
4074 * with the copying of the data
4075 */
4076 if (size_of_prefetch > max_rd_size)
4077 size_of_prefetch = max_rd_size;
4078
4079 size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
4080
4081 last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
4082
4083 if (last_ioread_offset > last_request_offset)
4084 last_ioread_offset = last_request_offset;
4085 }
4086
4087 } else if ((uio->uio_offset + val_size) == last_request_offset) {
4088 /*
4089 * this transfer will finish this request, so...
4090 * let's try to read ahead if we're in
4091 * a sequential access pattern and we haven't
4092 * explicitly disabled it
4093 */
4094 if (rd_ahead_enabled)
4095 cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
4096
4097 if (rap != NULL) {
4098 if (extent.e_addr < rap->cl_lastr)
4099 rap->cl_maxra = 0;
4100 rap->cl_lastr = extent.e_addr;
4101 }
4102 }
4103 if (iolock_inited == TRUE)
4104 cluster_iostate_wait(&iostate, 0, "cluster_read_copy");
4105
4106 if (iostate.io_error)
4107 error = iostate.io_error;
4108 else {
4109 u_int32_t io_requested;
4110
4111 io_requested = val_size;
4112
4113 retval = cluster_copy_upl_data(uio, upl, start_offset, (int *)&io_requested);
4114
4115 io_req_size -= (val_size - io_requested);
4116 }
4117 } else {
4118 if (iolock_inited == TRUE)
4119 cluster_iostate_wait(&iostate, 0, "cluster_read_copy");
4120 }
4121 if (start_pg < last_pg) {
4122 /*
4123 * compute the range of pages that we actually issued an I/O for
4124 * and either commit them as valid if the I/O succeeded
4125 * or abort them if the I/O failed or we're not supposed to
4126 * keep them in the cache
4127 */
4128 io_size = (last_pg - start_pg) * PAGE_SIZE;
4129
4130 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, upl, start_pg * PAGE_SIZE, io_size, error, 0);
4131
4132 if (error || (flags & IO_NOCACHE))
4133 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
4134 UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
4135 else {
4136 int commit_flags = UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY;
4137
4138 if (take_reference)
4139 commit_flags |= UPL_COMMIT_INACTIVATE;
4140 else
4141 commit_flags |= UPL_COMMIT_SPECULATE;
4142
4143 ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, commit_flags);
4144 }
4145 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, start_pg * PAGE_SIZE, io_size, error, 0);
4146 }
4147 if ((last_pg - start_pg) < pages_in_upl) {
4148 /*
4149 * the set of pages that we issued an I/O for did not encompass
4150 * the entire upl... so just release these without modifying
4151 * their state
4152 */
4153 if (error)
4154 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
4155 else {
4156
4157 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
4158 upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);
4159
4160 /*
4161 * handle any valid pages at the beginning of
4162 * the upl... release these appropriately
4163 */
4164 cluster_read_upl_release(upl, 0, start_pg, take_reference);
4165
4166 /*
4167 * handle any valid pages immediately after the
4168 * pages we issued I/O for... release these appropriately
4169 */
4170 cluster_read_upl_release(upl, last_pg, uio_last, take_reference);
4171
4172 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, -1, -1, 0, 0);
4173 }
4174 }
4175 if (retval == 0)
4176 retval = error;
4177
4178 if (io_req_size) {
4179 if (cluster_is_throttled(vp)) {
4180 /*
4181 * we're in the throttle window, at the very least
4182 * we want to limit the size of the I/O we're about
4183 * to issue
4184 */
4185 rd_ahead_enabled = 0;
4186 prefetch_enabled = 0;
4187 max_rd_size = THROTTLE_MAX_IOSIZE;
4188 } else {
4189 if (max_rd_size == THROTTLE_MAX_IOSIZE) {
4190 /*
4191 * coming out of throttled state
4192 */
4193 if (policy != THROTTLE_LEVEL_TIER3 && policy != THROTTLE_LEVEL_TIER2) {
4194 if (rap != NULL)
4195 rd_ahead_enabled = 1;
4196 prefetch_enabled = 1;
4197 }
4198 max_rd_size = max_prefetch;
4199 last_ioread_offset = 0;
4200 }
4201 }
4202 }
4203 }
4204 if (iolock_inited == TRUE) {
4205 /*
4206 * cluster_io returned an error after it
4207 * had already issued some I/O. we need
4208 * to wait for that I/O to complete before
4209 * we can destroy the iostate mutex...
4210 * 'retval' already contains the early error
4211 * so no need to pick it up from iostate.io_error
4212 */
4213 cluster_iostate_wait(&iostate, 0, "cluster_read_copy");
4214
4215 lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp);
4216 }
4217 if (rap != NULL) {
4218 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
4219 (int)uio->uio_offset, io_req_size, rap->cl_lastr, retval, 0);
4220
4221 lck_mtx_unlock(&rap->cl_lockr);
4222 } else {
4223 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
4224 (int)uio->uio_offset, io_req_size, 0, retval, 0);
4225 }
4226
4227 return (retval);
4228 }
4229
4230 /*
4231 * We don't want another read/write lock for every vnode in the system
4232 * so we keep a hash of them here. There should never be very many of
4233 * these around at any point in time.
4234 */
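/*
 * the bucket is picked by scaling the vnode pointer down by its size and
 * taking it modulo CL_DIRECT_READ_LOCK_BUCKETS; the spin lock protects the
 * hash chains while the per-vnode rw_lock is what callers actually hold
 * across the direct read.
 */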
4235 cl_direct_read_lock_t *cluster_lock_direct_read(vnode_t vp, lck_rw_type_t type)
4236 {
4237 struct cl_direct_read_locks *head
4238 = &cl_direct_read_locks[(uintptr_t)vp / sizeof(*vp)
4239 % CL_DIRECT_READ_LOCK_BUCKETS];
4240
4241 struct cl_direct_read_lock *lck, *new_lck = NULL;
4242
4243 for (;;) {
4244 lck_spin_lock(&cl_direct_read_spin_lock);
4245
4246 LIST_FOREACH(lck, head, chain) {
4247 if (lck->vp == vp) {
4248 ++lck->ref_count;
4249 lck_spin_unlock(&cl_direct_read_spin_lock);
4250 if (new_lck) {
4251 // Someone beat us to it, ditch the allocation
4252 lck_rw_destroy(&new_lck->rw_lock, cl_mtx_grp);
4253 FREE(new_lck, M_TEMP);
4254 }
4255 lck_rw_lock(&lck->rw_lock, type);
4256 return lck;
4257 }
4258 }
4259
4260 if (new_lck) {
4261 // Use the lock we allocated
4262 LIST_INSERT_HEAD(head, new_lck, chain);
4263 lck_spin_unlock(&cl_direct_read_spin_lock);
4264 lck_rw_lock(&new_lck->rw_lock, type);
4265 return new_lck;
4266 }
4267
4268 lck_spin_unlock(&cl_direct_read_spin_lock);
4269
4270 // Allocate a new lock
4271 MALLOC(new_lck, cl_direct_read_lock_t *, sizeof(*new_lck),
4272 M_TEMP, M_WAITOK);
4273 lck_rw_init(&new_lck->rw_lock, cl_mtx_grp, cl_mtx_attr);
4274 new_lck->vp = vp;
4275 new_lck->ref_count = 1;
4276
4277 // Got to go round again
4278 }
4279 }
4280
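/*
 * drop the rw_lock taken in cluster_lock_direct_read and release our
 * reference; the last reference unhashes and frees the lock structure.
 */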
4281 void cluster_unlock_direct_read(cl_direct_read_lock_t *lck)
4282 {
4283 lck_rw_done(&lck->rw_lock);
4284
4285 lck_spin_lock(&cl_direct_read_spin_lock);
4286 if (lck->ref_count == 1) {
4287 LIST_REMOVE(lck, chain);
4288 lck_spin_unlock(&cl_direct_read_spin_lock);
4289 lck_rw_destroy(&lck->rw_lock, cl_mtx_grp);
4290 FREE(lck, M_TEMP);
4291 } else {
4292 --lck->ref_count;
4293 lck_spin_unlock(&cl_direct_read_spin_lock);
4294 }
4295 }
4296
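/*
 * cluster_read_direct implements the uncached read path: the user buffer is
 * wired via vm_map_create_upl and I/O is issued directly against those pages
 * with cluster_io / vector_cluster_io, falling back to cluster_read_copy for
 * any misaligned head or tail that can't be serviced directly.
 */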
4297 static int
4298 cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
4299 int flags, int (*callback)(buf_t, void *), void *callback_arg)
4300 {
4301 upl_t upl;
4302 upl_page_info_t *pl;
4303 off_t max_io_size;
4304 vm_offset_t upl_offset, vector_upl_offset = 0;
4305 upl_size_t upl_size, vector_upl_size = 0;
4306 vm_size_t upl_needed_size;
4307 unsigned int pages_in_pl;
4308 upl_control_flags_t upl_flags;
4309 kern_return_t kret;
4310 unsigned int i;
4311 int force_data_sync;
4312 int retval = 0;
4313 int no_zero_fill = 0;
4314 int io_flag = 0;
4315 int misaligned = 0;
4316 struct clios iostate;
4317 user_addr_t iov_base;
4318 u_int32_t io_req_size;
4319 u_int32_t offset_in_file;
4320 u_int32_t offset_in_iovbase;
4321 u_int32_t io_size;
4322 u_int32_t io_min;
4323 u_int32_t xsize;
4324 u_int32_t devblocksize;
4325 u_int32_t mem_alignment_mask;
4326 u_int32_t max_upl_size;
4327 u_int32_t max_rd_size;
4328 u_int32_t max_rd_ahead;
4329 u_int32_t max_vector_size;
4330 boolean_t strict_uncached_IO = FALSE;
4331 boolean_t io_throttled = FALSE;
4332
4333 u_int32_t vector_upl_iosize = 0;
4334 int issueVectorUPL = 0,useVectorUPL = (uio->uio_iovcnt > 1);
4335 off_t v_upl_uio_offset = 0;
4336 int vector_upl_index=0;
4337 upl_t vector_upl = NULL;
4338 cl_direct_read_lock_t *lock = NULL;
4339
4340 user_addr_t orig_iov_base = 0;
4341 user_addr_t last_iov_base = 0;
4342 user_addr_t next_iov_base = 0;
4343
4344 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START,
4345 (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
4346
4347 max_upl_size = cluster_max_io_size(vp->v_mount, CL_READ);
4348
4349 max_rd_size = max_upl_size;
4350 max_rd_ahead = max_rd_size * IO_SCALE(vp, 2);
4351
4352 io_flag = CL_COMMIT | CL_READ | CL_ASYNC | CL_NOZERO | CL_DIRECT_IO;
4353
4354 if (flags & IO_PASSIVE)
4355 io_flag |= CL_PASSIVE;
4356
4357 if (flags & IO_ENCRYPTED) {
4358 io_flag |= CL_RAW_ENCRYPTED;
4359 }
4360
4361 if (flags & IO_NOCACHE) {
4362 io_flag |= CL_NOCACHE;
4363 }
4364
4365 if (flags & IO_SKIP_ENCRYPTION)
4366 io_flag |= CL_ENCRYPTED;
4367
4368 iostate.io_completed = 0;
4369 iostate.io_issued = 0;
4370 iostate.io_error = 0;
4371 iostate.io_wanted = 0;
4372
4373 lck_mtx_init(&iostate.io_mtxp, cl_mtx_grp, cl_mtx_attr);
4374
4375 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
4376 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
4377
4378 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
4379 (int)devblocksize, (int)mem_alignment_mask, 0, 0, 0);
4380
4381 if (devblocksize == 1) {
4382 /*
4383 * the AFP client advertises a devblocksize of 1
4384 * however, its BLOCKMAP routine maps to physical
4385 * blocks that are PAGE_SIZE in size...
4386 * therefore we can't ask for I/Os that aren't page aligned
4387 * or aren't multiples of PAGE_SIZE in size
4388 * by setting devblocksize to PAGE_SIZE, we re-instate
4389 * the old behavior we had before the mem_alignment_mask
4390 * changes went in...
4391 */
4392 devblocksize = PAGE_SIZE;
4393 }
4394
4395 strict_uncached_IO = ubc_strict_uncached_IO(vp);
4396
4397 orig_iov_base = uio_curriovbase(uio);
4398 last_iov_base = orig_iov_base;
4399
4400 next_dread:
4401 io_req_size = *read_length;
4402 iov_base = uio_curriovbase(uio);
4403
4404 offset_in_file = (u_int32_t)uio->uio_offset & (devblocksize - 1);
4405 offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask;
4406
4407 if (offset_in_file || offset_in_iovbase) {
4408 /*
4409 * one of the 2 important offsets is misaligned
4410 * so fire an I/O through the cache for this entire vector
4411 */
4412 misaligned = 1;
4413 }
4414 if (iov_base & (devblocksize - 1)) {
4415 /*
4416 * the offset in memory must be on a device block boundary
4417 * so that we can guarantee that we can generate an
4418 * I/O that ends on a page boundary in cluster_io
4419 */
4420 misaligned = 1;
4421 }
4422
4423 max_io_size = filesize - uio->uio_offset;
4424
4425 /*
4426 * The user must request IO in aligned chunks. If the
4427 * offset into the file is bad, or the userland pointer
4428 * is non-aligned, then we cannot service the encrypted IO request.
4429 */
4430 if (flags & IO_ENCRYPTED) {
4431 if (misaligned || (io_req_size & (devblocksize - 1)))
4432 retval = EINVAL;
4433
4434 max_io_size = roundup(max_io_size, devblocksize);
4435 }
4436
4437 if ((off_t)io_req_size > max_io_size)
4438 io_req_size = max_io_size;
4439
4440 /*
4441 * When we get to this point, we know...
4442 * -- the offset into the file is on a devblocksize boundary
4443 */
4444
4445 while (io_req_size && retval == 0) {
4446 u_int32_t io_start;
4447
4448 if (cluster_is_throttled(vp)) {
4449 /*
4450 * we're in the throttle window, at the very least
4451 * we want to limit the size of the I/O we're about
4452 * to issue
4453 */
4454 max_rd_size = THROTTLE_MAX_IOSIZE;
4455 max_rd_ahead = THROTTLE_MAX_IOSIZE - 1;
4456 max_vector_size = THROTTLE_MAX_IOSIZE;
4457 } else {
4458 max_rd_size = max_upl_size;
4459 max_rd_ahead = max_rd_size * IO_SCALE(vp, 2);
4460 max_vector_size = MAX_VECTOR_UPL_SIZE;
4461 }
4462 io_start = io_size = io_req_size;
4463
4464 /*
4465 * First look for pages already in the cache
4466 * and move them to user space. But only do this
4467 * check if we are not retrieving encrypted data directly
4468 * from the filesystem; those blocks should never
4469 * be in the UBC.
4470 *
4471 * cluster_copy_ubc_data returns the resid
4472 * in io_size
4473 */
4474 if ((strict_uncached_IO == FALSE) && ((flags & IO_ENCRYPTED) == 0)) {
4475 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_size, 0, 0);
4476 }
4477 /*
4478 * calculate the number of bytes actually copied
4479 * starting size - residual
4480 */
4481 xsize = io_start - io_size;
4482
4483 io_req_size -= xsize;
4484
4485 if(useVectorUPL && (xsize || (iov_base & PAGE_MASK))) {
4486 /*
4487 * We found something in the cache or we have an iov_base that's not
4488 * page-aligned.
4489 *
4490 * Issue all I/O's that have been collected within this Vectored UPL.
4491 */
4492 if(vector_upl_index) {
4493 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4494 reset_vector_run_state();
4495 }
4496
4497 if(xsize)
4498 useVectorUPL = 0;
4499
4500 /*
4501 * After this point, if we are using the Vector UPL path and the base is
4502 * not page-aligned then the UPL with that base will be the first in the vector UPL.
4503 */
4504 }
4505
4506 /*
4507 * check to see if we are finished with this request.
4508 *
4509 * If we satisfied this IO already, then io_req_size will be 0.
4510 * Otherwise, see if the IO was mis-aligned and needs to go through
4511 * the UBC to deal with the 'tail'.
4512 *
4513 */
4514 if (io_req_size == 0 || (misaligned)) {
4515 /*
4516 * see if there's another uio vector to
4517 * process that's of type IO_DIRECT
4518 *
4519 * break out of while loop to get there
4520 */
4521 break;
4522 }
4523 /*
4524 * assume the request ends on a device block boundary
4525 */
4526 io_min = devblocksize;
4527
4528 /*
4529 * we can handle I/O's in multiples of the device block size
4530 * however, if io_size isn't a multiple of devblocksize we
4531 * want to clip it back to the nearest page boundary since
4532 * we are going to have to go through cluster_read_copy to
4533 * deal with the 'overhang'... by clipping it to a PAGE_SIZE
4534 * multiple, we avoid asking the drive for the same physical
4535 * blocks twice.. once for the partial page at the end of the
4536 * request and a 2nd time for the page we read into the cache
4537 * (which overlaps the end of the direct read) in order to
4538 * get at the overhang bytes
4539 */
4540 if (io_size & (devblocksize - 1)) {
4541 assert(!(flags & IO_ENCRYPTED));
4542 /*
4543 * Clip the request to the previous page size boundary
4544 * since the request does NOT end on a device block boundary
4545 */
4546 io_size &= ~PAGE_MASK;
4547 io_min = PAGE_SIZE;
4548 }
4549 if (retval || io_size < io_min) {
4550 /*
4551 * either an error or we only have the tail left to
4552 * complete via the copy path...
4553 * we may have already spun some portion of this request
4554 * off as async requests... we need to wait for the I/O
4555 * to complete before returning
4556 */
4557 goto wait_for_dreads;
4558 }
4559
4560 /*
4561 * Don't re-check the UBC data if we are looking for uncached IO
4562 * or asking for encrypted blocks.
4563 */
4564 if ((strict_uncached_IO == FALSE) && ((flags & IO_ENCRYPTED) == 0)) {
4565
4566 if ((xsize = io_size) > max_rd_size)
4567 xsize = max_rd_size;
4568
4569 io_size = 0;
4570
4571 if (!lock) {
4572 /*
4573 * We hold a lock here between the time we check the
4574 * cache and the time we issue I/O. This saves us
4575 * from having to lock the pages in the cache. Not
4576 * all clients will care about this lock but some
4577 * clients may want to guarantee stability between
4578 * here and when the I/O is issued in which case they
4579 * will take the lock exclusively.
4580 */
4581 lock = cluster_lock_direct_read(vp, LCK_RW_TYPE_SHARED);
4582 }
4583
4584 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, (int *)&io_size);
4585
4586 if (io_size == 0) {
4587 /*
4588 * a page must have just come into the cache
4589 * since the first page in this range is no
4590 * longer absent, go back and re-evaluate
4591 */
4592 continue;
4593 }
4594 }
4595 if ( (flags & IO_RETURN_ON_THROTTLE) ) {
4596 if (cluster_is_throttled(vp) == THROTTLE_NOW) {
4597 if ( !cluster_io_present_in_BC(vp, uio->uio_offset)) {
4598 /*
4599 * we're in the throttle window and at least 1 I/O
4600 * has already been issued by a throttleable thread
4601 * in this window, so return with EAGAIN to indicate
4602 * to the FS issuing the cluster_read call that it
4603 * should now throttle after dropping any locks
4604 */
4605 throttle_info_update_by_mount(vp->v_mount);
4606
4607 io_throttled = TRUE;
4608 goto wait_for_dreads;
4609 }
4610 }
4611 }
4612 if (io_size > max_rd_size)
4613 io_size = max_rd_size;
4614
4615 iov_base = uio_curriovbase(uio);
4616
4617 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
4618 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
4619
4620 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
4621 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
4622
4623 if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0))
4624 no_zero_fill = 1;
4625 else
4626 no_zero_fill = 0;
4627
4628 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
4629 for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
4630 pages_in_pl = 0;
4631 upl_size = upl_needed_size;
4632 upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE
4633 | UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE);
4634 if (no_zero_fill)
4635 upl_flags |= UPL_NOZEROFILL;
4636 if (force_data_sync)
4637 upl_flags |= UPL_FORCE_DATA_SYNC;
4638
4639 kret = vm_map_create_upl(map,
4640 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
4641 &upl_size, &upl, NULL, &pages_in_pl, &upl_flags);
4642
4643 if (kret != KERN_SUCCESS) {
4644 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
4645 (int)upl_offset, upl_size, io_size, kret, 0);
4646 /*
4647 * failed to get pagelist
4648 *
4649 * we may have already spun some portion of this request
4650 * off as async requests... we need to wait for the I/O
4651 * to complete before returning
4652 */
4653 goto wait_for_dreads;
4654 }
4655 pages_in_pl = upl_size / PAGE_SIZE;
4656 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
4657
4658 for (i = 0; i < pages_in_pl; i++) {
4659 if (!upl_page_present(pl, i))
4660 break;
4661 }
4662 if (i == pages_in_pl)
4663 break;
4664
4665 ubc_upl_abort(upl, 0);
4666 }
4667 if (force_data_sync >= 3) {
4668 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
4669 (int)upl_offset, upl_size, io_size, kret, 0);
4670
4671 goto wait_for_dreads;
4672 }
4673 /*
4674 * Consider the possibility that upl_size wasn't satisfied.
4675 */
4676 if (upl_size < upl_needed_size) {
4677 if (upl_size && upl_offset == 0)
4678 io_size = upl_size;
4679 else
4680 io_size = 0;
4681 }
4682 if (io_size == 0) {
4683 ubc_upl_abort(upl, 0);
4684 goto wait_for_dreads;
4685 }
4686 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
4687 (int)upl_offset, upl_size, io_size, kret, 0);
4688
4689 if(useVectorUPL) {
4690 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
4691 if(end_off)
4692 issueVectorUPL = 1;
4693 /*
4694 * After this point, if we are using a vector UPL, then
4695 * either all the UPL elements end on a page boundary OR
4696 * this UPL is the last element because it does not end
4697 * on a page boundary.
4698 */
4699 }
4700
4701 /*
4702 * request asynchronously so that we can overlap
4703 * the preparation of the next I/O
4704 * if there are already too many outstanding reads
4705 * wait until some have completed before issuing the next read
4706 */
4707 cluster_iostate_wait(&iostate, max_rd_ahead, "cluster_read_direct");
4708
4709 if (iostate.io_error) {
4710 /*
4711 * one of the earlier reads we issued ran into a hard error
4712 * don't issue any more reads, cleanup the UPL
4713 * that was just created but not used, then
4714 * go wait for any other reads to complete before
4715 * returning the error to the caller
4716 */
4717 ubc_upl_abort(upl, 0);
4718
4719 goto wait_for_dreads;
4720 }
4721 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START,
4722 upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
4723
4724 if(!useVectorUPL) {
4725 if (no_zero_fill)
4726 io_flag &= ~CL_PRESERVE;
4727 else
4728 io_flag |= CL_PRESERVE;
4729
4730 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4731
4732 } else {
4733
4734 if(!vector_upl_index) {
4735 vector_upl = vector_upl_create(upl_offset);
4736 v_upl_uio_offset = uio->uio_offset;
4737 vector_upl_offset = upl_offset;
4738 }
4739
4740 vector_upl_set_subupl(vector_upl,upl, upl_size);
4741 vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
4742 vector_upl_index++;
4743 vector_upl_size += upl_size;
4744 vector_upl_iosize += io_size;
4745
4746 if(issueVectorUPL || vector_upl_index == MAX_VECTOR_UPL_ELEMENTS || vector_upl_size >= max_vector_size) {
4747 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4748 reset_vector_run_state();
4749 }
4750 }
4751 last_iov_base = iov_base + io_size;
4752
4753 if (lock) {
4754 // We don't need to wait for the I/O to complete
4755 cluster_unlock_direct_read(lock);
4756 lock = NULL;
4757 }
4758
4759 /*
4760 * update the uio structure
4761 */
4762 if ((flags & IO_ENCRYPTED) && (max_io_size < io_size)) {
4763 uio_update(uio, (user_size_t)max_io_size);
4764 }
4765 else {
4766 uio_update(uio, (user_size_t)io_size);
4767 }
4768
4769 io_req_size -= io_size;
4770
4771 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END,
4772 upl, (int)uio->uio_offset, io_req_size, retval, 0);
4773
4774 } /* end while */
4775
4776 if (retval == 0 && iostate.io_error == 0 && io_req_size == 0 && uio->uio_offset < filesize) {
4777
4778 retval = cluster_io_type(uio, read_type, read_length, 0);
4779
4780 if (retval == 0 && *read_type == IO_DIRECT) {
4781
4782 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE,
4783 (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
4784
4785 goto next_dread;
4786 }
4787 }
4788
4789 wait_for_dreads:
4790
4791 if(retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) {
4792 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4793 reset_vector_run_state();
4794 }
4795
4796 // We don't need to wait for the I/O to complete
4797 if (lock)
4798 cluster_unlock_direct_read(lock);
4799
4800 /*
4801 * make sure all async reads that are part of this stream
4802 * have completed before we return
4803 */
4804 cluster_iostate_wait(&iostate, 0, "cluster_read_direct");
4805
4806 if (iostate.io_error)
4807 retval = iostate.io_error;
4808
4809 lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp);
4810
4811 if (io_throttled == TRUE && retval == 0)
4812 retval = EAGAIN;
4813
4814 for (next_iov_base = orig_iov_base; next_iov_base < last_iov_base; next_iov_base += PAGE_SIZE) {
4815 /*
4816 * This is specifically done for pmap accounting purposes.
4817 * vm_pre_fault() will call vm_fault() to enter the page into
4818 * the pmap if there isn't _a_ physical page for that VA already.
4819 */
4820 vm_pre_fault(vm_map_trunc_page(next_iov_base, PAGE_MASK));
4821 }
4822
4823 if (io_req_size && retval == 0) {
4824 /*
4825 * we couldn't handle the tail of this request in DIRECT mode
4826 * so fire it through the copy path
4827 */
4828 retval = cluster_read_copy(vp, uio, io_req_size, filesize, flags, callback, callback_arg);
4829
4830 *read_type = IO_UNKNOWN;
4831 }
4832 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
4833 (int)uio->uio_offset, (int)uio_resid(uio), io_req_size, retval, 0);
4834
4835 return (retval);
4836 }
4837
4838
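/*
 * cluster_read_contig handles reads into a physically contiguous target
 * buffer (CL_DEV_MEMORY): any head or tail that isn't devblocksize aligned
 * is transferred with cluster_align_phys_io, and the aligned middle is
 * issued asynchronously in MAX_IO_CONTIG_SIZE chunks.
 */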
4839 static int
4840 cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
4841 int (*callback)(buf_t, void *), void *callback_arg, int flags)
4842 {
4843 upl_page_info_t *pl;
4844 upl_t upl[MAX_VECTS];
4845 vm_offset_t upl_offset;
4846 addr64_t dst_paddr = 0;
4847 user_addr_t iov_base;
4848 off_t max_size;
4849 upl_size_t upl_size;
4850 vm_size_t upl_needed_size;
4851 mach_msg_type_number_t pages_in_pl;
4852 upl_control_flags_t upl_flags;
4853 kern_return_t kret;
4854 struct clios iostate;
4855 int error= 0;
4856 int cur_upl = 0;
4857 int num_upl = 0;
4858 int n;
4859 u_int32_t xsize;
4860 u_int32_t io_size;
4861 u_int32_t devblocksize;
4862 u_int32_t mem_alignment_mask;
4863 u_int32_t tail_size = 0;
4864 int bflag;
4865
4866 if (flags & IO_PASSIVE)
4867 bflag = CL_PASSIVE;
4868 else
4869 bflag = 0;
4870
4871 if (flags & IO_NOCACHE)
4872 bflag |= CL_NOCACHE;
4873
4874 /*
4875 * When we enter this routine, we know
4876 * -- the read_length will not exceed the current iov_len
4877 * -- the target address is physically contiguous for read_length
4878 */
4879 cluster_syncup(vp, filesize, callback, callback_arg, PUSH_SYNC);
4880
4881 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
4882 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
4883
4884 iostate.io_completed = 0;
4885 iostate.io_issued = 0;
4886 iostate.io_error = 0;
4887 iostate.io_wanted = 0;
4888
4889 lck_mtx_init(&iostate.io_mtxp, cl_mtx_grp, cl_mtx_attr);
4890
4891 next_cread:
4892 io_size = *read_length;
4893
4894 max_size = filesize - uio->uio_offset;
4895
4896 if (io_size > max_size)
4897 io_size = max_size;
4898
4899 iov_base = uio_curriovbase(uio);
4900
4901 upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
4902 upl_needed_size = upl_offset + io_size;
4903
4904 pages_in_pl = 0;
4905 upl_size = upl_needed_size;
4906 upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE
4907 | UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE);
4908
4909
4910 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_START,
4911 (int)upl_offset, (int)upl_size, (int)iov_base, io_size, 0);
4912
4913 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
4914 kret = vm_map_get_upl(map,
4915 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
4916 &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
4917
4918 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_END,
4919 (int)upl_offset, upl_size, io_size, kret, 0);
4920
4921 if (kret != KERN_SUCCESS) {
4922 /*
4923 * failed to get pagelist
4924 */
4925 error = EINVAL;
4926 goto wait_for_creads;
4927 }
4928 num_upl++;
4929
4930 if (upl_size < upl_needed_size) {
4931 /*
4932 * The upl_size wasn't satisfied.
4933 */
4934 error = EINVAL;
4935 goto wait_for_creads;
4936 }
4937 pl = ubc_upl_pageinfo(upl[cur_upl]);
4938
4939 dst_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)upl_offset;
4940
4941 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
4942 u_int32_t head_size;
4943
4944 head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1));
4945
4946 if (head_size > io_size)
4947 head_size = io_size;
4948
4949 error = cluster_align_phys_io(vp, uio, dst_paddr, head_size, CL_READ, callback, callback_arg);
4950
4951 if (error)
4952 goto wait_for_creads;
4953
4954 upl_offset += head_size;
4955 dst_paddr += head_size;
4956 io_size -= head_size;
4957
4958 iov_base += head_size;
4959 }
4960 if ((u_int32_t)iov_base & mem_alignment_mask) {
4961 /*
4962 * request isn't aligned to a memory boundary
4963 * the underlying DMA engine can handle...
4964 * return an error instead of going through
4965 * the slow copy path since the intent of this
4966 * path is direct I/O to device memory
4967 */
4968 error = EINVAL;
4969 goto wait_for_creads;
4970 }
4971
4972 tail_size = io_size & (devblocksize - 1);
4973
4974 io_size -= tail_size;
4975
4976 while (io_size && error == 0) {
4977
4978 if (io_size > MAX_IO_CONTIG_SIZE)
4979 xsize = MAX_IO_CONTIG_SIZE;
4980 else
4981 xsize = io_size;
4982 /*
4983 * request asynchronously so that we can overlap
4984 * the preparation of the next I/O... we'll do
4985 * the commit after all the I/O has completed
4986 * since it's all issued against the same UPL
4987 * if there are already too many outstanding reads
4988 * wait until some have completed before issuing the next
4989 */
4990 cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_read_contig");
4991
4992 if (iostate.io_error) {
4993 /*
4994 * one of the earlier reads we issued ran into a hard error
4995 * don't issue any more reads...
4996 * go wait for any other reads to complete before
4997 * returning the error to the caller
4998 */
4999 goto wait_for_creads;
5000 }
5001 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, xsize,
5002 CL_READ | CL_NOZERO | CL_DEV_MEMORY | CL_ASYNC | bflag,
5003 (buf_t)NULL, &iostate, callback, callback_arg);
5004 /*
5005 * The cluster_io read was issued successfully,
5006 * update the uio structure
5007 */
5008 if (error == 0) {
5009 uio_update(uio, (user_size_t)xsize);
5010
5011 dst_paddr += xsize;
5012 upl_offset += xsize;
5013 io_size -= xsize;
5014 }
5015 }
5016 if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS && uio->uio_offset < filesize) {
5017
5018 error = cluster_io_type(uio, read_type, read_length, 0);
5019
5020 if (error == 0 && *read_type == IO_CONTIG) {
5021 cur_upl++;
5022 goto next_cread;
5023 }
5024 } else
5025 *read_type = IO_UNKNOWN;
5026
5027 wait_for_creads:
5028 /*
5029 * make sure all async reads that are part of this stream
5030 * have completed before we proceed
5031 */
5032 cluster_iostate_wait(&iostate, 0, "cluster_read_contig");
5033
5034 if (iostate.io_error)
5035 error = iostate.io_error;
5036
5037 lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp);
5038
5039 if (error == 0 && tail_size)
5040 error = cluster_align_phys_io(vp, uio, dst_paddr, tail_size, CL_READ, callback, callback_arg);
5041
5042 for (n = 0; n < num_upl; n++)
5043 /*
5044 * just release our hold on each physically contiguous
5045 * region without changing any state
5046 */
5047 ubc_upl_abort(upl[n], 0);
5048
5049 return (error);
5050 }
5051
5052
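/*
 * cluster_io_type examines the current uio vector and classifies how it
 * should be serviced: IO_CONTIG if the backing memory is physically
 * contiguous, IO_DIRECT if the vector is at least min_length bytes,
 * IO_COPY otherwise, and IO_UNKNOWN when the uio is exhausted.
 */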
5053 static int
5054 cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length)
5055 {
5056 user_size_t iov_len;
5057 user_addr_t iov_base = 0;
5058 upl_t upl;
5059 upl_size_t upl_size;
5060 upl_control_flags_t upl_flags;
5061 int retval = 0;
5062
5063 /*
5064 * skip over any empty vectors
5065 */
5066 uio_update(uio, (user_size_t)0);
5067
5068 iov_len = uio_curriovlen(uio);
5069
5070 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_START, uio, (int)iov_len, 0, 0, 0);
5071
5072 if (iov_len) {
5073 iov_base = uio_curriovbase(uio);
5074 /*
5075 * make sure the size of the vector isn't too big...
5076 * internally, we want to handle all of the I/O in
5077 * chunk sizes that fit in a 32 bit int
5078 */
5079 if (iov_len > (user_size_t)MAX_IO_REQUEST_SIZE)
5080 upl_size = MAX_IO_REQUEST_SIZE;
5081 else
5082 upl_size = (u_int32_t)iov_len;
5083
5084 upl_flags = UPL_QUERY_OBJECT_TYPE | UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE);
5085
5086 vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
5087 if ((vm_map_get_upl(map,
5088 (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
5089 &upl_size, &upl, NULL, NULL, &upl_flags, 0)) != KERN_SUCCESS) {
5090 /*
5091 * the user app must have passed in an invalid address
5092 */
5093 retval = EFAULT;
5094 }
5095 if (upl_size == 0)
5096 retval = EFAULT;
5097
5098 *io_length = upl_size;
5099
5100 if (upl_flags & UPL_PHYS_CONTIG)
5101 *io_type = IO_CONTIG;
5102 else if (iov_len >= min_length)
5103 *io_type = IO_DIRECT;
5104 else
5105 *io_type = IO_COPY;
5106 } else {
5107 /*
5108 * nothing left to do for this uio
5109 */
5110 *io_length = 0;
5111 *io_type = IO_UNKNOWN;
5112 }
5113 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_END, iov_base, *io_type, *io_length, retval, 0);
5114
5115 return (retval);
5116 }
5117
5118
5119 /*
5120 * generate advisory I/O's in the largest chunks possible
5121 * the completed pages will be released into the VM cache
5122 */
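/*
 * Illustrative call (not taken from this file): a filesystem responding to
 * an F_RDADVISE-style hint might issue
 *
 *	advisory_read(vp, file_size, hint_offset, hint_length);
 *
 * to pre-populate the cache for the given range.
 */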
5123 int
5124 advisory_read(vnode_t vp, off_t filesize, off_t f_offset, int resid)
5125 {
5126 return advisory_read_ext(vp, filesize, f_offset, resid, NULL, NULL, CL_PASSIVE);
5127 }
5128
5129 int
5130 advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
5131 {
5132 upl_page_info_t *pl;
5133 upl_t upl;
5134 vm_offset_t upl_offset;
5135 int upl_size;
5136 off_t upl_f_offset;
5137 int start_offset;
5138 int start_pg;
5139 int last_pg;
5140 int pages_in_upl;
5141 off_t max_size;
5142 int io_size;
5143 kern_return_t kret;
5144 int retval = 0;
5145 int issued_io;
5146 int skip_range;
5147 uint32_t max_io_size;
5148
5149
5150 if ( !UBCINFOEXISTS(vp))
5151 return(EINVAL);
5152
5153 if (resid < 0)
5154 return(EINVAL);
5155
5156 max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
5157
5158 if ((vp->v_mount->mnt_kern_flag & MNTK_SSD) && !ignore_is_ssd) {
5159 if (max_io_size > speculative_prefetch_max_iosize)
5160 max_io_size = speculative_prefetch_max_iosize;
5161 }
5162
5163 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_START,
5164 (int)f_offset, resid, (int)filesize, 0, 0);
5165
5166 while (resid && f_offset < filesize && retval == 0) {
5167 /*
5168 * compute the size of the upl needed to encompass
5169 * the requested read... limit each call to cluster_io
5170 * to the maximum UPL size... cluster_io will clip if
5171 * this exceeds the maximum io_size for the device,
5172 * make sure to account for
5173 * a starting offset that's not page aligned
5174 */
5175 start_offset = (int)(f_offset & PAGE_MASK_64);
5176 upl_f_offset = f_offset - (off_t)start_offset;
5177 max_size = filesize - f_offset;
5178
5179 if (resid < max_size)
5180 io_size = resid;
5181 else
5182 io_size = max_size;
5183
5184 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
5185 if ((uint32_t)upl_size > max_io_size)
5186 upl_size = max_io_size;
5187
5188 skip_range = 0;
5189 /*
5190 * return the number of contiguously present pages in the cache
5191 * starting at upl_f_offset within the file
5192 */
5193 ubc_range_op(vp, upl_f_offset, upl_f_offset + upl_size, UPL_ROP_PRESENT, &skip_range);
5194
5195 if (skip_range) {
5196 /*
5197 * skip over pages already present in the cache
5198 */
5199 io_size = skip_range - start_offset;
5200
5201 f_offset += io_size;
5202 resid -= io_size;
5203
5204 if (skip_range == upl_size)
5205 continue;
5206 /*
5207 * have to issue some real I/O
5208 * at this point, we know it's starting on a page boundary
5209 * because we've skipped over at least the first page in the request
5210 */
5211 start_offset = 0;
5212 upl_f_offset += skip_range;
5213 upl_size -= skip_range;
5214 }
5215 pages_in_upl = upl_size / PAGE_SIZE;
5216
5217 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_START,
5218 upl, (int)upl_f_offset, upl_size, start_offset, 0);
5219
5220 kret = ubc_create_upl(vp,
5221 upl_f_offset,
5222 upl_size,
5223 &upl,
5224 &pl,
5225 UPL_RET_ONLY_ABSENT | UPL_SET_LITE);
5226 if (kret != KERN_SUCCESS)
5227 return(retval);
5228 issued_io = 0;
5229
5230 /*
5231 * before we start marching forward, we must make sure we end on
5232 * a present page, otherwise we will be working with a freed
5233 * upl
5234 */
5235 for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
5236 if (upl_page_present(pl, last_pg))
5237 break;
5238 }
5239 pages_in_upl = last_pg + 1;
5240
5241
5242 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_END,
5243 upl, (int)upl_f_offset, upl_size, start_offset, 0);
5244
5245
5246 for (last_pg = 0; last_pg < pages_in_upl; ) {
5247 /*
5248 * scan from the beginning of the upl looking for the first
5249 * page that is present.... this will become the first page in
5250 * the request we're going to make to 'cluster_io'... if all
5251 * of the pages are absent, we won't call through to 'cluster_io'
5252 */
5253 for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
5254 if (upl_page_present(pl, start_pg))
5255 break;
5256 }
5257
5258 /*
5259 * scan from the starting present page looking for an absent
5260 * page before the end of the upl is reached, if we
5261 * find one, then it will terminate the range of pages being
5262 * presented to 'cluster_io'
5263 */
5264 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
5265 if (!upl_page_present(pl, last_pg))
5266 break;
5267 }
5268
5269 if (last_pg > start_pg) {
5270 /*
5271 * we found a range of pages that must be filled
5272 * if the last page in this range is the last page of the file
5273 * we may have to clip the size of it to keep from reading past
5274 * the end of the last physical block associated with the file
5275 */
5276 upl_offset = start_pg * PAGE_SIZE;
5277 io_size = (last_pg - start_pg) * PAGE_SIZE;
5278
5279 if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize)
5280 io_size = filesize - (upl_f_offset + upl_offset);
5281
5282 /*
5283 * issue an asynchronous read to cluster_io
5284 */
5285 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
5286 CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
5287
5288 issued_io = 1;
5289 }
5290 }
5291 if (issued_io == 0)
5292 ubc_upl_abort(upl, 0);
5293
5294 io_size = upl_size - start_offset;
5295
5296 if (io_size > resid)
5297 io_size = resid;
5298 f_offset += io_size;
5299 resid -= io_size;
5300 }
5301
5302 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_END,
5303 (int)f_offset, resid, retval, 0, 0);
5304
5305 return(retval);
5306 }
5307
5308
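/*
 * cluster_push is the entry point filesystems use to flush any delayed
 * write-behind clusters for a vnode.
 *
 * Illustrative call from an fsync path (not taken from this file):
 *
 *	cluster_push(vp, IO_SYNC);
 *
 * IO_SYNC makes the push wait for the writes via vnode_waitforwrites.
 */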
5309 int
5310 cluster_push(vnode_t vp, int flags)
5311 {
5312 return cluster_push_ext(vp, flags, NULL, NULL);
5313 }
5314
5315
5316 int
5317 cluster_push_ext(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg)
5318 {
5319 int retval;
5320 int my_sparse_wait = 0;
5321 struct cl_writebehind *wbp;
5322
5323 if ( !UBCINFOEXISTS(vp)) {
5324 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -1, 0);
5325 return (0);
5326 }
5327 /* return if deferred write is set */
5328 if (((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE) && (flags & IO_DEFWRITE)) {
5329 return (0);
5330 }
5331 if ((wbp = cluster_get_wbp(vp, CLW_RETURNLOCKED)) == NULL) {
5332 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -2, 0);
5333 return (0);
5334 }
5335 if (!ISSET(flags, IO_SYNC) && wbp->cl_number == 0 && wbp->cl_scmap == NULL) {
5336 lck_mtx_unlock(&wbp->cl_lockw);
5337
5338 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -3, 0);
5339 return(0);
5340 }
5341 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_START,
5342 wbp->cl_scmap, wbp->cl_number, flags, 0, 0);
5343
5344 /*
5345 * if we have an fsync in progress, we don't want to allow any additional
5346 * sync/fsync/close(s) to occur until it finishes.
5347 * note that it's possible for writes to continue to occur to this file
5348 * while we're waiting and also once the fsync starts to clean if we're
5349 * in the sparse map case
5350 */
5351 while (wbp->cl_sparse_wait) {
5352 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START, kdebug_vnode(vp), 0, 0, 0, 0);
5353
5354 msleep((caddr_t)&wbp->cl_sparse_wait, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);
5355
5356 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END, kdebug_vnode(vp), 0, 0, 0, 0);
5357 }
5358 if (flags & IO_SYNC) {
5359 my_sparse_wait = 1;
5360 wbp->cl_sparse_wait = 1;
5361
5362 /*
5363 * this is an fsync (or equivalent)... we must wait for any existing async
5364 * cleaning operations to complete before we evaluate the current state
5365 * and finish cleaning... this ensures that all writes issued before this
5366 * fsync actually get cleaned to the disk before this fsync returns
5367 */
5368 while (wbp->cl_sparse_pushes) {
5369 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_START, kdebug_vnode(vp), 0, 0, 0, 0);
5370
5371 msleep((caddr_t)&wbp->cl_sparse_pushes, &wbp->cl_lockw, PRIBIO + 1, "cluster_push_ext", NULL);
5372
5373 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_END, kdebug_vnode(vp), 0, 0, 0, 0);
5374 }
5375 }
5376 if (wbp->cl_scmap) {
5377 void *scmap;
5378
5379 if (wbp->cl_sparse_pushes < SPARSE_PUSH_LIMIT) {
5380
5381 scmap = wbp->cl_scmap;
5382 wbp->cl_scmap = NULL;
5383
5384 wbp->cl_sparse_pushes++;
5385
5386 lck_mtx_unlock(&wbp->cl_lockw);
5387
5388 sparse_cluster_push(&scmap, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg);
5389
5390 lck_mtx_lock(&wbp->cl_lockw);
5391
5392 wbp->cl_sparse_pushes--;
5393
5394 if (wbp->cl_sparse_wait && wbp->cl_sparse_pushes == 0)
5395 wakeup((caddr_t)&wbp->cl_sparse_pushes);
5396 } else {
5397 sparse_cluster_push(&(wbp->cl_scmap), vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg);
5398 }
5399 retval = 1;
5400 } else {
5401 retval = cluster_try_push(wbp, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg);
5402 }
5403 lck_mtx_unlock(&wbp->cl_lockw);
5404
5405 if (flags & IO_SYNC)
5406 (void)vnode_waitforwrites(vp, 0, 0, 0, "cluster_push");
5407
5408 if (my_sparse_wait) {
5409 /*
5410 * I'm the owner of the serialization token
5411 * clear it and wakeup anyone that is waiting
5412 * for me to finish
5413 */
5414 lck_mtx_lock(&wbp->cl_lockw);
5415
5416 wbp->cl_sparse_wait = 0;
5417 wakeup((caddr_t)&wbp->cl_sparse_wait);
5418
5419 lck_mtx_unlock(&wbp->cl_lockw);
5420 }
5421 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_END,
5422 wbp->cl_scmap, wbp->cl_number, retval, 0, 0);
5423
5424 return (retval);
5425 }
5426
5427
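/*
 * cluster_release: tear down the write-behind and read-ahead contexts hanging
 * off a ubc_info... any sparse cluster map is released via vfs_drt_control,
 * the cluster locks are destroyed and the context structures are returned to
 * their zones
 */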
5428 __private_extern__ void
5429 cluster_release(struct ubc_info *ubc)
5430 {
5431 struct cl_writebehind *wbp;
5432 struct cl_readahead *rap;
5433
5434 if ((wbp = ubc->cl_wbehind)) {
5435
5436 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, wbp->cl_scmap, 0, 0, 0);
5437
5438 if (wbp->cl_scmap)
5439 vfs_drt_control(&(wbp->cl_scmap), 0);
5440 } else {
5441 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, 0, 0, 0, 0);
5442 }
5443
5444 rap = ubc->cl_rahead;
5445
5446 if (wbp != NULL) {
5447 lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp);
5448 FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND);
5449 }
5450 if ((rap = ubc->cl_rahead)) {
5451 lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp);
5452 FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD);
5453 }
5454 ubc->cl_rahead = NULL;
5455 ubc->cl_wbehind = NULL;
5456
5457 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_END, ubc, rap, wbp, 0, 0);
5458 }
5459
5460
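/*
 * cluster_try_push: called with the write-behind lock held... makes a sorted
 * local copy of the delayed-write clusters, pushes as many of them as
 * push_flag allows via cluster_push_now, and returns the number of cluster
 * slots free on exit (MAX_CLUSTERS - wbp->cl_number)
 */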
5461 static int
5462 cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*callback)(buf_t, void *), void *callback_arg)
5463 {
5464 int cl_index;
5465 int cl_index1;
5466 int min_index;
5467 int cl_len;
5468 int cl_pushed = 0;
5469 struct cl_wextent l_clusters[MAX_CLUSTERS];
5470 u_int max_cluster_pgcount;
5471
5472
5473 max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
5474 /*
5475 * the write behind context exists and has
5476 * already been locked...
5477 */
5478 if (wbp->cl_number == 0)
5479 /*
5480 * no clusters to push
5481 * return number of empty slots
5482 */
5483 return (MAX_CLUSTERS);
5484
5485 /*
5486 * make a local 'sorted' copy of the clusters
5487 * and clear wbp->cl_number so that new clusters can
5488 * be developed
5489 */
5490 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
5491 for (min_index = -1, cl_index1 = 0; cl_index1 < wbp->cl_number; cl_index1++) {
5492 if (wbp->cl_clusters[cl_index1].b_addr == wbp->cl_clusters[cl_index1].e_addr)
5493 continue;
5494 if (min_index == -1)
5495 min_index = cl_index1;
5496 else if (wbp->cl_clusters[cl_index1].b_addr < wbp->cl_clusters[min_index].b_addr)
5497 min_index = cl_index1;
5498 }
5499 if (min_index == -1)
5500 break;
5501
5502 l_clusters[cl_index].b_addr = wbp->cl_clusters[min_index].b_addr;
5503 l_clusters[cl_index].e_addr = wbp->cl_clusters[min_index].e_addr;
5504 l_clusters[cl_index].io_flags = wbp->cl_clusters[min_index].io_flags;
5505
5506 wbp->cl_clusters[min_index].b_addr = wbp->cl_clusters[min_index].e_addr;
5507 }
5508 wbp->cl_number = 0;
5509
5510 cl_len = cl_index;
5511
5512 /* skip switching to the sparse cluster mechanism if on diskimage */
5513 if ( ((push_flag & PUSH_DELAY) && cl_len == MAX_CLUSTERS ) &&
5514 !(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) ) {
5515 int i;
5516
5517 /*
5518 * determine if we appear to be writing the file sequentially
5519 * if not, by returning without having pushed any clusters
5520 * we will cause this vnode to be pushed into the sparse cluster mechanism
5521 * used for managing more random I/O patterns
5522 *
5523 * we know that we've got all clusters currently in use and the next write doesn't fit into one of them...
5524 * that's why we're in try_push with PUSH_DELAY...
5525 *
5526 * check to make sure that all the clusters except the last one are 'full'... and that each cluster
5527 * is adjacent to the next (i.e. we're looking for sequential writes)... they were sorted above
5528 * so we can just make a simple pass through, up to, but not including the last one...
5529 * note that e_addr is not inclusive, so it will be equal to the b_addr of the next cluster if they
5530 * are sequential
5531 *
5532 * we let the last one be partial as long as it was adjacent to the previous one...
5533 * we need to do this to deal with multi-threaded servers that might write an I/O or 2 out
5534 * of order... if this occurs at the tail of the last cluster, we don't want to fall into the sparse cluster world...
5535 */
5536 for (i = 0; i < MAX_CLUSTERS - 1; i++) {
5537 if ((l_clusters[i].e_addr - l_clusters[i].b_addr) != max_cluster_pgcount)
5538 goto dont_try;
5539 if (l_clusters[i].e_addr != l_clusters[i+1].b_addr)
5540 goto dont_try;
5541 }
5542 }
5543 for (cl_index = 0; cl_index < cl_len; cl_index++) {
5544 int flags;
5545 struct cl_extent cl;
5546
5547 flags = io_flags & (IO_PASSIVE|IO_CLOSE);
5548
5549 /*
5550 * try to push each cluster in turn...
5551 */
5552 if (l_clusters[cl_index].io_flags & CLW_IONOCACHE)
5553 flags |= IO_NOCACHE;
5554
5555 if (l_clusters[cl_index].io_flags & CLW_IOPASSIVE)
5556 flags |= IO_PASSIVE;
5557
5558 if (push_flag & PUSH_SYNC)
5559 flags |= IO_SYNC;
5560
5561 cl.b_addr = l_clusters[cl_index].b_addr;
5562 cl.e_addr = l_clusters[cl_index].e_addr;
5563
5564 cluster_push_now(vp, &cl, EOF, flags, callback, callback_arg);
5565
5566 l_clusters[cl_index].b_addr = 0;
5567 l_clusters[cl_index].e_addr = 0;
5568
5569 cl_pushed++;
5570
5571 if ( !(push_flag & PUSH_ALL) )
5572 break;
5573 }
5574 dont_try:
5575 if (cl_len > cl_pushed) {
5576 /*
5577 * we didn't push all of the clusters, so
5578 * lets try to merge them back in to the vnode
5579 */
5580 if ((MAX_CLUSTERS - wbp->cl_number) < (cl_len - cl_pushed)) {
5581 /*
5582 * we picked up some new clusters while we were trying to
5583 * push the old ones... this can happen because I've dropped
5584 * the vnode lock... the sum of the
5585 * leftovers plus the new cluster count exceeds our ability
5586 * to represent them, so switch to the sparse cluster mechanism
5587 *
5588 * collect the active public clusters...
5589 */
5590 sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg);
5591
5592 for (cl_index = 0, cl_index1 = 0; cl_index < cl_len; cl_index++) {
5593 if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr)
5594 continue;
5595 wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
5596 wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
5597 wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
5598
5599 cl_index1++;
5600 }
5601 /*
5602 * update the cluster count
5603 */
5604 wbp->cl_number = cl_index1;
5605
5606 /*
5607 * and collect the original clusters that were moved into the
5608 * local storage for sorting purposes
5609 */
5610 sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg);
5611
5612 } else {
5613 /*
5614 * we've got room to merge the leftovers back in
5615 * just append them starting at the next 'hole'
5616 * represented by wbp->cl_number
5617 */
5618 for (cl_index = 0, cl_index1 = wbp->cl_number; cl_index < cl_len; cl_index++) {
5619 if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr)
5620 continue;
5621
5622 wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr;
5623 wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr;
5624 wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags;
5625
5626 cl_index1++;
5627 }
5628 /*
5629 * update the cluster count
5630 */
5631 wbp->cl_number = cl_index1;
5632 }
5633 }
5634 return (MAX_CLUSTERS - wbp->cl_number);
5635 }
5636
5637
5638
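/*
 * cluster_push_now: build a UPL over the cluster's page range (clipped to EOF),
 * ask the VM for just the dirty pages in that range, and issue a cluster_io
 * write for each contiguous run of dirty pages found
 */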
5639 static int
5640 cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags, int (*callback)(buf_t, void *), void *callback_arg)
5641 {
5642 upl_page_info_t *pl;
5643 upl_t upl;
5644 vm_offset_t upl_offset;
5645 int upl_size;
5646 off_t upl_f_offset;
5647 int pages_in_upl;
5648 int start_pg;
5649 int last_pg;
5650 int io_size;
5651 int io_flags;
5652 int upl_flags;
5653 int bflag;
5654 int size;
5655 int error = 0;
5656 int retval;
5657 kern_return_t kret;
5658
5659 if (flags & IO_PASSIVE)
5660 bflag = CL_PASSIVE;
5661 else
5662 bflag = 0;
5663
5664 if (flags & IO_SKIP_ENCRYPTION)
5665 bflag |= CL_ENCRYPTED;
5666
5667 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_START,
5668 (int)cl->b_addr, (int)cl->e_addr, (int)EOF, flags, 0);
5669
5670 if ((pages_in_upl = (int)(cl->e_addr - cl->b_addr)) == 0) {
5671 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 0, 0, 0, 0);
5672
5673 return (0);
5674 }
5675 upl_size = pages_in_upl * PAGE_SIZE;
5676 upl_f_offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
5677
5678 if (upl_f_offset + upl_size >= EOF) {
5679
5680 if (upl_f_offset >= EOF) {
5681 /*
5682 * must have truncated the file and missed
5683 * clearing a dangling cluster (i.e. it's completely
5684 * beyond the new EOF)
5685 */
5686 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 1, 0, 0, 0);
5687
5688 return(0);
5689 }
5690 size = EOF - upl_f_offset;
5691
5692 upl_size = (size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
5693 pages_in_upl = upl_size / PAGE_SIZE;
5694 } else
5695 size = upl_size;
5696
5697 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, size, 0, 0, 0);
5698
5699 /*
5700 * by asking for UPL_COPYOUT_FROM and UPL_RET_ONLY_DIRTY, we get the following desirable behavior
5701 *
5702 * - only pages that are currently dirty are returned... these are the ones we need to clean
5703 * - the hardware dirty bit is cleared when the page is gathered into the UPL... the software dirty bit is set
5704 * - if we have to abort the I/O for some reason, the software dirty bit is left set since we didn't clean the page
5705 * - when we commit the page, the software dirty bit is cleared... the hardware dirty bit is untouched so that if
5706 * someone dirties this page while the I/O is in progress, we don't lose track of the new state
5707 *
5708 * when the I/O completes, we no longer ask for an explicit clear of the DIRTY state (either soft or hard)
5709 */
5710
5711 if ((vp->v_flag & VNOCACHE_DATA) || (flags & IO_NOCACHE))
5712 upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE | UPL_WILL_BE_DUMPED;
5713 else
5714 upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE;
5715
5716 kret = ubc_create_upl(vp,
5717 upl_f_offset,
5718 upl_size,
5719 &upl,
5720 &pl,
5721 upl_flags);
5722 if (kret != KERN_SUCCESS)
5723 panic("cluster_push: failed to get pagelist");
5724
5725 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, upl, upl_f_offset, 0, 0, 0);
5726
5727 /*
5728 * since we only asked for the dirty pages back
5729 * it's possible that we may only get a few or even none, so...
5730 * before we start marching forward, we must make sure we know
5731 * where the last present page is in the UPL, otherwise we could
5732 * end up working with a freed upl due to the FREE_ON_EMPTY semantics
5733 * employed by commit_range and abort_range.
5734 */
5735 for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
5736 if (upl_page_present(pl, last_pg))
5737 break;
5738 }
5739 pages_in_upl = last_pg + 1;
5740
5741 if (pages_in_upl == 0) {
5742 ubc_upl_abort(upl, 0);
5743
5744 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 2, 0, 0, 0);
5745 return(0);
5746 }
5747
5748 for (last_pg = 0; last_pg < pages_in_upl; ) {
5749 /*
5750 * find the next dirty page in the UPL
5751 * this will become the first page in the
5752 * next I/O to generate
5753 */
5754 for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
5755 if (upl_dirty_page(pl, start_pg))
5756 break;
5757 if (upl_page_present(pl, start_pg))
5758 /*
5759 * RET_ONLY_DIRTY will return non-dirty 'precious' pages
5760 * just release these unchanged since we're not going
5761 * to steal them or change their state
5762 */
5763 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
5764 }
5765 if (start_pg >= pages_in_upl)
5766 /*
5767 * done... no more dirty pages to push
5768 */
5769 break;
5770 if (start_pg > last_pg)
5771 /*
5772 * skipped over some non-dirty pages
5773 */
5774 size -= ((start_pg - last_pg) * PAGE_SIZE);
5775
5776 /*
5777 * find a range of dirty pages to write
5778 */
5779 for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
5780 if (!upl_dirty_page(pl, last_pg))
5781 break;
5782 }
5783 upl_offset = start_pg * PAGE_SIZE;
5784
5785 io_size = min(size, (last_pg - start_pg) * PAGE_SIZE);
5786
5787 io_flags = CL_THROTTLE | CL_COMMIT | CL_AGE | bflag;
5788
5789 if ( !(flags & IO_SYNC))
5790 io_flags |= CL_ASYNC;
5791
5792 if (flags & IO_CLOSE)
5793 io_flags |= CL_CLOSE;
5794
5795 if (flags & IO_NOCACHE)
5796 io_flags |= CL_NOCACHE;
5797
5798 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
5799 io_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
5800
5801 if (error == 0 && retval)
5802 error = retval;
5803
5804 size -= io_size;
5805 }
5806 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 3, 0, 0, 0);
5807
5808 return(error);
5809 }
5810
5811
5812 /*
5813 * sparse_cluster_switch is called with the write behind lock held
5814 */
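/*
 * each page of every delayed-write cluster is probed with ubc_page_op and, if
 * it's still dirty, handed to sparse_cluster_add one page at a time so that
 * the sparse map takes over tracking it
 */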
5815 static void
5816 sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg)
5817 {
5818 int cl_index;
5819
5820 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, kdebug_vnode(vp), wbp->cl_scmap, 0, 0, 0);
5821
5822 for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) {
5823 int flags;
5824 struct cl_extent cl;
5825
5826 for (cl.b_addr = wbp->cl_clusters[cl_index].b_addr; cl.b_addr < wbp->cl_clusters[cl_index].e_addr; cl.b_addr++) {
5827
5828 if (ubc_page_op(vp, (off_t)(cl.b_addr * PAGE_SIZE_64), 0, NULL, &flags) == KERN_SUCCESS) {
5829 if (flags & UPL_POP_DIRTY) {
5830 cl.e_addr = cl.b_addr + 1;
5831
5832 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, EOF, callback, callback_arg);
5833 }
5834 }
5835 }
5836 }
5837 wbp->cl_number = 0;
5838
5839 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, kdebug_vnode(vp), wbp->cl_scmap, 0, 0, 0);
5840 }
5841
5842
5843 /*
5844 * sparse_cluster_push must be called with the write-behind lock held if the scmap is
5845 * still associated with the write-behind context... however, if the scmap has been disassociated
5846 * from the write-behind context (the cluster_push case), the wb lock is not held
5847 */
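/*
 * repeatedly pulls the next dirty extent out of the sparse map via
 * vfs_drt_get_cluster and writes it with cluster_push_now... only a single
 * extent is pushed per call unless PUSH_ALL is set, in which case the map is
 * drained completely
 */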
5848 static void
5849 sparse_cluster_push(void **scmap, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*callback)(buf_t, void *), void *callback_arg)
5850 {
5851 struct cl_extent cl;
5852 off_t offset;
5853 u_int length;
5854
5855 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_START, kdebug_vnode(vp), (*scmap), 0, push_flag, 0);
5856
5857 if (push_flag & PUSH_ALL)
5858 vfs_drt_control(scmap, 1);
5859
5860 for (;;) {
5861 if (vfs_drt_get_cluster(scmap, &offset, &length) != KERN_SUCCESS)
5862 break;
5863
5864 cl.b_addr = (daddr64_t)(offset / PAGE_SIZE_64);
5865 cl.e_addr = (daddr64_t)((offset + length) / PAGE_SIZE_64);
5866
5867 cluster_push_now(vp, &cl, EOF, io_flags & (IO_PASSIVE|IO_CLOSE), callback, callback_arg);
5868
5869 if ( !(push_flag & PUSH_ALL) )
5870 break;
5871 }
5872 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, kdebug_vnode(vp), (*scmap), 0, 0, 0);
5873 }
5874
5875
5876 /*
5877 * sparse_cluster_add is called with the write behind lock held
5878 */
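/*
 * marks the extent's pages dirty in the sparse map... if the map fills up,
 * only a partial update is done, so some dirty pages are pushed out via
 * sparse_cluster_push and the remainder of the extent is retried from where
 * the partial update left off
 */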
5879 static void
5880 sparse_cluster_add(void **scmap, vnode_t vp, struct cl_extent *cl, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg)
5881 {
5882 u_int new_dirty;
5883 u_int length;
5884 off_t offset;
5885
5886 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_START, (*scmap), 0, cl->b_addr, (int)cl->e_addr, 0);
5887
5888 offset = (off_t)(cl->b_addr * PAGE_SIZE_64);
5889 length = ((u_int)(cl->e_addr - cl->b_addr)) * PAGE_SIZE;
5890
5891 while (vfs_drt_mark_pages(scmap, offset, length, &new_dirty) != KERN_SUCCESS) {
5892 /*
5893 * no room left in the map
5894 * only a partial update was done
5895 * push out some pages and try again
5896 */
5897 sparse_cluster_push(scmap, vp, EOF, 0, 0, callback, callback_arg);
5898
5899 offset += (new_dirty * PAGE_SIZE_64);
5900 length -= (new_dirty * PAGE_SIZE);
5901 }
5902 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_END, kdebug_vnode(vp), (*scmap), 0, 0, 0);
5903 }
5904
5905
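/*
 * cluster_align_phys_io: handle the unaligned head or tail of a physical
 * (direct) transfer... the UBC page overlapping the user buffer is read in
 * synchronously if it isn't already valid, the partial page is copied with
 * copypv, and the page is written back out if it was modified or was already
 * dirty
 */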
5906 static int
5907 cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
5908 {
5909 upl_page_info_t *pl;
5910 upl_t upl;
5911 addr64_t ubc_paddr;
5912 kern_return_t kret;
5913 int error = 0;
5914 int did_read = 0;
5915 int abort_flags;
5916 int upl_flags;
5917 int bflag;
5918
5919 if (flags & IO_PASSIVE)
5920 bflag = CL_PASSIVE;
5921 else
5922 bflag = 0;
5923
5924 if (flags & IO_NOCACHE)
5925 bflag |= CL_NOCACHE;
5926
5927 upl_flags = UPL_SET_LITE;
5928
5929 if ( !(flags & CL_READ) ) {
5930 /*
5931 * "write" operation: let the UPL subsystem know
5932 * that we intend to modify the buffer cache pages
5933 * we're gathering.
5934 */
5935 upl_flags |= UPL_WILL_MODIFY;
5936 } else {
5937 /*
5938 * indicate that there is no need to pull the
5939 * mapping for this page... we're only going
5940 * to read from it, not modify it.
5941 */
5942 upl_flags |= UPL_FILE_IO;
5943 }
5944 kret = ubc_create_upl(vp,
5945 uio->uio_offset & ~PAGE_MASK_64,
5946 PAGE_SIZE,
5947 &upl,
5948 &pl,
5949 upl_flags);
5950
5951 if (kret != KERN_SUCCESS)
5952 return(EINVAL);
5953
5954 if (!upl_valid_page(pl, 0)) {
5955 /*
5956 * issue a synchronous read to cluster_io
5957 */
5958 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
5959 CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
5960 if (error) {
5961 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
5962
5963 return(error);
5964 }
5965 did_read = 1;
5966 }
5967 ubc_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)(uio->uio_offset & PAGE_MASK_64);
5968
5969 /*
5970 * NOTE: There is no prototype for the following in BSD. It, and the definitions
5971 * of the defines for cppvPsrc, cppvPsnk, cppvFsnk, and cppvFsrc will be found in
5972 * osfmk/ppc/mappings.h. They are not included here because there appears to be no
5973 * way to do so without exporting them to kexts as well.
5974 */
5975 if (flags & CL_READ)
5976 // copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsnk); /* Copy physical to physical and flush the destination */
5977 copypv(ubc_paddr, usr_paddr, xsize, 2 | 1 | 4); /* Copy physical to physical and flush the destination */
5978 else
5979 // copypv(usr_paddr, ubc_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsrc); /* Copy physical to physical and flush the source */
5980 copypv(usr_paddr, ubc_paddr, xsize, 2 | 1 | 8); /* Copy physical to physical and flush the source */
5981
5982 if ( !(flags & CL_READ) || (upl_valid_page(pl, 0) && upl_dirty_page(pl, 0))) {
5983 /*
5984 * issue a synchronous write to cluster_io
5985 */
5986 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
5987 bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg);
5988 }
5989 if (error == 0)
5990 uio_update(uio, (user_size_t)xsize);
5991
5992 if (did_read)
5993 abort_flags = UPL_ABORT_FREE_ON_EMPTY;
5994 else
5995 abort_flags = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
5996
5997 ubc_upl_abort_range(upl, 0, PAGE_SIZE, abort_flags);
5998
5999 return (error);
6000 }
6001
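/*
 * cluster_copy_upl_data: copy between the caller's uio and the physical pages
 * backing a UPL... the uio segment flag is temporarily switched to its
 * physical counterpart so uiomove64 can operate on page physical addresses,
 * and for writes the pages that were clean going in are counted so the task's
 * logical write accounting can be updated
 */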
6002 int
6003 cluster_copy_upl_data(struct uio *uio, upl_t upl, int upl_offset, int *io_resid)
6004 {
6005 int pg_offset;
6006 int pg_index;
6007 int csize;
6008 int segflg;
6009 int retval = 0;
6010 int xsize;
6011 upl_page_info_t *pl;
6012 int dirty_count;
6013
6014 xsize = *io_resid;
6015
6016 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
6017 (int)uio->uio_offset, upl_offset, xsize, 0, 0);
6018
6019 segflg = uio->uio_segflg;
6020
6021 switch(segflg) {
6022
6023 case UIO_USERSPACE32:
6024 case UIO_USERISPACE32:
6025 uio->uio_segflg = UIO_PHYS_USERSPACE32;
6026 break;
6027
6028 case UIO_USERSPACE:
6029 case UIO_USERISPACE:
6030 uio->uio_segflg = UIO_PHYS_USERSPACE;
6031 break;
6032
6033 case UIO_USERSPACE64:
6034 case UIO_USERISPACE64:
6035 uio->uio_segflg = UIO_PHYS_USERSPACE64;
6036 break;
6037
6038 case UIO_SYSSPACE:
6039 uio->uio_segflg = UIO_PHYS_SYSSPACE;
6040 break;
6041
6042 }
6043 pl = ubc_upl_pageinfo(upl);
6044
6045 pg_index = upl_offset / PAGE_SIZE;
6046 pg_offset = upl_offset & PAGE_MASK;
6047 csize = min(PAGE_SIZE - pg_offset, xsize);
6048
6049 dirty_count = 0;
6050 while (xsize && retval == 0) {
6051 addr64_t paddr;
6052
6053 paddr = ((addr64_t)upl_phys_page(pl, pg_index) << PAGE_SHIFT) + pg_offset;
6054 if ((uio->uio_rw == UIO_WRITE) && (upl_dirty_page(pl, pg_index) == FALSE))
6055 dirty_count++;
6056
6057 retval = uiomove64(paddr, csize, uio);
6058
6059 pg_index += 1;
6060 pg_offset = 0;
6061 xsize -= csize;
6062 csize = min(PAGE_SIZE, xsize);
6063 }
6064 *io_resid = xsize;
6065
6066 uio->uio_segflg = segflg;
6067
6068 task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_DEFERRED, upl_lookup_vnode(upl));
6069 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
6070 (int)uio->uio_offset, xsize, retval, segflg, 0);
6071
6072 return (retval);
6073 }
6074
6075
6076 int
6077 cluster_copy_ubc_data(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty)
6078 {
6079
6080 return (cluster_copy_ubc_data_internal(vp, uio, io_resid, mark_dirty, 1));
6081 }
6082
6083
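/*
 * cluster_copy_ubc_data_internal: move data directly between the uio and the
 * file's resident UBC pages via memory_object_control_uiomove, bypassing UPL
 * creation entirely... *io_resid is reduced by the amount actually transferred
 */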
6084 static int
6085 cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference)
6086 {
6087 int segflg;
6088 int io_size;
6089 int xsize;
6090 int start_offset;
6091 int retval = 0;
6092 memory_object_control_t control;
6093
6094 io_size = *io_resid;
6095
6096 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
6097 (int)uio->uio_offset, io_size, mark_dirty, take_reference, 0);
6098
6099 control = ubc_getobject(vp, UBC_FLAGS_NONE);
6100
6101 if (control == MEMORY_OBJECT_CONTROL_NULL) {
6102 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
6103 (int)uio->uio_offset, io_size, retval, 3, 0);
6104
6105 return(0);
6106 }
6107 segflg = uio->uio_segflg;
6108
6109 switch(segflg) {
6110
6111 case UIO_USERSPACE32:
6112 case UIO_USERISPACE32:
6113 uio->uio_segflg = UIO_PHYS_USERSPACE32;
6114 break;
6115
6116 case UIO_USERSPACE64:
6117 case UIO_USERISPACE64:
6118 uio->uio_segflg = UIO_PHYS_USERSPACE64;
6119 break;
6120
6121 case UIO_USERSPACE:
6122 case UIO_USERISPACE:
6123 uio->uio_segflg = UIO_PHYS_USERSPACE;
6124 break;
6125
6126 case UIO_SYSSPACE:
6127 uio->uio_segflg = UIO_PHYS_SYSSPACE;
6128 break;
6129 }
6130
6131 if ( (io_size = *io_resid) ) {
6132 start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
6133 xsize = uio_resid(uio);
6134
6135 retval = memory_object_control_uiomove(control, uio->uio_offset - start_offset, uio,
6136 start_offset, io_size, mark_dirty, take_reference);
6137 xsize -= uio_resid(uio);
6138 io_size -= xsize;
6139 }
6140 uio->uio_segflg = segflg;
6141 *io_resid = io_size;
6142
6143 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
6144 (int)uio->uio_offset, io_size, retval, 0x80000000 | segflg, 0);
6145
6146 return(retval);
6147 }
6148
6149
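/*
 * is_file_clean: walk the file a page at a time asking the pager whether each
 * resident page is dirty... returns EINVAL if any dirty page is found, 0 if
 * the file is clean
 */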
6150 int
6151 is_file_clean(vnode_t vp, off_t filesize)
6152 {
6153 off_t f_offset;
6154 int flags;
6155 int total_dirty = 0;
6156
6157 for (f_offset = 0; f_offset < filesize; f_offset += PAGE_SIZE_64) {
6158 if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) {
6159 if (flags & UPL_POP_DIRTY) {
6160 total_dirty++;
6161 }
6162 }
6163 }
6164 if (total_dirty)
6165 return(EINVAL);
6166
6167 return (0);
6168 }
6169
6170
6171
6172 /*
6173 * Dirty region tracking/clustering mechanism.
6174 *
6175 * This code (vfs_drt_*) provides a mechanism for tracking and clustering
6176 * dirty regions within a larger space (file). It is primarily intended to
6177 * support clustering in large files with many dirty areas.
6178 *
6179 * The implementation assumes that the dirty regions are pages.
6180 *
6181 * To represent dirty pages within the file, we store bit vectors in a
6182 * variable-size circular hash.
6183 */
6184
6185 /*
6186 * Bitvector size. This determines the number of pages we group in a
6187 * single hashtable entry. Each hashtable entry is aligned to this
6188 * size within the file.
6189 */
6190 #define DRT_BITVECTOR_PAGES ((1024 * 1024) / PAGE_SIZE)
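/*
 * e.g. with a 4 KiB PAGE_SIZE this works out to 256 pages, so each hashtable
 * entry tracks the dirty state of one 1 MiB-aligned, 1 MiB-wide window of the
 * file
 */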
6191
6192 /*
6193 * File offset handling.
6194 *
6195 * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES;
6196 * the correct formula is (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1))
6197 */
6198 #define DRT_ADDRESS_MASK (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1))
6199 #define DRT_ALIGN_ADDRESS(addr) ((addr) & DRT_ADDRESS_MASK)
6200
6201 /*
6202 * Hashtable address field handling.
6203 *
6204 * The low-order bits of the hashtable address are used to conserve
6205 * space.
6206 *
6207 * DRT_HASH_COUNT_MASK must be large enough to store the range
6208 * 0-DRT_BITVECTOR_PAGES inclusive, as well as have one value
6209 * to indicate that the bucket is actually unoccupied.
6210 */
6211 #define DRT_HASH_GET_ADDRESS(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK)
6212 #define DRT_HASH_SET_ADDRESS(scm, i, a) \
6213 do { \
6214 (scm)->scm_hashtable[(i)].dhe_control = \
6215 ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a); \
6216 } while (0)
6217 #define DRT_HASH_COUNT_MASK 0x1ff
6218 #define DRT_HASH_GET_COUNT(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK)
6219 #define DRT_HASH_SET_COUNT(scm, i, c) \
6220 do { \
6221 (scm)->scm_hashtable[(i)].dhe_control = \
6222 ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK); \
6223 } while (0)
6224 #define DRT_HASH_CLEAR(scm, i) \
6225 do { \
6226 (scm)->scm_hashtable[(i)].dhe_control = 0; \
6227 } while (0)
6228 #define DRT_HASH_VACATE(scm, i) DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK)
6229 #define DRT_HASH_VACANT(scm, i) (DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK)
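/*
 * a bucket whose count field holds the all-ones value (DRT_HASH_COUNT_MASK,
 * 0x1ff == 511) is treated as vacant... real counts only need to span
 * 0-DRT_BITVECTOR_PAGES, which fits comfortably below that sentinel
 */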
6230 #define DRT_HASH_COPY(oscm, oi, scm, i) \
6231 do { \
6232 (scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control; \
6233 DRT_BITVECTOR_COPY(oscm, oi, scm, i); \
6234 } while(0)
6235
6236
6237 /*
6238 * Hash table moduli.
6239 *
6240 * Since the hashtable entry's size is dependent on the size of
6241 * the bitvector, and since the hashtable size is constrained to
6242 * both being prime and fitting within the desired allocation
6243 * size, these values need to be manually determined.
6244 *
6245 * For DRT_BITVECTOR_PAGES = 256, the entry size is 40 bytes.
6246 *
6247 * The small hashtable allocation is 1024 bytes, so the modulus is 23.
6248 * The large hashtable allocation is 16384 bytes, so the modulus is 401.
6249 */
6250 #define DRT_HASH_SMALL_MODULUS 23
6251 #define DRT_HASH_LARGE_MODULUS 401
6252
6253 /*
6254 * Physical memory required before the large hash modulus is permitted.
6255 *
6256 * On small memory systems, the large hash modulus can lead to physical
6257 * memory starvation, so we avoid using it there.
6258 */
6259 #define DRT_HASH_LARGE_MEMORY_REQUIRED (1024LL * 1024LL * 1024LL) /* 1GiB */
6260
6261 #define DRT_SMALL_ALLOCATION 1024 /* 104 bytes spare */
6262 #define DRT_LARGE_ALLOCATION 16384 /* 344 bytes spare */
6263
6264 /* *** nothing below here has secret dependencies on DRT_BITVECTOR_PAGES *** */
6265
6266 /*
6267 * Hashtable bitvector handling.
6268 *
6269 * Bitvector fields are 32 bits long.
6270 */
6271
6272 #define DRT_HASH_SET_BIT(scm, i, bit) \
6273 (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] |= (1 << ((bit) % 32))
6274
6275 #define DRT_HASH_CLEAR_BIT(scm, i, bit) \
6276 (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] &= ~(1 << ((bit) % 32))
6277
6278 #define DRT_HASH_TEST_BIT(scm, i, bit) \
6279 ((scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] & (1 << ((bit) % 32)))
6280
6281 #define DRT_BITVECTOR_CLEAR(scm, i) \
6282 bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
6283
6284 #define DRT_BITVECTOR_COPY(oscm, oi, scm, i) \
6285 bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0], \
6286 &(scm)->scm_hashtable[(i)].dhe_bitvector[0], \
6287 (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
6288
6289
6290
6291 /*
6292 * Hashtable entry.
6293 */
6294 struct vfs_drt_hashentry {
6295 u_int64_t dhe_control;
6296 /*
6297 * dhe_bitvector was declared as dhe_bitvector[DRT_BITVECTOR_PAGES / 32];
6298 * DRT_BITVECTOR_PAGES is defined as ((1024 * 1024) / PAGE_SIZE)
6299 * Since PAGE_SIZE is only known at boot time,
6300 * -define MAX_DRT_BITVECTOR_PAGES for smallest supported page size (4k)
6301 * -declare dhe_bitvector array for largest possible length
6302 */
6303 #define MAX_DRT_BITVECTOR_PAGES (1024 * 1024)/( 4 * 1024)
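 /* (1024 * 1024) / (4 * 1024) == 256 bits, i.e. eight 32-bit words (32 bytes) per entry */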
6304 u_int32_t dhe_bitvector[MAX_DRT_BITVECTOR_PAGES/32];
6305 };
6306
6307 /*
6308 * Dirty Region Tracking structure.
6309 *
6310 * The hashtable is allocated entirely inside the DRT structure.
6311 *
6312 * The hash is a simple circular prime modulus arrangement, the structure
6313 * is resized from small to large if it overflows.
6314 */
6315
6316 struct vfs_drt_clustermap {
6317 u_int32_t scm_magic; /* sanity/detection */
6318 #define DRT_SCM_MAGIC 0x12020003
6319 u_int32_t scm_modulus; /* current ring size */
6320 u_int32_t scm_buckets; /* number of occupied buckets */
6321 u_int32_t scm_lastclean; /* last entry we cleaned */
6322 u_int32_t scm_iskips; /* number of slot skips */
6323
6324 struct vfs_drt_hashentry scm_hashtable[0];
6325 };
6326
6327
6328 #define DRT_HASH(scm, addr) ((addr) % (scm)->scm_modulus)
6329 #define DRT_HASH_NEXT(scm, addr) (((addr) + 1) % (scm)->scm_modulus)
6330
6331 /*
6332 * Debugging codes and arguments.
6333 */
6334 #define DRT_DEBUG_EMPTYFREE (FSDBG_CODE(DBG_FSRW, 82)) /* nil */
6335 #define DRT_DEBUG_RETCLUSTER (FSDBG_CODE(DBG_FSRW, 83)) /* offset, length */
6336 #define DRT_DEBUG_ALLOC (FSDBG_CODE(DBG_FSRW, 84)) /* copycount */
6337 #define DRT_DEBUG_INSERT (FSDBG_CODE(DBG_FSRW, 85)) /* offset, iskip */
6338 #define DRT_DEBUG_MARK (FSDBG_CODE(DBG_FSRW, 86)) /* offset, length,
6339 * dirty */
6340 /* 0, setcount */
6341 /* 1 (clean, no map) */
6342 /* 2 (map alloc fail) */
6343 /* 3, resid (partial) */
6344 #define DRT_DEBUG_6 (FSDBG_CODE(DBG_FSRW, 87))
6345 #define DRT_DEBUG_SCMDATA (FSDBG_CODE(DBG_FSRW, 88)) /* modulus, buckets,
6346 * lastclean, iskips */
6347
6348
6349 static kern_return_t vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp);
6350 static kern_return_t vfs_drt_free_map(struct vfs_drt_clustermap *cmap);
6351 static kern_return_t vfs_drt_search_index(struct vfs_drt_clustermap *cmap,
6352 u_int64_t offset, int *indexp);
6353 static kern_return_t vfs_drt_get_index(struct vfs_drt_clustermap **cmapp,
6354 u_int64_t offset,
6355 int *indexp,
6356 int recursed);
6357 static kern_return_t vfs_drt_do_mark_pages(
6358 void **cmapp,
6359 u_int64_t offset,
6360 u_int length,
6361 u_int *setcountp,
6362 int dirty);
6363 static void vfs_drt_trace(
6364 struct vfs_drt_clustermap *cmap,
6365 int code,
6366 int arg1,
6367 int arg2,
6368 int arg3,
6369 int arg4);
6370
6371
6372 /*
6373 * Allocate and initialise a sparse cluster map.
6374 *
6375 * Will allocate a new map, resize or compact an existing map.
6376 *
6377 * XXX we should probably have at least one intermediate map size,
6378 * as the 1:16 ratio seems a bit drastic.
6379 */
6380 static kern_return_t
6381 vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp)
6382 {
6383 struct vfs_drt_clustermap *cmap, *ocmap;
6384 kern_return_t kret;
6385 u_int64_t offset;
6386 u_int32_t i;
6387 int nsize, active_buckets, index, copycount;
6388
6389 ocmap = NULL;
6390 if (cmapp != NULL)
6391 ocmap = *cmapp;
6392
6393 /*
6394 * Decide on the size of the new map.
6395 */
6396 if (ocmap == NULL) {
6397 nsize = DRT_HASH_SMALL_MODULUS;
6398 } else {
6399 /* count the number of active buckets in the old map */
6400 active_buckets = 0;
6401 for (i = 0; i < ocmap->scm_modulus; i++) {
6402 if (!DRT_HASH_VACANT(ocmap, i) &&
6403 (DRT_HASH_GET_COUNT(ocmap, i) != 0))
6404 active_buckets++;
6405 }
6406 /*
6407 * If we're currently using the small allocation, check to
6408 * see whether we should grow to the large one.
6409 */
6410 if (ocmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
6411 /*
6412 * If the ring is nearly full and we are allowed to
6413 * use the large modulus, upgrade.
6414 */
6415 if ((active_buckets > (DRT_HASH_SMALL_MODULUS - 5)) &&
6416 (max_mem >= DRT_HASH_LARGE_MEMORY_REQUIRED)) {
6417 nsize = DRT_HASH_LARGE_MODULUS;
6418 } else {
6419 nsize = DRT_HASH_SMALL_MODULUS;
6420 }
6421 } else {
6422 /* already using the large modulus */
6423 nsize = DRT_HASH_LARGE_MODULUS;
6424 /*
6425 * If the ring is completely full, there's
6426 * nothing useful for us to do. Behave as
6427 * though we had compacted into the new
6428 * array and return.
6429 */
6430 if (active_buckets >= DRT_HASH_LARGE_MODULUS)
6431 return(KERN_SUCCESS);
6432 }
6433 }
6434
6435 /*
6436 * Allocate and initialise the new map.
6437 */
6438
6439 kret = kmem_alloc(kernel_map, (vm_offset_t *)&cmap,
6440 (nsize == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION, VM_KERN_MEMORY_FILE);
6441 if (kret != KERN_SUCCESS)
6442 return(kret);
6443 cmap->scm_magic = DRT_SCM_MAGIC;
6444 cmap->scm_modulus = nsize;
6445 cmap->scm_buckets = 0;
6446 cmap->scm_lastclean = 0;
6447 cmap->scm_iskips = 0;
6448 for (i = 0; i < cmap->scm_modulus; i++) {
6449 DRT_HASH_CLEAR(cmap, i);
6450 DRT_HASH_VACATE(cmap, i);
6451 DRT_BITVECTOR_CLEAR(cmap, i);
6452 }
6453
6454 /*
6455 * If there's an old map, re-hash entries from it into the new map.
6456 */
6457 copycount = 0;
6458 if (ocmap != NULL) {
6459 for (i = 0; i < ocmap->scm_modulus; i++) {
6460 /* skip empty buckets */
6461 if (DRT_HASH_VACANT(ocmap, i) ||
6462 (DRT_HASH_GET_COUNT(ocmap, i) == 0))
6463 continue;
6464 /* get new index */
6465 offset = DRT_HASH_GET_ADDRESS(ocmap, i);
6466 kret = vfs_drt_get_index(&cmap, offset, &index, 1);
6467 if (kret != KERN_SUCCESS) {
6468 /* XXX need to bail out gracefully here */
6469 panic("vfs_drt: new cluster map mysteriously too small");
6470 index = 0;
6471 }
6472 /* copy */
6473 DRT_HASH_COPY(ocmap, i, cmap, index);
6474 copycount++;
6475 }
6476 }
6477
6478 /* log what we've done */
6479 vfs_drt_trace(cmap, DRT_DEBUG_ALLOC, copycount, 0, 0, 0);
6480
6481 /*
6482 * It's important to ensure that *cmapp always points to
6483 * a valid map, so we must overwrite it before freeing
6484 * the old map.
6485 */
6486 *cmapp = cmap;
6487 if (ocmap != NULL) {
6488 /* emit stats into trace buffer */
6489 vfs_drt_trace(ocmap, DRT_DEBUG_SCMDATA,
6490 ocmap->scm_modulus,
6491 ocmap->scm_buckets,
6492 ocmap->scm_lastclean,
6493 ocmap->scm_iskips);
6494
6495 vfs_drt_free_map(ocmap);
6496 }
6497 return(KERN_SUCCESS);
6498 }
6499
6500
6501 /*
6502 * Free a sparse cluster map.
6503 */
6504 static kern_return_t
6505 vfs_drt_free_map(struct vfs_drt_clustermap *cmap)
6506 {
6507 kmem_free(kernel_map, (vm_offset_t)cmap,
6508 (cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION);
6509 return(KERN_SUCCESS);
6510 }
6511
6512
6513 /*
6514 * Find the hashtable slot currently occupied by an entry for the supplied offset.
6515 */
6516 static kern_return_t
6517 vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *indexp)
6518 {
6519 int index;
6520 u_int32_t i;
6521
6522 offset = DRT_ALIGN_ADDRESS(offset);
6523 index = DRT_HASH(cmap, offset);
6524
6525 /* traverse the hashtable */
6526 for (i = 0; i < cmap->scm_modulus; i++) {
6527
6528 /*
6529 * If the slot is vacant, we can stop.
6530 */
6531 if (DRT_HASH_VACANT(cmap, index))
6532 break;
6533
6534 /*
6535 * If the address matches our offset, we have success.
6536 */
6537 if (DRT_HASH_GET_ADDRESS(cmap, index) == offset) {
6538 *indexp = index;
6539 return(KERN_SUCCESS);
6540 }
6541
6542 /*
6543 * Move to the next slot, try again.
6544 */
6545 index = DRT_HASH_NEXT(cmap, index);
6546 }
6547 /*
6548 * It's not there.
6549 */
6550 return(KERN_FAILURE);
6551 }
6552
6553 /*
6554 * Find the hashtable slot for the supplied offset. If we haven't allocated
6555 * one yet, allocate one and populate the address field. Note that the new
6556 * entry will have a zero page count and thus will still technically be free, so
6557 * in the case where we are called to clean pages, the slot will remain free.
6558 */
6559 static kern_return_t
6560 vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *indexp, int recursed)
6561 {
6562 struct vfs_drt_clustermap *cmap;
6563 kern_return_t kret;
6564 u_int32_t index;
6565 u_int32_t i;
6566
6567 cmap = *cmapp;
6568
6569 /* look for an existing entry */
6570 kret = vfs_drt_search_index(cmap, offset, indexp);
6571 if (kret == KERN_SUCCESS)
6572 return(kret);
6573
6574 /* need to allocate an entry */
6575 offset = DRT_ALIGN_ADDRESS(offset);
6576 index = DRT_HASH(cmap, offset);
6577
6578 /* scan from the index forwards looking for a vacant slot */
6579 for (i = 0; i < cmap->scm_modulus; i++) {
6580 /* slot vacant? */
6581 if (DRT_HASH_VACANT(cmap, index) || DRT_HASH_GET_COUNT(cmap,index) == 0) {
6582 cmap->scm_buckets++;
6583 if (index < cmap->scm_lastclean)
6584 cmap->scm_lastclean = index;
6585 DRT_HASH_SET_ADDRESS(cmap, index, offset);
6586 DRT_HASH_SET_COUNT(cmap, index, 0);
6587 DRT_BITVECTOR_CLEAR(cmap, index);
6588 *indexp = index;
6589 vfs_drt_trace(cmap, DRT_DEBUG_INSERT, (int)offset, i, 0, 0);
6590 return(KERN_SUCCESS);
6591 }
6592 cmap->scm_iskips += i;
6593 index = DRT_HASH_NEXT(cmap, index);
6594 }
6595
6596 /*
6597 * We haven't found a vacant slot, so the map is full. If we're not
6598 * already recursed, try reallocating/compacting it.
6599 */
6600 if (recursed)
6601 return(KERN_FAILURE);
6602 kret = vfs_drt_alloc_map(cmapp);
6603 if (kret == KERN_SUCCESS) {
6604 /* now try to insert again */
6605 kret = vfs_drt_get_index(cmapp, offset, indexp, 1);
6606 }
6607 return(kret);
6608 }
6609
6610 /*
6611 * Implementation of set dirty/clean.
6612 *
6613 * In the 'clean' case, not finding a map is OK.
6614 */
6615 static kern_return_t
6616 vfs_drt_do_mark_pages(
6617 void **private,
6618 u_int64_t offset,
6619 u_int length,
6620 u_int *setcountp,
6621 int dirty)
6622 {
6623 struct vfs_drt_clustermap *cmap, **cmapp;
6624 kern_return_t kret;
6625 int i, index, pgoff, pgcount, setcount, ecount;
6626
6627 cmapp = (struct vfs_drt_clustermap **)private;
6628 cmap = *cmapp;
6629
6630 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_START, (int)offset, (int)length, dirty, 0);
6631
6632 if (setcountp != NULL)
6633 *setcountp = 0;
6634
6635 /* allocate a cluster map if we don't already have one */
6636 if (cmap == NULL) {
6637 /* no cluster map, nothing to clean */
6638 if (!dirty) {
6639 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 1, 0, 0, 0);
6640 return(KERN_SUCCESS);
6641 }
6642 kret = vfs_drt_alloc_map(cmapp);
6643 if (kret != KERN_SUCCESS) {
6644 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 2, 0, 0, 0);
6645 return(kret);
6646 }
6647 }
6648 setcount = 0;
6649
6650 /*
6651 * Iterate over the length of the region.
6652 */
6653 while (length > 0) {
6654 /*
6655 * Get the hashtable index for this offset.
6656 *
6657 * XXX this will add blank entries if we are clearing a range
6658 * that hasn't been dirtied.
6659 */
6660 kret = vfs_drt_get_index(cmapp, offset, &index, 0);
6661 cmap = *cmapp; /* may have changed! */
6662 /* this may be a partial-success return */
6663 if (kret != KERN_SUCCESS) {
6664 if (setcountp != NULL)
6665 *setcountp = setcount;
6666 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 3, (int)length, 0, 0);
6667
6668 return(kret);
6669 }
6670
6671 /*
6672 * Work out how many pages we're modifying in this
6673 * hashtable entry.
6674 */
6675 pgoff = (offset - DRT_ALIGN_ADDRESS(offset)) / PAGE_SIZE;
6676 pgcount = min((length / PAGE_SIZE), (DRT_BITVECTOR_PAGES - pgoff));
6677
6678 /*
6679 * Iterate over pages, dirty/clearing as we go.
6680 */
6681 ecount = DRT_HASH_GET_COUNT(cmap, index);
6682 for (i = 0; i < pgcount; i++) {
6683 if (dirty) {
6684 if (!DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
6685 DRT_HASH_SET_BIT(cmap, index, pgoff + i);
6686 ecount++;
6687 setcount++;
6688 }
6689 } else {
6690 if (DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
6691 DRT_HASH_CLEAR_BIT(cmap, index, pgoff + i);
6692 ecount--;
6693 setcount++;
6694 }
6695 }
6696 }
6697 DRT_HASH_SET_COUNT(cmap, index, ecount);
6698
6699 offset += pgcount * PAGE_SIZE;
6700 length -= pgcount * PAGE_SIZE;
6701 }
6702 if (setcountp != NULL)
6703 *setcountp = setcount;
6704
6705 vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 0, setcount, 0, 0);
6706
6707 return(KERN_SUCCESS);
6708 }
6709
6710 /*
6711 * Mark a set of pages as dirty/clean.
6712 *
6713 * This is a public interface.
6714 *
6715 * cmapp
6716 * Pointer to storage suitable for holding a pointer. Note that
6717 * this must either be NULL or a value set by this function.
6718 *
6719 * size
6720 * Current file size in bytes.
6721 *
6722 * offset
6723 * Offset of the first page to be marked as dirty, in bytes. Must be
6724 * page-aligned.
6725 *
6726 * length
6727 * Length of dirty region, in bytes. Must be a multiple of PAGE_SIZE.
6728 *
6729 * setcountp
6730 * Number of pages newly marked dirty by this call (optional).
6731 *
6732 * Returns KERN_SUCCESS if all the pages were successfully marked.
6733 */
6734 static kern_return_t
6735 vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp)
6736 {
6737 /* XXX size unused, drop from interface */
6738 return(vfs_drt_do_mark_pages(cmapp, offset, length, setcountp, 1));
6739 }
6740
6741 #if 0
6742 static kern_return_t
6743 vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length)
6744 {
6745 return(vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0));
6746 }
6747 #endif
6748
6749 /*
6750 * Get a cluster of dirty pages.
6751 *
6752 * This is a public interface.
6753 *
6754 * cmapp
6755 * Pointer to storage managed by drt_mark_pages. Note that this must
6756 * be NULL or a value set by drt_mark_pages.
6757 *
6758 * offsetp
6759 * Returns the byte offset into the file of the first page in the cluster.
6760 *
6761 * lengthp
6762 * Returns the length in bytes of the cluster of dirty pages.
6763 *
6764 * Returns success if a cluster was found. If KERN_FAILURE is returned, there
6765 * are no dirty pages meeting the minimum size criteria. Private storage will
6766 * be released if there are no more dirty pages left in the map.
6767 *
6768 */
6769 static kern_return_t
6770 vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp)
6771 {
6772 struct vfs_drt_clustermap *cmap;
6773 u_int64_t offset;
6774 u_int length;
6775 u_int32_t j;
6776 int index, i, fs, ls;
6777
6778 /* sanity */
6779 if ((cmapp == NULL) || (*cmapp == NULL))
6780 return(KERN_FAILURE);
6781 cmap = *cmapp;
6782
6783 /* walk the hashtable */
6784 for (offset = 0, j = 0; j < cmap->scm_modulus; offset += (DRT_BITVECTOR_PAGES * PAGE_SIZE), j++) {
6785 index = DRT_HASH(cmap, offset);
6786
6787 if (DRT_HASH_VACANT(cmap, index) || (DRT_HASH_GET_COUNT(cmap, index) == 0))
6788 continue;
6789
6790 /* scan the bitfield for a string of bits */
6791 fs = -1;
6792
6793 for (i = 0; i < DRT_BITVECTOR_PAGES; i++) {
6794 if (DRT_HASH_TEST_BIT(cmap, index, i)) {
6795 fs = i;
6796 break;
6797 }
6798 }
6799 if (fs == -1) {
6800 /* didn't find any bits set */
6801 panic("vfs_drt: entry summary count > 0 but no bits set in map");
6802 }
6803 for (ls = 0; i < DRT_BITVECTOR_PAGES; i++, ls++) {
6804 if (!DRT_HASH_TEST_BIT(cmap, index, i))
6805 break;
6806 }
6807
6808 /* compute offset and length, mark pages clean */
6809 offset = DRT_HASH_GET_ADDRESS(cmap, index) + (PAGE_SIZE * fs);
6810 length = ls * PAGE_SIZE;
6811 vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0);
6812 cmap->scm_lastclean = index;
6813
6814 /* return successful */
6815 *offsetp = (off_t)offset;
6816 *lengthp = length;
6817
6818 vfs_drt_trace(cmap, DRT_DEBUG_RETCLUSTER, (int)offset, (int)length, 0, 0);
6819 return(KERN_SUCCESS);
6820 }
6821 /*
6822 * We didn't find anything... hashtable is empty
6823 * emit stats into trace buffer and
6824 * then free it
6825 */
6826 vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
6827 cmap->scm_modulus,
6828 cmap->scm_buckets,
6829 cmap->scm_lastclean,
6830 cmap->scm_iskips);
6831
6832 vfs_drt_free_map(cmap);
6833 *cmapp = NULL;
6834
6835 return(KERN_FAILURE);
6836 }
6837
6838
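/*
 * vfs_drt_control: op_type 0 dumps the map's statistics into the trace buffer
 * and frees it; op_type 1 just resets scm_lastclean to zero
 */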
6839 static kern_return_t
6840 vfs_drt_control(void **cmapp, int op_type)
6841 {
6842 struct vfs_drt_clustermap *cmap;
6843
6844 /* sanity */
6845 if ((cmapp == NULL) || (*cmapp == NULL))
6846 return(KERN_FAILURE);
6847 cmap = *cmapp;
6848
6849 switch (op_type) {
6850 case 0:
6851 /* emit stats into trace buffer */
6852 vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
6853 cmap->scm_modulus,
6854 cmap->scm_buckets,
6855 cmap->scm_lastclean,
6856 cmap->scm_iskips);
6857
6858 vfs_drt_free_map(cmap);
6859 *cmapp = NULL;
6860 break;
6861
6862 case 1:
6863 cmap->scm_lastclean = 0;
6864 break;
6865 }
6866 return(KERN_SUCCESS);
6867 }
6868
6869
6870
6871 /*
6872 * Emit a summary of the state of the clustermap into the trace buffer
6873 * along with some caller-provided data.
6874 */
6875 #if KDEBUG
6876 static void
6877 vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, int code, int arg1, int arg2, int arg3, int arg4)
6878 {
6879 KERNEL_DEBUG(code, arg1, arg2, arg3, arg4, 0);
6880 }
6881 #else
6882 static void
6883 vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, __unused int code,
6884 __unused int arg1, __unused int arg2, __unused int arg3,
6885 __unused int arg4)
6886 {
6887 }
6888 #endif
6889
6890 #if 0
6891 /*
6892 * Perform basic sanity check on the hash entry summary count
6893 * vs. the actual bits set in the entry.
6894 */
6895 static void
6896 vfs_drt_sanity(struct vfs_drt_clustermap *cmap)
6897 {
6898 int index, i;
6899 int bits_on;
6900
6901 for (index = 0; index < cmap->scm_modulus; index++) {
6902 if (DRT_HASH_VACANT(cmap, index))
6903 continue;
6904
6905 for (bits_on = 0, i = 0; i < DRT_BITVECTOR_PAGES; i++) {
6906 if (DRT_HASH_TEST_BIT(cmap, index, i))
6907 bits_on++;
6908 }
6909 if (bits_on != DRT_HASH_GET_COUNT(cmap, index))
6910 panic("bits_on = %d, index = %d\n", bits_on, index);
6911 }
6912 }
6913 #endif