/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
/*
 *	File:	vnode_pager.c
 *
 *	"Swap" pager that pages to/from vnodes.  Also
 *	handles demand paging from files.
 *
 */

#include <mach/boolean.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/mount_internal.h>	/* needs internal due to fhandle_t */
#include <sys/ubc_internal.h>
#include <sys/lock.h>
#include <sys/disk.h>		/* For DKIOC calls */

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/upl.h>
#include <mach/sdt.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <libkern/libkern.h>

#include <vm/vnode_pager.h>
#include <vm/vm_pageout.h>

#include <kern/assert.h>
#include <sys/kdebug.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>

#include <vm/vm_protos.h>

#include <vfs/vfs_disk_conditioner.h>

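/*
 * Throttle the calling thread: if the current uthread's low-priority
 * I/O window is open, delay via throttle_lowpri_io() before returning.
 */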
void
vnode_pager_throttle()
{
	struct uthread *ut;

	ut = get_bsdthread_info(current_thread());

	if (ut->uu_lowpri_window)
		throttle_lowpri_io(1);
}

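/*
 * Report whether the mount backing this vnode sits on solid-state
 * storage, as determined by the disk conditioner.
 */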
boolean_t
vnode_pager_isSSD(vnode_t vp)
{
	return disk_conditioner_mount_is_ssd(vp->v_mount);
}

#if CONFIG_IOSCHED
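/*
 * Ask the underlying device to move an extent of I/O to a different
 * tier: look up the device block size, describe the extent in byte
 * units, then issue DKIOCSETTIER.  Errors are ignored; the call is
 * best-effort.
 */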
void
vnode_pager_issue_reprioritize_io(struct vnode *devvp, uint64_t blkno, uint32_t len, int priority)
{
	u_int32_t blocksize = 0;
	dk_extent_t extent;
	dk_set_tier_t set_tier;
	int error = 0;

	error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&blocksize, 0, vfs_context_kernel());
	if (error)
		return;

	memset(&extent, 0, sizeof(dk_extent_t));
	memset(&set_tier, 0, sizeof(dk_set_tier_t));

	extent.offset = blkno * (u_int64_t) blocksize;
	extent.length = len;

	set_tier.extents = &extent;
	set_tier.extentsCount = 1;
	set_tier.tier = priority;

	error = VNOP_IOCTL(devvp, DKIOCSETTIER, (caddr_t)&set_tier, 0, vfs_context_kernel());
	return;
}
#endif

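/*
 * Return 1 if the vnode holds use counts beyond the kernel-internal
 * ones (v_kusecount), i.e. someone other than the kernel is using it.
 */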
uint32_t
vnode_pager_isinuse(struct vnode *vp)
{
	if (vp->v_usecount > vp->v_kusecount)
		return (1);
	return (0);
}

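/*
 * Pass back the cluster layer's throttle I/O limit for this vnode.
 */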
uint32_t
vnode_pager_return_throttle_io_limit(struct vnode *vp, uint32_t *limit)
{
	return(cluster_throttle_io_limit(vp, limit));
}

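/*
 * Return the file size as tracked by the unified buffer cache (UBC).
 */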
vm_object_offset_t
vnode_pager_get_filesize(struct vnode *vp)
{
	return (vm_object_offset_t) ubc_getsize(vp);
}

extern int safe_getpath(struct vnode *dvp, char *leafname, char *path, int _len, int *truncated_path);

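/*
 * Retrieve a path and/or leaf name for the vnode on behalf of the VM
 * layer.  The leaf name is only collected when no path was requested
 * or the path had to be truncated (reported via *truncated_path_p).
 */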
kern_return_t
vnode_pager_get_name(
	struct vnode	*vp,
	char		*pathname,
	vm_size_t	pathname_len,
	char		*filename,
	vm_size_t	filename_len,
	boolean_t	*truncated_path_p)
{
	*truncated_path_p = FALSE;
	if (pathname != NULL) {
		/* get the path name */
		safe_getpath(vp, NULL,
			     pathname, (int) pathname_len,
			     truncated_path_p);
	}
	if ((pathname == NULL || *truncated_path_p) &&
	    filename != NULL) {
		/* get the file name */
		const char *name;

		name = vnode_getname_printable(vp);
		strlcpy(filename, name, (size_t) filename_len);
		vnode_putname_printable(name);
	}
	return KERN_SUCCESS;
}

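/*
 * Return the vnode's current modification time and, if 'cs_mtime' is
 * non-NULL, the modification time recorded for its code signature.
 */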
kern_return_t
vnode_pager_get_mtime(
	struct vnode	*vp,
	struct timespec	*current_mtime,
	struct timespec	*cs_mtime)
{
	vnode_mtime(vp, current_mtime, vfs_context_current());
	if (cs_mtime != NULL) {
		ubc_get_cs_mtime(vp, cs_mtime);
	}
	return KERN_SUCCESS;
}

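/*
 * Return the code-signing blobs the UBC has registered for this vnode.
 */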
kern_return_t
vnode_pager_get_cs_blobs(
	struct vnode	*vp,
	void		**blobs)
{
	*blobs = ubc_get_cs_blobs(vp);
	return KERN_SUCCESS;
}

/*
 * vnode_trim:
 * Used to call the DKIOCUNMAP ioctl on the underlying disk device for the specified vnode.
 * Trims the region at offset bytes into the file, for length bytes.
 *
 * Care must be taken to ensure that the vnode is sufficiently reference counted at the time this
 * function is called; no iocounts or usecounts are taken on the vnode.
 * This function is non-idempotent in error cases; we cannot un-discard the blocks if only some of them
 * are successfully discarded.
 */
u_int32_t
vnode_trim(
	struct vnode	*vp,
	off_t		offset,
	size_t		length)
{
	daddr64_t	io_blockno;	 /* Block number corresponding to the start of the extent */
	size_t		io_bytecount;	 /* Number of bytes in current extent for the specified range */
	size_t		trimmed = 0;
	off_t		current_offset = offset;
	size_t		remaining_length = length;
	int		error = 0;
	u_int32_t	blocksize = 0;
	struct vnode	*devvp;
	dk_extent_t	extent;
	dk_unmap_t	unmap;

	/* Get the underlying device vnode */
	devvp = vp->v_mount->mnt_devvp;

	/* Figure out the underlying device block size */
	error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&blocksize, 0, vfs_context_kernel());
	if (error) {
		goto trim_exit;
	}
	/*
	 * We may not get the entire range from offset -> offset+length in a single
	 * extent from the blockmap call.  Keep looping until we have trimmed the
	 * whole range or hit an error.
	 */
	while (trimmed < length) {
		/*
		 * VNOP_BLOCKMAP will tell us the logical-to-physical block number mapping for the
		 * specified offset.  It returns blocks in contiguous chunks, so if the logical range is
		 * broken into multiple extents, it must be called multiple times, increasing the offset
		 * in each call to ensure that the entire range is covered.
		 */
		error = VNOP_BLOCKMAP(vp, current_offset, remaining_length,
				      &io_blockno, &io_bytecount, NULL, VNODE_READ | VNODE_BLOCKMAP_NO_TRACK, NULL);
		if (error) {
			goto trim_exit;
		}
		/*
		 * We have a contiguous run.  Prepare & issue the ioctl for the device.
		 * The DKIOCUNMAP ioctl takes the offset in bytes from the start of the device.
		 */
		memset(&extent, 0, sizeof(dk_extent_t));
		memset(&unmap, 0, sizeof(dk_unmap_t));
		extent.offset = (uint64_t) io_blockno * (u_int64_t) blocksize;
		extent.length = io_bytecount;
		unmap.extents = &extent;
		unmap.extentsCount = 1;
		error = VNOP_IOCTL(devvp, DKIOCUNMAP, (caddr_t)&unmap, 0, vfs_context_kernel());
		if (error) {
			goto trim_exit;
		}
		remaining_length = remaining_length - io_bytecount;
		trimmed = trimmed + io_bytecount;
		current_offset = current_offset + io_bytecount;
	}
trim_exit:

	return error;
}

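/*
 * Page out a range of a file.  For default-pager requests, or for
 * filesystems implementing the "V2" form of VNOP_PAGEOUT, the whole
 * range is handed to the filesystem directly.  Otherwise the UPL is
 * walked here: in-core buffers covering the pages are invalidated and
 * VNOP_PAGEOUT is issued on each contiguous run of dirty pages.
 */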
pager_return_t
vnode_pageout(struct vnode *vp,
	upl_t			upl,
	upl_offset_t		upl_offset,
	vm_object_offset_t	f_offset,
	upl_size_t		size,
	int			flags,
	int			*errorp)
{
	int		result = PAGER_SUCCESS;
	int		error = 0;
	int		error_ret = 0;
	daddr64_t	blkno;
	int		isize;
	int		pg_index;
	int		base_index;
	upl_offset_t	offset;
	upl_page_info_t *pl;
	vfs_context_t	ctx = vfs_context_current();	/* pager context */

	isize = (int)size;

	if (isize <= 0) {
		result	  = PAGER_ERROR;
		error_ret = EINVAL;
		goto out;
	}

	if (UBCINFOEXISTS(vp) == 0) {
		result	  = PAGER_ERROR;
		error_ret = EINVAL;

		if (upl && !(flags & UPL_NOCOMMIT))
			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
		goto out;
	}
	if ( !(flags & UPL_VNODE_PAGER)) {
		/*
		 * This is a pageout from the default pager,
		 * just go ahead and call vnop_pageout since
		 * it has already sorted out the dirty ranges
		 */
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
		    size, 1, 0, 0, 0);

		if ( (error_ret = VNOP_PAGEOUT(vp, upl, upl_offset, (off_t)f_offset,
					       (size_t)size, flags, ctx)) )
			result = PAGER_ERROR;

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
		    size, 1, 0, 0, 0);

		goto out;
	}
	if (upl == NULL) {
		int	request_flags;

		if (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_PAGEOUTV2) {
			/*
			 * filesystem has requested the new form of VNOP_PAGEOUT for file
			 * backed objects... we will not grab the UPL before calling VNOP_PAGEOUT...
			 * it is the filesystem's responsibility to grab the range we're denoting
			 * via 'f_offset' and 'size' into a UPL... this allows the filesystem to first
			 * take any locks it needs, before effectively locking the pages into a UPL...
			 */
			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			    (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
			    size, (int)f_offset, 0, 0, 0);

			if ( (error_ret = VNOP_PAGEOUT(vp, NULL, upl_offset, (off_t)f_offset,
						       size, flags, ctx)) ) {
				result = PAGER_ERROR;
			}
			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			    (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
			    size, 0, 0, 0, 0);

			goto out;
		}
		if (flags & UPL_MSYNC)
			request_flags = UPL_UBC_MSYNC | UPL_RET_ONLY_DIRTY;
		else
			request_flags = UPL_UBC_PAGEOUT | UPL_RET_ONLY_DIRTY;

		if (ubc_create_upl_kernel(vp, f_offset, size, &upl, &pl, request_flags, VM_KERN_MEMORY_FILE) != KERN_SUCCESS) {
			result	  = PAGER_ERROR;
			error_ret = EINVAL;
			goto out;
		}
		upl_offset = 0;
	} else
		pl = ubc_upl_pageinfo(upl);

	/*
	 * Ignore any non-present pages at the end of the
	 * UPL so that we aren't looking at a UPL that
	 * may already have been freed by the preceding
	 * aborts/completions.
	 */
	base_index = upl_offset / PAGE_SIZE;

	for (pg_index = (upl_offset + isize) / PAGE_SIZE; pg_index > base_index;) {
		if (upl_page_present(pl, --pg_index))
			break;
		if (pg_index == base_index) {
			/*
			 * no pages were returned, so release
			 * our hold on the upl and leave
			 */
			if ( !(flags & UPL_NOCOMMIT))
				ubc_upl_abort_range(upl, upl_offset, isize, UPL_ABORT_FREE_ON_EMPTY);

			goto out;
		}
	}
	isize = ((pg_index + 1) - base_index) * PAGE_SIZE;

	/*
	 * we come here for pageouts to 'real' files and
	 * for msyncs... the upl may not contain any
	 * dirty pages.. it's our responsibility to sort
	 * through it and find the 'runs' of dirty pages
	 * to call VNOP_PAGEOUT on...
	 */

	if (ubc_getsize(vp) == 0) {
		/*
		 * if the file has been effectively deleted, then
		 * we need to go through the UPL and invalidate any
		 * buffer headers we might have that reference any
		 * of its pages
		 */
		for (offset = upl_offset; isize; isize -= PAGE_SIZE, offset += PAGE_SIZE) {
#if NFSCLIENT
			if (vp->v_tag == VT_NFS)
				/* check with nfs if page is OK to drop */
				error = nfs_buf_page_inval(vp, (off_t)f_offset);
			else
#endif
			{
				blkno = ubc_offtoblk(vp, (off_t)f_offset);
				error = buf_invalblkno(vp, blkno, 0);
			}
			if (error) {
				if ( !(flags & UPL_NOCOMMIT))
					ubc_upl_abort_range(upl, offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
				if (error_ret == 0)
					error_ret = error;
				result = PAGER_ERROR;

			} else if ( !(flags & UPL_NOCOMMIT)) {
				ubc_upl_commit_range(upl, offset, PAGE_SIZE, UPL_COMMIT_FREE_ON_EMPTY);
			}
			f_offset += PAGE_SIZE;
		}
		goto out;
	}

	offset = upl_offset;
	pg_index = base_index;

	while (isize) {
		int  xsize;
		int  num_of_pages;

		if ( !upl_page_present(pl, pg_index)) {
			/*
			 * we asked for RET_ONLY_DIRTY, so it's possible
			 * to get back empty slots in the UPL
			 * just skip over them
			 */
			f_offset += PAGE_SIZE;
			offset   += PAGE_SIZE;
			isize    -= PAGE_SIZE;
			pg_index++;

			continue;
		}
		if ( !upl_dirty_page(pl, pg_index)) {
			/*
			 * if the page is not dirty and reached here it is
			 * marked precious or it is due to invalidation in
			 * memory_object_lock request as part of truncation.
			 * We also get here from vm_object_terminate().
			 * So all you need to do in these
			 * cases is to invalidate the incore buffer if it is there.
			 * Note we must not sleep here if the buffer is busy - that is
			 * a lock inversion which causes deadlock.
			 */
#if NFSCLIENT
			if (vp->v_tag == VT_NFS)
				/* check with nfs if page is OK to drop */
				error = nfs_buf_page_inval(vp, (off_t)f_offset);
			else
#endif
			{
				blkno = ubc_offtoblk(vp, (off_t)f_offset);
				error = buf_invalblkno(vp, blkno, 0);
			}
			if (error) {
				if ( !(flags & UPL_NOCOMMIT))
					ubc_upl_abort_range(upl, offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
				if (error_ret == 0)
					error_ret = error;
				result = PAGER_ERROR;

			} else if ( !(flags & UPL_NOCOMMIT)) {
				ubc_upl_commit_range(upl, offset, PAGE_SIZE, UPL_COMMIT_FREE_ON_EMPTY);
			}
			f_offset += PAGE_SIZE;
			offset   += PAGE_SIZE;
			isize    -= PAGE_SIZE;
			pg_index++;

			continue;
		}
		num_of_pages = 1;
		xsize = isize - PAGE_SIZE;

		while (xsize) {
			if ( !upl_dirty_page(pl, pg_index + num_of_pages))
				break;
			num_of_pages++;
			xsize -= PAGE_SIZE;
		}
		xsize = num_of_pages * PAGE_SIZE;

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
		    xsize, (int)f_offset, 0, 0, 0);

		if ( (error = VNOP_PAGEOUT(vp, upl, offset, (off_t)f_offset,
					   xsize, flags, ctx)) ) {
			if (error_ret == 0)
				error_ret = error;
			result = PAGER_ERROR;
		}
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
		    xsize, 0, 0, 0, 0);

		f_offset += xsize;
		offset   += xsize;
		isize    -= xsize;
		pg_index += num_of_pages;
	}
out:
	if (errorp)
		*errorp = error_ret;

	return (result);
}

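/*
 * Page in a range of a file.  If the caller supplies no UPL, either the
 * request is passed straight to the filesystem (the "V2" form of
 * VNOP_PAGEIN) or a UPL is created here, in which case commit/abort
 * responsibility is assumed regardless of UPL_NOCOMMIT.  Pages already
 * valid in the cache are aborted unchanged; I/O is issued only for runs
 * of present, not-yet-valid pages.
 */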
pager_return_t
vnode_pagein(
	struct vnode		*vp,
	upl_t			upl,
	upl_offset_t		upl_offset,
	vm_object_offset_t	f_offset,
	upl_size_t		size,
	int			flags,
	int			*errorp)
{
	upl_page_info_t *pl;
	int		result = PAGER_SUCCESS;
	int		error = 0;
	int		pages_in_upl;
	int		start_pg;
	int		last_pg;
	int		first_pg;
	int		xsize;
	int		must_commit = 1;
	int		ignore_valid_page_check = 0;

	if (flags & UPL_NOCOMMIT)
		must_commit = 0;

	if (flags & UPL_IGNORE_VALID_PAGE_CHECK)
		ignore_valid_page_check = 1;

	if (UBCINFOEXISTS(vp) == 0) {
		result = PAGER_ERROR;
		error  = PAGER_ERROR;

		if (upl && must_commit)
			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);

		goto out;
	}
	if (upl == (upl_t)NULL) {
		flags &= ~UPL_NOCOMMIT;

		if (size > MAX_UPL_SIZE_BYTES) {
			result = PAGER_ERROR;
			error  = PAGER_ERROR;
			goto out;
		}
		if (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_PAGEINV2) {
			/*
			 * filesystem has requested the new form of VNOP_PAGEIN for file
			 * backed objects... we will not grab the UPL before calling VNOP_PAGEIN...
			 * it is the filesystem's responsibility to grab the range we're denoting
			 * via 'f_offset' and 'size' into a UPL... this allows the filesystem to first
			 * take any locks it needs, before effectively locking the pages into a UPL...
			 * so we pass a NULL into the filesystem instead of a UPL pointer... the 'upl_offset'
			 * is used to identify the "must have" page in the extent... the filesystem is free
			 * to clip the extent to better fit the underlying FS blocksize if it desires as
			 * long as it continues to include the "must have" page... 'f_offset' + 'upl_offset'
			 * identifies that page
			 */
			if ( (error = VNOP_PAGEIN(vp, NULL, upl_offset, (off_t)f_offset,
						  size, flags, vfs_context_current())) ) {
				result = PAGER_ERROR;
				error  = PAGER_ERROR;
			}
			goto out;
		}
		ubc_create_upl_kernel(vp, f_offset, size, &upl, &pl, UPL_UBC_PAGEIN | UPL_RET_ONLY_ABSENT, VM_KERN_MEMORY_FILE);

		if (upl == (upl_t)NULL) {
			result = PAGER_ABSENT;
			error  = PAGER_ABSENT;
			goto out;
		}
		ubc_upl_range_needed(upl, upl_offset / PAGE_SIZE, 1);

		upl_offset = 0;
		first_pg = 0;

		/*
		 * if we get here, we've created the upl and
		 * are responsible for committing/aborting it
		 * regardless of what the caller has passed in
		 */
		must_commit = 1;
	} else {
		pl = ubc_upl_pageinfo(upl);
		first_pg = upl_offset / PAGE_SIZE;
	}
	pages_in_upl = size / PAGE_SIZE;
	DTRACE_VM2(pgpgin, int, pages_in_upl, (uint64_t *), NULL);

	/*
	 * before we start marching forward, we must make sure we end on
	 * a present page, otherwise we will be working with a freed
	 * upl
	 */
	for (last_pg = pages_in_upl - 1; last_pg >= first_pg; last_pg--) {
		if (upl_page_present(pl, last_pg))
			break;
		if (last_pg == first_pg) {
			/*
			 * empty UPL, no pages are present
			 */
			if (must_commit)
				ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
			goto out;
		}
	}
	pages_in_upl = last_pg + 1;
	last_pg = first_pg;

	while (last_pg < pages_in_upl) {
		/*
		 * skip over missing pages...
		 */
		for ( ; last_pg < pages_in_upl; last_pg++) {
			if (upl_page_present(pl, last_pg))
				break;
		}

		if (ignore_valid_page_check == 1) {
			start_pg = last_pg;
		} else {
			/*
			 * skip over 'valid' pages... we don't want to issue I/O for these
			 */
			for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
				if (!upl_valid_page(pl, last_pg))
					break;
			}
		}

		if (last_pg > start_pg) {
			/*
			 * we've found a range of valid pages
			 * if we've got COMMIT responsibility
			 * commit this range of pages back to the
			 * cache unchanged
			 */
			xsize = (last_pg - start_pg) * PAGE_SIZE;

			if (must_commit)
				ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, xsize, UPL_ABORT_FREE_ON_EMPTY);
		}
		if (last_pg == pages_in_upl)
			/*
			 * we're done... all pages that were present
			 * have either had I/O issued on them or
			 * were aborted unchanged...
			 */
			break;

		if (!upl_page_present(pl, last_pg)) {
			/*
			 * we found a range of valid pages
			 * terminated by a missing page...
			 * bump index to the next page and continue on
			 */
			last_pg++;
			continue;
		}
		/*
		 * scan from the found invalid page looking for a valid
		 * or non-present page before the end of the upl is reached, if we
		 * find one, then it will be the last page of the request to
		 * 'cluster_io'
		 */
		for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
			if (( !ignore_valid_page_check && upl_valid_page(pl, last_pg)) || !upl_page_present(pl, last_pg))
				break;
		}
		if (last_pg > start_pg) {
			int xoff;
			xsize = (last_pg - start_pg) * PAGE_SIZE;
			xoff  = start_pg * PAGE_SIZE;

			if ( (error = VNOP_PAGEIN(vp, upl, (upl_offset_t) xoff,
						  (off_t)f_offset + xoff,
						  xsize, flags, vfs_context_current())) ) {
				/*
				 * Usually this UPL will be aborted/committed by the lower cluster layer.
				 *
				 * a) In the case of decmpfs, however, we may return an error (EAGAIN) to avoid
				 *    a deadlock with another thread already inflating the file.
				 *
				 * b) In the case of content protection, EPERM is a valid error and we should respect it.
				 *
				 * In those cases, we must take care of our UPL at this layer itself.
				 */
				if (must_commit) {
					if (error == EAGAIN) {
						ubc_upl_abort_range(upl, (upl_offset_t) xoff, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_RESTART);
					}
					if (error == EPERM) {
						ubc_upl_abort_range(upl, (upl_offset_t) xoff, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
					}
				}
				result = PAGER_ERROR;
				error  = PAGER_ERROR;
			}
		}
	}
out:
	if (errorp)
		*errorp = result;

	return (error);
}

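/*
 * Accessor: return the kernel's internal page-info list for a UPL.
 */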
void *
upl_get_internal_page_list(upl_t upl)
{
	return(UPL_GET_INTERNAL_PAGE_LIST(upl));
}