/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>

#include <kern/host.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <default_pager/default_pager_types.h>
#include <default_pager/default_pager_object_server.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/* BSD VM COMPONENT INTERFACES */
int
get_map_nentries(
	vm_map_t);

vm_offset_t
get_map_start(
	vm_map_t);

vm_offset_t
get_map_end(
	vm_map_t);

/*
 *
 */
int
get_map_nentries(
	vm_map_t map)
{
	return(map->hdr.nentries);
}

mach_vm_offset_t
mach_get_vm_start(vm_map_t map)
{
	return( vm_map_first_entry(map)->vme_start);
}

mach_vm_offset_t
mach_get_vm_end(vm_map_t map)
{
	return( vm_map_last_entry(map)->vme_end);
}

/*
 * Legacy routines to get the start and end for a vm_map_t. They
 * return the addresses in vm_offset_t format, so they should only
 * be called on maps that are the same size as the kernel map for
 * accurate results.
 */
vm_offset_t
get_vm_start(
	vm_map_t map)
{
	return(CAST_DOWN(vm_offset_t, vm_map_first_entry(map)->vme_start));
}

vm_offset_t
get_vm_end(
	vm_map_t map)
{
	return(CAST_DOWN(vm_offset_t, vm_map_last_entry(map)->vme_end));
}
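
/*
 * Illustrative note (an assumption, not part of the original comment): on a
 * configuration where vm_map_offset_t is wider than vm_offset_t, CAST_DOWN()
 * above silently drops the upper bits of the entry address, which is why
 * these legacy interfaces are only accurate for maps whose addresses fit in
 * a vm_offset_t, such as the kernel map.
 */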

/*
 * BSD VNODE PAGER
 */

/* until component support available */
int	vnode_pager_workaround;

typedef struct vnode_pager {
	int			*pager;			/* pager workaround pointer */
	unsigned int		pager_ikot;		/* JMM: fake ip_kotype() */
	unsigned int		ref_count;		/* reference count */
	memory_object_control_t control_handle;	/* mem object control handle */
	struct vnode		*vnode_handle;		/* vnode handle */
} *vnode_pager_t;
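/*
 * Note on the first two fields: vnode_object_create() below points 'pager'
 * at vnode_pager_workaround and sets 'pager_ikot' to IKOT_MEMORY_OBJECT so
 * that vm_map, which accepts both named entry ports and raw memory objects
 * through the same parameter, treats this structure as a memory object
 * rather than a named entry port.
 */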


ipc_port_t
trigger_name_to_port(		/* forward */
	mach_port_t);

kern_return_t
vnode_pager_cluster_read(	/* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t);

void
vnode_pager_cluster_write(	/* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t,
	vm_object_offset_t *,
	int *,
	int);


vnode_pager_t
vnode_object_create(		/* forward */
	struct vnode *);

vnode_pager_t
vnode_pager_lookup(		/* forward */
	memory_object_t);

zone_t	vnode_pager_zone;


#define VNODE_PAGER_NULL	((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define CLUSTER_SHIFT	1

/* TODO: Should be set dynamically by vnode_pager_bootstrap() */
#define MAX_VNODE	10000


#if DEBUG
int pagerdebug = 0;

#define PAGER_ALL	0xffffffff
#define PAGER_INIT	0x00000001
#define PAGER_PAGEIN	0x00000002

#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL) == LEVEL) {printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
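
/*
 * Illustrative note: on a DEBUG kernel, setting pagerdebug to one of the
 * level masks above (for example PAGER_PAGEIN, or PAGER_ALL for everything)
 * enables the corresponding PAGER_DEBUG() printf tracing used throughout
 * this file.
 */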

/*
 *	Routine:	macx_triggers
 *	Function:
 *		Syscall interface to set the callbacks for low and
 *		high water marks.
 */
int
macx_triggers(
	struct macx_triggers_args *args)
{
	int	hi_water = args->hi_water;
	int	low_water = args->low_water;
	int	flags = args->flags;
	mach_port_t	trigger_name = args->alert_port;
	kern_return_t	kr;
	memory_object_default_t	default_pager;
	ipc_port_t	trigger_port;

	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(),
					&default_pager, 0);
	if (kr != KERN_SUCCESS) {
		return EINVAL;
	}

	if ((flags & SWAP_ENCRYPT_ON) &&
	    (flags & SWAP_ENCRYPT_OFF)) {
		/* can't have it both ways */
		return EINVAL;
	}

	if (flags & SWAP_ENCRYPT_ON) {
		/* ENCRYPTED SWAP: tell default_pager to encrypt */
		default_pager_triggers(default_pager,
				       0, 0,
				       SWAP_ENCRYPT_ON,
				       IP_NULL);
	} else if (flags & SWAP_ENCRYPT_OFF) {
		/* ENCRYPTED SWAP: tell default_pager not to encrypt */
		default_pager_triggers(default_pager,
				       0, 0,
				       SWAP_ENCRYPT_OFF,
				       IP_NULL);
	}

	if (flags & HI_WAT_ALERT) {
		trigger_port = trigger_name_to_port(trigger_name);
		if (trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port);
		/* now unlocked */
		default_pager_triggers(default_pager,
				       hi_water, low_water,
				       HI_WAT_ALERT, trigger_port);
	}

	if (flags & LO_WAT_ALERT) {
		trigger_port = trigger_name_to_port(trigger_name);
		if (trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port);
		/* and now it's unlocked */
		default_pager_triggers(default_pager,
				       hi_water, low_water,
				       LO_WAT_ALERT, trigger_port);
	}

	/*
	 * Set thread scheduling priority and policy for the current thread;
	 * it is assumed for the time being that the thread setting the alert
	 * is the same one which will be servicing it.
	 *
	 * XXX This does not belong in the kernel XXX
	 */
	{
		thread_precedence_policy_data_t	pre;
		thread_extended_policy_data_t	ext;

		ext.timeshare = FALSE;
		pre.importance = INT32_MAX;

		thread_policy_set(current_thread(),
				  THREAD_EXTENDED_POLICY,
				  (thread_policy_t)&ext,
				  THREAD_EXTENDED_POLICY_COUNT);

		thread_policy_set(current_thread(),
				  THREAD_PRECEDENCE_POLICY,
				  (thread_policy_t)&pre,
				  THREAD_PRECEDENCE_POLICY_COUNT);
	}

	current_thread()->options |= TH_OPT_VMPRIV;

	return 0;
}
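
/*
 * Illustrative only (not from the original source): a user-space swap
 * management daemon in the spirit of dynamic_pager might register for the
 * water-mark alerts roughly as follows; the water-mark variable names are
 * hypothetical.
 *
 *	mach_port_t alert_port;
 *
 *	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
 *			   &alert_port);
 *	macx_triggers(hi_water_pages, low_water_pages,
 *		      HI_WAT_ALERT | LO_WAT_ALERT, alert_port);
 *
 * The kernel side above converts alert_port with trigger_name_to_port(),
 * so the name must denote a receive right in the caller's IPC space, and
 * then hands a send right to the default pager via default_pager_triggers().
 */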

/*
 *
 */
ipc_port_t
trigger_name_to_port(
	mach_port_t	trigger_name)
{
	ipc_port_t	trigger_port;
	ipc_space_t	space;

	if (trigger_name == 0)
		return (NULL);

	space = current_space();
	if (ipc_port_translate_receive(space, (mach_port_name_t)trigger_name,
				&trigger_port) != KERN_SUCCESS)
		return (NULL);
	return trigger_port;
}


extern int	uiomove64(addr64_t, int, void *);
#define MAX_RUN	32

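/*
 * memory_object_control_uiomove() copies data between a uio and the resident
 * pages of the object backing 'control'.  Each pass gathers up to MAX_RUN
 * resident, non-busy pages, marks them busy, drops the object lock while
 * uiomove64() touches each page, then re-takes the lock and wakes the pages.
 * It bails out early (returning 0) on a hole in the cache, or when
 * copy-on-write obligations would have to be honored first.
 */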
int
memory_object_control_uiomove(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	void			*uio,
	int			start_offset,
	int			io_requested,
	int			mark_dirty)
{
	vm_object_t		object;
	vm_page_t		dst_page;
	int			xsize;
	int			retval = 0;
	int			cur_run;
	int			cur_needed;
	int			i;
	vm_page_t		page_run[MAX_RUN];


	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		return (0);
	}
	assert(!object->internal);

	vm_object_lock(object);

	if (mark_dirty && object->copy != VM_OBJECT_NULL) {
		/*
		 * We can't modify the pages without honoring
		 * copy-on-write obligations first, so fall off
		 * this optimized path and fall back to the regular
		 * path.
		 */
		vm_object_unlock(object);
		return 0;
	}

	while (io_requested && retval == 0) {

		cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

		if (cur_needed > MAX_RUN)
			cur_needed = MAX_RUN;

		for (cur_run = 0; cur_run < cur_needed; ) {

			if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
				break;
			/*
			 * Sync up on getting the busy bit
			 */
			if ((dst_page->busy || dst_page->cleaning)) {
				/*
				 * someone else is playing with the page... if we've
				 * already collected pages into this run, go ahead
				 * and process them now; we can't block on this
				 * page while holding other pages in the BUSY state.
				 * otherwise, wait for the page to come free
				 */
				if (cur_run)
					break;
				PAGE_SLEEP(object, dst_page, THREAD_UNINT);
				continue;
			}
			/*
			 * this routine is only called when copying
			 * to/from real files... no need to consider
			 * encrypted swap pages
			 */
			assert(!dst_page->encrypted);

			if (mark_dirty)
				dst_page->dirty = TRUE;
			dst_page->busy = TRUE;

			page_run[cur_run++] = dst_page;

			offset += PAGE_SIZE_64;
		}
		if (cur_run == 0)
			/*
			 * we hit a 'hole' in the cache...
			 * bail at this point; we'll unlock
			 * the object below
			 */
			break;
		vm_object_unlock(object);

		for (i = 0; i < cur_run; i++) {

			dst_page = page_run[i];

			if ((xsize = PAGE_SIZE - start_offset) > io_requested)
				xsize = io_requested;

			if ( (retval = uiomove64((addr64_t)(((addr64_t)(dst_page->phys_page) << 12) + start_offset), xsize, uio)) )
				break;

			io_requested -= xsize;
			start_offset = 0;
		}
		vm_object_lock(object);

		for (i = 0; i < cur_run; i++) {
			dst_page = page_run[i];

			PAGE_WAKEUP_DONE(dst_page);
		}
	}
	vm_object_unlock(object);

	return (retval);
}


/*
 *
 */
void
vnode_pager_bootstrap(void)
{
	register vm_size_t	size;

	size = (vm_size_t) sizeof(struct vnode_pager);
	vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
				PAGE_SIZE, "vnode pager structures");
	return;
}

/*
 *
 */
memory_object_t
vnode_pager_setup(
	struct vnode		*vp,
	__unused memory_object_t	pager)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_object_create(vp);
	if (vnode_object == VNODE_PAGER_NULL)
		panic("vnode_pager_setup: vnode_object_create() failed");
	return((memory_object_t)vnode_object);
}

/*
 *
 */
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
		memory_object_control_t control,
#if !DEBUG
		 __unused
#endif
		 vm_size_t pg_size)
{
	vnode_pager_t	vnode_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t	attributes;


	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %x\n", mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	vnode_object->control_handle = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = TRUE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("vnode_pager_init: memory_object_change_attributes() failed");

	return(KERN_SUCCESS);
}

/*
 *
 */
kern_return_t
vnode_pager_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt,
	memory_object_offset_t	*resid_offset,
	int			*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	int			upl_flags)
{
	register vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);

	vnode_pager_cluster_write(vnode_object, offset, data_cnt, resid_offset, io_error, upl_flags);

	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt)
{
	panic("vnode_pager_data_initialize");
	return KERN_FAILURE;
}

kern_return_t
vnode_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

kern_return_t
vnode_pager_get_object_size(
	memory_object_t		mem_obj,
	memory_object_offset_t	*length)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);

	*length = vnode_pager_get_filesize(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

/*
 *
 */
kern_return_t
vnode_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
#if !DEBUG
	__unused
#endif
	vm_prot_t		protection_required)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_data_request: %x, %x, %x, %x\n", mem_obj, offset, length, protection_required));

	vnode_object = vnode_pager_lookup(mem_obj);

	PAGER_DEBUG(PAGER_PAGEIN, ("vnode_pager_data_request: %x, %x, %x, %x, vnode_object %x\n", mem_obj, offset, length, protection_required, vnode_object));

	return vnode_pager_cluster_read(vnode_object, offset, length);
}

/*
 *
 */
void
vnode_pager_reference(
	memory_object_t		mem_obj)
{
	register vnode_pager_t	vnode_object;
	unsigned int		new_ref_count;

	vnode_object = vnode_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&vnode_object->ref_count, 1);
	assert(new_ref_count > 1);
}

/*
 *
 */
void
vnode_pager_deallocate(
	memory_object_t		mem_obj)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %x\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	if (hw_atomic_sub(&vnode_object->ref_count, 1) == 0) {
		if (vnode_object->vnode_handle != NULL) {
			vnode_pager_vrele(vnode_object->vnode_handle);
		}
		zfree(vnode_pager_zone, vnode_object);
	}
	return;
}

/*
 *
 */
kern_return_t
vnode_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t	mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %x\n", mem_obj));

	return(KERN_SUCCESS);
}

/*
 *
 */
kern_return_t
vnode_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	__unused vm_sync_t	sync_flags)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %x\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_synchronize_completed(vnode_object->control_handle, offset, length);

	return (KERN_SUCCESS);
}

/*
 *
 */
kern_return_t
vnode_pager_unmap(
	memory_object_t		mem_obj)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_unmap: %x\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	ubc_unmap(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}


/*
 *
 */
void
vnode_pager_cluster_write(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt,
	vm_object_offset_t	*resid_offset,
	int			*io_error,
	int			upl_flags)
{
	vm_size_t	size;
	upl_t		upl = NULL;
	int		request_flags;
	int		errno;

	if (upl_flags & UPL_MSYNC) {

		upl_flags |= UPL_VNODE_PAGER;

		if ( (upl_flags & UPL_IOSYNC) && io_error)
			upl_flags |= UPL_KEEPCACHED;

		while (cnt) {
			kern_return_t	kr;

			size = (cnt < (PAGE_SIZE * MAX_UPL_TRANSFER)) ? cnt : (PAGE_SIZE * MAX_UPL_TRANSFER); /* effective max */

			request_flags = UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
					UPL_SET_INTERNAL | UPL_SET_LITE;

			kr = memory_object_upl_request(vnode_object->control_handle,
						       offset, size, &upl, NULL, NULL, request_flags);
			if (kr != KERN_SUCCESS)
				panic("vnode_pager_cluster_write: upl request failed\n");

			vnode_pageout(vnode_object->vnode_handle,
				      upl, (vm_offset_t)0, offset, size, upl_flags, &errno);

			if ( (upl_flags & UPL_KEEPCACHED) ) {
				if ( (*io_error = errno) )
					break;
			}
			cnt    -= size;
			offset += size;
		}
		if (resid_offset)
			*resid_offset = offset;

	} else {
		vm_object_offset_t	vnode_size;
		vm_object_offset_t	base_offset;
		vm_object_t		object;
		vm_page_t		target_page;
		int			ticket;

		/*
		 * this is the pageout path
		 */
		vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);

		if (vnode_size > (offset + PAGE_SIZE)) {
			/*
			 * preset the maximum size of the cluster
			 * and put us on a nice cluster boundary...
			 * and then clip the size to ensure we
			 * don't request past the end of the underlying file
			 */
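			/*
			 * Illustrative arithmetic (an assumption, not part of
			 * the original comment): if PAGE_SIZE * MAX_UPL_TRANSFER
			 * worked out to 1MB (0x100000), an offset of 0x123000
			 * would give
			 *	base_offset = 0x123000 & ~0xFFFFF = 0x100000
			 * i.e. the cluster is aligned down to a 1MB boundary
			 * and then clipped so it never extends past vnode_size.
			 */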
			size = PAGE_SIZE * MAX_UPL_TRANSFER;
			base_offset = offset & ~((signed)(size - 1));

			if ((base_offset + size) > vnode_size)
				size = round_page_32(((vm_size_t)(vnode_size - base_offset)));
		} else {
			/*
			 * we've been requested to page out a page beyond the current
			 * end of the 'file'... don't try to cluster in this case...
			 * we still need to send this page through because it might
			 * be marked precious and the underlying filesystem may need
			 * to do something with it (besides page it out)...
			 */
			base_offset = offset;
			size = PAGE_SIZE;
		}
		object = memory_object_control_to_vm_object(vnode_object->control_handle);

		if (object == VM_OBJECT_NULL)
			panic("vnode_pager_cluster_write: NULL vm_object in control handle\n");

		request_flags = UPL_NOBLOCK | UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
				UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM |
				UPL_SET_INTERNAL | UPL_SET_LITE;

		vm_object_lock(object);

		if ((target_page = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
			/*
			 * only pick up pages whose ticket number matches
			 * the ticket number of the page originally targeted
			 * for pageout
			 */
			ticket = target_page->page_ticket;

			request_flags |= ((ticket << UPL_PAGE_TICKET_SHIFT) & UPL_PAGE_TICKET_MASK);
		}
		vm_object_unlock(object);

		vm_object_upl_request(object, base_offset, size,
				      &upl, NULL, NULL, request_flags);
		if (upl == NULL)
			panic("vnode_pager_cluster_write: upl request failed\n");

		vnode_pageout(vnode_object->vnode_handle,
			      upl, (vm_offset_t)0, upl->offset, upl->size, UPL_VNODE_PAGER, NULL);
	}
}


/*
 *
 */
kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt)
{
	int		local_error = 0;
	int		kret;

	assert(! (cnt & PAGE_MASK));

	kret = vnode_pagein(vnode_object->vnode_handle,
			    (upl_t) NULL,
			    (vm_offset_t) NULL,
			    offset,
			    cnt,
			    0,
			    &local_error);
/*
	if (kret == PAGER_ABSENT) {
	Need to work out the defs here: 1 corresponds to PAGER_ABSENT,
	defined in bsd/vm/vm_pager.h. However, we should not be including
	that file here; it is a layering violation.
*/
	if (kret == 1) {
		int		uplflags;
		upl_t		upl = NULL;
		int		count = 0;
		kern_return_t	kr;

		uplflags = (UPL_NO_SYNC |
			    UPL_CLEAN_IN_PLACE |
			    UPL_SET_INTERNAL);
		count = 0;
		kr = memory_object_upl_request(vnode_object->control_handle,
					       offset, cnt,
					       &upl, NULL, &count, uplflags);
		if (kr == KERN_SUCCESS) {
			upl_abort(upl, 0);
			upl_deallocate(upl);
		} else {
			/*
			 * We couldn't gather the page list, probably
			 * because the memory object doesn't have a link
			 * to a VM object anymore (forced unmount, for
			 * example). Just return an error to the vm_fault()
			 * path and let it handle it.
			 */
		}

		return KERN_FAILURE;
	}

	return KERN_SUCCESS;

}


/*
 *
 */
void
vnode_pager_release_from_cache(
	int	*cnt)
{
	memory_object_free_from_cache(
			&realhost, &vnode_pager_workaround, cnt);
}

/*
 *
 */
vnode_pager_t
vnode_object_create(
	struct vnode *vp)
{
	register vnode_pager_t	vnode_object;

	vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
	if (vnode_object == VNODE_PAGER_NULL)
		return(VNODE_PAGER_NULL);

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the second word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	vnode_object->pager = &vnode_pager_workaround;
	vnode_object->pager_ikot = IKOT_MEMORY_OBJECT;
	vnode_object->ref_count = 1;
	vnode_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
	vnode_object->vnode_handle = vp;

	return(vnode_object);
}

/*
 *
 */
vnode_pager_t
vnode_pager_lookup(
	memory_object_t	name)
{
	vnode_pager_t	vnode_object;

	vnode_object = (vnode_pager_t)name;
	assert(vnode_object->pager == &vnode_pager_workaround);
	return (vnode_object);
}