apple/xnu xnu-792.12.6: osfmk/vm/bsd_vm.c
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30
31 #include <sys/errno.h>
32
33 #include <mach/mach_types.h>
34 #include <mach/mach_traps.h>
35 #include <mach/host_priv.h>
36 #include <mach/kern_return.h>
37 #include <mach/memory_object_control.h>
38 #include <mach/memory_object_types.h>
39 #include <mach/port.h>
40 #include <mach/policy.h>
41 #include <mach/upl.h>
42 #include <mach/thread_act.h>
43
44 #include <kern/host.h>
45 #include <kern/thread.h>
46
47 #include <ipc/ipc_port.h>
48 #include <ipc/ipc_space.h>
49
50 #include <default_pager/default_pager_types.h>
51 #include <default_pager/default_pager_object_server.h>
52
53 #include <vm/vm_map.h>
54 #include <vm/vm_kern.h>
55 #include <vm/vm_pageout.h>
56 #include <vm/memory_object.h>
57 #include <vm/vm_pageout.h>
58 #include <vm/vm_protos.h>
59
60 /* BSD VM COMPONENT INTERFACES */
61 int
62 get_map_nentries(
63 vm_map_t);
64
65 vm_offset_t
66 get_map_start(
67 vm_map_t);
68
69 vm_offset_t
70 get_map_end(
71 vm_map_t);
72
73 /*
74 * Return the number of entries in the given VM map.
75 */
76 int
77 get_map_nentries(
78 vm_map_t map)
79 {
80 return(map->hdr.nentries);
81 }
82
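/*
 * Return the lowest and highest mapped addresses of a map in the full
 * 64-bit mach_vm_offset_t format (compare the legacy CAST_DOWN-based
 * variants further below).
 */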
83 mach_vm_offset_t
84 mach_get_vm_start(vm_map_t map)
85 {
86 return( vm_map_first_entry(map)->vme_start);
87 }
88
89 mach_vm_offset_t
90 mach_get_vm_end(vm_map_t map)
91 {
92 return( vm_map_last_entry(map)->vme_end);
93 }
94
95 /*
96 * Legacy routines to get the start and end addresses of a vm_map_t.
97 * They return the addresses as vm_offset_t, so they should only be
98 * called on maps whose addresses fit in a vm_offset_t (i.e. maps the
99 * same size as the kernel map); otherwise the results are truncated.
100 */
101 vm_offset_t
102 get_vm_start(
103 vm_map_t map)
104 {
105 return(CAST_DOWN(vm_offset_t, vm_map_first_entry(map)->vme_start));
106 }
107
108 vm_offset_t
109 get_vm_end(
110 vm_map_t map)
111 {
112 return(CAST_DOWN(vm_offset_t, vm_map_last_entry(map)->vme_end));
113 }
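/*
 * Illustration of the truncation noted above (values are hypothetical):
 * on a configuration where vm_offset_t is 32 bits wide, a map whose last
 * entry ends at 0x100001000ULL would report get_vm_end() == 0x00001000,
 * since CAST_DOWN() simply drops the high-order bits.
 */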
114
115 /*
116 * BSD VNODE PAGER
117 */
118
119 /* until component support available */
120 int vnode_pager_workaround;
121
122 typedef struct vnode_pager {
123 int *pager; /* pager workaround pointer */
124 unsigned int pager_ikot; /* JMM: fake ip_kotype() */
125 unsigned int ref_count; /* reference count */
126 memory_object_control_t control_handle; /* mem object control handle */
127 struct vnode *vnode_handle; /* vnode handle */
128 } *vnode_pager_t;
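/*
 * Note on the layout above: the first field stands in for the pager and
 * the second fakes an ip_kotype, so that a vnode_pager_t handed to the VM
 * as a memory_object_t is not mistaken for a named entry port; see the
 * comment in vnode_object_create() below.
 */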
129
130
131 ipc_port_t
132 trigger_name_to_port( /* forward */
133 mach_port_t);
134
135 kern_return_t
136 vnode_pager_cluster_read( /* forward */
137 vnode_pager_t,
138 vm_object_offset_t,
139 vm_size_t);
140
141 void
142 vnode_pager_cluster_write( /* forward */
143 vnode_pager_t,
144 vm_object_offset_t,
145 vm_size_t,
146 vm_object_offset_t *,
147 int *,
148 int);
149
150
151 vnode_pager_t
152 vnode_object_create( /* forward */
153 struct vnode *);
154
155 vnode_pager_t
156 vnode_pager_lookup( /* forward */
157 memory_object_t);
158
159 zone_t vnode_pager_zone;
160
161
162 #define VNODE_PAGER_NULL ((vnode_pager_t) 0)
163
164 /* TODO: Should be set dynamically by vnode_pager_init() */
165 #define CLUSTER_SHIFT 1
166
167 /* TODO: Should be set dynamically by vnode_pager_bootstrap() */
168 #define MAX_VNODE 10000
169
170
171 #if DEBUG
172 int pagerdebug=0;
173
174 #define PAGER_ALL 0xffffffff
175 #define PAGER_INIT 0x00000001
176 #define PAGER_PAGEIN 0x00000002
177
178 #define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL)==LEVEL){printf A;}}
179 #else
180 #define PAGER_DEBUG(LEVEL, A)
181 #endif
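/*
 * On DEBUG kernels, setting the global 'pagerdebug' to a mask of the
 * PAGER_* bits above enables the PAGER_DEBUG printf tracing used
 * throughout this file; on non-DEBUG kernels the macro compiles away.
 */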
182
183 /*
184 * Routine: macx_triggers
185 * Function:
186 * Syscall interface to set the callbacks for low and
187 * high water marks.
188 */
189 int
190 macx_triggers(
191 struct macx_triggers_args *args)
192 {
193 int hi_water = args->hi_water;
194 int low_water = args->low_water;
195 int flags = args->flags;
196 mach_port_t trigger_name = args->alert_port;
197 kern_return_t kr;
198 memory_object_default_t default_pager;
199 ipc_port_t trigger_port;
200
201 default_pager = MEMORY_OBJECT_DEFAULT_NULL;
202 kr = host_default_memory_manager(host_priv_self(),
203 &default_pager, 0);
204 if(kr != KERN_SUCCESS) {
205 return EINVAL;
206 }
207
208 if ((flags & SWAP_ENCRYPT_ON) &&
209 (flags & SWAP_ENCRYPT_OFF)) {
210 /* can't have it both ways */
211 return EINVAL;
212 }
213
214 if (flags & SWAP_ENCRYPT_ON) {
215 /* ENCRYPTED SWAP: tell default_pager to encrypt */
216 default_pager_triggers(default_pager,
217 0, 0,
218 SWAP_ENCRYPT_ON,
219 IP_NULL);
220 } else if (flags & SWAP_ENCRYPT_OFF) {
221 /* ENCRYPTED SWAP: tell default_pager not to encrypt */
222 default_pager_triggers(default_pager,
223 0, 0,
224 SWAP_ENCRYPT_OFF,
225 IP_NULL);
226 }
227
228 if (flags & HI_WAT_ALERT) {
229 trigger_port = trigger_name_to_port(trigger_name);
230 if(trigger_port == NULL) {
231 return EINVAL;
232 }
233 /* trigger_port is locked and active */
234 ipc_port_make_send_locked(trigger_port);
235 /* now unlocked */
236 default_pager_triggers(default_pager,
237 hi_water, low_water,
238 HI_WAT_ALERT, trigger_port);
239 }
240
241 if (flags & LO_WAT_ALERT) {
242 trigger_port = trigger_name_to_port(trigger_name);
243 if(trigger_port == NULL) {
244 return EINVAL;
245 }
246 /* trigger_port is locked and active */
247 ipc_port_make_send_locked(trigger_port);
248 /* and now it's unlocked */
249 default_pager_triggers(default_pager,
250 hi_water, low_water,
251 LO_WAT_ALERT, trigger_port);
252 }
253
254 /*
255 * Set the thread scheduling priority and policy for the current
256 * thread; for the time being it is assumed that the thread setting
257 * the alert is the same one that will be servicing it.
258 *
259 * XXX This does not belong in the kernel XXX
260 */
261 {
262 thread_precedence_policy_data_t pre;
263 thread_extended_policy_data_t ext;
264
265 ext.timeshare = FALSE;
266 pre.importance = INT32_MAX;
267
268 thread_policy_set(current_thread(),
269 THREAD_EXTENDED_POLICY,
270 (thread_policy_t)&ext,
271 THREAD_EXTENDED_POLICY_COUNT);
272
273 thread_policy_set(current_thread(),
274 THREAD_PRECEDENCE_POLICY,
275 (thread_policy_t)&pre,
276 THREAD_PRECEDENCE_POLICY_COUNT);
277 }
278
279 current_thread()->options |= TH_OPT_VMPRIV;
280
281 return 0;
282 }
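/*
 * Illustrative sketch (not part of the original source): how a user-space
 * swap manager such as dynamic_pager might arm these triggers.  The
 * user-space macx_triggers() prototype, the HI_WAT_ALERT/LO_WAT_ALERT
 * flags, and the threshold variables are assumed to be provided by the
 * caller's headers; names and values here are for illustration only.
 *
 *	mach_port_t	alert_port;
 *	int		hi_water_threshold, low_water_threshold;
 *
 *	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
 *			   &alert_port);
 *	macx_triggers(hi_water_threshold, low_water_threshold,
 *		      HI_WAT_ALERT | LO_WAT_ALERT, alert_port);
 *	(then block in mach_msg() waiting for a trigger notification
 *	 on alert_port)
 */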
283
284 /*
285 * Translate a trigger port name in the caller's IPC space into an ipc_port_t; returns the port locked and active, or NULL if the name is invalid.
286 */
287 ipc_port_t
288 trigger_name_to_port(
289 mach_port_t trigger_name)
290 {
291 ipc_port_t trigger_port;
292 ipc_space_t space;
293
294 if (trigger_name == 0)
295 return (NULL);
296
297 space = current_space();
298 if(ipc_port_translate_receive(space, (mach_port_name_t)trigger_name,
299 &trigger_port) != KERN_SUCCESS)
300 return (NULL);
301 return trigger_port;
302 }
303
304
305 extern int uiomove64(addr64_t, int, void *);
306 #define MAX_RUN 32
307
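/*
 * Copy up to 'io_requested' bytes between the caller's uio and the
 * resident pages of the VM object named by 'control', starting at byte
 * 'start_offset' within the page at 'offset'.  Pages are gathered in runs
 * of at most MAX_RUN, held busy across the copy, and marked dirty when
 * 'mark_dirty' is set.  Returns 0 on success (or if a non-resident page
 * cuts the transfer short), otherwise the error from uiomove64().
 */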
308 int
309 memory_object_control_uiomove(
310 memory_object_control_t control,
311 memory_object_offset_t offset,
312 void * uio,
313 int start_offset,
314 int io_requested,
315 int mark_dirty)
316 {
317 vm_object_t object;
318 vm_page_t dst_page;
319 int xsize;
320 int retval = 0;
321 int cur_run;
322 int cur_needed;
323 int i;
324 vm_page_t page_run[MAX_RUN];
325
326
327 object = memory_object_control_to_vm_object(control);
328 if (object == VM_OBJECT_NULL) {
329 return (0);
330 }
331 assert(!object->internal);
332
333 vm_object_lock(object);
334
335 if (mark_dirty && object->copy != VM_OBJECT_NULL) {
336 /*
337 * We can't modify the pages without honoring
338 * copy-on-write obligations first, so fall off
339 * this optimized path and fall back to the regular
340 * path.
341 */
342 vm_object_unlock(object);
343 return 0;
344 }
345
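	/*
	 * Each pass gathers a run of consecutive resident pages (up to
	 * MAX_RUN), drops the object lock, copies the run through
	 * uiomove64(), then retakes the lock and wakes up the busy pages.
	 */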
346 while (io_requested && retval == 0) {
347
348 cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;
349
350 if (cur_needed > MAX_RUN)
351 cur_needed = MAX_RUN;
352
353 for (cur_run = 0; cur_run < cur_needed; ) {
354
355 if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
356 break;
357 /*
358 * Sync up on getting the busy bit
359 */
360 if ((dst_page->busy || dst_page->cleaning)) {
361 /*
362 * someone else is playing with the page... if we've
363 * already collected pages into this run, go ahead
364 * and process them now since we can't block on this
365 * page while holding other pages in the BUSY state;
366 * otherwise, wait for the page to become available
367 */
368 if (cur_run)
369 break;
370 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
371 continue;
372 }
373 /*
374 * this routine is only called when copying
375 * to/from real files... no need to consider
376 * encrypted swap pages
377 */
378 assert(!dst_page->encrypted);
379
380 if (mark_dirty)
381 dst_page->dirty = TRUE;
382 dst_page->busy = TRUE;
383
384 page_run[cur_run++] = dst_page;
385
386 offset += PAGE_SIZE_64;
387 }
388 if (cur_run == 0)
389 /*
390 * we hit a 'hole' in the cache
391 * we bail at this point
392 * we'll unlock the object below
393 */
394 break;
395 vm_object_unlock(object);
396
397 for (i = 0; i < cur_run; i++) {
398
399 dst_page = page_run[i];
400
401 if ((xsize = PAGE_SIZE - start_offset) > io_requested)
402 xsize = io_requested;
403
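			/*
			 * dst_page->phys_page is a physical page number; shifting it
			 * left by 12 (the 4K page size) and adding start_offset forms
			 * the 64-bit physical address handed to uiomove64().
			 */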
404 if ( (retval = uiomove64((addr64_t)(((addr64_t)(dst_page->phys_page) << 12) + start_offset), xsize, uio)) )
405 break;
406
407 io_requested -= xsize;
408 start_offset = 0;
409 }
410 vm_object_lock(object);
411
412 for (i = 0; i < cur_run; i++) {
413 dst_page = page_run[i];
414
415 PAGE_WAKEUP_DONE(dst_page);
416 }
417 }
418 vm_object_unlock(object);
419
420 return (retval);
421 }
422
423
424 /*
425 * Create the zone that vnode pager structures are allocated from.
426 */
427 void
428 vnode_pager_bootstrap(void)
429 {
430 register vm_size_t size;
431
432 size = (vm_size_t) sizeof(struct vnode_pager);
433 vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
434 PAGE_SIZE, "vnode pager structures");
435 return;
436 }
437
438 /*
439 * Create a vnode pager for the given vnode and return it as a memory object.
440 */
441 memory_object_t
442 vnode_pager_setup(
443 struct vnode *vp,
444 __unused memory_object_t pager)
445 {
446 vnode_pager_t vnode_object;
447
448 vnode_object = vnode_object_create(vp);
449 if (vnode_object == VNODE_PAGER_NULL)
450 panic("vnode_pager_setup: vnode_object_create() failed");
451 return((memory_object_t)vnode_object);
452 }
453
454 /*
455 * Attach the pager to its memory object control handle and set the object's caching attributes.
456 */
457 kern_return_t
458 vnode_pager_init(memory_object_t mem_obj,
459 memory_object_control_t control,
460 #if !DEBUG
461 __unused
462 #endif
463 vm_size_t pg_size)
464 {
465 vnode_pager_t vnode_object;
466 kern_return_t kr;
467 memory_object_attr_info_data_t attributes;
468
469
470 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %x\n", mem_obj, control, pg_size));
471
472 if (control == MEMORY_OBJECT_CONTROL_NULL)
473 return KERN_INVALID_ARGUMENT;
474
475 vnode_object = vnode_pager_lookup(mem_obj);
476
477 memory_object_control_reference(control);
478
479 vnode_object->control_handle = control;
480
481 attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
482 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
483 attributes.cluster_size = (1 << (PAGE_SHIFT));
484 attributes.may_cache_object = TRUE;
485 attributes.temporary = TRUE;
486
487 kr = memory_object_change_attributes(
488 control,
489 MEMORY_OBJECT_ATTRIBUTE_INFO,
490 (memory_object_info_t) &attributes,
491 MEMORY_OBJECT_ATTR_INFO_COUNT);
492 if (kr != KERN_SUCCESS)
493 panic("vnode_pager_init: memory_object_change_attributes() failed");
494
495 return(KERN_SUCCESS);
496 }
497
498 /*
499 * Push the given range of the memory object (dirty data) back out to the vnode.
500 */
501 kern_return_t
502 vnode_pager_data_return(
503 memory_object_t mem_obj,
504 memory_object_offset_t offset,
505 vm_size_t data_cnt,
506 memory_object_offset_t *resid_offset,
507 int *io_error,
508 __unused boolean_t dirty,
509 __unused boolean_t kernel_copy,
510 int upl_flags)
511 {
512 register vnode_pager_t vnode_object;
513
514 vnode_object = vnode_pager_lookup(mem_obj);
515
516 vnode_pager_cluster_write(vnode_object, offset, data_cnt, resid_offset, io_error, upl_flags);
517
518 return KERN_SUCCESS;
519 }
520
521 kern_return_t
522 vnode_pager_data_initialize(
523 __unused memory_object_t mem_obj,
524 __unused memory_object_offset_t offset,
525 __unused vm_size_t data_cnt)
526 {
527 panic("vnode_pager_data_initialize");
528 return KERN_FAILURE;
529 }
530
531 kern_return_t
532 vnode_pager_data_unlock(
533 __unused memory_object_t mem_obj,
534 __unused memory_object_offset_t offset,
535 __unused vm_size_t size,
536 __unused vm_prot_t desired_access)
537 {
538 return KERN_FAILURE;
539 }
540
541 kern_return_t
542 vnode_pager_get_object_size(
543 memory_object_t mem_obj,
544 memory_object_offset_t *length)
545 {
546 vnode_pager_t vnode_object;
547
548 vnode_object = vnode_pager_lookup(mem_obj);
549
550 *length = vnode_pager_get_filesize(vnode_object->vnode_handle);
551 return KERN_SUCCESS;
552 }
553
554 /*
555 * Page the given range of the memory object in from the vnode.
556 */
557 kern_return_t
558 vnode_pager_data_request(
559 memory_object_t mem_obj,
560 memory_object_offset_t offset,
561 vm_size_t length,
562 #if !DEBUG
563 __unused
564 #endif
565 vm_prot_t protection_required)
566 {
567 register vnode_pager_t vnode_object;
568
569 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_data_request: %x, %x, %x, %x\n", mem_obj, offset, length, protection_required));
570
571 vnode_object = vnode_pager_lookup(mem_obj);
572
573 PAGER_DEBUG(PAGER_PAGEIN, ("vnode_pager_data_request: %x, %x, %x, %x, vnode_object %x\n", mem_obj, offset, length, protection_required, vnode_object));
574
575 return vnode_pager_cluster_read(vnode_object, offset, length);
576 }
577
578 /*
579 * Take an additional reference on the vnode pager.
580 */
581 void
582 vnode_pager_reference(
583 memory_object_t mem_obj)
584 {
585 register vnode_pager_t vnode_object;
586 unsigned int new_ref_count;
587
588 vnode_object = vnode_pager_lookup(mem_obj);
589 new_ref_count = hw_atomic_add(&vnode_object->ref_count, 1);
590 assert(new_ref_count > 1);
591 }
592
593 /*
594 * Release a reference on the vnode pager; the last reference releases the vnode and frees the pager structure.
595 */
596 void
597 vnode_pager_deallocate(
598 memory_object_t mem_obj)
599 {
600 register vnode_pager_t vnode_object;
601
602 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %x\n", mem_obj));
603
604 vnode_object = vnode_pager_lookup(mem_obj);
605
606 if (hw_atomic_sub(&vnode_object->ref_count, 1) == 0) {
607 if (vnode_object->vnode_handle != NULL) {
608 vnode_pager_vrele(vnode_object->vnode_handle);
609 }
610 zfree(vnode_pager_zone, vnode_object);
611 }
612 return;
613 }
614
615 /*
616 * Terminate the memory object; nothing to do here for vnode pagers.
617 */
618 kern_return_t
619 vnode_pager_terminate(
620 #if !DEBUG
621 __unused
622 #endif
623 memory_object_t mem_obj)
624 {
625 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %x\n", mem_obj));
626
627 return(KERN_SUCCESS);
628 }
629
630 /*
631 * Acknowledge a memory_object_synchronize() request by reporting the range back to the VM as completed.
632 */
633 kern_return_t
634 vnode_pager_synchronize(
635 memory_object_t mem_obj,
636 memory_object_offset_t offset,
637 vm_size_t length,
638 __unused vm_sync_t sync_flags)
639 {
640 register vnode_pager_t vnode_object;
641
642 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %x\n", mem_obj));
643
644 vnode_object = vnode_pager_lookup(mem_obj);
645
646 memory_object_synchronize_completed(vnode_object->control_handle, offset, length);
647
648 return (KERN_SUCCESS);
649 }
650
651 /*
652 * Called when the last mapping of the memory object is gone; tell the UBC so it can drop its mapping reference on the vnode.
653 */
654 kern_return_t
655 vnode_pager_unmap(
656 memory_object_t mem_obj)
657 {
658 register vnode_pager_t vnode_object;
659
660 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_unmap: %x\n", mem_obj));
661
662 vnode_object = vnode_pager_lookup(mem_obj);
663
664 ubc_unmap(vnode_object->vnode_handle);
665 return KERN_SUCCESS;
666 }
667
668
669 /*
670 * Push a range of dirty pages out to the vnode, either for an msync request or for normal pageout, clustering where possible.
671 */
672 void
673 vnode_pager_cluster_write(
674 vnode_pager_t vnode_object,
675 vm_object_offset_t offset,
676 vm_size_t cnt,
677 vm_object_offset_t * resid_offset,
678 int * io_error,
679 int upl_flags)
680 {
681 vm_size_t size;
682 upl_t upl = NULL;
683 int request_flags;
684 int errno;
685
686 if (upl_flags & UPL_MSYNC) {
687
688 upl_flags |= UPL_VNODE_PAGER;
689
690 if ( (upl_flags & UPL_IOSYNC) && io_error)
691 upl_flags |= UPL_KEEPCACHED;
692
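		/*
		 * msync path: walk the range in chunks of at most
		 * MAX_UPL_TRANSFER pages, grab a UPL covering the dirty pages
		 * of each chunk, and push it to the filesystem through
		 * vnode_pageout(); UPL_KEEPCACHED callers stop at the first
		 * I/O error, which is reported through *io_error.
		 */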
693 while (cnt) {
694 kern_return_t kr;
695
696 size = (cnt < (PAGE_SIZE * MAX_UPL_TRANSFER)) ? cnt : (PAGE_SIZE * MAX_UPL_TRANSFER); /* effective max */
697
698 request_flags = UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
699 UPL_SET_INTERNAL | UPL_SET_LITE;
700
701 kr = memory_object_upl_request(vnode_object->control_handle,
702 offset, size, &upl, NULL, NULL, request_flags);
703 if (kr != KERN_SUCCESS)
704 panic("vnode_pager_cluster_write: upl request failed\n");
705
706 vnode_pageout(vnode_object->vnode_handle,
707 upl, (vm_offset_t)0, offset, size, upl_flags, &errno);
708
709 if ( (upl_flags & UPL_KEEPCACHED) ) {
710 if ( (*io_error = errno) )
711 break;
712 }
713 cnt -= size;
714 offset += size;
715 }
716 if (resid_offset)
717 *resid_offset = offset;
718
719 } else {
720 vm_object_offset_t vnode_size;
721 vm_object_offset_t base_offset;
722 vm_object_t object;
723 vm_page_t target_page;
724 int ticket;
725
726 /*
727 * this is the pageout path
728 */
729 vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);
730
731 if (vnode_size > (offset + PAGE_SIZE)) {
732 /*
733 * preset the maximum size of the cluster
734 * and put us on a nice cluster boundary...
735 * and then clip the size to ensure we
736 * don't request past the end of the underlying file
737 */
738 size = PAGE_SIZE * MAX_UPL_TRANSFER;
739 base_offset = offset & ~((signed)(size - 1));
740
741 if ((base_offset + size) > vnode_size)
742 size = round_page_32(((vm_size_t)(vnode_size - base_offset)));
743 } else {
744 /*
745 * we've been requested to page out a page beyond the current
746 * end of the 'file'... don't try to cluster in this case...
747 * we still need to send this page through because it might
748 * be marked precious and the underlying filesystem may need
749 * to do something with it (besides page it out)...
750 */
751 base_offset = offset;
752 size = PAGE_SIZE;
753 }
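		/*
		 * Worked example of the clustering arithmetic above (assuming
		 * 4K pages and MAX_UPL_TRANSFER == 256, i.e. a 1MB maximum
		 * cluster): an offset of 0x123456 yields base_offset 0x100000;
		 * if the vnode is only 0x180000 bytes long, size is clipped
		 * from 0x100000 down to round_page_32(0x80000) == 0x80000.
		 */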
754 object = memory_object_control_to_vm_object(vnode_object->control_handle);
755
756 if (object == VM_OBJECT_NULL)
757 panic("vnode_pager_cluster_write: NULL vm_object in control handle\n");
758
759 request_flags = UPL_NOBLOCK | UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
760 UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM |
761 UPL_SET_INTERNAL | UPL_SET_LITE;
762
763 vm_object_lock(object);
764
765 if ((target_page = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
766 /*
767 * only pick up pages whose ticket number matches
768 * the ticket number of the page originally targeted
769 * for pageout
770 */
771 ticket = target_page->page_ticket;
772
773 request_flags |= ((ticket << UPL_PAGE_TICKET_SHIFT) & UPL_PAGE_TICKET_MASK);
774 }
775 vm_object_unlock(object);
776
777 vm_object_upl_request(object, base_offset, size,
778 &upl, NULL, NULL, request_flags);
779 if (upl == NULL)
780 panic("vnode_pager_cluster_write: upl request failed\n");
781
782 vnode_pageout(vnode_object->vnode_handle,
783 upl, (vm_offset_t)0, upl->offset, upl->size, UPL_VNODE_PAGER, NULL);
784 }
785 }
786
787
788 /*
789 * Bring a page-aligned range of the object in from the vnode.
790 */
791 kern_return_t
792 vnode_pager_cluster_read(
793 vnode_pager_t vnode_object,
794 vm_object_offset_t offset,
795 vm_size_t cnt)
796 {
797 int local_error = 0;
798 int kret;
799
800 assert(! (cnt & PAGE_MASK));
801
802 kret = vnode_pagein(vnode_object->vnode_handle,
803 (upl_t) NULL,
804 (vm_offset_t) NULL,
805 offset,
806 cnt,
807 0,
808 &local_error);
809 /*
810 * Ideally this would test (kret == PAGER_ABSENT).
811 * The definitions still need to be worked out here: 1 corresponds to
812 * PAGER_ABSENT as defined in bsd/vm/vm_pager.h, but we should not
813 * include that file here because that would be a layering violation.
814 */
815 if (kret == 1) {
816 int uplflags;
817 upl_t upl = NULL;
818 int count = 0;
819 kern_return_t kr;
820
821 uplflags = (UPL_NO_SYNC |
822 UPL_CLEAN_IN_PLACE |
823 UPL_SET_INTERNAL);
824 count = 0;
825 kr = memory_object_upl_request(vnode_object->control_handle,
826 offset, cnt,
827 &upl, NULL, &count, uplflags);
828 if (kr == KERN_SUCCESS) {
829 upl_abort(upl, 0);
830 upl_deallocate(upl);
831 } else {
832 /*
833 * We couldn't gather the page list, probably
834 * because the memory object doesn't have a link
835 * to a VM object anymore (forced unmount, for
836 * example). Just return an error to the vm_fault()
837 * path and let it handle it.
838 */
839 }
840
841 return KERN_FAILURE;
842 }
843
844 return KERN_SUCCESS;
845
846 }
847
848
849 /*
850 * Ask the VM to free cached memory objects that are backed by the vnode pager (see memory_object_free_from_cache()).
851 */
852 void
853 vnode_pager_release_from_cache(
854 int *cnt)
855 {
856 memory_object_free_from_cache(
857 &realhost, &vnode_pager_workaround, cnt);
858 }
859
860 /*
861 * Allocate and initialize a vnode pager structure for the given vnode.
862 */
863 vnode_pager_t
864 vnode_object_create(
865 struct vnode *vp)
866 {
867 register vnode_pager_t vnode_object;
868
869 vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
870 if (vnode_object == VNODE_PAGER_NULL)
871 return(VNODE_PAGER_NULL);
872
873 /*
874 * The vm_map call takes both named entry ports and raw memory
875 * objects in the same parameter. We need to make sure that
876 * vm_map does not see this object as a named entry port. So,
877 * we reserve the second word in the object for a fake ip_kotype
878 * setting - that will tell vm_map to use it as a memory object.
879 */
880 vnode_object->pager = &vnode_pager_workaround;
881 vnode_object->pager_ikot = IKOT_MEMORY_OBJECT;
882 vnode_object->ref_count = 1;
883 vnode_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
884 vnode_object->vnode_handle = vp;
885
886 return(vnode_object);
887 }
888
889 /*
890 * Convert a memory_object_t back to the vnode_pager_t it really is.
891 */
892 vnode_pager_t
893 vnode_pager_lookup(
894 memory_object_t name)
895 {
896 vnode_pager_t vnode_object;
897
898 vnode_object = (vnode_pager_t)name;
899 assert(vnode_object->pager == &vnode_pager_workaround);
900 return (vnode_object);
901 }
902