/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>

#include <kern/host.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <default_pager/default_pager_types.h>
#include <default_pager/default_pager_object_server.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>

/* BSD VM COMPONENT INTERFACES */
int
get_map_nentries(
    vm_map_t);

vm_offset_t
get_map_start(
    vm_map_t);

vm_offset_t
get_map_end(
    vm_map_t);

/*
 *
 */
int
get_map_nentries(
    vm_map_t map)
{
    return(map->hdr.nentries);
}

mach_vm_offset_t
mach_get_vm_start(vm_map_t map)
{
    return( vm_map_first_entry(map)->vme_start);
}

mach_vm_offset_t
mach_get_vm_end(vm_map_t map)
{
    return( vm_map_last_entry(map)->vme_end);
}

/*
 * Legacy routines to get the start and end for a vm_map_t.  They
 * return them in the vm_offset_t format.  So, they should only be
 * called on maps that are the same size as the kernel map for
 * accurate results.
 */
vm_offset_t
get_vm_start(
    vm_map_t map)
{
    return(CAST_DOWN(vm_offset_t, vm_map_first_entry(map)->vme_start));
}

vm_offset_t
get_vm_end(
    vm_map_t map)
{
    return(CAST_DOWN(vm_offset_t, vm_map_last_entry(map)->vme_end));
}

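/*
 * Illustrative note (not part of the original file): the legacy routines
 * above funnel a 64-bit map address through CAST_DOWN into a possibly
 * narrower vm_offset_t, so an address that does not fit is silently
 * truncated.  For example, on a kernel where vm_offset_t is 32 bits wide:
 *
 *     mach_vm_offset_t end = 0x120000000ULL;    // address above 4GB
 *     vm_offset_t      old = (vm_offset_t)end;  // 0x20000000 -- high bits lost
 *
 * which is why the comment above restricts these calls to maps whose
 * addresses fit the kernel map's word size.
 */
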
/*
 * BSD VNODE PAGER
 */

/* until component support available */
int vnode_pager_workaround;

typedef struct vnode_pager {
    int                     *pager;          /* pager workaround pointer  */
    unsigned int            pager_ikot;      /* JMM: fake ip_kotype()     */
    unsigned int            ref_count;       /* reference count           */
    memory_object_control_t control_handle;  /* mem object control handle */
    struct vnode            *vnode_handle;   /* vnode handle              */
} *vnode_pager_t;


ipc_port_t
trigger_name_to_port(           /* forward */
    mach_port_t);

kern_return_t
vnode_pager_cluster_read(       /* forward */
    vnode_pager_t,
    vm_object_offset_t,
    vm_size_t);

void
vnode_pager_cluster_write(      /* forward */
    vnode_pager_t,
    vm_object_offset_t,
    vm_size_t,
    vm_object_offset_t *,
    int *,
    int);


vnode_pager_t
vnode_object_create(            /* forward */
    struct vnode *);

vnode_pager_t
vnode_pager_lookup(             /* forward */
    memory_object_t);

zone_t  vnode_pager_zone;


#define VNODE_PAGER_NULL        ((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define CLUSTER_SHIFT   1

/* TODO: Should be set dynamically by vnode_pager_bootstrap() */
#define MAX_VNODE       10000


#if DEBUG
int pagerdebug = 0;

#define PAGER_ALL       0xffffffff
#define PAGER_INIT      0x00000001
#define PAGER_PAGEIN    0x00000002

#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL) == LEVEL) {printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
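
/*
 * Illustrative usage (not in the original file): on a DEBUG kernel the
 * trace mask can be narrowed from a debugger by changing pagerdebug, and
 * new trace points follow the same pattern as the existing ones, e.g.
 *
 *     pagerdebug = PAGER_PAGEIN;                              // only pagein events
 *     PAGER_DEBUG(PAGER_PAGEIN, ("pagein at offset 0x%x\n", 0x1000));
 *
 * On non-DEBUG kernels the macro expands to nothing, so such calls cost
 * nothing in release builds.
 */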

/*
 * Routine:     macx_triggers
 * Function:
 *      Syscall interface to set the callbacks for the low and
 *      high water marks.
 */
int
macx_triggers(
    struct macx_triggers_args *args)
{
    int         hi_water = args->hi_water;
    int         low_water = args->low_water;
    int         flags = args->flags;
    mach_port_t trigger_name = args->alert_port;
    kern_return_t kr;
    memory_object_default_t default_pager;
    ipc_port_t  trigger_port;

    default_pager = MEMORY_OBJECT_DEFAULT_NULL;
    kr = host_default_memory_manager(host_priv_self(),
                                     &default_pager, 0);
    if (kr != KERN_SUCCESS) {
        return EINVAL;
    }

    if ((flags & SWAP_ENCRYPT_ON) &&
        (flags & SWAP_ENCRYPT_OFF)) {
        /* can't have it both ways */
        return EINVAL;
    }

    if (flags & SWAP_ENCRYPT_ON) {
        /* ENCRYPTED SWAP: tell default_pager to encrypt */
        default_pager_triggers(default_pager,
                               0, 0,
                               SWAP_ENCRYPT_ON,
                               IP_NULL);
    } else if (flags & SWAP_ENCRYPT_OFF) {
        /* ENCRYPTED SWAP: tell default_pager not to encrypt */
        default_pager_triggers(default_pager,
                               0, 0,
                               SWAP_ENCRYPT_OFF,
                               IP_NULL);
    }

    if (flags & HI_WAT_ALERT) {
        trigger_port = trigger_name_to_port(trigger_name);
        if (trigger_port == NULL) {
            return EINVAL;
        }
        /* trigger_port is locked and active */
        ipc_port_make_send_locked(trigger_port);
        /* now unlocked */
        default_pager_triggers(default_pager,
                               hi_water, low_water,
                               HI_WAT_ALERT, trigger_port);
    }

    if (flags & LO_WAT_ALERT) {
        trigger_port = trigger_name_to_port(trigger_name);
        if (trigger_port == NULL) {
            return EINVAL;
        }
        /* trigger_port is locked and active */
        ipc_port_make_send_locked(trigger_port);
        /* now unlocked */
        default_pager_triggers(default_pager,
                               hi_water, low_water,
                               LO_WAT_ALERT, trigger_port);
    }

    /*
     * Set the scheduling priority and policy for the current thread;
     * it is assumed for the time being that the thread setting the alert
     * is the same one that will be servicing it.
     *
     * XXX This does not belong in the kernel XXX
     */
    {
        thread_precedence_policy_data_t pre;
        thread_extended_policy_data_t   ext;

        ext.timeshare = FALSE;
        pre.importance = INT32_MAX;

        thread_policy_set(current_thread(),
                          THREAD_EXTENDED_POLICY,
                          (thread_policy_t)&ext,
                          THREAD_EXTENDED_POLICY_COUNT);

        thread_policy_set(current_thread(),
                          THREAD_PRECEDENCE_POLICY,
                          (thread_policy_t)&pre,
                          THREAD_PRECEDENCE_POLICY_COUNT);
    }

    current_thread()->options |= TH_OPT_VMPRIV;

    return 0;
}
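
/*
 * Illustrative sketch (not part of the original file): a user-space paging
 * daemon would normally reach this trap through the macx_triggers()
 * wrapper.  The exact user-level prototype and header are assumptions of
 * this sketch, but the intended call pattern is roughly:
 *
 *     mach_port_t alert_port = MACH_PORT_NULL;
 *
 *     mach_port_allocate(mach_task_self(),
 *         MACH_PORT_RIGHT_RECEIVE, &alert_port);
 *
 *     // ask to be alerted when swap usage crosses the water marks
 *     macx_triggers(100000, 50000, HI_WAT_ALERT | LO_WAT_ALERT, alert_port);
 *
 * The kernel side above converts alert_port (a receive-right name in the
 * caller's space) into a send right and hands it to the default pager,
 * which messages the port when a mark is crossed.
 */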

/*
 *
 */
ipc_port_t
trigger_name_to_port(
    mach_port_t trigger_name)
{
    ipc_port_t  trigger_port;
    ipc_space_t space;

    if (trigger_name == 0)
        return (NULL);

    space = current_space();
    if (ipc_port_translate_receive(space, (mach_port_name_t)trigger_name,
                                   &trigger_port) != KERN_SUCCESS)
        return (NULL);
    return trigger_port;
}


extern int uiomove64(addr64_t, int, void *);
#define MAX_RUN 32

int
memory_object_control_uiomove(
    memory_object_control_t control,
    memory_object_offset_t  offset,
    void                    *uio,
    int                     start_offset,
    int                     io_requested,
    int                     mark_dirty)
{
    vm_object_t object;
    vm_page_t   dst_page;
    int         xsize;
    int         retval = 0;
    int         cur_run;
    int         cur_needed;
    int         i;
    vm_page_t   page_run[MAX_RUN];


    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL) {
        return (0);
    }
    assert(!object->internal);

    vm_object_lock(object);

    if (mark_dirty && object->copy != VM_OBJECT_NULL) {
        /*
         * We can't modify the pages without honoring
         * copy-on-write obligations first, so fall off
         * this optimized path and fall back to the regular
         * path.
         */
        vm_object_unlock(object);
        return 0;
    }

    while (io_requested && retval == 0) {

        cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

        if (cur_needed > MAX_RUN)
            cur_needed = MAX_RUN;

        for (cur_run = 0; cur_run < cur_needed; ) {

            if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
                break;
            /*
             * Sync up on getting the busy bit
             */
            if ((dst_page->busy || dst_page->cleaning)) {
                /*
                 * someone else is playing with the page... if we've
                 * already collected pages into this run, go ahead
                 * and process them now; we can't block on this
                 * page while holding other pages in the BUSY state,
                 * otherwise we will wait
                 */
                if (cur_run)
                    break;
                PAGE_SLEEP(object, dst_page, THREAD_UNINT);
                continue;
            }
            /*
             * this routine is only called when copying
             * to/from real files... no need to consider
             * encrypted swap pages
             */
            assert(!dst_page->encrypted);

            if (mark_dirty)
                dst_page->dirty = TRUE;
            dst_page->busy = TRUE;

            page_run[cur_run++] = dst_page;

            offset += PAGE_SIZE_64;
        }
        if (cur_run == 0)
            /*
             * we hit a 'hole' in the cache,
             * so we bail at this point...
             * we'll unlock the object below
             */
            break;
        vm_object_unlock(object);

        for (i = 0; i < cur_run; i++) {

            dst_page = page_run[i];

            if ((xsize = PAGE_SIZE - start_offset) > io_requested)
                xsize = io_requested;

            if ( (retval = uiomove64((addr64_t)(((addr64_t)(dst_page->phys_page) << 12) + start_offset), xsize, uio)) )
                break;

            io_requested -= xsize;
            start_offset = 0;
        }
        vm_object_lock(object);

        for (i = 0; i < cur_run; i++) {
            dst_page = page_run[i];

            PAGE_WAKEUP_DONE(dst_page);
        }
    }
    vm_object_unlock(object);

    return (retval);
}
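
/*
 * Worked example (illustrative, not from the original file) of how
 * start_offset, xsize and io_requested interact above, assuming
 * PAGE_SIZE == 4096 and a request of 6000 bytes starting 512 bytes into
 * the first page:
 *
 *     pass 1: xsize = PAGE_SIZE - 512 = 3584;  io_requested -> 2416
 *     pass 2: start_offset is now 0, so xsize = min(4096, 2416) = 2416
 *
 * i.e. only the first page of a run is copied at a sub-page offset; every
 * subsequent page is copied from its start, and the last page is clipped
 * to whatever io_requested still remains.
 */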


/*
 *
 */
void
vnode_pager_bootstrap(void)
{
    register vm_size_t size;

    size = (vm_size_t) sizeof(struct vnode_pager);
    vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
                             PAGE_SIZE, "vnode pager structures");
    return;
}
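
/*
 * Illustrative note (not from the original file): with the zinit()
 * arguments above, the zone hands out sizeof(struct vnode_pager)-byte
 * elements, grows in PAGE_SIZE allocations, and is capped at
 * MAX_VNODE * sizeof(struct vnode_pager) bytes -- i.e. at most 10000
 * pager structures, which is what the TODO about sizing MAX_VNODE
 * dynamically refers to.
 */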

/*
 *
 */
memory_object_t
vnode_pager_setup(
    struct vnode                *vp,
    __unused memory_object_t   pager)
{
    vnode_pager_t vnode_object;

    vnode_object = vnode_object_create(vp);
    if (vnode_object == VNODE_PAGER_NULL)
        panic("vnode_pager_setup: vnode_object_create() failed");
    return((memory_object_t)vnode_object);
}

/*
 *
 */
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    vm_size_t pg_size)
{
    vnode_pager_t   vnode_object;
    kern_return_t   kr;
    memory_object_attr_info_data_t  attributes;


    PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %x\n", mem_obj, control, pg_size));

    if (control == MEMORY_OBJECT_CONTROL_NULL)
        return KERN_INVALID_ARGUMENT;

    vnode_object = vnode_pager_lookup(mem_obj);

    memory_object_control_reference(control);

    vnode_object->control_handle = control;

    attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
    attributes.cluster_size = (1 << (PAGE_SHIFT));
    attributes.may_cache_object = TRUE;
    attributes.temporary = TRUE;

    kr = memory_object_change_attributes(
                    control,
                    MEMORY_OBJECT_ATTRIBUTE_INFO,
                    (memory_object_info_t) &attributes,
                    MEMORY_OBJECT_ATTR_INFO_COUNT);
    if (kr != KERN_SUCCESS)
        panic("vnode_pager_init: memory_object_change_attributes() failed");

    return(KERN_SUCCESS);
}

/*
 *
 */
kern_return_t
vnode_pager_data_return(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    vm_size_t               data_cnt,
    memory_object_offset_t  *resid_offset,
    int                     *io_error,
    __unused boolean_t      dirty,
    __unused boolean_t      kernel_copy,
    int                     upl_flags)
{
    register vnode_pager_t vnode_object;

    vnode_object = vnode_pager_lookup(mem_obj);

    vnode_pager_cluster_write(vnode_object, offset, data_cnt, resid_offset, io_error, upl_flags);

    return KERN_SUCCESS;
}

kern_return_t
vnode_pager_data_initialize(
    __unused memory_object_t        mem_obj,
    __unused memory_object_offset_t offset,
    __unused vm_size_t              data_cnt)
{
    panic("vnode_pager_data_initialize");
    return KERN_FAILURE;
}

kern_return_t
vnode_pager_data_unlock(
    __unused memory_object_t        mem_obj,
    __unused memory_object_offset_t offset,
    __unused vm_size_t              size,
    __unused vm_prot_t              desired_access)
{
    return KERN_FAILURE;
}

kern_return_t
vnode_pager_get_object_size(
    memory_object_t         mem_obj,
    memory_object_offset_t  *length)
{
    vnode_pager_t vnode_object;

    vnode_object = vnode_pager_lookup(mem_obj);

    *length = vnode_pager_get_filesize(vnode_object->vnode_handle);
    return KERN_SUCCESS;
}

/*
 *
 */
kern_return_t
vnode_pager_data_request(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    vm_size_t               length,
#if !DEBUG
    __unused
#endif
    vm_prot_t               protection_required)
{
    register vnode_pager_t vnode_object;

    PAGER_DEBUG(PAGER_ALL, ("vnode_pager_data_request: %x, %x, %x, %x\n", mem_obj, offset, length, protection_required));

    vnode_object = vnode_pager_lookup(mem_obj);

    PAGER_DEBUG(PAGER_PAGEIN, ("vnode_pager_data_request: %x, %x, %x, %x, vnode_object %x\n", mem_obj, offset, length, protection_required, vnode_object));

    return vnode_pager_cluster_read(vnode_object, offset, length);
}

/*
 *
 */
void
vnode_pager_reference(
    memory_object_t mem_obj)
{
    register vnode_pager_t  vnode_object;
    unsigned int            new_ref_count;

    vnode_object = vnode_pager_lookup(mem_obj);
    new_ref_count = hw_atomic_add(&vnode_object->ref_count, 1);
    assert(new_ref_count > 1);
}

/*
 *
 */
void
vnode_pager_deallocate(
    memory_object_t mem_obj)
{
    register vnode_pager_t vnode_object;

    PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %x\n", mem_obj));

    vnode_object = vnode_pager_lookup(mem_obj);

    if (hw_atomic_sub(&vnode_object->ref_count, 1) == 0) {
        if (vnode_object->vnode_handle != NULL) {
            vnode_pager_vrele(vnode_object->vnode_handle);
        }
        zfree(vnode_pager_zone, vnode_object);
    }
    return;
}

/*
 *
 */
kern_return_t
vnode_pager_terminate(
#if !DEBUG
    __unused
#endif
    memory_object_t mem_obj)
{
    PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %x\n", mem_obj));

    return(KERN_SUCCESS);
}

/*
 *
 */
kern_return_t
vnode_pager_synchronize(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    vm_size_t               length,
    __unused vm_sync_t      sync_flags)
{
    register vnode_pager_t vnode_object;

    PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %x\n", mem_obj));

    vnode_object = vnode_pager_lookup(mem_obj);

    memory_object_synchronize_completed(vnode_object->control_handle, offset, length);

    return (KERN_SUCCESS);
}

/*
 *
 */
kern_return_t
vnode_pager_unmap(
    memory_object_t mem_obj)
{
    register vnode_pager_t vnode_object;

    PAGER_DEBUG(PAGER_ALL, ("vnode_pager_unmap: %x\n", mem_obj));

    vnode_object = vnode_pager_lookup(mem_obj);

    ubc_unmap(vnode_object->vnode_handle);
    return KERN_SUCCESS;
}


/*
 *
 */
void
vnode_pager_cluster_write(
    vnode_pager_t       vnode_object,
    vm_object_offset_t  offset,
    vm_size_t           cnt,
    vm_object_offset_t  *resid_offset,
    int                 *io_error,
    int                 upl_flags)
{
    vm_size_t   size;
    upl_t       upl = NULL;
    int         request_flags;
    int         errno;

    if (upl_flags & UPL_MSYNC) {

        upl_flags |= UPL_VNODE_PAGER;

        if ( (upl_flags & UPL_IOSYNC) && io_error)
            upl_flags |= UPL_KEEPCACHED;

        while (cnt) {
            kern_return_t kr;

            size = (cnt < (PAGE_SIZE * MAX_UPL_TRANSFER)) ? cnt : (PAGE_SIZE * MAX_UPL_TRANSFER); /* effective max */

            request_flags = UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
                            UPL_SET_INTERNAL | UPL_SET_LITE;

            kr = memory_object_upl_request(vnode_object->control_handle,
                                           offset, size, &upl, NULL, NULL, request_flags);
            if (kr != KERN_SUCCESS)
                panic("vnode_pager_cluster_write: upl request failed\n");

            vnode_pageout(vnode_object->vnode_handle,
                          upl, (vm_offset_t)0, offset, size, upl_flags, &errno);

            if ( (upl_flags & UPL_KEEPCACHED) ) {
                if ( (*io_error = errno) )
                    break;
            }
            cnt    -= size;
            offset += size;
        }
        if (resid_offset)
            *resid_offset = offset;

    } else {
        vm_object_offset_t  vnode_size;
        vm_object_offset_t  base_offset;
        vm_object_t         object;
        vm_page_t           target_page;
        int                 ticket;

        /*
         * this is the pageout path
         */
        vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);

        if (vnode_size > (offset + PAGE_SIZE)) {
            /*
             * preset the maximum size of the cluster
             * and put us on a nice cluster boundary...
             * and then clip the size to ensure we
             * don't request past the end of the underlying file
             */
            size = PAGE_SIZE * MAX_UPL_TRANSFER;
            base_offset = offset & ~((signed)(size - 1));

            if ((base_offset + size) > vnode_size)
                size = round_page_32(((vm_size_t)(vnode_size - base_offset)));
        } else {
            /*
             * we've been requested to page out a page beyond the current
             * end of the 'file'... don't try to cluster in this case...
             * we still need to send this page through because it might
             * be marked precious and the underlying filesystem may need
             * to do something with it (besides page it out)...
             */
            base_offset = offset;
            size = PAGE_SIZE;
        }
        object = memory_object_control_to_vm_object(vnode_object->control_handle);

        if (object == VM_OBJECT_NULL)
            panic("vnode_pager_cluster_write: NULL vm_object in control handle\n");

        request_flags = UPL_NOBLOCK | UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
                        UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM |
                        UPL_SET_INTERNAL | UPL_SET_LITE;

        vm_object_lock(object);

        if ((target_page = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
            /*
             * only pick up pages whose ticket number matches
             * the ticket number of the page originally targeted
             * for pageout
             */
            ticket = target_page->page_ticket;

            request_flags |= ((ticket << UPL_PAGE_TICKET_SHIFT) & UPL_PAGE_TICKET_MASK);
        }
        vm_object_unlock(object);

        vm_object_upl_request(object, base_offset, size,
                              &upl, NULL, NULL, request_flags);
        if (upl == NULL)
            panic("vnode_pager_cluster_write: upl request failed\n");

        vnode_pageout(vnode_object->vnode_handle,
                      upl, (vm_offset_t)0, upl->offset, upl->size, UPL_VNODE_PAGER, NULL);
    }
}
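
/*
 * Worked example (illustrative, not from the original file) of the
 * cluster-boundary math on the pageout path above, assuming
 * PAGE_SIZE == 4096 and MAX_UPL_TRANSFER == 256 (a 1MB cluster):
 *
 *     size        = 4096 * 256             = 0x100000
 *     offset      = 0x123000
 *     base_offset = 0x123000 & ~0xfffff    = 0x100000
 *
 * so the request is re-anchored to the enclosing 1MB boundary.  If the
 * file ends at, say, vnode_size == 0x150000, the cluster is then clipped
 * to round_page_32(0x150000 - 0x100000) = 0x50000 so the UPL never
 * extends past EOF.
 */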


/*
 *
 */
kern_return_t
vnode_pager_cluster_read(
    vnode_pager_t       vnode_object,
    vm_object_offset_t  offset,
    vm_size_t           cnt)
{
    int local_error = 0;
    int kret;

    assert(! (cnt & PAGE_MASK));

    kret = vnode_pagein(vnode_object->vnode_handle,
                        (upl_t) NULL,
                        (vm_offset_t) NULL,
                        offset,
                        cnt,
                        0,
                        &local_error);
/*
    if (kret == PAGER_ABSENT) {
    Need to work out the defs here; 1 corresponds to PAGER_ABSENT as
    defined in bsd/vm/vm_pager.h.  However, we should not be including
    that file here; it is a layering violation.
*/
    if (kret == 1) {
        int             uplflags;
        upl_t           upl = NULL;
        int             count = 0;
        kern_return_t   kr;

        uplflags = (UPL_NO_SYNC |
                    UPL_CLEAN_IN_PLACE |
                    UPL_SET_INTERNAL);
        count = 0;
        kr = memory_object_upl_request(vnode_object->control_handle,
                                       offset, cnt,
                                       &upl, NULL, &count, uplflags);
        if (kr == KERN_SUCCESS) {
            upl_abort(upl, 0);
            upl_deallocate(upl);
        } else {
            /*
             * We couldn't gather the page list, probably
             * because the memory object doesn't have a link
             * to a VM object anymore (forced unmount, for
             * example).  Just return an error to the vm_fault()
             * path and let it handle it.
             */
        }

        return KERN_FAILURE;
    }

    return KERN_SUCCESS;

}


/*
 *
 */
void
vnode_pager_release_from_cache(
    int *cnt)
{
    memory_object_free_from_cache(
            &realhost, &vnode_pager_workaround, cnt);
}

/*
 *
 */
vnode_pager_t
vnode_object_create(
    struct vnode *vp)
{
    register vnode_pager_t vnode_object;

    vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
    if (vnode_object == VNODE_PAGER_NULL)
        return(VNODE_PAGER_NULL);

    /*
     * The vm_map call takes both named entry ports and raw memory
     * objects in the same parameter.  We need to make sure that
     * vm_map does not see this object as a named entry port.  So,
     * we reserve the second word in the object for a fake ip_kotype
     * setting - that will tell vm_map to use it as a memory object.
     */
    vnode_object->pager = &vnode_pager_workaround;
    vnode_object->pager_ikot = IKOT_MEMORY_OBJECT;
    vnode_object->ref_count = 1;
    vnode_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
    vnode_object->vnode_handle = vp;

    return(vnode_object);
}
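
/*
 * Illustrative note (not from the original file): the two workaround
 * assignments above are what let a vnode_pager_t be handed around as a
 * memory_object_t.  Code that would otherwise ask an ipc_port for its
 * kernel-object type finds IKOT_MEMORY_OBJECT in the slot it inspects
 * (pager_ikot, the "second word" the comment mentions), so the handle is
 * treated as a pager rather than a named-entry port.  Which ipc_port
 * field that slot shadows is not spelled out in this file; this note only
 * restates the comment above in terms of the fields being initialized.
 */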

/*
 *
 */
vnode_pager_t
vnode_pager_lookup(
    memory_object_t name)
{
    vnode_pager_t vnode_object;

    vnode_object = (vnode_pager_t)name;
    assert(vnode_object->pager == &vnode_pager_workaround);
    return (vnode_object);
}
