/* osfmk/vm/bsd_vm.c (xnu-792.6.56) */
/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>

#include <kern/host.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <default_pager/default_pager_types.h>
#include <default_pager/default_pager_object_server.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>

/* BSD VM COMPONENT INTERFACES */
int
get_map_nentries(
        vm_map_t);

vm_offset_t
get_map_start(
        vm_map_t);

vm_offset_t
get_map_end(
        vm_map_t);

/*
 * Return the number of entries in the given map.
 */
int
get_map_nentries(
        vm_map_t map)
{
        return(map->hdr.nentries);
}

mach_vm_offset_t
mach_get_vm_start(vm_map_t map)
{
        return( vm_map_first_entry(map)->vme_start);
}

mach_vm_offset_t
mach_get_vm_end(vm_map_t map)
{
        return( vm_map_last_entry(map)->vme_end);
}

/*
 * Legacy routines to get the start and end of a vm_map_t.  They
 * return the addresses as vm_offset_t, so they should only be
 * called on maps that are the same size as the kernel map for
 * accurate results.
 */
vm_offset_t
get_vm_start(
        vm_map_t map)
{
        return(CAST_DOWN(vm_offset_t, vm_map_first_entry(map)->vme_start));
}

vm_offset_t
get_vm_end(
        vm_map_t map)
{
        return(CAST_DOWN(vm_offset_t, vm_map_last_entry(map)->vme_end));
}

/*
 * BSD VNODE PAGER
 */

/* until component support available */
int vnode_pager_workaround;

typedef struct vnode_pager {
        int                     *pager;          /* pager workaround pointer */
        unsigned int            pager_ikot;      /* JMM: fake ip_kotype() */
        unsigned int            ref_count;       /* reference count */
        memory_object_control_t control_handle;  /* mem object control handle */
        struct vnode            *vnode_handle;   /* vnode handle */
} *vnode_pager_t;
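
/*
 * Layout note: the pager and pager_ikot fields above stand in for the
 * first words of an ipc_port.  vnode_object_create() below stores
 * IKOT_MEMORY_OBJECT in pager_ikot as a fake ip_kotype() so that vm_map
 * treats a vnode_pager_t as a memory object rather than a named entry
 * port; see the comment in vnode_object_create().
 */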


ipc_port_t
trigger_name_to_port(           /* forward */
        mach_port_t);

kern_return_t
vnode_pager_cluster_read(       /* forward */
        vnode_pager_t,
        vm_object_offset_t,
        vm_size_t);

void
vnode_pager_cluster_write(      /* forward */
        vnode_pager_t,
        vm_object_offset_t,
        vm_size_t,
        vm_object_offset_t *,
        int *,
        int);


vnode_pager_t
vnode_object_create(            /* forward */
        struct vnode *);

vnode_pager_t
vnode_pager_lookup(             /* forward */
        memory_object_t);

zone_t  vnode_pager_zone;


#define VNODE_PAGER_NULL        ((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define CLUSTER_SHIFT   1

/* TODO: Should be set dynamically by vnode_pager_bootstrap() */
#define MAX_VNODE       10000


#if DEBUG
int pagerdebug = 0;

#define PAGER_ALL       0xffffffff
#define PAGER_INIT      0x00000001
#define PAGER_PAGEIN    0x00000002

#define PAGER_DEBUG(LEVEL, A) { if ((pagerdebug & LEVEL) == LEVEL) { printf A; } }
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
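
/*
 * Illustrative usage (a sketch, not code from this file): on a DEBUG
 * kernel the PAGER_DEBUG() printfs can be enabled by raising pagerdebug,
 * for example from the debugger or by changing the initializer above:
 *
 *      pagerdebug = PAGER_INIT | PAGER_PAGEIN;   trace init and pagein paths
 *      pagerdebug = PAGER_ALL;                   trace everything
 *
 * A level fires only when all of its bits are set in pagerdebug, since
 * the macro tests (pagerdebug & LEVEL) == LEVEL.
 */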

/*
 * Routine:     macx_triggers
 * Function:
 *      Syscall interface to set the callbacks for the low and
 *      high water marks.
 */
int
macx_triggers(
        struct macx_triggers_args *args)
{
        int hi_water = args->hi_water;
        int low_water = args->low_water;
        int flags = args->flags;
        mach_port_t trigger_name = args->alert_port;
        kern_return_t kr;
        memory_object_default_t default_pager;
        ipc_port_t trigger_port;

        default_pager = MEMORY_OBJECT_DEFAULT_NULL;
        kr = host_default_memory_manager(host_priv_self(),
                                         &default_pager, 0);
        if (kr != KERN_SUCCESS) {
                return EINVAL;
        }

        if ((flags & SWAP_ENCRYPT_ON) &&
            (flags & SWAP_ENCRYPT_OFF)) {
                /* can't have it both ways */
                return EINVAL;
        }

        if (flags & SWAP_ENCRYPT_ON) {
                /* ENCRYPTED SWAP: tell default_pager to encrypt */
                default_pager_triggers(default_pager,
                                       0, 0,
                                       SWAP_ENCRYPT_ON,
                                       IP_NULL);
        } else if (flags & SWAP_ENCRYPT_OFF) {
                /* ENCRYPTED SWAP: tell default_pager not to encrypt */
                default_pager_triggers(default_pager,
                                       0, 0,
                                       SWAP_ENCRYPT_OFF,
                                       IP_NULL);
        }

        if (flags & HI_WAT_ALERT) {
                trigger_port = trigger_name_to_port(trigger_name);
                if (trigger_port == NULL) {
                        return EINVAL;
                }
                /* trigger_port is locked and active */
                ipc_port_make_send_locked(trigger_port);
                /* now unlocked */
                default_pager_triggers(default_pager,
                                       hi_water, low_water,
                                       HI_WAT_ALERT, trigger_port);
        }

        if (flags & LO_WAT_ALERT) {
                trigger_port = trigger_name_to_port(trigger_name);
                if (trigger_port == NULL) {
                        return EINVAL;
                }
                /* trigger_port is locked and active */
                ipc_port_make_send_locked(trigger_port);
                /* and now it's unlocked */
                default_pager_triggers(default_pager,
                                       hi_water, low_water,
                                       LO_WAT_ALERT, trigger_port);
        }

        /*
         * Set thread scheduling priority and policy for the current thread;
         * it is assumed for the time being that the thread setting the alert
         * is the same one which will be servicing it.
         *
         * XXX This does not belong in the kernel XXX
         */
        {
                thread_precedence_policy_data_t pre;
                thread_extended_policy_data_t   ext;

                ext.timeshare = FALSE;
                pre.importance = INT32_MAX;

                thread_policy_set(current_thread(),
                                  THREAD_EXTENDED_POLICY,
                                  (thread_policy_t)&ext,
                                  THREAD_EXTENDED_POLICY_COUNT);

                thread_policy_set(current_thread(),
                                  THREAD_PRECEDENCE_POLICY,
                                  (thread_policy_t)&pre,
                                  THREAD_PRECEDENCE_POLICY_COUNT);
        }

        current_thread()->options |= TH_OPT_VMPRIV;

        return 0;
}
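
/*
 * Illustrative user-space sketch (assumptions: the macx_triggers() wrapper
 * takes the same four values carried by struct macx_triggers_args above,
 * and the caller is a pager daemon such as dynamic_pager; none of this is
 * part of this file).  Registering one port for both watermark alerts
 * might look roughly like:
 *
 *      mach_port_t alert_port;
 *
 *      mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
 *                         &alert_port);
 *      if (macx_triggers(hi_water_bytes, low_water_bytes,
 *                        HI_WAT_ALERT | LO_WAT_ALERT, alert_port) != 0)
 *              handle the EINVAL cases rejected above;
 *
 * The kernel side translates alert_port with trigger_name_to_port() (so a
 * receive right must be passed), makes a send right from it, and hands
 * that right to the default pager via default_pager_triggers().
 */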

/*
 * Convert a trigger port name in the current task's IPC space into the
 * underlying receive-right ipc_port_t, or NULL if the name cannot be
 * translated.
 */
ipc_port_t
trigger_name_to_port(
        mach_port_t trigger_name)
{
        ipc_port_t trigger_port;
        ipc_space_t space;

        if (trigger_name == 0)
                return (NULL);

        space = current_space();
        if (ipc_port_translate_receive(space, (mach_port_name_t)trigger_name,
                                       &trigger_port) != KERN_SUCCESS)
                return (NULL);
        return trigger_port;
}


extern int uiomove64(addr64_t, int, void *);
#define MAX_RUN 32

int
memory_object_control_uiomove(
        memory_object_control_t control,
        memory_object_offset_t  offset,
        void                    *uio,
        int                     start_offset,
        int                     io_requested,
        int                     mark_dirty)
{
        vm_object_t             object;
        vm_page_t               dst_page;
        int                     xsize;
        int                     retval = 0;
        int                     cur_run;
        int                     cur_needed;
        int                     i;
        vm_page_t               page_run[MAX_RUN];


        object = memory_object_control_to_vm_object(control);
        if (object == VM_OBJECT_NULL) {
                return (0);
        }
        assert(!object->internal);

        vm_object_lock(object);

        if (mark_dirty && object->copy != VM_OBJECT_NULL) {
                /*
                 * We can't modify the pages without honoring
                 * copy-on-write obligations first, so fall off
                 * this optimized path and fall back to the regular
                 * path.
                 */
                vm_object_unlock(object);
                return 0;
        }

        while (io_requested && retval == 0) {

                cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

                if (cur_needed > MAX_RUN)
                        cur_needed = MAX_RUN;

                for (cur_run = 0; cur_run < cur_needed; ) {

                        if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
                                break;
                        /*
                         * Sync up on getting the busy bit
                         */
                        if ((dst_page->busy || dst_page->cleaning)) {
                                /*
                                 * someone else is playing with the page... if we've
                                 * already collected pages into this run, go ahead
                                 * and process them now, since we can't block on this
                                 * page while holding other pages in the BUSY state;
                                 * otherwise we wait for it here
                                 */
                                if (cur_run)
                                        break;
                                PAGE_SLEEP(object, dst_page, THREAD_UNINT);
                                continue;
                        }
                        /*
                         * this routine is only called when copying
                         * to/from real files... no need to consider
                         * encrypted swap pages
                         */
                        assert(!dst_page->encrypted);

                        if (mark_dirty)
                                dst_page->dirty = TRUE;
                        dst_page->busy = TRUE;

                        page_run[cur_run++] = dst_page;

                        offset += PAGE_SIZE_64;
                }
                if (cur_run == 0)
                        /*
                         * we hit a 'hole' in the cache...
                         * bail at this point; the object
                         * gets unlocked below
                         */
                        break;
                vm_object_unlock(object);

                for (i = 0; i < cur_run; i++) {

                        dst_page = page_run[i];

                        if ((xsize = PAGE_SIZE - start_offset) > io_requested)
                                xsize = io_requested;

                        if ( (retval = uiomove64((addr64_t)(((addr64_t)(dst_page->phys_page) << 12) + start_offset), xsize, uio)) )
                                break;

                        io_requested -= xsize;
                        start_offset = 0;
                }
                vm_object_lock(object);

                for (i = 0; i < cur_run; i++) {
                        dst_page = page_run[i];

                        PAGE_WAKEUP_DONE(dst_page);
                }
        }
        vm_object_unlock(object);

        return (retval);
}
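
/*
 * Illustrative call pattern (a sketch under assumptions; no caller appears
 * in this file): a BSD-layer routine holding the file's
 * memory_object_control_t and a uio describing the user buffer could move
 * io_size bytes at file offset f_offset with roughly:
 *
 *      retval = memory_object_control_uiomove(control,
 *                  f_offset & ~((memory_object_offset_t)PAGE_MASK),
 *                  uio,
 *                  (int)(f_offset & PAGE_MASK),
 *                  io_size,
 *                  writing ? 1 : 0);
 *
 * i.e. a page-aligned object offset, the residual offset within the first
 * page, the byte count, and mark_dirty set when the copy modifies the
 * pages.  When a cache 'hole' is hit the loop above simply stops early;
 * the caller can see how much was actually moved from the uio residual,
 * since uiomove64() advances the uio as it copies.
 */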


/*
 * Initialize the zone from which vnode pager structures are allocated.
 */
void
vnode_pager_bootstrap(void)
{
        register vm_size_t size;

        size = (vm_size_t) sizeof(struct vnode_pager);
        vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
                                 PAGE_SIZE, "vnode pager structures");
        return;
}

/*
 * Create a memory object (vnode pager) for the given vnode.
 */
memory_object_t
vnode_pager_setup(
        struct vnode *vp,
        __unused memory_object_t pager)
{
        vnode_pager_t vnode_object;

        vnode_object = vnode_object_create(vp);
        if (vnode_object == VNODE_PAGER_NULL)
                panic("vnode_pager_setup: vnode_object_create() failed");
        return((memory_object_t)vnode_object);
}

/*
 * Attach the pager to its memory object control handle and set the
 * object's caching attributes.
 */
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
                 memory_object_control_t control,
#if !DEBUG
                 __unused
#endif
                 vm_size_t pg_size)
{
        vnode_pager_t vnode_object;
        kern_return_t kr;
        memory_object_attr_info_data_t attributes;


        PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %x\n", mem_obj, control, pg_size));

        if (control == MEMORY_OBJECT_CONTROL_NULL)
                return KERN_INVALID_ARGUMENT;

        vnode_object = vnode_pager_lookup(mem_obj);

        memory_object_control_reference(control);

        vnode_object->control_handle = control;

        attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
        /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
        attributes.cluster_size = (1 << (PAGE_SHIFT));
        attributes.may_cache_object = TRUE;
        attributes.temporary = TRUE;

        kr = memory_object_change_attributes(
                control,
                MEMORY_OBJECT_ATTRIBUTE_INFO,
                (memory_object_info_t) &attributes,
                MEMORY_OBJECT_ATTR_INFO_COUNT);
        if (kr != KERN_SUCCESS)
                panic("vnode_pager_init: memory_object_change_attributes() failed");

        return(KERN_SUCCESS);
}

/*
 * Write the given range of the object back to its file by way of
 * vnode_pager_cluster_write().
 */
kern_return_t
vnode_pager_data_return(
        memory_object_t         mem_obj,
        memory_object_offset_t  offset,
        vm_size_t               data_cnt,
        memory_object_offset_t  *resid_offset,
        int                     *io_error,
        __unused boolean_t      dirty,
        __unused boolean_t      kernel_copy,
        int                     upl_flags)
{
        register vnode_pager_t vnode_object;

        vnode_object = vnode_pager_lookup(mem_obj);

        vnode_pager_cluster_write(vnode_object, offset, data_cnt, resid_offset, io_error, upl_flags);

        return KERN_SUCCESS;
}

kern_return_t
vnode_pager_data_initialize(
        __unused memory_object_t        mem_obj,
        __unused memory_object_offset_t offset,
        __unused vm_size_t              data_cnt)
{
        panic("vnode_pager_data_initialize");
        return KERN_FAILURE;
}

kern_return_t
vnode_pager_data_unlock(
        __unused memory_object_t        mem_obj,
        __unused memory_object_offset_t offset,
        __unused vm_size_t              size,
        __unused vm_prot_t              desired_access)
{
        return KERN_FAILURE;
}

kern_return_t
vnode_pager_get_object_size(
        memory_object_t         mem_obj,
        memory_object_offset_t  *length)
{
        vnode_pager_t vnode_object;

        vnode_object = vnode_pager_lookup(mem_obj);

        *length = vnode_pager_get_filesize(vnode_object->vnode_handle);
        return KERN_SUCCESS;
}

/*
 * Page in the given range of the object by way of
 * vnode_pager_cluster_read().
 */
kern_return_t
vnode_pager_data_request(
        memory_object_t         mem_obj,
        memory_object_offset_t  offset,
        vm_size_t               length,
#if !DEBUG
        __unused
#endif
        vm_prot_t               protection_required)
{
        register vnode_pager_t vnode_object;

        PAGER_DEBUG(PAGER_ALL, ("vnode_pager_data_request: %x, %x, %x, %x\n", mem_obj, offset, length, protection_required));

        vnode_object = vnode_pager_lookup(mem_obj);

        PAGER_DEBUG(PAGER_PAGEIN, ("vnode_pager_data_request: %x, %x, %x, %x, vnode_object %x\n", mem_obj, offset, length, protection_required, vnode_object));

        return vnode_pager_cluster_read(vnode_object, offset, length);
}

/*
 * Take an additional reference on the pager.
 */
void
vnode_pager_reference(
        memory_object_t mem_obj)
{
        register vnode_pager_t vnode_object;
        unsigned int new_ref_count;

        vnode_object = vnode_pager_lookup(mem_obj);
        new_ref_count = hw_atomic_add(&vnode_object->ref_count, 1);
        assert(new_ref_count > 1);
}

/*
 * Drop a reference on the pager; on the last release, let go of the
 * vnode and free the pager structure.
 */
void
vnode_pager_deallocate(
        memory_object_t mem_obj)
{
        register vnode_pager_t vnode_object;

        PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %x\n", mem_obj));

        vnode_object = vnode_pager_lookup(mem_obj);

        if (hw_atomic_sub(&vnode_object->ref_count, 1) == 0) {
                if (vnode_object->vnode_handle != NULL) {
                        vnode_pager_vrele(vnode_object->vnode_handle);
                }
                zfree(vnode_pager_zone, vnode_object);
        }
        return;
}
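
/*
 * Reference counting note: vnode_object_create() starts a pager at
 * ref_count == 1, vnode_pager_reference() bumps it, and the final
 * vnode_pager_deallocate() both releases the vnode (vnode_pager_vrele)
 * and returns the structure to vnode_pager_zone.
 */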

/*
 * Nothing to tear down here beyond the debug trace.
 */
kern_return_t
vnode_pager_terminate(
#if !DEBUG
        __unused
#endif
        memory_object_t mem_obj)
{
        PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %x\n", mem_obj));

        return(KERN_SUCCESS);
}

/*
 * Report the requested range as synchronized back to the VM layer.
 */
kern_return_t
vnode_pager_synchronize(
        memory_object_t         mem_obj,
        memory_object_offset_t  offset,
        vm_size_t               length,
        __unused vm_sync_t      sync_flags)
{
        register vnode_pager_t vnode_object;

        PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %x\n", mem_obj));

        vnode_object = vnode_pager_lookup(mem_obj);

        memory_object_synchronize_completed(vnode_object->control_handle, offset, length);

        return (KERN_SUCCESS);
}

/*
 * The object is no longer mapped; let the UBC layer know.
 */
kern_return_t
vnode_pager_unmap(
        memory_object_t mem_obj)
{
        register vnode_pager_t vnode_object;

        PAGER_DEBUG(PAGER_ALL, ("vnode_pager_unmap: %x\n", mem_obj));

        vnode_object = vnode_pager_lookup(mem_obj);

        ubc_unmap(vnode_object->vnode_handle);
        return KERN_SUCCESS;
}


/*
 * Write a range of the object back to its file, either on behalf of an
 * msync (UPL_MSYNC) or as part of normal pageout.
 */
void
vnode_pager_cluster_write(
        vnode_pager_t           vnode_object,
        vm_object_offset_t      offset,
        vm_size_t               cnt,
        vm_object_offset_t      *resid_offset,
        int                     *io_error,
        int                     upl_flags)
{
        vm_size_t       size;
        upl_t           upl = NULL;
        int             request_flags;
        int             errno;

        if (upl_flags & UPL_MSYNC) {

                upl_flags |= UPL_VNODE_PAGER;

                if ( (upl_flags & UPL_IOSYNC) && io_error)
                        upl_flags |= UPL_KEEPCACHED;

                while (cnt) {
                        kern_return_t kr;

                        size = (cnt < (PAGE_SIZE * MAX_UPL_TRANSFER)) ? cnt : (PAGE_SIZE * MAX_UPL_TRANSFER); /* effective max */

                        request_flags = UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
                                        UPL_SET_INTERNAL | UPL_SET_LITE;

                        kr = memory_object_upl_request(vnode_object->control_handle,
                                                       offset, size, &upl, NULL, NULL, request_flags);
                        if (kr != KERN_SUCCESS)
                                panic("vnode_pager_cluster_write: upl request failed\n");

                        vnode_pageout(vnode_object->vnode_handle,
                                      upl, (vm_offset_t)0, offset, size, upl_flags, &errno);

                        if ( (upl_flags & UPL_KEEPCACHED) ) {
                                if ( (*io_error = errno) )
                                        break;
                        }
                        cnt -= size;
                        offset += size;
                }
                if (resid_offset)
                        *resid_offset = offset;

        } else {
                vm_object_offset_t      vnode_size;
                vm_object_offset_t      base_offset;
                vm_object_t             object;
                vm_page_t               target_page;
                int                     ticket;

                /*
                 * this is the pageout path
                 */
                vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);

                if (vnode_size > (offset + PAGE_SIZE)) {
                        /*
                         * preset the maximum size of the cluster
                         * and put us on a nice cluster boundary...
                         * and then clip the size to ensure we
                         * don't request past the end of the underlying file
                         */
                        size = PAGE_SIZE * MAX_UPL_TRANSFER;
                        base_offset = offset & ~((signed)(size - 1));

                        if ((base_offset + size) > vnode_size)
                                size = round_page_32(((vm_size_t)(vnode_size - base_offset)));
                } else {
                        /*
                         * we've been requested to page out a page beyond the current
                         * end of the 'file'... don't try to cluster in this case...
                         * we still need to send this page through because it might
                         * be marked precious and the underlying filesystem may need
                         * to do something with it (besides page it out)...
                         */
                        base_offset = offset;
                        size = PAGE_SIZE;
                }
                object = memory_object_control_to_vm_object(vnode_object->control_handle);

                if (object == VM_OBJECT_NULL)
                        panic("vnode_pager_cluster_write: NULL vm_object in control handle\n");

                request_flags = UPL_NOBLOCK | UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
                                UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM |
                                UPL_SET_INTERNAL | UPL_SET_LITE;

                vm_object_lock(object);

                if ((target_page = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
                        /*
                         * only pick up pages whose ticket number matches
                         * the ticket number of the page originally targeted
                         * for pageout
                         */
                        ticket = target_page->page_ticket;

                        request_flags |= ((ticket << UPL_PAGE_TICKET_SHIFT) & UPL_PAGE_TICKET_MASK);
                }
                vm_object_unlock(object);

                vm_object_upl_request(object, base_offset, size,
                                      &upl, NULL, NULL, request_flags);
                if (upl == NULL)
                        panic("vnode_pager_cluster_write: upl request failed\n");

                vnode_pageout(vnode_object->vnode_handle,
                              upl, (vm_offset_t)0, upl->offset, upl->size, UPL_VNODE_PAGER, NULL);
        }
}
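
/*
 * Worked example of the pageout-path cluster sizing above (assuming 4K
 * pages and a MAX_UPL_TRANSFER of 256, i.e. a 1MB maximum cluster): for a
 * dirty page at offset 0x132000 in a 0x180000-byte file, size starts at
 * 0x100000 and base_offset = 0x132000 & ~0xfffff = 0x100000; since
 * base_offset + size (0x200000) runs past the end of the file, size is
 * clipped to round_page(0x180000 - 0x100000) = 0x80000.  The last page of
 * the file (or a page beyond it) instead goes out alone as a single
 * PAGE_SIZE UPL.
 */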


/*
 * Read a page-aligned range of the file into the object via
 * vnode_pagein().
 */
kern_return_t
vnode_pager_cluster_read(
        vnode_pager_t           vnode_object,
        vm_object_offset_t      offset,
        vm_size_t               cnt)
{
        int local_error = 0;
        int kret;

        assert(! (cnt & PAGE_MASK));

        kret = vnode_pagein(vnode_object->vnode_handle,
                            (upl_t) NULL,
                            (vm_offset_t) NULL,
                            offset,
                            cnt,
                            0,
                            &local_error);
        /*
        if (kret == PAGER_ABSENT) {
                Need to work out the defs here; 1 corresponds to PAGER_ABSENT
                as defined in bsd/vm/vm_pager.h.  However, we should not be
                including that file here; it is a layering violation.
        */
        if (kret == 1) {
                int uplflags;
                upl_t upl = NULL;
                int count = 0;
                kern_return_t kr;

                uplflags = (UPL_NO_SYNC |
                            UPL_CLEAN_IN_PLACE |
                            UPL_SET_INTERNAL);
                count = 0;
                kr = memory_object_upl_request(vnode_object->control_handle,
                                               offset, cnt,
                                               &upl, NULL, &count, uplflags);
                if (kr == KERN_SUCCESS) {
                        upl_abort(upl, 0);
                        upl_deallocate(upl);
                } else {
                        /*
                         * We couldn't gather the page list, probably
                         * because the memory object doesn't have a link
                         * to a VM object anymore (forced unmount, for
                         * example). Just return an error to the vm_fault()
                         * path and let it handle it.
                         */
                }

                return KERN_FAILURE;
        }

        return KERN_SUCCESS;

}
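
/*
 * Note on the absent-page case above: the hard-coded 1 stands in for
 * PAGER_ABSENT, as the comment in the body explains.  In that case a UPL
 * covering the range is requested and immediately aborted, and
 * KERN_FAILURE is returned so the vm_fault() path can deal with the
 * error; if the UPL request itself fails (forced unmount, for example),
 * the same KERN_FAILURE is returned.
 */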


/*
 * Free unreferenced vnode pager memory objects from the VM object cache.
 */
void
vnode_pager_release_from_cache(
        int     *cnt)
{
        memory_object_free_from_cache(
                &realhost, &vnode_pager_workaround, cnt);
}

/*
 * Allocate and initialize a vnode pager structure for the given vnode.
 */
vnode_pager_t
vnode_object_create(
        struct vnode *vp)
{
        register vnode_pager_t vnode_object;

        vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
        if (vnode_object == VNODE_PAGER_NULL)
                return(VNODE_PAGER_NULL);

        /*
         * The vm_map call takes both named entry ports and raw memory
         * objects in the same parameter. We need to make sure that
         * vm_map does not see this object as a named entry port. So,
         * we reserve the second word in the object for a fake ip_kotype
         * setting - that will tell vm_map to use it as a memory object.
         */
        vnode_object->pager = &vnode_pager_workaround;
        vnode_object->pager_ikot = IKOT_MEMORY_OBJECT;
        vnode_object->ref_count = 1;
        vnode_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
        vnode_object->vnode_handle = vp;

        return(vnode_object);
}

/*
 * Convert a memory object back to the vnode pager structure it really is.
 */
vnode_pager_t
vnode_pager_lookup(
        memory_object_t name)
{
        vnode_pager_t vnode_object;

        vnode_object = (vnode_pager_t)name;
        assert(vnode_object->pager == &vnode_pager_workaround);
        return (vnode_object);
}