/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>
#include <kern/host.h>
#include <mach/mach_types.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <mach/kern_return.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/thread.h>
#include <vm/memory_object.h>

#include <libkern/OSAtomic.h>

#include <default_pager/default_pager_types.h>

/* BSD VM COMPONENT INTERFACES */
int
get_map_nentries(
	vm_map_t);

vm_offset_t
get_map_start(
	vm_map_t);

vm_offset_t
get_map_end(
	vm_map_t);

/*
 * Return the number of entries in the given VM map.
 */
int
get_map_nentries(
	vm_map_t map)
{
	return(map->hdr.nentries);
}

/*
 * Return the start address of the first entry in the given VM map.
 */
vm_offset_t
get_map_start(
	vm_map_t map)
{
	return(vm_map_first_entry(map)->vme_start);
}

/*
 * Return the end address of the last entry in the given VM map.
 */
vm_offset_t
get_map_end(
	vm_map_t map)
{
	return(vm_map_last_entry(map)->vme_end);
}

/*
 * BSD VNODE PAGER
 */

/* until component support available */
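/* the address of vnode_pager_workaround identifies vnode pager objects: it is stored in the pager field of each vnode_pager and checked in vnode_pager_lookup() */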
int	vnode_pager_workaround;

typedef int vnode_port_t;

typedef struct vnode_pager {
	int			*pager;		/* pager workaround pointer */
	unsigned int		pager_ikot;	/* JMM: fake ip_kotype() */
	unsigned int		ref_count;	/* reference count */
	memory_object_control_t	control_handle;	/* mem object control handle */
	vnode_port_t		vnode_handle;	/* vnode handle */
} *vnode_pager_t;


ipc_port_t
trigger_name_to_port(
	mach_port_t);

void
vnode_pager_bootstrap(
	void);

void
vnode_pager_alloc_map(
	void);

memory_object_t
vnode_pager_setup(
	vnode_port_t,
	memory_object_t);


kern_return_t
vnode_pager_init(
	memory_object_t,
	memory_object_control_t,
	vm_size_t);

kern_return_t
vnode_pager_get_object_size(
	memory_object_t,
	memory_object_offset_t *);

kern_return_t
vnode_pager_data_request(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	vm_prot_t);

kern_return_t
vnode_pager_data_return(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	boolean_t,
	boolean_t);

kern_return_t
vnode_pager_data_initialize(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t);

void
vnode_pager_deallocate(
	memory_object_t);

kern_return_t
vnode_pager_terminate(
	memory_object_t);

kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t);

void
vnode_pager_cluster_write(
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t);


int
vnode_pagein(
	vnode_port_t,
	upl_t,
	vm_offset_t,
	vm_object_offset_t,
	int,
	int,
	int *);

int
vnode_pageout(
	vnode_port_t,
	upl_t,
	vm_offset_t,
	vm_object_offset_t,
	int,
	int,
	int *);

vm_object_offset_t
vnode_pager_get_filesize(
	vnode_port_t);

vnode_pager_t
vnode_object_create(
	vnode_port_t vp);

vnode_pager_t
vnode_pager_lookup(
	memory_object_t);

void
vnode_pager_release_from_cache(
	int *cnt);

zone_t	vnode_pager_zone;


#define VNODE_PAGER_NULL	((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define CLUSTER_SHIFT	1

/* TODO: Should be set dynamically by vnode_pager_bootstrap() */
#define	MAX_VNODE	10000


#if DEBUG
int pagerdebug=0;

#define PAGER_ALL		0xffffffff
#define	PAGER_INIT		0x00000001
#define	PAGER_PAGEIN		0x00000002

#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL)==LEVEL){printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif

/*
 *	Routine:	macx_triggers
 *	Function:
 *		Syscall interface to set the callbacks for the low and
 *		high water marks.
 */
int
macx_triggers(
	int		hi_water,
	int		low_water,
	int		flags,
	mach_port_t	trigger_name)
{
	kern_return_t kr;
	memory_object_default_t default_pager;
	ipc_port_t trigger_port;

	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(),
					&default_pager, 0);
	if(kr != KERN_SUCCESS) {
		return EINVAL;
	}
	if (flags & HI_WAT_ALERT) {
		trigger_port = trigger_name_to_port(trigger_name);
		if(trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port);
		/* now unlocked */
		default_pager_triggers(default_pager,
				       hi_water, low_water,
				       HI_WAT_ALERT, trigger_port);
	}

	if (flags & LO_WAT_ALERT) {
		trigger_port = trigger_name_to_port(trigger_name);
		if(trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port);
		/* and now it's unlocked */
		default_pager_triggers(default_pager,
				       hi_water, low_water,
				       LO_WAT_ALERT, trigger_port);
	}

	/*
	 * Set the scheduling priority and policy for the current thread;
	 * for the time being it is assumed that the thread setting the
	 * alert is the same one that will be servicing it.
	 */
	{
		struct policy_timeshare_base fifo_base;
		struct policy_timeshare_limit fifo_limit;
		policy_base_t base;
		processor_set_t pset;
		policy_limit_t limit;

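		/* run the current thread FIFO at MAXPRI_STANDARD so the triggers can be serviced promptly */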
		pset = (current_thread())->processor_set;
		base = (policy_base_t) &fifo_base;
		limit = (policy_limit_t) &fifo_limit;
		fifo_limit.max_priority = fifo_base.base_priority = MAXPRI_STANDARD;
		thread_set_policy((current_thread())->top_act, pset,
				  POLICY_FIFO, base, POLICY_TIMESHARE_BASE_COUNT,
				  limit, POLICY_TIMESHARE_LIMIT_COUNT);
	}

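	/* the servicing thread needs VM privilege so that its own allocations do not block on the shortage it is handling */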
	current_thread()->vm_privilege = TRUE;

	return 0;
}

/*
 * Translate a trigger port name in the current task's IPC space into
 * the underlying port.
 */
ipc_port_t
trigger_name_to_port(
	mach_port_t	trigger_name)
{
	ipc_port_t trigger_port;
	ipc_space_t space;

	if (trigger_name == 0)
		return (NULL);

	space = current_space();
	if(ipc_port_translate_receive(space, (mach_port_name_t)trigger_name,
						&trigger_port) != KERN_SUCCESS)
		return (NULL);
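	/* the port is returned locked and active; the callers in macx_triggers() consume the lock via ipc_port_make_send_locked() */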
	return trigger_port;
}

/*
 * Set up the zone from which vnode pager structures are allocated.
 */
void
vnode_pager_bootstrap(void)
{
	register vm_size_t	size;

	size = (vm_size_t) sizeof(struct vnode_pager);
	vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
				PAGE_SIZE, "vnode pager structures");
	return;
}

/*
 * Create a vnode pager for the given vnode and hand it back as a
 * memory object.  (The pager argument is currently unused.)
 */
memory_object_t
vnode_pager_setup(
	vnode_port_t	vp,
	memory_object_t	pager)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_object_create(vp);
	if (vnode_object == VNODE_PAGER_NULL)
		panic("vnode_pager_setup: vnode_object_create() failed");
	return((memory_object_t)vnode_object);
}

/*
 * Attach the pager to its memory object control handle and set the
 * object's caching attributes.
 */
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
		memory_object_control_t control,
		vm_size_t pg_size)
{
	vnode_pager_t	vnode_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t	attributes;


	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %x, %x, %x\n", mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	vnode_object->control_handle = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
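	/* the CLUSTER_SHIFT variant above is disabled for now, so a single-page cluster size is advertised */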
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = TRUE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("vnode_pager_init: memory_object_change_attributes() failed");

	return(KERN_SUCCESS);
}

/*
 * Pageout entry point: hand the dirty range over to
 * vnode_pager_cluster_write().
 */
kern_return_t
vnode_pager_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt,
	boolean_t		dirty,
	boolean_t		kernel_copy)
{
	register vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);

	vnode_pager_cluster_write(vnode_object, offset, data_cnt);

	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt)
{
	return KERN_FAILURE;
}

kern_return_t
vnode_pager_data_unlock(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		size,
	vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

kern_return_t
vnode_pager_get_object_size(
	memory_object_t		mem_obj,
	memory_object_offset_t	*length)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);

	*length = vnode_pager_get_filesize(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

/*
 * Pagein entry point: hand the requested range over to
 * vnode_pager_cluster_read().
 */
kern_return_t
vnode_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	vm_prot_t		protection_required)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_data_request: %x, %x, %x, %x\n", mem_obj, offset, length, protection_required));

	vnode_object = vnode_pager_lookup(mem_obj);

	PAGER_DEBUG(PAGER_PAGEIN, ("vnode_pager_data_request: %x, %x, %x, %x, vnode_object %x\n", mem_obj, offset, length, protection_required, vnode_object));

	vnode_pager_cluster_read(vnode_object, offset, length);

	return KERN_SUCCESS;
}

/*
 * Take an additional reference on the pager.
 */
void
vnode_pager_reference(
	memory_object_t		mem_obj)
{
	register vnode_pager_t	vnode_object;
	unsigned int		prev_ref_count;

	vnode_object = vnode_pager_lookup(mem_obj);
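	/* OSIncrementAtomic() returns the value before the increment; it must be non-zero or the pager was already dead */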
	prev_ref_count = OSIncrementAtomic((UInt32 *)&vnode_object->ref_count);
	assert(prev_ref_count > 0);
}

/*
 * Release a reference; on the last release, drop the vnode and free
 * the pager structure.
 */
void
vnode_pager_deallocate(
	memory_object_t		mem_obj)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %x\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

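	/* OSDecrementAtomic() returns the value before the decrement, so 1 means this was the last reference */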
	if (OSDecrementAtomic((UInt32 *)&vnode_object->ref_count) == 1) {
		if (vnode_object->vnode_handle != (vnode_port_t) NULL) {
			vnode_pager_vrele(vnode_object->vnode_handle);
		}
		zfree(vnode_pager_zone, (vm_offset_t) vnode_object);
	}
	return;
}

/*
 * No per-pager cleanup is needed here; teardown happens in
 * vnode_pager_deallocate().
 */
kern_return_t
vnode_pager_terminate(
	memory_object_t	mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %x\n", mem_obj));

	return(KERN_SUCCESS);
}

/*
 * Report the requested range as synchronized back to the memory
 * object layer.
 */
kern_return_t
vnode_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	vm_sync_t		sync_flags)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %x\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_synchronize_completed(vnode_object->control_handle, offset, length);

	return (KERN_SUCCESS);
}

/*
 * Notify the UBC layer that the vnode is no longer mapped.
 */
kern_return_t
vnode_pager_unmap(
	memory_object_t	mem_obj)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_unmap: %x\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	ubc_unmap(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}


/*
 * Push a dirty range out to the vnode, at most 32 pages at a time.
 */
void
vnode_pager_cluster_write(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt)
{
	int		error = 0;
	int		local_error = 0;
	int		kret;
	int		size;

	if (cnt & PAGE_MASK) {
		panic("vs_cluster_write: cnt not a multiple of PAGE_SIZE");
	}
	size = (cnt < (PAGE_SIZE*32)) ? cnt : (PAGE_SIZE*32); /* effective min */

	while (cnt) {

		kret = vnode_pageout(vnode_object->vnode_handle,
				     (upl_t )NULL, (vm_offset_t)NULL,
				     offset, size, 0, &local_error);
		/*
		if(kret == PAGER_ABSENT) {
			Need to work out the defs here, 1 corresponds to
			PAGER_ABSENT defined in bsd/vm/vm_pager.h; however,
			we should not be including that file here, it is a
			layering violation.
		*/
		if(kret == 1) {
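			/* PAGER_ABSENT: the vnode could not take the data, so take out a UPL for the range and abort it to release the pages */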
			int uplflags;
			upl_t	upl = NULL;
			int	count = 0;
			kern_return_t	kr;

			uplflags = (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
				    UPL_SET_INTERNAL | UPL_COPYOUT_FROM);
			count = 0;
			kr = memory_object_upl_request(
					vnode_object->control_handle,
					offset, size, &upl, NULL, &count, uplflags);
			if(kr != KERN_SUCCESS) {
				panic("vnode_pager_cluster_write: upl request failed\n");
			}
			upl_abort(upl, 0);
			upl_deallocate(upl);

			error = 0;
			local_error = 0;
		}

		if (local_error != 0) {
			error = local_error;
			local_error = 0;
		}
		cnt -= size;
		offset += size;
		size = (cnt < (PAGE_SIZE*32)) ? cnt : (PAGE_SIZE*32); /* effective min */
	}
#if 0
	if (error != 0)
		return(KERN_FAILURE);

	return(KERN_SUCCESS);
#endif /* 0 */
}



/*
 * Page the requested range in from the vnode.
 */
kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt)
{
	int		error = 0;
	int		local_error = 0;
	int		kret;

	if(cnt & PAGE_MASK) {
		panic("vs_cluster_read: cnt not a multiple of PAGE_SIZE");
	}

	kret = vnode_pagein(vnode_object->vnode_handle, (upl_t)NULL, (vm_offset_t)NULL, offset, cnt, 2, &local_error);
	/*
	if(kret == PAGER_ABSENT) {
		Need to work out the defs here, 1 corresponds to PAGER_ABSENT
		defined in bsd/vm/vm_pager.h; however, we should not be including
		that file here, it is a layering violation.
	*/
	if(kret == 1) {
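		/* PAGER_ABSENT: the pagein could not be satisfied; take out a UPL for the range, abort it, and report the failure */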
		int uplflags;
		upl_t	upl = NULL;
		int	count = 0;
		kern_return_t	kr;

		uplflags = (UPL_NO_SYNC |
			    UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
		count = 0;
		kr = memory_object_upl_request(
				vnode_object->control_handle, offset, cnt,
				&upl, NULL, &count, uplflags);
		if(kr != KERN_SUCCESS) {
			panic("vnode_pager_cluster_read: upl request failed\n");
		}
		upl_abort(upl, 0);
		upl_deallocate(upl);

		error = 1;
	}

	if (error != 0)
		return(KERN_FAILURE);

	return(KERN_SUCCESS);

}


/*
 * Ask the VM system to free cached memory objects that belong to the
 * vnode pager.
 */
void
vnode_pager_release_from_cache(
	int	*cnt)
{
	memory_object_free_from_cache(
			&realhost, &vnode_pager_workaround, cnt);
}

/*
 * Allocate and initialize a vnode pager structure for the given vnode.
 */
vnode_pager_t
vnode_object_create(
	vnode_port_t	vp)
{
	register vnode_pager_t	vnode_object;

	vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
	if (vnode_object == VNODE_PAGER_NULL)
		return(VNODE_PAGER_NULL);

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter. We need to make sure that
	 * vm_map does not see this object as a named entry port. So,
	 * we reserve the second word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	vnode_object->pager = &vnode_pager_workaround;
	vnode_object->pager_ikot = IKOT_MEMORY_OBJECT;
	vnode_object->ref_count = 1;
	vnode_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
	vnode_object->vnode_handle = vp;

	return(vnode_object);
}

/*
 * Recover the vnode pager structure from a memory object handle.
 */
vnode_pager_t
vnode_pager_lookup(
	memory_object_t	name)
{
	vnode_pager_t	vnode_object;

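	/* the memory_object_t handed out by vnode_pager_setup() is really a vnode_pager_t, so a cast recovers it */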
	vnode_object = (vnode_pager_t)name;
	assert(vnode_object->pager == &vnode_pager_workaround);
	return (vnode_object);
}