]> git.saurik.com Git - apple/xnu.git/blob - osfmk/vm/bsd_vm.c
xnu-344.21.73.tar.gz
[apple/xnu.git] / osfmk / vm / bsd_vm.c
1 /*
2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25
26 #include <sys/errno.h>
27 #include <kern/host.h>
28 #include <mach/mach_types.h>
29 #include <vm/vm_map.h>
30 #include <vm/vm_kern.h>
31 #include <vm/vm_pageout.h>
32 #include <mach/kern_return.h>
33 #include <mach/memory_object_types.h>
34 #include <mach/port.h>
35 #include <mach/policy.h>
36 #include <ipc/ipc_port.h>
37 #include <ipc/ipc_space.h>
38 #include <kern/thread.h>
39 #include <vm/memory_object.h>
40 #include <vm/vm_pageout.h>
41
42 #include <default_pager/default_pager_types.h>
43
44 /* BSD VM COMPONENT INTERFACES */
/*
 * Forward declarations for the BSD VM component accessors defined below.
 */
int
get_map_nentries(
	vm_map_t);

vm_offset_t
get_map_start(
	vm_map_t);

vm_offset_t
get_map_end(
	vm_map_t);
56
57 /*
58 *
59 */
60 int
61 get_map_nentries(
62 vm_map_t map)
63 {
64 return(map->hdr.nentries);
65 }
66
67 /*
68 *
69 */
70 vm_offset_t
71 get_map_start(
72 vm_map_t map)
73 {
74 return(vm_map_first_entry(map)->vme_start);
75 }
76
77 /*
78 *
79 */
80 vm_offset_t
81 get_map_end(
82 vm_map_t map)
83 {
84 return(vm_map_last_entry(map)->vme_end);
85 }
86
87 /*
88 * BSD VNODE PAGER
89 */
90
/* until component support available */
int	vnode_pager_workaround;

/* opaque handle to the backing vnode; presumably a struct vnode * squeezed
 * into an int -- TODO confirm against bsd/vm callers */
typedef int vnode_port_t;

/*
 * Per-memory-object state for a vnode-backed pager.  The first two fields
 * deliberately mimic an ipc_port's layout (see vnode_object_create below)
 * so vm_map treats a pointer to this struct as a memory object, not a
 * named entry port.
 */
typedef struct vnode_pager {
	int *pager;				/* pager workaround pointer  */
	unsigned int pager_ikot;		/* JMM: fake ip_kotype()     */
	unsigned int ref_count;			/* reference count	     */
	memory_object_control_t control_handle;	/* mem object control handle */
	vnode_port_t vnode_handle;		/* vnode handle		     */
} *vnode_pager_t;
103
104
/*
 * Forward declarations for the vnode pager: the memory-object interface
 * (init/data_request/data_return/...), the cluster read/write helpers, and
 * the vnode_pagein/vnode_pageout entry points implemented in the BSD layer.
 */
ipc_port_t
trigger_name_to_port(
	mach_port_t);

void
vnode_pager_bootstrap(
	void);

void
vnode_pager_alloc_map(
	void);

memory_object_t
vnode_pager_setup(
	vnode_port_t,
	memory_object_t);


kern_return_t
vnode_pager_init(
	memory_object_t,
	memory_object_control_t,
	vm_size_t);

kern_return_t
vnode_pager_get_object_size(
	memory_object_t,
	memory_object_offset_t *);

kern_return_t
vnode_pager_data_request(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	vm_prot_t);

kern_return_t
vnode_pager_data_return(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	boolean_t,
	boolean_t);

kern_return_t
vnode_pager_data_initialize(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t);

void
vnode_pager_deallocate(
	memory_object_t);

kern_return_t
vnode_pager_terminate(
	memory_object_t);

kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t);

void
vnode_pager_cluster_write(
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t);


/* implemented in the BSD layer (bsd/vm); perform the actual file I/O */
int
vnode_pagein(
	vnode_port_t,
	upl_t,
	vm_offset_t,
	vm_object_offset_t,
	int,
	int,
	int *);
int
vnode_pageout(
	vnode_port_t,
	upl_t,
	vm_offset_t,
	vm_object_offset_t,
	int,
	int,
	int *);

vm_object_offset_t
vnode_pager_get_filesize(
	vnode_port_t);

vnode_pager_t
vnode_object_create(
	vnode_port_t vp);

vnode_pager_t
vnode_pager_lookup(
	memory_object_t);

void
vnode_pager_release_from_cache(
	int *cnt);
210
/* zone from which all struct vnode_pager instances are allocated */
zone_t	vnode_pager_zone;


#define	VNODE_PAGER_NULL	((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define	CLUSTER_SHIFT 	1

/* TODO: Should be set dynamically by vnode_pager_bootstrap() */
#define	MAX_VNODE		10000


#if DEBUG
int pagerdebug=0;

#define PAGER_ALL		0xffffffff
#define	PAGER_INIT		0x00000001
#define	PAGER_PAGEIN	0x00000002

/* A is a fully parenthesized printf argument list, expanded only when all
 * of LEVEL's bits are set in pagerdebug */
#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL)==LEVEL){printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
234
235 /*
236 * Routine: macx_triggers
237 * Function:
238 * Syscall interface to set the call backs for low and
239 * high water marks.
240 */
241 int
242 macx_triggers(
243 int hi_water,
244 int low_water,
245 int flags,
246 mach_port_t trigger_name)
247 {
248 kern_return_t kr;
249 memory_object_default_t default_pager;
250 ipc_port_t trigger_port;
251
252 default_pager = MEMORY_OBJECT_DEFAULT_NULL;
253 kr = host_default_memory_manager(host_priv_self(),
254 &default_pager, 0);
255 if(kr != KERN_SUCCESS) {
256 return EINVAL;
257 }
258 if (flags & HI_WAT_ALERT) {
259 trigger_port = trigger_name_to_port(trigger_name);
260 if(trigger_port == NULL) {
261 return EINVAL;
262 }
263 /* trigger_port is locked and active */
264 ipc_port_make_send_locked(trigger_port);
265 /* now unlocked */
266 default_pager_triggers(default_pager,
267 hi_water, low_water,
268 HI_WAT_ALERT, trigger_port);
269 }
270
271 if (flags & LO_WAT_ALERT) {
272 trigger_port = trigger_name_to_port(trigger_name);
273 if(trigger_port == NULL) {
274 return EINVAL;
275 }
276 /* trigger_port is locked and active */
277 ipc_port_make_send_locked(trigger_port);
278 /* and now its unlocked */
279 default_pager_triggers(default_pager,
280 hi_water, low_water,
281 LO_WAT_ALERT, trigger_port);
282 }
283
284 /*
285 * Set thread scheduling priority and policy for the current thread
286 * it is assumed for the time being that the thread setting the alert
287 * is the same one which will be servicing it.
288 */
289 {
290 struct policy_timeshare_base fifo_base;
291 struct policy_timeshare_limit fifo_limit;
292 policy_base_t base;
293 processor_set_t pset;
294 policy_limit_t limit;
295
296 pset = (current_thread())->processor_set;
297 base = (policy_base_t) &fifo_base;
298 limit = (policy_limit_t) &fifo_limit;
299 fifo_limit.max_priority = fifo_base.base_priority = MAXPRI_STANDARD;
300 thread_set_policy((current_thread())->top_act, pset, POLICY_FIFO, base, POLICY_TIMESHARE_BASE_COUNT, limit, POLICY_TIMESHARE_LIMIT_COUNT);
301 }
302
303 current_thread()->vm_privilege = TRUE;
304 }
305
306 /*
307 *
308 */
309 ipc_port_t
310 trigger_name_to_port(
311 mach_port_t trigger_name)
312 {
313 ipc_port_t trigger_port;
314 ipc_space_t space;
315
316 if (trigger_name == 0)
317 return (NULL);
318
319 space = current_space();
320 if(ipc_port_translate_receive(space, (mach_port_name_t)trigger_name,
321 &trigger_port) != KERN_SUCCESS)
322 return (NULL);
323 return trigger_port;
324 }
325
326 /*
327 *
328 */
329 void
330 vnode_pager_bootstrap(void)
331 {
332 register vm_size_t size;
333
334 size = (vm_size_t) sizeof(struct vnode_pager);
335 vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
336 PAGE_SIZE, "vnode pager structures");
337 return;
338 }
339
340 /*
341 *
342 */
343 memory_object_t
344 vnode_pager_setup(
345 vnode_port_t vp,
346 memory_object_t pager)
347 {
348 vnode_pager_t vnode_object;
349
350 vnode_object = vnode_object_create(vp);
351 if (vnode_object == VNODE_PAGER_NULL)
352 panic("vnode_pager_setup: vnode_object_create() failed");
353 return((memory_object_t)vnode_object);
354 }
355
356 /*
357 *
358 */
359 kern_return_t
360 vnode_pager_init(memory_object_t mem_obj,
361 memory_object_control_t control,
362 vm_size_t pg_size)
363 {
364 vnode_pager_t vnode_object;
365 kern_return_t kr;
366 memory_object_attr_info_data_t attributes;
367
368
369 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %x, %x, %x\n", pager, pager_request, pg_size));
370
371 if (control == MEMORY_OBJECT_CONTROL_NULL)
372 return KERN_INVALID_ARGUMENT;
373
374 vnode_object = vnode_pager_lookup(mem_obj);
375
376 memory_object_control_reference(control);
377 vnode_object->control_handle = control;
378
379 attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
380 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
381 attributes.cluster_size = (1 << (PAGE_SHIFT));
382 attributes.may_cache_object = TRUE;
383 attributes.temporary = TRUE;
384
385 kr = memory_object_change_attributes(
386 control,
387 MEMORY_OBJECT_ATTRIBUTE_INFO,
388 (memory_object_info_t) &attributes,
389 MEMORY_OBJECT_ATTR_INFO_COUNT);
390 if (kr != KERN_SUCCESS)
391 panic("vnode_pager_init: memory_object_change_attributes() failed");
392
393 return(KERN_SUCCESS);
394 }
395
396 /*
397 *
398 */
399 kern_return_t
400 vnode_pager_data_return(
401 memory_object_t mem_obj,
402 memory_object_offset_t offset,
403 vm_size_t data_cnt,
404 boolean_t dirty,
405 boolean_t kernel_copy)
406 {
407 register vnode_pager_t vnode_object;
408
409 vnode_object = vnode_pager_lookup(mem_obj);
410
411 vnode_pager_cluster_write(vnode_object, offset, data_cnt);
412
413 return KERN_SUCCESS;
414 }
415
416 kern_return_t
417 vnode_pager_data_initialize(
418 memory_object_t mem_obj,
419 memory_object_offset_t offset,
420 vm_size_t data_cnt)
421 {
422 return KERN_FAILURE;
423 }
424
425 kern_return_t
426 vnode_pager_data_unlock(
427 memory_object_t mem_obj,
428 memory_object_offset_t offset,
429 vm_size_t size,
430 vm_prot_t desired_access)
431 {
432 return KERN_FAILURE;
433 }
434
435 kern_return_t
436 vnode_pager_get_object_size(
437 memory_object_t mem_obj,
438 memory_object_offset_t *length)
439 {
440 vnode_pager_t vnode_object;
441
442 vnode_object = vnode_pager_lookup(mem_obj);
443
444 *length = vnode_pager_get_filesize(vnode_object->vnode_handle);
445 return KERN_SUCCESS;
446 }
447
448 /*
449 *
450 */
451 kern_return_t
452 vnode_pager_data_request(
453 memory_object_t mem_obj,
454 memory_object_offset_t offset,
455 vm_size_t length,
456 vm_prot_t protection_required)
457 {
458 register vnode_pager_t vnode_object;
459
460 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_data_request: %x, %x, %x, %x\n", mem_obj, offset, length, protection_required));
461
462 vnode_object = vnode_pager_lookup(mem_obj);
463
464 PAGER_DEBUG(PAGER_PAGEIN, ("vnode_pager_data_request: %x, %x, %x, %x, vnode_object %x\n", mem_obj, offset, length, protection_required, vnode_object));
465
466 vnode_pager_cluster_read(vnode_object, offset, length);
467
468 return KERN_SUCCESS;
469 }
470
471 /*
472 *
473 */
474 void
475 vnode_pager_reference(
476 memory_object_t mem_obj)
477 {
478 register vnode_pager_t vnode_object;
479 unsigned int new_ref_count;
480
481 vnode_object = vnode_pager_lookup(mem_obj);
482 new_ref_count = hw_atomic_add(&vnode_object->ref_count, 1);
483 assert(new_ref_count > 1);
484 }
485
486 /*
487 *
488 */
489 void
490 vnode_pager_deallocate(
491 memory_object_t mem_obj)
492 {
493 register vnode_pager_t vnode_object;
494
495 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %x\n", mem_obj));
496
497 vnode_object = vnode_pager_lookup(mem_obj);
498
499 if (hw_atomic_sub(&vnode_object->ref_count, 1) == 0) {
500 if (vnode_object->vnode_handle != (vnode_port_t) NULL) {
501 vnode_pager_vrele(vnode_object->vnode_handle);
502 }
503 zfree(vnode_pager_zone, (vm_offset_t) vnode_object);
504 }
505 return;
506 }
507
508 /*
509 *
510 */
511 kern_return_t
512 vnode_pager_terminate(
513 memory_object_t mem_obj)
514 {
515 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %x\n", mem_obj));
516
517 return(KERN_SUCCESS);
518 }
519
520 /*
521 *
522 */
523 kern_return_t
524 vnode_pager_synchronize(
525 memory_object_t mem_obj,
526 memory_object_offset_t offset,
527 vm_size_t length,
528 vm_sync_t sync_flags)
529 {
530 register vnode_pager_t vnode_object;
531
532 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %x\n", mem_obj));
533
534 vnode_object = vnode_pager_lookup(mem_obj);
535
536 memory_object_synchronize_completed(vnode_object->control_handle, offset, length);
537
538 return (KERN_SUCCESS);
539 }
540
541 /*
542 *
543 */
544 kern_return_t
545 vnode_pager_unmap(
546 memory_object_t mem_obj)
547 {
548 register vnode_pager_t vnode_object;
549
550 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_unmap: %x\n", mem_obj));
551
552 vnode_object = vnode_pager_lookup(mem_obj);
553
554 ubc_unmap(vnode_object->vnode_handle);
555 return KERN_SUCCESS;
556 }
557
558
559 /*
560 *
561 */
/*
 * Push `cnt` bytes (page-aligned) starting at `offset` out to the backing
 * vnode, in chunks of at most 32 pages per vnode_pageout call.  Errors
 * are accumulated in `error` but never reported -- the function is void
 * and the reporting code at the bottom is compiled out (#if 0).
 */
void
vnode_pager_cluster_write(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt)
{
	int		error = 0;
	int		local_error = 0;
	int		kret;
	int		size;

	/* the pageout path works on whole pages only */
	if (cnt & PAGE_MASK) {
		panic("vs_cluster_write: cnt not a multiple of PAGE_SIZE");
	}
	size = (cnt < (PAGE_SIZE*32)) ? cnt : (PAGE_SIZE*32); /* effective min */

	while (cnt) {

		kret = vnode_pageout(vnode_object->vnode_handle,
			(upl_t )NULL, (vm_offset_t)NULL,
			offset, size, 0, &local_error);
/*
	if(kret == PAGER_ABSENT) {
	Need to work out the defs here, 1 corresponds to
	PAGER_ABSENT defined in bsd/vm/vm_pager.h However,
	we should not be including that file here it is a
	layering violation.
*/
		/* "absent" chunk: build a throwaway internal UPL over the
		 * range just to collect the pages, then abort it, discarding
		 * them; the chunk is treated as successfully written. */
		if(kret == 1) {
			int	uplflags;
			upl_t	upl = NULL;
			int	count = 0;
			kern_return_t	kr;

			uplflags = (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
				    UPL_SET_INTERNAL | UPL_COPYOUT_FROM);
			count = 0;
			kr = memory_object_upl_request(
				vnode_object->control_handle,
				offset, size, &upl, NULL, &count, uplflags);
			if(kr != KERN_SUCCESS) {
				panic("vnode_pager_cluster_write: upl request failed\n");
			}
			upl_abort(upl, 0);
			upl_deallocate(upl);

			error = 0;
			local_error = 0;
		}

		/* remember the most recent per-chunk error, then clear it
		 * for the next iteration */
		if (local_error != 0) {
			error = local_error;
			local_error = 0;
		}
		cnt -= size;
		offset += size;
		size = (cnt < (PAGE_SIZE*32)) ? cnt : (PAGE_SIZE*32); /* effective min */
	}
#if 0
	if (error != 0)
		return(KERN_FAILURE);

	return(KERN_SUCCESS);
#endif /* 0 */
}
627
628
629 /*
630 *
631 */
/*
 * Read `cnt` bytes (page-aligned) at `offset` from the backing vnode in a
 * single vnode_pagein call.  Returns KERN_SUCCESS normally; returns
 * KERN_FAILURE when the pagein reports "absent" (magic return value 1 --
 * see the layering note below), after aborting a throwaway UPL over the
 * range.  Note that local_error from vnode_pagein is otherwise ignored.
 */
kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt)
{
	int		error = 0;
	int		local_error = 0;
	int		kret;

	/* the pagein path works on whole pages only */
	if(cnt & PAGE_MASK) {
		panic("vs_cluster_read: cnt not a multiple of PAGE_SIZE");
	}

	kret = vnode_pagein(vnode_object->vnode_handle, (upl_t)NULL, (vm_offset_t)NULL, offset, cnt, 0, &local_error);
/*
	if(kret == PAGER_ABSENT) {
	Need to work out the defs here, 1 corresponds to PAGER_ABSENT
	defined in bsd/vm/vm_pager.h However, we should not be including
	that file here it is a layering violation.
*/
	/* "absent" range: build an internal UPL just to gather the pages,
	 * abort it to release them, and report the read as failed. */
	if(kret == 1) {
		int	uplflags;
		upl_t	upl = NULL;
		int	count = 0;
		kern_return_t	kr;

		uplflags = (UPL_NO_SYNC |
			UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
		count = 0;
		kr = memory_object_upl_request(
			vnode_object->control_handle, offset, cnt,
			&upl, NULL, &count, uplflags);
		if(kr != KERN_SUCCESS) {
			panic("vnode_pager_cluster_read: upl request failed\n");
		}
		upl_abort(upl, 0);
		upl_deallocate(upl);

		error = 1;
	}

	if (error != 0)
		return(KERN_FAILURE);

	return(KERN_SUCCESS);

}
680
681
682 /*
683 *
684 */
685 void
686 vnode_pager_release_from_cache(
687 int *cnt)
688 {
689 memory_object_free_from_cache(
690 &realhost, &vnode_pager_workaround, cnt);
691 }
692
693 /*
694 *
695 */
696 vnode_pager_t
697 vnode_object_create(
698 vnode_port_t vp)
699 {
700 register vnode_pager_t vnode_object;
701
702 vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
703 if (vnode_object == VNODE_PAGER_NULL)
704 return(VNODE_PAGER_NULL);
705
706 /*
707 * The vm_map call takes both named entry ports and raw memory
708 * objects in the same parameter. We need to make sure that
709 * vm_map does not see this object as a named entry port. So,
710 * we reserve the second word in the object for a fake ip_kotype
711 * setting - that will tell vm_map to use it as a memory object.
712 */
713 vnode_object->pager = &vnode_pager_workaround;
714 vnode_object->pager_ikot = IKOT_MEMORY_OBJECT;
715 vnode_object->ref_count = 1;
716 vnode_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
717 vnode_object->vnode_handle = vp;
718
719 return(vnode_object);
720 }
721
722 /*
723 *
724 */
725 vnode_pager_t
726 vnode_pager_lookup(
727 memory_object_t name)
728 {
729 vnode_pager_t vnode_object;
730
731 vnode_object = (vnode_pager_t)name;
732 assert(vnode_object->pager == &vnode_pager_workaround);
733 return (vnode_object);
734 }
735