]> git.saurik.com Git - apple/xnu.git/blob - osfmk/vm/bsd_vm.c
xnu-517.9.5.tar.gz
[apple/xnu.git] / osfmk / vm / bsd_vm.c
1 /*
2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <sys/errno.h>
24 #include <kern/host.h>
25 #include <mach/mach_types.h>
26 #include <vm/vm_map.h>
27 #include <vm/vm_kern.h>
28 #include <vm/vm_pageout.h>
29 #include <mach/kern_return.h>
30 #include <mach/memory_object_types.h>
31 #include <mach/port.h>
32 #include <mach/policy.h>
33 #include <ipc/ipc_port.h>
34 #include <ipc/ipc_space.h>
35 #include <kern/thread.h>
36 #include <vm/memory_object.h>
37 #include <vm/vm_pageout.h>
38
39 #include <default_pager/default_pager_types.h>
40
/* BSD VM COMPONENT INTERFACES */

/*
 * Forward declarations for the vm_map inspection helpers defined
 * immediately below; exported to the BSD side of the kernel.
 */
int
get_map_nentries(
	vm_map_t);

vm_offset_t
get_map_start(
	vm_map_t);

vm_offset_t
get_map_end(
	vm_map_t);
53
54 /*
55 *
56 */
57 int
58 get_map_nentries(
59 vm_map_t map)
60 {
61 return(map->hdr.nentries);
62 }
63
64 /*
65 *
66 */
67 vm_offset_t
68 get_map_start(
69 vm_map_t map)
70 {
71 return(vm_map_first_entry(map)->vme_start);
72 }
73
74 /*
75 *
76 */
77 vm_offset_t
78 get_map_end(
79 vm_map_t map)
80 {
81 return(vm_map_last_entry(map)->vme_end);
82 }
83
/*
 * BSD VNODE PAGER
 */

/* until component support available */
int	vnode_pager_workaround;

typedef int vnode_port_t;

/*
 * Per-vnode pager state.  NOTE(layout): the first two fields mimic the
 * start of an ipc_port so that vm_map can distinguish this object from
 * a named entry port — see the comment in vnode_object_create().  Do
 * not reorder the fields.
 */
typedef struct vnode_pager {
	int *pager;			/* pager workaround pointer  */
	unsigned int		pager_ikot;	/* JMM: fake ip_kotype()     */
	unsigned int		ref_count;	/* reference count	     */
	memory_object_control_t control_handle;	/* mem object control handle */
	vnode_port_t		vnode_handle;	/* vnode handle 	     */
} *vnode_pager_t;
100
101
/*
 * Forward declarations: the memory_object entry points implemented in
 * this file, the BSD-side pagein/pageout hooks (implemented elsewhere),
 * and internal helpers.
 */
ipc_port_t
trigger_name_to_port(
	mach_port_t);

void
vnode_pager_bootstrap(
	void);

void
vnode_pager_alloc_map(
	void);

memory_object_t
vnode_pager_setup(
	vnode_port_t,
	memory_object_t);


kern_return_t
vnode_pager_init(
	memory_object_t,
	memory_object_control_t,
	vm_size_t);

kern_return_t
vnode_pager_get_object_size(
	memory_object_t,
	memory_object_offset_t *);

kern_return_t
vnode_pager_data_request(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	vm_prot_t);

kern_return_t
vnode_pager_data_return(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	boolean_t,
	boolean_t);

kern_return_t
vnode_pager_data_initialize(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t);

void
vnode_pager_deallocate(
	memory_object_t);

kern_return_t
vnode_pager_terminate(
	memory_object_t);

kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t);

void
vnode_pager_cluster_write(
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t);


/* BSD-side hooks: perform the actual file I/O for a pager request. */
int
vnode_pagein(
	vnode_port_t,
	upl_t,
	vm_offset_t,
	vm_object_offset_t,
	int,
	int,
	int *);
int
vnode_pageout(
	vnode_port_t,
	upl_t,
	vm_offset_t,
	vm_object_offset_t,
	int,
	int,
	int *);

vm_object_offset_t
vnode_pager_get_filesize(
	vnode_port_t);

vnode_pager_t
vnode_object_create(
	vnode_port_t vp);

vnode_pager_t
vnode_pager_lookup(
	memory_object_t);

void
vnode_pager_release_from_cache(
	int	*cnt);
207
/* Zone from which struct vnode_pager allocations are made. */
zone_t	vnode_pager_zone;


#define	VNODE_PAGER_NULL	((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define CLUSTER_SHIFT 	1

/* TODO: Should be set dynamically by vnode_pager_bootstrap() */
#define	MAX_VNODE		10000


#if DEBUG
/* Bitmask of enabled trace categories; 0 disables all tracing. */
int pagerdebug=0;

#define PAGER_ALL		0xffffffff
#define	PAGER_INIT		0x00000001
#define	PAGER_PAGEIN	0x00000002

/* Emit the printf argument list A when category LEVEL is enabled. */
#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL)==LEVEL){printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
231
232 /*
233 * Routine: macx_triggers
234 * Function:
235 * Syscall interface to set the call backs for low and
236 * high water marks.
237 */
238 int
239 macx_triggers(
240 int hi_water,
241 int low_water,
242 int flags,
243 mach_port_t trigger_name)
244 {
245 kern_return_t kr;
246 memory_object_default_t default_pager;
247 ipc_port_t trigger_port;
248
249 default_pager = MEMORY_OBJECT_DEFAULT_NULL;
250 kr = host_default_memory_manager(host_priv_self(),
251 &default_pager, 0);
252 if(kr != KERN_SUCCESS) {
253 return EINVAL;
254 }
255 if (flags & HI_WAT_ALERT) {
256 trigger_port = trigger_name_to_port(trigger_name);
257 if(trigger_port == NULL) {
258 return EINVAL;
259 }
260 /* trigger_port is locked and active */
261 ipc_port_make_send_locked(trigger_port);
262 /* now unlocked */
263 default_pager_triggers(default_pager,
264 hi_water, low_water,
265 HI_WAT_ALERT, trigger_port);
266 }
267
268 if (flags & LO_WAT_ALERT) {
269 trigger_port = trigger_name_to_port(trigger_name);
270 if(trigger_port == NULL) {
271 return EINVAL;
272 }
273 /* trigger_port is locked and active */
274 ipc_port_make_send_locked(trigger_port);
275 /* and now its unlocked */
276 default_pager_triggers(default_pager,
277 hi_water, low_water,
278 LO_WAT_ALERT, trigger_port);
279 }
280
281 /*
282 * Set thread scheduling priority and policy for the current thread
283 * it is assumed for the time being that the thread setting the alert
284 * is the same one which will be servicing it.
285 *
286 * XXX This does not belong in the kernel XXX
287 */
288 {
289 thread_precedence_policy_data_t pre;
290 thread_extended_policy_data_t ext;
291
292 ext.timeshare = FALSE;
293 pre.importance = INT32_MAX;
294
295 thread_policy_set(current_act(),
296 THREAD_EXTENDED_POLICY, (thread_policy_t)&ext,
297 THREAD_EXTENDED_POLICY_COUNT);
298
299 thread_policy_set(current_act(),
300 THREAD_PRECEDENCE_POLICY, (thread_policy_t)&pre,
301 THREAD_PRECEDENCE_POLICY_COUNT);
302 }
303
304 current_thread()->vm_privilege = TRUE;
305 }
306
307 /*
308 *
309 */
310 ipc_port_t
311 trigger_name_to_port(
312 mach_port_t trigger_name)
313 {
314 ipc_port_t trigger_port;
315 ipc_space_t space;
316
317 if (trigger_name == 0)
318 return (NULL);
319
320 space = current_space();
321 if(ipc_port_translate_receive(space, (mach_port_name_t)trigger_name,
322 &trigger_port) != KERN_SUCCESS)
323 return (NULL);
324 return trigger_port;
325 }
326
327 /*
328 *
329 */
330 void
331 vnode_pager_bootstrap(void)
332 {
333 register vm_size_t size;
334
335 size = (vm_size_t) sizeof(struct vnode_pager);
336 vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
337 PAGE_SIZE, "vnode pager structures");
338 return;
339 }
340
341 /*
342 *
343 */
344 memory_object_t
345 vnode_pager_setup(
346 vnode_port_t vp,
347 memory_object_t pager)
348 {
349 vnode_pager_t vnode_object;
350
351 vnode_object = vnode_object_create(vp);
352 if (vnode_object == VNODE_PAGER_NULL)
353 panic("vnode_pager_setup: vnode_object_create() failed");
354 return((memory_object_t)vnode_object);
355 }
356
357 /*
358 *
359 */
360 kern_return_t
361 vnode_pager_init(memory_object_t mem_obj,
362 memory_object_control_t control,
363 vm_size_t pg_size)
364 {
365 vnode_pager_t vnode_object;
366 kern_return_t kr;
367 memory_object_attr_info_data_t attributes;
368
369
370 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %x, %x, %x\n", pager, pager_request, pg_size));
371
372 if (control == MEMORY_OBJECT_CONTROL_NULL)
373 return KERN_INVALID_ARGUMENT;
374
375 vnode_object = vnode_pager_lookup(mem_obj);
376
377 memory_object_control_reference(control);
378 vnode_object->control_handle = control;
379
380 attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
381 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
382 attributes.cluster_size = (1 << (PAGE_SHIFT));
383 attributes.may_cache_object = TRUE;
384 attributes.temporary = TRUE;
385
386 kr = memory_object_change_attributes(
387 control,
388 MEMORY_OBJECT_ATTRIBUTE_INFO,
389 (memory_object_info_t) &attributes,
390 MEMORY_OBJECT_ATTR_INFO_COUNT);
391 if (kr != KERN_SUCCESS)
392 panic("vnode_pager_init: memory_object_change_attributes() failed");
393
394 return(KERN_SUCCESS);
395 }
396
397 /*
398 *
399 */
400 kern_return_t
401 vnode_pager_data_return(
402 memory_object_t mem_obj,
403 memory_object_offset_t offset,
404 vm_size_t data_cnt,
405 boolean_t dirty,
406 boolean_t kernel_copy)
407 {
408 register vnode_pager_t vnode_object;
409
410 vnode_object = vnode_pager_lookup(mem_obj);
411
412 vnode_pager_cluster_write(vnode_object, offset, data_cnt);
413
414 return KERN_SUCCESS;
415 }
416
/*
 * Not supported by the vnode pager; always fails.
 */
kern_return_t
vnode_pager_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt)
{
	return KERN_FAILURE;
}
425
/*
 * Not supported by the vnode pager; always fails.
 */
kern_return_t
vnode_pager_data_unlock(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		size,
	vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}
435
436 kern_return_t
437 vnode_pager_get_object_size(
438 memory_object_t mem_obj,
439 memory_object_offset_t *length)
440 {
441 vnode_pager_t vnode_object;
442
443 vnode_object = vnode_pager_lookup(mem_obj);
444
445 *length = vnode_pager_get_filesize(vnode_object->vnode_handle);
446 return KERN_SUCCESS;
447 }
448
449 /*
450 *
451 */
452 kern_return_t
453 vnode_pager_data_request(
454 memory_object_t mem_obj,
455 memory_object_offset_t offset,
456 vm_size_t length,
457 vm_prot_t protection_required)
458 {
459 register vnode_pager_t vnode_object;
460
461 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_data_request: %x, %x, %x, %x\n", mem_obj, offset, length, protection_required));
462
463 vnode_object = vnode_pager_lookup(mem_obj);
464
465 PAGER_DEBUG(PAGER_PAGEIN, ("vnode_pager_data_request: %x, %x, %x, %x, vnode_object %x\n", mem_obj, offset, length, protection_required, vnode_object));
466
467 vnode_pager_cluster_read(vnode_object, offset, length);
468
469 return KERN_SUCCESS;
470 }
471
472 /*
473 *
474 */
475 void
476 vnode_pager_reference(
477 memory_object_t mem_obj)
478 {
479 register vnode_pager_t vnode_object;
480 unsigned int new_ref_count;
481
482 vnode_object = vnode_pager_lookup(mem_obj);
483 new_ref_count = hw_atomic_add(&vnode_object->ref_count, 1);
484 assert(new_ref_count > 1);
485 }
486
487 /*
488 *
489 */
490 void
491 vnode_pager_deallocate(
492 memory_object_t mem_obj)
493 {
494 register vnode_pager_t vnode_object;
495
496 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %x\n", mem_obj));
497
498 vnode_object = vnode_pager_lookup(mem_obj);
499
500 if (hw_atomic_sub(&vnode_object->ref_count, 1) == 0) {
501 if (vnode_object->vnode_handle != (vnode_port_t) NULL) {
502 vnode_pager_vrele(vnode_object->vnode_handle);
503 }
504 zfree(vnode_pager_zone, (vm_offset_t) vnode_object);
505 }
506 return;
507 }
508
509 /*
510 *
511 */
512 kern_return_t
513 vnode_pager_terminate(
514 memory_object_t mem_obj)
515 {
516 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %x\n", mem_obj));
517
518 return(KERN_SUCCESS);
519 }
520
521 /*
522 *
523 */
524 kern_return_t
525 vnode_pager_synchronize(
526 memory_object_t mem_obj,
527 memory_object_offset_t offset,
528 vm_size_t length,
529 vm_sync_t sync_flags)
530 {
531 register vnode_pager_t vnode_object;
532
533 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %x\n", mem_obj));
534
535 vnode_object = vnode_pager_lookup(mem_obj);
536
537 memory_object_synchronize_completed(vnode_object->control_handle, offset, length);
538
539 return (KERN_SUCCESS);
540 }
541
542 /*
543 *
544 */
545 kern_return_t
546 vnode_pager_unmap(
547 memory_object_t mem_obj)
548 {
549 register vnode_pager_t vnode_object;
550
551 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_unmap: %x\n", mem_obj));
552
553 vnode_object = vnode_pager_lookup(mem_obj);
554
555 ubc_unmap(vnode_object->vnode_handle);
556 return KERN_SUCCESS;
557 }
558
559
/*
 * Write "cnt" bytes (must be a page multiple) starting at "offset"
 * back to the vnode, in chunks of at most 32 pages.  When
 * vnode_pageout() reports the pages as absent (literal 1 — see the
 * layering note below), the pages are reclaimed by requesting a UPL
 * over the chunk and aborting it.  Errors are accumulated in "error"
 * but are currently NOT reported to the caller (function is void;
 * see the #if 0 tail).
 */
void
vnode_pager_cluster_write(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt)
{
	int		error = 0;
	int		local_error = 0;
	int		kret;
	int		size;

	if (cnt & PAGE_MASK) {
		panic("vs_cluster_write: cnt not a multiple of PAGE_SIZE");
	}
	/* Chunk size: the whole remaining request, capped at 32 pages. */
	size = (cnt < (PAGE_SIZE*32)) ? cnt : (PAGE_SIZE*32); /* effective min */

	while (cnt) {

		kret = vnode_pageout(vnode_object->vnode_handle,
			(upl_t )NULL, (vm_offset_t)NULL,
			offset, size, 0, &local_error);
/*
		if(kret == PAGER_ABSENT) {
			Need to work out the defs here, 1 corresponds to
			PAGER_ABSENT defined in bsd/vm/vm_pager.h However,
			we should not be including that file here it is a
			layering violation.
*/
		if(kret == 1) {
			/*
			 * kret == 1 is PAGER_ABSENT (see note above):
			 * pull the pages into an internal UPL and abort
			 * it so they are released, then clear any error
			 * recorded for this chunk.
			 */
			int	uplflags;
			upl_t	upl = NULL;
			int	count = 0;
			kern_return_t	kr;

			uplflags = (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
				    UPL_SET_INTERNAL | UPL_COPYOUT_FROM);
			count = 0;
			kr = memory_object_upl_request(
				vnode_object->control_handle,
				offset, size, &upl, NULL, &count, uplflags);
			if(kr != KERN_SUCCESS) {
				panic("vnode_pager_cluster_write: upl request failed\n");
			}
			upl_abort(upl, 0);
			upl_deallocate(upl);

			error = 0;
			local_error = 0;
		}

		/* Remember the most recent per-chunk error. */
		if (local_error != 0) {
			error = local_error;
			local_error = 0;
		}
		cnt -= size;
		offset += size;
		size = (cnt < (PAGE_SIZE*32)) ? cnt : (PAGE_SIZE*32); /* effective min */
	}
#if 0
	if (error != 0)
		return(KERN_FAILURE);

	return(KERN_SUCCESS);
#endif /* 0 */
}
628
629
/*
 * Read "cnt" bytes (must be a page multiple) at "offset" from the
 * vnode into the memory object.  If vnode_pagein() reports the pages
 * as absent (literal 1 — see the layering note below), the pages are
 * released via a UPL abort and KERN_FAILURE is returned; otherwise
 * KERN_SUCCESS.
 */
kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt)
{
	int		error = 0;
	int		local_error = 0;
	int		kret;

	if(cnt & PAGE_MASK) {
		panic("vs_cluster_read: cnt not a multiple of PAGE_SIZE");
	}

	kret = vnode_pagein(vnode_object->vnode_handle, (upl_t)NULL, (vm_offset_t)NULL, offset, cnt, 0, &local_error);
/*
	if(kret == PAGER_ABSENT) {
	Need to work out the defs here, 1 corresponds to PAGER_ABSENT
	defined in bsd/vm/vm_pager.h However, we should not be including
	that file here it is a layering violation.
*/
	if(kret == 1) {
		/*
		 * kret == 1 is PAGER_ABSENT (see note above): pull the
		 * pages into an internal UPL and abort it so they are
		 * released, then report failure to the caller.
		 */
		int	uplflags;
		upl_t	upl = NULL;
		int	count = 0;
		kern_return_t	kr;

		uplflags = (UPL_NO_SYNC |
			UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
		count = 0;
		kr = memory_object_upl_request(
			vnode_object->control_handle, offset, cnt,
			&upl, NULL, &count, uplflags);
		if(kr != KERN_SUCCESS) {
			panic("vnode_pager_cluster_read: upl request failed\n");
		}
		upl_abort(upl, 0);
		upl_deallocate(upl);

		error = 1;
	}

	if (error != 0)
		return(KERN_FAILURE);

	return(KERN_SUCCESS);

}
681
682
683 /*
684 *
685 */
686 void
687 vnode_pager_release_from_cache(
688 int *cnt)
689 {
690 memory_object_free_from_cache(
691 &realhost, &vnode_pager_workaround, cnt);
692 }
693
694 /*
695 *
696 */
697 vnode_pager_t
698 vnode_object_create(
699 vnode_port_t vp)
700 {
701 register vnode_pager_t vnode_object;
702
703 vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
704 if (vnode_object == VNODE_PAGER_NULL)
705 return(VNODE_PAGER_NULL);
706
707 /*
708 * The vm_map call takes both named entry ports and raw memory
709 * objects in the same parameter. We need to make sure that
710 * vm_map does not see this object as a named entry port. So,
711 * we reserve the second word in the object for a fake ip_kotype
712 * setting - that will tell vm_map to use it as a memory object.
713 */
714 vnode_object->pager = &vnode_pager_workaround;
715 vnode_object->pager_ikot = IKOT_MEMORY_OBJECT;
716 vnode_object->ref_count = 1;
717 vnode_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
718 vnode_object->vnode_handle = vp;
719
720 return(vnode_object);
721 }
722
723 /*
724 *
725 */
726 vnode_pager_t
727 vnode_pager_lookup(
728 memory_object_t name)
729 {
730 vnode_pager_t vnode_object;
731
732 vnode_object = (vnode_pager_t)name;
733 assert(vnode_object->pager == &vnode_pager_workaround);
734 return (vnode_object);
735 }
736