]> git.saurik.com Git - apple/xnu.git/blob - osfmk/vm/bsd_vm.c
xnu-344.26.tar.gz
[apple/xnu.git] / osfmk / vm / bsd_vm.c
1 /*
2 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <sys/errno.h>
24 #include <kern/host.h>
25 #include <mach/mach_types.h>
26 #include <vm/vm_map.h>
27 #include <vm/vm_kern.h>
28 #include <vm/vm_pageout.h>
29 #include <mach/kern_return.h>
30 #include <mach/memory_object_types.h>
31 #include <mach/port.h>
32 #include <mach/policy.h>
33 #include <ipc/ipc_port.h>
34 #include <ipc/ipc_space.h>
35 #include <kern/thread.h>
36 #include <vm/memory_object.h>
37 #include <vm/vm_pageout.h>
38
39 #include <default_pager/default_pager_types.h>
40
41 /* BSD VM COMPONENT INTERFACES */
42 int
43 get_map_nentries(
44 vm_map_t);
45
46 vm_offset_t
47 get_map_start(
48 vm_map_t);
49
50 vm_offset_t
51 get_map_end(
52 vm_map_t);
53
54 /*
55 *
56 */
57 int
58 get_map_nentries(
59 vm_map_t map)
60 {
61 return(map->hdr.nentries);
62 }
63
64 /*
65 *
66 */
67 vm_offset_t
68 get_map_start(
69 vm_map_t map)
70 {
71 return(vm_map_first_entry(map)->vme_start);
72 }
73
74 /*
75 *
76 */
77 vm_offset_t
78 get_map_end(
79 vm_map_t map)
80 {
81 return(vm_map_last_entry(map)->vme_end);
82 }
83
84 /*
85 * BSD VNODE PAGER
86 */
87
88 /* until component support available */
89 int vnode_pager_workaround;
90
/* Opaque handle for the BSD vnode this pager fronts. */
typedef int vnode_port_t;

/*
 * Per-vnode pager state.  The first two fields deliberately mimic the
 * layout the VM layer expects when it peeks at a memory object to
 * decide whether it is a named entry port or a real pager (see
 * vnode_object_create for the fake ip_kotype trick).
 */
typedef struct vnode_pager {
	int			*pager;		/* pager workaround pointer; always &vnode_pager_workaround for this pager type */
	unsigned int		pager_ikot;	/* JMM: fake ip_kotype(); set to IKOT_MEMORY_OBJECT */
	unsigned int		ref_count;	/* reference count; manipulated with hw_atomic_add/sub */
	memory_object_control_t control_handle;	/* mem object control handle supplied by vnode_pager_init */
	vnode_port_t		vnode_handle;	/* vnode handle this pager is backed by */
} *vnode_pager_t;
100
101
102 ipc_port_t
103 trigger_name_to_port(
104 mach_port_t);
105
106 void
107 vnode_pager_bootstrap(
108 void);
109
110 void
111 vnode_pager_alloc_map(
112 void);
113
114 memory_object_t
115 vnode_pager_setup(
116 vnode_port_t,
117 memory_object_t);
118
119
120 kern_return_t
121 vnode_pager_init(
122 memory_object_t,
123 memory_object_control_t,
124 vm_size_t);
125
126 kern_return_t
127 vnode_pager_get_object_size(
128 memory_object_t,
129 memory_object_offset_t *);
130
131 kern_return_t
132 vnode_pager_data_request(
133 memory_object_t,
134 memory_object_offset_t,
135 vm_size_t,
136 vm_prot_t);
137
138 kern_return_t
139 vnode_pager_data_return(
140 memory_object_t,
141 memory_object_offset_t,
142 vm_size_t,
143 boolean_t,
144 boolean_t);
145
146 kern_return_t
147 vnode_pager_data_initialize(
148 memory_object_t,
149 memory_object_offset_t,
150 vm_size_t);
151
152 void
153 vnode_pager_deallocate(
154 memory_object_t);
155
156 kern_return_t
157 vnode_pager_terminate(
158 memory_object_t);
159
160 kern_return_t
161 vnode_pager_cluster_read(
162 vnode_pager_t,
163 vm_object_offset_t,
164 vm_size_t);
165
166 void
167 vnode_pager_cluster_write(
168 vnode_pager_t,
169 vm_object_offset_t,
170 vm_size_t);
171
172
173 int
174 vnode_pagein(
175 vnode_port_t,
176 upl_t,
177 vm_offset_t,
178 vm_object_offset_t,
179 int,
180 int,
181 int *);
182 int
183 vnode_pageout(
184 vnode_port_t,
185 upl_t,
186 vm_offset_t,
187 vm_object_offset_t,
188 int,
189 int,
190 int *);
191
192 vm_object_offset_t
193 vnode_pager_get_filesize(
194 vnode_port_t);
195
196 vnode_pager_t
197 vnode_object_create(
198 vnode_port_t vp);
199
200 vnode_pager_t
201 vnode_pager_lookup(
202 memory_object_t);
203
204 void
205 vnode_pager_release_from_cache(
206 int *cnt);
207
208 zone_t vnode_pager_zone;
209
210
211 #define VNODE_PAGER_NULL ((vnode_pager_t) 0)
212
213 /* TODO: Should be set dynamically by vnode_pager_init() */
214 #define CLUSTER_SHIFT 1
215
216 /* TODO: Should be set dynamically by vnode_pager_bootstrap() */
217 #define MAX_VNODE 10000
218
219
220 #if DEBUG
221 int pagerdebug=0;
222
223 #define PAGER_ALL 0xffffffff
224 #define PAGER_INIT 0x00000001
225 #define PAGER_PAGEIN 0x00000002
226
227 #define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL)==LEVEL){printf A;}}
228 #else
229 #define PAGER_DEBUG(LEVEL, A)
230 #endif
231
232 /*
233 * Routine: macx_triggers
234 * Function:
235 * Syscall interface to set the call backs for low and
236 * high water marks.
237 */
238 int
239 macx_triggers(
240 int hi_water,
241 int low_water,
242 int flags,
243 mach_port_t trigger_name)
244 {
245 kern_return_t kr;
246 memory_object_default_t default_pager;
247 ipc_port_t trigger_port;
248
249 default_pager = MEMORY_OBJECT_DEFAULT_NULL;
250 kr = host_default_memory_manager(host_priv_self(),
251 &default_pager, 0);
252 if(kr != KERN_SUCCESS) {
253 return EINVAL;
254 }
255 if (flags & HI_WAT_ALERT) {
256 trigger_port = trigger_name_to_port(trigger_name);
257 if(trigger_port == NULL) {
258 return EINVAL;
259 }
260 /* trigger_port is locked and active */
261 ipc_port_make_send_locked(trigger_port);
262 /* now unlocked */
263 default_pager_triggers(default_pager,
264 hi_water, low_water,
265 HI_WAT_ALERT, trigger_port);
266 }
267
268 if (flags & LO_WAT_ALERT) {
269 trigger_port = trigger_name_to_port(trigger_name);
270 if(trigger_port == NULL) {
271 return EINVAL;
272 }
273 /* trigger_port is locked and active */
274 ipc_port_make_send_locked(trigger_port);
275 /* and now its unlocked */
276 default_pager_triggers(default_pager,
277 hi_water, low_water,
278 LO_WAT_ALERT, trigger_port);
279 }
280
281 /*
282 * Set thread scheduling priority and policy for the current thread
283 * it is assumed for the time being that the thread setting the alert
284 * is the same one which will be servicing it.
285 */
286 {
287 struct policy_timeshare_base fifo_base;
288 struct policy_timeshare_limit fifo_limit;
289 policy_base_t base;
290 processor_set_t pset;
291 policy_limit_t limit;
292
293 pset = (current_thread())->processor_set;
294 base = (policy_base_t) &fifo_base;
295 limit = (policy_limit_t) &fifo_limit;
296 fifo_limit.max_priority = fifo_base.base_priority = MAXPRI_STANDARD;
297 thread_set_policy((current_thread())->top_act, pset, POLICY_FIFO, base, POLICY_TIMESHARE_BASE_COUNT, limit, POLICY_TIMESHARE_LIMIT_COUNT);
298 }
299
300 current_thread()->vm_privilege = TRUE;
301 }
302
303 /*
304 *
305 */
306 ipc_port_t
307 trigger_name_to_port(
308 mach_port_t trigger_name)
309 {
310 ipc_port_t trigger_port;
311 ipc_space_t space;
312
313 if (trigger_name == 0)
314 return (NULL);
315
316 space = current_space();
317 if(ipc_port_translate_receive(space, (mach_port_name_t)trigger_name,
318 &trigger_port) != KERN_SUCCESS)
319 return (NULL);
320 return trigger_port;
321 }
322
323 /*
324 *
325 */
326 void
327 vnode_pager_bootstrap(void)
328 {
329 register vm_size_t size;
330
331 size = (vm_size_t) sizeof(struct vnode_pager);
332 vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
333 PAGE_SIZE, "vnode pager structures");
334 return;
335 }
336
337 /*
338 *
339 */
340 memory_object_t
341 vnode_pager_setup(
342 vnode_port_t vp,
343 memory_object_t pager)
344 {
345 vnode_pager_t vnode_object;
346
347 vnode_object = vnode_object_create(vp);
348 if (vnode_object == VNODE_PAGER_NULL)
349 panic("vnode_pager_setup: vnode_object_create() failed");
350 return((memory_object_t)vnode_object);
351 }
352
353 /*
354 *
355 */
356 kern_return_t
357 vnode_pager_init(memory_object_t mem_obj,
358 memory_object_control_t control,
359 vm_size_t pg_size)
360 {
361 vnode_pager_t vnode_object;
362 kern_return_t kr;
363 memory_object_attr_info_data_t attributes;
364
365
366 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %x, %x, %x\n", pager, pager_request, pg_size));
367
368 if (control == MEMORY_OBJECT_CONTROL_NULL)
369 return KERN_INVALID_ARGUMENT;
370
371 vnode_object = vnode_pager_lookup(mem_obj);
372
373 memory_object_control_reference(control);
374 vnode_object->control_handle = control;
375
376 attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
377 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
378 attributes.cluster_size = (1 << (PAGE_SHIFT));
379 attributes.may_cache_object = TRUE;
380 attributes.temporary = TRUE;
381
382 kr = memory_object_change_attributes(
383 control,
384 MEMORY_OBJECT_ATTRIBUTE_INFO,
385 (memory_object_info_t) &attributes,
386 MEMORY_OBJECT_ATTR_INFO_COUNT);
387 if (kr != KERN_SUCCESS)
388 panic("vnode_pager_init: memory_object_change_attributes() failed");
389
390 return(KERN_SUCCESS);
391 }
392
393 /*
394 *
395 */
396 kern_return_t
397 vnode_pager_data_return(
398 memory_object_t mem_obj,
399 memory_object_offset_t offset,
400 vm_size_t data_cnt,
401 boolean_t dirty,
402 boolean_t kernel_copy)
403 {
404 register vnode_pager_t vnode_object;
405
406 vnode_object = vnode_pager_lookup(mem_obj);
407
408 vnode_pager_cluster_write(vnode_object, offset, data_cnt);
409
410 return KERN_SUCCESS;
411 }
412
/*
 *	Unsupported operation for vnode-backed objects: this pager does
 *	not accept data_initialize requests, so always report failure.
 */
kern_return_t
vnode_pager_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt)
{
	return KERN_FAILURE;
}
421
/*
 *	Unsupported operation for vnode-backed objects: the vnode pager
 *	never locks pages against access, so unlock requests always fail.
 */
kern_return_t
vnode_pager_data_unlock(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		size,
	vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}
431
432 kern_return_t
433 vnode_pager_get_object_size(
434 memory_object_t mem_obj,
435 memory_object_offset_t *length)
436 {
437 vnode_pager_t vnode_object;
438
439 vnode_object = vnode_pager_lookup(mem_obj);
440
441 *length = vnode_pager_get_filesize(vnode_object->vnode_handle);
442 return KERN_SUCCESS;
443 }
444
445 /*
446 *
447 */
448 kern_return_t
449 vnode_pager_data_request(
450 memory_object_t mem_obj,
451 memory_object_offset_t offset,
452 vm_size_t length,
453 vm_prot_t protection_required)
454 {
455 register vnode_pager_t vnode_object;
456
457 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_data_request: %x, %x, %x, %x\n", mem_obj, offset, length, protection_required));
458
459 vnode_object = vnode_pager_lookup(mem_obj);
460
461 PAGER_DEBUG(PAGER_PAGEIN, ("vnode_pager_data_request: %x, %x, %x, %x, vnode_object %x\n", mem_obj, offset, length, protection_required, vnode_object));
462
463 vnode_pager_cluster_read(vnode_object, offset, length);
464
465 return KERN_SUCCESS;
466 }
467
468 /*
469 *
470 */
471 void
472 vnode_pager_reference(
473 memory_object_t mem_obj)
474 {
475 register vnode_pager_t vnode_object;
476 unsigned int new_ref_count;
477
478 vnode_object = vnode_pager_lookup(mem_obj);
479 new_ref_count = hw_atomic_add(&vnode_object->ref_count, 1);
480 assert(new_ref_count > 1);
481 }
482
483 /*
484 *
485 */
486 void
487 vnode_pager_deallocate(
488 memory_object_t mem_obj)
489 {
490 register vnode_pager_t vnode_object;
491
492 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %x\n", mem_obj));
493
494 vnode_object = vnode_pager_lookup(mem_obj);
495
496 if (hw_atomic_sub(&vnode_object->ref_count, 1) == 0) {
497 if (vnode_object->vnode_handle != (vnode_port_t) NULL) {
498 vnode_pager_vrele(vnode_object->vnode_handle);
499 }
500 zfree(vnode_pager_zone, (vm_offset_t) vnode_object);
501 }
502 return;
503 }
504
505 /*
506 *
507 */
508 kern_return_t
509 vnode_pager_terminate(
510 memory_object_t mem_obj)
511 {
512 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %x\n", mem_obj));
513
514 return(KERN_SUCCESS);
515 }
516
517 /*
518 *
519 */
520 kern_return_t
521 vnode_pager_synchronize(
522 memory_object_t mem_obj,
523 memory_object_offset_t offset,
524 vm_size_t length,
525 vm_sync_t sync_flags)
526 {
527 register vnode_pager_t vnode_object;
528
529 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %x\n", mem_obj));
530
531 vnode_object = vnode_pager_lookup(mem_obj);
532
533 memory_object_synchronize_completed(vnode_object->control_handle, offset, length);
534
535 return (KERN_SUCCESS);
536 }
537
538 /*
539 *
540 */
541 kern_return_t
542 vnode_pager_unmap(
543 memory_object_t mem_obj)
544 {
545 register vnode_pager_t vnode_object;
546
547 PAGER_DEBUG(PAGER_ALL, ("vnode_pager_unmap: %x\n", mem_obj));
548
549 vnode_object = vnode_pager_lookup(mem_obj);
550
551 ubc_unmap(vnode_object->vnode_handle);
552 return KERN_SUCCESS;
553 }
554
555
/*
 *	Push "cnt" bytes (a whole number of pages) starting at "offset"
 *	out to the backing vnode, in chunks of at most 32 pages.  When
 *	vnode_pageout reports the data absent (return value 1 — see the
 *	layering note below), the corresponding pages are reclaimed from
 *	the object via an aborted UPL instead of being written.
 *	Errors from vnode_pageout are latched into "error" but, with the
 *	return path compiled out (#if 0), are ultimately discarded.
 */
void
vnode_pager_cluster_write(
	vnode_pager_t	vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt)
{
	int		error = 0;
	int		local_error = 0;	/* per-chunk error from vnode_pageout */
	int		kret;
	int		size;			/* bytes attempted this iteration */

	/* caller contract: range must be page-aligned in length */
	if (cnt & PAGE_MASK) {
		panic("vs_cluster_write: cnt not a multiple of PAGE_SIZE");
	}
	/* clamp each chunk to 32 pages */
	size = (cnt < (PAGE_SIZE*32)) ? cnt : (PAGE_SIZE*32); /* effective min */

	while (cnt) {

		kret = vnode_pageout(vnode_object->vnode_handle, 
			(upl_t )NULL, (vm_offset_t)NULL, 
			offset, size, 0, &local_error);
/*
		if(kret == PAGER_ABSENT) {
	Need to work out the defs here, 1 corresponds to 
	PAGER_ABSENT defined in bsd/vm/vm_pager.h  However, 
	we should not be including that file here it is a 
	layering violation.
*/
		if(kret == 1) {
			/*
			 * Data was absent: build an internal UPL over the
			 * chunk purely so we can abort it, which releases
			 * the pages without writing anything.
			 */
			int	uplflags;
			upl_t	upl = NULL;
			int	count = 0;
			kern_return_t	kr;

			uplflags = (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
					UPL_SET_INTERNAL | UPL_COPYOUT_FROM);
			count = 0;
			kr = memory_object_upl_request(
				vnode_object->control_handle, 
				offset, size, &upl, NULL, &count, uplflags);
			if(kr != KERN_SUCCESS) {
				panic("vnode_pager_cluster_write: upl request failed\n");
			}
			upl_abort(upl, 0);
			upl_deallocate(upl);

			/* absent data is not an error for this chunk */
			error = 0;
			local_error = 0;
		}

		/* remember the most recent chunk error, then clear it */
		if (local_error != 0) {
			error = local_error;
			local_error = 0;
		}
		/* advance to the next chunk, re-clamping the size */
		cnt -= size;
		offset += size;
		size = (cnt < (PAGE_SIZE*32)) ? cnt : (PAGE_SIZE*32); /* effective min */
	}
#if 0
	if (error != 0)
		return(KERN_FAILURE);

	return(KERN_SUCCESS);
#endif /* 0 */
}
624
625
/*
 *	Read "cnt" bytes (a whole number of pages) at "offset" from the
 *	backing vnode into the object via vnode_pagein.  If the pager
 *	reports the data absent (return value 1 — see the layering note
 *	below), the pages are released through an aborted UPL and the
 *	call fails with KERN_FAILURE; otherwise KERN_SUCCESS.
 *	NOTE(review): local_error from vnode_pagein is collected but
 *	never examined — only the "absent" case sets error.
 */
kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t	vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt)
{
	int		error = 0;
	int		local_error = 0;
	int		kret;

	/* caller contract: range must be page-aligned in length */
	if(cnt & PAGE_MASK) {
		panic("vs_cluster_read: cnt not a multiple of PAGE_SIZE");
	}

	kret = vnode_pagein(vnode_object->vnode_handle, (upl_t)NULL, (vm_offset_t)NULL, offset, cnt, 0, &local_error);
/*
	if(kret == PAGER_ABSENT) {
	Need to work out the defs here, 1 corresponds to PAGER_ABSENT 
	defined in bsd/vm/vm_pager.h  However, we should not be including 
	that file here it is a layering violation.
*/
	if(kret == 1) {
		/*
		 * Data was absent: create an internal UPL over the range
		 * solely to abort it, releasing the pages, and report
		 * the read as failed.
		 */
		int	uplflags;
		upl_t	upl = NULL;
		int	count = 0;
		kern_return_t	kr;

		uplflags = (UPL_NO_SYNC | 
			UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
		count = 0;
		kr = memory_object_upl_request(
			vnode_object->control_handle, offset, cnt,
			&upl, NULL, &count, uplflags);
		if(kr != KERN_SUCCESS) {
			panic("vnode_pager_cluster_read: upl request failed\n");
		}
		upl_abort(upl, 0);
		upl_deallocate(upl);

		error = 1;
	}

	if (error != 0)
		return(KERN_FAILURE);

	return(KERN_SUCCESS);

}
677
678
/*
 *	Ask the memory object layer to evict up to *cnt cached objects
 *	belonging to this pager type (identified by the workaround
 *	pointer) on the real host; *cnt is updated by the callee.
 */
void
vnode_pager_release_from_cache(
		int	*cnt)
{
	memory_object_free_from_cache(
			&realhost, &vnode_pager_workaround, cnt);
}
689
690 /*
691 *
692 */
693 vnode_pager_t
694 vnode_object_create(
695 vnode_port_t vp)
696 {
697 register vnode_pager_t vnode_object;
698
699 vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
700 if (vnode_object == VNODE_PAGER_NULL)
701 return(VNODE_PAGER_NULL);
702
703 /*
704 * The vm_map call takes both named entry ports and raw memory
705 * objects in the same parameter. We need to make sure that
706 * vm_map does not see this object as a named entry port. So,
707 * we reserve the second word in the object for a fake ip_kotype
708 * setting - that will tell vm_map to use it as a memory object.
709 */
710 vnode_object->pager = &vnode_pager_workaround;
711 vnode_object->pager_ikot = IKOT_MEMORY_OBJECT;
712 vnode_object->ref_count = 1;
713 vnode_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
714 vnode_object->vnode_handle = vp;
715
716 return(vnode_object);
717 }
718
719 /*
720 *
721 */
722 vnode_pager_t
723 vnode_pager_lookup(
724 memory_object_t name)
725 {
726 vnode_pager_t vnode_object;
727
728 vnode_object = (vnode_pager_t)name;
729 assert(vnode_object->pager == &vnode_pager_workaround);
730 return (vnode_object);
731 }
732