/* osfmk/vm/bsd_vm.c (xnu-201) */
/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>
#include <kern/host.h>
#include <mach/mach_types.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <mach/kern_return.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/thread.h>
#include <vm/memory_object.h>

#include <libkern/OSAtomic.h>

#include <default_pager/default_pager_types.h>

/* BSD VM COMPONENT INTERFACES */
int
get_map_nentries(
	vm_map_t);

vm_offset_t
get_map_start(
	vm_map_t);

vm_offset_t
get_map_end(
	vm_map_t);

/*
 * Return the number of entries in the given map.
 */
int
get_map_nentries(
	vm_map_t map)
{
	return(map->hdr.nentries);
}

/*
 * Return the start address of the first entry in the given map.
 */
vm_offset_t
get_map_start(
	vm_map_t map)
{
	return(vm_map_first_entry(map)->vme_start);
}

/*
 * Return the end address of the last entry in the given map.
 */
vm_offset_t
get_map_end(
	vm_map_t map)
{
	return(vm_map_last_entry(map)->vme_end);
}

/*
 * BSD VNODE PAGER
 */

/* until component support available */
int	vnode_pager_workaround;

typedef int vnode_port_t;

typedef struct vnode_pager {
	int			*pager;		/* pager workaround pointer */
	unsigned int		pager_ikot;	/* JMM: fake ip_kotype() */
	unsigned int		ref_count;	/* reference count */
	memory_object_control_t	control_handle;	/* mem object control handle */
	vnode_port_t		vnode_handle;	/* vnode handle */
} *vnode_pager_t;

ipc_port_t
trigger_name_to_port(
	mach_port_t);

void
vnode_pager_bootstrap(
	void);

void
vnode_pager_alloc_map(
	void);

memory_object_t
vnode_pager_setup(
	vnode_port_t,
	memory_object_t);

kern_return_t
vnode_pager_init(
	memory_object_t,
	memory_object_control_t,
	vm_size_t);

kern_return_t
vnode_pager_get_object_size(
	memory_object_t,
	memory_object_offset_t *);

kern_return_t
vnode_pager_data_request(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	vm_prot_t);

kern_return_t
vnode_pager_data_return(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	boolean_t,
	boolean_t);

kern_return_t
vnode_pager_data_initialize(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t);

void
vnode_pager_deallocate(
	memory_object_t);

kern_return_t
vnode_pager_terminate(
	memory_object_t);

kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t);

void
vnode_pager_cluster_write(
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t);

int
vnode_pagein(
	vnode_port_t,
	upl_t,
	vm_offset_t,
	vm_object_offset_t,
	int,
	int,
	int *);

int
vnode_pageout(
	vnode_port_t,
	upl_t,
	vm_offset_t,
	vm_object_offset_t,
	int,
	int,
	int *);

vm_object_offset_t
vnode_pager_get_filesize(
	vnode_port_t);

vnode_pager_t
vnode_object_create(
	vnode_port_t vp);

vnode_pager_t
vnode_pager_lookup(
	memory_object_t);

void
vnode_pager_release_from_cache(
	int *cnt);

zone_t	vnode_pager_zone;

#define VNODE_PAGER_NULL	((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define CLUSTER_SHIFT	1

/* TODO: Should be set dynamically by vnode_pager_bootstrap() */
#define	MAX_VNODE	10000

#if DEBUG
int pagerdebug = 0;

#define PAGER_ALL	0xffffffff
#define PAGER_INIT	0x00000001
#define PAGER_PAGEIN	0x00000002

#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL) == LEVEL) {printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif

/*
 *	Routine:	macx_triggers
 *	Function:
 *		Syscall interface to set the callbacks for low and
 *		high water marks.
 */
int
macx_triggers(
	int	hi_water,
	int	low_water,
	int	flags,
	mach_port_t	trigger_name)
{
	kern_return_t		kr;
	memory_object_default_t	default_pager;
	ipc_port_t		trigger_port;

	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(),
					&default_pager, 0);
	if (kr != KERN_SUCCESS) {
		return EINVAL;
	}
	if (flags & HI_WAT_ALERT) {
		trigger_port = trigger_name_to_port(trigger_name);
		if (trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port);
		/* now unlocked */
		default_pager_triggers(default_pager,
				hi_water, low_water,
				HI_WAT_ALERT, trigger_port);
	}

	if (flags & LO_WAT_ALERT) {
		trigger_port = trigger_name_to_port(trigger_name);
		if (trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port);
		/* and now it's unlocked */
		default_pager_triggers(default_pager,
				hi_water, low_water,
				LO_WAT_ALERT, trigger_port);
	}

	/*
	 * Set thread scheduling priority and policy for the current thread;
	 * it is assumed for the time being that the thread setting the alert
	 * is the same one which will be servicing it.
	 */
	{
		struct policy_timeshare_base	fifo_base;
		struct policy_timeshare_limit	fifo_limit;
		policy_base_t			base;
		processor_set_t			pset;
		policy_limit_t			limit;

		pset = (current_thread())->processor_set;
		base = (policy_base_t) &fifo_base;
		limit = (policy_limit_t) &fifo_limit;
		fifo_limit.max_priority = fifo_base.base_priority = MAXPRI_STANDARD;
		thread_set_policy((current_thread())->top_act, pset,
				POLICY_FIFO, base, POLICY_TIMESHARE_BASE_COUNT,
				limit, POLICY_TIMESHARE_LIMIT_COUNT);
	}

	current_thread()->vm_privilege = TRUE;

	return 0;
}

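/*
 * Illustrative sketch of how a user-space backing-store manager (in the
 * spirit of dynamic_pager) might drive this syscall.  The extern
 * declaration, the helper name, and the argument values below are
 * assumptions made for the example only and are not defined in this
 * file; the sketch is kept under #if 0 so it is never compiled.
 */
#if 0
extern int	macx_triggers(int hi_water, int low_water, int flags,
			mach_port_t alert_port);

static void
register_swap_triggers(mach_port_t alert_port, int hi_water, int low_water)
{
	/* Ask for a notification when backing store crosses the high mark... */
	if (macx_triggers(hi_water, low_water, HI_WAT_ALERT, alert_port) != 0)
		return;	/* no default pager yet, or a bad trigger port */

	/* ...and another one when usage drops back below the low mark. */
	(void) macx_triggers(hi_water, low_water, LO_WAT_ALERT, alert_port);
}
#endif /* 0 */
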
/*
 * Convert a trigger port name in the current task's IPC space into the
 * underlying port.  Returns NULL if the name is zero or does not denote
 * a receive right; on success the port is returned locked and active.
 */
ipc_port_t
trigger_name_to_port(
	mach_port_t	trigger_name)
{
	ipc_port_t	trigger_port;
	ipc_space_t	space;

	if (trigger_name == 0)
		return (NULL);

	space = current_space();
	if (ipc_port_translate_receive(space, (mach_port_name_t)trigger_name,
				&trigger_port) != KERN_SUCCESS)
		return (NULL);
	return trigger_port;
}

/*
 * Create the zone from which vnode pager structures are allocated.
 */
void
vnode_pager_bootstrap(void)
{
	register vm_size_t	size;

	size = (vm_size_t) sizeof(struct vnode_pager);
	vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
				PAGE_SIZE, "vnode pager structures");
	return;
}

/*
 * Create a vnode pager for the given vnode and return it as a
 * memory object.
 */
memory_object_t
vnode_pager_setup(
	vnode_port_t	vp,
	memory_object_t	pager)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_object_create(vp);
	if (vnode_object == VNODE_PAGER_NULL)
		panic("vnode_pager_setup: vnode_object_create() failed");
	return((memory_object_t)vnode_object);
}

/*
 * Attach the pager to its memory object control handle and set the
 * object's caching attributes.
 */
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
		memory_object_control_t control,
		vm_size_t pg_size)
{
	vnode_pager_t	vnode_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t	attributes;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %x, %x, %x\n", mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	vnode_object->control_handle = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT)); */
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = TRUE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("vnode_pager_init: memory_object_change_attributes() failed");

	return(KERN_SUCCESS);
}

/*
 * Pageout: push the dirty data described by [offset, offset + data_cnt)
 * back to the vnode.
 */
kern_return_t
vnode_pager_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt,
	boolean_t		dirty,
	boolean_t		kernel_copy)
{
	register vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);

	vnode_pager_cluster_write(vnode_object, offset, data_cnt);

	return KERN_SUCCESS;
}

/* Not supported by the vnode pager. */
kern_return_t
vnode_pager_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt)
{
	return KERN_FAILURE;
}

/* Not supported by the vnode pager. */
kern_return_t
vnode_pager_data_unlock(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		size,
	vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

/*
 * Report the current size of the backing file.
 */
kern_return_t
vnode_pager_get_object_size(
	memory_object_t		mem_obj,
	memory_object_offset_t	*length)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);

	*length = vnode_pager_get_filesize(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

/*
 * Pagein: read the requested range in from the vnode.
 */
kern_return_t
vnode_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	vm_prot_t		protection_required)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_data_request: %x, %x, %x, %x\n", mem_obj, offset, length, protection_required));

	vnode_object = vnode_pager_lookup(mem_obj);

	PAGER_DEBUG(PAGER_PAGEIN, ("vnode_pager_data_request: %x, %x, %x, %x, vnode_object %x\n", mem_obj, offset, length, protection_required, vnode_object));

	vnode_pager_cluster_read(vnode_object, offset, length);

	return KERN_SUCCESS;
}

/*
 * Take an additional reference on the pager.
 */
void
vnode_pager_reference(
	memory_object_t		mem_obj)
{
	register vnode_pager_t	vnode_object;
	unsigned int		prev_ref_count;

	vnode_object = vnode_pager_lookup(mem_obj);
	prev_ref_count = OSIncrementAtomic((UInt32 *)&vnode_object->ref_count);
	assert(prev_ref_count > 0);
}

/*
 * Drop a reference on the pager; when the last reference goes away,
 * release the vnode and free the pager structure.
 */
void
vnode_pager_deallocate(
	memory_object_t		mem_obj)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %x\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	if (OSDecrementAtomic((UInt32 *)&vnode_object->ref_count) == 1) {
		if (vnode_object->vnode_handle != (vnode_port_t) NULL) {
			vnode_pager_vrele(vnode_object->vnode_handle);
		}
		zfree(vnode_pager_zone, (vm_offset_t) vnode_object);
	}
	return;
}

/*
 * Nothing to do at termination time beyond logging.
 */
kern_return_t
vnode_pager_terminate(
	memory_object_t		mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %x\n", mem_obj));

	return(KERN_SUCCESS);
}

/*
 * Tell the VM system that the synchronize request has been handled.
 */
kern_return_t
vnode_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	vm_sync_t		sync_flags)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %x\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_synchronize_completed(vnode_object->control_handle, offset, length);

	return (KERN_SUCCESS);
}

/*
 * Notify the UBC that the vnode is no longer mapped.
 */
kern_return_t
vnode_pager_unmap(
	memory_object_t		mem_obj)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_unmap: %x\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	ubc_unmap(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

/*
 * Push `cnt' bytes starting at `offset' out to the vnode, at most
 * 32 pages at a time.
 */
void
vnode_pager_cluster_write(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt)
{
	int	error = 0;
	int	local_error = 0;
	int	kret;
	int	size;

	if (cnt & PAGE_MASK) {
		panic("vnode_pager_cluster_write: cnt not a multiple of PAGE_SIZE");
	}
	size = (cnt < (PAGE_SIZE*32)) ? cnt : (PAGE_SIZE*32); /* at most 32 pages per pass */

	while (cnt) {

		kret = vnode_pageout(vnode_object->vnode_handle,
				(upl_t )NULL, (vm_offset_t)NULL,
				offset, size, 0, &local_error);
		/*
		 * The check below should really be `if (kret == PAGER_ABSENT)';
		 * 1 corresponds to PAGER_ABSENT as defined in bsd/vm/vm_pager.h,
		 * but including that file here would be a layering violation.
		 */
		if (kret == 1) {
			int		uplflags;
			upl_t		upl = NULL;
			int		count = 0;
			kern_return_t	kr;

			uplflags = (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
					UPL_SET_INTERNAL | UPL_COPYOUT_FROM);
			count = 0;
			kr = memory_object_upl_request(
					vnode_object->control_handle,
					offset, size, &upl, NULL, &count, uplflags);
			if (kr != KERN_SUCCESS) {
				panic("vnode_pager_cluster_write: upl request failed\n");
			}
			upl_abort(upl, 0);
			upl_deallocate(upl);

			error = 0;
			local_error = 0;
		}

		if (local_error != 0) {
			error = local_error;
			local_error = 0;
		}
		cnt -= size;
		offset += size;
		size = (cnt < (PAGE_SIZE*32)) ? cnt : (PAGE_SIZE*32); /* at most 32 pages per pass */
	}
#if 0
	if (error != 0)
		return(KERN_FAILURE);

	return(KERN_SUCCESS);
#endif /* 0 */
}

/*
 * Page in `cnt' bytes starting at `offset' from the vnode.
 */
kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt)
{
	int	error = 0;
	int	local_error = 0;
	int	kret;

	if (cnt & PAGE_MASK) {
		panic("vnode_pager_cluster_read: cnt not a multiple of PAGE_SIZE");
	}

	kret = vnode_pagein(vnode_object->vnode_handle, (upl_t)NULL,
			(vm_offset_t)NULL, offset, cnt, 2, &local_error);
	/*
	 * The check below should really be `if (kret == PAGER_ABSENT)';
	 * 1 corresponds to PAGER_ABSENT as defined in bsd/vm/vm_pager.h,
	 * but including that file here would be a layering violation.
	 */
	if (kret == 1) {
		int		uplflags;
		upl_t		upl = NULL;
		int		count = 0;
		kern_return_t	kr;

		uplflags = (UPL_NO_SYNC |
				UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
		count = 0;
		kr = memory_object_upl_request(
				vnode_object->control_handle, offset, cnt,
				&upl, NULL, &count, uplflags);
		if (kr != KERN_SUCCESS) {
			panic("vnode_pager_cluster_read: upl request failed\n");
		}
		upl_abort(upl, 0);
		upl_deallocate(upl);

		error = 1;
	}

	if (error != 0)
		return(KERN_FAILURE);

	return(KERN_SUCCESS);
}

/*
 * Ask the VM system to release cached vnode pager objects.
 */
void
vnode_pager_release_from_cache(
	int	*cnt)
{
	memory_object_free_from_cache(
			&realhost, &vnode_pager_workaround, cnt);
}

/*
 * Allocate and initialize a vnode pager structure for the given vnode.
 */
vnode_pager_t
vnode_object_create(
	vnode_port_t	vp)
{
	register vnode_pager_t	vnode_object;

	vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
	if (vnode_object == VNODE_PAGER_NULL)
		return(VNODE_PAGER_NULL);

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the second word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	vnode_object->pager = &vnode_pager_workaround;
	vnode_object->pager_ikot = IKOT_MEMORY_OBJECT;
	vnode_object->ref_count = 1;
	vnode_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
	vnode_object->vnode_handle = vp;

	return(vnode_object);
}

/*
 * Convert a memory object back to the vnode pager structure that
 * embeds it.
 */
vnode_pager_t
vnode_pager_lookup(
	memory_object_t	name)
{
	vnode_pager_t	vnode_object;

	vnode_object = (vnode_pager_t)name;
	assert(vnode_object->pager == &vnode_pager_workaround);
	return (vnode_object);
}