[apple/xnu.git] / osfmk / vm / bsd_vm.c
/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>
#include <kern/host.h>
#include <mach/mach_types.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <mach/kern_return.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/thread.h>
#include <vm/memory_object.h>

#include <default_pager/default_pager_types.h>

/* BSD VM COMPONENT INTERFACES */
int
get_map_nentries(
	vm_map_t);

vm_offset_t
get_map_start(
	vm_map_t);

vm_offset_t
get_map_end(
	vm_map_t);

/*
 * Return the number of entries in the given map.
 */
int
get_map_nentries(
	vm_map_t map)
{
	return(map->hdr.nentries);
}

/*
 * Return the start address of the first entry in the given map.
 */
vm_offset_t
get_map_start(
	vm_map_t map)
{
	return(vm_map_first_entry(map)->vme_start);
}

/*
 * Return the end address of the last entry in the given map.
 */
vm_offset_t
get_map_end(
	vm_map_t map)
{
	return(vm_map_last_entry(map)->vme_end);
}

/*
 * BSD VNODE PAGER
 */

/* until component support available */
int	vnode_pager_workaround;

typedef int vnode_port_t;

typedef struct vnode_pager {
	int			*pager;		/* pager workaround pointer  */
	unsigned int		pager_ikot;	/* JMM: fake ip_kotype()     */
	unsigned int		ref_count;	/* reference count	     */
	memory_object_control_t	control_handle;	/* mem object control handle */
	vnode_port_t		vnode_handle;	/* vnode handle		     */
} *vnode_pager_t;


ipc_port_t
trigger_name_to_port(
	mach_port_t);

void
vnode_pager_bootstrap(
	void);

void
vnode_pager_alloc_map(
	void);

memory_object_t
vnode_pager_setup(
	vnode_port_t,
	memory_object_t);


kern_return_t
vnode_pager_init(
	memory_object_t,
	memory_object_control_t,
	vm_size_t);

kern_return_t
vnode_pager_get_object_size(
	memory_object_t,
	memory_object_offset_t *);

kern_return_t
vnode_pager_data_request(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	vm_prot_t);

kern_return_t
vnode_pager_data_return(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	boolean_t,
	boolean_t);

kern_return_t
vnode_pager_data_initialize(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t);

void
vnode_pager_deallocate(
	memory_object_t);

kern_return_t
vnode_pager_terminate(
	memory_object_t);

kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t);

void
vnode_pager_cluster_write(
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t);


int
vnode_pagein(
	vnode_port_t,
	upl_t,
	vm_offset_t,
	vm_object_offset_t,
	int,
	int,
	int *);
int
vnode_pageout(
	vnode_port_t,
	upl_t,
	vm_offset_t,
	vm_object_offset_t,
	int,
	int,
	int *);

vm_object_offset_t
vnode_pager_get_filesize(
	vnode_port_t);

vnode_pager_t
vnode_object_create(
	vnode_port_t	vp);

vnode_pager_t
vnode_pager_lookup(
	memory_object_t);

void
vnode_pager_release_from_cache(
	int	*cnt);

zone_t	vnode_pager_zone;


#define	VNODE_PAGER_NULL	((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define	CLUSTER_SHIFT	1

/* TODO: Should be set dynamically by vnode_pager_bootstrap() */
#define	MAX_VNODE	10000


#if DEBUG
int pagerdebug = 0;

#define PAGER_ALL		0xffffffff
#define PAGER_INIT		0x00000001
#define PAGER_PAGEIN		0x00000002

#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL) == LEVEL) {printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
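
/*
 * Usage note (illustrative, not from the original source): in a DEBUG
 * build, tracing is enabled at run time by setting the mask, for example
 * from a kernel debugger:
 *
 *	pagerdebug = PAGER_INIT | PAGER_PAGEIN;
 *
 * after which a call such as
 *
 *	PAGER_DEBUG(PAGER_PAGEIN, ("pagein at offset %x\n", offset));
 *
 * prints its message.  With pagerdebug left at 0 nothing is printed, and
 * in non-DEBUG builds the macro expands to nothing at all.
 */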

/*
 *	Routine:	macx_triggers
 *	Function:
 *		Syscall interface to set the callbacks for the low and
 *		high water marks.
 */
int
macx_triggers(
	int	hi_water,
	int	low_water,
	int	flags,
	mach_port_t	trigger_name)
{
	kern_return_t	kr;
	memory_object_default_t	default_pager;
	ipc_port_t		trigger_port;

	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(),
					&default_pager, 0);
	if(kr != KERN_SUCCESS) {
		return EINVAL;
	}
	if (flags & HI_WAT_ALERT) {
		trigger_port = trigger_name_to_port(trigger_name);
		if(trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port);
		/* now unlocked */
		default_pager_triggers(default_pager,
				       hi_water, low_water,
				       HI_WAT_ALERT, trigger_port);
	}

	if (flags & LO_WAT_ALERT) {
		trigger_port = trigger_name_to_port(trigger_name);
		if(trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port);
		/* and now it's unlocked */
		default_pager_triggers(default_pager,
				       hi_water, low_water,
				       LO_WAT_ALERT, trigger_port);
	}

	/*
	 * Set thread scheduling priority and policy for the current thread;
	 * it is assumed for the time being that the thread setting the alert
	 * is the same one which will be servicing it.
	 */
	{
		struct policy_timeshare_base	fifo_base;
		struct policy_timeshare_limit	fifo_limit;
		policy_base_t			base;
		processor_set_t			pset;
		policy_limit_t			limit;

		pset = (current_thread())->processor_set;
		base = (policy_base_t) &fifo_base;
		limit = (policy_limit_t) &fifo_limit;
		fifo_limit.max_priority = fifo_base.base_priority = MAXPRI_STANDARD;
		thread_set_policy((current_thread())->top_act, pset, POLICY_FIFO, base, POLICY_TIMESHARE_BASE_COUNT, limit, POLICY_TIMESHARE_LIMIT_COUNT);
	}

	current_thread()->vm_privilege = TRUE;

	return 0;
}
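
/*
 * Illustrative sketch (an assumption, not part of this file): how a
 * user-space swap-management daemon might register a high-water-mark
 * trigger through this syscall.  The user-level prototype of
 * macx_triggers() and the visibility of HI_WAT_ALERT outside the kernel
 * are assumed here, and the function name is hypothetical.
 */
#if 0
static void
example_register_hi_water_trigger(int hi_water, int low_water)
{
	mach_port_t	trigger_port;

	/*
	 * The trigger name must denote a receive right in our IPC space,
	 * since macx_triggers() resolves it with ipc_port_translate_receive().
	 */
	(void) mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
				  &trigger_port);

	/* ask the default pager to signal this port when space runs low */
	(void) macx_triggers(hi_water, low_water, HI_WAT_ALERT, trigger_port);
}
#endif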

/*
 *	Convert a trigger name (the name of a receive right in the caller's
 *	IPC space) into the corresponding port.  The port is returned locked
 *	and active; NULL is returned if the name does not translate.
 */
ipc_port_t
trigger_name_to_port(
	mach_port_t	trigger_name)
{
	ipc_port_t	trigger_port;
	ipc_space_t	space;

	if (trigger_name == 0)
		return (NULL);

	space = current_space();
	if(ipc_port_translate_receive(space, (mach_port_name_t)trigger_name,
						&trigger_port) != KERN_SUCCESS)
		return (NULL);
	return trigger_port;
}

/*
 *	Create the zone from which vnode pager structures are allocated.
 */
void
vnode_pager_bootstrap(void)
{
	register vm_size_t	size;

	size = (vm_size_t) sizeof(struct vnode_pager);
	vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
				PAGE_SIZE, "vnode pager structures");
	return;
}

/*
 *	Create a memory object backed by the given vnode and return it.
 */
memory_object_t
vnode_pager_setup(
	vnode_port_t	vp,
	memory_object_t	pager)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_object_create(vp);
	if (vnode_object == VNODE_PAGER_NULL)
		panic("vnode_pager_setup: vnode_object_create() failed");
	return((memory_object_t)vnode_object);
}

/*
 *	Attach the pager to its memory object control handle and set the
 *	object's caching attributes.
 */
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
		memory_object_control_t control,
		vm_size_t pg_size)
{
	vnode_pager_t	vnode_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t	attributes;


	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %x, %x, %x\n", mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	vnode_object->control_handle = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = TRUE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("vnode_pager_init: memory_object_change_attributes() failed");

	return(KERN_SUCCESS);
}

/*
 *	Handle a data_return from the VM system: push the pages back to the
 *	vnode through vnode_pager_cluster_write().
 */
kern_return_t
vnode_pager_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt,
	boolean_t		dirty,
	boolean_t		kernel_copy)
{
	register vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);

	vnode_pager_cluster_write(vnode_object, offset, data_cnt);

	return KERN_SUCCESS;
}

/* not supported by the vnode pager */
kern_return_t
vnode_pager_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt)
{
	return KERN_FAILURE;
}

/* not supported by the vnode pager */
kern_return_t
vnode_pager_data_unlock(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		size,
	vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

/*
 *	Report the current size of the file backing this memory object.
 */
kern_return_t
vnode_pager_get_object_size(
	memory_object_t		mem_obj,
	memory_object_offset_t	*length)
{
	vnode_pager_t	vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);

	*length = vnode_pager_get_filesize(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

/*
 *	Handle a page-in request from the VM system: read the pages from
 *	the vnode through vnode_pager_cluster_read().
 */
kern_return_t
vnode_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	vm_prot_t		protection_required)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_data_request: %x, %x, %x, %x\n", mem_obj, offset, length, protection_required));

	vnode_object = vnode_pager_lookup(mem_obj);

	PAGER_DEBUG(PAGER_PAGEIN, ("vnode_pager_data_request: %x, %x, %x, %x, vnode_object %x\n", mem_obj, offset, length, protection_required, vnode_object));

	vnode_pager_cluster_read(vnode_object, offset, length);

	return KERN_SUCCESS;
}

/*
 *	Take an additional reference on the vnode pager.
 */
void
vnode_pager_reference(
	memory_object_t		mem_obj)
{
	register vnode_pager_t	vnode_object;
	unsigned int		new_ref_count;

	vnode_object = vnode_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&vnode_object->ref_count, 1);
	assert(new_ref_count > 1);
}

/*
 *	Drop a reference on the vnode pager; on the last reference, release
 *	the vnode and free the pager structure.
 */
void
vnode_pager_deallocate(
	memory_object_t		mem_obj)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %x\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	if (hw_atomic_sub(&vnode_object->ref_count, 1) == 0) {
		if (vnode_object->vnode_handle != (vnode_port_t) NULL) {
			vnode_pager_vrele(vnode_object->vnode_handle);
		}
		zfree(vnode_pager_zone, (vm_offset_t) vnode_object);
	}
	return;
}

/*
 *	Nothing to do at terminate time; the pager is torn down when its
 *	last reference is dropped in vnode_pager_deallocate().
 */
kern_return_t
vnode_pager_terminate(
	memory_object_t	mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %x\n", mem_obj));

	return(KERN_SUCCESS);
}

/*
 *	Report a synchronize request back to the VM system as complete.
 */
kern_return_t
vnode_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	vm_sync_t		sync_flags)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_synchronize: %x\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_synchronize_completed(vnode_object->control_handle, offset, length);

	return (KERN_SUCCESS);
}

/*
 *	The object is no longer mapped; let UBC drop its hold on the vnode.
 */
kern_return_t
vnode_pager_unmap(
	memory_object_t		mem_obj)
{
	register vnode_pager_t	vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_unmap: %x\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	ubc_unmap(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}


/*
 *	Push a range of dirty data out to the vnode, at most 32 pages
 *	per vnode_pageout() call.
 */
void
vnode_pager_cluster_write(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt)
{
	int		error = 0;
	int		local_error = 0;
	int		kret;
	int		size;

	if (cnt & PAGE_MASK) {
		panic("vs_cluster_write: cnt not a multiple of PAGE_SIZE");
	}
	size = (cnt < (PAGE_SIZE*32)) ? cnt : (PAGE_SIZE*32); /* effective min */

	while (cnt) {

		kret = vnode_pageout(vnode_object->vnode_handle,
			(upl_t )NULL, (vm_offset_t)NULL,
			offset, size, 0, &local_error);
		/*
		 * TODO: work out the definitions here.  The value 1 below
		 * corresponds to PAGER_ABSENT, defined in bsd/vm/vm_pager.h,
		 * but we should not include that file here; doing so would
		 * be a layering violation.  Ideally the test would read
		 * "if (kret == PAGER_ABSENT)".
		 */
		if(kret == 1) {
			int	uplflags;
			upl_t	upl = NULL;
			int	count = 0;
			kern_return_t	kr;

			uplflags = (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
					UPL_SET_INTERNAL | UPL_COPYOUT_FROM);
			count = 0;
			kr = memory_object_upl_request(
				vnode_object->control_handle,
				offset, size, &upl, NULL, &count, uplflags);
			if(kr != KERN_SUCCESS) {
				panic("vnode_pager_cluster_write: upl request failed\n");
			}
			upl_abort(upl, 0);
			upl_deallocate(upl);

			error = 0;
			local_error = 0;
		}

		if (local_error != 0) {
			error = local_error;
			local_error = 0;
		}
		cnt -= size;
		offset += size;
		size = (cnt < (PAGE_SIZE*32)) ? cnt : (PAGE_SIZE*32); /* effective min */
	}
#if 0
	if (error != 0)
		return(KERN_FAILURE);

	return(KERN_SUCCESS);
#endif /* 0 */
}
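
/*
 * Worked example of the loop above (illustrative only): with 4K pages,
 * a 70-page (286720-byte) data_return is pushed out as three calls to
 * vnode_pageout() covering 32, 32 and 6 pages respectively, since each
 * pass writes at most PAGE_SIZE*32 bytes and then advances the offset
 * by the amount written.
 */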


/*
 *	Read a range of data from the vnode into the object via
 *	vnode_pagein().
 */
kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt)
{
	int		error = 0;
	int		local_error = 0;
	int		kret;

	if(cnt & PAGE_MASK) {
		panic("vs_cluster_read: cnt not a multiple of PAGE_SIZE");
	}

	kret = vnode_pagein(vnode_object->vnode_handle, (upl_t)NULL, (vm_offset_t)NULL, offset, cnt, 0, &local_error);
	/*
	 * TODO: work out the definitions here.  The value 1 below
	 * corresponds to PAGER_ABSENT, defined in bsd/vm/vm_pager.h,
	 * but we should not include that file here; doing so would
	 * be a layering violation.  Ideally the test would read
	 * "if (kret == PAGER_ABSENT)".
	 */
	if(kret == 1) {
		int	uplflags;
		upl_t	upl = NULL;
		int	count = 0;
		kern_return_t	kr;

		uplflags = (UPL_NO_SYNC |
			UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
		count = 0;
		kr = memory_object_upl_request(
			vnode_object->control_handle, offset, cnt,
			&upl, NULL, &count, uplflags);
		if(kr != KERN_SUCCESS) {
			panic("vnode_pager_cluster_read: upl request failed\n");
		}
		upl_abort(upl, 0);
		upl_deallocate(upl);

		error = 1;
	}

	if (error != 0)
		return(KERN_FAILURE);

	return(KERN_SUCCESS);

}


/*
 *	Release vnode-pager-backed objects from the VM object cache.
 */
void
vnode_pager_release_from_cache(
		int	*cnt)
{
	memory_object_free_from_cache(
			&realhost, &vnode_pager_workaround, cnt);
}

/*
 *	Allocate and initialize a vnode pager structure for the given vnode.
 */
vnode_pager_t
vnode_object_create(
	vnode_port_t	vp)
{
	register vnode_pager_t	vnode_object;

	vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone);
	if (vnode_object == VNODE_PAGER_NULL)
		return(VNODE_PAGER_NULL);

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the second word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	vnode_object->pager = &vnode_pager_workaround;
	vnode_object->pager_ikot = IKOT_MEMORY_OBJECT;
	vnode_object->ref_count = 1;
	vnode_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
	vnode_object->vnode_handle = vp;

	return(vnode_object);
}
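
/*
 * Illustrative sketch of the workaround described above (hypothetical
 * consumer code, not taken from vm_map): because the first word of the
 * structure points at vnode_pager_workaround and the second word carries
 * IKOT_MEMORY_OBJECT, code handed an object through the combined
 * named-entry/memory-object parameter can key off that second word.
 * The function name below is hypothetical.
 */
#if 0
static boolean_t
example_is_vnode_pager_object(void *port_or_object)
{
	vnode_pager_t	obj = (vnode_pager_t) port_or_object;

	/* IKOT_MEMORY_OBJECT in the kotype slot marks a raw memory object
	   rather than a named entry port */
	return (obj->pager_ikot == IKOT_MEMORY_OBJECT);
}
#endif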

/*
 *	Convert a memory object back to the vnode pager that implements it,
 *	checking the workaround pager pointer along the way.
 */
vnode_pager_t
vnode_pager_lookup(
	memory_object_t	 name)
{
	vnode_pager_t	vnode_object;

	vnode_object = (vnode_pager_t)name;
	assert(vnode_object->pager == &vnode_pager_workaround);
	return (vnode_object);
}
