/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management.
 */

#include <cpus.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>

#include <string.h>
/*
 *	Variables exported by this module.
 */

vm_map_t	kernel_map;
vm_map_t	kernel_pageable_map;

/*
 * Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_size_t		size);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);

kern_return_t
kmem_alloc_contig(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags)
{
	vm_object_t		object;
	vm_page_t		m, pages;
	kern_return_t		kr;
	vm_offset_t		addr, i;
	vm_object_offset_t	offset;
	vm_map_entry_t		entry;

	if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	size = round_page_32(size);
	if ((flags & KMA_KOBJECT) == 0) {
		object = vm_object_allocate(size);
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	else {
		object = kernel_object;
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}

	if ((flags & KMA_KOBJECT) == 0) {
		entry->object.vm_object = object;
		entry->offset = offset = 0;
	} else {
		offset = addr - VM_MIN_KERNEL_ADDRESS;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_object_reference(object);
			entry->object.vm_object = object;
			entry->offset = offset;
		}
	}

	if (kr != KERN_SUCCESS) {
		if ((flags & KMA_KOBJECT) == 0)
			vm_object_deallocate(object);
		return kr;
	}

	vm_map_unlock(map);

	kr = cpm_allocate(size, &pages, FALSE);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, addr, addr + size, 0);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, addr, addr + size, VM_PROT_DEFAULT, FALSE))
		!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, addr, addr + size, 0);
		return kr;
	}
	if (object == kernel_object)
		vm_map_simplify(map, addr);

	*addrp = addr;
	return KERN_SUCCESS;
}

/*
 *	Master entry point for allocating kernel memory.
 *	NOTE: this routine is _never_ interrupt safe.
 *
 *	map		: map to allocate into
 *	addrp		: pointer to start address of new memory
 *	size		: size of memory requested
 *	flags		: options
 *			  KMA_HERE		*addrp is base address, else "anywhere"
 *			  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *			  KMA_KOBJECT		use kernel_object
 */

kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object = VM_OBJECT_NULL;
	vm_map_entry_t		entry;
	vm_object_offset_t	offset;
	vm_offset_t		addr;
	vm_offset_t		i;
	kern_return_t		kr;

	size = round_page_32(size);
	if ((flags & KMA_KOBJECT) == 0) {
		/*
		 * Allocate a new object.  We must do this before locking
		 * the map, or risk deadlock with the default pager:
		 *	device_read_alloc uses kmem_alloc,
		 *	which tries to allocate an object,
		 *	which uses kmem_alloc_wired to get memory,
		 *	which blocks for pages.
		 *	then the default pager needs to read a block
		 *	to process a memory_object_data_write,
		 *	and device_read_alloc calls kmem_alloc
		 *	and deadlocks on the map lock.
		 */
		object = vm_object_allocate(size);
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	else {
		object = kernel_object;
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	if (kr != KERN_SUCCESS) {
		if ((flags & KMA_KOBJECT) == 0)
			vm_object_deallocate(object);
		return kr;
	}

	if ((flags & KMA_KOBJECT) == 0) {
		entry->object.vm_object = object;
		entry->offset = offset = 0;
	} else {
		offset = addr - VM_MIN_KERNEL_ADDRESS;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_object_reference(object);
			entry->object.vm_object = object;
			entry->offset = offset;
		}
	}

	/*
	 *	Since we have not given out this address yet,
	 *	it is safe to unlock the map.  Except of course
	 *	we must make certain no one coalesces our address
	 *	or does a blind vm_deallocate and removes the object;
	 *	an extra object reference will suffice to protect
	 *	against both contingencies.
	 */
	vm_object_reference(object);
	vm_map_unlock(map);

	vm_object_lock(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t	mem;

		while ((mem = vm_page_alloc(object,
				offset + (vm_object_offset_t)i))
			    == VM_PAGE_NULL) {
			if (flags & KMA_NOPAGEWAIT) {
				if (object == kernel_object)
					vm_object_page_remove(object, offset,
						offset + (vm_object_offset_t)i);
				vm_object_unlock(object);
				vm_map_remove(map, addr, addr + size, 0);
				vm_object_deallocate(object);
				return KERN_RESOURCE_SHORTAGE;
			}
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, addr, addr + size, VM_PROT_DEFAULT, FALSE))
		!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, addr, addr + size, 0);
		vm_object_deallocate(object);
		return (kr);
	}
	/* now that the pages are wired, we no longer have to fear coalesce */
	vm_object_deallocate(object);
	if (object == kernel_object)
		vm_map_simplify(map, addr);

	/*
	 * Return the memory, not zeroed.
	 */
#if	(NCPUS > 1)  &&  i860
	bzero( addr, size );
#endif	/* #if (NCPUS > 1)  &&  i860 */
	*addrp = addr;
	return KERN_SUCCESS;
}
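
/*
 * Illustrative sketch of a kernel_memory_allocate() caller, using the flags
 * documented above: a wired allocation in the kernel_object that backs off
 * instead of blocking when pages are scarce.  The caller's name and the
 * page-alignment mask are illustrative, not part of this interface; since
 * the allocator does not zero-fill, the caller clears the memory itself.
 * Compiled out.
 */
#if 0
static kern_return_t
example_wired_alloc(vm_size_t size, vm_offset_t *out_addr)	/* hypothetical caller */
{
	vm_offset_t	addr;
	kern_return_t	kr;

	/* Wired pages in kernel_object; fail rather than wait for pages. */
	kr = kernel_memory_allocate(kernel_map, &addr, size,
				    (vm_offset_t)(PAGE_SIZE - 1),
				    KMA_KOBJECT | KMA_NOPAGEWAIT);
	if (kr != KERN_SUCCESS)
		return kr;		/* e.g. KERN_RESOURCE_SHORTAGE */

	memset((void *)addr, 0, size);	/* allocator does not zero-fill */
	*out_addr = addr;
	return KERN_SUCCESS;
}
#endif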

/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */

kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, 0);
}

/*
 *	kmem_realloc:
 *
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
	vm_map_t	map,
	vm_offset_t	oldaddr,
	vm_size_t	oldsize,
	vm_offset_t	*newaddrp,
	vm_size_t	newsize)
{
	vm_offset_t	oldmin, oldmax;
	vm_offset_t	newaddr;
	vm_offset_t	offset;
	vm_object_t	object;
	vm_map_entry_t	oldentry, newentry;
	vm_page_t	mem;
	kern_return_t	kr;

	oldmin = trunc_page_32(oldaddr);
	oldmax = round_page_32(oldaddr + oldsize);
	oldsize = oldmax - oldmin;
	newsize = round_page_32(newsize);


	/*
	 *	Find the VM object backing the old region.
	 */

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, oldmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	/* by grabbing the object lock before unlocking the map */
	/* we guarantee that we will panic if more than one     */
	/* attempt is made to realloc a kmem_alloc'd area       */
	vm_object_lock(object);
	vm_map_unlock(map);
	if (object->size != oldsize)
		panic("kmem_realloc");
	object->size = newsize;
	vm_object_unlock(object);

	/* allocate the new pages while expanded portion of the */
	/* object is still not mapped */
	kmem_alloc_pages(object, oldsize, newsize-oldsize);


	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newaddr, newsize, (vm_offset_t) 0,
			       &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for(offset = oldsize;
		    offset < newsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert (newentry->wired_count == 0);


	/* add an extra reference in case we have someone doing an */
	/* unexpected deallocate */
	vm_object_reference(object);
	vm_map_unlock(map);

	if ((kr = vm_map_wire(map, newaddr, newaddr + newsize,
			      VM_PROT_DEFAULT, FALSE)) != KERN_SUCCESS) {
		vm_map_remove(map, newaddr, newaddr + newsize, 0);
		vm_object_lock(object);
		for(offset = oldsize;
		    offset < newsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return (kr);
	}
	vm_object_deallocate(object);


	*newaddrp = newaddr;
	return KERN_SUCCESS;
}
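
/*
 * Illustrative sketch of the kmem_realloc() contract described above: grow a
 * kmem_alloc'd region, then discard the old virtual range with kmem_free()
 * once the new mapping is in use, since a successful call leaves the old
 * pages mapped at both addresses.  The caller's name is illustrative.
 * Compiled out.
 */
#if 0
static kern_return_t
example_grow_region(					/* hypothetical caller */
	vm_offset_t	oldaddr,
	vm_size_t	oldsize,
	vm_size_t	newsize,
	vm_offset_t	*newaddrp)
{
	kern_return_t	kr;

	kr = kmem_realloc(kernel_map, oldaddr, oldsize, newaddrp, newsize);
	if (kr != KERN_SUCCESS)
		return kr;		/* old region is left untouched */

	/* The old region is unchanged; release it now that the new one exists. */
	kmem_free(kernel_map, oldaddr, oldsize);
	return KERN_SUCCESS;
}
#endif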

/*
 *	kmem_alloc_wired:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */

kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
}

/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_wired, except that the memory is aligned.
 *	The size should be a power-of-2.
 */

kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned: size not aligned");
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
}

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */

kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_offset_t addr;
	kern_return_t kr;

#ifndef normal
	addr = (vm_map_min(map)) + 0x1000;
#else
	addr = vm_map_min(map);
#endif
	kr = vm_map_enter(map, &addr, round_page_32(size),
			  (vm_offset_t) 0, TRUE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		return kr;

	*addrp = addr;
	return KERN_SUCCESS;
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */

void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t kr;

	kr = vm_map_remove(map, trunc_page_32(addr),
			   round_page_32(addr + size),
			   VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}
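
/*
 * Illustrative sketch of the kmem_alloc()/kmem_free() pairing documented
 * above: allocate wired kernel memory from kernel_map, use it, and release
 * both the virtual range and its physical pages.  The caller and buffer
 * names are illustrative.  Compiled out.
 */
#if 0
static kern_return_t
example_scratch_buffer(vm_size_t size)			/* hypothetical caller */
{
	vm_offset_t	buf;
	kern_return_t	kr;

	kr = kmem_alloc(kernel_map, &buf, size);	/* wired, not zero-filled */
	if (kr != KERN_SUCCESS)
		return kr;

	memset((void *)buf, 0, size);			/* caller zeroes if needed */
	/* ... use the buffer ... */

	kmem_free(kernel_map, buf, size);		/* returns the physical pages */
	return KERN_SUCCESS;
}
#endif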

/*
 *	Allocate new pages in an object.
 */

kern_return_t
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_size_t		size)
{

	size = round_page_32(size);
	vm_object_lock(object);
	while (size) {
		register vm_page_t	mem;


		/*
		 *	Allocate a page
		 */
		while ((mem = vm_page_alloc(object, offset))
			 == VM_PAGE_NULL) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}


		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
		mem->busy = FALSE;
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}

/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{
	/*
	 *	Mark the pmap region as not pageable.
	 */
	pmap_pageable(kernel_pmap, start, end, FALSE);

	while (start < end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Find a page
		 */
		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */
		PMAP_ENTER(kernel_pmap, start, mem, protection,
			   ((unsigned int)(mem->object->wimg_bits))
			   & VM_WIMG_MASK,
			   TRUE);

		start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	anywhere	Can region be located anywhere in map
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	boolean_t	anywhere,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	kern_return_t	kr;

	size = round_page_32(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	if (anywhere == TRUE)
		*addr = (vm_offset_t)vm_map_min(parent);
	kr = vm_map_enter(parent, addr, size,
			  (vm_offset_t) 0, anywhere,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), *addr, *addr + size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */

	kr = vm_map_submap(parent, *addr, *addr + size, map, *addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, *addr, *addr + size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);	/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}
	*new_map = map;
	return (KERN_SUCCESS);
}
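
/*
 * Illustrative sketch of kmem_suballoc() following the argument descriptions
 * above: carve a pageable submap out of kernel_map, placed anywhere, so that
 * later allocations in it would typically go through kmem_alloc_pageable().
 * The map variable and caller name are illustrative.  Compiled out.
 */
#if 0
static vm_map_t	example_submap;				/* hypothetical submap */

static void
example_submap_init(vm_size_t size)			/* hypothetical caller */
{
	vm_offset_t	base;
	kern_return_t	kr;

	kr = kmem_suballoc(kernel_map, &base, size,
			   TRUE,	/* pageable */
			   TRUE,	/* anywhere */
			   &example_submap);
	if (kr != KERN_SUCCESS)
		panic("example_submap_init");
}
#endif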

/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	kernel_map = vm_map_create(pmap_kernel(),
				   VM_MIN_KERNEL_ADDRESS, end,
				   FALSE);

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */

	if (start != VM_MIN_KERNEL_ADDRESS) {
		vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;
		(void) vm_map_enter(kernel_map,
				    &addr, start - VM_MIN_KERNEL_ADDRESS,
				    (vm_offset_t) 0, TRUE,
				    VM_OBJECT_NULL,
				    (vm_object_offset_t) 0, FALSE,
				    VM_PROT_DEFAULT, VM_PROT_ALL,
				    VM_INHERIT_DEFAULT);
	}

	/*
	 * Account for kernel memory (text, data, bss, vm shenanigans).
	 * This may include inaccessible "holes" as determined by what
	 * the machine-dependent init code includes in max_mem.
	 */
	vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
						  + vm_page_active_count
						  + vm_page_inactive_count));
}


/*
 *	kmem_io_object_trunc:
 *
 *	Truncate an object vm_map_copy_t.
 *	Called by the scatter/gather list network code to remove pages from
 *	the tail end of a packet.  Also unwires the object's pages.
 */

kern_return_t
kmem_io_object_trunc(copy, new_size)
	vm_map_copy_t	copy;		/* IN/OUT copy object */
	register vm_size_t new_size;	/* IN new object size */
{
	register vm_size_t	offset, old_size;

	assert(copy->type == VM_MAP_COPY_OBJECT);

	old_size = (vm_size_t)round_page_64(copy->size);
	copy->size = new_size;
	new_size = round_page_32(new_size);

	vm_object_lock(copy->cpy_object);
	vm_object_page_remove(copy->cpy_object,
		(vm_object_offset_t)new_size, (vm_object_offset_t)old_size);
	for (offset = 0; offset < new_size; offset += PAGE_SIZE) {
		register vm_page_t	mem;

		if ((mem = vm_page_lookup(copy->cpy_object,
				(vm_object_offset_t)offset)) == VM_PAGE_NULL)
			panic("kmem_io_object_trunc: unable to find object page");

		/*
		 * Make sure these pages are marked dirty
		 */
		mem->dirty = TRUE;
		vm_page_lock_queues();
		vm_page_unwire(mem);
		vm_page_unlock_queues();
	}
	copy->cpy_object->size = new_size;	/* adjust size of object */
	vm_object_unlock(copy->cpy_object);
	return(KERN_SUCCESS);
}

/*
 *	kmem_io_object_deallocate:
 *
 *	Free a vm_map_copy_t.
 *	Called by the scatter/gather list network code to free a packet.
 */

void
kmem_io_object_deallocate(
	vm_map_copy_t	copy)		/* IN/OUT copy object */
{
	kern_return_t	ret;

	/*
	 * Clear out all the object pages (this will leave an empty object).
	 */
	ret = kmem_io_object_trunc(copy, 0);
	if (ret != KERN_SUCCESS)
		panic("kmem_io_object_deallocate: unable to truncate object");
	/*
	 * ...and discard the copy object.
	 */
	vm_map_copy_discard(copy);
}

/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
boolean_t
copyinmap(
	vm_map_t	map,
	vm_offset_t	fromaddr,
	vm_offset_t	toaddr,
	vm_size_t	length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy((void *)toaddr, (void *)fromaddr, length);
		return FALSE;
	}

	if (current_map() == map)
		return copyin((char *)fromaddr, (char *)toaddr, length);

	return TRUE;
}

/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
boolean_t
copyoutmap(
	vm_map_t	map,
	vm_offset_t	fromaddr,
	vm_offset_t	toaddr,
	vm_size_t	length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy((void *)toaddr, (void *)fromaddr, length);
		return FALSE;
	}

	if (current_map() == map)
		return copyout((char *)fromaddr, (char *)toaddr, length);

	return TRUE;
}


kern_return_t
vm_conflict_check(
	vm_map_t		map,
	vm_offset_t		off,
	vm_size_t		len,
	memory_object_t		pager,
	vm_object_offset_t	file_off)
{
	vm_map_entry_t		entry;
	vm_object_t		obj;
	vm_object_offset_t	obj_off;
	vm_map_t		base_map;
	vm_offset_t		base_offset;
	vm_offset_t		original_offset;
	kern_return_t		kr;
	vm_size_t		local_len;

	base_map = map;
	base_offset = off;
	original_offset = off;
	kr = KERN_SUCCESS;
	vm_map_lock(map);
	while(vm_map_lookup_entry(map, off, &entry)) {
		local_len = len;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return KERN_SUCCESS;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			old_map = map;
			vm_map_lock(entry->object.sub_map);
			map = entry->object.sub_map;
			off = entry->offset + (off - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		obj = entry->object.vm_object;
		obj_off = (off - entry->vme_start) + entry->offset;
		while(obj->shadow) {
			obj_off += obj->shadow_offset;
			obj = obj->shadow;
		}
		if((obj->pager_created) && (obj->pager == pager)) {
			if(((obj->paging_offset) + obj_off) == file_off) {
				if(off != base_offset) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				kr = KERN_ALREADY_WAITING;
			} else {
				vm_object_offset_t	obj_off_aligned;
				vm_object_offset_t	file_off_aligned;

				obj_off_aligned = obj_off & ~PAGE_MASK;
				file_off_aligned = file_off & ~PAGE_MASK;

				if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
					/*
					 * the target map and the file offset start in the same page
					 * but are not identical...
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
				    ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
					/*
					 * some portion of the tail of the I/O will fall
					 * within the encompass of the target map
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
				    (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
					/*
					 * the beginning page of the file offset falls within
					 * the target map's encompass
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
			}
		} else if(kr != KERN_SUCCESS) {
			vm_map_unlock(map);
			return KERN_FAILURE;
		}

		if(len <= ((entry->vme_end - entry->vme_start) -
			   (off - entry->vme_start))) {
			vm_map_unlock(map);
			return kr;
		} else {
			len -= (entry->vme_end - entry->vme_start) -
			       (off - entry->vme_start);
		}
		base_offset = base_offset + (local_len - len);
		file_off = file_off + (local_len - len);
		off = base_offset;
		if(map != base_map) {
			vm_map_unlock(map);
			vm_map_lock(base_map);
			map = base_map;
		}
	}

	vm_map_unlock(map);
	return kr;
}