/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management.
 */

#include <cpus.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>

#include <string.h>
/*
 *	Variables exported by this module.
 */

vm_map_t	kernel_map;
vm_map_t	kernel_pageable_map;

/*
 *	Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_size_t		size);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);

kern_return_t
kmem_alloc_contig(
	vm_map_t		map,
	vm_offset_t		*addrp,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags)
{
	vm_object_t		object;
	vm_page_t		m, pages;
	kern_return_t		kr;
	vm_offset_t		addr, i;
	vm_object_offset_t	offset;
	vm_map_entry_t		entry;

	if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	size = round_page(size);
	if ((flags & KMA_KOBJECT) == 0) {
		object = vm_object_allocate(size);
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	else {
		object = kernel_object;
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}

	/*
	 * Check for failure before touching the new entry;
	 * it is only valid when vm_map_find_space succeeds.
	 */
	if (kr != KERN_SUCCESS) {
		if ((flags & KMA_KOBJECT) == 0)
			vm_object_deallocate(object);
		return kr;
	}

	if ((flags & KMA_KOBJECT) == 0) {
		entry->object.vm_object = object;
		entry->offset = offset = 0;
	} else {
		offset = addr - VM_MIN_KERNEL_ADDRESS;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_object_reference(object);
			entry->object.vm_object = object;
			entry->offset = offset;
		}
	}

	vm_map_unlock(map);

	kr = cpm_allocate(size, &pages, FALSE);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, addr, addr + size, 0);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, addr, addr + size, VM_PROT_DEFAULT, FALSE))
		!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, addr, addr + size, 0);
		return kr;
	}
	if (object == kernel_object)
		vm_map_simplify(map, addr);

	*addrp = addr;
	return KERN_SUCCESS;
}
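
/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * how a caller might obtain and later release a physically contiguous,
 * wired buffer through kmem_alloc_contig().  The names dma_buf and
 * DMA_BUF_SIZE are hypothetical.
 *
 *	vm_offset_t	dma_buf;
 *	kern_return_t	kr;
 *
 *	kr = kmem_alloc_contig(kernel_map, &dma_buf, DMA_BUF_SIZE,
 *			       (vm_offset_t) 0, 0);
 *	if (kr != KERN_SUCCESS)
 *		return kr;
 *	...
 *	kmem_free(kernel_map, dma_buf, DMA_BUF_SIZE);
 */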

/*
 * Master entry point for allocating kernel memory.
 * NOTE: this routine is _never_ interrupt safe.
 *
 * map		: map to allocate into
 * addrp	: pointer to start address of new memory
 * size		: size of memory requested
 * flags	: options
 *		  KMA_HERE		*addrp is base address, else "anywhere"
 *		  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *		  KMA_KOBJECT		use kernel_object
 */

kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object = VM_OBJECT_NULL;
	vm_map_entry_t		entry;
	vm_object_offset_t	offset;
	vm_offset_t		addr;
	vm_offset_t		i;
	kern_return_t		kr;

	size = round_page(size);
	if ((flags & KMA_KOBJECT) == 0) {
		/*
		 * Allocate a new object.  We must do this before locking
		 * the map, or risk deadlock with the default pager:
		 *	device_read_alloc uses kmem_alloc,
		 *	which tries to allocate an object,
		 *	which uses kmem_alloc_wired to get memory,
		 *	which blocks for pages.
		 *	then the default pager needs to read a block
		 *	to process a memory_object_data_write,
		 *	and device_read_alloc calls kmem_alloc
		 *	and deadlocks on the map lock.
		 */
		object = vm_object_allocate(size);
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	else {
		object = kernel_object;
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	if (kr != KERN_SUCCESS) {
		if ((flags & KMA_KOBJECT) == 0)
			vm_object_deallocate(object);
		return kr;
	}

	if ((flags & KMA_KOBJECT) == 0) {
		entry->object.vm_object = object;
		entry->offset = offset = 0;
	} else {
		offset = addr - VM_MIN_KERNEL_ADDRESS;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_object_reference(object);
			entry->object.vm_object = object;
			entry->offset = offset;
		}
	}

	/*
	 * Since we have not given out this address yet, it is safe
	 * to unlock the map.  Of course, we must make certain no one
	 * coalesces our address or does a blind vm_deallocate and
	 * removes the object; an extra object reference will suffice
	 * to protect against both contingencies.
	 */
	vm_object_reference(object);
	vm_map_unlock(map);

	vm_object_lock(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t	mem;

		while ((mem = vm_page_alloc(object,
				offset + (vm_object_offset_t)i))
			    == VM_PAGE_NULL) {
			if (flags & KMA_NOPAGEWAIT) {
				if (object == kernel_object)
					vm_object_page_remove(object, offset,
						offset + (vm_object_offset_t)i);
				vm_object_unlock(object);
				vm_map_remove(map, addr, addr + size, 0);
				vm_object_deallocate(object);
				return KERN_RESOURCE_SHORTAGE;
			}
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, addr, addr + size, VM_PROT_DEFAULT, FALSE))
		!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, addr, addr + size, 0);
		vm_object_deallocate(object);
		return (kr);
	}
	/* now that the pages are wired, we no longer have to fear coalescing */
	vm_object_deallocate(object);
	if (object == kernel_object)
		vm_map_simplify(map, addr);

	/*
	 *	Return the memory, not zeroed.
	 */
#if	(NCPUS > 1)  &&  i860
	bzero( addr, size );
#endif	/* #if (NCPUS > 1)  &&  i860 */
	*addrp = addr;
	return KERN_SUCCESS;
}
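
/*
 * Illustrative only (not part of the original file): a minimal sketch of a
 * wired allocation backed by kernel_object, using the flags documented
 * above.  The names table and TABLE_SIZE are hypothetical.
 *
 *	vm_offset_t	table;
 *	kern_return_t	kr;
 *
 *	kr = kernel_memory_allocate(kernel_map, &table, TABLE_SIZE,
 *				    (vm_offset_t) 0, KMA_KOBJECT);
 *	if (kr != KERN_SUCCESS)
 *		panic("table allocation failed");
 */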

/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */

kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, 0);
}

/*
 *	kmem_realloc:
 *
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
	vm_map_t	map,
	vm_offset_t	oldaddr,
	vm_size_t	oldsize,
	vm_offset_t	*newaddrp,
	vm_size_t	newsize)
{
	vm_offset_t	oldmin, oldmax;
	vm_offset_t	newaddr;
	vm_offset_t	offset;
	vm_object_t	object;
	vm_map_entry_t	oldentry, newentry;
	vm_page_t	mem;
	kern_return_t	kr;

	oldmin = trunc_page(oldaddr);
	oldmax = round_page(oldaddr + oldsize);
	oldsize = oldmax - oldmin;
	newsize = round_page(newsize);

	/*
	 *	Find the VM object backing the old region.
	 */

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, oldmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	/*
	 * By grabbing the object lock before unlocking the map,
	 * we guarantee that we will panic if more than one
	 * attempt is made to realloc a kmem_alloc'd area.
	 */
	vm_object_lock(object);
	vm_map_unlock(map);
	if (object->size != oldsize)
		panic("kmem_realloc");
	object->size = newsize;
	vm_object_unlock(object);

	/*
	 * Allocate the new pages while the expanded portion of the
	 * object is still not mapped.
	 */
	kmem_alloc_pages(object, oldsize, newsize - oldsize);

	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newaddr, newsize, (vm_offset_t) 0,
			       &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for (offset = oldsize; offset < newsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert(newentry->wired_count == 0);

	/*
	 * Add an extra reference in case we have someone doing an
	 * unexpected deallocate.
	 */
	vm_object_reference(object);
	vm_map_unlock(map);

	if ((kr = vm_map_wire(map, newaddr, newaddr + newsize,
			      VM_PROT_DEFAULT, FALSE)) != KERN_SUCCESS) {
		vm_map_remove(map, newaddr, newaddr + newsize, 0);
		vm_object_lock(object);
		for (offset = oldsize; offset < newsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return (kr);
	}
	vm_object_deallocate(object);

	*newaddrp = newaddr;
	return KERN_SUCCESS;
}

/*
 *	kmem_alloc_wired:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */

kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
}

/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_wired, except that the memory is aligned.
 *	The size should be a power of 2.
 */

kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned: size not aligned");
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
}

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */

kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_offset_t	addr;
	kern_return_t	kr;

#ifndef normal
	addr = (vm_map_min(map)) + 0x1000;
#else
	addr = vm_map_min(map);
#endif
	kr = vm_map_enter(map, &addr, round_page(size),
			  (vm_offset_t) 0, TRUE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		return kr;

	*addrp = addr;
	return KERN_SUCCESS;
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */

void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t	kr;

	kr = vm_map_remove(map, trunc_page(addr),
			   round_page(addr + size), VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}
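
/*
 * Illustrative only (not part of the original file): the usual pairing of
 * the allocators above with kmem_free.  The names buf and BUF_SIZE are
 * hypothetical.
 *
 *	vm_offset_t	buf;
 *
 *	if (kmem_alloc(kernel_map, &buf, BUF_SIZE) != KERN_SUCCESS)
 *		return KERN_RESOURCE_SHORTAGE;
 *	...
 *	kmem_free(kernel_map, buf, BUF_SIZE);
 */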

/*
 *	Allocate new pages in an object.
 */

kern_return_t
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_size_t		size)
{
	size = round_page(size);
	vm_object_lock(object);
	while (size) {
		register vm_page_t	mem;

		/*
		 *	Allocate a page
		 */
		while ((mem = vm_page_alloc(object, offset))
			 == VM_PAGE_NULL) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}

		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
		mem->busy = FALSE;
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}

/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{
	/*
	 *	Mark the pmap region as not pageable.
	 */
	pmap_pageable(kernel_pmap, start, end, FALSE);

	while (start < end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Find a page
		 */
		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */
		PMAP_ENTER(kernel_pmap, start, mem, protection,
			   VM_WIMG_USE_DEFAULT, TRUE);

		start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	anywhere	Can region be located anywhere in map
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	boolean_t	anywhere,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	kern_return_t	kr;

	size = round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	if (anywhere == TRUE)
		*addr = (vm_offset_t)vm_map_min(parent);
	kr = vm_map_enter(parent, addr, size,
			  (vm_offset_t) 0, anywhere,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), *addr, *addr + size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */

	kr = vm_map_submap(parent, *addr, *addr + size, map, *addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, *addr, *addr + size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);	/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}
	*new_map = map;
	return (KERN_SUCCESS);
}
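
/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * carving a pageable submap out of kernel_map with kmem_suballoc().
 * The names submap_base, SUBMAP_SIZE, and my_submap are hypothetical.
 *
 *	vm_offset_t	submap_base;
 *	vm_map_t	my_submap;
 *
 *	if (kmem_suballoc(kernel_map, &submap_base, SUBMAP_SIZE,
 *			  TRUE, TRUE, &my_submap) != KERN_SUCCESS)
 *		panic("submap creation failed");
 */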

/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	kernel_map = vm_map_create(pmap_kernel(),
				   VM_MIN_KERNEL_ADDRESS, end,
				   FALSE);

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */

	if (start != VM_MIN_KERNEL_ADDRESS) {
		vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;
		(void) vm_map_enter(kernel_map,
				    &addr, start - VM_MIN_KERNEL_ADDRESS,
				    (vm_offset_t) 0, TRUE,
				    VM_OBJECT_NULL,
				    (vm_object_offset_t) 0, FALSE,
				    VM_PROT_DEFAULT, VM_PROT_ALL,
				    VM_INHERIT_DEFAULT);
	}

	/*
	 * Account for kernel memory (text, data, bss, vm shenanigans).
	 * This may include inaccessible "holes" as determined by what
	 * the machine-dependent init code includes in mem_size.
	 */
	vm_page_wire_count = (atop(mem_size) - (vm_page_free_count
						+ vm_page_active_count
						+ vm_page_inactive_count));
}


/*
 *	kmem_io_object_trunc:
 *
 *	Truncate an object vm_map_copy_t.
 *	Called by the scatter/gather list network code to remove pages from
 *	the tail end of a packet.  Also unwires the object's pages.
 */

kern_return_t
kmem_io_object_trunc(copy, new_size)
	vm_map_copy_t	copy;		/* IN/OUT copy object */
	register vm_size_t new_size;	/* IN new object size */
{
	register vm_size_t	offset, old_size;

	assert(copy->type == VM_MAP_COPY_OBJECT);

	old_size = (vm_size_t)round_page_64(copy->size);
	copy->size = new_size;
	new_size = round_page(new_size);

	vm_object_lock(copy->cpy_object);
	vm_object_page_remove(copy->cpy_object,
		(vm_object_offset_t)new_size, (vm_object_offset_t)old_size);
	for (offset = 0; offset < new_size; offset += PAGE_SIZE) {
		register vm_page_t	mem;

		if ((mem = vm_page_lookup(copy->cpy_object,
				(vm_object_offset_t)offset)) == VM_PAGE_NULL)
			panic("kmem_io_object_trunc: unable to find object page");

		/*
		 * Make sure these pages are marked dirty
		 */
		mem->dirty = TRUE;
		vm_page_lock_queues();
		vm_page_unwire(mem);
		vm_page_unlock_queues();
	}
	copy->cpy_object->size = new_size;	/* adjust size of object */
	vm_object_unlock(copy->cpy_object);
	return (KERN_SUCCESS);
}

/*
 *	kmem_io_object_deallocate:
 *
 *	Free a vm_map_copy_t.
 *	Called by the scatter/gather list network code to free a packet.
 */

void
kmem_io_object_deallocate(
	vm_map_copy_t	copy)		/* IN/OUT copy object */
{
	kern_return_t	ret;

	/*
	 *	Clear out all the object pages (this will leave an empty object).
	 */
	ret = kmem_io_object_trunc(copy, 0);
	if (ret != KERN_SUCCESS)
		panic("kmem_io_object_deallocate: unable to truncate object");
	/*
	 *	...and discard the copy object.
	 */
	vm_map_copy_discard(copy);
}

/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
boolean_t
copyinmap(
	vm_map_t	map,
	vm_offset_t	fromaddr,
	vm_offset_t	toaddr,
	vm_size_t	length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy((void *)toaddr, (void *)fromaddr, length);
		return FALSE;
	}

	if (current_map() == map)
		return copyin((char *)fromaddr, (char *)toaddr, length);

	return TRUE;
}

/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
boolean_t
copyoutmap(
	vm_map_t	map,
	vm_offset_t	fromaddr,
	vm_offset_t	toaddr,
	vm_size_t	length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy((void *)toaddr, (void *)fromaddr, length);
		return FALSE;
	}

	if (current_map() == map)
		return copyout((char *)fromaddr, (char *)toaddr, length);

	return TRUE;
}
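/*
 *	vm_conflict_check:
 *
 *	(Summary comment added for reference; inferred from the code below,
 *	not part of the original file.)
 *	Walk the given map (descending into submaps) to determine whether
 *	the range [off, off+len) is backed, through the given pager, by the
 *	file range starting at file_off.  Returns KERN_SUCCESS if no related
 *	mapping is found, KERN_ALREADY_WAITING if the identical file range is
 *	already mapped at this address, and KERN_FAILURE on a conflicting
 *	overlap.
 */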
kern_return_t
vm_conflict_check(
	vm_map_t		map,
	vm_offset_t		off,
	vm_size_t		len,
	memory_object_t		pager,
	vm_object_offset_t	file_off)
{
	vm_map_entry_t		entry;
	vm_object_t		obj;
	vm_object_offset_t	obj_off;
	vm_map_t		base_map;
	vm_offset_t		base_offset;
	vm_offset_t		original_offset;
	kern_return_t		kr;
	vm_size_t		local_len;

	base_map = map;
	base_offset = off;
	original_offset = off;
	kr = KERN_SUCCESS;
	vm_map_lock(map);
	while (vm_map_lookup_entry(map, off, &entry)) {
		local_len = len;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return KERN_SUCCESS;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			old_map = map;
			vm_map_lock(entry->object.sub_map);
			map = entry->object.sub_map;
			off = entry->offset + (off - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		obj = entry->object.vm_object;
		obj_off = (off - entry->vme_start) + entry->offset;
		while (obj->shadow) {
			obj_off += obj->shadow_offset;
			obj = obj->shadow;
		}
		if ((obj->pager_created) && (obj->pager == pager)) {
			if (((obj->paging_offset) + obj_off) == file_off) {
				if (off != base_offset) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				kr = KERN_ALREADY_WAITING;
			} else if (
				((file_off < ((obj->paging_offset) + obj_off)) &&
				((file_off + len) >
					((obj->paging_offset) + obj_off))) ||
				((file_off > ((obj->paging_offset) + obj_off)) &&
				(((((obj->paging_offset) + obj_off)) + len)
					> file_off))) {
				vm_map_unlock(map);
				return KERN_FAILURE;
			}
		} else if (kr != KERN_SUCCESS) {
			vm_map_unlock(map);	/* drop the map lock before failing */
			return KERN_FAILURE;
		}

		if (len < ((entry->vme_end - entry->vme_start) -
				(off - entry->vme_start))) {
			vm_map_unlock(map);
			return kr;
		} else {
			len -= (entry->vme_end - entry->vme_start) -
				(off - entry->vme_start);
		}
		base_offset = base_offset + (local_len - len);
		file_off = file_off + (local_len - len);
		off = base_offset;
		if (map != base_map) {
			vm_map_unlock(map);
			vm_map_lock(base_map);
			map = base_map;
		}
	}

	vm_map_unlock(map);
	return kr;
}