/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management.
 */

#include <cpus.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>

#include <string.h>
/*
 *	Variables exported by this module.
 */

vm_map_t	kernel_map;
vm_map_t	kernel_pageable_map;

/*
 *	Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);

kern_return_t
kmem_alloc_contig(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags)
{
	vm_object_t		object;
	vm_page_t		m, pages;
	kern_return_t		kr;
	vm_offset_t		addr, i;
	vm_object_offset_t	offset;
	vm_map_entry_t		entry;

	/* the only flag accepted here is KMA_KOBJECT */
	if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	size = round_page(size);
	if ((flags & KMA_KOBJECT) == 0) {
		object = vm_object_allocate(size);
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	else {
		object = kernel_object;
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}

	/* check the result before touching the (possibly invalid) entry */
	if (kr != KERN_SUCCESS) {
		if ((flags & KMA_KOBJECT) == 0)
			vm_object_deallocate(object);
		return kr;
	}

	if ((flags & KMA_KOBJECT) == 0) {
		entry->object.vm_object = object;
		entry->offset = offset = 0;
	} else {
		offset = addr - VM_MIN_KERNEL_ADDRESS;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_object_reference(object);
			entry->object.vm_object = object;
			entry->offset = offset;
		}
	}

	vm_map_unlock(map);

	kr = cpm_allocate(size, &pages, FALSE);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, addr, addr + size, 0);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, addr, addr + size, VM_PROT_DEFAULT, FALSE))
		!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, addr, addr + size, 0);
		return kr;
	}
	if (object == kernel_object)
		vm_map_simplify(map, addr);

	*addrp = addr;
	return KERN_SUCCESS;
}
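
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * a driver needing a physically contiguous, wired buffer could call
 * kmem_alloc_contig() as defined above.  The mask argument constrains
 * alignment; 0 asks for no particular alignment.  The function name
 * and buffer size below are made up for the example.
 */
static kern_return_t
example_grab_contig_buffer(
	vm_offset_t	*bufp)
{
	kern_return_t	kr;

	/* 64KB of contiguous, wired kernel memory in the kernel map */
	kr = kmem_alloc_contig(kernel_map, bufp, (vm_size_t)(64 * 1024),
			       (vm_offset_t) 0, 0);
	if (kr != KERN_SUCCESS)
		*bufp = 0;	/* no contiguous pages or no map space */
	return kr;
}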

/*
 *	Master entry point for allocating kernel memory.
 *	NOTE: this routine is _never_ interrupt safe.
 *
 *	map		: map to allocate into
 *	addrp		: pointer to start address of new memory
 *	size		: size of memory requested
 *	flags		: options
 *			  KMA_HERE		*addrp is base address, else "anywhere"
 *			  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *			  KMA_KOBJECT		use kernel_object
 */

kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object = VM_OBJECT_NULL;
	vm_map_entry_t		entry;
	vm_object_offset_t	offset;
	vm_offset_t		addr;
	vm_offset_t		i;
	kern_return_t		kr;

	size = round_page(size);
	if ((flags & KMA_KOBJECT) == 0) {
		/*
		 *	Allocate a new object.  We must do this before locking
		 *	the map, or risk deadlock with the default pager:
		 *		device_read_alloc uses kmem_alloc,
		 *		which tries to allocate an object,
		 *		which uses kmem_alloc_wired to get memory,
		 *		which blocks for pages.
		 *		then the default pager needs to read a block
		 *		to process a memory_object_data_write,
		 *		and device_read_alloc calls kmem_alloc
		 *		and deadlocks on the map lock.
		 */
		object = vm_object_allocate(size);
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	else {
		object = kernel_object;
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	if (kr != KERN_SUCCESS) {
		if ((flags & KMA_KOBJECT) == 0)
			vm_object_deallocate(object);
		return kr;
	}

	if ((flags & KMA_KOBJECT) == 0) {
		entry->object.vm_object = object;
		entry->offset = offset = 0;
	} else {
		offset = addr - VM_MIN_KERNEL_ADDRESS;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_object_reference(object);
			entry->object.vm_object = object;
			entry->offset = offset;
		}
	}

	/*
	 *	Since we have not given out this address yet,
	 *	it is safe to unlock the map.
	 */
	vm_map_unlock(map);

	vm_object_lock(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t	mem;

		while ((mem = vm_page_alloc(object,
				offset + (vm_object_offset_t)i))
			    == VM_PAGE_NULL) {
			if (flags & KMA_NOPAGEWAIT) {
				if (object == kernel_object)
					vm_object_page_remove(object, offset,
						offset + (vm_object_offset_t)i);
				vm_object_unlock(object);
				vm_map_remove(map, addr, addr + size, 0);
				return KERN_RESOURCE_SHORTAGE;
			}
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, addr, addr + size, VM_PROT_DEFAULT, FALSE))
		!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, addr, addr + size, 0);
		return (kr);
	}
	if (object == kernel_object)
		vm_map_simplify(map, addr);

	/*
	 *	Return the memory, not zeroed.
	 */
#if	(NCPUS > 1) && i860
	bzero( addr, size );
#endif	/* #if (NCPUS > 1) && i860 */
	*addrp = addr;
	return KERN_SUCCESS;
}
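
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * a path that must not sleep for free pages can pass KMA_NOPAGEWAIT
 * and handle KERN_RESOURCE_SHORTAGE itself.  The flag names are the
 * ones documented above (assumed to come from vm/vm_kern.h); the
 * wrapper below is made up.
 */
static kern_return_t
example_alloc_wired_nowait(
	vm_size_t	size,
	vm_offset_t	*addrp)
{
	kern_return_t	kr;

	kr = kernel_memory_allocate(kernel_map, addrp, size,
				    (vm_offset_t) 0,	/* no alignment mask */
				    KMA_KOBJECT | KMA_NOPAGEWAIT);
	if (kr == KERN_RESOURCE_SHORTAGE)
		*addrp = 0;	/* no free pages right now; caller retries later */
	return kr;
}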

/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */

kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, 0);
}

/*
 *	kmem_realloc:
 *
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
	vm_map_t	map,
	vm_offset_t	oldaddr,
	vm_size_t	oldsize,
	vm_offset_t	*newaddrp,
	vm_size_t	newsize)
{
	vm_offset_t	oldmin, oldmax;
	vm_offset_t	newaddr;
	vm_object_t	object;
	vm_map_entry_t	oldentry, newentry;
	kern_return_t	kr;

	oldmin = trunc_page(oldaddr);
	oldmax = round_page(oldaddr + oldsize);
	oldsize = oldmax - oldmin;
	newsize = round_page(newsize);

	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newaddr, newsize, (vm_offset_t) 0,
			       &newentry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/*
	 *	Find the VM object backing the old region.
	 */

	if (!vm_map_lookup_entry(map, oldmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	vm_object_lock(object);
	if (object->size != oldsize)
		panic("kmem_realloc");
	object->size = newsize;
	vm_object_unlock(object);

	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert (newentry->wired_count == 0);
	newentry->wired_count = 1;

	/*
	 *	Since we have not given out this address yet,
	 *	it is safe to unlock the map.  We are trusting
	 *	that nobody will play with either region.
	 */

	vm_map_unlock(map);

	/*
	 *	Remap the pages in the old region and
	 *	allocate more pages for the new region.
	 */

	kmem_remap_pages(object, 0,
			 newaddr, newaddr + oldsize,
			 VM_PROT_DEFAULT);
	kmem_alloc_pages(object, oldsize,
			 newaddr + oldsize, newaddr + newsize,
			 VM_PROT_DEFAULT);

	*newaddrp = newaddr;
	return KERN_SUCCESS;
}
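
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * growing a kmem_alloc'ed buffer.  As the comment above notes, the old
 * pages end up mapped twice on success, so the old region must still
 * be released with kmem_free (assumed declared in vm/vm_kern.h, and
 * defined later in this file).
 */
static kern_return_t
example_grow_buffer(
	vm_offset_t	*bufp,		/* in/out: buffer address */
	vm_size_t	oldsize,
	vm_size_t	newsize)
{
	vm_offset_t	newbuf;
	kern_return_t	kr;

	kr = kmem_realloc(kernel_map, *bufp, oldsize, &newbuf, newsize);
	if (kr != KERN_SUCCESS)
		return kr;			/* old buffer is untouched */

	kmem_free(kernel_map, *bufp, oldsize);	/* drop the old mapping */
	*bufp = newbuf;
	return KERN_SUCCESS;
}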

/*
 *	kmem_alloc_wired:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */

kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
}

/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_wired, except that the memory is aligned.
 *	The size should be a power of 2.
 */

kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned: size not aligned");
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
}
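
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * because kmem_alloc_aligned passes size - 1 as the mask, the block is
 * aligned to its own size, which must be a power of 2 or the routine
 * panics.  Rounding the request up to a power of 2 is the caller's job.
 */
static kern_return_t
example_alloc_naturally_aligned(
	vm_offset_t	*addrp)
{
	vm_size_t	size = 8 * PAGE_SIZE;	/* already a power of 2 */

	/* on success, (addr & (size - 1)) == 0 */
	return kmem_alloc_aligned(kernel_map, addrp, size);
}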

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */

kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_offset_t	addr;
	kern_return_t	kr;

#ifndef normal
	addr = (vm_map_min(map)) + 0x1000;
#else
	addr = vm_map_min(map);
#endif
	kr = vm_map_enter(map, &addr, round_page(size),
			  (vm_offset_t) 0, TRUE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		return kr;

	*addrp = addr;
	return KERN_SUCCESS;
}
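
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * kmem_alloc_pageable enters no object and wires nothing, so the
 * memory is only suitable for data that can tolerate a page fault on
 * first touch.  kernel_pageable_map is the exported map declared near
 * the top of this file; whether it is the right map depends entirely
 * on the caller.
 */
static kern_return_t
example_alloc_pageable_scratch(
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	/* must not be touched at interrupt level or with the pager blocked */
	return kmem_alloc_pageable(kernel_pageable_map, addrp, size);
}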

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */

void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t	kr;

	kr = vm_map_remove(map, trunc_page(addr),
			   round_page(addr + size), VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}
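
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * the usual round trip.  kmem_free must be given the same map and an
 * address/size pair covering the original allocation; both ends are
 * rounded to page boundaries here.
 */
static void
example_alloc_free_round_trip(void)
{
	vm_offset_t	addr;
	vm_size_t	size = 4 * PAGE_SIZE;

	if (kmem_alloc(kernel_map, &addr, size) != KERN_SUCCESS)
		return;			/* nothing to clean up */

	/* ... use the wired, non-zeroed memory at addr ... */

	kmem_free(kernel_map, addr, size);
}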

/*
 *	Allocate new wired pages in an object.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */

kern_return_t
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{
	/*
	 *	Mark the pmap region as not pageable.
	 */
	pmap_pageable(kernel_pmap, start, end, FALSE);

	while (start < end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Allocate a page
		 */
		while ((mem = vm_page_alloc(object, offset))
			 == VM_PAGE_NULL) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}

		/*
		 *	Wire it down
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 *	Enter it in the kernel pmap
		 */
		PMAP_ENTER(kernel_pmap, start, mem,
			   protection, TRUE);

		vm_object_lock(object);
		PAGE_WAKEUP_DONE(mem);
		vm_object_unlock(object);

		start += PAGE_SIZE;
		offset += PAGE_SIZE_64;
	}
	return KERN_SUCCESS;
}

/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{
	/*
	 *	Mark the pmap region as not pageable.
	 */
	pmap_pageable(kernel_pmap, start, end, FALSE);

	while (start < end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Find a page
		 */
		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */
		PMAP_ENTER(kernel_pmap, start, mem,
			   protection, TRUE);

		start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	anywhere	Can region be located anywhere in map
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	boolean_t	anywhere,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	kern_return_t	kr;

	size = round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	if (anywhere == TRUE)
		*addr = (vm_offset_t)vm_map_min(parent);
	kr = vm_map_enter(parent, addr, size,
			  (vm_offset_t) 0, anywhere,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), *addr, *addr + size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */

	kr = vm_map_submap(parent, *addr, *addr + size, map, *addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 *	See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, *addr, *addr + size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);	/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	*new_map = map;
	return (KERN_SUCCESS);
}
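
/*
 * Illustrative sketch (hypothetical subsystem init, not part of this
 * file): carving a private submap out of kernel_map with kmem_suballoc,
 * the way subsystem initialization code typically does.  The map
 * variable, function name, and size are made up; "anywhere" lets the
 * parent map choose the range.
 */
static vm_map_t example_submap;

static void
example_submap_init(void)
{
	vm_offset_t	min;
	kern_return_t	kr;

	kr = kmem_suballoc(kernel_map, &min, (vm_size_t)(16 * 1024 * 1024),
			   FALSE,	/* not pageable */
			   TRUE,	/* anywhere */
			   &example_submap);
	if (kr != KERN_SUCCESS)
		panic("example_submap_init");
	/* later allocations can now pass example_submap to kmem_alloc() */
}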

/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	kernel_map = vm_map_create(pmap_kernel(),
				   VM_MIN_KERNEL_ADDRESS, end,
				   FALSE);

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */

	if (start != VM_MIN_KERNEL_ADDRESS) {
		vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;
		(void) vm_map_enter(kernel_map,
				    &addr, start - VM_MIN_KERNEL_ADDRESS,
				    (vm_offset_t) 0, TRUE,
				    VM_OBJECT_NULL,
				    (vm_object_offset_t) 0, FALSE,
				    VM_PROT_DEFAULT, VM_PROT_ALL,
				    VM_INHERIT_DEFAULT);
	}

	/*
	 *	Account for kernel memory (text, data, bss, vm shenanigans).
	 *	This may include inaccessible "holes" as determined by what
	 *	the machine-dependent init code includes in mem_size.
	 */
	vm_page_wire_count = (atop(mem_size) - (vm_page_free_count
						+ vm_page_active_count
						+ vm_page_inactive_count));
}

/*
 *	kmem_io_map_copyout:
 *
 *	Establish temporary mapping in designated map for the memory
 *	passed in.  Memory format must be a page_list vm_map_copy.
 */

kern_return_t
kmem_io_map_copyout(
	vm_map_t	map,
	vm_offset_t	*addr,		/* actual addr of data */
	vm_size_t	*alloc_size,	/* size allocated */
	vm_map_copy_t	copy,
	vm_size_t	min_size,	/* Do at least this much */
	vm_prot_t	prot)		/* Protection of mapping */
{
	vm_offset_t	myaddr, offset;
	vm_size_t	mysize, copy_size;
	kern_return_t	ret;
	register
	vm_page_t	*page_list;
	vm_map_copy_t	new_copy;
	register
	int		i;

	assert(copy->type == VM_MAP_COPY_PAGE_LIST);
	assert(min_size != 0);

	/*
	 *	Figure out the size in vm pages.
	 */
	min_size += (vm_size_t)(copy->offset - trunc_page_64(copy->offset));
	min_size = round_page(min_size);
	mysize = (vm_size_t)(round_page_64(
			copy->offset + (vm_object_offset_t)copy->size) -
		trunc_page_64(copy->offset));

	/*
	 *	If total size is larger than one page list and
	 *	we don't have to do more than one page list, then
	 *	only do one page list.
	 *
	 * XXX	Could be much smarter about this ... like trimming length
	 * XXX	if we need more than one page list but not all of them.
	 */

	copy_size = ptoa(copy->cpy_npages);
	if (mysize > copy_size && copy_size > min_size)
		mysize = copy_size;

	/*
	 *	Allocate some address space in the map (must be kernel
	 *	space).
	 */
	myaddr = vm_map_min(map);
	ret = vm_map_enter(map, &myaddr, mysize,
			   (vm_offset_t) 0, TRUE,
			   VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			   prot, prot, VM_INHERIT_DEFAULT);

	if (ret != KERN_SUCCESS)
		return(ret);

	/*
	 *	Tell the pmap module that this will be wired, and
	 *	enter the mappings.
	 */
	pmap_pageable(vm_map_pmap(map), myaddr, myaddr + mysize, TRUE);

	*addr = myaddr + (vm_offset_t)
		(copy->offset - trunc_page_64(copy->offset));
	*alloc_size = mysize;

	offset = myaddr;
	page_list = &copy->cpy_page_list[0];
	while (TRUE) {
		for ( i = 0; i < copy->cpy_npages; i++, offset += PAGE_SIZE_64) {
			PMAP_ENTER(vm_map_pmap(map),
				   (vm_offset_t)offset, *page_list,
				   prot, TRUE);
			page_list++;
		}

		if (offset == (myaddr + mysize))
			break;

		/*
		 *	Onward to the next page_list.  The extend_cont
		 *	leaves the current page list's pages alone;
		 *	they'll be cleaned up at discard.  Reset this
		 *	copy's continuation to discard the next one.
		 */
		vm_map_copy_invoke_extend_cont(copy, &new_copy, &ret);

		if (ret != KERN_SUCCESS) {
			kmem_io_map_deallocate(map, myaddr, mysize);
			return(ret);
		}
		copy->cpy_cont = vm_map_copy_discard_cont;
		copy->cpy_cont_args = (vm_map_copyin_args_t) new_copy;
		assert(new_copy != VM_MAP_COPY_NULL);
		assert(new_copy->type == VM_MAP_COPY_PAGE_LIST);
		copy = new_copy;
		page_list = &copy->cpy_page_list[0];
	}

	return(ret);
}

/*
 *	kmem_io_map_deallocate:
 *
 *	Get rid of the mapping established by kmem_io_map_copyout.
 *	Assumes that addr and size have been rounded to page boundaries.
 */

void
kmem_io_map_deallocate(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{

	register vm_offset_t	va, end;

	end = round_page(addr + size);
	for (va = trunc_page(addr); va < end; va += PAGE_SIZE)
		pmap_change_wiring(vm_map_pmap(map), va, FALSE);

	/*
	 *	Remove the mappings.  The pmap_remove is needed.
	 */

	pmap_remove(vm_map_pmap(map), addr, addr + size);
	vm_map_remove(map, addr, addr + size, VM_MAP_REMOVE_KUNWIRE);
}


/*
 *	kmem_io_object_trunc:
 *
 *	Truncate an object vm_map_copy_t.
 *	Called by the scatter/gather list network code to remove pages from
 *	the tail end of a packet.  Also unwires the object's pages.
 */

kern_return_t
kmem_io_object_trunc(copy, new_size)
	vm_map_copy_t	copy;		/* IN/OUT copy object */
	register vm_size_t new_size;	/* IN new object size */
{
	register vm_size_t	offset, old_size;

	assert(copy->type == VM_MAP_COPY_OBJECT);

	old_size = (vm_size_t)round_page_64(copy->size);
	copy->size = new_size;
	new_size = round_page(new_size);

	vm_object_lock(copy->cpy_object);
	vm_object_page_remove(copy->cpy_object,
		(vm_object_offset_t)new_size, (vm_object_offset_t)old_size);
	for (offset = 0; offset < new_size; offset += PAGE_SIZE) {
		register vm_page_t	mem;

		if ((mem = vm_page_lookup(copy->cpy_object,
				(vm_object_offset_t)offset)) == VM_PAGE_NULL)
			panic("kmem_io_object_trunc: unable to find object page");

		/*
		 *	Make sure these pages are marked dirty
		 */
		mem->dirty = TRUE;
		vm_page_lock_queues();
		vm_page_unwire(mem);
		vm_page_unlock_queues();
	}
	copy->cpy_object->size = new_size;	/* adjust size of object */
	vm_object_unlock(copy->cpy_object);
	return(KERN_SUCCESS);
}

/*
 *	kmem_io_object_deallocate:
 *
 *	Free a vm_map_copy_t.
 *	Called by the scatter/gather list network code to free a packet.
 */

void
kmem_io_object_deallocate(
	vm_map_copy_t	copy)		/* IN/OUT copy object */
{
	kern_return_t	ret;

	/*
	 *	Clear out all the object pages (this will leave an empty object).
	 */
	ret = kmem_io_object_trunc(copy, 0);
	if (ret != KERN_SUCCESS)
		panic("kmem_io_object_deallocate: unable to truncate object");
	/*
	 *	...and discard the copy object.
	 */
	vm_map_copy_discard(copy);
}

/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
boolean_t
copyinmap(
	vm_map_t	map,
	vm_offset_t	fromaddr,
	vm_offset_t	toaddr,
	vm_size_t	length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy((void *)toaddr, (void *)fromaddr, length);
		return FALSE;
	}

	if (current_map() == map)
		return copyin((char *)fromaddr, (char *)toaddr, length);

	return TRUE;
}

/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
boolean_t
copyoutmap(
	vm_map_t	map,
	vm_offset_t	fromaddr,
	vm_offset_t	toaddr,
	vm_size_t	length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy((void *)toaddr, (void *)fromaddr, length);
		return FALSE;
	}

	if (current_map() == map)
		return copyout((char *)fromaddr, (char *)toaddr, length);

	return TRUE;
}
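
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * copyoutmap() above returns FALSE (0) on the kernel-pmap fast path,
 * the copyout() result when the target is the current map, and TRUE
 * when it cannot handle the map at all, so a caller treats any nonzero
 * return as "not copied".  The function name and payload are made up.
 */
static kern_return_t
example_publish_status(
	vm_map_t	task_map,
	vm_offset_t	user_buf)
{
	int		status = 1;	/* made-up payload */

	if (copyoutmap(task_map, (vm_offset_t)&status, user_buf,
		       sizeof status))
		return KERN_INVALID_ADDRESS;	/* copy not performed */
	return KERN_SUCCESS;
}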