/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management.
 */

#include <cpus.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>

#include <string.h>
/*
 *	Variables exported by this module.
 */

vm_map_t	kernel_map;
vm_map_t	kernel_pageable_map;

/*
 *	Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);

kern_return_t
kmem_alloc_contig(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags)
{
	vm_object_t		object;
	vm_page_t		m, pages;
	kern_return_t		kr;
	vm_offset_t		addr, i;
	vm_object_offset_t	offset;
	vm_map_entry_t		entry;

	if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	size = round_page(size);
	if ((flags & KMA_KOBJECT) == 0) {
		object = vm_object_allocate(size);
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	else {
		object = kernel_object;
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}

	/*
	 *	Check for failure before touching the entry:
	 *	vm_map_find_space does not return a valid entry
	 *	unless it succeeds.
	 */
	if (kr != KERN_SUCCESS) {
		if ((flags & KMA_KOBJECT) == 0)
			vm_object_deallocate(object);
		return kr;
	}

	if ((flags & KMA_KOBJECT) == 0) {
		entry->object.vm_object = object;
		entry->offset = offset = 0;
	} else {
		offset = addr - VM_MIN_KERNEL_ADDRESS;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_object_reference(object);
			entry->object.vm_object = object;
			entry->offset = offset;
		}
	}

	vm_map_unlock(map);

	kr = cpm_allocate(size, &pages, FALSE);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, addr, addr + size, 0);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, addr, addr + size, VM_PROT_DEFAULT, FALSE))
		!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, addr, addr + size, 0);
		return kr;
	}
	if (object == kernel_object)
		vm_map_simplify(map, addr);

	*addrp = addr;
	return KERN_SUCCESS;
}
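
/*
 * Illustrative usage sketch (not part of the original source): how a
 * hypothetical driver might obtain a wired, physically contiguous DMA
 * buffer and release it.  The name dma_buf_example and the 64KB size
 * are assumptions for the example.
 */
#if 0	/* example only */
static kern_return_t
dma_buf_example(void)
{
	vm_offset_t	buf;
	kern_return_t	kr;

	/* no alignment mask; back the range with the kernel object */
	kr = kmem_alloc_contig(kernel_map, &buf, 64 * 1024,
			       (vm_offset_t) 0, KMA_KOBJECT);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... hand the buffer to the DMA engine ... */

	kmem_free(kernel_map, buf, 64 * 1024);
	return KERN_SUCCESS;
}
#endif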

/*
 *	Master entry point for allocating kernel memory.
 *	NOTE: this routine is _never_ interrupt safe.
 *
 *	map		: map to allocate into
 *	addrp		: pointer to start address of new memory
 *	size		: size of memory requested
 *	flags		: options
 *			  KMA_HERE		*addrp is base address, else "anywhere"
 *			  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *			  KMA_KOBJECT		use kernel_object
 */

kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object = VM_OBJECT_NULL;
	vm_map_entry_t		entry;
	vm_object_offset_t	offset;
	vm_offset_t		addr;
	vm_offset_t		i;
	kern_return_t		kr;

	size = round_page(size);
	if ((flags & KMA_KOBJECT) == 0) {
		/*
		 *	Allocate a new object.  We must do this before locking
		 *	the map, or risk deadlock with the default pager:
		 *		device_read_alloc uses kmem_alloc,
		 *		which tries to allocate an object,
		 *		which uses kmem_alloc_wired to get memory,
		 *		which blocks for pages.
		 *		then the default pager needs to read a block
		 *		to process a memory_object_data_write,
		 *		and device_read_alloc calls kmem_alloc
		 *		and deadlocks on the map lock.
		 */
		object = vm_object_allocate(size);
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	else {
		object = kernel_object;
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	if (kr != KERN_SUCCESS) {
		if ((flags & KMA_KOBJECT) == 0)
			vm_object_deallocate(object);
		return kr;
	}

	if ((flags & KMA_KOBJECT) == 0) {
		entry->object.vm_object = object;
		entry->offset = offset = 0;
	} else {
		offset = addr - VM_MIN_KERNEL_ADDRESS;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_object_reference(object);
			entry->object.vm_object = object;
			entry->offset = offset;
		}
	}

	/*
	 *	Since we have not given out this address yet,
	 *	it is safe to unlock the map.
	 */
	vm_map_unlock(map);

	vm_object_lock(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t	mem;

		while ((mem = vm_page_alloc(object,
					offset + (vm_object_offset_t)i))
			    == VM_PAGE_NULL) {
			if (flags & KMA_NOPAGEWAIT) {
				if (object == kernel_object)
					vm_object_page_remove(object, offset,
						offset + (vm_object_offset_t)i);
				vm_object_unlock(object);
				vm_map_remove(map, addr, addr + size, 0);
				return KERN_RESOURCE_SHORTAGE;
			}
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, addr, addr + size, VM_PROT_DEFAULT, FALSE))
		!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, addr, addr + size, 0);
		return (kr);
	}
	if (object == kernel_object)
		vm_map_simplify(map, addr);

	/*
	 *	Return the memory, not zeroed.
	 */
#if	(NCPUS > 1) && i860
	bzero( addr, size );
#endif	/* #if (NCPUS > 1) && i860 */
	*addrp = addr;
	return KERN_SUCCESS;
}
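
/*
 * Illustrative usage sketch (not part of the original source): a caller
 * that must not block for pages can combine KMA_KOBJECT with
 * KMA_NOPAGEWAIT and handle the shortage itself.  The function name is
 * an assumption for the example.
 */
#if 0	/* example only */
static kern_return_t
scratch_alloc_example(vm_offset_t *scratchp)
{
	kern_return_t	kr;

	kr = kernel_memory_allocate(kernel_map, scratchp, PAGE_SIZE,
				    (vm_offset_t) 0,
				    KMA_KOBJECT | KMA_NOPAGEWAIT);
	if (kr == KERN_RESOURCE_SHORTAGE) {
		/* no free pages right now; caller retries later */
	}
	return kr;
}
#endif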

/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */

kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, 0);
}

/*
 *	kmem_realloc:
 *
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
	vm_map_t	map,
	vm_offset_t	oldaddr,
	vm_size_t	oldsize,
	vm_offset_t	*newaddrp,
	vm_size_t	newsize)
{
	vm_offset_t	oldmin, oldmax;
	vm_offset_t	newaddr;
	vm_object_t	object;
	vm_map_entry_t	oldentry, newentry;
	kern_return_t	kr;

	oldmin = trunc_page(oldaddr);
	oldmax = round_page(oldaddr + oldsize);
	oldsize = oldmax - oldmin;
	newsize = round_page(newsize);

	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newaddr, newsize, (vm_offset_t) 0,
			       &newentry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/*
	 *	Find the VM object backing the old region.
	 */

	if (!vm_map_lookup_entry(map, oldmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	vm_object_lock(object);
	if (object->size != oldsize)
		panic("kmem_realloc");
	object->size = newsize;
	vm_object_unlock(object);

	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert (newentry->wired_count == 0);
	newentry->wired_count = 1;

	/*
	 *	Since we have not given out this address yet,
	 *	it is safe to unlock the map.  We are trusting
	 *	that nobody will play with either region.
	 */

	vm_map_unlock(map);

	/*
	 *	Remap the pages in the old region and
	 *	allocate more pages for the new region.
	 */

	kmem_remap_pages(object, 0,
			 newaddr, newaddr + oldsize,
			 VM_PROT_DEFAULT);
	kmem_alloc_pages(object, oldsize,
			 newaddr + oldsize, newaddr + newsize,
			 VM_PROT_DEFAULT);

	*newaddrp = newaddr;
	return KERN_SUCCESS;
}
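
/*
 * Illustrative usage sketch (not part of the original source): growing a
 * kmem_alloc'd buffer.  Because the old pages end up mapped twice, the
 * caller frees the old region once the new mapping is in use.  The
 * function name is an assumption for the example.
 */
#if 0	/* example only */
static kern_return_t
grow_buffer_example(vm_offset_t *bufp, vm_size_t *sizep)
{
	vm_offset_t	newbuf;
	kern_return_t	kr;

	kr = kmem_realloc(kernel_map, *bufp, *sizep, &newbuf, 2 * *sizep);
	if (kr != KERN_SUCCESS)
		return kr;

	/* old pages are now mapped twice; drop the old mapping */
	kmem_free(kernel_map, *bufp, *sizep);
	*bufp = newbuf;
	*sizep = 2 * *sizep;
	return KERN_SUCCESS;
}
#endif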

/*
 *	kmem_alloc_wired:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */

kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
}

/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_wired, except that the memory is aligned.
 *	The size should be a power of 2.
 */

kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned: size not aligned");
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
}
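
/*
 * Illustrative usage sketch (not part of the original source): a
 * power-of-2 size doubles as the alignment mask (size - 1), so a 16KB
 * request comes back 16KB-aligned.  The function name is an assumption
 * for the example.
 */
#if 0	/* example only */
static void
aligned_alloc_example(void)
{
	vm_offset_t	table;

	if (kmem_alloc_aligned(kernel_map, &table, 16 * 1024) == KERN_SUCCESS)
		assert((table & (16 * 1024 - 1)) == 0);
}
#endif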

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */

kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_offset_t	addr;
	kern_return_t	kr;

#ifndef normal
	addr = (vm_map_min(map)) + 0x1000;
#else
	addr = vm_map_min(map);
#endif
	kr = vm_map_enter(map, &addr, round_page(size),
			  (vm_offset_t) 0, TRUE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		return kr;

	*addrp = addr;
	return KERN_SUCCESS;
}
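
/*
 * Illustrative usage sketch (not part of the original source): pageable
 * kernel memory suits large, infrequently touched buffers; touching it
 * may fault, so it must not be used from interrupt context.  The
 * function name is an assumption for the example.
 */
#if 0	/* example only */
static void
pageable_alloc_example(void)
{
	vm_offset_t	bigbuf;

	if (kmem_alloc_pageable(kernel_pageable_map, &bigbuf,
				256 * 1024) == KERN_SUCCESS) {
		/* first touch of each page may block on a fault */
	}
}
#endif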

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */

void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t	kr;

	kr = vm_map_remove(map, trunc_page(addr),
			   round_page(addr + size), VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}

/*
 *	Allocate new wired pages in an object.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */

kern_return_t
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{
	/*
	 *	Mark the pmap region as not pageable.
	 */
	pmap_pageable(kernel_pmap, start, end, FALSE);

	while (start < end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Allocate a page
		 */
		while ((mem = vm_page_alloc(object, offset))
			 == VM_PAGE_NULL) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}

		/*
		 *	Wire it down
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 *	Enter it in the kernel pmap
		 */
		PMAP_ENTER(kernel_pmap, start, mem,
			   protection, TRUE);

		vm_object_lock(object);
		PAGE_WAKEUP_DONE(mem);
		vm_object_unlock(object);

		start += PAGE_SIZE;
		offset += PAGE_SIZE_64;
	}
	return KERN_SUCCESS;
}

/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{
	/*
	 *	Mark the pmap region as not pageable.
	 */
	pmap_pageable(kernel_pmap, start, end, FALSE);

	while (start < end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Find a page
		 */
		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */
		PMAP_ENTER(kernel_pmap, start, mem,
			   protection, TRUE);

		start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	anywhere	Can region be located anywhere in map
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	boolean_t	anywhere,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	kern_return_t	kr;

	size = round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	if (anywhere == TRUE)
		*addr = (vm_offset_t)vm_map_min(parent);
	kr = vm_map_enter(parent, addr, size,
			  (vm_offset_t) 0, anywhere,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), *addr, *addr + size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */

	kr = vm_map_submap(parent, *addr, *addr + size, map, *addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 *	See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, *addr, *addr + size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);	/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	*new_map = map;
	return (KERN_SUCCESS);
}
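
/*
 * Illustrative usage sketch (not part of the original source): boot-time
 * code typically carves dedicated submaps out of kernel_map this way.
 * The function name and size parameter are assumptions for the example.
 */
#if 0	/* example only */
static vm_map_t
submap_example(vm_size_t size)
{
	vm_map_t	my_map;
	vm_offset_t	base;

	if (kmem_suballoc(kernel_map, &base, size,
			  FALSE,	/* wired, not pageable */
			  TRUE,		/* place it anywhere */
			  &my_map) != KERN_SUCCESS)
		return VM_MAP_NULL;

	/* later kmem_alloc(my_map, ...) calls draw from this range */
	return my_map;
}
#endif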

/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	kernel_map = vm_map_create(pmap_kernel(),
				   VM_MIN_KERNEL_ADDRESS, end,
				   FALSE);

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */

	if (start != VM_MIN_KERNEL_ADDRESS) {
		vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;
		(void) vm_map_enter(kernel_map,
				    &addr, start - VM_MIN_KERNEL_ADDRESS,
				    (vm_offset_t) 0, TRUE,
				    VM_OBJECT_NULL,
				    (vm_object_offset_t) 0, FALSE,
				    VM_PROT_DEFAULT, VM_PROT_ALL,
				    VM_INHERIT_DEFAULT);
	}

	/*
	 *	Account for kernel memory (text, data, bss, vm shenanigans).
	 *	This may include inaccessible "holes" as determined by what
	 *	the machine-dependent init code includes in mem_size.
	 */
	vm_page_wire_count = (atop(mem_size) - (vm_page_free_count
						+ vm_page_active_count
						+ vm_page_inactive_count));
}


/*
 *	kmem_io_object_trunc:
 *
 *	Truncate an object vm_map_copy_t.
 *	Called by the scatter/gather list network code to remove pages from
 *	the tail end of a packet.  Also unwires the object's pages.
 */

kern_return_t
kmem_io_object_trunc(copy, new_size)
	vm_map_copy_t	copy;		/* IN/OUT copy object */
	register vm_size_t new_size;	/* IN new object size */
{
	register vm_size_t	offset, old_size;

	assert(copy->type == VM_MAP_COPY_OBJECT);

	old_size = (vm_size_t)round_page_64(copy->size);
	copy->size = new_size;
	new_size = round_page(new_size);

	vm_object_lock(copy->cpy_object);
	vm_object_page_remove(copy->cpy_object,
		(vm_object_offset_t)new_size, (vm_object_offset_t)old_size);
	for (offset = 0; offset < new_size; offset += PAGE_SIZE) {
		register vm_page_t	mem;

		if ((mem = vm_page_lookup(copy->cpy_object,
				(vm_object_offset_t)offset)) == VM_PAGE_NULL)
			panic("kmem_io_object_trunc: unable to find object page");

		/*
		 *	Make sure these pages are marked dirty
		 */
		mem->dirty = TRUE;
		vm_page_lock_queues();
		vm_page_unwire(mem);
		vm_page_unlock_queues();
	}
	copy->cpy_object->size = new_size;	/* adjust size of object */
	vm_object_unlock(copy->cpy_object);
	return(KERN_SUCCESS);
}
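
/*
 * Illustrative usage sketch (not part of the original source): network
 * code that received a maximally sized packet buffer can trim the copy
 * object to the bytes actually used.  pkt_copy and pkt_len are
 * assumptions for the example.
 */
#if 0	/* example only */
static void
packet_trim_example(vm_map_copy_t pkt_copy, vm_size_t pkt_len)
{
	/* shrink the copy object and unwire the discarded tail pages */
	if (kmem_io_object_trunc(pkt_copy, pkt_len) != KERN_SUCCESS)
		panic("packet trim failed");
}
#endif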

/*
 *	kmem_io_object_deallocate:
 *
 *	Free a vm_map_copy_t.
 *	Called by the scatter/gather list network code to free a packet.
 */

void
kmem_io_object_deallocate(
	vm_map_copy_t	copy)		/* IN/OUT copy object */
{
	kern_return_t	ret;

	/*
	 *	Clear out all the object pages (this will leave an empty object).
	 */
	ret = kmem_io_object_trunc(copy, 0);
	if (ret != KERN_SUCCESS)
		panic("kmem_io_object_deallocate: unable to truncate object");
	/*
	 *	...and discard the copy object.
	 */
	vm_map_copy_discard(copy);
}

/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
boolean_t
copyinmap(
	vm_map_t	map,
	vm_offset_t	fromaddr,
	vm_offset_t	toaddr,
	vm_size_t	length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy((void *)toaddr, (void *)fromaddr, length);
		return FALSE;
	}

	if (current_map() == map)
		return copyin((char *)fromaddr, (char *)toaddr, length);

	return TRUE;
}

/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
boolean_t
copyoutmap(
	vm_map_t	map,
	vm_offset_t	fromaddr,
	vm_offset_t	toaddr,
	vm_size_t	length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy((void *)toaddr, (void *)fromaddr, length);
		return FALSE;
	}

	if (current_map() == map)
		return copyout((char *)fromaddr, (char *)toaddr, length);

	return TRUE;
}
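
/*
 * Illustrative usage sketch (not part of the original source): both
 * routines follow the copyin/copyout convention of returning FALSE (0)
 * on success and non-zero on failure.  task_map, uaddr, and the function
 * name are assumptions for the example.
 */
#if 0	/* example only */
static kern_return_t
copymap_example(vm_map_t task_map, vm_offset_t uaddr)
{
	char	kbuf[128];

	/* pull bytes out of another map into a kernel buffer */
	if (copyinmap(task_map, uaddr, (vm_offset_t)kbuf, sizeof kbuf))
		return KERN_INVALID_ADDRESS;

	/* ... inspect or modify kbuf ... */

	/* push the (possibly modified) bytes back */
	if (copyoutmap(task_map, (vm_offset_t)kbuf, uaddr, sizeof kbuf))
		return KERN_INVALID_ADDRESS;
	return KERN_SUCCESS;
}
#endif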