]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/i386_vm_init.c
866dfa1fb1ace6562e2f4af626756c4199a74145
[apple/xnu.git] / osfmk / i386 / i386_vm_init.c
1 /*
2 * Copyright (c) 2003-2008 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #include <platforms.h>
58 #include <mach_kdb.h>
59
60 #include <mach/i386/vm_param.h>
61
62 #include <string.h>
63 #include <mach/vm_param.h>
64 #include <mach/vm_prot.h>
65 #include <mach/machine.h>
66 #include <mach/time_value.h>
67 #include <kern/spl.h>
68 #include <kern/assert.h>
69 #include <kern/debug.h>
70 #include <kern/misc_protos.h>
71 #include <kern/cpu_data.h>
72 #include <kern/processor.h>
73 #include <vm/vm_page.h>
74 #include <vm/pmap.h>
75 #include <vm/vm_kern.h>
76 #include <i386/pmap.h>
77 #include <i386/misc_protos.h>
78 #include <i386/cpuid.h>
79 #include <mach/thread_status.h>
80 #include <pexpert/i386/efi.h>
81 #include <i386/i386_lowmem.h>
82 #include <i386/lowglobals.h>
83 #include <i386/pal_routines.h>
84
85 #include <mach-o/loader.h>
86 #include <libkern/kernel_mach_header.h>
87
vm_size_t mem_size = 0;          /* 32-bit "user visible" memory size; pinned to 2GB below */
pmap_paddr_t first_avail = 0;/* first after page tables */

uint64_t max_mem;     /* Size of physical memory (bytes), adjusted by maxmem */
uint64_t mem_actual;  /* sane_size before the round-up-to-128MB adjustment */
uint64_t sane_size = 0;  /* Memory size to use for defaults calculations */

/* Default size of the low-memory (DMA-capable) page reserve, in bytes */
#define MAXLORESERVE (32 * 1024 * 1024)

/* High-water marks maintained by the pmap_next_page* allocators below */
ppnum_t max_ppnum = 0;   /* highest physical page number handed out */
ppnum_t lowest_lo = 0;   /* lowest page handed out (any allocator) */
ppnum_t lowest_hi = 0;   /* lowest page handed out by pmap_next_page_hi() */
ppnum_t highest_hi = 0;  /* highest page handed out by pmap_next_page_hi() */

/* Bookkeeping for EFI_MEMORY_KERN_RESERVED ranges discovered in the memory map */
enum {PMAP_MAX_RESERVED_RANGES = 32};
uint32_t pmap_reserved_pages_allocated = 0;                       /* pages served from reserved ranges */
uint32_t pmap_reserved_range_indices[PMAP_MAX_RESERVED_RANGES];   /* indices into pmap_memory_regions[] */
uint32_t pmap_last_reserved_range_index = 0;                      /* number of valid entries above */
uint32_t pmap_reserved_ranges = 0;                                /* count of reserved ranges seen */

extern unsigned int bsd_mbuf_cluster_reserve(boolean_t *);

pmap_paddr_t avail_start, avail_end;     /* physical range available after boot allocations */
vm_offset_t virtual_avail, virtual_end;  /* kernel virtual range handed to the VM system */
static pmap_paddr_t avail_remaining;     /* pages still unallocated across all regions */
vm_offset_t static_memory_end = 0;

/* Kernel image layout markers, derived from the Mach-O headers in i386_vm_init() */
vm_offset_t sHIB, eHIB, stext, etext, sdata, edata, end;

/*
 * _mh_execute_header is the mach_header for the currently executing kernel
 */
void *sectTEXTB; unsigned long sectSizeTEXT;
void *sectDATAB; unsigned long sectSizeDATA;
void *sectOBJCB; unsigned long sectSizeOBJC;
void *sectLINKB; unsigned long sectSizeLINK;
void *sectPRELINKB; unsigned long sectSizePRELINK;
void *sectHIBB; unsigned long sectSizeHIB;
void *sectINITPTB; unsigned long sectSizeINITPT;

kernel_segment_command_t *segTEXT;
kernel_section_t *cursectTEXT, *lastsectTEXT;

/* Per-EFI-memory-type byte totals, accumulated while walking the memory map */
extern uint64_t firmware_Conventional_bytes;
extern uint64_t firmware_RuntimeServices_bytes;
extern uint64_t firmware_ACPIReclaim_bytes;
extern uint64_t firmware_ACPINVS_bytes;
extern uint64_t firmware_PalCode_bytes;
extern uint64_t firmware_Reserved_bytes;
extern uint64_t firmware_Unusable_bytes;
extern uint64_t firmware_other_bytes;
uint64_t firmware_MMIO_bytes;

#if DEBUG
#define PRINT_PMAP_MEMORY_TABLE
#endif /* DEBUG */
144 /*
145 * Basic VM initialization.
146 */
/*
 * i386_vm_init:
 *
 * Walk the EFI memory map supplied in the boot args and build the
 * pmap_memory_regions[] table of usable RAM, accumulating per-type
 * firmware byte counts along the way.  Also discovers the kernel image
 * layout from the Mach-O headers, computes the physical memory sizing
 * globals (sane_size / mem_actual / max_mem / mem_size), applies the
 * maxmem cap, configures the low-memory (sub-4GB) page reserve, and
 * finally calls pmap_bootstrap().
 *
 * maxmem: memory cap in bytes from the maxmem= boot-arg; 0 means no cap.
 * IA32e:  passed through to pmap_bootstrap().
 * args:   boot_args containing the EFI memory map and command line.
 */
void
i386_vm_init(uint64_t	maxmem,
	     boolean_t	IA32e,
	     boot_args	*args)
{
	pmap_memory_region_t *pmptr;
	pmap_memory_region_t *prev_pmptr;
	EfiMemoryRange *mptr;
	unsigned int mcount;
	unsigned int msize;
	ppnum_t fap;
	unsigned int i;
	unsigned int safeboot;
	ppnum_t maxpg = 0;
	uint32_t pmap_type;
	uint32_t maxloreserve;
	uint32_t maxdmaaddr;
	uint32_t  mbuf_reserve = 0;
	boolean_t mbuf_override = FALSE;
	boolean_t coalescing_permitted;
#if DEBUG
	kprintf("Boot args revision: %d version: %d",
		args->Revision, args->Version);
	kprintf(" commandline: \"");
	for(i=0; i<BOOT_LINE_LENGTH; i++)
		kprintf("%c", args->CommandLine[i]);
	kprintf("\"\n");
#endif

	/*
	 * Now retrieve addresses for end, edata, and etext 
	 * from MACH-O headers.
	 */

	sectTEXTB = (void *) getsegdatafromheader(
		&_mh_execute_header, "__TEXT", &sectSizeTEXT);
	sectDATAB = (void *) getsegdatafromheader(
		&_mh_execute_header, "__DATA", &sectSizeDATA);
	sectOBJCB = (void *) getsegdatafromheader(
		&_mh_execute_header, "__OBJC", &sectSizeOBJC);
	sectLINKB = (void *) getsegdatafromheader(
		&_mh_execute_header, "__LINKEDIT", &sectSizeLINK);
	sectHIBB = (void *)getsegdatafromheader(
		&_mh_execute_header, "__HIB", &sectSizeHIB);
	sectINITPTB = (void *)getsegdatafromheader(
		&_mh_execute_header, "__INITPT", &sectSizeINITPT);
	sectPRELINKB = (void *) getsegdatafromheader(
		&_mh_execute_header, "__PRELINK_TEXT", &sectSizePRELINK);

	segTEXT = getsegbynamefromheader(&_mh_execute_header, "__TEXT");
	cursectTEXT = lastsectTEXT = firstsect(segTEXT);
	/* Discover the last TEXT section within the TEXT segment */
	while ((cursectTEXT = nextsect(segTEXT, cursectTEXT)) != NULL) {
		lastsectTEXT = cursectTEXT;
	}

	sHIB  = (vm_offset_t) sectHIBB;
	eHIB  = (vm_offset_t) sectHIBB + sectSizeHIB;
	/* Zero-padded from ehib to stext if text is 2M-aligned */
	stext = (vm_offset_t) sectTEXTB;
	/* etext is rounded up to a page boundary past the last TEXT section */
	etext = (vm_offset_t) round_page_64(lastsectTEXT->addr + lastsectTEXT->size);
	/* Zero-padded from etext to sdata if text is 2M-aligned */
	sdata = (vm_offset_t) sectDATAB;
	edata = (vm_offset_t) sectDATAB + sectSizeDATA;

#if DEBUG
	kprintf("sectTEXTB    = %p\n", sectTEXTB);
	kprintf("sectDATAB    = %p\n", sectDATAB);
	kprintf("sectOBJCB    = %p\n", sectOBJCB);
	kprintf("sectLINKB    = %p\n", sectLINKB);
	kprintf("sectHIBB     = %p\n", sectHIBB);
	kprintf("sectPRELINKB = %p\n", sectPRELINKB);
	kprintf("eHIB         = %p\n", (void *) eHIB);
	kprintf("stext        = %p\n", (void *) stext);
	kprintf("etext        = %p\n", (void *) etext);
	kprintf("sdata        = %p\n", (void *) sdata);
	kprintf("edata        = %p\n", (void *) edata);
#endif

	vm_set_page_size();

	/*
	 * Compute the memory size.
	 */

	/*
	 * In himemory mode or safe boot ("-x"), cap usable pages at the
	 * 4GB boundary (1 << (32 - I386_PGSHIFT) pages).
	 */
	if ((1 == vm_himemory_mode) || PE_parse_boot_argn("-x", &safeboot, sizeof (safeboot))) {
	        maxpg = 1 << (32 - I386_PGSHIFT);
	}
	avail_remaining = 0;
	avail_end = 0;
	pmptr = pmap_memory_regions;
	prev_pmptr = 0;
	pmap_memory_region_count = pmap_memory_region_current = 0;
	fap = (ppnum_t) i386_btop(first_avail);

	mptr = (EfiMemoryRange *)ml_static_ptovirt((vm_offset_t)args->MemoryMap);
	if (args->MemoryMapDescriptorSize == 0)
	        panic("Invalid memory map descriptor size");
	msize = args->MemoryMapDescriptorSize;
	mcount = args->MemoryMapSize / msize;

#define FOURGIG 0x0000000100000000ULL
#define ONEGIG  0x0000000040000000ULL

	/*
	 * Main pass over the EFI memory map: each descriptor is msize bytes,
	 * so advance by msize (not sizeof(EfiMemoryRange)).
	 */
	for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
	        ppnum_t base, top;
		uint64_t region_bytes = 0;

		if (pmap_memory_region_count >= PMAP_MEMORY_REGIONS_SIZE) {
		        kprintf("WARNING: truncating memory region count at %d\n", pmap_memory_region_count);
			break;
		}
		/* base/top are inclusive physical page numbers for the descriptor */
		base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT);
		top = (ppnum_t) (((mptr->PhysicalStart) >> I386_PGSHIFT) + mptr->NumberOfPages - 1);

#if MR_RSV_TEST
		/* Test hook: artificially mark up to 4 ranges as kernel-reserved */
		static uint32_t nmr = 0;
		if ((base > 0x20000) && (nmr++ < 4))
			mptr->Attribute |= EFI_MEMORY_KERN_RESERVED;
#endif
		region_bytes = (uint64_t)(mptr->NumberOfPages << I386_PGSHIFT);
		pmap_type = mptr->Type;

		switch (mptr->Type) {
		case kEfiLoaderCode:
		case kEfiLoaderData:
		case kEfiBootServicesCode:
		case kEfiBootServicesData:
		case kEfiConventionalMemory:
		        /*
			 * Consolidate usable memory types into one.
			 */
		        pmap_type = kEfiConventionalMemory;
		        sane_size += region_bytes;
			firmware_Conventional_bytes += region_bytes;
			break;
			/*
			 * sane_size should reflect the total amount of physical
			 * RAM in the system, not just the amount that is
			 * available for the OS to use.
			 * FIXME:Consider deriving this value from SMBIOS tables
			 * rather than reverse engineering the memory map.
			 * Alternatively, see
			 * <rdar://problem/4642773> Memory map should
			 * describe all memory
			 * Firmware on some systems guarantees that the memory
			 * map is complete via the "RomReservedMemoryTracked"
			 * feature field--consult that where possible to
			 * avoid the "round up to 128M" workaround below.
			 */

		case kEfiRuntimeServicesCode:
		case kEfiRuntimeServicesData:
			firmware_RuntimeServices_bytes += region_bytes;
			sane_size += region_bytes;
			break;
		case kEfiACPIReclaimMemory:
			firmware_ACPIReclaim_bytes += region_bytes;
			sane_size += region_bytes;
			break;
		case kEfiACPIMemoryNVS:
			firmware_ACPINVS_bytes += region_bytes;
			sane_size += region_bytes;
			break;
		case kEfiPalCode:
			firmware_PalCode_bytes += region_bytes;
		        sane_size += region_bytes;
			break;

		case kEfiReservedMemoryType:
			firmware_Reserved_bytes += region_bytes;
			break;
		case kEfiUnusableMemory:
			firmware_Unusable_bytes += region_bytes;
			break;
		case kEfiMemoryMappedIO:
		case kEfiMemoryMappedIOPortSpace:
			firmware_MMIO_bytes += region_bytes;
			break;
		default:
			firmware_other_bytes += region_bytes;
			break;
		}

#if DEBUG
		kprintf("EFI region %d: type %u/%d, base 0x%x, top 0x%x\n",
			i, mptr->Type, pmap_type, base, top);
#endif

		if (maxpg) {
		        /* In the capped (safe boot / himemory) case, clip to 4GB */
		        if (base >= maxpg)
				break;
		        top = (top > maxpg) ? maxpg : top;
		}

		/*
		 * handle each region
		 */
		if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME ||
		    pmap_type != kEfiConventionalMemory) {
		        /* Non-RAM or runtime-services region: not usable, breaks coalescing */
		        prev_pmptr = 0;
			continue;
		} else {
		        /*
			 * Usable memory region
			 */
		        if (top < I386_LOWMEM_RESERVED ||
			    !pal_is_usable_memory(base, top)) {
			        prev_pmptr = 0;
				continue;
			}
			/*
			 * A range may be marked with with the
			 * EFI_MEMORY_KERN_RESERVED attribute
			 * on some systems, to indicate that the range
			 * must not be made available to devices.
			 */

			if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED) {
				if (++pmap_reserved_ranges > PMAP_MAX_RESERVED_RANGES) {
					panic("Too many reserved ranges %u\n", pmap_reserved_ranges);
				}
			}

			if (top < fap) {
			        /*
				 * entire range below first_avail
			         * salvage some low memory pages
				 * we use some very low memory at startup
				 * mark as already allocated here
				 */
			        if (base >= I386_LOWMEM_RESERVED)
:				        pmptr->base = base;
				else
				        pmptr->base = I386_LOWMEM_RESERVED;

				pmptr->end = top;


				if ((mptr->Attribute & EFI_MEMORY_KERN_RESERVED) &&
				    (top < I386_KERNEL_IMAGE_BASE_PAGE)) {
					/* Reserved sub-kernel-image range: keep it allocatable */
					pmptr->alloc = pmptr->base;
					pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
				}
				else {
					/*
					 * mark as already mapped
					 */
					pmptr->alloc = top;
				}
				pmptr->type = pmap_type;
				pmptr->attribute = mptr->Attribute;
			}
			else if ( (base < fap) && (top > fap) ) {
			        /*
				 * spans first_avail
				 * put mem below first avail in table but
				 * mark already allocated
				 */
			        pmptr->base = base;
				pmptr->alloc = pmptr->end = (fap - 1);
				pmptr->type = pmap_type;
				pmptr->attribute = mptr->Attribute;
				/*
				 * we bump these here inline so the accounting
				 * below works correctly
				 */
				pmptr++;
				pmap_memory_region_count++;

				pmptr->alloc = pmptr->base = fap;
				pmptr->type = pmap_type;
				pmptr->attribute = mptr->Attribute;
				pmptr->end = top;

				if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED)
					pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
			} else {
			        /*
				 * entire range useable
				 */
			        pmptr->alloc = pmptr->base = base;
				pmptr->type = pmap_type;
				pmptr->attribute = mptr->Attribute;
				pmptr->end = top;
				if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED)
					pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
			}

			if (i386_ptob(pmptr->end) > avail_end )
			        avail_end = i386_ptob(pmptr->end);

			avail_remaining += (pmptr->end - pmptr->base);
			/* Never merge a kernel-reserved range into a neighbor */
			coalescing_permitted = (prev_pmptr && (pmptr->attribute == prev_pmptr->attribute) && ((pmptr->attribute & EFI_MEMORY_KERN_RESERVED) == 0));
			/*
			 * Consolidate contiguous memory regions, if possible
			 */
			if (prev_pmptr &&
			    (pmptr->type == prev_pmptr->type) &&
			    (coalescing_permitted) &&
			    (pmptr->base == pmptr->alloc) &&
			    (pmptr->base == (prev_pmptr->end + 1)))
			{
				if (prev_pmptr->end == prev_pmptr->alloc)
					prev_pmptr->alloc = pmptr->base;
				prev_pmptr->end = pmptr->end;
			} else {
			        pmap_memory_region_count++;
				prev_pmptr = pmptr;
				pmptr++;
			}
		}
	}

#ifdef PRINT_PMAP_MEMORY_TABLE
	/* Debug dump: list the final regions and flag overlaps with EFI runtime ranges */
	{
        unsigned int j;
        pmap_memory_region_t *p = pmap_memory_regions;
        addr64_t region_start, region_end;
        addr64_t efi_start, efi_end;
        for (j=0;j<pmap_memory_region_count;j++, p++) {
            kprintf("pmap region %d type %d base 0x%llx alloc 0x%llx top 0x%llx\n", 
		    j, p->type,
                    (addr64_t) p->base  << I386_PGSHIFT,
    	            (addr64_t) p->alloc << I386_PGSHIFT,
    	            (addr64_t) p->end   << I386_PGSHIFT);
            region_start = (addr64_t) p->base << I386_PGSHIFT;
            region_end = ((addr64_t) p->end << I386_PGSHIFT) - 1;
	    mptr = (EfiMemoryRange *) ml_static_ptovirt((vm_offset_t)args->MemoryMap);
            for (i=0; i<mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
                if (mptr->Type != kEfiLoaderCode &&
                    mptr->Type != kEfiLoaderData &&
                    mptr->Type != kEfiBootServicesCode &&
                    mptr->Type != kEfiBootServicesData &&
                    mptr->Type != kEfiConventionalMemory) {
                efi_start = (addr64_t)mptr->PhysicalStart;
                efi_end = efi_start + ((vm_offset_t)mptr->NumberOfPages << I386_PGSHIFT) - 1;
                if ((efi_start >= region_start && efi_start <= region_end) ||
                    (efi_end >= region_start && efi_end <= region_end)) {
                    kprintf(" *** Overlapping region with EFI runtime region %d\n", i);
                }
              }
            }
          }
	}
#endif

	avail_start = first_avail;
	mem_actual = sane_size;

	/*
	 * For user visible memory size, round up to 128 Mb - accounting for the various stolen memory
	 * not reported by EFI.
	 */

	sane_size = (sane_size + 128 * MB - 1) & ~((uint64_t)(128 * MB - 1));

	/*
	 * We cap at KERNEL_MAXMEM bytes (currently 32GB for K32, 96GB for K64).
	 * Unless overriden by the maxmem= boot-arg
	 * -- which is a non-zero maxmem argument to this function.
	 */
	if (maxmem == 0 && sane_size > KERNEL_MAXMEM) {
		maxmem = KERNEL_MAXMEM;
		printf("Physical memory %lld bytes capped at %dGB\n",
			sane_size, (uint32_t) (KERNEL_MAXMEM/GB));
	}

	/*
	 * if user set maxmem, reduce memory sizes
	 */
	if ( (maxmem > (uint64_t)first_avail) && (maxmem < sane_size)) {
		ppnum_t discarded_pages  = (ppnum_t)((sane_size - maxmem) >> I386_PGSHIFT);
		ppnum_t	highest_pn = 0;
		ppnum_t	cur_alloc  = 0;
		uint64_t	pages_to_use;
		unsigned	cur_region = 0;

		sane_size = maxmem;

		if (avail_remaining > discarded_pages)
			avail_remaining -= discarded_pages;
		else
			avail_remaining = 0;

		pages_to_use = avail_remaining;

		/*
		 * Walk regions in order, consuming pages until the cap is
		 * reached; truncate the region where the cap lands and drop
		 * all regions beyond it.
		 */
		while (cur_region < pmap_memory_region_count && pages_to_use) {
		        for (cur_alloc = pmap_memory_regions[cur_region].alloc;
			     cur_alloc < pmap_memory_regions[cur_region].end && pages_to_use;
			     cur_alloc++) {
			        if (cur_alloc > highest_pn)
				        highest_pn = cur_alloc;
				pages_to_use--;
			}
			if (pages_to_use == 0)
			        pmap_memory_regions[cur_region].end = cur_alloc;

			cur_region++;
		}
		pmap_memory_region_count = cur_region;

		avail_end = i386_ptob(highest_pn + 1);
	}

	/*
	 * mem_size is only a 32 bit container... follow the PPC route
	 * and pin it to a 2 Gbyte maximum
	 */
	if (sane_size > (FOURGIG >> 1))
	        mem_size = (vm_size_t)(FOURGIG >> 1);
	else
	        mem_size = (vm_size_t)sane_size;
	max_mem = sane_size;

	kprintf("Physical memory %llu MB\n", sane_size/MB);

	max_valid_low_ppnum = (2 * GB) / PAGE_SIZE;

	if (!PE_parse_boot_argn("max_valid_dma_addr", &maxdmaaddr, sizeof (maxdmaaddr))) {
	        max_valid_dma_address = (uint64_t)4 * (uint64_t)GB;
	} else {
	        /* boot-arg value is in megabytes */
	        max_valid_dma_address = ((uint64_t) maxdmaaddr) * MB;

		if ((max_valid_dma_address / PAGE_SIZE) < max_valid_low_ppnum)
			max_valid_low_ppnum = (ppnum_t)(max_valid_dma_address / PAGE_SIZE);
	}
	if (avail_end >= max_valid_dma_address) {
		/*
		 * Memory extends past the DMA limit: set up the low-page
		 * free-list reserve so DMA-capable pages stay available.
		 */

		if (!PE_parse_boot_argn("maxloreserve", &maxloreserve, sizeof (maxloreserve))) {

			/* Scale the default reserve with total memory size */
			if (sane_size >= (ONEGIG * 15))
				maxloreserve = (MAXLORESERVE / PAGE_SIZE) * 4;
			else if (sane_size >= (ONEGIG * 7))
				maxloreserve = (MAXLORESERVE / PAGE_SIZE) * 2;
			else
				maxloreserve = MAXLORESERVE / PAGE_SIZE;

			mbuf_reserve = bsd_mbuf_cluster_reserve(&mbuf_override) / PAGE_SIZE;
		} else
			/* boot-arg value is in megabytes; convert to pages */
			maxloreserve = (maxloreserve * (1024 * 1024)) / PAGE_SIZE;

		if (maxloreserve) {
		        vm_lopage_free_limit = maxloreserve;
			
			if (mbuf_override == TRUE) {
				vm_lopage_free_limit += mbuf_reserve;
				vm_lopage_lowater = 0;
			} else
				vm_lopage_lowater = vm_lopage_free_limit / 16;

			vm_lopage_refill = TRUE;
			vm_lopage_needed = TRUE;
		}
	}
	
	/*
	 *	Initialize kernel physical map.
	 *	Kernel virtual address starts at VM_KERNEL_MIN_ADDRESS.
	 */
	kprintf("avail_remaining = 0x%lx\n", (unsigned long)avail_remaining);
	pmap_bootstrap(0, IA32e);
}
610
611
612 unsigned int
613 pmap_free_pages(void)
614 {
615 return (unsigned int)avail_remaining;
616 }
617
618
619 boolean_t pmap_next_page_reserved(ppnum_t *);
620
621 /*
622 * Pick a page from a "kernel private" reserved range; works around
623 * errata on some hardware.
624 */
625 boolean_t
626 pmap_next_page_reserved(ppnum_t *pn) {
627 if (pmap_reserved_ranges) {
628 uint32_t n;
629 pmap_memory_region_t *region;
630 for (n = 0; n < pmap_last_reserved_range_index; n++) {
631 uint32_t reserved_index = pmap_reserved_range_indices[n];
632 region = &pmap_memory_regions[reserved_index];
633 if (region->alloc < region->end) {
634 *pn = region->alloc++;
635 avail_remaining--;
636
637 if (*pn > max_ppnum)
638 max_ppnum = *pn;
639
640 if (lowest_lo == 0 || *pn < lowest_lo)
641 lowest_lo = *pn;
642
643 pmap_reserved_pages_allocated++;
644 #if DEBUG
645 if (region->alloc == region->end) {
646 kprintf("Exhausted reserved range index: %u, base: 0x%x end: 0x%x, type: 0x%x, attribute: 0x%llx\n", reserved_index, region->base, region->end, region->type, region->attribute);
647 }
648 #endif
649 return TRUE;
650 }
651 }
652 }
653 return FALSE;
654 }
655
656
657 boolean_t
658 pmap_next_page_hi(
659 ppnum_t *pn)
660 {
661 pmap_memory_region_t *region;
662 int n;
663
664 if (pmap_next_page_reserved(pn))
665 return TRUE;
666
667 if (avail_remaining) {
668 for (n = pmap_memory_region_count - 1; n >= 0; n--) {
669 region = &pmap_memory_regions[n];
670
671 if (region->alloc != region->end) {
672 *pn = region->alloc++;
673 avail_remaining--;
674
675 if (*pn > max_ppnum)
676 max_ppnum = *pn;
677
678 if (lowest_lo == 0 || *pn < lowest_lo)
679 lowest_lo = *pn;
680
681 if (lowest_hi == 0 || *pn < lowest_hi)
682 lowest_hi = *pn;
683
684 if (*pn > highest_hi)
685 highest_hi = *pn;
686
687 return TRUE;
688 }
689 }
690 }
691 return FALSE;
692 }
693
694
695 boolean_t
696 pmap_next_page(
697 ppnum_t *pn)
698 {
699 if (avail_remaining) while (pmap_memory_region_current < pmap_memory_region_count) {
700 if (pmap_memory_regions[pmap_memory_region_current].alloc ==
701 pmap_memory_regions[pmap_memory_region_current].end) {
702 pmap_memory_region_current++;
703 continue;
704 }
705 *pn = pmap_memory_regions[pmap_memory_region_current].alloc++;
706 avail_remaining--;
707
708 if (*pn > max_ppnum)
709 max_ppnum = *pn;
710
711 if (lowest_lo == 0 || *pn < lowest_lo)
712 lowest_lo = *pn;
713
714 return TRUE;
715 }
716 return FALSE;
717 }
718
719
720 boolean_t
721 pmap_valid_page(
722 ppnum_t pn)
723 {
724 unsigned int i;
725 pmap_memory_region_t *pmptr = pmap_memory_regions;
726
727 for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
728 if ( (pn >= pmptr->base) && (pn <= pmptr->end) )
729 return TRUE;
730 }
731 return FALSE;
732 }
733