/*
 * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */


#include <mach/i386/vm_param.h>

#include <string.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/machine.h>
#include <mach/time_value.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/misc_protos.h>
#include <kern/cpu_data.h>
#include <kern/processor.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <i386/cpuid.h>
#include <mach/thread_status.h>
#include <pexpert/i386/efi.h>
#include <i386/i386_lowmem.h>
#include <x86_64/lowglobals.h>
#include <i386/pal_routines.h>

#include <mach-o/loader.h>
#include <libkern/kernel_mach_header.h>


vm_size_t mem_size = 0;
pmap_paddr_t first_avail = 0;   /* first after page tables */

uint64_t max_mem;       /* Size of physical memory (bytes), adjusted by maxmem */
uint64_t mem_actual;
uint64_t sane_size = 0; /* Memory size for defaults calculations */
/*
 * KASLR parameters
 */
ppnum_t vm_kernel_base_page;
vm_offset_t vm_kernel_base;
vm_offset_t vm_kernel_top;
vm_offset_t vm_kernel_stext;
vm_offset_t vm_kernel_etext;
vm_offset_t vm_kernel_slide;
vm_offset_t vm_kernel_slid_base;
vm_offset_t vm_kernel_slid_top;
vm_offset_t vm_hib_base;
vm_offset_t vm_kext_base = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
vm_offset_t vm_kext_top = VM_MIN_KERNEL_ADDRESS;

vm_offset_t vm_prelink_stext;
vm_offset_t vm_prelink_etext;
vm_offset_t vm_prelink_sinfo;
vm_offset_t vm_prelink_einfo;
vm_offset_t vm_slinkedit;
vm_offset_t vm_elinkedit;

vm_offset_t vm_kernel_builtinkmod_text;
vm_offset_t vm_kernel_builtinkmod_text_end;

#define MAXLORESERVE (32 * 1024 * 1024)

ppnum_t max_ppnum = 0;
ppnum_t lowest_lo = 0;
ppnum_t lowest_hi = 0;
ppnum_t highest_hi = 0;

enum { PMAP_MAX_RESERVED_RANGES = 32 };
uint32_t pmap_reserved_pages_allocated = 0;
uint32_t pmap_reserved_range_indices[PMAP_MAX_RESERVED_RANGES];
uint32_t pmap_last_reserved_range_index = 0;
uint32_t pmap_reserved_ranges = 0;
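
/*
 * The pmap_reserved_* globals above track EFI ranges tagged
 * EFI_MEMORY_KERN_RESERVED: pmap_reserved_range_indices[] remembers which
 * entries of pmap_memory_regions[] carry the attribute, and
 * pmap_next_page_reserved() below satisfies "kernel private" page
 * requests from those ranges first.
 */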

extern unsigned int bsd_mbuf_cluster_reserve(boolean_t *);

pmap_paddr_t avail_start, avail_end;
vm_offset_t virtual_avail, virtual_end;
static pmap_paddr_t avail_remaining;
vm_offset_t static_memory_end = 0;

vm_offset_t sHIB, eHIB, stext, etext, sdata, edata, end, sconst, econst;

/*
 * _mh_execute_header is the mach_header for the currently executing kernel
 */
vm_offset_t segTEXTB; unsigned long segSizeTEXT;
vm_offset_t segDATAB; unsigned long segSizeDATA;
vm_offset_t segLINKB; unsigned long segSizeLINK;
vm_offset_t segPRELINKTEXTB; unsigned long segSizePRELINKTEXT;
vm_offset_t segPRELINKINFOB; unsigned long segSizePRELINKINFO;
vm_offset_t segHIBB; unsigned long segSizeHIB;
unsigned long segSizeConst;

static kernel_segment_command_t *segTEXT, *segDATA;
static kernel_section_t *cursectTEXT, *lastsectTEXT;
static kernel_segment_command_t *segCONST;

extern uint64_t firmware_Conventional_bytes;
extern uint64_t firmware_RuntimeServices_bytes;
extern uint64_t firmware_ACPIReclaim_bytes;
extern uint64_t firmware_ACPINVS_bytes;
extern uint64_t firmware_PalCode_bytes;
extern uint64_t firmware_Reserved_bytes;
extern uint64_t firmware_Unusable_bytes;
extern uint64_t firmware_other_bytes;
uint64_t firmware_MMIO_bytes;

/*
 * Linker magic to establish the highest address in the kernel.
 */
extern void *last_kernel_symbol;

boolean_t memmap = FALSE;
#if DEBUG || DEVELOPMENT
static void
kprint_memmap(vm_offset_t maddr, unsigned int msize, unsigned int mcount) {
    unsigned int i;
    unsigned int j;
    pmap_memory_region_t *p = pmap_memory_regions;
    EfiMemoryRange *mptr;
    addr64_t region_start, region_end;
    addr64_t efi_start, efi_end;

    for (j = 0; j < pmap_memory_region_count; j++, p++) {
        kprintf("pmap region %d type %d base 0x%llx alloc_up 0x%llx alloc_down 0x%llx top 0x%llx\n",
                j, p->type,
                (addr64_t) p->base << I386_PGSHIFT,
                (addr64_t) p->alloc_up << I386_PGSHIFT,
                (addr64_t) p->alloc_down << I386_PGSHIFT,
                (addr64_t) p->end << I386_PGSHIFT);
        region_start = (addr64_t) p->base << I386_PGSHIFT;
        region_end = ((addr64_t) p->end << I386_PGSHIFT) - 1;
        mptr = (EfiMemoryRange *) maddr;
        for (i = 0;
             i < mcount;
             i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
            if (mptr->Type != kEfiLoaderCode &&
                mptr->Type != kEfiLoaderData &&
                mptr->Type != kEfiBootServicesCode &&
                mptr->Type != kEfiBootServicesData &&
                mptr->Type != kEfiConventionalMemory) {
                efi_start = (addr64_t)mptr->PhysicalStart;
                efi_end = efi_start + ((vm_offset_t)mptr->NumberOfPages << I386_PGSHIFT) - 1;
                if ((efi_start >= region_start && efi_start <= region_end) ||
                    (efi_end >= region_start && efi_end <= region_end)) {
                    kprintf(" *** Overlapping region with EFI runtime region %d\n", i);
                }
            }
        }
    }
}
#define DPRINTF(x...) do { if (memmap) kprintf(x); } while (0)

#else

static void
kprint_memmap(vm_offset_t maddr, unsigned int msize, unsigned int mcount) {
#pragma unused(maddr, msize, mcount)
}

#define DPRINTF(x...)
#endif /* DEBUG || DEVELOPMENT */

/*
 * Basic VM initialization.
 */
void
i386_vm_init(uint64_t maxmem,
             boolean_t IA32e,
             boot_args *args)
{
    pmap_memory_region_t *pmptr;
    pmap_memory_region_t *prev_pmptr;
    EfiMemoryRange *mptr;
    unsigned int mcount;
    unsigned int msize;
    vm_offset_t maddr;
    ppnum_t fap;
    unsigned int i;
    ppnum_t maxpg = 0;
    uint32_t pmap_type;
    uint32_t maxloreserve;
    uint32_t maxdmaaddr;
    uint32_t mbuf_reserve = 0;
    boolean_t mbuf_override = FALSE;
    boolean_t coalescing_permitted;
    vm_kernel_base_page = i386_btop(args->kaddr);
    vm_offset_t base_address;
    vm_offset_t static_base_address;

    PE_parse_boot_argn("memmap", &memmap, sizeof(memmap));

    /*
     * Establish the KASLR parameters.
     */
    static_base_address = ml_static_ptovirt(KERNEL_BASE_OFFSET);
    base_address = ml_static_ptovirt(args->kaddr);
    vm_kernel_slide = base_address - static_base_address;
    if (args->kslide) {
        kprintf("KASLR slide: 0x%016lx dynamic\n", vm_kernel_slide);
        if (vm_kernel_slide != ((vm_offset_t)args->kslide))
            panic("Kernel base inconsistent with slide - rebased?");
    } else {
        /* No slide relative to on-disk symbols */
        kprintf("KASLR slide: 0x%016lx static and ignored\n",
                vm_kernel_slide);
        vm_kernel_slide = 0;
    }
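
    /*
     * Worked example (hypothetical addresses): if the unslid kernel is
     * linked to load at physical KERNEL_BASE_OFFSET (say 0x100000) and
     * the booter actually placed it at args->kaddr = 0x8100000, then
     * vm_kernel_slide = ml_static_ptovirt(0x8100000) -
     * ml_static_ptovirt(0x100000) = 0x8000000, and the booter-supplied
     * args->kslide must agree or we panic above.
     */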

    /*
     * Zero out local relocations to avoid confusing kxld.
     * TODO: might be better to move this code to OSKext::initialize
     */
    if (_mh_execute_header.flags & MH_PIE) {
        struct load_command *loadcmd;
        uint32_t cmd;

        loadcmd = (struct load_command *)((uintptr_t)&_mh_execute_header +
                                          sizeof (_mh_execute_header));

        for (cmd = 0; cmd < _mh_execute_header.ncmds; cmd++) {
            if (loadcmd->cmd == LC_DYSYMTAB) {
                struct dysymtab_command *dysymtab;

                dysymtab = (struct dysymtab_command *)loadcmd;
                dysymtab->nlocrel = 0;
                dysymtab->locreloff = 0;
                kprintf("Hiding local relocations\n");
                break;
            }
            loadcmd = (struct load_command *)((uintptr_t)loadcmd + loadcmd->cmdsize);
        }
    }
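
    /*
     * The walk above relies on the standard Mach-O layout: load commands
     * follow the mach_header contiguously, each cmdsize bytes long.
     * Clearing nlocrel/locreloff in LC_DYSYMTAB makes the local
     * relocation table appear empty to any later reader of the in-memory
     * header (such as kxld), which is what the "hiding" message refers to.
     */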

    /*
     * Now retrieve addresses for end, edata, and etext
     * from MACH-O headers.
     */
    segTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header,
                                                  "__TEXT", &segSizeTEXT);
    segDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header,
                                                  "__DATA", &segSizeDATA);
    segLINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header,
                                                  "__LINKEDIT", &segSizeLINK);
    segHIBB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header,
                                                 "__HIB", &segSizeHIB);
    segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header,
                                                         "__PRELINK_TEXT", &segSizePRELINKTEXT);
    segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header,
                                                         "__PRELINK_INFO", &segSizePRELINKINFO);
    segTEXT = getsegbynamefromheader(&_mh_execute_header,
                                     "__TEXT");
    segDATA = getsegbynamefromheader(&_mh_execute_header,
                                     "__DATA");
    segCONST = getsegbynamefromheader(&_mh_execute_header,
                                      "__CONST");
    cursectTEXT = lastsectTEXT = firstsect(segTEXT);
    /* Discover the last TEXT section within the TEXT segment */
    while ((cursectTEXT = nextsect(segTEXT, cursectTEXT)) != NULL) {
        lastsectTEXT = cursectTEXT;
    }
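
    /*
     * etext below is computed from the end of the last __TEXT section,
     * rounded up to a page, rather than from segSizeTEXT; presumably this
     * excludes any zero-fill padding the linker leaves between the final
     * section and the (possibly 2M-aligned) end of the segment.
     */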

    sHIB = segHIBB;
    eHIB = segHIBB + segSizeHIB;
    vm_hib_base = sHIB;
    /* Zero-padded from eHIB to stext if text is 2M-aligned */
    stext = segTEXTB;
    lowGlo.lgStext = stext;
    etext = (vm_offset_t) round_page_64(lastsectTEXT->addr + lastsectTEXT->size);
    /* Zero-padded from etext to sdata if text is 2M-aligned */
    sdata = segDATAB;
    edata = segDATAB + segSizeDATA;

    sconst = segCONST->vmaddr;
    segSizeConst = segCONST->vmsize;
    econst = sconst + segSizeConst;

    assert(((sconst | econst) & PAGE_MASK) == 0);
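
    /*
     * Both ends of __CONST must be page-aligned: memory protections are
     * applied at page granularity, so an unaligned boundary would let
     * read-only data share a page with mutable data.
     */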

    DPRINTF("segTEXTB = %p\n", (void *) segTEXTB);
    DPRINTF("segDATAB = %p\n", (void *) segDATAB);
    DPRINTF("segLINKB = %p\n", (void *) segLINKB);
    DPRINTF("segHIBB = %p\n", (void *) segHIBB);
    DPRINTF("segPRELINKTEXTB = %p\n", (void *) segPRELINKTEXTB);
    DPRINTF("segPRELINKINFOB = %p\n", (void *) segPRELINKINFOB);
    DPRINTF("sHIB = %p\n", (void *) sHIB);
    DPRINTF("eHIB = %p\n", (void *) eHIB);
    DPRINTF("stext = %p\n", (void *) stext);
    DPRINTF("etext = %p\n", (void *) etext);
    DPRINTF("sdata = %p\n", (void *) sdata);
    DPRINTF("edata = %p\n", (void *) edata);
    DPRINTF("sconst = %p\n", (void *) sconst);
    DPRINTF("econst = %p\n", (void *) econst);
    DPRINTF("kernel_top = %p\n", (void *) &last_kernel_symbol);

    vm_kernel_base = sHIB;
    vm_kernel_top = (vm_offset_t) &last_kernel_symbol;
    vm_kernel_stext = stext;
    vm_kernel_etext = etext;
    vm_prelink_stext = segPRELINKTEXTB;
    vm_prelink_etext = segPRELINKTEXTB + segSizePRELINKTEXT;
    vm_prelink_sinfo = segPRELINKINFOB;
    vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO;
    vm_slinkedit = segLINKB;
    vm_elinkedit = segLINKB + segSizeLINK;
    vm_kernel_slid_base = vm_kext_base + vm_kernel_slide;
    vm_kernel_slid_top = vm_prelink_einfo;

    vm_set_page_size();

    /*
     * Compute the memory size.
     */

    avail_remaining = 0;
    avail_end = 0;
    pmptr = pmap_memory_regions;
    prev_pmptr = 0;
    pmap_memory_region_count = pmap_memory_region_current = 0;
    fap = (ppnum_t) i386_btop(first_avail);

    maddr = ml_static_ptovirt((vm_offset_t)args->MemoryMap);
    mptr = (EfiMemoryRange *)maddr;
    if (args->MemoryMapDescriptorSize == 0)
        panic("Invalid memory map descriptor size");
    msize = args->MemoryMapDescriptorSize;
    mcount = args->MemoryMapSize / msize;

#define FOURGIG 0x0000000100000000ULL
#define ONEGIG  0x0000000040000000ULL

    for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
        ppnum_t base, top;
        uint64_t region_bytes = 0;

        if (pmap_memory_region_count >= PMAP_MEMORY_REGIONS_SIZE) {
            kprintf("WARNING: truncating memory region count at %d\n", pmap_memory_region_count);
            break;
        }
        base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT);
        top = (ppnum_t) (((mptr->PhysicalStart) >> I386_PGSHIFT) + mptr->NumberOfPages - 1);

        if (base == 0) {
            /*
             * Avoid having to deal with the edge case of the
             * very first possible physical page and the roll-over
             * to -1; just ignore that page.
             */
            kprintf("WARNING: ignoring first page in [0x%llx:0x%llx]\n", (uint64_t) base, (uint64_t) top);
            base++;
        }
        if (top + 1 == 0) {
            /*
             * Avoid having to deal with the edge case of the
             * very last possible physical page and the roll-over
             * to 0; just ignore that page.
             */
            kprintf("WARNING: ignoring last page in [0x%llx:0x%llx]\n", (uint64_t) base, (uint64_t) top);
            top--;
        }
        if (top < base) {
            /*
             * That was the only page in that region, so
             * ignore the whole region.
             */
            continue;
        }
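
        /*
         * For example (hypothetical map entry): a one-page region with
         * PhysicalStart 0 yields base = 0, top = 0; after base++ we have
         * base = 1 > top, so the whole region is skipped by the test
         * above rather than producing the empty range [1:0].
         */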

#if MR_RSV_TEST
        static uint32_t nmr = 0;
        if ((base > 0x20000) && (nmr++ < 4))
            mptr->Attribute |= EFI_MEMORY_KERN_RESERVED;
#endif
        region_bytes = (uint64_t)(mptr->NumberOfPages << I386_PGSHIFT);
        pmap_type = mptr->Type;

        switch (mptr->Type) {
        case kEfiLoaderCode:
        case kEfiLoaderData:
        case kEfiBootServicesCode:
        case kEfiBootServicesData:
        case kEfiConventionalMemory:
            /*
             * Consolidate usable memory types into one.
             */
            pmap_type = kEfiConventionalMemory;
            sane_size += region_bytes;
            firmware_Conventional_bytes += region_bytes;
            break;
            /*
             * sane_size should reflect the total amount of physical
             * RAM in the system, not just the amount that is
             * available for the OS to use.
             * We now get this value from SMBIOS tables
             * rather than reverse engineering the memory map.
             * But the legacy computation of "sane_size" is kept
             * for diagnostic information.
             */

        case kEfiRuntimeServicesCode:
        case kEfiRuntimeServicesData:
            firmware_RuntimeServices_bytes += region_bytes;
            sane_size += region_bytes;
            break;
        case kEfiACPIReclaimMemory:
            firmware_ACPIReclaim_bytes += region_bytes;
            sane_size += region_bytes;
            break;
        case kEfiACPIMemoryNVS:
            firmware_ACPINVS_bytes += region_bytes;
            sane_size += region_bytes;
            break;
        case kEfiPalCode:
            firmware_PalCode_bytes += region_bytes;
            sane_size += region_bytes;
            break;

        case kEfiReservedMemoryType:
            firmware_Reserved_bytes += region_bytes;
            break;
        case kEfiUnusableMemory:
            firmware_Unusable_bytes += region_bytes;
            break;
        case kEfiMemoryMappedIO:
        case kEfiMemoryMappedIOPortSpace:
            firmware_MMIO_bytes += region_bytes;
            break;
        default:
            firmware_other_bytes += region_bytes;
            break;
        }

        DPRINTF("EFI region %d: type %u/%d, base 0x%x, top 0x%x %s\n",
                i, mptr->Type, pmap_type, base, top,
                (mptr->Attribute & EFI_MEMORY_KERN_RESERVED) ? "RESERVED" :
                (mptr->Attribute & EFI_MEMORY_RUNTIME) ? "RUNTIME" : "");

        if (maxpg) {
            if (base >= maxpg)
                break;
            top = (top > maxpg) ? maxpg : top;
        }

        /*
         * handle each region
         */
        if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME ||
            pmap_type != kEfiConventionalMemory) {
            prev_pmptr = 0;
            continue;
        } else {
            /*
             * Usable memory region
             */
            if (top < I386_LOWMEM_RESERVED ||
                !pal_is_usable_memory(base, top)) {
                prev_pmptr = 0;
                continue;
            }
            /*
             * A range may be marked with the
             * EFI_MEMORY_KERN_RESERVED attribute
             * on some systems, to indicate that the range
             * must not be made available to devices.
             */

            if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED) {
                if (++pmap_reserved_ranges > PMAP_MAX_RESERVED_RANGES) {
                    panic("Too many reserved ranges %u\n", pmap_reserved_ranges);
                }
            }

            if (top < fap) {
                /*
                 * Entire range is below first_avail:
                 * salvage some low memory pages.
                 * We use some very low memory at startup,
                 * so mark it as already allocated here.
                 */
                if (base >= I386_LOWMEM_RESERVED)
                    pmptr->base = base;
                else
                    pmptr->base = I386_LOWMEM_RESERVED;

                pmptr->end = top;

                if ((mptr->Attribute & EFI_MEMORY_KERN_RESERVED) &&
                    (top < vm_kernel_base_page)) {
                    pmptr->alloc_up = pmptr->base;
                    pmptr->alloc_down = pmptr->end;
                    pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
                }
                else {
                    /*
                     * Mark the range as already mapped by leaving
                     * the cursors crossed (alloc_up > alloc_down),
                     * i.e. no free pages to hand out.
                     */
                    pmptr->alloc_up = top + 1;
                    pmptr->alloc_down = top;
                }
                pmptr->type = pmap_type;
                pmptr->attribute = mptr->Attribute;
            }
            else if ((base < fap) && (top > fap)) {
                /*
                 * Region spans first_avail: put the memory below
                 * first_avail in the table, but mark it as
                 * already allocated.
                 */
                pmptr->base = base;
                pmptr->end = (fap - 1);
                pmptr->alloc_up = pmptr->end + 1;
                pmptr->alloc_down = pmptr->end;
                pmptr->type = pmap_type;
                pmptr->attribute = mptr->Attribute;
                /*
                 * we bump these here inline so the accounting
                 * below works correctly
                 */
                pmptr++;
                pmap_memory_region_count++;

                pmptr->alloc_up = pmptr->base = fap;
                pmptr->type = pmap_type;
                pmptr->attribute = mptr->Attribute;
                pmptr->alloc_down = pmptr->end = top;

                if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED)
                    pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
            } else {
                /*
                 * entire range usable
                 */
                pmptr->alloc_up = pmptr->base = base;
                pmptr->type = pmap_type;
                pmptr->attribute = mptr->Attribute;
                pmptr->alloc_down = pmptr->end = top;
                if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED)
                    pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count;
            }

            if (i386_ptob(pmptr->end) > avail_end)
                avail_end = i386_ptob(pmptr->end);

            avail_remaining += (pmptr->end - pmptr->base);
            coalescing_permitted = (prev_pmptr &&
                                    (pmptr->attribute == prev_pmptr->attribute) &&
                                    ((pmptr->attribute & EFI_MEMORY_KERN_RESERVED) == 0));
            /*
             * Consolidate contiguous memory regions, if possible
             */
            if (prev_pmptr &&
                (pmptr->type == prev_pmptr->type) &&
                (coalescing_permitted) &&
                (pmptr->base == pmptr->alloc_up) &&
                (prev_pmptr->end == prev_pmptr->alloc_down) &&
                (pmptr->base == (prev_pmptr->end + 1)))
            {
                prev_pmptr->end = pmptr->end;
                prev_pmptr->alloc_down = pmptr->alloc_down;
            } else {
                pmap_memory_region_count++;
                prev_pmptr = pmptr;
                pmptr++;
            }
        }
    }
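
    /*
     * On exit from the scan, pmap_memory_regions[] holds the usable
     * conventional ranges in the order EFI reported them, with adjacent
     * same-type, same-attribute ranges coalesced unless marked
     * EFI_MEMORY_KERN_RESERVED.  Ranges wholly below first_avail keep
     * crossed cursors so early-boot pages are never handed out again,
     * except kernel-reserved low ranges, which stay open for
     * pmap_next_page_reserved().
     */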

    if (memmap) {
        kprint_memmap(maddr, msize, mcount);
    }

    avail_start = first_avail;
    mem_actual = args->PhysicalMemorySize;

    /*
     * For user-visible memory size, round up to 128 MB
     * - accounting for the various stolen memory not reported by EFI.
     * This is maintained for historical, comparison purposes but
     * we now use the memory size reported by EFI/Booter.
     */
    sane_size = (sane_size + 128 * MB - 1) & ~((uint64_t)(128 * MB - 1));
    if (sane_size != mem_actual)
        printf("mem_actual: 0x%llx\n legacy sane_size: 0x%llx\n",
               mem_actual, sane_size);
    sane_size = mem_actual;
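
    /*
     * The mask arithmetic above rounds up to the next 128 MB boundary;
     * e.g. (hypothetical) a summed sane_size of 0x1F6000000 (~7.84 GB)
     * becomes 0x1F8000000 before being discarded in favor of the
     * EFI-reported mem_actual.
     */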

    /*
     * We cap at KERNEL_MAXMEM bytes (currently 32GB for K32, 96GB for K64),
     * unless overridden by the maxmem= boot-arg
     * -- which arrives as a non-zero maxmem argument to this function.
     */
    if (maxmem == 0 && sane_size > KERNEL_MAXMEM) {
        maxmem = KERNEL_MAXMEM;
        printf("Physical memory %lld bytes capped at %dGB\n",
               sane_size, (uint32_t) (KERNEL_MAXMEM / GB));
    }

    /*
     * If the user set maxmem, reduce the memory sizes accordingly.
     */
    if ((maxmem > (uint64_t)first_avail) && (maxmem < sane_size)) {
        ppnum_t discarded_pages = (ppnum_t)((sane_size - maxmem) >> I386_PGSHIFT);
        ppnum_t highest_pn = 0;
        ppnum_t cur_end = 0;
        uint64_t pages_to_use;
        unsigned cur_region = 0;

        sane_size = maxmem;

        if (avail_remaining > discarded_pages)
            avail_remaining -= discarded_pages;
        else
            avail_remaining = 0;

        pages_to_use = avail_remaining;

        while (cur_region < pmap_memory_region_count && pages_to_use) {
            for (cur_end = pmap_memory_regions[cur_region].base;
                 cur_end < pmap_memory_regions[cur_region].end && pages_to_use;
                 cur_end++) {
                if (cur_end > highest_pn)
                    highest_pn = cur_end;
                pages_to_use--;
            }
            if (pages_to_use == 0) {
                pmap_memory_regions[cur_region].end = cur_end;
                pmap_memory_regions[cur_region].alloc_down = cur_end;
            }

            cur_region++;
        }
        pmap_memory_region_count = cur_region;

        avail_end = i386_ptob(highest_pn + 1);
    }
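
    /*
     * For example (hypothetical boot-arg): with 8 GB of usable memory and
     * maxmem=4294967296 (4 GB), roughly one million 4 KB pages are
     * discarded.  Regions are walked bottom-up, counting pages until the
     * budget runs out; the region that exhausts the budget is truncated
     * there and any higher regions are dropped entirely.
     */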

    /*
     * mem_size is only a 32 bit container... follow the PPC route
     * and pin it to a 2 Gbyte maximum
     */
    if (sane_size > (FOURGIG >> 1))
        mem_size = (vm_size_t)(FOURGIG >> 1);
    else
        mem_size = (vm_size_t)sane_size;
    max_mem = sane_size;

    kprintf("Physical memory %llu MB\n", sane_size / MB);

    max_valid_low_ppnum = (2 * GB) / PAGE_SIZE;

    if (!PE_parse_boot_argn("max_valid_dma_addr", &maxdmaaddr, sizeof(maxdmaaddr))) {
        max_valid_dma_address = (uint64_t)4 * (uint64_t)GB;
    } else {
        max_valid_dma_address = ((uint64_t) maxdmaaddr) * MB;

        if ((max_valid_dma_address / PAGE_SIZE) < max_valid_low_ppnum)
            max_valid_low_ppnum = (ppnum_t)(max_valid_dma_address / PAGE_SIZE);
    }
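
    /*
     * The max_valid_dma_addr boot-arg is in megabytes: e.g.
     * max_valid_dma_addr=1024 caps DMA-able memory at 1 GB and lowers
     * max_valid_low_ppnum from its 2 GB default to 0x40000 (with 4 KB
     * pages).  With no boot-arg, the DMA ceiling defaults to 4 GB.
     */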
    if (avail_end >= max_valid_dma_address) {

        if (!PE_parse_boot_argn("maxloreserve", &maxloreserve, sizeof(maxloreserve))) {

            if (sane_size >= (ONEGIG * 15))
                maxloreserve = (MAXLORESERVE / PAGE_SIZE) * 4;
            else if (sane_size >= (ONEGIG * 7))
                maxloreserve = (MAXLORESERVE / PAGE_SIZE) * 2;
            else
                maxloreserve = MAXLORESERVE / PAGE_SIZE;

#if SOCKETS
            mbuf_reserve = bsd_mbuf_cluster_reserve(&mbuf_override) / PAGE_SIZE;
#endif
        } else
            maxloreserve = (maxloreserve * (1024 * 1024)) / PAGE_SIZE;

        if (maxloreserve) {
            vm_lopage_free_limit = maxloreserve;

            if (mbuf_override == TRUE) {
                vm_lopage_free_limit += mbuf_reserve;
                vm_lopage_lowater = 0;
            } else
                vm_lopage_lowater = vm_lopage_free_limit / 16;

            vm_lopage_refill = TRUE;
            vm_lopage_needed = TRUE;
        }
    }
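
    /*
     * The low-page reserve scales with memory size: with 4 KB pages,
     * MAXLORESERVE (32 MB) yields 8192 reserved low pages on small
     * systems, doubled above 7 GB and quadrupled above 15 GB (32768
     * pages, i.e. 128 MB).  A maxloreserve= boot-arg, also in megabytes,
     * overrides this sizing; the low-water mark is 1/16 of the limit
     * unless the mbuf reserve forces it to zero.
     */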

    /*
     * Initialize kernel physical map.
     * Kernel virtual address starts at VM_KERNEL_MIN_ADDRESS.
     */
    kprintf("avail_remaining = 0x%lx\n", (unsigned long)avail_remaining);
    pmap_bootstrap(0, IA32e);
}


unsigned int
pmap_free_pages(void)
{
    return (unsigned int)avail_remaining;
}


boolean_t pmap_next_page_reserved(ppnum_t *);

/*
 * Pick a page from a "kernel private" reserved range; works around
 * errata on some hardware.
 */
boolean_t
pmap_next_page_reserved(ppnum_t *pn) {
    if (pmap_reserved_ranges) {
        uint32_t n;
        pmap_memory_region_t *region;
        for (n = 0; n < pmap_last_reserved_range_index; n++) {
            uint32_t reserved_index = pmap_reserved_range_indices[n];
            region = &pmap_memory_regions[reserved_index];
            if (region->alloc_up <= region->alloc_down) {
                *pn = region->alloc_up++;
                avail_remaining--;

                if (*pn > max_ppnum)
                    max_ppnum = *pn;

                if (lowest_lo == 0 || *pn < lowest_lo)
                    lowest_lo = *pn;

                pmap_reserved_pages_allocated++;
#if DEBUG
                if (region->alloc_up > region->alloc_down) {
                    kprintf("Exhausted reserved range index: %u, base: 0x%x end: 0x%x, type: 0x%x, attribute: 0x%llx\n", reserved_index, region->base, region->end, region->type, region->attribute);
                }
#endif
                return TRUE;
            }
        }
    }
    return FALSE;
}
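
/*
 * Illustrative cursor model (hypothetical values): a region spanning
 * physical pages [0x100 .. 0x1FF] starts with alloc_up = 0x100 and
 * alloc_down = 0x1FF.  pmap_next_page() and pmap_next_page_reserved()
 * take pages from the bottom (alloc_up++), pmap_next_page_hi() takes
 * them from the top (alloc_down--), and the region is exhausted once
 * the cursors cross (alloc_up > alloc_down).
 */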


boolean_t
pmap_next_page_hi(
    ppnum_t *pn)
{
    pmap_memory_region_t *region;
    int n;

    if (pmap_next_page_reserved(pn))
        return TRUE;

    if (avail_remaining) {
        for (n = pmap_memory_region_count - 1; n >= 0; n--) {
            region = &pmap_memory_regions[n];

            if (region->alloc_down >= region->alloc_up) {
                *pn = region->alloc_down--;
                avail_remaining--;

                if (*pn > max_ppnum)
                    max_ppnum = *pn;

                if (lowest_lo == 0 || *pn < lowest_lo)
                    lowest_lo = *pn;

                if (lowest_hi == 0 || *pn < lowest_hi)
                    lowest_hi = *pn;

                if (*pn > highest_hi)
                    highest_hi = *pn;

                return TRUE;
            }
        }
    }
    return FALSE;
}
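
/*
 * pmap_next_page_hi() walks regions from the top of memory down,
 * presumably so that early wired allocations consume high pages and
 * leave low pages free for the DMA/lopage pool sized above.
 */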


boolean_t
pmap_next_page(
    ppnum_t *pn)
{
    if (avail_remaining) {
        while (pmap_memory_region_current < pmap_memory_region_count) {
            if (pmap_memory_regions[pmap_memory_region_current].alloc_up >
                pmap_memory_regions[pmap_memory_region_current].alloc_down) {
                pmap_memory_region_current++;
                continue;
            }
            *pn = pmap_memory_regions[pmap_memory_region_current].alloc_up++;
            avail_remaining--;

            if (*pn > max_ppnum)
                max_ppnum = *pn;

            if (lowest_lo == 0 || *pn < lowest_lo)
                lowest_lo = *pn;

            return TRUE;
        }
    }
    return FALSE;
}


boolean_t
pmap_valid_page(
    ppnum_t pn)
{
    unsigned int i;
    pmap_memory_region_t *pmptr = pmap_memory_regions;

    for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
        if ((pn >= pmptr->base) && (pn <= pmptr->end))
            return TRUE;
    }
    return FALSE;
}