/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <platforms.h>
#include <mach_kdb.h>

#include <mach/i386/vm_param.h>

#include <string.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/machine.h>
#include <mach/time_value.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/misc_protos.h>
#include <kern/cpu_data.h>
#include <kern/processor.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <i386/pmap.h>
#include <i386/ipl.h>
#include <i386/misc_protos.h>
#include <i386/mp_slave_boot.h>
#include <i386/cpuid.h>
#include <mach/thread_status.h>
#include <pexpert/i386/efi.h>
#include "i386_lowmem.h"

vm_size_t       mem_size = 0;
vm_offset_t     first_avail = 0;        /* first after page tables */
vm_offset_t     last_addr;

uint64_t        max_mem;                /* Size of physical memory (bytes), adjusted by maxmem */
uint64_t        mem_actual;
uint64_t        sane_size = 0;          /* Memory size to use for defaults calculations */

#define MAXBOUNCEPOOL   (128 * 1024 * 1024)
#define MAXLORESERVE    ( 32 * 1024 * 1024)
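
/*
 * As used in i386_vm_init() below: MAXBOUNCEPOOL is the default size of the
 * bounce-buffer pool reserved below max_valid_dma_address, and MAXLORESERVE
 * seeds the low-memory page pool (vm_lopage_poolsize) before the mbuf
 * cluster reserve is added.  Both defaults can be overridden with the
 * "maxbouncepool" and "maxloreserve" boot arguments, interpreted in MB.
 */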

extern int bsd_mbuf_cluster_reserve(void);


uint32_t        bounce_pool_base = 0;
uint32_t        bounce_pool_size = 0;

static void     reserve_bouncepool(uint32_t);


pmap_paddr_t    avail_start, avail_end;
vm_offset_t     virtual_avail, virtual_end;
static pmap_paddr_t     avail_remaining;
vm_offset_t     static_memory_end = 0;

#include <mach-o/loader.h>
vm_offset_t     edata, etext, end;

/*
 * _mh_execute_header is the mach_header for the currently executing
 * 32 bit kernel
 */
extern struct mach_header _mh_execute_header;
void *sectTEXTB; int sectSizeTEXT;
void *sectDATAB; int sectSizeDATA;
void *sectOBJCB; int sectSizeOBJC;
void *sectLINKB; int sectSizeLINK;
void *sectPRELINKB; int sectSizePRELINK;
void *sectHIBB; int sectSizeHIB;

extern void *getsegdatafromheader(struct mach_header *, const char *, int *);
extern struct segment_command *getsegbyname(const char *);
extern struct section *firstsect(struct segment_command *);
extern struct section *nextsect(struct segment_command *, struct section *);

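/*
 * Zero every S_ZEROFILL section in the kernel's __DATA segment (e.g. __bss
 * and __common), using the Mach-O helpers declared above, so that statically
 * allocated, uninitialized kernel data has its expected all-zero contents
 * even if the booter did not clear those sections itself.
 */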
void
i386_macho_zerofill(void)
{
        struct segment_command  *sgp;
        struct section          *sp;

        sgp = getsegbyname("__DATA");
        if (sgp) {
                sp = firstsect(sgp);
                if (sp) {
                        do {
                                if ((sp->flags & S_ZEROFILL))
                                        bzero((char *) sp->addr, sp->size);
                        } while ((sp = nextsect(sgp, sp)));
                }
        }

        return;
}
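
#if 0
/*
 * Illustrative sketch only -- not part of the original file and never
 * compiled.  It shows how the same getsegbyname()/firstsect()/nextsect()
 * walk used by i386_macho_zerofill() above can enumerate the __DATA
 * sections and report which of them are zero-filled.  Field names follow
 * <mach-o/loader.h>; the function name is hypothetical.
 */
static void
i386_macho_dump_data_sections(void)
{
        struct segment_command  *sgp = getsegbyname("__DATA");
        struct section          *sp;
        char                    name[17];

        if (sgp == NULL)
                return;
        for (sp = firstsect(sgp); sp != NULL; sp = nextsect(sgp, sp)) {
                /* sectname is not guaranteed to be NUL-terminated */
                strncpy(name, sp->sectname, 16);
                name[16] = '\0';
                kprintf("__DATA,%s: addr 0x%x size 0x%x%s\n",
                        name, sp->addr, sp->size,
                        (sp->flags & S_ZEROFILL) ? " (zerofill)" : "");
        }
}
#endif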

/*
 * Basic VM initialization.
 */
void
i386_vm_init(uint64_t   maxmem,
             boolean_t  IA32e,
             boot_args  *args)
{
        pmap_memory_region_t *pmptr;
        pmap_memory_region_t *prev_pmptr;
        EfiMemoryRange *mptr;
        unsigned int mcount;
        unsigned int msize;
        ppnum_t fap;
        unsigned int i;
        unsigned int safeboot;
        ppnum_t maxpg = 0;
        uint32_t pmap_type;
        uint32_t maxbouncepoolsize;
        uint32_t maxloreserve;
        uint32_t maxdmaaddr;

        /*
         * Now retrieve addresses for end, edata, and etext
         * from MACH-O headers.
         */

        sectTEXTB = (void *) getsegdatafromheader(
                &_mh_execute_header, "__TEXT", &sectSizeTEXT);
        sectDATAB = (void *) getsegdatafromheader(
                &_mh_execute_header, "__DATA", &sectSizeDATA);
        sectOBJCB = (void *) getsegdatafromheader(
                &_mh_execute_header, "__OBJC", &sectSizeOBJC);
        sectLINKB = (void *) getsegdatafromheader(
                &_mh_execute_header, "__LINKEDIT", &sectSizeLINK);
        sectHIBB = (void *)getsegdatafromheader(
                &_mh_execute_header, "__HIB", &sectSizeHIB);
        sectPRELINKB = (void *) getsegdatafromheader(
                &_mh_execute_header, "__PRELINK", &sectSizePRELINK);

        etext = (vm_offset_t) sectTEXTB + sectSizeTEXT;
        edata = (vm_offset_t) sectDATAB + sectSizeDATA;

        cpuid_set_info();
        vm_set_page_size();

        /*
         * Compute the memory size.
         */

        if ((1 == vm_himemory_mode) || PE_parse_boot_arg("-x", &safeboot)) {
                maxpg = 1 << (32 - I386_PGSHIFT);
        }
        avail_remaining = 0;
        avail_end = 0;
        pmptr = pmap_memory_regions;
        prev_pmptr = 0;
        pmap_memory_region_count = pmap_memory_region_current = 0;
        fap = (ppnum_t) i386_btop(first_avail);

        mptr = (EfiMemoryRange *)args->MemoryMap;
        if (args->MemoryMapDescriptorSize == 0)
                panic("Invalid memory map descriptor size");
        msize = args->MemoryMapDescriptorSize;
        mcount = args->MemoryMapSize / msize;
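        /*
         * The map must be walked using the descriptor size reported by the
         * booter (msize), not sizeof(EfiMemoryRange): EFI firmware may hand
         * back descriptors larger than the structure this kernel was built
         * against.
         */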

#define FOURGIG 0x0000000100000000ULL

        for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
                ppnum_t base, top;

                if (pmap_memory_region_count >= PMAP_MEMORY_REGIONS_SIZE) {
                        kprintf("WARNING: truncating memory region count at %d\n", pmap_memory_region_count);
                        break;
                }
                base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT);
                top = (ppnum_t) ((mptr->PhysicalStart) >> I386_PGSHIFT) + mptr->NumberOfPages - 1;

                switch (mptr->Type) {
                case kEfiLoaderCode:
                case kEfiLoaderData:
                case kEfiBootServicesCode:
                case kEfiBootServicesData:
                case kEfiConventionalMemory:
                        /*
                         * Consolidate usable memory types into one.
                         */
                        pmap_type = kEfiConventionalMemory;
                        sane_size += (uint64_t)(mptr->NumberOfPages << I386_PGSHIFT);
                        break;

                case kEfiRuntimeServicesCode:
                case kEfiRuntimeServicesData:
                case kEfiACPIReclaimMemory:
                case kEfiACPIMemoryNVS:
                case kEfiPalCode:
                        /*
                         * sane_size should reflect the total amount of physical ram
                         * in the system, not just the amount that is available for
                         * the OS to use
                         */
                        sane_size += (uint64_t)(mptr->NumberOfPages << I386_PGSHIFT);
                        /* fall thru */

                case kEfiUnusableMemory:
                case kEfiMemoryMappedIO:
                case kEfiMemoryMappedIOPortSpace:
                case kEfiReservedMemoryType:
                default:
                        pmap_type = mptr->Type;
                }

                kprintf("EFI region: type = %d/%d, base = 0x%x, top = 0x%x\n", mptr->Type, pmap_type, base, top);

                if (maxpg) {
                        if (base >= maxpg)
                                break;
                        top = (top > maxpg) ? maxpg : top;
                }

                /*
                 * handle each region
                 */
                if (kEfiACPIMemoryNVS == pmap_type) {
                        prev_pmptr = 0;
                        continue;
                } else if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME ||
                           pmap_type != kEfiConventionalMemory) {
                        prev_pmptr = 0;
                        continue;
                } else {
                        /*
                         * Usable memory region
                         */
                        if (top < I386_LOWMEM_RESERVED) {
                                prev_pmptr = 0;
                                continue;
                        }
                        if (top < fap) {
                                /*
                                 * entire range below first_avail
                                 * salvage some low memory pages
                                 * we use some very low memory at startup
                                 * mark as already allocated here
                                 */
                                if (base >= I386_LOWMEM_RESERVED)
                                        pmptr->base = base;
                                else
                                        pmptr->base = I386_LOWMEM_RESERVED;
                                /*
                                 * mark as already mapped
                                 */
                                pmptr->alloc = pmptr->end = top;
                                pmptr->type = pmap_type;
                        }
                        else if ( (base < fap) && (top > fap) ) {
                                /*
                                 * spans first_avail
                                 * put mem below first avail in table but
                                 * mark already allocated
                                 */
                                pmptr->base = base;
                                pmptr->alloc = pmptr->end = (fap - 1);
                                pmptr->type = pmap_type;
                                /*
                                 * we bump these here inline so the accounting
                                 * below works correctly
                                 */
                                pmptr++;
                                pmap_memory_region_count++;
                                pmptr->alloc = pmptr->base = fap;
                                pmptr->type = pmap_type;
                                pmptr->end = top;
                        }
                        else {
                                /*
                                 * entire range useable
                                 */
                                pmptr->alloc = pmptr->base = base;
                                pmptr->type = pmap_type;
                                pmptr->end = top;
                        }

                        if (i386_ptob(pmptr->end) > avail_end )
                                avail_end = i386_ptob(pmptr->end);

                        avail_remaining += (pmptr->end - pmptr->base);

                        /*
                         * Consolidate contiguous memory regions, if possible
                         */
                        if (prev_pmptr &&
                            pmptr->type == prev_pmptr->type &&
                            pmptr->base == pmptr->alloc &&
                            pmptr->base == (prev_pmptr->end + 1)) {
                                prev_pmptr->end = pmptr->end;
                        } else {
                                pmap_memory_region_count++;
                                prev_pmptr = pmptr;
                                pmptr++;
                        }
                }
        }


#ifdef PRINT_PMAP_MEMORY_TABLE
        {
        unsigned int j;
        pmap_memory_region_t *p = pmap_memory_regions;
        vm_offset_t region_start, region_end;
        vm_offset_t efi_start, efi_end;
        for (j = 0; j < pmap_memory_region_count; j++, p++) {
                kprintf("type %d base 0x%x alloc 0x%x top 0x%x\n", p->type,
                        p->base << I386_PGSHIFT, p->alloc << I386_PGSHIFT, p->end << I386_PGSHIFT);
                region_start = p->base << I386_PGSHIFT;
                region_end = (p->end << I386_PGSHIFT) - 1;
                mptr = args->MemoryMap;
                for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
                        if (mptr->Type != kEfiLoaderCode &&
                            mptr->Type != kEfiLoaderData &&
                            mptr->Type != kEfiBootServicesCode &&
                            mptr->Type != kEfiBootServicesData &&
                            mptr->Type != kEfiConventionalMemory) {
                                efi_start = (vm_offset_t)mptr->PhysicalStart;
                                efi_end = efi_start + ((vm_offset_t)mptr->NumberOfPages << I386_PGSHIFT) - 1;
                                if ((efi_start >= region_start && efi_start <= region_end) ||
                                    (efi_end >= region_start && efi_end <= region_end)) {
                                        kprintf(" *** Overlapping region with EFI runtime region %d\n", i);
                                }
                        }

                }
        }
        }
#endif

        avail_start = first_avail;
        mem_actual = sane_size;

#define MEG     (1024*1024)

        /*
         * For user visible memory size, round up to 128 MB - accounting for the various stolen memory
         * not reported by EFI.
         */

        sane_size = (sane_size + 128 * MEG - 1) & ~((uint64_t)(128 * MEG - 1));
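        /*
         * Example: a machine whose EFI map totals 1000 MB of usable and
         * reserved memory rounds up to 1024 MB here; 2040 MB rounds up
         * to 2048 MB.
         */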

        /*
         * if user set maxmem, reduce memory sizes
         */
        if ( (maxmem > (uint64_t)first_avail) && (maxmem < sane_size)) {
                ppnum_t discarded_pages = (sane_size - maxmem) >> I386_PGSHIFT;

                sane_size = maxmem;
                if (avail_remaining > discarded_pages)
                        avail_remaining -= discarded_pages;
                else
                        avail_remaining = 0;
        }

        /*
         * mem_size is only a 32 bit container... follow the PPC route
         * and pin it to a 2 Gbyte maximum
         */
        if (sane_size > (FOURGIG >> 1))
                mem_size = (vm_size_t)(FOURGIG >> 1);
        else
                mem_size = (vm_size_t)sane_size;
        max_mem = sane_size;

        kprintf("Physical memory %d MB\n", (uint32_t)(sane_size/MEG));

        if (!PE_parse_boot_arg("max_valid_dma_addr", &maxdmaaddr))
                max_valid_dma_address = 1024ULL * 1024ULL * 4096ULL;
        else
                max_valid_dma_address = ((uint64_t) maxdmaaddr) * 1024ULL * 1024ULL;
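        /*
         * The max_valid_dma_addr boot argument is given in MB; e.g.
         * max_valid_dma_addr=2048 caps DMA-valid physical addresses at
         * 2 GB.  The default of 1024 * 1024 * 4096 bytes is 4 GB.
         */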

        if (!PE_parse_boot_arg("maxbouncepool", &maxbouncepoolsize))
                maxbouncepoolsize = MAXBOUNCEPOOL;
        else
                maxbouncepoolsize = maxbouncepoolsize * (1024 * 1024);

        /*
         * bsd_mbuf_cluster_reserve depends on sane_size being set
         * in order to correctly determine the size of the mbuf pool
         * that will be reserved
         */
        if (!PE_parse_boot_arg("maxloreserve", &maxloreserve))
                maxloreserve = MAXLORESERVE + bsd_mbuf_cluster_reserve();
        else
                maxloreserve = maxloreserve * (1024 * 1024);


        if (avail_end >= max_valid_dma_address) {
                if (maxbouncepoolsize)
                        reserve_bouncepool(maxbouncepoolsize);

                if (maxloreserve)
                        vm_lopage_poolsize = maxloreserve / PAGE_SIZE;
        }

        /*
         * Initialize kernel physical map.
         * Kernel virtual address starts at VM_KERNEL_MIN_ADDRESS.
         */
        pmap_bootstrap(0, IA32e);
}


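/*
 * Report how many physical pages remain for pmap_next_page() to hand out
 * during bootstrap.
 */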
unsigned int
pmap_free_pages(void)
{
        return avail_remaining;
}


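/*
 * Hand out the next unallocated physical page from the boot-time region
 * table, skipping regions that have already been fully consumed.  Returns
 * FALSE once every usable page has been given out; this is the page source
 * the VM system uses while it is still bootstrapping, before the regular
 * free lists exist.
 */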
boolean_t
pmap_next_page(
               ppnum_t *pn)
{
        if (avail_remaining) {
                while (pmap_memory_region_current < pmap_memory_region_count) {
                        if (pmap_memory_regions[pmap_memory_region_current].alloc ==
                            pmap_memory_regions[pmap_memory_region_current].end) {
                                pmap_memory_region_current++;
                                continue;
                        }
                        *pn = pmap_memory_regions[pmap_memory_region_current].alloc++;
                        avail_remaining--;

                        return TRUE;
                }
        }
        return FALSE;
}


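/*
 * TRUE if the given physical page number falls within one of the
 * conventional-memory regions recorded in pmap_memory_regions.
 */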
boolean_t
pmap_valid_page(
                ppnum_t pn)
{
        unsigned int i;
        pmap_memory_region_t *pmptr = pmap_memory_regions;

        assert(pn);
        for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
                if ((pn >= pmptr->base) && (pn <= pmptr->end) && pmptr->type == kEfiConventionalMemory)
                        return TRUE;
        }
        return FALSE;
}


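/*
 * Carve the bounce pool out of the lowest-addressed conventional-memory
 * region that still has enough unallocated pages, recording its base and
 * size and charging the pages against avail_remaining.
 */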
static void
reserve_bouncepool(uint32_t bounce_pool_wanted)
{
        pmap_memory_region_t *pmptr = pmap_memory_regions;
        pmap_memory_region_t *lowest = NULL;
        unsigned int i;
        unsigned int pages_needed;

        pages_needed = bounce_pool_wanted / PAGE_SIZE;

        for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
                if ((pmptr->type == kEfiConventionalMemory) && ((pmptr->end - pmptr->alloc) >= pages_needed)) {
                        if ((lowest == NULL) || (pmptr->alloc < lowest->alloc))
                                lowest = pmptr;
                }
        }
        if (lowest != NULL) {
                bounce_pool_base = lowest->alloc * PAGE_SIZE;
                bounce_pool_size = bounce_pool_wanted;

                lowest->alloc += pages_needed;
                avail_remaining -= pages_needed;
        }
}