/* (web-scrape artifact removed: gitweb page header for apple/xnu.git blob
 *  osfmk/i386/i386_vm_init.c, tarball xnu-792.10.96) */
1 /*
2 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50
51 #include <platforms.h>
52 #include <mach_kdb.h>
53
54 #include <mach/i386/vm_param.h>
55
56 #include <string.h>
57 #include <mach/vm_param.h>
58 #include <mach/vm_prot.h>
59 #include <mach/machine.h>
60 #include <mach/time_value.h>
61 #include <kern/spl.h>
62 #include <kern/assert.h>
63 #include <kern/debug.h>
64 #include <kern/misc_protos.h>
65 #include <kern/cpu_data.h>
66 #include <kern/processor.h>
67 #include <vm/vm_page.h>
68 #include <vm/pmap.h>
69 #include <vm/vm_kern.h>
70 #include <i386/pmap.h>
71 #include <i386/ipl.h>
72 #include <i386/misc_protos.h>
73 #include <i386/mp_slave_boot.h>
74 #include <i386/cpuid.h>
75 #include <mach/thread_status.h>
76 #include <pexpert/i386/efi.h>
77 #include "i386_lowmem.h"
78
79 vm_size_t mem_size = 0;
80 vm_offset_t first_avail = 0;/* first after page tables */
81 vm_offset_t last_addr;
82
83 uint64_t max_mem; /* Size of physical memory (bytes), adjusted by maxmem */
84 uint64_t mem_actual;
85 uint64_t sane_size = 0; /* Memory size to use for defaults calculations */
86
87 #define MAXBOUNCEPOOL (128 * 1024 * 1024)
88 #define MAXLORESERVE ( 32 * 1024 * 1024)
89
90 extern int bsd_mbuf_cluster_reserve(void);
91
92
93 uint32_t bounce_pool_base = 0;
94 uint32_t bounce_pool_size = 0;
95
96 static void reserve_bouncepool(uint32_t);
97
98
99 pmap_paddr_t avail_start, avail_end;
100 vm_offset_t virtual_avail, virtual_end;
101 static pmap_paddr_t avail_remaining;
102 vm_offset_t static_memory_end = 0;
103
104 #include <mach-o/loader.h>
105 vm_offset_t edata, etext, end;
106
107 /*
108 * _mh_execute_header is the mach_header for the currently executing
109 * 32 bit kernel
110 */
111 extern struct mach_header _mh_execute_header;
112 void *sectTEXTB; int sectSizeTEXT;
113 void *sectDATAB; int sectSizeDATA;
114 void *sectOBJCB; int sectSizeOBJC;
115 void *sectLINKB; int sectSizeLINK;
116 void *sectPRELINKB; int sectSizePRELINK;
117 void *sectHIBB; int sectSizeHIB;
118
119 extern void *getsegdatafromheader(struct mach_header *, const char *, int *);
120 extern struct segment_command *getsegbyname(const char *);
121 extern struct section *firstsect(struct segment_command *);
122 extern struct section *nextsect(struct segment_command *, struct section *);
123
124
125 void
126 i386_macho_zerofill(void)
127 {
128 struct segment_command *sgp;
129 struct section *sp;
130
131 sgp = getsegbyname("__DATA");
132 if (sgp) {
133 sp = firstsect(sgp);
134 if (sp) {
135 do {
136 if ((sp->flags & S_ZEROFILL))
137 bzero((char *) sp->addr, sp->size);
138 } while ((sp = nextsect(sgp, sp)));
139 }
140 }
141
142 return;
143 }
144
145 /*
146 * Basic VM initialization.
147 */
148 void
149 i386_vm_init(uint64_t maxmem,
150 boolean_t IA32e,
151 boot_args *args)
152 {
153 pmap_memory_region_t *pmptr;
154 pmap_memory_region_t *prev_pmptr;
155 EfiMemoryRange *mptr;
156 unsigned int mcount;
157 unsigned int msize;
158 ppnum_t fap;
159 unsigned int i;
160 unsigned int safeboot;
161 ppnum_t maxpg = 0;
162 uint32_t pmap_type;
163 uint32_t maxbouncepoolsize;
164 uint32_t maxloreserve;
165 uint32_t maxdmaaddr;
166
167 /*
168 * Now retrieve addresses for end, edata, and etext
169 * from MACH-O headers.
170 */
171
172 sectTEXTB = (void *) getsegdatafromheader(
173 &_mh_execute_header, "__TEXT", &sectSizeTEXT);
174 sectDATAB = (void *) getsegdatafromheader(
175 &_mh_execute_header, "__DATA", &sectSizeDATA);
176 sectOBJCB = (void *) getsegdatafromheader(
177 &_mh_execute_header, "__OBJC", &sectSizeOBJC);
178 sectLINKB = (void *) getsegdatafromheader(
179 &_mh_execute_header, "__LINKEDIT", &sectSizeLINK);
180 sectHIBB = (void *)getsegdatafromheader(
181 &_mh_execute_header, "__HIB", &sectSizeHIB);
182 sectPRELINKB = (void *) getsegdatafromheader(
183 &_mh_execute_header, "__PRELINK", &sectSizePRELINK);
184
185 etext = (vm_offset_t) sectTEXTB + sectSizeTEXT;
186 edata = (vm_offset_t) sectDATAB + sectSizeDATA;
187
188 cpuid_set_info();
189 vm_set_page_size();
190
191 /*
192 * Compute the memory size.
193 */
194
195 if ((1 == vm_himemory_mode) || PE_parse_boot_arg("-x", &safeboot)) {
196 maxpg = 1 << (32 - I386_PGSHIFT);
197 }
198 avail_remaining = 0;
199 avail_end = 0;
200 pmptr = pmap_memory_regions;
201 prev_pmptr = 0;
202 pmap_memory_region_count = pmap_memory_region_current = 0;
203 fap = (ppnum_t) i386_btop(first_avail);
204
205 mptr = (EfiMemoryRange *)args->MemoryMap;
206 if (args->MemoryMapDescriptorSize == 0)
207 panic("Invalid memory map descriptor size");
208 msize = args->MemoryMapDescriptorSize;
209 mcount = args->MemoryMapSize / msize;
210
211 #define FOURGIG 0x0000000100000000ULL
212
213 for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
214 ppnum_t base, top;
215
216 if (pmap_memory_region_count >= PMAP_MEMORY_REGIONS_SIZE) {
217 kprintf("WARNING: truncating memory region count at %d\n", pmap_memory_region_count);
218 break;
219 }
220 base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT);
221 top = (ppnum_t) ((mptr->PhysicalStart) >> I386_PGSHIFT) + mptr->NumberOfPages - 1;
222
223 switch (mptr->Type) {
224 case kEfiLoaderCode:
225 case kEfiLoaderData:
226 case kEfiBootServicesCode:
227 case kEfiBootServicesData:
228 case kEfiConventionalMemory:
229 /*
230 * Consolidate usable memory types into one.
231 */
232 pmap_type = kEfiConventionalMemory;
233 sane_size += (uint64_t)(mptr->NumberOfPages << I386_PGSHIFT);
234 break;
235
236 case kEfiRuntimeServicesCode:
237 case kEfiRuntimeServicesData:
238 case kEfiACPIReclaimMemory:
239 case kEfiACPIMemoryNVS:
240 case kEfiPalCode:
241 /*
242 * sane_size should reflect the total amount of physical ram
243 * in the system, not just the amount that is available for
244 * the OS to use
245 */
246 sane_size += (uint64_t)(mptr->NumberOfPages << I386_PGSHIFT);
247 /* fall thru */
248
249 case kEfiUnusableMemory:
250 case kEfiMemoryMappedIO:
251 case kEfiMemoryMappedIOPortSpace:
252 case kEfiReservedMemoryType:
253 default:
254 pmap_type = mptr->Type;
255 }
256
257 kprintf("EFI region: type = %d/%d, base = 0x%x, top = 0x%x\n", mptr->Type, pmap_type, base, top);
258
259 if (maxpg) {
260 if (base >= maxpg)
261 break;
262 top = (top > maxpg) ? maxpg : top;
263 }
264
265 /*
266 * handle each region
267 */
268 if (kEfiACPIMemoryNVS == pmap_type) {
269 prev_pmptr = 0;
270 continue;
271 } else if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME ||
272 pmap_type != kEfiConventionalMemory) {
273 prev_pmptr = 0;
274 continue;
275 } else {
276 /*
277 * Usable memory region
278 */
279 if (top < I386_LOWMEM_RESERVED) {
280 prev_pmptr = 0;
281 continue;
282 }
283 if (top < fap) {
284 /*
285 * entire range below first_avail
286 * salvage some low memory pages
287 * we use some very low memory at startup
288 * mark as already allocated here
289 */
290 if (base >= I386_LOWMEM_RESERVED)
291 pmptr->base = base;
292 else
293 pmptr->base = I386_LOWMEM_RESERVED;
294 /*
295 * mark as already mapped
296 */
297 pmptr->alloc = pmptr->end = top;
298 pmptr->type = pmap_type;
299 }
300 else if ( (base < fap) && (top > fap) ) {
301 /*
302 * spans first_avail
303 * put mem below first avail in table but
304 * mark already allocated
305 */
306 pmptr->base = base;
307 pmptr->alloc = pmptr->end = (fap - 1);
308 pmptr->type = pmap_type;
309 /*
310 * we bump these here inline so the accounting
311 * below works correctly
312 */
313 pmptr++;
314 pmap_memory_region_count++;
315 pmptr->alloc = pmptr->base = fap;
316 pmptr->type = pmap_type;
317 pmptr->end = top;
318 }
319 else {
320 /*
321 * entire range useable
322 */
323 pmptr->alloc = pmptr->base = base;
324 pmptr->type = pmap_type;
325 pmptr->end = top;
326 }
327
328 if (i386_ptob(pmptr->end) > avail_end )
329 avail_end = i386_ptob(pmptr->end);
330
331 avail_remaining += (pmptr->end - pmptr->base);
332
333 /*
334 * Consolidate contiguous memory regions, if possible
335 */
336 if (prev_pmptr &&
337 pmptr->type == prev_pmptr->type &&
338 pmptr->base == pmptr->alloc &&
339 pmptr->base == (prev_pmptr->end + 1)) {
340 prev_pmptr->end = pmptr->end;
341 } else {
342 pmap_memory_region_count++;
343 prev_pmptr = pmptr;
344 pmptr++;
345 }
346 }
347 }
348
349
350 #ifdef PRINT_PMAP_MEMORY_TABLE
351 {
352 unsigned int j;
353 pmap_memory_region_t *p = pmap_memory_regions;
354 vm_offset_t region_start, region_end;
355 vm_offset_t efi_start, efi_end;
356 for (j=0;j<pmap_memory_region_count;j++, p++) {
357 kprintf("type %d base 0x%x alloc 0x%x top 0x%x\n", p->type,
358 p->base << I386_PGSHIFT, p->alloc << I386_PGSHIFT, p->end << I386_PGSHIFT);
359 region_start = p->base << I386_PGSHIFT;
360 region_end = (p->end << I386_PGSHIFT) - 1;
361 mptr = args->MemoryMap;
362 for (i=0; i<mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
363 if (mptr->Type != kEfiLoaderCode &&
364 mptr->Type != kEfiLoaderData &&
365 mptr->Type != kEfiBootServicesCode &&
366 mptr->Type != kEfiBootServicesData &&
367 mptr->Type != kEfiConventionalMemory) {
368 efi_start = (vm_offset_t)mptr->PhysicalStart;
369 efi_end = efi_start + ((vm_offset_t)mptr->NumberOfPages << I386_PGSHIFT) - 1;
370 if ((efi_start >= region_start && efi_start <= region_end) ||
371 (efi_end >= region_start && efi_end <= region_end)) {
372 kprintf(" *** Overlapping region with EFI runtime region %d\n", i);
373 }
374 }
375
376 }
377 }
378 }
379 #endif
380
381 avail_start = first_avail;
382 mem_actual = sane_size;
383
384 #define MEG (1024*1024)
385
386 /*
387 * For user visible memory size, round up to 128 Mb - accounting for the various stolen memory
388 * not reported by EFI.
389 */
390
391 sane_size = (sane_size + 128 * MEG - 1) & ~((uint64_t)(128 * MEG - 1));
392
393 /*
394 * if user set maxmem, reduce memory sizes
395 */
396 if ( (maxmem > (uint64_t)first_avail) && (maxmem < sane_size)) {
397 ppnum_t discarded_pages = (sane_size - maxmem) >> I386_PGSHIFT;
398 sane_size = maxmem;
399 if (avail_remaining > discarded_pages)
400 avail_remaining -= discarded_pages;
401 else
402 avail_remaining = 0;
403 }
404
405 /*
406 * mem_size is only a 32 bit container... follow the PPC route
407 * and pin it to a 2 Gbyte maximum
408 */
409 if (sane_size > (FOURGIG >> 1))
410 mem_size = (vm_size_t)(FOURGIG >> 1);
411 else
412 mem_size = (vm_size_t)sane_size;
413 max_mem = sane_size;
414
415 kprintf("Physical memory %d MB\n", sane_size/MEG);
416
417 if (!PE_parse_boot_arg("max_valid_dma_addr", &maxdmaaddr))
418 max_valid_dma_address = 1024ULL * 1024ULL * 4096ULL;
419 else
420 max_valid_dma_address = ((uint64_t) maxdmaaddr) * 1024ULL * 1024ULL;
421
422 if (!PE_parse_boot_arg("maxbouncepool", &maxbouncepoolsize))
423 maxbouncepoolsize = MAXBOUNCEPOOL;
424 else
425 maxbouncepoolsize = maxbouncepoolsize * (1024 * 1024);
426
427 /*
428 * bsd_mbuf_cluster_reserve depends on sane_size being set
429 * in order to correctly determine the size of the mbuf pool
430 * that will be reserved
431 */
432 if (!PE_parse_boot_arg("maxloreserve", &maxloreserve))
433 maxloreserve = MAXLORESERVE + bsd_mbuf_cluster_reserve();
434 else
435 maxloreserve = maxloreserve * (1024 * 1024);
436
437
438 if (avail_end >= max_valid_dma_address) {
439 if (maxbouncepoolsize)
440 reserve_bouncepool(maxbouncepoolsize);
441
442 if (maxloreserve)
443 vm_lopage_poolsize = maxloreserve / PAGE_SIZE;
444 }
445
446 /*
447 * Initialize kernel physical map.
448 * Kernel virtual address starts at VM_KERNEL_MIN_ADDRESS.
449 */
450 pmap_bootstrap(0, IA32e);
451 }
452
453
454 unsigned int
455 pmap_free_pages(void)
456 {
457 return avail_remaining;
458 }
459
460
461 boolean_t
462 pmap_next_page(
463 ppnum_t *pn)
464 {
465
466 if (avail_remaining) while (pmap_memory_region_current < pmap_memory_region_count) {
467 if (pmap_memory_regions[pmap_memory_region_current].alloc ==
468 pmap_memory_regions[pmap_memory_region_current].end) {
469 pmap_memory_region_current++;
470 continue;
471 }
472 *pn = pmap_memory_regions[pmap_memory_region_current].alloc++;
473 avail_remaining--;
474
475 return TRUE;
476 }
477 return FALSE;
478 }
479
480
481 boolean_t
482 pmap_valid_page(
483 ppnum_t pn)
484 {
485 unsigned int i;
486 pmap_memory_region_t *pmptr = pmap_memory_regions;
487
488 assert(pn);
489 for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
490 if ( (pn >= pmptr->base) && (pn <= pmptr->end) && pmptr->type == kEfiConventionalMemory )
491 return TRUE;
492 }
493 return FALSE;
494 }
495
496
497 static void
498 reserve_bouncepool(uint32_t bounce_pool_wanted)
499 {
500 pmap_memory_region_t *pmptr = pmap_memory_regions;
501 pmap_memory_region_t *lowest = NULL;
502 unsigned int i;
503 unsigned int pages_needed;
504
505 pages_needed = bounce_pool_wanted / PAGE_SIZE;
506
507 for (i = 0; i < pmap_memory_region_count; i++, pmptr++) {
508 if ( (pmptr->type == kEfiConventionalMemory) && ((pmptr->end - pmptr->alloc) >= pages_needed) ) {
509 if ( (lowest == NULL) || (pmptr->alloc < lowest->alloc) )
510 lowest = pmptr;
511 }
512 }
513 if ( (lowest != NULL) ) {
514 bounce_pool_base = lowest->alloc * PAGE_SIZE;
515 bounce_pool_size = bounce_pool_wanted;
516
517 lowest->alloc += pages_needed;
518 avail_remaining -= pages_needed;
519 }
520 }