git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/ppc_vm_init.c
xnu-123.5.tar.gz
[apple/xnu.git] / osfmk / ppc / ppc_vm_init.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * @APPLE_FREE_COPYRIGHT@
27 */
28
29 #include <mach_debug.h>
30 #include <mach_kdb.h>
31 #include <mach_kdp.h>
32 #include <debug.h>
33 #include <cpus.h>
34
35 #include <mach/vm_types.h>
36 #include <mach/vm_param.h>
37 #include <mach/thread_status.h>
38 #include <kern/misc_protos.h>
39 #include <kern/assert.h>
40 #include <kern/cpu_number.h>
41
42 #include <ppc/proc_reg.h>
43 #include <ppc/Firmware.h>
44 #include <ppc/boot.h>
45 #include <ppc/misc_protos.h>
46 #include <ppc/pmap.h>
47 #include <ppc/pmap_internals.h>
48 #include <ppc/mem.h>
49 #include <ppc/mappings.h>
50 #include <ppc/exception.h>
51 #include <ppc/mp.h>
52
53 #ifdef __MACHO__
54 #include <mach-o/mach_header.h>
55 #endif
56
57 extern unsigned int intstack[]; /* declared in start.s */
58 extern unsigned int intstack_top_ss; /* declared in start.s */
59
60 vm_offset_t mem_size; /* Size of actual physical memory present
61 minus any performance buffer and possibly limited
62 by mem_limit in bytes */
63 vm_offset_t mem_actual; /* The "One True" physical memory size
64 actually, it's the highest physical address + 1 */
65
66
67 mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX];
68 int pmap_mem_regions_count = 0; /* No non-contiguous memory regions */
69
70 mem_region_t free_regions[FREE_REGION_MAX];
71 int free_regions_count;
72
73 #ifndef __MACHO__
74 extern unsigned long etext;
75 #endif
76
77 unsigned int avail_remaining = 0;
78 vm_offset_t first_avail;
79 vm_offset_t static_memory_end;
80 extern vm_offset_t avail_next;
81
82 #ifdef __MACHO__
83 extern struct mach_header _mh_execute_header;
84 vm_offset_t sectTEXTB;
85 int sectSizeTEXT;
86 vm_offset_t sectDATAB;
87 int sectSizeDATA;
88 vm_offset_t sectOBJCB;
89 int sectSizeOBJC;
90 vm_offset_t sectLINKB;
91 int sectSizeLINK;
92 vm_offset_t sectKLDB;
93 int sectSizeKLD;
94
95 vm_offset_t end, etext, edata;
96 #endif
97
98 extern unsigned long exception_entry;
99 extern unsigned long exception_end;
100
101
102 void ppc_vm_init(unsigned int mem_limit, boot_args *args)
103 {
104 unsigned int htabmask;
105 unsigned int i, j, batsize, kmapsize;
106 vm_offset_t addr;
107 int boot_task_end_offset;
108 const char *cpus;
109 mapping *mp;
110 vm_offset_t first_phys_avail;
111 vm_offset_t sizeadj, oldstart;
112
113 #ifdef __MACHO__
114 /* Now retrieve addresses for end, edata, and etext
115 * from MACH-O headers.
116 */
117 sectTEXTB = (vm_offset_t)getsegdatafromheader(
118 &_mh_execute_header, "__TEXT", &sectSizeTEXT);
119 sectDATAB = (vm_offset_t)getsegdatafromheader(
120 &_mh_execute_header, "__DATA", &sectSizeDATA);
121 sectOBJCB = (vm_offset_t)getsegdatafromheader(
122 &_mh_execute_header, "__OBJC", &sectSizeOBJC);
123 sectLINKB = (vm_offset_t)getsegdatafromheader(
124 &_mh_execute_header, "__LINKEDIT", &sectSizeLINK);
125 sectKLDB = (vm_offset_t)getsegdatafromheader(
126 &_mh_execute_header, "__KLD", &sectSizeKLD);
127
128 etext = (vm_offset_t) sectTEXTB + sectSizeTEXT;
129 edata = (vm_offset_t) sectDATAB + sectSizeDATA;
130 end = round_page(getlastaddr()); /* Force end to next page */
131 #if DEBUG
132 kprintf("sectTEXT: %x, size: %x\n", sectTEXTB, sectSizeTEXT);
133 kprintf("sectDATA: %x, size: %x\n", sectDATAB, sectSizeDATA);
134 kprintf("sectOBJC: %x, size: %x\n", sectOBJCB, sectSizeOBJC);
135 kprintf("sectLINK: %x, size: %x\n", sectLINKB, sectSizeLINK);
136 kprintf("sectKLD: %x, size: %x\n", sectKLDB, sectSizeKLD);
137 kprintf("end: %x\n", end);
138 #endif
139 #endif /* __MACHO__ */
140
141 /* Stitch valid memory regions together - they may be contiguous
142 * even though they're not already glued together
143 */
144 mem_actual = mem_actual = args->PhysicalDRAM[0].base + args->PhysicalDRAM[0].size; /* Initialize to the first region size */
145 addr = 0; /* temp use as pointer to previous memory region... */
146 for (i = 1; i < kMaxDRAMBanks; i++) {
147
148 if (args->PhysicalDRAM[i].size == 0) continue; /* If region is empty, skip it */
149
150 if((args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size) > mem_actual) { /* New high? */
151 mem_actual = args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size; /* Take the high bid */
152 }
153
154 if (args->PhysicalDRAM[i].base == /* Does the end of the last hit the start of the next? */
155 args->PhysicalDRAM[addr].base +
156 args->PhysicalDRAM[addr].size) {
157 kprintf("region 0x%08x size 0x%08x joining region 0x%08x size 0x%08x\n",
158 args->PhysicalDRAM[addr].base, args->PhysicalDRAM[addr].size,
159 args->PhysicalDRAM[i].base, args->PhysicalDRAM[i].size);
160
161 args->PhysicalDRAM[addr].size += args->PhysicalDRAM[i].size; /* Join them */
162 args->PhysicalDRAM[i].size = 0;
163 continue;
164 }
165 /* This is now last non-zero region to compare against */
166 addr = i;
167 }
168
169 /* Go through the list of memory regions passed in via the args
170 * and copy valid entries into the pmap_mem_regions table, adding
171 * further calculated entries.
172 */
173
174 pmap_mem_regions_count = 0;
175 mem_size = 0; /* Will use to total memory found so far */
176
177 for (i = 0; i < kMaxDRAMBanks; i++) {
178 if (args->PhysicalDRAM[i].size == 0)
179 continue;
180
181 /* The following should only happen if memory size has
182 been artificially reduced with -m */
183 if (mem_limit > 0 &&
184 mem_size + args->PhysicalDRAM[i].size > mem_limit)
185 args->PhysicalDRAM[i].size = mem_limit - mem_size;
186
187 /* We've found a region, tally memory */
188
189 pmap_mem_regions[pmap_mem_regions_count].start =
190 args->PhysicalDRAM[i].base;
191 pmap_mem_regions[pmap_mem_regions_count].end =
192 args->PhysicalDRAM[i].base +
193 args->PhysicalDRAM[i].size;
194
195 /* Regions must be provided in ascending order */
196 assert ((pmap_mem_regions_count == 0) ||
197 pmap_mem_regions[pmap_mem_regions_count].start >
198 pmap_mem_regions[pmap_mem_regions_count-1].start);
199
200 if (pmap_mem_regions_count > 0) {
201 /* we add on any pages not in the first memory
202 * region to the avail_remaining count. The first
203 * memory region is used for mapping everything for
204 * bootup and is taken care of specially.
205 */
206 avail_remaining +=
207 args->PhysicalDRAM[i].size / PPC_PGBYTES;
208 }
209
210 /* Keep track of how much memory we've found */
211
212 mem_size += args->PhysicalDRAM[i].size;
213
214 /* incremement number of regions found */
215 pmap_mem_regions_count++;
216 }
217
218 kprintf("mem_size: %d M\n",mem_size / (1024 * 1024));
219
220 /*
221 * Initialize the pmap system, using space above `first_avail'
222 * for the necessary data structures.
223 * NOTE : assume that we'll have enough space mapped in already
224 */
225
226 first_phys_avail = static_memory_end;
227 first_avail = adjust_bat_limit(first_phys_avail, 0, FALSE, FALSE);
228
229 kmapsize = (round_page(exception_end) - trunc_page(exception_entry)) + /* Get size we will map later */
230 (round_page(sectTEXTB+sectSizeTEXT) - trunc_page(sectTEXTB)) +
231 (round_page(sectDATAB+sectSizeDATA) - trunc_page(sectDATAB)) +
232 (round_page(sectOBJCB+sectSizeOBJC) - trunc_page(sectOBJCB)) +
233 (round_page(sectLINKB+sectSizeLINK) - trunc_page(sectLINKB)) +
234 (round_page(sectKLDB+sectSizeKLD) - trunc_page(sectKLDB)) +
235 (round_page(static_memory_end) - trunc_page(end));
236
237 pmap_bootstrap(mem_size,&first_avail,&first_phys_avail, kmapsize);
238
239 #ifdef __MACHO__
240 #if DEBUG
241 kprintf("Mapping memory:\n");
242 kprintf(" exception vector: %08X, %08X - %08X\n", trunc_page(exception_entry),
243 trunc_page(exception_entry), round_page(exception_end));
244 kprintf(" sectTEXTB: %08X, %08X - %08X\n", trunc_page(sectTEXTB),
245 trunc_page(sectTEXTB), round_page(sectTEXTB+sectSizeTEXT));
246 kprintf(" sectDATAB: %08X, %08X - %08X\n", trunc_page(sectDATAB),
247 trunc_page(sectDATAB), round_page(sectDATAB+sectSizeDATA));
248 kprintf(" sectOBJCB: %08X, %08X - %08X\n", trunc_page(sectOBJCB),
249 trunc_page(sectOBJCB), round_page(sectOBJCB+sectSizeOBJC));
250 kprintf(" sectLINKB: %08X, %08X - %08X\n", trunc_page(sectLINKB),
251 trunc_page(sectLINKB), round_page(sectLINKB+sectSizeLINK));
252 kprintf(" sectKLDB: %08X, %08X - %08X\n", trunc_page(sectKLDB),
253 trunc_page(sectKLDB), round_page(sectKLDB+sectSizeKLD));
254 kprintf(" end: %08X, %08X - %08X\n", trunc_page(end),
255 trunc_page(end), static_memory_end);
256 #endif /* DEBUG */
257 pmap_map(trunc_page(exception_entry), trunc_page(exception_entry),
258 round_page(exception_end), VM_PROT_READ|VM_PROT_EXECUTE);
259 pmap_map(trunc_page(sectTEXTB), trunc_page(sectTEXTB),
260 round_page(sectTEXTB+sectSizeTEXT), VM_PROT_READ|VM_PROT_EXECUTE);
261 pmap_map(trunc_page(sectDATAB), trunc_page(sectDATAB),
262 round_page(sectDATAB+sectSizeDATA), VM_PROT_READ|VM_PROT_WRITE);
263 pmap_map(trunc_page(sectOBJCB), trunc_page(sectOBJCB),
264 round_page(sectOBJCB+sectSizeOBJC), VM_PROT_READ|VM_PROT_WRITE);
265
266
267 /* The KLD and LINKEDIT segments are unloaded in toto after boot completes,
268 * but via ml_static_mfree(), through IODTFreeLoaderInfo(). Hence, we have
269 * to map both segments page-by-page.
270 */
271 for (addr = trunc_page(sectKLDB);
272 addr < round_page(sectKLDB+sectSizeKLD);
273 addr += PAGE_SIZE) {
274
275 pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
276 }
277
278 for (addr = trunc_page(sectLINKB);
279 addr < round_page(sectLINKB+sectSizeLINK);
280 addr += PAGE_SIZE) {
281
282 pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
283 }
284
285 /*
286 * We need to map the remainder page-by-page because some of this will
287 * be released later, but not all. Ergo, no block mapping here
288 */
289 for(addr = trunc_page(end); addr < round_page(static_memory_end); addr += PAGE_SIZE) {
290 pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
291 }
292 #endif /* __MACHO__ */
293
294 #if DEBUG
295 for (i=0 ; i < free_regions_count; i++) {
296 kprintf("Free region start 0x%08x end 0x%08x\n",
297 free_regions[i].start,free_regions[i].end);
298 }
299 #endif
300
301 /* Initialize shadow IBATs */
302 shadow_BAT.IBATs[0].upper=BAT_INVALID;
303 shadow_BAT.IBATs[0].lower=BAT_INVALID;
304 shadow_BAT.IBATs[1].upper=BAT_INVALID;
305 shadow_BAT.IBATs[1].lower=BAT_INVALID;
306 shadow_BAT.IBATs[2].upper=BAT_INVALID;
307 shadow_BAT.IBATs[2].lower=BAT_INVALID;
308 shadow_BAT.IBATs[3].upper=BAT_INVALID;
309 shadow_BAT.IBATs[3].lower=BAT_INVALID;
310
311 LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]); /* Load up real IBATs from shadows */
312
313 /* Initialize shadow DBATs */
314 shadow_BAT.DBATs[0].upper=BAT_INVALID;
315 shadow_BAT.DBATs[0].lower=BAT_INVALID;
316 shadow_BAT.DBATs[1].upper=BAT_INVALID;
317 shadow_BAT.DBATs[1].lower=BAT_INVALID;
318 mfdbatu(shadow_BAT.DBATs[2].upper,2);
319 mfdbatl(shadow_BAT.DBATs[2].lower,2);
320 mfdbatu(shadow_BAT.DBATs[3].upper,3);
321 mfdbatl(shadow_BAT.DBATs[3].lower,3);
322
323 LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]); /* Load up real DBATs from shadows */
324
325 sync();isync();
326 #if DEBUG
327 for(i=0; i<4; i++) kprintf("DBAT%1d: %08X %08X\n",
328 i, shadow_BAT.DBATs[i].upper, shadow_BAT.DBATs[i].lower);
329 for(i=0; i<4; i++) kprintf("IBAT%1d: %08X %08X\n",
330 i, shadow_BAT.IBATs[i].upper, shadow_BAT.IBATs[i].lower);
331 #endif
332 }
333
334 void ppc_vm_cpu_init(
335 struct per_proc_info *proc_info)
336 {
337 hash_table_init(hash_table_base, hash_table_size);
338
339 LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]);
340 LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]);
341
342 sync();isync();
343 }