/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */

#include <mach_debug.h>
#include <mach_kdb.h>
#include <mach_kdp.h>
#include <debug.h>
#include <cpus.h>

#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>

#include <ppc/proc_reg.h>
#include <ppc/Firmware.h>
#include <ppc/boot.h>
#include <ppc/misc_protos.h>
#include <ppc/pmap.h>
#include <ppc/pmap_internals.h>
#include <ppc/mem.h>
#include <ppc/mappings.h>
#include <ppc/exception.h>
#include <ppc/mp.h>

#ifdef __MACHO__
#include <mach-o/mach_header.h>
#endif

extern unsigned int intstack[];       /* declared in start.s */
extern unsigned int intstack_top_ss;  /* declared in start.s */

vm_offset_t mem_size;    /* Size of actual physical memory present,
                            minus any performance buffer and possibly
                            limited by mem_limit, in bytes */
vm_offset_t mem_actual;  /* The "One True" physical memory size;
                            actually, it's the highest physical address + 1 */


mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX];
int pmap_mem_regions_count = 0;  /* No non-contiguous memory regions */

mem_region_t free_regions[FREE_REGION_MAX];
int free_regions_count;

#ifndef __MACHO__
extern unsigned long etext;
#endif

unsigned int avail_remaining = 0;
vm_offset_t first_avail;
vm_offset_t static_memory_end;
extern vm_offset_t avail_next;

#ifdef __MACHO__
extern struct mach_header _mh_execute_header;
vm_offset_t sectTEXTB;
int sectSizeTEXT;
vm_offset_t sectDATAB;
int sectSizeDATA;
vm_offset_t sectLINKB;
int sectSizeLINK;
vm_offset_t sectKLDB;
int sectSizeKLD;

vm_offset_t end, etext, edata;
#endif

extern unsigned long exception_entry;
extern unsigned long exception_end;

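/*
 * ppc_vm_init: early VM bootstrap for the PowerPC port.
 *
 * Called once at startup with the boot_args handed over by the booter.
 * It coalesces the DRAM bank list, builds pmap_mem_regions, initializes
 * the pmap module, maps the kernel's own segments, and programs the
 * initial BAT registers.  (Summary inferred from the code below; see
 * the individual steps for details.)
 */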
void ppc_vm_init(unsigned int mem_limit, boot_args *args)
{
    unsigned int htabmask;
    unsigned int i, j, batsize, kmapsize;
    vm_offset_t  addr;
    int          boot_task_end_offset;
    const char  *cpus;
    mapping     *mp;
    vm_offset_t  first_phys_avail;
    vm_offset_t  sizeadj, oldstart;

    /* Now retrieve addresses for end, edata, and etext
     * from the Mach-O headers.
     */
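    /* getsegdatafromheader() returns the load address of the named
     * segment and reports its size through the final out parameter;
     * it should return 0 if the segment is not present in the header.
     */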
    sectTEXTB = (vm_offset_t)getsegdatafromheader(
        &_mh_execute_header, "__TEXT", &sectSizeTEXT);
    sectDATAB = (vm_offset_t)getsegdatafromheader(
        &_mh_execute_header, "__DATA", &sectSizeDATA);
    sectLINKB = (vm_offset_t)getsegdatafromheader(
        &_mh_execute_header, "__LINKEDIT", &sectSizeLINK);
    sectKLDB = (vm_offset_t)getsegdatafromheader(
        &_mh_execute_header, "__KLD", &sectSizeKLD);

    etext = (vm_offset_t) sectTEXTB + sectSizeTEXT;
    edata = (vm_offset_t) sectDATAB + sectSizeDATA;
    end = round_page(getlastaddr());  /* Force end to next page */
#if DEBUG
    kprintf("sectTEXT: %x, size: %x\n", sectTEXTB, sectSizeTEXT);
    kprintf("sectDATA: %x, size: %x\n", sectDATAB, sectSizeDATA);
    kprintf("sectLINK: %x, size: %x\n", sectLINKB, sectSizeLINK);
    kprintf("sectKLD: %x, size: %x\n", sectKLDB, sectSizeKLD);
    kprintf("end: %x\n", end);
#endif

    /* Stitch valid memory regions together - they may be contiguous
     * even though they're not already glued together
     */
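    /* For example (illustrative values only, not from the boot args):
     * banks reported as 0x00000000/256MB and 0x10000000/256MB abut, so
     * the second is folded into the first, leaving one 512MB bank with
     * the second entry's size zeroed.
     */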
    mem_actual = args->PhysicalDRAM[0].base + args->PhysicalDRAM[0].size;  /* Start with the end of the first region */

    addr = 0;  /* Temporarily used as the index of the previous memory region... */
    for (i = 1; i < kMaxDRAMBanks; i++) {

        if (args->PhysicalDRAM[i].size == 0) continue;  /* If region is empty, skip it */

        if ((args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size) > mem_actual) {  /* New high? */
            mem_actual = args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size;  /* Take the high bid */
        }

        if (args->PhysicalDRAM[i].base ==  /* Does the end of the previous region meet the start of this one? */
            args->PhysicalDRAM[addr].base +
            args->PhysicalDRAM[addr].size) {
            kprintf("region 0x%08x size 0x%08x joining region 0x%08x size 0x%08x\n",
                args->PhysicalDRAM[addr].base, args->PhysicalDRAM[addr].size,
                args->PhysicalDRAM[i].base, args->PhysicalDRAM[i].size);

            args->PhysicalDRAM[addr].size += args->PhysicalDRAM[i].size;  /* Join them */
            args->PhysicalDRAM[i].size = 0;
            continue;
        }
        /* This is now the last non-zero region to compare against */
        addr = i;
    }

    /* Go through the list of memory regions passed in via the args
     * and copy valid entries into the pmap_mem_regions table, adding
     * further calculated entries.
     */

    pmap_mem_regions_count = 0;
    mem_size = 0;  /* Will be used to total the memory found so far */

    for (i = 0; i < kMaxDRAMBanks; i++) {
        if (args->PhysicalDRAM[i].size == 0)
            continue;

        /* The following should only happen if memory size has
           been artificially reduced with -m */
        if (mem_limit > 0 &&
            mem_size + args->PhysicalDRAM[i].size > mem_limit)
            args->PhysicalDRAM[i].size = mem_limit - mem_size;

        /* We've found a region, tally memory */

        pmap_mem_regions[pmap_mem_regions_count].start =
            args->PhysicalDRAM[i].base;
        pmap_mem_regions[pmap_mem_regions_count].end =
            args->PhysicalDRAM[i].base +
            args->PhysicalDRAM[i].size;

        /* Regions must be provided in ascending order */
        assert ((pmap_mem_regions_count == 0) ||
            pmap_mem_regions[pmap_mem_regions_count].start >
            pmap_mem_regions[pmap_mem_regions_count-1].start);

        if (pmap_mem_regions_count > 0) {
            /* Add any pages not in the first memory region to the
             * avail_remaining count. The first memory region is used
             * for mapping everything during bootup and is taken care
             * of specially.
             */
            avail_remaining +=
                args->PhysicalDRAM[i].size / PPC_PGBYTES;
        }

        /* Keep track of how much memory we've found */

        mem_size += args->PhysicalDRAM[i].size;

        /* Increment the number of regions found */
        pmap_mem_regions_count++;
    }

    kprintf("mem_size: %d M\n", mem_size / (1024 * 1024));

    /*
     * Initialize the pmap system, using space above `first_avail'
     * for the necessary data structures.
     * NOTE : assume that we'll have enough space mapped in already
     */

    first_phys_avail = static_memory_end;
    first_avail = adjust_bat_limit(first_phys_avail, 0, FALSE, FALSE);

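    /* The sum below totals, in bytes, the page-rounded extents of every
     * range that will be entered into the kernel pmap right after
     * bootstrap (exception vectors, __TEXT, __DATA, __LINKEDIT, __KLD,
     * and the area from end to static_memory_end), presumably so that
     * pmap_bootstrap() can reserve enough mapping structures up front.
     */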
    kmapsize = (round_page(exception_end) - trunc_page(exception_entry)) +  /* Get size we will map later */
        (round_page(sectTEXTB+sectSizeTEXT) - trunc_page(sectTEXTB)) +
        (round_page(sectDATAB+sectSizeDATA) - trunc_page(sectDATAB)) +
        (round_page(sectLINKB+sectSizeLINK) - trunc_page(sectLINKB)) +
        (round_page(sectKLDB+sectSizeKLD) - trunc_page(sectKLDB)) +
        (round_page(static_memory_end) - trunc_page(end));

    pmap_bootstrap(mem_size, &first_avail, &first_phys_avail, kmapsize);

#ifdef __MACHO__
#if DEBUG
    kprintf("Mapping memory:\n");
    kprintf(" exception vector: %08X, %08X - %08X\n", trunc_page(exception_entry),
        trunc_page(exception_entry), round_page(exception_end));
    kprintf(" sectTEXTB: %08X, %08X - %08X\n", trunc_page(sectTEXTB),
        trunc_page(sectTEXTB), round_page(sectTEXTB+sectSizeTEXT));
    kprintf(" sectDATAB: %08X, %08X - %08X\n", trunc_page(sectDATAB),
        trunc_page(sectDATAB), round_page(sectDATAB+sectSizeDATA));
    kprintf(" sectLINKB: %08X, %08X - %08X\n", trunc_page(sectLINKB),
        trunc_page(sectLINKB), round_page(sectLINKB+sectSizeLINK));
    kprintf(" sectKLDB: %08X, %08X - %08X\n", trunc_page(sectKLDB),
        trunc_page(sectKLDB), round_page(sectKLDB+sectSizeKLD));
    kprintf(" end: %08X, %08X - %08X\n", trunc_page(end),
        trunc_page(end), static_memory_end);
#endif /* DEBUG */
    pmap_map(trunc_page(exception_entry), trunc_page(exception_entry),
        round_page(exception_end), VM_PROT_READ|VM_PROT_EXECUTE);
    pmap_map(trunc_page(sectTEXTB), trunc_page(sectTEXTB),
        round_page(sectTEXTB+sectSizeTEXT), VM_PROT_READ|VM_PROT_EXECUTE);
    pmap_map(trunc_page(sectDATAB), trunc_page(sectDATAB),
        round_page(sectDATAB+sectSizeDATA), VM_PROT_READ|VM_PROT_WRITE);


    /* The KLD and LINKEDIT segments are unloaded in toto after boot completes,
     * but via ml_static_mfree(), through IODTFreeLoaderInfo(). Hence, we have
     * to map both segments page-by-page.
     */
    for (addr = trunc_page(sectKLDB);
         addr < round_page(sectKLDB+sectSizeKLD);
         addr += PAGE_SIZE) {

        pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
    }

    for (addr = trunc_page(sectLINKB);
         addr < round_page(sectLINKB+sectSizeLINK);
         addr += PAGE_SIZE) {

        pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
    }

    /*
     * We need to map the remainder page-by-page because some of this will
     * be released later, but not all. Ergo, no block mapping here
     */
    for (addr = trunc_page(end); addr < round_page(static_memory_end); addr += PAGE_SIZE) {
        pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
    }
#endif /* __MACHO__ */

#if DEBUG
    for (i = 0; i < free_regions_count; i++) {
        kprintf("Free region start 0x%08x end 0x%08x\n",
            free_regions[i].start, free_regions[i].end);
    }
#endif

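    /* The shadow_BAT structure mirrors the PowerPC Block Address
     * Translation registers so they can be reloaded identically on every
     * processor (see ppc_vm_cpu_init below). The IBATs and DBAT0/DBAT1
     * are invalidated here, while DBAT2 and DBAT3 are read back from the
     * hardware, presumably to preserve mappings set up earlier in boot.
     */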
    /* Initialize shadow IBATs */
    shadow_BAT.IBATs[0].upper = BAT_INVALID;
    shadow_BAT.IBATs[0].lower = BAT_INVALID;
    shadow_BAT.IBATs[1].upper = BAT_INVALID;
    shadow_BAT.IBATs[1].lower = BAT_INVALID;
    shadow_BAT.IBATs[2].upper = BAT_INVALID;
    shadow_BAT.IBATs[2].lower = BAT_INVALID;
    shadow_BAT.IBATs[3].upper = BAT_INVALID;
    shadow_BAT.IBATs[3].lower = BAT_INVALID;

    LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]);  /* Load up real IBATs from shadows */

    /* Initialize shadow DBATs */
    shadow_BAT.DBATs[0].upper = BAT_INVALID;
    shadow_BAT.DBATs[0].lower = BAT_INVALID;
    shadow_BAT.DBATs[1].upper = BAT_INVALID;
    shadow_BAT.DBATs[1].lower = BAT_INVALID;
    mfdbatu(shadow_BAT.DBATs[2].upper, 2);
    mfdbatl(shadow_BAT.DBATs[2].lower, 2);
    mfdbatu(shadow_BAT.DBATs[3].upper, 3);
    mfdbatl(shadow_BAT.DBATs[3].lower, 3);

    LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]);  /* Load up real DBATs from shadows */

    sync(); isync();
#if DEBUG
    for (i = 0; i < 4; i++) kprintf("DBAT%1d: %08X %08X\n",
        i, shadow_BAT.DBATs[i].upper, shadow_BAT.DBATs[i].lower);
    for (i = 0; i < 4; i++) kprintf("IBAT%1d: %08X %08X\n",
        i, shadow_BAT.IBATs[i].upper, shadow_BAT.IBATs[i].lower);
#endif
}

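/*
 * ppc_vm_cpu_init: per-processor VM setup.
 *
 * Points the processor at the hash table and loads its BAT registers
 * from the shadow copies built in ppc_vm_init(), so every CPU ends up
 * with identical translation state.  (Summary inferred from the code;
 * hash_table_base and hash_table_size appear to be defined elsewhere
 * in the pmap code.)
 */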
void ppc_vm_cpu_init(
    struct per_proc_info *proc_info)
{
    hash_table_init(hash_table_base, hash_table_size);

    LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]);
    LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]);

    sync(); isync();
}