/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */

#include <mach_debug.h>
#include <mach_kdb.h>
#include <mach_kdp.h>
#include <debug.h>
#include <cpus.h>

#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>

#include <ppc/proc_reg.h>
#include <ppc/Firmware.h>
#include <ppc/boot.h>
#include <ppc/misc_protos.h>
#include <ppc/pmap.h>
#include <ppc/pmap_internals.h>
#include <ppc/mem.h>
#include <ppc/mappings.h>
#include <ppc/exception.h>
#include <ppc/mp.h>

#ifdef __MACHO__
#include <mach-o/mach_header.h>
#endif

extern unsigned int intstack[];       /* declared in start.s */
extern unsigned int intstack_top_ss;  /* declared in start.s */

vm_offset_t mem_size;    /* Size of actual physical memory present, in bytes,
                            minus any performance buffer and possibly limited
                            by mem_limit */
vm_offset_t mem_actual;  /* The "One True" physical memory size; actually,
                            it's the highest physical address + 1 */
uint64_t max_mem;        /* Size of physical memory (bytes), adjusted by maxmem */

mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX];
int pmap_mem_regions_count = 0;  /* Assume no non-contiguous memory regions */

mem_region_t free_regions[FREE_REGION_MAX];
int free_regions_count;

#ifndef __MACHO__
extern unsigned long etext;
#endif

unsigned int avail_remaining = 0;
vm_offset_t first_avail;
vm_offset_t static_memory_end;
extern vm_offset_t avail_next;

#ifdef __MACHO__
extern struct mach_header _mh_execute_header;
vm_offset_t sectTEXTB;
int sectSizeTEXT;
vm_offset_t sectDATAB;
int sectSizeDATA;
vm_offset_t sectLINKB;
int sectSizeLINK;
vm_offset_t sectKLDB;
int sectSizeKLD;

vm_offset_t end, etext, edata;
#endif

extern unsigned long exception_entry;
extern unsigned long exception_end;

void ppc_vm_init(unsigned int mem_limit, boot_args *args)
{
    unsigned int htabmask;
    unsigned int i, j, batsize, kmapsize;
    vm_offset_t addr;
    int boot_task_end_offset;
    const char *cpus;
    mapping *mp;
    vm_offset_t first_phys_avail;
    vm_offset_t sizeadj, oldstart;

    /* Now retrieve the addresses for end, edata, and etext
     * from the kernel's Mach-O headers.
     */
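    /* getsegdatafromheader() walks the load commands of the running
     * kernel's Mach-O image and returns the base address of the named
     * segment; the segment's size comes back through the last parameter.
     */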
    sectTEXTB = (vm_offset_t)getsegdatafromheader(
        &_mh_execute_header, "__TEXT", &sectSizeTEXT);
    sectDATAB = (vm_offset_t)getsegdatafromheader(
        &_mh_execute_header, "__DATA", &sectSizeDATA);
    sectLINKB = (vm_offset_t)getsegdatafromheader(
        &_mh_execute_header, "__LINKEDIT", &sectSizeLINK);
    sectKLDB = (vm_offset_t)getsegdatafromheader(
        &_mh_execute_header, "__KLD", &sectSizeKLD);

    etext = (vm_offset_t) sectTEXTB + sectSizeTEXT;
    edata = (vm_offset_t) sectDATAB + sectSizeDATA;
    end = round_page(getlastaddr());  /* Force end to the next page */
#if DEBUG
    kprintf("sectTEXT: %x, size: %x\n", sectTEXTB, sectSizeTEXT);
    kprintf("sectDATA: %x, size: %x\n", sectDATAB, sectSizeDATA);
    kprintf("sectLINK: %x, size: %x\n", sectLINKB, sectSizeLINK);
    kprintf("sectKLD: %x, size: %x\n", sectKLDB, sectSizeKLD);
    kprintf("end: %x\n", end);
#endif

    /* Stitch valid memory regions together - they may be contiguous
     * even though they're not already glued together.
     */
    mem_actual = args->PhysicalDRAM[0].base + args->PhysicalDRAM[0].size;  /* Initialize to the end of the first region */
    addr = 0;  /* Temporarily used as the index of the previous non-empty region */
    for (i = 1; i < kMaxDRAMBanks; i++) {

        if (args->PhysicalDRAM[i].size == 0) continue;  /* If the region is empty, skip it */

        if ((args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size) > mem_actual) {  /* New high? */
            mem_actual = args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size;      /* Take the high bid */
        }

        if (args->PhysicalDRAM[i].base ==  /* Does the end of the last region hit the start of the next? */
            args->PhysicalDRAM[addr].base +
            args->PhysicalDRAM[addr].size) {
            kprintf("region 0x%08x size 0x%08x joining region 0x%08x size 0x%08x\n",
                args->PhysicalDRAM[addr].base, args->PhysicalDRAM[addr].size,
                args->PhysicalDRAM[i].base, args->PhysicalDRAM[i].size);

            args->PhysicalDRAM[addr].size += args->PhysicalDRAM[i].size;  /* Join them */
            args->PhysicalDRAM[i].size = 0;
            continue;
        }
        /* This is now the last non-zero region to compare against */
        addr = i;
    }
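    /* Worked example (hypothetical bank layout): banks {base 0x00000000,
     * size 0x10000000} and {base 0x10000000, size 0x10000000} abut, so the
     * loop above folds them into one 0x20000000-byte region and zeroes the
     * second entry; a bank starting at 0x40000000 would stay separate, and
     * mem_actual ends up as the highest end address seen.
     */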

    /* Go through the list of memory regions passed in via the args
     * and copy valid entries into the pmap_mem_regions table, adding
     * further calculated entries.
     */

    pmap_mem_regions_count = 0;
    mem_size = 0;  /* Will be used to total the memory found so far */

    for (i = 0; i < kMaxDRAMBanks; i++) {
        if (args->PhysicalDRAM[i].size == 0)
            continue;

        /* The following should only happen if the memory size has
           been artificially reduced with -m */
        if (mem_limit > 0 &&
            mem_size + args->PhysicalDRAM[i].size > mem_limit)
            args->PhysicalDRAM[i].size = mem_limit - mem_size;

        /* We've found a region, tally memory */

        pmap_mem_regions[pmap_mem_regions_count].start =
            args->PhysicalDRAM[i].base;
        pmap_mem_regions[pmap_mem_regions_count].end =
            args->PhysicalDRAM[i].base +
            args->PhysicalDRAM[i].size;

        /* Regions must be provided in ascending order */
        assert((pmap_mem_regions_count == 0) ||
            pmap_mem_regions[pmap_mem_regions_count].start >
            pmap_mem_regions[pmap_mem_regions_count-1].start);

        if (pmap_mem_regions_count > 0) {
            /* We add any pages not in the first memory
             * region to the avail_remaining count. The first
             * memory region is used for mapping everything for
             * bootup and is taken care of specially.
             */
            avail_remaining +=
                args->PhysicalDRAM[i].size / PPC_PGBYTES;
        }

        /* Keep track of how much memory we've found */

        mem_size += args->PhysicalDRAM[i].size;

        /* Increment the number of regions found */
        pmap_mem_regions_count++;
    }
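    /* A sanity check of the arithmetic above: PPC_PGBYTES is the 4 KB
     * PowerPC page size, so a (hypothetical) 256 MB second bank would
     * contribute 0x10000000 / 0x1000 = 65536 pages to avail_remaining.
     */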

    max_mem = mem_size;

    kprintf("mem_size: %d M\n", mem_size / (1024 * 1024));

    /*
     * Initialize the pmap system, using space above `first_avail'
     * for the necessary data structures.
     * NOTE: assume that we'll have enough space mapped in already.
     */

    first_phys_avail = static_memory_end;
    first_avail = adjust_bat_limit(first_phys_avail, 0, FALSE, FALSE);

    kmapsize = (round_page(exception_end) - trunc_page(exception_entry)) +  /* Get the size we will map later */
        (round_page(sectTEXTB+sectSizeTEXT) - trunc_page(sectTEXTB)) +
        (round_page(sectDATAB+sectSizeDATA) - trunc_page(sectDATAB)) +
        (round_page(sectLINKB+sectSizeLINK) - trunc_page(sectLINKB)) +
        (round_page(sectKLDB+sectSizeKLD) - trunc_page(sectKLDB)) +
        (round_page(static_memory_end) - trunc_page(end));

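    /* kmapsize totals, in bytes, the page-rounded extents of every range
     * mapped below (exception vectors, __TEXT, __DATA, __LINKEDIT, __KLD,
     * and the tail from end to static_memory_end), so pmap_bootstrap()
     * can reserve enough mapping structures up front.
     */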
    pmap_bootstrap(mem_size, &first_avail, &first_phys_avail, kmapsize);

#ifdef __MACHO__
#if DEBUG
    kprintf("Mapping memory:\n");
    kprintf("  exception vector: %08X, %08X - %08X\n", trunc_page(exception_entry),
        trunc_page(exception_entry), round_page(exception_end));
    kprintf("         sectTEXTB: %08X, %08X - %08X\n", trunc_page(sectTEXTB),
        trunc_page(sectTEXTB), round_page(sectTEXTB+sectSizeTEXT));
    kprintf("         sectDATAB: %08X, %08X - %08X\n", trunc_page(sectDATAB),
        trunc_page(sectDATAB), round_page(sectDATAB+sectSizeDATA));
    kprintf("         sectLINKB: %08X, %08X - %08X\n", trunc_page(sectLINKB),
        trunc_page(sectLINKB), round_page(sectLINKB+sectSizeLINK));
    kprintf("          sectKLDB: %08X, %08X - %08X\n", trunc_page(sectKLDB),
        trunc_page(sectKLDB), round_page(sectKLDB+sectSizeKLD));
    kprintf("               end: %08X, %08X - %08X\n", trunc_page(end),
        trunc_page(end), static_memory_end);
#endif /* DEBUG */
    pmap_map(trunc_page(exception_entry), trunc_page(exception_entry),
        round_page(exception_end), VM_PROT_READ|VM_PROT_EXECUTE);
    pmap_map(trunc_page(sectTEXTB), trunc_page(sectTEXTB),
        round_page(sectTEXTB+sectSizeTEXT), VM_PROT_READ|VM_PROT_EXECUTE);
    pmap_map(trunc_page(sectDATAB), trunc_page(sectDATAB),
        round_page(sectDATAB+sectSizeDATA), VM_PROT_READ|VM_PROT_WRITE);

    /* The KLD and LINKEDIT segments are unloaded in toto after boot completes,
     * but via ml_static_mfree(), through IODTFreeLoaderInfo(). Hence, we have
     * to map both segments page-by-page.
     */
    for (addr = trunc_page(sectKLDB);
         addr < round_page(sectKLDB+sectSizeKLD);
         addr += PAGE_SIZE) {

        pmap_enter(kernel_pmap, addr, addr,
            VM_PROT_READ|VM_PROT_WRITE,
            VM_WIMG_USE_DEFAULT, TRUE);
    }

    for (addr = trunc_page(sectLINKB);
         addr < round_page(sectLINKB+sectSizeLINK);
         addr += PAGE_SIZE) {

        pmap_enter(kernel_pmap, addr, addr,
            VM_PROT_READ|VM_PROT_WRITE,
            VM_WIMG_USE_DEFAULT, TRUE);
    }

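    /* Each pmap_enter() above installs a single 4 KB V=R (virtual equals
     * real) mapping.  Unlike a BAT/block mapping, these page-granular
     * entries can later be torn down one page at a time when the loader
     * segments are returned to the VM system.
     */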
    /*
     * We need to map the remainder page-by-page because some of this will
     * be released later, but not all.  Ergo, no block mapping here.
     */
    for (addr = trunc_page(end); addr < round_page(static_memory_end); addr += PAGE_SIZE) {
        pmap_enter(kernel_pmap, addr, addr,
            VM_PROT_READ|VM_PROT_WRITE,
            VM_WIMG_USE_DEFAULT, TRUE);
    }
#endif /* __MACHO__ */

#if DEBUG
    for (i = 0; i < free_regions_count; i++) {
        kprintf("Free region start 0x%08x end 0x%08x\n",
            free_regions[i].start, free_regions[i].end);
    }
#endif

    /*
     * Note: the shadow BAT registers were already loaded in ppc_init.c
     */

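    /* The BATs (Block Address Translation registers) map large contiguous
     * blocks without going through the page-table hash.  ppc_init.c filled
     * in the shadow_BAT software copies; here they are copied into the
     * hardware registers of the boot processor.
     */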
    LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]);  /* Load up the real IBATs from the shadows */
    LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]);  /* Load up the real DBATs from the shadows */

#if DEBUG
    for (i = 0; i < 4; i++) kprintf("DBAT%1d: %08X %08X\n",
        i, shadow_BAT.DBATs[i].upper, shadow_BAT.DBATs[i].lower);
    for (i = 0; i < 4; i++) kprintf("IBAT%1d: %08X %08X\n",
        i, shadow_BAT.IBATs[i].upper, shadow_BAT.IBATs[i].lower);
#endif
}

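/* ppc_vm_cpu_init() is the per-processor half of VM initialization: it
 * points the CPU at the shared page-table hash and loads its BAT registers
 * from the same shadow copies used for the boot processor.
 */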
void ppc_vm_cpu_init(
    struct per_proc_info *proc_info)
{
    hash_table_init(hash_table_base, hash_table_size);  /* Point this processor at the shared page-table hash */

    LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]);
    LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]);

    sync(); isync();  /* Ensure the translation-register updates take effect */
}