/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
#include <mach_debug.h>

#include <mach/vm_types.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/thread_status.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>

#include <ppc/proc_reg.h>
#include <ppc/Firmware.h>
#include <ppc/boot.h>
#include <ppc/misc_protos.h>
#include <ppc/pmap.h>
#include <ppc/pmap_internals.h>
#include <ppc/mem.h>
#include <ppc/mappings.h>
#include <ppc/exception.h>

#include <mach-o/mach_header.h>
extern unsigned int intstack[];		/* declared in start.s */
extern unsigned int intstack_top_ss;	/* declared in start.s */

vm_offset_t mem_size;		/* Size of actual physical memory present,
				   minus any performance buffer and possibly
				   limited by mem_limit, in bytes */
vm_offset_t mem_actual;		/* The "One True" physical memory size
				   (actually, it's the highest physical
				   address + 1) */

mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX];
int          pmap_mem_regions_count = 0;	/* No non-contiguous memory regions */

mem_region_t free_regions[FREE_REGION_MAX];
int          free_regions_count;

#ifndef __MACHO__
extern unsigned long etext;
#endif

unsigned int avail_remaining = 0;
vm_offset_t  first_avail;
vm_offset_t  static_memory_end;
extern vm_offset_t avail_next;

#ifdef __MACHO__
extern struct mach_header _mh_execute_header;
vm_offset_t sectTEXTB;
int         sectSizeTEXT;
vm_offset_t sectDATAB;
int         sectSizeDATA;
vm_offset_t sectLINKB;
int         sectSizeLINK;
vm_offset_t sectKLDB;
int         sectSizeKLD;

vm_offset_t end, etext, edata;
#endif

extern unsigned long exception_entry;
extern unsigned long exception_end;
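
/*
 * ppc_vm_init: early VM bootstrap for the PPC port.  As the code below
 * shows, it (1) locates the kernel's Mach-O segments, (2) coalesces and
 * tallies the DRAM banks handed up by the booter, (3) calls
 * pmap_bootstrap() to set up the physical map, (4) maps the kernel's
 * static ranges, and (5) loads the real BAT registers from their shadows.
 */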
void ppc_vm_init(unsigned int mem_limit, boot_args *args)
{
	unsigned int htabmask;
	unsigned int i, j, batsize, kmapsize;
	vm_offset_t  addr;
	int boot_task_end_offset;
	vm_offset_t  first_phys_avail;
	vm_offset_t  sizeadj, oldstart;
	/* Now retrieve addresses for end, edata, and etext
	 * from MACH-O headers.
	 */
	sectTEXTB = (vm_offset_t)getsegdatafromheader(
		&_mh_execute_header, "__TEXT", &sectSizeTEXT);
	sectDATAB = (vm_offset_t)getsegdatafromheader(
		&_mh_execute_header, "__DATA", &sectSizeDATA);
	sectLINKB = (vm_offset_t)getsegdatafromheader(
		&_mh_execute_header, "__LINKEDIT", &sectSizeLINK);
	sectKLDB = (vm_offset_t)getsegdatafromheader(
		&_mh_execute_header, "__KLD", &sectSizeKLD);

	etext = (vm_offset_t) sectTEXTB + sectSizeTEXT;
	edata = (vm_offset_t) sectDATAB + sectSizeDATA;
	end = round_page(getlastaddr());	/* Force end to next page */

	kprintf("sectTEXT: %x, size: %x\n", sectTEXTB, sectSizeTEXT);
	kprintf("sectDATA: %x, size: %x\n", sectDATAB, sectSizeDATA);
	kprintf("sectLINK: %x, size: %x\n", sectLINKB, sectSizeLINK);
	kprintf("sectKLD:  %x, size: %x\n", sectKLDB, sectSizeKLD);
	kprintf("end: %x\n", end);
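
	/*
	 * Note: getsegdatafromheader() (from the mach-o/mach_header.h
	 * included above) returns the load address of the named segment's
	 * data and stores its size through the third parameter, so each
	 * sect*B/sectSize* pair above describes one kernel segment.
	 */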
	/* Stitch valid memory regions together - they may be contiguous
	 * even though they're not already glued together
	 */
	mem_actual = args->PhysicalDRAM[0].base + args->PhysicalDRAM[0].size;	/* Initialize to the first region size */
	addr = 0;	/* temp use as pointer to previous memory region... */
	for (i = 1; i < kMaxDRAMBanks; i++) {

		if (args->PhysicalDRAM[i].size == 0) continue;	/* If region is empty, skip it */

		if ((args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size) > mem_actual) {	/* New high? */
			mem_actual = args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size;	/* Take the high bid */
		}

		if (args->PhysicalDRAM[i].base ==	/* Does the end of the last hit the start of the next? */
		    args->PhysicalDRAM[addr].base +
		    args->PhysicalDRAM[addr].size) {
			kprintf("region 0x%08x size 0x%08x joining region 0x%08x size 0x%08x\n",
				args->PhysicalDRAM[addr].base, args->PhysicalDRAM[addr].size,
				args->PhysicalDRAM[i].base, args->PhysicalDRAM[i].size);

			args->PhysicalDRAM[addr].size += args->PhysicalDRAM[i].size;	/* Join them */
			args->PhysicalDRAM[i].size = 0;
			continue;
		}

		addr = i;	/* This is now last non-zero region to compare against */
	}
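
	/*
	 * Illustrative example (values hypothetical): if the booter hands up
	 * bank 0 = { base 0x00000000, size 0x10000000 } and
	 * bank 1 = { base 0x10000000, size 0x08000000 }, then bank 1 starts
	 * exactly where bank 0 ends, so bank 0 grows to size 0x18000000 and
	 * bank 1 is zeroed; mem_actual ends up 0x18000000 (highest
	 * address + 1).
	 */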
	/* Go through the list of memory regions passed in via the args
	 * and copy valid entries into the pmap_mem_regions table, adding
	 * further calculated entries.
	 */
	pmap_mem_regions_count = 0;
	mem_size = 0;	/* Will use to total memory found so far */

	for (i = 0; i < kMaxDRAMBanks; i++) {
		if (args->PhysicalDRAM[i].size == 0)
			continue;

		/* The following should only happen if memory size has
		   been artificially reduced with -m */
		if (mem_limit > 0 &&
		    mem_size + args->PhysicalDRAM[i].size > mem_limit)
			args->PhysicalDRAM[i].size = mem_limit - mem_size;

		/* We've found a region, tally memory */

		pmap_mem_regions[pmap_mem_regions_count].start =
			args->PhysicalDRAM[i].base;
		pmap_mem_regions[pmap_mem_regions_count].end =
			args->PhysicalDRAM[i].base +
			args->PhysicalDRAM[i].size;

		/* Regions must be provided in ascending order */
		assert ((pmap_mem_regions_count == 0) ||
			pmap_mem_regions[pmap_mem_regions_count].start >
			pmap_mem_regions[pmap_mem_regions_count-1].start);

		if (pmap_mem_regions_count > 0) {
			/* we add on any pages not in the first memory
			 * region to the avail_remaining count. The first
			 * memory region is used for mapping everything for
			 * bootup and is taken care of specially.
			 */
			avail_remaining +=
				args->PhysicalDRAM[i].size / PPC_PGBYTES;
		}

		/* Keep track of how much memory we've found */

		mem_size += args->PhysicalDRAM[i].size;

		/* increment number of regions found */
		pmap_mem_regions_count++;
	}

	kprintf("mem_size: %d M\n", mem_size / (1024 * 1024));
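
	/*
	 * At this point mem_size is the usable memory total (possibly
	 * clamped by mem_limit), while mem_actual, computed above, is the
	 * highest physical address + 1 and therefore also spans any holes
	 * between banks.
	 */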
	/*
	 * Initialize the pmap system, using space above `first_avail'
	 * for the necessary data structures.
	 * NOTE : assume that we'll have enough space mapped in already
	 */

	first_phys_avail = static_memory_end;
	first_avail = adjust_bat_limit(first_phys_avail, 0, FALSE, FALSE);

	kmapsize = (round_page(exception_end) - trunc_page(exception_entry)) +	/* Get size we will map later */
		   (round_page(sectTEXTB+sectSizeTEXT) - trunc_page(sectTEXTB)) +
		   (round_page(sectDATAB+sectSizeDATA) - trunc_page(sectDATAB)) +
		   (round_page(sectLINKB+sectSizeLINK) - trunc_page(sectLINKB)) +
		   (round_page(sectKLDB+sectSizeKLD) - trunc_page(sectKLDB)) +
		   (round_page(static_memory_end) - trunc_page(end));

	pmap_bootstrap(mem_size, &first_avail, &first_phys_avail, kmapsize);
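
	/*
	 * kmapsize totals the bytes of the ranges mapped below (exception
	 * vectors, the Mach-O segments, and the end..static_memory_end
	 * remainder), presumably so pmap_bootstrap() can reserve enough
	 * mapping structures for them up front.
	 */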
233 kprintf("Mapping memory:\n");
234 kprintf(" exception vector: %08X, %08X - %08X\n", trunc_page(exception_entry
),
235 trunc_page(exception_entry
), round_page(exception_end
));
236 kprintf(" sectTEXTB: %08X, %08X - %08X\n", trunc_page(sectTEXTB
),
237 trunc_page(sectTEXTB
), round_page(sectTEXTB
+sectSizeTEXT
));
238 kprintf(" sectDATAB: %08X, %08X - %08X\n", trunc_page(sectDATAB
),
239 trunc_page(sectDATAB
), round_page(sectDATAB
+sectSizeDATA
));
240 kprintf(" sectLINKB: %08X, %08X - %08X\n", trunc_page(sectLINKB
),
241 trunc_page(sectLINKB
), round_page(sectLINKB
+sectSizeLINK
));
242 kprintf(" sectKLDB: %08X, %08X - %08X\n", trunc_page(sectKLDB
),
243 trunc_page(sectKLDB
), round_page(sectKLDB
+sectSizeKLD
));
244 kprintf(" end: %08X, %08X - %08X\n", trunc_page(end
),
245 trunc_page(end
), static_memory_end
);
	pmap_map(trunc_page(exception_entry), trunc_page(exception_entry),
		 round_page(exception_end), VM_PROT_READ|VM_PROT_EXECUTE);
	pmap_map(trunc_page(sectTEXTB), trunc_page(sectTEXTB),
		 round_page(sectTEXTB+sectSizeTEXT), VM_PROT_READ|VM_PROT_EXECUTE);
	pmap_map(trunc_page(sectDATAB), trunc_page(sectDATAB),
		 round_page(sectDATAB+sectSizeDATA), VM_PROT_READ|VM_PROT_WRITE);
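
	/* The ranges entered with pmap_map() above stay resident for the
	 * life of the kernel, so they may be given block mappings; the loops
	 * below use pmap_enter() one page at a time instead, so that
	 * individual pages can be returned to the VM system later.
	 */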
	/* The KLD and LINKEDIT segments are unloaded in toto after boot completes,
	 * but via ml_static_mfree(), through IODTFreeLoaderInfo(). Hence, we have
	 * to map both segments page-by-page.
	 */
	for (addr = trunc_page(sectKLDB);
	     addr < round_page(sectKLDB+sectSizeKLD);
	     addr += PAGE_SIZE) {

		pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
	}

	for (addr = trunc_page(sectLINKB);
	     addr < round_page(sectLINKB+sectSizeLINK);
	     addr += PAGE_SIZE) {

		pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
	}
	/*
	 * We need to map the remainder page-by-page because some of this will
	 * be released later, but not all.  Ergo, no block mapping here.
	 */
	for (addr = trunc_page(end); addr < round_page(static_memory_end); addr += PAGE_SIZE) {
		pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
	}
#endif /* __MACHO__ */
	for (i = 0; i < free_regions_count; i++) {
		kprintf("Free region start 0x%08x end 0x%08x\n",
			free_regions[i].start, free_regions[i].end);
	}
	/*
	 * Note: the shadow BAT registers were already loaded in ppc_init.c
	 */

	LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]);	/* Load up real IBATs from shadows */
	LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]);	/* Load up real DBATs from shadows */
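
	/*
	 * The BATs (Block Address Translation registers) are the PowerPC's
	 * four instruction/four data register pairs for mapping large
	 * contiguous ranges without page-table lookups; the upper/lower
	 * words printed below are the raw register halves from the shadow
	 * copies.
	 */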
	for (i = 0; i < 4; i++) kprintf("DBAT%1d: %08X %08X\n",
		i, shadow_BAT.DBATs[i].upper, shadow_BAT.DBATs[i].lower);
	for (i = 0; i < 4; i++) kprintf("IBAT%1d: %08X %08X\n",
		i, shadow_BAT.IBATs[i].upper, shadow_BAT.IBATs[i].lower);
}
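
/*
 * ppc_vm_cpu_init: per-processor VM setup.  The hashed page table pointer
 * and the BAT registers are per-CPU state, so every processor (not just
 * the boot CPU) must load them as it comes up; ppc_vm_init() above did
 * the equivalent work for the boot processor.
 */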
void ppc_vm_cpu_init(
	struct per_proc_info *proc_info)
{
	hash_table_init(hash_table_base, hash_table_size);

	LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]);
	LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]);

	sync(); isync();	/* Make sure the translation changes take effect before we continue */
}