/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

#include <cpus.h>

#if NCPUS > 1

#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <mach/machine.h>
#include <vm/vm_kern.h>

#include <i386/mp_desc.h>
#include <i386/lock.h>
#include <i386/misc_protos.h>

#include <kern/misc_protos.h>

#include <mach_kdb.h>

/*
 * The i386 needs an interrupt stack to keep the PCB stack from being
 * overrun by interrupts. All interrupt stacks MUST lie at lower addresses
 * than any thread's kernel stack.
 */

/*
 * Addresses of bottom and top of interrupt stacks.
 */
vm_offset_t interrupt_stack[NCPUS];
vm_offset_t int_stack_top[NCPUS];

/*
 * Barrier address: all thread kernel stacks must lie above this
 * address; only interrupt stacks lie below it.
 */
vm_offset_t int_stack_high;

/*
 * First cpu's interrupt stack.
 */
extern char intstack[]; /* bottom */
extern char eintstack[]; /* top */

/*
 * We allocate interrupt stacks from physical memory.
 */
extern
vm_offset_t avail_start;
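
/*
 * Note: avail_start is the first physical address not yet claimed
 * by the kernel.  Since these allocations happen before the VM
 * system is running, bumping avail_start is what makes them
 * permanent.
 */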

/*
 * Multiprocessor i386/i486 systems use a separate copy of the
 * GDT, IDT, LDT, and kernel TSS per processor. The first three
 * are separate to avoid lock contention: the i386 uses locked
 * memory cycles to access the descriptor tables. The TSS is
 * separate since each processor needs its own kernel stack,
 * and since using a TSS marks it busy.
 */
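
/*
 * For reference: struct mp_desc_table (i386/mp_desc.h) gathers the
 * per-processor tables, roughly as follows (a sketch, not the
 * actual declaration):
 *
 *	struct mp_desc_table {
 *		struct fake_descriptor idt[IDTSZ];
 *		struct fake_descriptor gdt[GDTSZ];
 *		struct fake_descriptor ldt[LDTSZ];
 *		struct i386_tss        ktss;
 *		struct i386_tss        dbtss;
 *	};
 */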

/*
 * Allocated descriptor tables.
 */
struct mp_desc_table *mp_desc_table[NCPUS] = { 0 };

/*
 * Pointer to TSS for access in load_context.
 */
struct i386_tss *mp_ktss[NCPUS] = { 0 };

#if MACH_KDB
/*
 * Pointer to TSS for debugger use.
 */
struct i386_tss *mp_dbtss[NCPUS] = { 0 };
#endif /* MACH_KDB */

/*
 * Pointers to the GDT (used, among other things, to reset the
 * KTSS busy bit) and to the IDT.
 */
struct fake_descriptor *mp_gdt[NCPUS] = { 0 };
struct fake_descriptor *mp_idt[NCPUS] = { 0 };

/*
 * Allocate and initialize the per-processor descriptor tables.
 */

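/*
 * Each initializer below fills struct fake_descriptor (i386/seg.h)
 * field by field: base offset, limit, size/granularity bits, and
 * access rights.  fix_desc() later converts these flat values into
 * the hardware descriptor layout.
 */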
struct fake_descriptor ldt_desc_pattern = {
        (unsigned int) 0,
        LDTSZ * sizeof(struct fake_descriptor) - 1,
        0,
        ACC_P|ACC_PL_K|ACC_LDT
};
struct fake_descriptor tss_desc_pattern = {
        (unsigned int) 0,
        sizeof(struct i386_tss),
        0,
        ACC_P|ACC_PL_K|ACC_TSS
};

struct fake_descriptor cpudata_desc_pattern = {
        (unsigned int) 0,
        sizeof(cpu_data_t)-1,
        SZ_32,
        ACC_P|ACC_PL_K|ACC_DATA_W
};
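
/*
 * The CPU_DATA descriptor maps a processor's private cpu_data[]
 * slot, letting per-CPU state be reached through a segment
 * register set up during that processor's startup rather than by
 * indexing the shared array.
 */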

struct mp_desc_table *
mp_desc_init(
        int mycpu)
{
        register struct mp_desc_table *mpt;

        if (mycpu == master_cpu) {
                /*
                 * Master CPU uses the tables built at boot time.
                 * Just set the TSS and GDT pointers.
                 */
                mp_ktss[mycpu] = &ktss;
#if MACH_KDB
                mp_dbtss[mycpu] = &dbtss;
#endif /* MACH_KDB */
                mp_gdt[mycpu] = gdt;
                mp_idt[mycpu] = idt;
                return 0;
        }
        else {
                mpt = mp_desc_table[mycpu];
                mp_ktss[mycpu] = &mpt->ktss;
                mp_gdt[mycpu] = mpt->gdt;
                mp_idt[mycpu] = mpt->idt;

                /*
                 * Copy the tables.
                 */
                bcopy((char *)idt,
                      (char *)mpt->idt,
                      sizeof(idt));
                bcopy((char *)gdt,
                      (char *)mpt->gdt,
                      sizeof(gdt));
                bcopy((char *)ldt,
                      (char *)mpt->ldt,
                      sizeof(ldt));
                bzero((char *)&mpt->ktss,
                      sizeof(struct i386_tss));
                bzero((char *)&cpu_data[mycpu],
                      sizeof(cpu_data_t));
#if MACH_KDB
                mp_dbtss[mycpu] = &mpt->dbtss;
                bcopy((char *)&dbtss,
                      (char *)&mpt->dbtss,
                      sizeof(struct i386_tss));
#endif /* MACH_KDB */

                /*
                 * Fix up the entries in the GDT to point to
                 * this LDT and this TSS.
                 */
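                /*
                 * Note: descriptor bases must be linear addresses,
                 * while mpt and cpu_data are kernel virtual addresses,
                 * hence the LINEAR_KERNEL_ADDRESS bias on each offset
                 * below.
                 */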
                mpt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
                mpt->gdt[sel_idx(KERNEL_LDT)].offset =
                        LINEAR_KERNEL_ADDRESS + (unsigned int) mpt->ldt;
                fix_desc(&mpt->gdt[sel_idx(KERNEL_LDT)], 1);

                mpt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
                mpt->gdt[sel_idx(KERNEL_TSS)].offset =
                        LINEAR_KERNEL_ADDRESS + (unsigned int) &mpt->ktss;
                fix_desc(&mpt->gdt[sel_idx(KERNEL_TSS)], 1);

                mpt->gdt[sel_idx(CPU_DATA)] = cpudata_desc_pattern;
                mpt->gdt[sel_idx(CPU_DATA)].offset =
                        LINEAR_KERNEL_ADDRESS + (unsigned int) &cpu_data[mycpu];
                fix_desc(&mpt->gdt[sel_idx(CPU_DATA)], 1);

#if MACH_KDB
                mpt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
                mpt->gdt[sel_idx(DEBUG_TSS)].offset =
                        LINEAR_KERNEL_ADDRESS + (unsigned int) &mpt->dbtss;
                fix_desc(&mpt->gdt[sel_idx(DEBUG_TSS)], 1);

                mpt->dbtss.esp0 = (int)(db_task_stack_store +
                        (INTSTACK_SIZE * (mycpu + 1)) - sizeof (natural_t));
                mpt->dbtss.esp = mpt->dbtss.esp0;
                mpt->dbtss.eip = (int)&db_task_start;
#endif /* MACH_KDB */

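                /*
                 * ss0 is the stack segment the CPU loads on a
                 * user-to-kernel ring transition; the matching esp0
                 * is left for the context-switch code to set once
                 * this processor has a kernel stack.  An I/O bitmap
                 * offset beyond the TSS limit means there is no I/O
                 * permission bitmap, so user-mode port access is
                 * governed by IOPL alone.
                 */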
                mpt->ktss.ss0 = KERNEL_DS;
                mpt->ktss.io_bit_map_offset = 0x0FFF; /* no IO bitmap */

                return mpt;
        }
}

/*
 * Called after all CPUs have been found, but before the VM system
 * is running. The machine array must show which CPUs exist.
 */
void
interrupt_stack_alloc(void)
{
        register int i;
        int cpu_count;
        vm_offset_t stack_start;
        struct mp_desc_table *mpt;

        /*
         * Count the number of CPUs.
         */
        cpu_count = 0;
        for (i = 0; i < NCPUS; i++)
                if (machine_slot[i].is_cpu)
                        cpu_count++;

        /*
         * Allocate an interrupt stack for each CPU except for
         * the master CPU (which uses the bootstrap stack).
         */
        stack_start = phystokv(avail_start);
        avail_start = round_page(avail_start + INTSTACK_SIZE*(cpu_count-1));
        bzero((char *)stack_start, INTSTACK_SIZE*(cpu_count-1));

        /*
         * Set up pointers to the top of the interrupt stack.
         */
        for (i = 0; i < NCPUS; i++) {
                if (i == master_cpu) {
                        interrupt_stack[i] = (vm_offset_t) intstack;
                        int_stack_top[i] = (vm_offset_t) eintstack;
                }
                else if (machine_slot[i].is_cpu) {
                        interrupt_stack[i] = stack_start;
                        int_stack_top[i] = stack_start + INTSTACK_SIZE;

                        stack_start += INTSTACK_SIZE;
                }
        }

        /*
         * Allocate descriptor tables for each CPU except for
         * the master CPU (which already has them initialized).
         */

        mpt = (struct mp_desc_table *) phystokv(avail_start);
        avail_start = round_page((vm_offset_t)avail_start +
                                 sizeof(struct mp_desc_table)*(cpu_count-1));
        for (i = 0; i < NCPUS; i++)
                if (i != master_cpu)
                        mp_desc_table[i] = mpt++;

        /*
         * Set up the barrier address. All thread stacks MUST
         * be above this address.
         */
        /*
         * intstack lies at a higher address than stack_start on AT
         * multiprocessors, so int_stack_high must point at eintstack.
         * XXX
         * But what happens if a kernel stack gets allocated below
         * 1 MB? That probably never happens, since only 640K is
         * available there.
         */
        int_stack_high = (vm_offset_t) eintstack;
}

#endif /* NCPUS > 1 */