/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

#include <cpus.h>

#if NCPUS > 1

#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <mach/machine.h>
#include <vm/vm_kern.h>

#include <i386/mp_desc.h>
#include <i386/lock.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>

#include <kern/misc_protos.h>

#include <mach_kdb.h>

/*
 * The i386 needs an interrupt stack to keep the PCB stack from being
 * overrun by interrupts.  All interrupt stacks MUST lie at lower addresses
 * than any thread's kernel stack.
 */

/*
 * Addresses of bottom and top of interrupt stacks.
 */
vm_offset_t interrupt_stack[NCPUS];
vm_offset_t int_stack_top[NCPUS];

/*
 * Barrier address.
 */
vm_offset_t int_stack_high;

/*
 * First cpu's interrupt stack.
 */
extern char intstack[];         /* bottom */
extern char eintstack[];        /* top */

/*
 * We allocate interrupt stacks from physical memory.
 */
extern
vm_offset_t avail_start;

/*
 * Multiprocessor i386/i486 systems use a separate copy of the
 * GDT, IDT, LDT, and kernel TSS per processor.  The first three
 * are separate to avoid lock contention: the i386 uses locked
 * memory cycles to access the descriptor tables.  The TSS is
 * separate since each processor needs its own kernel stack,
 * and since using a TSS marks it busy.
 */
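
/*
 * Each non-master CPU gets one struct mp_desc_table (see i386/mp_desc.h
 * for the exact layout): its private IDT, GDT and LDT, kept as
 * fake_descriptor arrays, plus the kernel TSS (and, under MACH_KDB, a
 * debugger TSS).  mp_desc_init() below copies the boot CPU's tables
 * into that block and patches the per-CPU entries.
 */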

/*
 * Allocated descriptor tables.
 */
struct mp_desc_table *mp_desc_table[NCPUS] = { 0 };

/*
 * Pointer to TSS for access in load_context.
 */
struct i386_tss *mp_ktss[NCPUS] = { 0 };

#if MACH_KDB
/*
 * Pointer to TSS for debugger use.
 */
struct i386_tss *mp_dbtss[NCPUS] = { 0 };
#endif  /* MACH_KDB */

/*
 * Pointer to GDT to reset the KTSS busy bit, plus the
 * per-CPU IDT and LDT pointers.
 */
struct fake_descriptor *mp_gdt[NCPUS] = { 0 };
struct fake_descriptor *mp_idt[NCPUS] = { 0 };
struct fake_descriptor *mp_ldt[NCPUS] = { 0 };

/*
 * Allocate and initialize the per-processor descriptor tables.
 */

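/*
 * Template descriptors, kept in "fake" (unscrambled base/limit/access)
 * form with a zero base.  mp_desc_init() copies a pattern into the
 * per-CPU GDT, fills in the CPU-specific linear base address, and then
 * calls fix_desc() to convert the entry to the hardware layout.
 */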
struct fake_descriptor ldt_desc_pattern = {
    (unsigned int) 0,
    LDTSZ * sizeof(struct fake_descriptor) - 1,
    0,
    ACC_P|ACC_PL_K|ACC_LDT
};
struct fake_descriptor tss_desc_pattern = {
    (unsigned int) 0,
    sizeof(struct i386_tss),
    0,
    ACC_P|ACC_PL_K|ACC_TSS
};

struct fake_descriptor cpudata_desc_pattern = {
    (unsigned int) 0,
    sizeof(cpu_data_t)-1,
    SZ_32,
    ACC_P|ACC_PL_K|ACC_DATA_W
};

struct mp_desc_table *
mp_desc_init(
    int mycpu)
{
    register struct mp_desc_table *mpt;

    if (mycpu == master_cpu) {
        /*
         * Master CPU uses the tables built at boot time.
         * Just set the TSS and GDT pointers.
         */
        mp_ktss[mycpu] = &ktss;
#if MACH_KDB
        mp_dbtss[mycpu] = &dbtss;
#endif  /* MACH_KDB */
        mp_gdt[mycpu] = gdt;
        mp_idt[mycpu] = idt;
        mp_ldt[mycpu] = ldt;
        return 0;
    }
    else {
        mpt = mp_desc_table[mycpu];
        mp_ktss[mycpu] = &mpt->ktss;
        mp_gdt[mycpu] = mpt->gdt;
        mp_idt[mycpu] = mpt->idt;
        mp_ldt[mycpu] = mpt->ldt;

        /*
         * Copy the tables
         */
        bcopy((char *)idt,
              (char *)mpt->idt,
              sizeof(idt));
        bcopy((char *)gdt,
              (char *)mpt->gdt,
              sizeof(gdt));
        bcopy((char *)ldt,
              (char *)mpt->ldt,
              sizeof(ldt));
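        /*
         * sizeof() gives the full size of each table here because idt,
         * gdt and ldt are the statically sized boot-time arrays, not
         * pointers.
         */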
        bzero((char *)&mpt->ktss,
              sizeof(struct i386_tss));
#if 0
        bzero((char *)&cpu_data[mycpu],
              sizeof(cpu_data_t));
#endif
        /* I am myself */
        cpu_data[mycpu].cpu_number = mycpu;

#if MACH_KDB
        mp_dbtss[mycpu] = &mpt->dbtss;
        bcopy((char *)&dbtss,
              (char *)&mpt->dbtss,
              sizeof(struct i386_tss));
#endif  /* MACH_KDB */

        /*
         * Fix up the entries in the GDT to point to
         * this LDT and this TSS.
         */
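        /*
         * Descriptor bases are linear addresses, so each kernel virtual
         * address below is offset by LINEAR_KERNEL_ADDRESS; fix_desc()
         * then converts the fake_descriptor entry into the hardware
         * descriptor layout in place.
         */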
        mpt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
        mpt->gdt[sel_idx(KERNEL_LDT)].offset =
            LINEAR_KERNEL_ADDRESS + (unsigned int) mpt->ldt;
        fix_desc(&mpt->gdt[sel_idx(KERNEL_LDT)], 1);

        mpt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
        mpt->gdt[sel_idx(KERNEL_TSS)].offset =
            LINEAR_KERNEL_ADDRESS + (unsigned int) &mpt->ktss;
        fix_desc(&mpt->gdt[sel_idx(KERNEL_TSS)], 1);

        mpt->gdt[sel_idx(CPU_DATA)] = cpudata_desc_pattern;
        mpt->gdt[sel_idx(CPU_DATA)].offset =
            LINEAR_KERNEL_ADDRESS + (unsigned int) &cpu_data[mycpu];
        fix_desc(&mpt->gdt[sel_idx(CPU_DATA)], 1);

#if MACH_KDB
        mpt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
        mpt->gdt[sel_idx(DEBUG_TSS)].offset =
            LINEAR_KERNEL_ADDRESS + (unsigned int) &mpt->dbtss;
        fix_desc(&mpt->gdt[sel_idx(DEBUG_TSS)], 1);

        mpt->dbtss.esp0 = (int)(db_task_stack_store +
            (INTSTACK_SIZE * (mycpu + 1)) - sizeof (natural_t));
        mpt->dbtss.esp = mpt->dbtss.esp0;
        mpt->dbtss.eip = (int)&db_task_start;
#endif  /* MACH_KDB */

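        /*
         * Only ss0 and the I/O bitmap offset are set up here; esp0 is
         * filled in later, when a thread's context is loaded on this
         * CPU (see the mp_ktss comment above).  An io_bit_map_offset
         * beyond the TSS limit means there is no I/O permission
         * bitmap, so user-mode I/O instructions take a fault.
         */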
        mpt->ktss.ss0 = KERNEL_DS;
        mpt->ktss.io_bit_map_offset = 0x0FFF;   /* no IO bitmap */

        return mpt;
    }
}

/*
 * Called after all CPUs have been found, but before the VM system
 * is running.  The machine array must show which CPUs exist.
 */
void
interrupt_stack_alloc(void)
{
    register int i;
    int cpu_count;
    vm_offset_t stack_start;
    struct mp_desc_table *mpt;

    /*
     * Number of CPUs possible.
     */
    cpu_count = wncpu;

    /*
     * Allocate an interrupt stack for each CPU except for
     * the master CPU (which uses the bootstrap stack).
     */
    stack_start = phystokv(avail_start);
    avail_start = round_page(avail_start + INTSTACK_SIZE*(cpu_count-1));
    bzero((char *)stack_start, INTSTACK_SIZE*(cpu_count-1));

    /*
     * Set up pointers to the top of the interrupt stack.
     */
    for (i = 0; i < cpu_count; i++) {
        if (i == master_cpu) {
            interrupt_stack[i] = (vm_offset_t) intstack;
            int_stack_top[i] = (vm_offset_t) eintstack;
        }
        else {
            interrupt_stack[i] = stack_start;
            int_stack_top[i] = stack_start + INTSTACK_SIZE;

            stack_start += INTSTACK_SIZE;
        }
    }

    /*
     * Allocate descriptor tables for each CPU except for
     * the master CPU (which already has them initialized).
     */

    mpt = (struct mp_desc_table *) phystokv(avail_start);
    avail_start = round_page((vm_offset_t)avail_start +
        sizeof(struct mp_desc_table)*(cpu_count-1));
    for (i = 0; i < cpu_count; i++)
        if (i != master_cpu)
            mp_desc_table[i] = mpt++;

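    /*
     * mp_desc_init() (above) copies the boot CPU's tables into each of
     * these blocks when it is later called for the corresponding
     * non-master CPU.
     */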

    /*
     * Set up the barrier address.  All thread stacks MUST
     * be above this address.
     */
    /*
     * intstack is at a higher address than stack_start for AT mps,
     * so int_stack_high must point at eintstack.
     * XXX
     * But what happens if a kernel stack gets allocated below
     * 1 Meg?  Probably never happens; there is only 640K available
     * there.
     */
    int_stack_high = (vm_offset_t) eintstack;
}

#endif  /* NCPUS > 1 */