osfmk/i386/mp_desc.c
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

#include <cpus.h>

#if NCPUS > 1

#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <mach/machine.h>
#include <vm/vm_kern.h>

#include <i386/mp_desc.h>
#include <i386/lock.h>
#include <i386/misc_protos.h>

#include <kern/misc_protos.h>

#include <mach_kdb.h>

/*
 * The i386 needs an interrupt stack to keep the PCB stack from being
 * overrun by interrupts.  All interrupt stacks MUST lie at lower addresses
 * than any thread's kernel stack.
 */
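
/*
 * Editor's note: a minimal sketch (not part of the original source) of
 * how this ordering can be exploited.  Given the barrier address
 * int_stack_high declared below, any code can tell whether it is
 * running on an interrupt stack by comparing the address of one of its
 * own locals against the barrier; the helper name is hypothetical.
 */
#if 0	/* illustrative only */
static boolean_t
on_int_stack(void)
{
	int	marker;	/* any automatic variable lies on the current stack */

	return ((vm_offset_t) &marker < int_stack_high);
}
#endif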

/*
 * Addresses of bottom and top of interrupt stacks.
 */
vm_offset_t	interrupt_stack[NCPUS];
vm_offset_t	int_stack_top[NCPUS];

/*
 * Barrier address.
 */
vm_offset_t	int_stack_high;

/*
 * First CPU's interrupt stack.
 */
extern char	intstack[];	/* bottom */
extern char	eintstack[];	/* top */

/*
 * We allocate interrupt stacks from physical memory.
 */
extern
vm_offset_t	avail_start;

/*
 * Multiprocessor i386/i486 systems use a separate copy of the
 * GDT, IDT, LDT, and kernel TSS per processor.  The first three
 * are separate to avoid lock contention: the i386 uses locked
 * memory cycles to access the descriptor tables.  The TSS is
 * separate since each processor needs its own kernel stack,
 * and since using a TSS marks it busy.
 */
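
/*
 * Editor's note: the per-processor table bundle used below is a single
 * struct declared in i386/mp_desc.h; its rough shape is sketched here
 * for reference (consult the header for the authoritative layout):
 *
 *	struct mp_desc_table {
 *		struct fake_descriptor	idt[IDTSZ];
 *		struct fake_descriptor	gdt[GDTSZ];
 *		struct fake_descriptor	ldt[LDTSZ];
 *		struct i386_tss		ktss;
 *		struct i386_tss		dbtss;
 *	};
 */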

/*
 * Allocated descriptor tables.
 */
struct mp_desc_table	*mp_desc_table[NCPUS] = { 0 };

/*
 * Pointer to TSS for access in load_context.
 */
struct i386_tss		*mp_ktss[NCPUS] = { 0 };

#if	MACH_KDB
/*
 * Pointer to TSS for debugger use.
 */
struct i386_tss		*mp_dbtss[NCPUS] = { 0 };
#endif	/* MACH_KDB */

/*
 * Pointers to the GDT (used to reset the KTSS busy bit) and to the IDT.
 */
struct fake_descriptor	*mp_gdt[NCPUS] = { 0 };
struct fake_descriptor	*mp_idt[NCPUS] = { 0 };

/*
 * Allocate and initialize the per-processor descriptor tables.
 */

struct fake_descriptor ldt_desc_pattern = {
	(unsigned int) 0,
	LDTSZ * sizeof(struct fake_descriptor) - 1,
	0,
	ACC_P|ACC_PL_K|ACC_LDT
};
struct fake_descriptor tss_desc_pattern = {
	(unsigned int) 0,
	sizeof(struct i386_tss),
	0,
	ACC_P|ACC_PL_K|ACC_TSS
};

struct fake_descriptor cpudata_desc_pattern = {
	(unsigned int) 0,
	sizeof(cpu_data_t) - 1,
	SZ_32,
	ACC_P|ACC_PL_K|ACC_DATA_W
};
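
/*
 * Editor's note: the patterns above are "fake" descriptors, i.e. the
 * fields (offset, limit, size bits, access byte) are stored in a
 * software-friendly order; fix_desc() repacks an entry in place into
 * the split base/limit format the hardware expects.  The idiom used
 * throughout mp_desc_init() is therefore: copy a pattern, plug in the
 * linear offset, then fix_desc().  A hedged sketch (gdt_entry and
 * my_ldt are hypothetical names):
 */
#if 0	/* illustrative only */
	gdt_entry = ldt_desc_pattern;			/* copy pattern */
	gdt_entry.offset = LINEAR_KERNEL_ADDRESS +
				(unsigned int) my_ldt;	/* plug in base */
	fix_desc(&gdt_entry, 1);			/* make it real */
#endif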

struct mp_desc_table *
mp_desc_init(
	int	mycpu)
{
	register struct mp_desc_table	*mpt;

	if (mycpu == master_cpu) {
		/*
		 * Master CPU uses the tables built at boot time.
		 * Just set the TSS and GDT pointers.
		 */
		mp_ktss[mycpu] = &ktss;
#if	MACH_KDB
		mp_dbtss[mycpu] = &dbtss;
#endif	/* MACH_KDB */
		mp_gdt[mycpu] = gdt;
		mp_idt[mycpu] = idt;
		return 0;
	}
	else {
		mpt = mp_desc_table[mycpu];
		mp_ktss[mycpu] = &mpt->ktss;
		mp_gdt[mycpu] = mpt->gdt;
		mp_idt[mycpu] = mpt->idt;

		/*
		 * Copy the tables.
		 */
		bcopy((char *)idt,
		      (char *)mpt->idt,
		      sizeof(idt));
		bcopy((char *)gdt,
		      (char *)mpt->gdt,
		      sizeof(gdt));
		bcopy((char *)ldt,
		      (char *)mpt->ldt,
		      sizeof(ldt));
		bzero((char *)&mpt->ktss,
		      sizeof(struct i386_tss));
		bzero((char *)&cpu_data[mycpu],
		      sizeof(cpu_data_t));
#if	MACH_KDB
		mp_dbtss[mycpu] = &mpt->dbtss;
		bcopy((char *)&dbtss,
		      (char *)&mpt->dbtss,
		      sizeof(struct i386_tss));
#endif	/* MACH_KDB */

		/*
		 * Fix up the entries in the GDT to point to
		 * this LDT and this TSS.
		 */
		mpt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
		mpt->gdt[sel_idx(KERNEL_LDT)].offset =
			LINEAR_KERNEL_ADDRESS + (unsigned int) mpt->ldt;
		fix_desc(&mpt->gdt[sel_idx(KERNEL_LDT)], 1);

		mpt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
		mpt->gdt[sel_idx(KERNEL_TSS)].offset =
			LINEAR_KERNEL_ADDRESS + (unsigned int) &mpt->ktss;
		fix_desc(&mpt->gdt[sel_idx(KERNEL_TSS)], 1);

		mpt->gdt[sel_idx(CPU_DATA)] = cpudata_desc_pattern;
		mpt->gdt[sel_idx(CPU_DATA)].offset =
			LINEAR_KERNEL_ADDRESS + (unsigned int) &cpu_data[mycpu];
		fix_desc(&mpt->gdt[sel_idx(CPU_DATA)], 1);

#if	MACH_KDB
		mpt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
		mpt->gdt[sel_idx(DEBUG_TSS)].offset =
			LINEAR_KERNEL_ADDRESS + (unsigned int) &mpt->dbtss;
		fix_desc(&mpt->gdt[sel_idx(DEBUG_TSS)], 1);

		mpt->dbtss.esp0 = (int)(db_task_stack_store +
			(INTSTACK_SIZE * (mycpu + 1)) - sizeof (natural_t));
		mpt->dbtss.esp = mpt->dbtss.esp0;
		mpt->dbtss.eip = (int)&db_task_start;
#endif	/* MACH_KDB */

		mpt->ktss.ss0 = KERNEL_DS;
		mpt->ktss.io_bit_map_offset = 0x0FFF;	/* no IO bitmap */
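		/*
		 * Editor's note: 0x0FFF lies beyond the TSS segment
		 * limit, which is the i386 convention for "no I/O
		 * permission bitmap"; IN/OUT from user mode will then
		 * fault rather than consult a map.
		 */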

		return mpt;
	}
}
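
/*
 * Editor's note: a hedged sketch of the calling convention.  A slave
 * processor's start-up path (the actual call site lives in the MP
 * start-up code, not in this file) would do roughly:
 */
#if 0	/* illustrative only */
	struct mp_desc_table *mpt;

	mpt = mp_desc_init(cpu_number());
	if (mpt != 0) {
		/* slave CPU: load the per-cpu GDT/IDT, then LDT and TSS */
	}
#endif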

/*
 * Called after all CPUs have been found, but before the VM system
 * is running.  The machine array must show which CPUs exist.
 */
void
interrupt_stack_alloc(void)
{
	register int	i;
	int		cpu_count;
	vm_offset_t	stack_start;
	struct mp_desc_table *mpt;

	/*
	 * Count the number of CPUs.
	 */
	cpu_count = 0;
	for (i = 0; i < NCPUS; i++)
		if (machine_slot[i].is_cpu)
			cpu_count++;

	/*
	 * Allocate an interrupt stack for each CPU except for
	 * the master CPU (which uses the bootstrap stack).
	 */
	stack_start = phystokv(avail_start);
	avail_start = round_page(avail_start + INTSTACK_SIZE*(cpu_count-1));
	bzero((char *)stack_start, INTSTACK_SIZE*(cpu_count-1));
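
	/*
	 * Editor's note, with illustrative numbers: on a 4-CPU machine
	 * with a 4K INTSTACK_SIZE (the real value comes from the
	 * headers), this carves 3 * 4K = 12K out of boot-time physical
	 * memory -- one stack per slave -- and bumps avail_start past
	 * it, rounded up to a page boundary.
	 */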

	/*
	 * Set up pointers to the bottom and top of each interrupt stack.
	 */
	for (i = 0; i < NCPUS; i++) {
		if (i == master_cpu) {
			interrupt_stack[i] = (vm_offset_t) intstack;
			int_stack_top[i] = (vm_offset_t) eintstack;
		}
		else if (machine_slot[i].is_cpu) {
			interrupt_stack[i] = stack_start;
			int_stack_top[i] = stack_start + INTSTACK_SIZE;

			stack_start += INTSTACK_SIZE;
		}
	}

	/*
	 * Allocate descriptor tables for each CPU except for
	 * the master CPU (which already has them initialized).
	 */

	mpt = (struct mp_desc_table *) phystokv(avail_start);
	avail_start = round_page((vm_offset_t)avail_start +
				 sizeof(struct mp_desc_table)*(cpu_count-1));
	for (i = 0; i < NCPUS; i++)
		if (i != master_cpu)
			mp_desc_table[i] = mpt++;

	/*
	 * Set up the barrier address.  All thread stacks MUST
	 * be above this address.
	 */
	/*
	 * intstack is at a higher address than stack_start for AT mps,
	 * so int_stack_high must point at eintstack.
	 * XXX
	 * But what happens if a kernel stack gets allocated below
	 * 1 Meg?  Probably never happens: there is only 640K available
	 * there.
	 */
	int_stack_high = (vm_offset_t) eintstack;
}

#endif	/* NCPUS > 1 */