/* apple/xnu — osfmk/i386/mp_desc.c */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 /*
58 */
59
60
61 #include <kern/cpu_number.h>
62 #include <kern/kalloc.h>
63 #include <kern/cpu_data.h>
64 #include <mach/machine.h>
65 #include <vm/vm_kern.h>
66
67 #include <i386/mp_desc.h>
68 #include <i386/lock.h>
69 #include <i386/misc_protos.h>
70 #include <i386/mp.h>
71 #include <i386/pmap.h>
72
73 #include <kern/misc_protos.h>
74
75 #include <mach_kdb.h>
76
77 /*
78 * The i386 needs an interrupt stack to keep the PCB stack from being
79 * overrun by interrupts. All interrupt stacks MUST lie at lower addresses
80 * than any thread`s kernel stack.
81 */
82
83 /*
84 * First cpu`s interrupt stack.
85 */
86 extern char intstack[]; /* bottom */
87 extern char eintstack[]; /* top */
88
89 /*
90 * Per-cpu data area pointers.
91 * The master cpu (cpu 0) has its data area statically allocated;
92 * others are allocated dynamically and this array is updated at runtime.
93 */
94 cpu_data_t cpu_data_master;
95 cpu_data_t *cpu_data_ptr[MAX_CPUS] = { [0] &cpu_data_master };
96
97 decl_simple_lock_data(,cpu_lock); /* protects real_ncpus */
98 unsigned int real_ncpus = 1;
99 unsigned int max_ncpus = MAX_CPUS;
100
101 /*
102 * Multiprocessor i386/i486 systems use a separate copy of the
103 * GDT, IDT, LDT, and kernel TSS per processor. The first three
104 * are separate to avoid lock contention: the i386 uses locked
105 * memory cycles to access the descriptor tables. The TSS is
106 * separate since each processor needs its own kernel stack,
107 * and since using a TSS marks it busy.
108 */
109
110 /*
111 * Allocate and initialize the per-processor descriptor tables.
112 */
113
114 struct fake_descriptor ldt_desc_pattern = {
115 (unsigned int) 0,
116 LDTSZ * sizeof(struct fake_descriptor) - 1,
117 0,
118 ACC_P|ACC_PL_K|ACC_LDT
119 };
120 struct fake_descriptor tss_desc_pattern = {
121 (unsigned int) 0,
122 sizeof(struct i386_tss),
123 0,
124 ACC_P|ACC_PL_K|ACC_TSS
125 };
126
127 struct fake_descriptor cpudata_desc_pattern = {
128 (unsigned int) 0,
129 sizeof(cpu_data_t)-1,
130 SZ_32,
131 ACC_P|ACC_PL_K|ACC_DATA_W
132 };
133
134 void
135 mp_desc_init(
136 cpu_data_t *cdp,
137 boolean_t is_boot_cpu)
138 {
139 struct mp_desc_table *mpt = cdp->cpu_desc_tablep;
140 cpu_desc_index_t *cdt = &cdp->cpu_desc_index;
141
142 if (is_boot_cpu) {
143 /*
144 * Master CPU uses the tables built at boot time.
145 * Just set the TSS and GDT pointers.
146 */
147 cdt->cdi_ktss = &ktss;
148 #if MACH_KDB
149 cdt->cdi_dbtss = &dbtss;
150 #endif /* MACH_KDB */
151 cdt->cdi_gdt = gdt;
152 cdt->cdi_idt = idt;
153 cdt->cdi_ldt = ldt;
154
155 } else {
156
157 cdt->cdi_ktss = &mpt->ktss;
158 cdt->cdi_gdt = mpt->gdt;
159 cdt->cdi_idt = mpt->idt;
160 cdt->cdi_ldt = mpt->ldt;
161
162 /*
163 * Copy the tables
164 */
165 bcopy((char *)idt,
166 (char *)mpt->idt,
167 sizeof(idt));
168 bcopy((char *)gdt,
169 (char *)mpt->gdt,
170 sizeof(gdt));
171 bcopy((char *)ldt,
172 (char *)mpt->ldt,
173 sizeof(ldt));
174 bzero((char *)&mpt->ktss,
175 sizeof(struct i386_tss));
176
177 #if MACH_KDB
178 cdt->cdi_dbtss = &dbtss;
179 bcopy((char *)&dbtss,
180 (char *)&mpt->dbtss,
181 sizeof(struct i386_tss));
182 #endif /* MACH_KDB */
183
184 /*
185 * Fix up the entries in the GDT to point to
186 * this LDT and this TSS.
187 */
188 mpt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
189 mpt->gdt[sel_idx(KERNEL_LDT)].offset = (vm_offset_t) mpt->ldt;
190 fix_desc(&mpt->gdt[sel_idx(KERNEL_LDT)], 1);
191
192 mpt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
193 mpt->gdt[sel_idx(KERNEL_TSS)].offset = (vm_offset_t) &mpt->ktss;
194 fix_desc(&mpt->gdt[sel_idx(KERNEL_TSS)], 1);
195
196 mpt->gdt[sel_idx(CPU_DATA_GS)] = cpudata_desc_pattern;
197 mpt->gdt[sel_idx(CPU_DATA_GS)].offset = (vm_offset_t) cdp;
198 fix_desc(&mpt->gdt[sel_idx(CPU_DATA_GS)], 1);
199
200 #if MACH_KDB
201 mpt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
202 mpt->gdt[sel_idx(DEBUG_TSS)].offset = (vm_offset_t) &mpt->dbtss;
203 fix_desc(&mpt->gdt[sel_idx(DEBUG_TSS)], 1);
204
205 mpt->dbtss.esp0 = (int)(db_task_stack_store +
206 (INTSTACK_SIZE * (cpu + 1)) - sizeof (natural_t));
207 mpt->dbtss.esp = mpt->dbtss.esp0;
208 mpt->dbtss.eip = (int)&db_task_start;
209 #endif /* MACH_KDB */
210
211 mpt->ktss.ss0 = KERNEL_DS;
212 mpt->ktss.io_bit_map_offset = 0x0FFF; /* no IO bitmap */
213 }
214 }
215
216 cpu_data_t *
217 cpu_data_alloc(boolean_t is_boot_cpu)
218 {
219 int ret;
220 cpu_data_t *cdp;
221
222 if (is_boot_cpu) {
223 assert(real_ncpus == 1);
224 simple_lock_init(&cpu_lock, 0);
225 cdp = &cpu_data_master;
226 if (cdp->cpu_processor == NULL) {
227 cdp->cpu_processor = cpu_processor_alloc(TRUE);
228 cdp->cpu_pmap = pmap_cpu_alloc(TRUE);
229 cdp->cpu_this = cdp;
230 cdp->cpu_int_stack_top = (vm_offset_t) eintstack;
231 mp_desc_init(cdp, TRUE);
232 }
233 return cdp;
234 }
235
236 /* Check count before making allocations */
237 if (real_ncpus >= max_ncpus)
238 return NULL;
239
240 /*
241 * Allocate per-cpu data:
242 */
243 ret = kmem_alloc(kernel_map,
244 (vm_offset_t *) &cdp, sizeof(cpu_data_t));
245 if (ret != KERN_SUCCESS) {
246 printf("cpu_data_alloc() failed, ret=%d\n", ret);
247 goto abort;
248 }
249 bzero((void*) cdp, sizeof(cpu_data_t));
250 cdp->cpu_this = cdp;
251
252 /*
253 * Allocate interrupt stack:
254 */
255 ret = kmem_alloc(kernel_map,
256 (vm_offset_t *) &cdp->cpu_int_stack_top,
257 INTSTACK_SIZE);
258 if (ret != KERN_SUCCESS) {
259 printf("cpu_data_alloc() int stack failed, ret=%d\n", ret);
260 goto abort;
261 }
262 bzero((void*) cdp->cpu_int_stack_top, INTSTACK_SIZE);
263 cdp->cpu_int_stack_top += INTSTACK_SIZE;
264
265 /*
266 * Allocate descriptor table:
267 */
268 ret = kmem_alloc(kernel_map,
269 (vm_offset_t *) &cdp->cpu_desc_tablep,
270 sizeof(struct mp_desc_table));
271 if (ret != KERN_SUCCESS) {
272 printf("cpu_data_alloc() desc_table failed, ret=%d\n", ret);
273 goto abort;
274 }
275
276 simple_lock(&cpu_lock);
277 if (real_ncpus >= max_ncpus) {
278 simple_unlock(&cpu_lock);
279 goto abort;
280 }
281 cpu_data_ptr[real_ncpus] = cdp;
282 cdp->cpu_number = real_ncpus;
283 real_ncpus++;
284 simple_unlock(&cpu_lock);
285
286 kprintf("cpu_data_alloc(%d) 0x%x desc_table: 0x%x "
287 "int_stack: 0x%x-0x%x\n",
288 cdp->cpu_number, cdp, cdp->cpu_desc_tablep,
289 cdp->cpu_int_stack_top - INTSTACK_SIZE, cdp->cpu_int_stack_top);
290
291 return cdp;
292
293 abort:
294 if (cdp) {
295 if (cdp->cpu_desc_tablep)
296 kfree((void *) cdp->cpu_desc_tablep,
297 sizeof(*cdp->cpu_desc_tablep));
298 if (cdp->cpu_int_stack_top)
299 kfree((void *) (cdp->cpu_int_stack_top - INTSTACK_SIZE),
300 INTSTACK_SIZE);
301 kfree((void *) cdp, sizeof(*cdp));
302 }
303 return NULL;
304 }
305
306 boolean_t
307 valid_user_segment_selectors(uint16_t cs,
308 uint16_t ss,
309 uint16_t ds,
310 uint16_t es,
311 uint16_t fs,
312 uint16_t gs)
313 {
314 return valid_user_code_selector(cs) &&
315 valid_user_stack_selector(ss) &&
316 valid_user_data_selector(ds) &&
317 valid_user_data_selector(es) &&
318 valid_user_data_selector(fs) &&
319 valid_user_data_selector(gs);
320 }
321