/* osfmk/i386/mp_desc.c — apple/xnu: per-cpu descriptor table setup */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58
59 /*
60 */
61
62
63 #include <kern/cpu_number.h>
64 #include <kern/kalloc.h>
65 #include <kern/cpu_data.h>
66 #include <mach/machine.h>
67 #include <vm/vm_kern.h>
68
69 #include <i386/mp_desc.h>
70 #include <i386/lock.h>
71 #include <i386/misc_protos.h>
72 #include <i386/mp.h>
73 #include <i386/pmap.h>
74
75 #include <kern/misc_protos.h>
76
77 #include <mach_kdb.h>
78
79 /*
80 * The i386 needs an interrupt stack to keep the PCB stack from being
81 * overrun by interrupts. All interrupt stacks MUST lie at lower addresses
82 * than any thread`s kernel stack.
83 */
84
/*
 * First cpu`s interrupt stack, statically allocated at boot.
 * All other cpus' interrupt stacks come from kmem_alloc() in
 * cpu_data_alloc() below.
 */
extern char intstack[]; /* bottom */
extern char eintstack[]; /* top */

/*
 * Per-cpu data area pointers.
 * The master cpu (cpu 0) has its data area statically allocated;
 * others are allocated dynamically and this array is updated at runtime.
 */
cpu_data_t cpu_data_master;
/* [0] &cpu_data_master is the old GNU designated-initializer syntax */
cpu_data_t *cpu_data_ptr[MAX_CPUS] = { [0] &cpu_data_master };

/* cpu_lock serializes updates to real_ncpus and cpu_data_ptr[] */
decl_simple_lock_data(,cpu_lock); /* protects real_ncpus */
unsigned int real_ncpus = 1;      /* cpus discovered so far; boot cpu counts */
unsigned int max_ncpus = MAX_CPUS; /* hard ceiling on cpus we will accept */
102
103 /*
104 * Multiprocessor i386/i486 systems use a separate copy of the
105 * GDT, IDT, LDT, and kernel TSS per processor. The first three
106 * are separate to avoid lock contention: the i386 uses locked
107 * memory cycles to access the descriptor tables. The TSS is
108 * separate since each processor needs its own kernel stack,
109 * and since using a TSS marks it busy.
110 */
111
112 /*
113 * Allocate and initialize the per-processor descriptor tables.
114 */
115
/*
 * Template descriptors copied into each cpu's private GDT and then
 * fixed up (offset patched, fix_desc() converts the "fake" layout to
 * hardware format).  Positional fields of struct fake_descriptor are
 * presumably { offset, limit, size bits, access } — confirm against
 * i386/mp_desc.h.
 */
struct fake_descriptor ldt_desc_pattern = {
	(unsigned int) 0,
	LDTSZ * sizeof(struct fake_descriptor) - 1, /* limit = last byte */
	0,
	ACC_P|ACC_PL_K|ACC_LDT
};
struct fake_descriptor tss_desc_pattern = {
	(unsigned int) 0,
	/* NOTE(review): limit is sizeof, not sizeof-1 as for the LDT above;
	 * one byte over is harmless for a TSS limit but inconsistent */
	sizeof(struct i386_tss),
	0,
	ACC_P|ACC_PL_K|ACC_TSS
};

/* Kernel-privilege writable data segment covering one cpu_data_t;
 * loaded into %gs so each cpu can reach its own data area. */
struct fake_descriptor cpudata_desc_pattern = {
	(unsigned int) 0,
	sizeof(cpu_data_t)-1,
	SZ_32,
	ACC_P|ACC_PL_K|ACC_DATA_W
};
135
136 void
137 mp_desc_init(
138 cpu_data_t *cdp,
139 boolean_t is_boot_cpu)
140 {
141 struct mp_desc_table *mpt = cdp->cpu_desc_tablep;
142 cpu_desc_index_t *cdt = &cdp->cpu_desc_index;
143
144 if (is_boot_cpu) {
145 /*
146 * Master CPU uses the tables built at boot time.
147 * Just set the TSS and GDT pointers.
148 */
149 cdt->cdi_ktss = &ktss;
150 #if MACH_KDB
151 cdt->cdi_dbtss = &dbtss;
152 #endif /* MACH_KDB */
153 cdt->cdi_gdt = gdt;
154 cdt->cdi_idt = idt;
155 cdt->cdi_ldt = ldt;
156
157 } else {
158
159 cdt->cdi_ktss = &mpt->ktss;
160 cdt->cdi_gdt = mpt->gdt;
161 cdt->cdi_idt = mpt->idt;
162 cdt->cdi_ldt = mpt->ldt;
163
164 /*
165 * Copy the tables
166 */
167 bcopy((char *)idt,
168 (char *)mpt->idt,
169 sizeof(idt));
170 bcopy((char *)gdt,
171 (char *)mpt->gdt,
172 sizeof(gdt));
173 bcopy((char *)ldt,
174 (char *)mpt->ldt,
175 sizeof(ldt));
176 bzero((char *)&mpt->ktss,
177 sizeof(struct i386_tss));
178
179 #if MACH_KDB
180 cdt->cdi_dbtss = &dbtss;
181 bcopy((char *)&dbtss,
182 (char *)&mpt->dbtss,
183 sizeof(struct i386_tss));
184 #endif /* MACH_KDB */
185
186 /*
187 * Fix up the entries in the GDT to point to
188 * this LDT and this TSS.
189 */
190 mpt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
191 mpt->gdt[sel_idx(KERNEL_LDT)].offset = (vm_offset_t) mpt->ldt;
192 fix_desc(&mpt->gdt[sel_idx(KERNEL_LDT)], 1);
193
194 mpt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
195 mpt->gdt[sel_idx(KERNEL_TSS)].offset = (vm_offset_t) &mpt->ktss;
196 fix_desc(&mpt->gdt[sel_idx(KERNEL_TSS)], 1);
197
198 mpt->gdt[sel_idx(CPU_DATA_GS)] = cpudata_desc_pattern;
199 mpt->gdt[sel_idx(CPU_DATA_GS)].offset = (vm_offset_t) cdp;
200 fix_desc(&mpt->gdt[sel_idx(CPU_DATA_GS)], 1);
201
202 #if MACH_KDB
203 mpt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
204 mpt->gdt[sel_idx(DEBUG_TSS)].offset = (vm_offset_t) &mpt->dbtss;
205 fix_desc(&mpt->gdt[sel_idx(DEBUG_TSS)], 1);
206
207 mpt->dbtss.esp0 = (int)(db_task_stack_store +
208 (INTSTACK_SIZE * (cpu + 1)) - sizeof (natural_t));
209 mpt->dbtss.esp = mpt->dbtss.esp0;
210 mpt->dbtss.eip = (int)&db_task_start;
211 #endif /* MACH_KDB */
212
213 mpt->ktss.ss0 = KERNEL_DS;
214 mpt->ktss.io_bit_map_offset = 0x0FFF; /* no IO bitmap */
215 }
216 }
217
218 cpu_data_t *
219 cpu_data_alloc(boolean_t is_boot_cpu)
220 {
221 int ret;
222 cpu_data_t *cdp;
223
224 if (is_boot_cpu) {
225 assert(real_ncpus == 1);
226 simple_lock_init(&cpu_lock, 0);
227 cdp = &cpu_data_master;
228 if (cdp->cpu_processor == NULL) {
229 cdp->cpu_processor = cpu_processor_alloc(TRUE);
230 cdp->cpu_pmap = pmap_cpu_alloc(TRUE);
231 cdp->cpu_this = cdp;
232 cdp->cpu_int_stack_top = (vm_offset_t) eintstack;
233 mp_desc_init(cdp, TRUE);
234 }
235 return cdp;
236 }
237
238 /* Check count before making allocations */
239 if (real_ncpus >= max_ncpus)
240 return NULL;
241
242 /*
243 * Allocate per-cpu data:
244 */
245 ret = kmem_alloc(kernel_map,
246 (vm_offset_t *) &cdp, sizeof(cpu_data_t));
247 if (ret != KERN_SUCCESS) {
248 printf("cpu_data_alloc() failed, ret=%d\n", ret);
249 goto abort;
250 }
251 bzero((void*) cdp, sizeof(cpu_data_t));
252 cdp->cpu_this = cdp;
253
254 /*
255 * Allocate interrupt stack:
256 */
257 ret = kmem_alloc(kernel_map,
258 (vm_offset_t *) &cdp->cpu_int_stack_top,
259 INTSTACK_SIZE);
260 if (ret != KERN_SUCCESS) {
261 printf("cpu_data_alloc() int stack failed, ret=%d\n", ret);
262 goto abort;
263 }
264 bzero((void*) cdp->cpu_int_stack_top, INTSTACK_SIZE);
265 cdp->cpu_int_stack_top += INTSTACK_SIZE;
266
267 /*
268 * Allocate descriptor table:
269 */
270 ret = kmem_alloc(kernel_map,
271 (vm_offset_t *) &cdp->cpu_desc_tablep,
272 sizeof(struct mp_desc_table));
273 if (ret != KERN_SUCCESS) {
274 printf("cpu_data_alloc() desc_table failed, ret=%d\n", ret);
275 goto abort;
276 }
277
278 simple_lock(&cpu_lock);
279 if (real_ncpus >= max_ncpus) {
280 simple_unlock(&cpu_lock);
281 goto abort;
282 }
283 cpu_data_ptr[real_ncpus] = cdp;
284 cdp->cpu_number = real_ncpus;
285 real_ncpus++;
286 simple_unlock(&cpu_lock);
287
288 kprintf("cpu_data_alloc(%d) 0x%x desc_table: 0x%x "
289 "int_stack: 0x%x-0x%x\n",
290 cdp->cpu_number, cdp, cdp->cpu_desc_tablep,
291 cdp->cpu_int_stack_top - INTSTACK_SIZE, cdp->cpu_int_stack_top);
292
293 return cdp;
294
295 abort:
296 if (cdp) {
297 if (cdp->cpu_desc_tablep)
298 kfree((void *) cdp->cpu_desc_tablep,
299 sizeof(*cdp->cpu_desc_tablep));
300 if (cdp->cpu_int_stack_top)
301 kfree((void *) (cdp->cpu_int_stack_top - INTSTACK_SIZE),
302 INTSTACK_SIZE);
303 kfree((void *) cdp, sizeof(*cdp));
304 }
305 return NULL;
306 }
307
308 boolean_t
309 valid_user_segment_selectors(uint16_t cs,
310 uint16_t ss,
311 uint16_t ds,
312 uint16_t es,
313 uint16_t fs,
314 uint16_t gs)
315 {
316 return valid_user_code_selector(cs) &&
317 valid_user_stack_selector(ss) &&
318 valid_user_data_selector(ds) &&
319 valid_user_data_selector(es) &&
320 valid_user_data_selector(fs) &&
321 valid_user_data_selector(gs);
322 }
323