/* osfmk/i386/mp_desc.c — from apple/xnu (xnu-792.1.5) */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50
51 /*
52 */
53
54
55 #include <kern/cpu_number.h>
56 #include <kern/kalloc.h>
57 #include <kern/cpu_data.h>
58 #include <mach/machine.h>
59 #include <vm/vm_kern.h>
60
61 #include <i386/mp_desc.h>
62 #include <i386/lock.h>
63 #include <i386/misc_protos.h>
64 #include <i386/mp.h>
65 #include <i386/pmap.h>
66
67 #include <kern/misc_protos.h>
68
69 #include <mach_kdb.h>
70
71 /*
72 * The i386 needs an interrupt stack to keep the PCB stack from being
73 * overrun by interrupts. All interrupt stacks MUST lie at lower addresses
74 * than any thread`s kernel stack.
75 */
76
/*
 * First cpu`s interrupt stack.
 * Defined in assembly/boot code; only the bounds are visible here.
 */
extern char intstack[]; /* bottom */
extern char eintstack[]; /* top */

/*
 * Per-cpu data area pointers.
 * The master cpu (cpu 0) has its data area statically allocated;
 * others are allocated dynamically and this array is updated at runtime.
 */
cpu_data_t cpu_data_master;
/* Old-style (pre-C99) designated initializer: slot 0 -> master's data. */
cpu_data_t *cpu_data_ptr[MAX_CPUS] = { [0] &cpu_data_master };

decl_simple_lock_data(,cpu_lock); /* protects real_ncpus */
unsigned int real_ncpus = 1;        /* number of cpus registered so far */
unsigned int max_ncpus = MAX_CPUS;  /* upper bound on cpus we will accept */
94
95 /*
96 * Multiprocessor i386/i486 systems use a separate copy of the
97 * GDT, IDT, LDT, and kernel TSS per processor. The first three
98 * are separate to avoid lock contention: the i386 uses locked
99 * memory cycles to access the descriptor tables. The TSS is
100 * separate since each processor needs its own kernel stack,
101 * and since using a TSS marks it busy.
102 */
103
104 /*
105 * Allocate and initialize the per-processor descriptor tables.
106 */
107
/*
 * Template descriptors.  Each is copied into a cpu's private GDT and
 * then run through fix_desc() (see mp_desc_init) to shuffle the fields
 * into the hardware descriptor layout; the offset field is filled in
 * per-cpu before conversion.
 */
struct fake_descriptor ldt_desc_pattern = {
	(unsigned int) 0,
	LDTSZ * sizeof(struct fake_descriptor) - 1,	/* limit = size - 1 */
	0,
	ACC_P|ACC_PL_K|ACC_LDT
};
struct fake_descriptor tss_desc_pattern = {
	(unsigned int) 0,
	sizeof(struct i386_tss),
	/*
	 * NOTE(review): the other patterns use size - 1 for the limit;
	 * sizeof here makes the segment one byte larger than the TSS.
	 * Harmless, but looks like an off-by-one -- confirm intent.
	 */
	0,
	ACC_P|ACC_PL_K|ACC_TSS
};

struct fake_descriptor cpudata_desc_pattern = {
	(unsigned int) 0,
	sizeof(cpu_data_t)-1,		/* limit = size - 1 */
	SZ_32,
	ACC_P|ACC_PL_K|ACC_DATA_W
};
127
128 void
129 mp_desc_init(
130 cpu_data_t *cdp,
131 boolean_t is_boot_cpu)
132 {
133 struct mp_desc_table *mpt = cdp->cpu_desc_tablep;
134 cpu_desc_index_t *cdt = &cdp->cpu_desc_index;
135
136 if (is_boot_cpu) {
137 /*
138 * Master CPU uses the tables built at boot time.
139 * Just set the TSS and GDT pointers.
140 */
141 cdt->cdi_ktss = &ktss;
142 #if MACH_KDB
143 cdt->cdi_dbtss = &dbtss;
144 #endif /* MACH_KDB */
145 cdt->cdi_gdt = gdt;
146 cdt->cdi_idt = idt;
147 cdt->cdi_ldt = ldt;
148
149 } else {
150
151 cdt->cdi_ktss = &mpt->ktss;
152 cdt->cdi_gdt = mpt->gdt;
153 cdt->cdi_idt = mpt->idt;
154 cdt->cdi_ldt = mpt->ldt;
155
156 /*
157 * Copy the tables
158 */
159 bcopy((char *)idt,
160 (char *)mpt->idt,
161 sizeof(idt));
162 bcopy((char *)gdt,
163 (char *)mpt->gdt,
164 sizeof(gdt));
165 bcopy((char *)ldt,
166 (char *)mpt->ldt,
167 sizeof(ldt));
168 bzero((char *)&mpt->ktss,
169 sizeof(struct i386_tss));
170
171 #if MACH_KDB
172 cdt->cdi_dbtss = &dbtss;
173 bcopy((char *)&dbtss,
174 (char *)&mpt->dbtss,
175 sizeof(struct i386_tss));
176 #endif /* MACH_KDB */
177
178 /*
179 * Fix up the entries in the GDT to point to
180 * this LDT and this TSS.
181 */
182 mpt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
183 mpt->gdt[sel_idx(KERNEL_LDT)].offset = (vm_offset_t) mpt->ldt;
184 fix_desc(&mpt->gdt[sel_idx(KERNEL_LDT)], 1);
185
186 mpt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
187 mpt->gdt[sel_idx(KERNEL_TSS)].offset = (vm_offset_t) &mpt->ktss;
188 fix_desc(&mpt->gdt[sel_idx(KERNEL_TSS)], 1);
189
190 mpt->gdt[sel_idx(CPU_DATA_GS)] = cpudata_desc_pattern;
191 mpt->gdt[sel_idx(CPU_DATA_GS)].offset = (vm_offset_t) cdp;
192 fix_desc(&mpt->gdt[sel_idx(CPU_DATA_GS)], 1);
193
194 #if MACH_KDB
195 mpt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
196 mpt->gdt[sel_idx(DEBUG_TSS)].offset = (vm_offset_t) &mpt->dbtss;
197 fix_desc(&mpt->gdt[sel_idx(DEBUG_TSS)], 1);
198
199 mpt->dbtss.esp0 = (int)(db_task_stack_store +
200 (INTSTACK_SIZE * (cpu + 1)) - sizeof (natural_t));
201 mpt->dbtss.esp = mpt->dbtss.esp0;
202 mpt->dbtss.eip = (int)&db_task_start;
203 #endif /* MACH_KDB */
204
205 mpt->ktss.ss0 = KERNEL_DS;
206 mpt->ktss.io_bit_map_offset = 0x0FFF; /* no IO bitmap */
207 }
208 }
209
210 cpu_data_t *
211 cpu_data_alloc(boolean_t is_boot_cpu)
212 {
213 int ret;
214 cpu_data_t *cdp;
215
216 if (is_boot_cpu) {
217 assert(real_ncpus == 1);
218 simple_lock_init(&cpu_lock, 0);
219 cdp = &cpu_data_master;
220 if (cdp->cpu_processor == NULL) {
221 cdp->cpu_processor = cpu_processor_alloc(TRUE);
222 cdp->cpu_pmap = pmap_cpu_alloc(TRUE);
223 cdp->cpu_this = cdp;
224 cdp->cpu_int_stack_top = (vm_offset_t) eintstack;
225 mp_desc_init(cdp, TRUE);
226 }
227 return cdp;
228 }
229
230 /* Check count before making allocations */
231 if (real_ncpus >= max_ncpus)
232 return NULL;
233
234 /*
235 * Allocate per-cpu data:
236 */
237 ret = kmem_alloc(kernel_map,
238 (vm_offset_t *) &cdp, sizeof(cpu_data_t));
239 if (ret != KERN_SUCCESS) {
240 printf("cpu_data_alloc() failed, ret=%d\n", ret);
241 goto abort;
242 }
243 bzero((void*) cdp, sizeof(cpu_data_t));
244 cdp->cpu_this = cdp;
245
246 /*
247 * Allocate interrupt stack:
248 */
249 ret = kmem_alloc(kernel_map,
250 (vm_offset_t *) &cdp->cpu_int_stack_top,
251 INTSTACK_SIZE);
252 if (ret != KERN_SUCCESS) {
253 printf("cpu_data_alloc() int stack failed, ret=%d\n", ret);
254 goto abort;
255 }
256 bzero((void*) cdp->cpu_int_stack_top, INTSTACK_SIZE);
257 cdp->cpu_int_stack_top += INTSTACK_SIZE;
258
259 /*
260 * Allocate descriptor table:
261 */
262 ret = kmem_alloc(kernel_map,
263 (vm_offset_t *) &cdp->cpu_desc_tablep,
264 sizeof(struct mp_desc_table));
265 if (ret != KERN_SUCCESS) {
266 printf("cpu_data_alloc() desc_table failed, ret=%d\n", ret);
267 goto abort;
268 }
269
270 simple_lock(&cpu_lock);
271 if (real_ncpus >= max_ncpus) {
272 simple_unlock(&cpu_lock);
273 goto abort;
274 }
275 cpu_data_ptr[real_ncpus] = cdp;
276 cdp->cpu_number = real_ncpus;
277 real_ncpus++;
278 simple_unlock(&cpu_lock);
279
280 kprintf("cpu_data_alloc(%d) 0x%x desc_table: 0x%x "
281 "int_stack: 0x%x-0x%x\n",
282 cdp->cpu_number, cdp, cdp->cpu_desc_tablep,
283 cdp->cpu_int_stack_top - INTSTACK_SIZE, cdp->cpu_int_stack_top);
284
285 return cdp;
286
287 abort:
288 if (cdp) {
289 if (cdp->cpu_desc_tablep)
290 kfree((void *) cdp->cpu_desc_tablep,
291 sizeof(*cdp->cpu_desc_tablep));
292 if (cdp->cpu_int_stack_top)
293 kfree((void *) (cdp->cpu_int_stack_top - INTSTACK_SIZE),
294 INTSTACK_SIZE);
295 kfree((void *) cdp, sizeof(*cdp));
296 }
297 return NULL;
298 }
299
300 boolean_t
301 valid_user_segment_selectors(uint16_t cs,
302 uint16_t ss,
303 uint16_t ds,
304 uint16_t es,
305 uint16_t fs,
306 uint16_t gs)
307 {
308 return valid_user_code_selector(cs) &&
309 valid_user_stack_selector(ss) &&
310 valid_user_data_selector(ds) &&
311 valid_user_data_selector(es) &&
312 valid_user_data_selector(fs) &&
313 valid_user_data_selector(gs);
314 }
315