/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <cpus.h>

#if	NCPUS > 1

#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <mach/machine.h>
#include <vm/vm_kern.h>

#include <i386/mp_desc.h>
#include <i386/lock.h>
#include <i386/misc_protos.h>

#include <kern/misc_protos.h>

#include <mach_kdb.h>
/*
 * The i386 needs an interrupt stack to keep the PCB stack from being
 * overrun by interrupts.  All interrupt stacks MUST lie at lower addresses
 * than any thread`s kernel stack.
 */

/*
 * Addresses of bottom and top of interrupt stacks.
 */
vm_offset_t	interrupt_stack[NCPUS];
vm_offset_t	int_stack_top[NCPUS];

/*
 * Barrier address.
 */
vm_offset_t	int_stack_high;
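/*
 * Illustrative sketch, not original code: the invariant above (all
 * interrupt stacks below all thread kernel stacks) lets a single
 * compare against the barrier address decide whether a stack pointer
 * is on an interrupt stack.  The helper name is hypothetical.
 */
#if 0	/* illustration only */
static boolean_t
example_on_int_stack(vm_offset_t sp)
{
	return (boolean_t)(sp < int_stack_high);  /* below barrier => interrupt stack */
}
#endif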
/*
 * First cpu`s interrupt stack.
 */
extern char		intstack[];	/* bottom */
extern char		eintstack[];	/* top */

/*
 * We allocate interrupt stacks from physical memory.
 */
extern
vm_offset_t	avail_start;
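/*
 * Illustrative sketch of the allocation idiom used below, with a
 * hypothetical helper name: before the VM system runs, memory is
 * carved off the front of free physical memory by advancing
 * avail_start, and phystokv() gives the kernel virtual address.
 */
#if 0	/* illustration only */
static vm_offset_t
example_steal_phys(vm_size_t size)
{
	vm_offset_t va = phystokv(avail_start);		/* kernel VA of chunk */
	avail_start = round_page(avail_start + size);	/* bump the free cursor */
	return va;
}
#endif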
/*
 * Multiprocessor i386/i486 systems use a separate copy of the
 * GDT, IDT, LDT, and kernel TSS per processor.  The first three
 * are separate to avoid lock contention: the i386 uses locked
 * memory cycles to access the descriptor tables.  The TSS is
 * separate since each processor needs its own kernel stack,
 * and since using a TSS marks it busy.
 */

/*
 * Allocated descriptor tables.
 */
struct mp_desc_table	*mp_desc_table[NCPUS] = { 0 };

/*
 * Pointer to TSS for access in load_context.
 */
struct i386_tss		*mp_ktss[NCPUS] = { 0 };

#if	MACH_KDB
/*
 * Pointer to TSS for debugger use.
 */
struct i386_tss		*mp_dbtss[NCPUS] = { 0 };
#endif	/* MACH_KDB */

/*
 * Pointer to GDT to reset the KTSS busy bit.
 */
struct fake_descriptor	*mp_gdt[NCPUS] = { 0 };
struct fake_descriptor	*mp_idt[NCPUS] = { 0 };
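/*
 * Illustrative sketch, hypothetical helper: with a private table set
 * per processor, a CPU reaches its own tables by plain array indexing,
 * so no locked memory cycles are contended between processors.
 */
#if 0	/* illustration only */
static struct fake_descriptor *
example_my_gdt(void)
{
	return mp_gdt[cpu_number()];	/* this CPU's private GDT */
}
#endif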
/*
 * Allocate and initialize the per-processor descriptor tables.
 */

struct fake_descriptor ldt_desc_pattern = {
	(unsigned int) 0,
	LDTSZ * sizeof(struct fake_descriptor) - 1,
	0,
	ACC_P|ACC_PL_K|ACC_LDT
};
struct fake_descriptor tss_desc_pattern = {
	(unsigned int) 0,
	sizeof(struct i386_tss),
	0,
	ACC_P|ACC_PL_K|ACC_TSS
};
struct fake_descriptor cpudata_desc_pattern = {
	(unsigned int) 0,
	sizeof(cpu_data_t)-1,
	SZ_32,
	ACC_P|ACC_PL_K|ACC_DATA_W
};
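/*
 * Each pattern above is a descriptor template in flat "fake" format:
 * { offset, limit/segment, size-or-word-count, access }.  mp_desc_init()
 * below stamps a pattern into a GDT slot, fills in the real offset, and
 * then fix_desc() converts the entry in place into the scrambled bit
 * layout the i386 hardware expects.
 */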
struct mp_desc_table *
mp_desc_init(
	int	mycpu)
{
	register struct mp_desc_table	*mpt;

	if (mycpu == master_cpu) {
	    /*
	     * Master CPU uses the tables built at boot time.
	     * Just set the TSS and GDT pointers.
	     */
	    mp_ktss[mycpu] = &ktss;
#if	MACH_KDB
	    mp_dbtss[mycpu] = &dbtss;
#endif	/* MACH_KDB */
	    mp_gdt[mycpu] = gdt;
	    mp_idt[mycpu] = idt;
	    return 0;
	}
	else {
	    mpt = mp_desc_table[mycpu];
	    mp_ktss[mycpu] = &mpt->ktss;
	    mp_gdt[mycpu] = mpt->gdt;
	    mp_idt[mycpu] = mpt->idt;

	    /*
	     * Copy the tables
	     */
	    bcopy((char *)idt,
		  (char *)mpt->idt,
		  sizeof(idt));
	    bcopy((char *)gdt,
		  (char *)mpt->gdt,
		  sizeof(gdt));
	    bcopy((char *)ldt,
		  (char *)mpt->ldt,
		  sizeof(ldt));
	    bzero((char *)&mpt->ktss,
		  sizeof(struct i386_tss));
	    bzero((char *)&cpu_data[mycpu],
		  sizeof(cpu_data_t));
#if	MACH_KDB
	    mp_dbtss[mycpu] = &mpt->dbtss;
	    bcopy((char *)&dbtss,
		  (char *)&mpt->dbtss,
		  sizeof(struct i386_tss));
#endif	/* MACH_KDB */

	    /*
	     * Fix up the entries in the GDT to point to
	     * this LDT and this TSS.
	     */
	    mpt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
	    mpt->gdt[sel_idx(KERNEL_LDT)].offset =
		LINEAR_KERNEL_ADDRESS + (unsigned int) mpt->ldt;
	    fix_desc(&mpt->gdt[sel_idx(KERNEL_LDT)], 1);

	    mpt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
	    mpt->gdt[sel_idx(KERNEL_TSS)].offset =
		LINEAR_KERNEL_ADDRESS + (unsigned int) &mpt->ktss;
	    fix_desc(&mpt->gdt[sel_idx(KERNEL_TSS)], 1);

	    mpt->gdt[sel_idx(CPU_DATA)] = cpudata_desc_pattern;
	    mpt->gdt[sel_idx(CPU_DATA)].offset =
		LINEAR_KERNEL_ADDRESS + (unsigned int) &cpu_data[mycpu];
	    fix_desc(&mpt->gdt[sel_idx(CPU_DATA)], 1);

#if	MACH_KDB
	    mpt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
	    mpt->gdt[sel_idx(DEBUG_TSS)].offset =
		LINEAR_KERNEL_ADDRESS + (unsigned int) &mpt->dbtss;
	    fix_desc(&mpt->gdt[sel_idx(DEBUG_TSS)], 1);

	    mpt->dbtss.esp0 = (int)(db_task_stack_store +
		    (INTSTACK_SIZE * (mycpu + 1)) - sizeof (natural_t));
	    mpt->dbtss.esp = mpt->dbtss.esp0;
	    mpt->dbtss.eip = (int)&db_task_start;
#endif	/* MACH_KDB */

	    mpt->ktss.ss0 = KERNEL_DS;
	    mpt->ktss.io_bit_map_offset = 0x0FFF;	/* no IO bitmap */

	    return mpt;
	}
}
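/*
 * Illustrative usage, hypothetical call site: a slave CPU's bringup
 * path would fetch its private tables and then point the hardware at
 * them; mp_desc_init() returns 0 for the master CPU, which keeps the
 * boot-time tables.
 */
#if 0	/* illustration only */
void
example_slave_desc_setup(int mycpu)
{
	struct mp_desc_table *mpt = mp_desc_init(mycpu);

	if (mpt != 0) {
		/* load mpt->idt/gdt/ldt and the KERNEL_TSS selector
		   into the CPU here (lidt/lgdt/lldt/ltr). */
	}
}
#endif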
/*
 * Called after all CPUs have been found, but before the VM system
 * is running.  The machine array must show which CPUs exist.
 */
void
interrupt_stack_alloc(void)
{
	register int	i;
	int		cpu_count;
	vm_offset_t	stack_start;
	struct mp_desc_table *mpt;

	/*
	 * Count the number of CPUs.
	 */
	cpu_count = 0;
	for (i = 0; i < NCPUS; i++)
	    if (machine_slot[i].is_cpu)
		cpu_count++;

	/*
	 * Allocate an interrupt stack for each CPU except for
	 * the master CPU (which uses the bootstrap stack)
	 */
	stack_start = phystokv(avail_start);
	avail_start = round_page(avail_start + INTSTACK_SIZE*(cpu_count-1));
	bzero((char *)stack_start, INTSTACK_SIZE*(cpu_count-1));

	/*
	 * Set up pointers to the top of the interrupt stack.
	 */
	for (i = 0; i < NCPUS; i++) {
	    if (i == master_cpu) {
		interrupt_stack[i] = (vm_offset_t) intstack;
		int_stack_top[i]   = (vm_offset_t) eintstack;
	    }
	    else if (machine_slot[i].is_cpu) {
		interrupt_stack[i] = stack_start;
		int_stack_top[i]   = stack_start + INTSTACK_SIZE;

		stack_start += INTSTACK_SIZE;
	    }
	}

	/*
	 * Allocate descriptor tables for each CPU except for
	 * the master CPU (which already has them initialized)
	 */
	mpt = (struct mp_desc_table *) phystokv(avail_start);
	avail_start = round_page((vm_offset_t)avail_start +
				 sizeof(struct mp_desc_table)*(cpu_count-1));
	for (i = 0; i < NCPUS; i++)
	    if (i != master_cpu)
		mp_desc_table[i] = mpt++;

	/*
	 * Set up the barrier address.  All thread stacks MUST
	 * be above this address.
	 */
	/*
	 * intstack is at a higher address than stack_start for AT mps,
	 * so int_stack_high must point at eintstack.
	 * XXX
	 * But what happens if a kernel stack gets allocated below
	 * 1 Meg?  Probably never happens: there is only 640K available
	 * there.
	 */
	int_stack_high = (vm_offset_t) eintstack;
}
#endif	/* NCPUS > 1 */