/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
58 #include <kern/cpu_number.h>
59 #include <kern/cpu_data.h>
60 #include <mach/machine.h>
61 #include <vm/vm_kern.h>
63 #include <i386/mp_desc.h>
64 #include <i386/lock.h>
65 #include <i386/misc_protos.h>
67 #include <kern/misc_protos.h>
72 * The i386 needs an interrupt stack to keep the PCB stack from being
73 * overrun by interrupts. All interrupt stacks MUST lie at lower addresses
74 * than any thread`s kernel stack.
78 * Addresses of bottom and top of interrupt stacks.
80 vm_offset_t interrupt_stack
[NCPUS
];
81 vm_offset_t int_stack_top
[NCPUS
];
86 vm_offset_t int_stack_high
;
89 * First cpu`s interrupt stack.
91 extern char intstack
[]; /* bottom */
92 extern char eintstack
[]; /* top */
95 * We allocate interrupt stacks from physical memory.
98 vm_offset_t avail_start
;
101 * Multiprocessor i386/i486 systems use a separate copy of the
102 * GDT, IDT, LDT, and kernel TSS per processor. The first three
103 * are separate to avoid lock contention: the i386 uses locked
104 * memory cycles to access the descriptor tables. The TSS is
105 * separate since each processor needs its own kernel stack,
106 * and since using a TSS marks it busy.
110 * Allocated descriptor tables.
112 struct mp_desc_table
*mp_desc_table
[NCPUS
] = { 0 };
115 * Pointer to TSS for access in load_context.
117 struct i386_tss
*mp_ktss
[NCPUS
] = { 0 };
121 * Pointer to TSS for debugger use.
123 struct i386_tss
*mp_dbtss
[NCPUS
] = { 0 };
124 #endif /* MACH_KDB */
127 * Pointer to GDT to reset the KTSS busy bit.
129 struct fake_descriptor
*mp_gdt
[NCPUS
] = { 0 };
130 struct fake_descriptor
*mp_idt
[NCPUS
] = { 0 };
133 * Allocate and initialize the per-processor descriptor tables.
136 struct fake_descriptor ldt_desc_pattern
= {
138 LDTSZ
* sizeof(struct fake_descriptor
) - 1,
140 ACC_P
|ACC_PL_K
|ACC_LDT
142 struct fake_descriptor tss_desc_pattern
= {
144 sizeof(struct i386_tss
),
146 ACC_P
|ACC_PL_K
|ACC_TSS
149 struct fake_descriptor cpudata_desc_pattern
= {
151 sizeof(cpu_data_t
)-1,
153 ACC_P
|ACC_PL_K
|ACC_DATA_W
156 struct mp_desc_table
*
160 register struct mp_desc_table
*mpt
;
162 if (mycpu
== master_cpu
) {
164 * Master CPU uses the tables built at boot time.
165 * Just set the TSS and GDT pointers.
167 mp_ktss
[mycpu
] = &ktss
;
169 mp_dbtss
[mycpu
] = &dbtss
;
170 #endif /* MACH_KDB */
176 mpt
= mp_desc_table
[mycpu
];
177 mp_ktss
[mycpu
] = &mpt
->ktss
;
178 mp_gdt
[mycpu
] = mpt
->gdt
;
179 mp_idt
[mycpu
] = mpt
->idt
;
193 bzero((char *)&mpt
->ktss
,
194 sizeof(struct i386_tss
));
195 bzero((char *)&cpu_data
[mycpu
],
198 mp_dbtss
[mycpu
] = &mpt
->dbtss
;
199 bcopy((char *)&dbtss
,
201 sizeof(struct i386_tss
));
202 #endif /* MACH_KDB */
205 * Fix up the entries in the GDT to point to
206 * this LDT and this TSS.
208 mpt
->gdt
[sel_idx(KERNEL_LDT
)] = ldt_desc_pattern
;
209 mpt
->gdt
[sel_idx(KERNEL_LDT
)].offset
=
210 LINEAR_KERNEL_ADDRESS
+ (unsigned int) mpt
->ldt
;
211 fix_desc(&mpt
->gdt
[sel_idx(KERNEL_LDT
)], 1);
213 mpt
->gdt
[sel_idx(KERNEL_TSS
)] = tss_desc_pattern
;
214 mpt
->gdt
[sel_idx(KERNEL_TSS
)].offset
=
215 LINEAR_KERNEL_ADDRESS
+ (unsigned int) &mpt
->ktss
;
216 fix_desc(&mpt
->gdt
[sel_idx(KERNEL_TSS
)], 1);
218 mpt
->gdt
[sel_idx(CPU_DATA
)] = cpudata_desc_pattern
;
219 mpt
->gdt
[sel_idx(CPU_DATA
)].offset
=
220 LINEAR_KERNEL_ADDRESS
+ (unsigned int) &cpu_data
[mycpu
];
221 fix_desc(&mpt
->gdt
[sel_idx(CPU_DATA
)], 1);
224 mpt
->gdt
[sel_idx(DEBUG_TSS
)] = tss_desc_pattern
;
225 mpt
->gdt
[sel_idx(DEBUG_TSS
)].offset
=
226 LINEAR_KERNEL_ADDRESS
+ (unsigned int) &mpt
->dbtss
;
227 fix_desc(&mpt
->gdt
[sel_idx(DEBUG_TSS
)], 1);
229 mpt
->dbtss
.esp0
= (int)(db_task_stack_store
+
230 (INTSTACK_SIZE
* (mycpu
+ 1)) - sizeof (natural_t
));
231 mpt
->dbtss
.esp
= mpt
->dbtss
.esp0
;
232 mpt
->dbtss
.eip
= (int)&db_task_start
;
233 #endif /* MACH_KDB */
235 mpt
->ktss
.ss0
= KERNEL_DS
;
236 mpt
->ktss
.io_bit_map_offset
= 0x0FFF; /* no IO bitmap */
243 * Called after all CPUs have been found, but before the VM system
244 * is running. The machine array must show which CPUs exist.
247 interrupt_stack_alloc(void)
251 vm_offset_t stack_start
;
252 struct mp_desc_table
*mpt
;
255 * Count the number of CPUs.
258 for (i
= 0; i
< NCPUS
; i
++)
259 if (machine_slot
[i
].is_cpu
)
263 * Allocate an interrupt stack for each CPU except for
264 * the master CPU (which uses the bootstrap stack)
266 stack_start
= phystokv(avail_start
);
267 avail_start
= round_page(avail_start
+ INTSTACK_SIZE
*(cpu_count
-1));
268 bzero((char *)stack_start
, INTSTACK_SIZE
*(cpu_count
-1));
271 * Set up pointers to the top of the interrupt stack.
273 for (i
= 0; i
< NCPUS
; i
++) {
274 if (i
== master_cpu
) {
275 interrupt_stack
[i
] = (vm_offset_t
) intstack
;
276 int_stack_top
[i
] = (vm_offset_t
) eintstack
;
278 else if (machine_slot
[i
].is_cpu
) {
279 interrupt_stack
[i
] = stack_start
;
280 int_stack_top
[i
] = stack_start
+ INTSTACK_SIZE
;
282 stack_start
+= INTSTACK_SIZE
;
287 * Allocate descriptor tables for each CPU except for
288 * the master CPU (which already has them initialized)
291 mpt
= (struct mp_desc_table
*) phystokv(avail_start
);
292 avail_start
= round_page((vm_offset_t
)avail_start
+
293 sizeof(struct mp_desc_table
)*(cpu_count
-1));
294 for (i
= 0; i
< NCPUS
; i
++)
296 mp_desc_table
[i
] = mpt
++;
300 * Set up the barrier address. All thread stacks MUST
301 * be above this address.
304 * intstack is at higher addess than stack_start for AT mps
305 * so int_stack_high must point at eintstack.
307 * But what happens if a kernel stack gets allocated below
308 * 1 Meg ? Probably never happens, there is only 640 K available
311 int_stack_high
= (vm_offset_t
) eintstack
;
314 #endif /* NCPUS > 1 */