/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
58 #include <kern/cpu_number.h>
59 #include <kern/cpu_data.h>
60 #include <mach/machine.h>
61 #include <vm/vm_kern.h>
63 #include <i386/mp_desc.h>
64 #include <i386/lock.h>
65 #include <i386/misc_protos.h>
68 #include <kern/misc_protos.h>
73 * The i386 needs an interrupt stack to keep the PCB stack from being
74 * overrun by interrupts. All interrupt stacks MUST lie at lower addresses
75 * than any thread`s kernel stack.
79 * Addresses of bottom and top of interrupt stacks.
81 vm_offset_t interrupt_stack
[NCPUS
];
82 vm_offset_t int_stack_top
[NCPUS
];
87 vm_offset_t int_stack_high
;
90 * First cpu`s interrupt stack.
92 extern char intstack
[]; /* bottom */
93 extern char eintstack
[]; /* top */
96 * We allocate interrupt stacks from physical memory.
99 vm_offset_t avail_start
;
102 * Multiprocessor i386/i486 systems use a separate copy of the
103 * GDT, IDT, LDT, and kernel TSS per processor. The first three
104 * are separate to avoid lock contention: the i386 uses locked
105 * memory cycles to access the descriptor tables. The TSS is
106 * separate since each processor needs its own kernel stack,
107 * and since using a TSS marks it busy.
111 * Allocated descriptor tables.
113 struct mp_desc_table
*mp_desc_table
[NCPUS
] = { 0 };
116 * Pointer to TSS for access in load_context.
118 struct i386_tss
*mp_ktss
[NCPUS
] = { 0 };
122 * Pointer to TSS for debugger use.
124 struct i386_tss
*mp_dbtss
[NCPUS
] = { 0 };
125 #endif /* MACH_KDB */
128 * Pointer to GDT to reset the KTSS busy bit.
130 struct fake_descriptor
*mp_gdt
[NCPUS
] = { 0 };
131 struct fake_descriptor
*mp_idt
[NCPUS
] = { 0 };
132 struct fake_descriptor
*mp_ldt
[NCPUS
] = { 0 };
135 * Allocate and initialize the per-processor descriptor tables.
138 struct fake_descriptor ldt_desc_pattern
= {
140 LDTSZ
* sizeof(struct fake_descriptor
) - 1,
142 ACC_P
|ACC_PL_K
|ACC_LDT
144 struct fake_descriptor tss_desc_pattern
= {
146 sizeof(struct i386_tss
),
148 ACC_P
|ACC_PL_K
|ACC_TSS
151 struct fake_descriptor cpudata_desc_pattern
= {
153 sizeof(cpu_data_t
)-1,
155 ACC_P
|ACC_PL_K
|ACC_DATA_W
158 struct mp_desc_table
*
162 register struct mp_desc_table
*mpt
;
164 if (mycpu
== master_cpu
) {
166 * Master CPU uses the tables built at boot time.
167 * Just set the TSS and GDT pointers.
169 mp_ktss
[mycpu
] = &ktss
;
171 mp_dbtss
[mycpu
] = &dbtss
;
172 #endif /* MACH_KDB */
179 mpt
= mp_desc_table
[mycpu
];
180 mp_ktss
[mycpu
] = &mpt
->ktss
;
181 mp_gdt
[mycpu
] = mpt
->gdt
;
182 mp_idt
[mycpu
] = mpt
->idt
;
183 mp_ldt
[mycpu
] = mpt
->ldt
;
197 bzero((char *)&mpt
->ktss
,
198 sizeof(struct i386_tss
));
200 bzero((char *)&cpu_data
[mycpu
],
204 cpu_data
[mycpu
].cpu_number
= mycpu
;
207 mp_dbtss
[mycpu
] = &mpt
->dbtss
;
208 bcopy((char *)&dbtss
,
210 sizeof(struct i386_tss
));
211 #endif /* MACH_KDB */
214 * Fix up the entries in the GDT to point to
215 * this LDT and this TSS.
217 mpt
->gdt
[sel_idx(KERNEL_LDT
)] = ldt_desc_pattern
;
218 mpt
->gdt
[sel_idx(KERNEL_LDT
)].offset
=
219 LINEAR_KERNEL_ADDRESS
+ (unsigned int) mpt
->ldt
;
220 fix_desc(&mpt
->gdt
[sel_idx(KERNEL_LDT
)], 1);
222 mpt
->gdt
[sel_idx(KERNEL_TSS
)] = tss_desc_pattern
;
223 mpt
->gdt
[sel_idx(KERNEL_TSS
)].offset
=
224 LINEAR_KERNEL_ADDRESS
+ (unsigned int) &mpt
->ktss
;
225 fix_desc(&mpt
->gdt
[sel_idx(KERNEL_TSS
)], 1);
227 mpt
->gdt
[sel_idx(CPU_DATA
)] = cpudata_desc_pattern
;
228 mpt
->gdt
[sel_idx(CPU_DATA
)].offset
=
229 LINEAR_KERNEL_ADDRESS
+ (unsigned int) &cpu_data
[mycpu
];
230 fix_desc(&mpt
->gdt
[sel_idx(CPU_DATA
)], 1);
233 mpt
->gdt
[sel_idx(DEBUG_TSS
)] = tss_desc_pattern
;
234 mpt
->gdt
[sel_idx(DEBUG_TSS
)].offset
=
235 LINEAR_KERNEL_ADDRESS
+ (unsigned int) &mpt
->dbtss
;
236 fix_desc(&mpt
->gdt
[sel_idx(DEBUG_TSS
)], 1);
238 mpt
->dbtss
.esp0
= (int)(db_task_stack_store
+
239 (INTSTACK_SIZE
* (mycpu
+ 1)) - sizeof (natural_t
));
240 mpt
->dbtss
.esp
= mpt
->dbtss
.esp0
;
241 mpt
->dbtss
.eip
= (int)&db_task_start
;
242 #endif /* MACH_KDB */
244 mpt
->ktss
.ss0
= KERNEL_DS
;
245 mpt
->ktss
.io_bit_map_offset
= 0x0FFF; /* no IO bitmap */
252 * Called after all CPUs have been found, but before the VM system
253 * is running. The machine array must show which CPUs exist.
256 interrupt_stack_alloc(void)
260 vm_offset_t stack_start
;
261 struct mp_desc_table
*mpt
;
264 * Number of CPUs possible.
269 * Allocate an interrupt stack for each CPU except for
270 * the master CPU (which uses the bootstrap stack)
272 stack_start
= phystokv(avail_start
);
273 avail_start
= round_page(avail_start
+ INTSTACK_SIZE
*(cpu_count
-1));
274 bzero((char *)stack_start
, INTSTACK_SIZE
*(cpu_count
-1));
277 * Set up pointers to the top of the interrupt stack.
279 for (i
= 0; i
< cpu_count
; i
++) {
280 if (i
== master_cpu
) {
281 interrupt_stack
[i
] = (vm_offset_t
) intstack
;
282 int_stack_top
[i
] = (vm_offset_t
) eintstack
;
285 interrupt_stack
[i
] = stack_start
;
286 int_stack_top
[i
] = stack_start
+ INTSTACK_SIZE
;
288 stack_start
+= INTSTACK_SIZE
;
293 * Allocate descriptor tables for each CPU except for
294 * the master CPU (which already has them initialized)
297 mpt
= (struct mp_desc_table
*) phystokv(avail_start
);
298 avail_start
= round_page((vm_offset_t
)avail_start
+
299 sizeof(struct mp_desc_table
)*(cpu_count
-1));
300 for (i
= 0; i
< cpu_count
; i
++)
302 mp_desc_table
[i
] = mpt
++;
306 * Set up the barrier address. All thread stacks MUST
307 * be above this address.
310 * intstack is at higher addess than stack_start for AT mps
311 * so int_stack_high must point at eintstack.
313 * But what happens if a kernel stack gets allocated below
314 * 1 Meg ? Probably never happens, there is only 640 K available
317 int_stack_high
= (vm_offset_t
) eintstack
;
320 #endif /* NCPUS > 1 */