2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <i386/proc_reg.h>
30 #include <i386/cpuid.h>
33 #include <vm/vm_map.h>
34 #include <i386/pmap_internal.h>
35 #include <i386/pmap_pcid.h>
36 #include <mach/branch_predicates.h>
39 * PCID (Process context identifier) aka tagged TLB support.
40 * On processors with this feature, unless disabled via the -pmap_pcid_disable
41 * boot-arg, the following algorithm is in effect:
42 * Each processor maintains an array of tag refcounts indexed by tag.
43 * Each address space maintains an array of tags indexed by CPU number.
44 * Each address space maintains a coherency vector, indexed by CPU
45 * indicating that the TLB state for that address space has a pending
47 * On a context switch, a refcounted tag is lazily assigned to the newly
48 * dispatched (CPU, address space) tuple.
49 * When an inactive address space is invalidated on a remote CPU, it is marked
50 * for invalidation upon the next dispatch. Some invalidations are
51 * also processed at the user/kernel boundary.
52 * Provisions are made for the case where a CPU is overcommitted, i.e.
53 * more active address spaces exist than the number of logical tags
54 * provided for by the processor architecture (currently 4096).
55 * The algorithm assumes the processor remaps the logical tags
56 * to physical TLB context IDs in an LRU fashion for efficiency. (DRK '10)
/* Number of CPUs on which PCID has been enabled; incremented atomically
 * (OSIncrementAtomic) by pmap_pcid_configure as each CPU comes up. */
uint32_t	pmap_pcid_ncpus;
/* TRUE when PCID use is suppressed — set via the "-pmap_pcid_disable"
 * boot-arg, or forced on when no_shared_cr3 is in effect. */
boolean_t	pmap_pcid_disabled = FALSE;
62 void pmap_pcid_configure(void) {
63 int ccpu
= cpu_number();
64 uintptr_t cr4
= get_cr4();
65 boolean_t pcid_present
= FALSE
;
67 pmap_pcid_log("PCID configure invoked on CPU %d\n", ccpu
);
68 pmap_assert(ml_get_interrupts_enabled() == FALSE
|| get_preemption_level() !=0);
69 pmap_assert(cpu_mode_is64bit());
71 if (PE_parse_boot_argn("-pmap_pcid_disable", &pmap_pcid_disabled
, sizeof (pmap_pcid_disabled
))) {
72 pmap_pcid_log("PMAP: PCID feature disabled\n");
73 printf("PMAP: PCID feature disabled, %u\n", pmap_pcid_disabled
);
74 kprintf("PMAP: PCID feature disabled %u\n", pmap_pcid_disabled
);
76 /* no_shared_cr3+PCID is currently unsupported */
78 if (pmap_pcid_disabled
== FALSE
)
79 no_shared_cr3
= FALSE
;
84 pmap_pcid_disabled
= TRUE
;
86 if (pmap_pcid_disabled
|| no_shared_cr3
) {
88 /* Reset PCID status, as we may have picked up
89 * strays if discovered prior to platform
90 * expert initialization.
92 for (i
= 0; i
< real_ncpus
; i
++) {
94 cpu_datap(i
)->cpu_pmap_pcid_enabled
= FALSE
;
98 cpu_datap(ccpu
)->cpu_pmap_pcid_enabled
= FALSE
;
101 /* DRKTODO: assert if features haven't been discovered yet. Redundant
102 * invocation of cpu_mode_init and descendants masks this for now.
104 if ((cpuid_features() & CPUID_FEATURE_PCID
))
107 cpu_datap(ccpu
)->cpu_pmap_pcid_enabled
= FALSE
;
108 pmap_pcid_log("PMAP: PCID not detected CPU %d\n", ccpu
);
111 if ((cr4
& (CR4_PCIDE
| CR4_PGE
)) == (CR4_PCIDE
|CR4_PGE
)) {
112 cpu_datap(ccpu
)->cpu_pmap_pcid_enabled
= TRUE
;
113 pmap_pcid_log("PMAP: PCID already enabled %d\n", ccpu
);
116 if (pcid_present
== TRUE
) {
117 pmap_pcid_log("Pre-PCID:CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n", get_cr0(), get_cr3_raw(), ccpu
, cr4
);
119 if (cpu_number() >= PMAP_PCID_MAX_CPUS
) {
120 panic("PMAP_PCID_MAX_CPUS %d\n", cpu_number());
122 if ((get_cr4() & CR4_PGE
) == 0) {
123 set_cr4(get_cr4() | CR4_PGE
);
124 pmap_pcid_log("Toggled PGE ON (CPU: %d\n", ccpu
);
126 set_cr4(get_cr4() | CR4_PCIDE
);
127 pmap_pcid_log("Post PCID: CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n", get_cr0(), get_cr3_raw(), ccpu
, get_cr4());
129 cpu_datap(ccpu
)->cpu_pmap_pcid_enabled
= TRUE
;
131 if (OSIncrementAtomic(&pmap_pcid_ncpus
) == machine_info
.max_cpus
) {
132 pmap_pcid_log("All PCIDs enabled: real_ncpus: %d, pmap_pcid_ncpus: %d\n", real_ncpus
, pmap_pcid_ncpus
);
134 cpu_datap(ccpu
)->cpu_pmap_pcid_coherentp
=
135 cpu_datap(ccpu
)->cpu_pmap_pcid_coherentp_kernel
=
136 &(kernel_pmap
->pmap_pcid_coherency_vector
[ccpu
]);
137 cpu_datap(ccpu
)->cpu_pcid_refcounts
[0] = 1;
141 void pmap_pcid_initialize(pmap_t p
) {
143 unsigned nc
= sizeof(p
->pmap_pcid_cpus
)/sizeof(pcid_t
);
145 pmap_assert(nc
>= real_ncpus
);
146 for (i
= 0; i
< nc
; i
++) {
147 p
->pmap_pcid_cpus
[i
] = PMAP_PCID_INVALID_PCID
;
148 /* We assume here that the coherency vector is zeroed by
154 void pmap_pcid_initialize_kernel(pmap_t p
) {
156 unsigned nc
= sizeof(p
->pmap_pcid_cpus
)/sizeof(pcid_t
);
158 for (i
= 0; i
< nc
; i
++) {
159 p
->pmap_pcid_cpus
[i
] = 0;
160 /* We assume here that the coherency vector is zeroed by
166 pcid_t
pmap_pcid_allocate_pcid(int ccpu
) {
168 pcid_ref_t cur_min
= 0xFF;
169 uint32_t cur_min_index
= ~1;
170 pcid_ref_t
*cpu_pcid_refcounts
= &cpu_datap(ccpu
)->cpu_pcid_refcounts
[0];
171 pcid_ref_t old_count
;
173 if ((i
= cpu_datap(ccpu
)->cpu_pcid_free_hint
) != 0) {
174 if (cpu_pcid_refcounts
[i
] == 0) {
175 (void)__sync_fetch_and_add(&cpu_pcid_refcounts
[i
], 1);
176 cpu_datap(ccpu
)->cpu_pcid_free_hint
= 0;
180 /* Linear scan to discover free slot, with hint. Room for optimization
181 * but with intelligent prefetchers this should be
182 * adequately performant, as it is invoked
183 * only on first dispatch of a new address space onto
184 * a given processor. DRKTODO: use larger loads and
185 * zero byte discovery -- any pattern != ~1 should
186 * signify a free slot.
188 for (i
= PMAP_PCID_MIN_PCID
; i
< PMAP_PCID_MAX_PCID
; i
++) {
189 pcid_ref_t cur_refcount
= cpu_pcid_refcounts
[i
];
191 pmap_assert(cur_refcount
< PMAP_PCID_MAX_REFCOUNT
);
193 if (cur_refcount
== 0) {
194 (void)__sync_fetch_and_add(&cpu_pcid_refcounts
[i
], 1);
198 if (cur_refcount
< cur_min
) {
200 cur_min
= cur_refcount
;
204 pmap_assert(cur_min_index
> 0 && cur_min_index
< PMAP_PCID_MAX_PCID
);
205 /* Consider "rebalancing" tags actively in highly oversubscribed cases
206 * perhaps selecting tags with lower activity.
209 old_count
= __sync_fetch_and_add(&cpu_pcid_refcounts
[cur_min_index
], 1);
210 pmap_assert(old_count
< PMAP_PCID_MAX_REFCOUNT
);
211 return cur_min_index
;
214 void pmap_pcid_deallocate_pcid(int ccpu
, pmap_t tpmap
) {
217 pcid_ref_t prior_count
;
219 pcid
= tpmap
->pmap_pcid_cpus
[ccpu
];
220 pmap_assert(pcid
!= PMAP_PCID_INVALID_PCID
);
221 if (pcid
== PMAP_PCID_INVALID_PCID
)
224 lp
= cpu_datap(ccpu
)->cpu_pcid_last_pmap_dispatched
[pcid
];
225 pmap_assert(pcid
> 0 && pcid
< PMAP_PCID_MAX_PCID
);
226 pmap_assert(cpu_datap(ccpu
)->cpu_pcid_refcounts
[pcid
] >= 1);
229 (void)__sync_bool_compare_and_swap(&cpu_datap(ccpu
)->cpu_pcid_last_pmap_dispatched
[pcid
], tpmap
, PMAP_INVALID
);
231 if ((prior_count
= __sync_fetch_and_sub(&cpu_datap(ccpu
)->cpu_pcid_refcounts
[pcid
], 1)) == 1) {
232 cpu_datap(ccpu
)->cpu_pcid_free_hint
= pcid
;
234 pmap_assert(prior_count
<= PMAP_PCID_MAX_REFCOUNT
);
237 void pmap_destroy_pcid_sync(pmap_t p
) {
239 pmap_assert(ml_get_interrupts_enabled() == FALSE
|| get_preemption_level() !=0);
240 for (i
= 0; i
< PMAP_PCID_MAX_CPUS
; i
++)
241 if (p
->pmap_pcid_cpus
[i
] != PMAP_PCID_INVALID_PCID
)
242 pmap_pcid_deallocate_pcid(i
, p
);
245 pcid_t
pcid_for_pmap_cpu_tuple(pmap_t cpmap
, thread_t cthread
, int ccpu
) {
246 pmap_t active_pmap
= cpmap
;
248 if (__improbable(cpmap
->pagezero_accessible
)) {
249 if ((cthread
->machine
.specFlags
& CopyIOActive
) == 0) {
250 active_pmap
= kernel_pmap
;
254 return active_pmap
->pmap_pcid_cpus
[ccpu
];
/* Diagnostic ring of recent CR3|PCID compositions written by
 * pmap_pcid_activate, indexed by CPU number modulo the ring size; bit 63
 * of each entry records whether the TLB was preserved on that switch.
 * NOTE(review): the original file appears to wrap this pair in
 * #if PMAP_ASSERT (a matching "#endif PMAP_ASSERT" is visible in
 * pmap_pcid_activate) — confirm against the unmangled source. */
#define PCID_RECORD_SIZE 128
uint64_t pcid_record_array[PCID_RECORD_SIZE];
262 void pmap_pcid_activate(pmap_t tpmap
, int ccpu
, boolean_t nopagezero
, boolean_t copyio
) {
263 pcid_t new_pcid
= tpmap
->pmap_pcid_cpus
[ccpu
];
265 boolean_t pcid_conflict
= FALSE
, pending_flush
= FALSE
;
267 pmap_assert(cpu_datap(ccpu
)->cpu_pmap_pcid_enabled
);
268 if (__improbable(new_pcid
== PMAP_PCID_INVALID_PCID
)) {
269 new_pcid
= tpmap
->pmap_pcid_cpus
[ccpu
] = pmap_pcid_allocate_pcid(ccpu
);
272 pmap_assert(new_pcid
!= PMAP_PCID_INVALID_PCID
);
274 cpu_datap(ccpu
)->cpu_last_pcid
= cpu_datap(ccpu
)->cpu_active_pcid
;
276 cpu_datap(ccpu
)->cpu_active_pcid
= new_pcid
;
278 pending_flush
= (tpmap
->pmap_pcid_coherency_vector
[ccpu
] != 0);
279 if (__probable(pending_flush
== FALSE
)) {
280 last_pmap
= cpu_datap(ccpu
)->cpu_pcid_last_pmap_dispatched
[new_pcid
];
281 pcid_conflict
= ((last_pmap
!= NULL
) && (tpmap
!= last_pmap
));
283 if (__improbable(pending_flush
|| pcid_conflict
)) {
284 pmap_pcid_validate_cpu(tpmap
, ccpu
);
286 /* Consider making this a unique id */
287 cpu_datap(ccpu
)->cpu_pcid_last_pmap_dispatched
[new_pcid
] = tpmap
;
289 pmap_assert(new_pcid
< PMAP_PCID_MAX_PCID
);
290 pmap_assert(((tpmap
== kernel_pmap
) && new_pcid
== 0) ||
291 ((new_pcid
!= PMAP_PCID_INVALID_PCID
) && (new_pcid
!= 0)));
293 pcid_record_array
[ccpu
% PCID_RECORD_SIZE
] = tpmap
->pm_cr3
| new_pcid
| (((uint64_t)(!(pending_flush
|| pcid_conflict
))) <<63);
294 pml4_entry_t
*pml4
= pmap64_pml4(tpmap
, 0ULL);
295 /* Diagnostic to detect pagetable anchor corruption */
296 if (pml4
[KERNEL_PML4_INDEX
] != kernel_pmap
->pm_pml4
[KERNEL_PML4_INDEX
])
297 __asm__
volatile("int3");
298 #endif /* PMAP_ASSERT */
300 pmap_paddr_t ncr3
= tpmap
->pm_cr3
;
302 if (__improbable(nopagezero
)) {
303 pending_flush
= TRUE
;
304 if (copyio
== FALSE
) {
305 new_pcid
= kernel_pmap
->pmap_pcid_cpus
[ccpu
];
306 ncr3
= kernel_pmap
->pm_cr3
;
308 cpu_datap(ccpu
)->cpu_kernel_pcid
= kernel_pmap
->pmap_pcid_cpus
[ccpu
];
311 set_cr3_composed(ncr3
, new_pcid
, !(pending_flush
|| pcid_conflict
));
313 if (!pending_flush
) {
314 /* We did not previously observe a pending invalidation for this
315 * ASID. However, the load from the coherency vector
316 * could've been reordered ahead of the store to the
317 * active_cr3 field (in the context switch path, our
318 * caller). Re-consult the pending invalidation vector
319 * after the CR3 write. We rely on MOV CR3's documented
320 * serializing property to avoid insertion of an expensive
323 pending_flush
= (tpmap
->pmap_pcid_coherency_vector
[ccpu
] != 0);
324 if (__improbable(pending_flush
!= 0)) {
325 pmap_pcid_validate_cpu(tpmap
, ccpu
);
326 set_cr3_composed(ncr3
, new_pcid
, FALSE
);
329 cpu_datap(ccpu
)->cpu_pmap_pcid_coherentp
= &(tpmap
->pmap_pcid_coherency_vector
[ccpu
]);
331 KERNEL_DEBUG_CONSTANT(0x9c1d0000, tpmap
, new_pcid
, pending_flush
, pcid_conflict
, 0);