/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_assert.h>

#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <sys/errno.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>
#include <i386/vmx.h>		/* VMX_UNSUPPORTED, used by host_vmxon() below */

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>
#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
/* XXX - should be gone from here */
extern void		invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
extern void		flush_dcache64(addr64_t addr, unsigned count, int phys);
extern boolean_t	phys_page_exists(ppnum_t);
extern void		bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
extern void		pmap_set_reference(ppnum_t pn);
extern void		mapping_set_mod(ppnum_t pa);
extern void		mapping_set_ref(ppnum_t pn);

extern void		ovbcopy(const char *from, char *to, vm_size_t bytes);
void machine_callstack(natural_t *buf, vm_size_t callstack_max);
#define value_64bit(value)	((value) & 0xFFFFFFFF00000000ULL)
#define low32(x)		((unsigned int)((x) & 0x00000000FFFFFFFFULL))

#define INT_SIZE	(BYTE_SIZE * sizeof (int))
/*
 * Set indicated bit in bit string.
 */
void
setbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
}

/*
 * Clear indicated bit in bit string.
 */
void
clrbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
}

/*
 * Test if indicated bit is set in bit string.
 */
int
testbit(int bitno, int *s)
{
	return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
}
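/*
 * Illustrative sketch, not part of the original file: with 32-bit ints,
 * bit 35 of a bit string lives in word 35 / 32 == 1 at position
 * 35 % 32 == 3, so setting it turns map[1] into 0x8.  The map[] array
 * here is hypothetical.
 */
#if 0
	int map[4] = { 0, 0, 0, 0 };	/* a 128-bit string */

	setbit(35, map);		/* map[1] == 1 << 3 */
	assert(testbit(35, map));
	clrbit(35, map);
	assert(!testbit(35, map));
#endif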
/*
 * Find first bit set in bit string.
 */
int
ffsbit(int *s)
{
	int offset;

	for (offset = 0; !*s; offset += (int)INT_SIZE, ++s);
	return offset + __builtin_ctz(*s);
}
int
ffs(unsigned int mask)
{
	if (mask == 0)
		return 0;

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}
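/*
 * Illustrative note, not part of the original file: unlike ffsbit(),
 * ffs() numbers bits from 1 and reserves 0 for "no bit set", e.g.
 * ffs(0) == 0, ffs(1) == 1, ffs(0x8) == 4.
 */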
void
bzero_phys_nc(addr64_t src64, uint32_t bytes)
{
	bzero_phys(src64, bytes);
}

void
bzero_phys(addr64_t src64, uint32_t bytes)
{
	bzero(PHYSMAP_PTOV(src64), bytes);
}
/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 */

void
bcopy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes)
{
	/* Not necessary for K64 - but ensure we stay within a page */
	if (((((uint32_t)src64 & (NBPG - 1)) + bytes) > NBPG) ||
	    ((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
		panic("bcopy_phys alignment");
	}
	bcopy(PHYSMAP_PTOV(src64), PHYSMAP_PTOV(dst64), bytes);
}
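/*
 * Illustrative arithmetic, not part of the original file: the check above
 * panics on copies that would straddle a page.  With NBPG == 0x1000, a
 * source at page offset 0xFFC with bytes == 8 gives 0xFFC + 8 == 0x1004,
 * which exceeds NBPG, so the copy is rejected.
 */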
/*
 * allow a function to get a quick virtual mapping of a physical page
 */

int
apply_func_phys(
	addr64_t dst64,
	vm_size_t bytes,
	int (*func)(void * buffer, vm_size_t bytes, void * arg),
	void * arg)
{
	/* Not necessary for K64 - but ensure we stay within a page */
	if (((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
		panic("apply_func_phys alignment");
	}

	return func(PHYSMAP_PTOV(dst64), bytes, arg);
}
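/*
 * Illustrative sketch, not part of the original file: a callback handed to
 * apply_func_phys() sees the page through its physmap virtual address.
 * checksum_page() and the sum variable are hypothetical.
 */
#if 0
static int
checksum_page(void *buffer, vm_size_t bytes, void *arg)
{
	uint32_t *sum = (uint32_t *)arg;
	const unsigned char *p = (const unsigned char *)buffer;
	vm_size_t i;

	for (i = 0; i < bytes; i++)
		*sum += p[i];
	return 0;
}
	/* ... apply_func_phys(pa, PAGE_SIZE, checksum_page, &sum); */
#endif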
/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 *           them correctly.
 */

void
ovbcopy(
	const char	*from,
	char		*to,
	vm_size_t	bytes)		/* num bytes to copy */
{
	/* Assume that bcopy copies left-to-right (low addr first). */
	if (from + bytes <= to || to + bytes <= from || to == from)
		bcopy_no_overwrite(from, to, bytes);	/* non-overlapping or no-op*/
	else if (from > to)
		bcopy_no_overwrite(from, to, bytes);	/* overlapping but OK */
	else {
		/* to > from: overlapping, and must copy right-to-left. */
		from += bytes - 1;
		to += bytes - 1;
		while (bytes-- > 0)
			*to-- = *from--;
	}
}
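/*
 * Illustrative case, not part of the original file: ovbcopy(buf, buf + 1, 8)
 * shifts 8 bytes up by one.  Copying left-to-right would overwrite source
 * bytes before they are read, hence the right-to-left loop above.
 */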
/*
 *  Read data from a physical address. Memory should not be cache inhibited.
 */

static inline unsigned int
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	unsigned int result = 0;

	if (!physmap_enclosed(paddr))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);

	switch (size) {
		unsigned char s1;
		unsigned short s2;

	case 1:
		s1 = *(volatile unsigned char *)PHYSMAP_PTOV(paddr);
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)PHYSMAP_PTOV(paddr);
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)PHYSMAP_PTOV(paddr);
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data\n", size);
		break;
	}

	return result;
}
static unsigned long long
ml_phys_read_long_long(pmap_paddr_t paddr)
{
	if (!physmap_enclosed(paddr))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);

	return *(volatile unsigned long long *)PHYSMAP_PTOV(paddr);
}
unsigned int ml_phys_read(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr64);
}
/*
 *  Write data to a physical address. Memory should not be cache inhibited.
 */

static inline void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
{
	if (!physmap_enclosed(paddr))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);

	switch (size) {
	case 1:
		*(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
		break;
	case 4:
		*(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
		break;
	default:
		panic("Invalid size %d for ml_phys_write_data\n", size);
		break;
	}
}
static void
ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
{
	if (!physmap_enclosed(paddr))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);

	*(volatile unsigned long long *)PHYSMAP_PTOV(paddr) = data;
}
void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr, data);
}

void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr64, data);
}
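/*
 * Naming note, not part of the original file: in both the read and write
 * families, byte/half/word/double select 1-, 2-, 4- and 8-byte accesses,
 * and the _64 variants take a full addr64_t physical address, e.g.
 * ml_phys_write_half_64(pa64, 0xBEEF) stores two bytes at pa64.
 */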
/* PCI config cycle probing
 *
 * Read the memory location at physical address paddr.
 * *Does not* recover from machine checks, unlike the PowerPC implementation.
 * Should probably be deprecated.
 */

boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
	if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4)
		return FALSE;

	*val = ml_phys_read((pmap_paddr_t)paddr);

	return TRUE;
}
/*
 *  Read the memory location at physical address paddr.
 *  This is a part of a device probe, so there is a good chance we will
 *  have a machine check here. So we have to be able to handle that.
 *  We assume that machine checks are enabled both in MSR and HIDs
 */
boolean_t
ml_probe_read_64(addr64_t paddr64, unsigned int *val)
{
	if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4)
		return FALSE;

	*val = ml_phys_read_64((pmap_paddr_t)paddr64);

	return TRUE;
}
int bcmp(
	const void	*pa,
	const void	*pb,
	size_t		len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0)
		return 0;

	do
		if (*a++ != *b++)
			break;
	while (--len);

	return (int)len;
}

int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++)
				return (*--p1 - *--p2);
		} while (--n != 0);
	}
	return (0);
}
/*
 * strlen returns the number of characters in "string" preceding
 * the terminating null character.
 */

size_t
strlen(
	register const char *string)
{
	register const char *ret = string;

	while (*string++ != '\0')
		continue;
	return string - 1 - ret;
}
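/*
 * Illustrative note, not part of the original file: when the loop exits,
 * string points one past the NUL, so the length is string - 1 - ret;
 * e.g. for "ab", string has advanced 3 bytes and strlen returns 2.
 */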
uint32_t
hw_compare_and_store(uint32_t oldval, uint32_t newval, volatile uint32_t *dest)
{
	return OSCompareAndSwap((UInt32)oldval,
				(UInt32)newval,
				(volatile UInt32 *)dest);
}
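/*
 * Illustrative sketch, not part of the original file: a typical retry loop
 * built on hw_compare_and_store().  The counter variable is hypothetical.
 */
#if 0
	uint32_t old, new;

	do {
		old = *counter;
		new = old + 1;
	} while (!hw_compare_and_store(old, new, counter));
#endif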
#if	MACH_ASSERT
/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void machine_callstack(
	__unused natural_t	*buf,
	__unused vm_size_t	callstack_max)
{
}
#endif	/* MACH_ASSERT */
void fillPage(ppnum_t pa, unsigned int fill)
{
	pmap_paddr_t	src;
	int		i;
	int		cnt = PAGE_SIZE / sizeof(unsigned int);
	unsigned int	*addr;

	src = i386_ptob(pa);
	for (i = 0, addr = (unsigned int *)PHYSMAP_PTOV(src); i < cnt; i++)
		*addr++ = fill;
}
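/*
 * Descriptive note, not part of the original file: the four inlines below
 * wrap x86 cache/ordering instructions: sfence orders stores, mfence orders
 * all loads and stores, wbinvd writes back and invalidates every cache, and
 * clflush flushes the single cache line containing its operand.
 */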
static inline void __sfence(void)
{
	__asm__ volatile("sfence");
}

static inline void __mfence(void)
{
	__asm__ volatile("mfence");
}

static inline void __wbinvd(void)
{
	__asm__ volatile("wbinvd");
}

static inline void __clflush(void *ptr)
{
	__asm__ volatile("clflush (%0)" : : "r" (ptr));
}
void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
	addr64_t  linesize = cpuid_info()->cache_linesize;
	addr64_t  bound = (pa + count + linesize - 1) & ~(linesize - 1);

	__mfence();

	while (pa < bound) {
		__clflush(PHYSMAP_PTOV(pa));
		pa += linesize;
	}

	__mfence();
}
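/*
 * Illustrative arithmetic, not part of the original file: with a 0x40-byte
 * line size, pa == 0x1010 and count == 0x90 give
 * bound == (0x1010 + 0x90 + 0x3F) & ~0x3F == 0x10C0, so the loop issues
 * clflush at 0x1010, 0x1050 and 0x1090, covering every line touched.
 */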
void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
	return(dcache_incoherent_io_store64(pa, count));
}
void
flush_dcache64(addr64_t addr, unsigned count, int phys)
{
	if (phys) {
		dcache_incoherent_io_flush64(addr, count);
	}
	else {
		uint32_t  linesize = cpuid_info()->cache_linesize;
		addr64_t  bound = (addr + count + linesize - 1) & ~(linesize - 1);

		__mfence();

		while (addr < bound) {
			__clflush((void *) (uintptr_t) addr);
			addr += linesize;
		}

		__mfence();
	}
}
void
invalidate_icache64(__unused addr64_t addr,
		    __unused unsigned count,
		    __unused int phys)
{
}
addr64_t	vm_last_addr;
void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}

void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}
void
cache_flush_page_phys(ppnum_t pa)
{
	boolean_t	istate;
	unsigned char	*cacheline_addr;
	int		cacheline_size = cpuid_info()->cache_linesize;
	int		cachelines_to_flush = PAGE_SIZE / cacheline_size;

	__mfence();

	istate = ml_set_interrupts_enabled(FALSE);

	for (cacheline_addr = (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa));
	     cachelines_to_flush > 0;
	     cachelines_to_flush--, cacheline_addr += cacheline_size) {
		__clflush((void *) cacheline_addr);
	}

	(void) ml_set_interrupts_enabled(istate);

	__mfence();
}
void
kdp_register_callout(void)
{
}
int host_vmxon(boolean_t exclusive __unused)
{
	return VMX_UNSUPPORTED;
}

void host_vmxoff(void)
{
	return;
}