/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_assert.h>

#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <sys/errno.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>

#if !MACH_KDP
#include <kdp/kdp_callout.h>
#endif /* !MACH_KDP */

#include <libkern/OSDebug.h>

#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
/* XXX - should be gone from here */
extern void		invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
extern void		flush_dcache64(addr64_t addr, unsigned count, int phys);
extern boolean_t	phys_page_exists(ppnum_t);
extern void		bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
extern void		pmap_set_reference(ppnum_t pn);
extern void		mapping_set_mod(ppnum_t pa);
extern void		mapping_set_ref(ppnum_t pn);

extern void		ovbcopy(const char	*from,
				char		*to,
				vm_size_t	bytes);

void machine_callstack(uintptr_t *buf, vm_size_t callstack_max);
#define value_64bit(value)	((value) & 0xFFFFFFFF00000000ULL)
#define low32(x)		((unsigned int)((x) & 0x00000000FFFFFFFFULL))

#define INT_SIZE	(BYTE_SIZE * sizeof (int))
/*
 * Set indicated bit in bit string.
 */
void
setbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
}
/*
 * Clear indicated bit in bit string.
 */
void
clrbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
}
/*
 * Test if indicated bit is set in bit string.
 */
int
testbit(int bitno, int *s)
{
	return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
}
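
/*
 * Usage sketch (added commentary, not from the original source): with a
 * 32-bit int, INT_SIZE is 32, so setbit(37, map) sets bit 5 of map[1],
 * testbit(37, map) then returns non-zero, and clrbit(37, map) clears it
 * again.  "map" is simply an illustrative name for an int array used as
 * a bit string.
 */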
/*
 * Find first bit set in bit string.
 */
int
ffsbit(int *s)
{
	int offset;

	for (offset = 0; !*s; offset += (int)INT_SIZE, ++s);
	return offset + __builtin_ctz(*s);
}
int
ffs(unsigned int mask)
{
	if (mask == 0)
		return 0;
	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}
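
/*
 * Informal example (added commentary): ffs(0) == 0 and ffs(0x18) == 4,
 * i.e. the result is 1-based, unlike __builtin_ctz, which is 0-based and
 * undefined for a zero argument -- hence the explicit zero check above.
 */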
void
bzero_phys_nc(addr64_t src64, uint32_t bytes)
{
	bzero_phys(src64, bytes);
}

void
bzero_phys(addr64_t src64, uint32_t bytes)
{
	bzero(PHYSMAP_PTOV(src64), bytes);
}
/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 */
void
bcopy_phys(
	   addr64_t src64,
	   addr64_t dst64,
	   vm_size_t bytes)
{
	/* Not necessary for K64 - but ensure we stay within a page */
	if (((((uint32_t)src64 & (NBPG - 1)) + bytes) > NBPG) ||
	    ((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
		panic("bcopy_phys alignment");
	}
	bcopy(PHYSMAP_PTOV(src64), PHYSMAP_PTOV(dst64), bytes);
}
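
/*
 * Added note: bzero_phys and bcopy_phys rely on the kernel physmap --
 * PHYSMAP_PTOV() converts a physical address into a directly usable
 * kernel virtual address -- so callers are expected to pass ranges that
 * do not cross a page boundary, as the panic above enforces.
 */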
/*
 * allow a function to get a quick virtual mapping of a physical page
 */
int
apply_func_phys(
	   addr64_t dst64,
	   vm_size_t bytes,
	   int (*func)(void * buffer, vm_size_t bytes, void * arg),
	   void * arg)
{
	/* Not necessary for K64 - but ensure we stay within a page */
	if (((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) {
		panic("apply_func_phys alignment");
	}

	return func(PHYSMAP_PTOV(dst64), bytes, arg);
}
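
/*
 * Hypothetical usage sketch (not part of the original file; sum_page and
 * acc are illustrative names only): a caller could checksum one
 * page-aligned physical page in place without creating its own mapping:
 *
 *	static int
 *	sum_page(void *buffer, vm_size_t bytes, void *arg)
 *	{
 *		uint32_t  *acc = (uint32_t *)arg;
 *		vm_size_t  i;
 *
 *		for (i = 0; i < bytes; i++)
 *			*acc += ((uint8_t *)buffer)[i];
 *		return 0;
 *	}
 *
 *	uint32_t acc = 0;
 *	(void) apply_func_phys(pa, PAGE_SIZE, sum_page, &acc);
 */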
/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 *           them correctly.
 */
void
ovbcopy(
	const char	*from,
	char		*to,
	vm_size_t	bytes)		/* num bytes to copy */
{
	/* Assume that bcopy copies left-to-right (low addr first). */
	if (from + bytes <= to || to + bytes <= from || to == from)
		bcopy_no_overwrite(from, to, bytes);	/* non-overlapping or no-op */
	else if (from > to)
		bcopy_no_overwrite(from, to, bytes);	/* overlapping but OK */
	else {
		/* to > from: overlapping, and must copy right-to-left. */
		from += bytes - 1;
		to += bytes - 1;
		while (bytes-- > 0)
			*to-- = *from--;
	}
}
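
/*
 * Added note: ovbcopy() therefore has memmove()-style semantics -- the
 * copy direction is chosen so that an overlapping destination never
 * observes already-overwritten source bytes.
 */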
/*
 *  Read data from a physical address. Memory should not be cache inhibited.
 */

uint64_t reportphyreaddelayabs;
uint32_t reportphyreadosbt;

#if DEVELOPMENT || DEBUG
uint32_t phyreadpanic = 1;
#else
uint32_t phyreadpanic = 0;
#endif
__private_extern__ uint64_t
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	uint64_t	result = 0;
	unsigned char	s1;
	unsigned short	s2;
	boolean_t	istate, timeread = FALSE;
	uint64_t	sabs = 0, eabs = 0;

	if (__improbable(!physmap_enclosed(paddr)))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);

	if (__improbable(reportphyreaddelayabs != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)PHYSMAP_PTOV(paddr);
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)PHYSMAP_PTOV(paddr);
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)PHYSMAP_PTOV(paddr);
		break;
	case 8:
		result = *(volatile unsigned long long *)PHYSMAP_PTOV(paddr);
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data\n", size);
		break;
	}

	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();
		(void)ml_set_interrupts_enabled(istate);

		if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
			if (phyreadpanic) {
				panic_io_port_read();
				panic("Read from physical addr 0x%llx took %llu ns, "
				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    paddr, (eabs - sabs), result, sabs, eabs,
				    reportphyreaddelayabs);
			}

			if (reportphyreadosbt) {
				OSReportWithBacktrace("ml_phys_read_data took %lluus\n",
				    (eabs - sabs) / 1000);
			}
			DTRACE_PHYSLAT3(physread, uint64_t, (eabs - sabs),
			    pmap_paddr_t, paddr, uint32_t, size);
		}
	}

	return result;
}
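
/*
 * Added commentary: when reportphyreaddelayabs is non-zero, the read above
 * is bracketed by mach_absolute_time() with interrupts disabled.  A read
 * that exceeds that ceiling panics when phyreadpanic is set, logs a
 * backtrace when reportphyreadosbt is set, and fires the DTrace physread
 * probe with the observed latency.
 */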
static unsigned long long
ml_phys_read_long_long(pmap_paddr_t paddr)
{
	return ml_phys_read_data(paddr, 8);
}
unsigned int ml_phys_read(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
	return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
	return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr64);
}
/*
 *  Write data to a physical address. Memory should not be cache inhibited.
 */
static inline void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
{
	if (!physmap_enclosed(paddr))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);

	switch (size) {
	case 1:
		*(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data;
		break;
	case 4:
		*(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
		break;
	default:
		panic("Invalid size %d for ml_phys_write_data\n", size);
		break;
	}
}
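
/*
 * Added note: unlike the read path, the physical write helpers are not
 * timed; they only check that the target lies inside the physmap before
 * storing through the direct mapping.
 */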
static inline void
ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
{
	if (!physmap_enclosed(paddr))
		panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr);

	*(volatile unsigned long long *)PHYSMAP_PTOV(paddr) = data;
}
void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr, data);
}

void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr64, data);
}
/* PCI config cycle probing
 *
 *	Read the memory location at physical address paddr.
 *	*Does not* recover from machine checks, unlike the PowerPC implementation.
 *	Should probably be deprecated.
 */
boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
	if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4)
		return FALSE;

	*val = ml_phys_read((pmap_paddr_t)paddr);

	return TRUE;
}
/*
 *	Read the memory location at physical address paddr.
 *	This is a part of a device probe, so there is a good chance we will
 *	have a machine check here. So we have to be able to handle that.
 *	We assume that machine checks are enabled both in MSR and HIDs
 */
boolean_t
ml_probe_read_64(addr64_t paddr64, unsigned int *val)
{
	if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4)
		return FALSE;

	*val = ml_phys_read_64((pmap_paddr_t)paddr64);

	return TRUE;
}
int
bcmp(
	const void	*pa,
	const void	*pb,
	size_t		len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0)
		return 0;

	do
		if (*a++ != *b++)
			break;
	while (--len);

	return (int)len;
}

int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++)
				return (*--p1 - *--p2);
		} while (--n != 0);
	}
	return 0;
}
void *
memmove(void *dst, const void *src, size_t ulen)
{
	bcopy(src, dst, ulen);
	return dst;
}
/*
 * strlen returns the number of characters in "string" preceding
 * the terminating null character.
 */
size_t
strlen(
	const char *string)
{
	const char *ret = string;

	while (*string++ != '\0')
		continue;
	return string - 1 - ret;
}
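
/*
 * Informal example (added commentary): for strlen("abc") the loop steps
 * the pointer one past the terminating NUL, so the result is
 * (string + 4) - 1 - ret == 3.
 */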
#if	MACH_ASSERT
/*
 *	Machine-dependent routine to fill in an array with up to callstack_max
 *	levels of return pc information.
 */
void machine_callstack(
	__unused uintptr_t	*buf,
	__unused vm_size_t	callstack_max)
{
}
#endif /* MACH_ASSERT */
void fillPage(ppnum_t pa, unsigned int fill)
{
	pmap_paddr_t	src;
	int		i;
	int		cnt = PAGE_SIZE / sizeof(unsigned int);
	unsigned int	*addr;

	src = i386_ptob(pa);
	for (i = 0, addr = (unsigned int *)PHYSMAP_PTOV(src); i < cnt; i++)
		*addr++ = fill;
}
static inline void __clflush(void *ptr)
{
	__asm__ volatile("clflush (%0)" : : "r" (ptr));
}
void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
	addr64_t	linesize = cpuid_info()->cache_linesize;
	addr64_t	bound = (pa + count + linesize - 1) & ~(linesize - 1);

	while (pa < bound) {
		__clflush(PHYSMAP_PTOV(pa));
		pa += linesize;
	}
}
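
/*
 * Added note: the loop above rounds the end of the range up to a cache
 * line boundary and issues one CLFLUSH per line, so a count that is not
 * line-aligned still flushes every line the range touches.
 */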
void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
	return(dcache_incoherent_io_store64(pa, count));
}
void
flush_dcache64(addr64_t addr, unsigned count, int phys)
{
	if (phys) {
		dcache_incoherent_io_flush64(addr, count);
	}
	else {
		uint64_t	linesize = cpuid_info()->cache_linesize;
		addr64_t	bound = (addr + count + linesize - 1) & ~(linesize - 1);

		while (addr < bound) {
			__clflush((void *) (uintptr_t) addr);
			addr += linesize;
		}
	}
}
void
invalidate_icache64(__unused addr64_t addr,
		    __unused unsigned count,
		    __unused int phys)
{
}

addr64_t	vm_last_addr;
void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}

void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}
extern i386_cpu_info_t	cpuid_cpu_info;

void
cache_flush_page_phys(ppnum_t pa)
{
	boolean_t	istate;
	unsigned char	*cacheline_addr;
	i386_cpu_info_t	*cpuid_infop = cpuid_info();
	int		cacheline_size;
	int		cachelines_to_flush;

	cacheline_size = cpuid_infop->cache_linesize;
	if (cacheline_size == 0)
		panic("cacheline_size=0 cpuid_infop=%p\n", cpuid_infop);
	cachelines_to_flush = PAGE_SIZE/cacheline_size;

	istate = ml_set_interrupts_enabled(FALSE);

	for (cacheline_addr = (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa));
	     cachelines_to_flush > 0;
	     cachelines_to_flush--, cacheline_addr += cacheline_size) {
		__clflush((void *) cacheline_addr);
	}

	(void) ml_set_interrupts_enabled(istate);
}
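
/*
 * Added note: interrupts are disabled around the per-line CLFLUSH loop so
 * the flush of the page is not interleaved with other activity on this
 * CPU; PAGE_SIZE / cache_linesize lines are flushed in total.
 */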
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}

int host_vmxon(boolean_t exclusive __unused)
{
	return VMX_UNSUPPORTED;
}

void host_vmxoff(void)
{
	return;
}