/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
#include <mach_assert.h>

#include <string.h>
#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

/* XXX - should be gone from here */
extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
extern void flush_dcache64(addr64_t addr, unsigned count, int phys);
extern boolean_t phys_page_exists(ppnum_t);
extern pt_entry_t *pmap_mapgetpte(vm_map_t, vm_offset_t);
extern void bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
extern void pmap_set_reference(ppnum_t pn);
extern void mapping_set_mod(ppnum_t pa);
extern void mapping_set_ref(ppnum_t pn);
extern void switch_to_serial_console(void);
extern kern_return_t copyp2p(vm_offset_t source,
			     vm_offset_t dest,
			     unsigned int size,
			     unsigned int flush_action);
extern void fillPage(ppnum_t pa, unsigned int fill);
extern void ovbcopy(const char *from,
		    char *to,
		    vm_size_t nbytes);
void machine_callstack(natural_t *buf, vm_size_t callstack_max);


#define value_64bit(value)	((value) & 0xFFFFFFFF00000000LL)
#define low32(x)		((unsigned int)((x) & 0x00000000FFFFFFFFLL))


void
bzero_phys(
	   addr64_t src64,
	   vm_size_t bytes)
{
	vm_offset_t src = low32(src64);
	pt_entry_t save2;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM2)
		panic("bzero_phys: CMAP busy");

	*(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
			      INTEL_PTE_REF | INTEL_PTE_MOD;
	save2 = *(pt_entry_t *) CM2;
	invlpg((u_int)CA2);

	bzero((void *)((unsigned int)CA2 | (src & INTEL_OFFMASK)), bytes);
	if (save2 != *(pt_entry_t *) CM2)
		panic("bzero_phys CMAP changed");
	*(pt_entry_t *) CM2 = 0;
	mp_enable_preemption();
}
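
/*
 * Example (hypothetical caller, not part of this file): zeroing a single
 * physical page given its page number.  bzero_phys() maps the page through
 * the CM2/CA2 window, so the range must not cross a page boundary.
 *
 *	ppnum_t pn = ...;	// some valid physical page
 *	bzero_phys((addr64_t)i386_ptob(pn), PAGE_SIZE);
 */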

/*
 * copy 'size' bytes from physical to physical address
 * the caller must validate the physical ranges
 *
 * if flush_action == 0, no cache flush necessary
 * if flush_action == 1, flush the source
 * if flush_action == 2, flush the dest
 * if flush_action == 3, flush both source and dest
 */

kern_return_t
copyp2p(vm_offset_t source,
	vm_offset_t dest,
	unsigned int size,
	unsigned int flush_action)
{

	switch (flush_action) {
	case 1:
		flush_dcache(source, size, 1);
		break;
	case 2:
		flush_dcache(dest, size, 1);
		break;
	case 3:
		flush_dcache(source, size, 1);
		flush_dcache(dest, size, 1);
		break;
	}
	bcopy_phys((addr64_t)source, (addr64_t)dest, (vm_size_t)size);	/* Do a physical copy */

	switch (flush_action) {
	case 1:
		flush_dcache(source, size, 1);
		break;
	case 2:
		flush_dcache(dest, size, 1);
		break;
	case 3:
		flush_dcache(source, size, 1);
		flush_dcache(dest, size, 1);
		break;
	}
	return KERN_SUCCESS;
}
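
/*
 * Example (hypothetical values, for illustration only): copying one page
 * between two already-validated physical addresses, flushing both the
 * source and destination caches around the move (flush_action == 3):
 *
 *	kern_return_t kr;
 *	kr = copyp2p(src_paddr, dst_paddr, PAGE_SIZE, 3);
 */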

/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 */

void
bcopy_phys(
	   addr64_t src64,
	   addr64_t dst64,
	   vm_size_t bytes)
{
	vm_offset_t src = low32(src64);
	vm_offset_t dst = low32(dst64);
	pt_entry_t save1, save2;

	/* ensure we stay within a page */
	if ((((src & (NBPG-1)) + bytes) > NBPG) ||
	    (((dst & (NBPG-1)) + bytes) > NBPG))
		panic("bcopy_phys");
	mp_disable_preemption();
	if (*(pt_entry_t *) CM1 || *(pt_entry_t *) CM2)
		panic("bcopy_phys: CMAP busy");

	*(pt_entry_t *) CM1 = INTEL_PTE_VALID | (src & PG_FRAME) | INTEL_PTE_REF;
	*(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (dst & PG_FRAME) |
			      INTEL_PTE_REF | INTEL_PTE_MOD;
	save1 = *(pt_entry_t *) CM1;
	save2 = *(pt_entry_t *) CM2;
	invlpg((u_int)CA1);
	invlpg((u_int)CA2);

	bcopy((void *) ((uintptr_t)CA1 | (src & INTEL_OFFMASK)),
	      (void *) ((uintptr_t)CA2 | (dst & INTEL_OFFMASK)), bytes);
	if ((save1 != *(pt_entry_t *) CM1) || (save2 != *(pt_entry_t *) CM2))
		panic("bcopy_phys CMAP changed");
	*(pt_entry_t *) CM1 = 0;
	*(pt_entry_t *) CM2 = 0;
	mp_enable_preemption();
}
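
/*
 * Example (hypothetical addresses): copying 512 bytes between two physical
 * pages.  Note the guard above: the source and destination ranges must each
 * fit within a single page, or bcopy_phys() panics.
 *
 *	bcopy_phys((addr64_t)src_paddr, (addr64_t)dst_paddr, 512);
 */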

/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 *           them correctly.
 */

void
ovbcopy(
	const char *from,
	char *to,
	vm_size_t bytes)		/* num bytes to copy */
{
	/* Assume that bcopy copies left-to-right (low addr first). */
	if (from + bytes <= to || to + bytes <= from || to == from)
		bcopy_no_overwrite(from, to, bytes);	/* non-overlapping or no-op */
	else if (from > to)
		bcopy_no_overwrite(from, to, bytes);	/* overlapping but OK */
	else {
		/* to > from: overlapping, and must copy right-to-left. */
		from += bytes - 1;
		to += bytes - 1;
		while (bytes-- > 0)
			*to-- = *from--;
	}
}
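
/*
 * Example: shifting a buffer's contents up by one byte in place.  The
 * destination overlaps the source and sits above it, so ovbcopy() takes
 * the right-to-left path; a plain left-to-right bcopy could clobber
 * bytes before they are read.
 *
 *	char buf[16];
 *	ovbcopy(&buf[0], &buf[1], 15);
 */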

/*
 * Read data from a physical address. Memory should not be cache inhibited.
 */

static unsigned int
ml_phys_read_data(vm_offset_t paddr, int size)
{
	unsigned int result;
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_read_data: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF;
	save = *(pt_entry_t *) CM3;
	invlpg((u_int)CA3);

	switch (size) {
		unsigned char s1;
		unsigned short s2;
	case 1:
		s1 = *(unsigned char *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
		result = s1;
		break;
	case 2:
		s2 = *(unsigned short *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
		result = s2;
		break;
	case 4:
	default:
		result = *(unsigned int *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));
		break;
	}

	if (save != *(pt_entry_t *) CM3)
		panic("ml_phys_read_data CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();
	return result;
}

static unsigned long long
ml_phys_read_long_long(vm_offset_t paddr)
{
	unsigned long long result;
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_read_long_long: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF;
	save = *(pt_entry_t *) CM3;
	invlpg((u_int)CA3);

	result = *(unsigned long long *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK));

	if (save != *(pt_entry_t *) CM3)
		panic("ml_phys_read_long_long CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();
	return result;
}

unsigned int ml_phys_read(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
	return ml_phys_read_data(paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
	return ml_phys_read_data(low32(paddr64), 1);
}

unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long(paddr);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long(low32(paddr64));
}
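
/*
 * Example (illustrative only): reading a 32-bit word and then a 64-bit
 * value from a physical address.  On this 32-bit pmap only the low 32
 * bits of an addr64_t are used, as the low32() calls above make explicit.
 *
 *	unsigned int w = ml_phys_read(paddr);
 *	unsigned long long d = ml_phys_read_double(paddr);
 */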

/*
 * Write data to a physical address. Memory should not be cache inhibited.
 */

static void
ml_phys_write_data(vm_offset_t paddr, unsigned long data, int size)
{
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_write_data: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
			      INTEL_PTE_REF | INTEL_PTE_MOD;
	save = *(pt_entry_t *) CM3;
	invlpg((u_int)CA3);

	switch (size) {
	case 1:
		*(unsigned char *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = (unsigned char)data;
		break;
	case 2:
		*(unsigned short *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = (unsigned short)data;
		break;
	case 4:
	default:
		*(unsigned int *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = data;
		break;
	}

	if (save != *(pt_entry_t *) CM3)
		panic("ml_phys_write_data CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();
}

static void
ml_phys_write_long_long(vm_offset_t paddr, unsigned long long data)
{
	pt_entry_t save;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM3)
		panic("ml_phys_write_long_long: CMAP busy");

	*(pt_entry_t *) CM3 = INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
			      INTEL_PTE_REF | INTEL_PTE_MOD;
	save = *(pt_entry_t *) CM3;
	invlpg((u_int)CA3);

	*(unsigned long long *)((unsigned int)CA3 | (paddr & INTEL_OFFMASK)) = data;

	if (save != *(pt_entry_t *) CM3)
		panic("ml_phys_write_long_long CMAP changed");
	*(pt_entry_t *) CM3 = 0;
	mp_enable_preemption();
}

void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(low32(paddr64), data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(low32(paddr64), data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(low32(paddr64), data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data(paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data(low32(paddr64), data, 4);
}

void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_long_long(paddr, data);
}

void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_long_long(low32(paddr64), data);
}
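
/*
 * Example (illustrative only): writing a byte and then a full 64-bit
 * value to a physical address.  As on the read side, the _64 variants
 * simply truncate the address with low32() on this 32-bit pmap.
 *
 *	ml_phys_write_byte(paddr, 0xA5);
 *	ml_phys_write_double(paddr, 0x0123456789ABCDEFULL);
 */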

/* PCI config cycle probing
 *
 *
 * Read the memory location at physical address paddr.
 * This is a part of a device probe, so there is a good chance we will
 * have a machine check here. So we have to be able to handle that.
 * We assume that machine checks are enabled both in MSR and HIDs.
 */

boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
	*val = ml_phys_read(paddr);
	return TRUE;
}

/*
 * Read the memory location at physical address paddr.
 * This is a part of a device probe, so there is a good chance we will
 * have a machine check here. So we have to be able to handle that.
 * We assume that machine checks are enabled both in MSR and HIDs.
 */
boolean_t
ml_probe_read_64(addr64_t paddr, unsigned int *val)
{
	*val = ml_phys_read_64(paddr);
	return TRUE;
}
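
/*
 * Example (illustrative only): probing a candidate device register
 * during discovery.  The boolean return is the probe protocol: a FALSE
 * result would mean the access machine-checked, though this
 * implementation always reports success.
 *
 *	unsigned int val;
 *	if (ml_probe_read(paddr, &val))
 *		... device responded, val holds the data ...
 */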

int bcmp(
	 const void *pa,
	 const void *pb,
	 size_t len)
{
	const char *a = (const char *)pa;
	const char *b = (const char *)pb;

	if (len == 0)
		return 0;

	do
		if (*a++ != *b++)
			break;
	while (--len);

	return len;
}

int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++)
				return (*--p1 - *--p2);
		} while (--n != 0);
	}
	return (0);
}
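
/*
 * Note the difference between the two routines above: memcmp() returns
 * the signed difference of the first mismatching bytes, while bcmp()
 * only returns zero/non-zero (the count of bytes remaining when the
 * mismatch was found).  Example:
 *
 *	if (memcmp(buf_a, buf_b, 16) == 0)
 *		... first 16 bytes match ...
 */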

/*
 * Abstract:
 *	strlen returns the number of characters in "string" preceding
 *	the terminating null character.
 */

size_t
strlen(
	register const char *string)
{
	register const char *ret = string;

	while (*string++ != '\0')
		continue;
	return string - 1 - ret;
}

#include <libkern/OSAtomic.h>

uint32_t
hw_atomic_add(
	uint32_t *dest,
	uint32_t delt)
{
	uint32_t oldValue;
	uint32_t newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue + delt);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_sub(
	uint32_t *dest,
	uint32_t delt)
{
	uint32_t oldValue;
	uint32_t newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue - delt);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_or(
	uint32_t *dest,
	uint32_t mask)
{
	uint32_t oldValue;
	uint32_t newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue | mask);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_and(
	uint32_t *dest,
	uint32_t mask)
{
	uint32_t oldValue;
	uint32_t newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue & mask);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_compare_and_store(
	uint32_t oldval,
	uint32_t newval,
	uint32_t *dest)
{
	return OSCompareAndSwap((UInt32)oldval, (UInt32)newval, (UInt32 *)dest);
}
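
/*
 * All of the hw_atomic_* operations above follow the same lock-free
 * pattern: read the current value, compute the new one, and retry the
 * compare-and-swap until no other CPU raced in between.  Example
 * (illustrative): taking a reference on a shared counter and setting a
 * flag bit atomically.
 *
 *	uint32_t refs  = hw_atomic_add(&obj_refcount, 1);
 *	uint32_t flags = hw_atomic_or(&obj_flags, 0x1);
 */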

#if MACH_ASSERT

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void machine_callstack(
	__unused natural_t *buf,
	__unused vm_size_t callstack_max)
{
}

#endif	/* MACH_ASSERT */

void fillPage(ppnum_t pa, unsigned int fill)
{
	pmap_paddr_t src;
	int i;
	int cnt = PAGE_SIZE/sizeof(unsigned int);
	unsigned int *addr;

	mp_disable_preemption();
	if (*(pt_entry_t *) CM2)
		panic("fillPage: CMAP busy");
	src = (pmap_paddr_t)i386_ptob(pa);
	*(pt_entry_t *) CM2 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
			      INTEL_PTE_REF | INTEL_PTE_MOD;
	invlpg((u_int)CA2);

	for (i = 0, addr = (unsigned int *)CA2; i < cnt; i++)
		*addr++ = fill;

	*(pt_entry_t *) CM2 = 0;
	mp_enable_preemption();
}
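
/*
 * Example (illustrative only): stamping a physical page with a poison
 * pattern, e.g. to catch use of freed memory.  The fill value is
 * replicated one 32-bit word at a time across the whole page.
 *
 *	fillPage(pn, 0xDEADBEEF);
 */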

static inline void __sfence(void)
{
	__asm__ volatile("sfence");
}
static inline void __mfence(void)
{
	__asm__ volatile("mfence");
}
static inline void __wbinvd(void)
{
	__asm__ volatile("wbinvd");
}
static inline void __clflush(void *ptr)
{
	/* Hand-assembled "clflush (%eax)" (opcode 0F AE /7) for assemblers
	 * that lack the mnemonic; the "a" constraint pins ptr in %eax. */
	__asm__ volatile(".byte 0x0F; .byte 0xae; .byte 0x38" : : "a" (ptr));
}

void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
	if (cpuid_features() & CPUID_FEATURE_CLFSH)
	{
		uint32_t linesize = cpuid_info()->cache_linesize;
		addr64_t addr;
		uint32_t offset, chunk;
		boolean_t istate;

		istate = ml_set_interrupts_enabled(FALSE);

		if (*(pt_entry_t *) CM2)
			panic("dcache_incoherent_io_store64: CMAP busy");

		offset = pa & (linesize - 1);
		count += offset;
		addr = pa - offset;
		offset = addr & ((addr64_t) (page_size - 1));
		chunk = page_size - offset;

		do
		{
			if (chunk > count)
				chunk = count;

			*(pt_entry_t *) CM2 = i386_ptob(atop_64(addr)) | INTEL_PTE_VALID;
			invlpg((u_int)CA2);

			for (; offset < chunk; offset += linesize)
				__clflush((void *)(((u_int)CA2) + offset));

			count -= chunk;
			addr += chunk;
			chunk = page_size;
			offset = 0;
		}
		while (count);

		*(pt_entry_t *) CM2 = 0;

		(void) ml_set_interrupts_enabled(istate);
	}
	else
		__wbinvd();
	__sfence();
}
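
/*
 * Example (illustrative only; the descriptor type is hypothetical):
 * forcing a freshly written DMA descriptor out to memory before handing
 * it to a device that does not snoop the CPU caches.  The address/count
 * pair may span multiple pages; the loop above remaps the CM2/CA2 window
 * one page at a time.
 *
 *	dcache_incoherent_io_store64(desc_paddr, sizeof(struct dma_desc));
 */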

void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
	return(dcache_incoherent_io_store64(pa, count));
}

void
flush_dcache64(__unused addr64_t addr,
	       __unused unsigned count,
	       __unused int phys)
{
}

void
invalidate_icache64(__unused addr64_t addr,
		    __unused unsigned count,
		    __unused int phys)
{
}

kern_return_t copypv(addr64_t src64,
		     addr64_t snk64,
		     unsigned int size,
		     int which)
{
	vm_map_t map;
	kern_return_t ret;
	vm_offset_t source, sink;
	vm_offset_t vaddr;
	vm_offset_t paddr;
	spl_t s;
	unsigned int lop, csize;
	int needtran, bothphys;
	vm_prot_t prot;
	pt_entry_t *ptep;

	map = (which & cppvKmap) ? kernel_map : current_map_fast();

	source = low32(src64);
	sink = low32(snk64);

	if ((which & (cppvPsrc | cppvPsnk)) == 0) {	/* Make sure that only one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n");	/* Not allowed */
	}

	bothphys = 1;	/* Assume both are physical */

	if (!(which & cppvPsnk)) {	/* Is there a virtual page here? */
		vaddr = sink;		/* Sink side is virtual */
		bothphys = 0;		/* Show both aren't physical */
		prot = VM_PROT_READ | VM_PROT_WRITE;	/* Sink always must be read/write */
	} else /* if (!(which & cppvPsrc)) */ {	/* Source side is virtual */
		vaddr = source;		/* Source side is virtual */
		bothphys = 0;		/* Show both aren't physical */
		prot = VM_PROT_READ;	/* Virtual source is always read only */
	}

	needtran = 1;	/* Show we need to map the virtual the first time */
	s = splhigh();	/* Don't bother me */

	while (size) {

		if (!bothphys && (needtran || !(vaddr & 4095LL))) {	/* If first time or we stepped onto a new page, we need to translate */
			needtran = 0;
			while (1) {
				ptep = pmap_mapgetpte(map, vaddr);
				if ((0 == ptep) || ((*ptep & INTEL_PTE_VALID) == 0)) {
					splx(s);	/* Restore the interrupt level */
					ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0);	/* Didn't find it, try to fault it in... */

					if (ret != KERN_SUCCESS)
						return KERN_FAILURE;	/* Didn't find any, return no good... */

					s = splhigh();	/* Don't bother me */
					continue;	/* Go try for the map again... */
				}

				/* Note that we have to have the destination writable.  So, if we
				   already have it, or we are mapping the source, we can just leave. */
				if ((which & cppvPsnk) || (*ptep & INTEL_PTE_WRITE))
					break;		/* We got it mapped R/W or the source is not virtual, leave... */
				splx(s);		/* Restore the interrupt level */

				ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0);	/* check for a COW area */
				if (ret != KERN_SUCCESS)
					return KERN_FAILURE;	/* We couldn't get it R/W, leave in disgrace... */
				s = splhigh();		/* Don't bother me */
			}

			paddr = pte_to_pa(*ptep) | (vaddr & 4095);

			if (which & cppvPsrc)
				sink = paddr;		/* If source is physical, then the sink is virtual */
			else
				source = paddr;		/* Otherwise the source is */
		}

		lop = (unsigned int)(4096LL - (sink & 4095LL));	/* Assume sink smallest */
		if (lop > (unsigned int)(4096LL - (source & 4095LL)))
			lop = (unsigned int)(4096LL - (source & 4095LL));	/* No, source is smaller */

		csize = size;		/* Assume we can copy it all */
		if (lop < size)
			csize = lop;	/* Nope, we can't do it all */

		if (which & cppvFsrc)
			flush_dcache64((addr64_t)source, csize, 1);	/* If requested, flush source before move */
		if (which & cppvFsnk)
			flush_dcache64((addr64_t)sink, csize, 1);	/* If requested, flush sink before move */

		bcopy_phys((addr64_t)source, (addr64_t)sink, csize);	/* Do a physical copy, virtually */

		if (which & cppvFsrc)
			flush_dcache64((addr64_t)source, csize, 1);	/* If requested, flush source after move */
		if (which & cppvFsnk)
			flush_dcache64((addr64_t)sink, csize, 1);	/* If requested, flush sink after move */

		/*
		 * Note that for certain ram disk flavors, we may be copying outside of known memory.
		 * Therefore, before we try to mark it modified, we check if it exists.
		 */

		if (!(which & cppvNoModSnk)) {
			if (phys_page_exists((ppnum_t)(sink >> 12)))
				mapping_set_mod((ppnum_t)(sink >> 12));	/* Make sure we know that it is modified */
		}
		if (!(which & cppvNoRefSrc)) {
			if (phys_page_exists((ppnum_t)(source >> 12)))
				mapping_set_ref((ppnum_t)(source >> 12));	/* Make sure we know that it is referenced */
		}

		size = size - csize;	/* Calculate what is left */
		vaddr = vaddr + csize;	/* Move to next sink address */
		source = source + csize;	/* Bump source to next physical address */
		sink = sink + csize;	/* Bump sink to next physical address */
	}

	splx(s);	/* Open up for interrupts */

	return KERN_SUCCESS;
}
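
/*
 * Example (illustrative only): copying a user buffer into a physical
 * page, with the sink physical (cppvPsnk), the source virtual in the
 * current map, and the modify-bit update on the sink suppressed:
 *
 *	kern_return_t kr;
 *	kr = copypv(user_vaddr, page_paddr, len, cppvPsnk | cppvNoModSnk);
 */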

void switch_to_serial_console(void)
{
}

addr64_t vm_last_addr;

void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}

void
mapping_set_ref(ppnum_t pn)
{
	pmap_set_reference(pn);
}

void
cache_flush_page_phys(ppnum_t pa)
{
	boolean_t istate;
	int i;
	unsigned char *cacheline_addr;
	int cacheline_size = cpuid_info()->cache_linesize;
	int cachelines_in_page = PAGE_SIZE/cacheline_size;

	/*
	 * If there's no clflush instruction, we're sadly forced to use wbinvd.
	 */
	if (!(cpuid_features() & CPUID_FEATURE_CLFSH)) {
		asm volatile("wbinvd" : : : "memory");
		return;
	}

	istate = ml_set_interrupts_enabled(FALSE);

	if (*(pt_entry_t *) CM2)
		panic("cache_flush_page_phys: CMAP busy");

	*(pt_entry_t *) CM2 = i386_ptob(pa) | INTEL_PTE_VALID;
	invlpg((u_int)CA2);

	/*
	 * Walk the page one cache line at a time.  The clflush operand must
	 * be the line being flushed (*cacheline_addr), not the pointer
	 * variable itself, and the pointer is byte-typed so that += advances
	 * by cacheline_size bytes rather than that many words.
	 */
	for (i = 0, cacheline_addr = (unsigned char *)CA2;
	     i < cachelines_in_page;
	     i++, cacheline_addr += cacheline_size) {
		asm volatile("clflush %0" : : "m" (*cacheline_addr));
	}

	*(pt_entry_t *) CM2 = 0;

	(void) ml_set_interrupts_enabled(istate);
}
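
/*
 * Example (illustrative only): flushing a page's cache lines back to
 * memory before reassigning the physical page to a non-coherent agent:
 *
 *	cache_flush_page_phys(pn);
 */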