/* apple/xnu (xnu-792.18.15) - osfmk/i386/loose_ends.c */
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
#include <mach_assert.h>

#include <string.h>
#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <sys/errno.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>

#if 0

#undef KERNEL_DEBUG
#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
#define KDEBUG 1

#endif
/* XXX - should be gone from here */
extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
extern void flush_dcache64(addr64_t addr, unsigned count, int phys);
extern boolean_t phys_page_exists(ppnum_t);
extern void bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
extern void pmap_set_reference(ppnum_t pn);
extern void mapping_set_mod(ppnum_t pa);
extern void mapping_set_ref(ppnum_t pn);

extern void fillPage(ppnum_t pa, unsigned int fill);
extern void ovbcopy(const char *from,
                    char *to,
                    vm_size_t nbytes);
void machine_callstack(natural_t *buf, vm_size_t callstack_max);


#define value_64bit(value)  ((value) & 0xFFFFFFFF00000000LL)
#define low32(x)            ((unsigned int)((x) & 0x00000000FFFFFFFFLL))

void
bzero_phys(
    addr64_t src64,
    vm_size_t bytes)
{
    mapwindow_t *map;
    pt_entry_t save;

    mp_disable_preemption();
    map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF | INTEL_PTE_MOD));
    if (map == 0) {
        panic("bzero_phys: CMAP busy");
    }
    save = *map->prv_CMAP;

    invlpg((uintptr_t)map->prv_CADDR);

    bzero((void *)((uintptr_t)map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK)), bytes);

    if (save != *map->prv_CMAP)
        panic("bzero_phys: CMAP changed");
    *map->prv_CMAP = 0;

    mp_enable_preemption();
}
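
/*
 * Illustrative usage sketch, not part of the original file: callers hand
 * bzero_phys() a 64-bit physical address and a byte count; it maps the
 * page through a per-cpu CMAP window with preemption disabled.  The
 * 'pn' and 'offset' names below are hypothetical.
 */
#if 0
static void
zero_phys_example(ppnum_t pn, unsigned int offset, vm_size_t len)
{
    /* zero 'len' bytes starting 'offset' bytes into physical page 'pn' */
    bzero_phys((addr64_t)i386_ptob(pn) + offset, len);
}
#endif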

/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 */

void
bcopy_phys(
    addr64_t src64,
    addr64_t dst64,
    vm_size_t bytes)
{
    mapwindow_t *src_map, *dst_map;
    pt_entry_t save1, save2;

    /* ensure we stay within a page */
    if ( ((((uint32_t)src64 & (NBPG-1)) + bytes) > NBPG) || ((((uint32_t)dst64 & (NBPG-1)) + bytes) > NBPG) ) {
        panic("bcopy_phys alignment");
    }
    mp_disable_preemption();

    src_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF));
    dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) |
                                              INTEL_PTE_REF | INTEL_PTE_MOD));
    if (src_map == 0 || dst_map == 0) {
        panic("bcopy_phys: CMAP busy");
    }
    save1 = *src_map->prv_CMAP;
    save2 = *dst_map->prv_CMAP;

    invlpg((uintptr_t)src_map->prv_CADDR);
    invlpg((uintptr_t)dst_map->prv_CADDR);

    bcopy((void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK)),
          (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK)), bytes);

    if ( (save1 != *src_map->prv_CMAP) || (save2 != *dst_map->prv_CMAP))
        panic("bcopy_phys CMAP changed");

    *src_map->prv_CMAP = 0;
    *dst_map->prv_CMAP = 0;

    mp_enable_preemption();
}
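
/*
 * Illustrative sketch, not part of the original file: bcopy_phys()
 * panics if either range crosses a page boundary, so a multi-page copy
 * has to be chunked by the caller, e.g. page by page.  The function and
 * parameter names below are hypothetical.
 */
#if 0
static void
copy_phys_pages_example(ppnum_t src_pn, ppnum_t dst_pn, unsigned int npages)
{
    unsigned int i;

    for (i = 0; i < npages; i++)
        bcopy_phys((addr64_t)i386_ptob(src_pn + i),
                   (addr64_t)i386_ptob(dst_pn + i), PAGE_SIZE);
}
#endif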

/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 *           them correctly.
 */

void
ovbcopy(
    const char *from,
    char *to,
    vm_size_t bytes)            /* num bytes to copy */
{
    /* Assume that bcopy copies left-to-right (low addr first). */
    if (from + bytes <= to || to + bytes <= from || to == from)
        bcopy_no_overwrite(from, to, bytes);    /* non-overlapping or no-op */
    else if (from > to)
        bcopy_no_overwrite(from, to, bytes);    /* overlapping but OK */
    else {
        /* to > from: overlapping, and must copy right-to-left. */
        from += bytes - 1;
        to += bytes - 1;
        while (bytes-- > 0)
            *to-- = *from--;
    }
}
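
/*
 * Illustrative note, not part of the original file: ovbcopy() has
 * memmove() semantics, so shifting a buffer up by one byte in place is
 * safe; this exercises the right-to-left path above:
 *
 *    ovbcopy(&buf[0], &buf[1], len - 1);   // overlapping, handled
 */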

/*
 * Read data from a physical address. Memory should not be cache inhibited.
 */

static unsigned int
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
    mapwindow_t *map;
    unsigned int result;
    pt_entry_t save;

    mp_disable_preemption();
    map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF));
    if (map == 0) {
        panic("ml_phys_read_data: CMAP busy");
    }

    save = *map->prv_CMAP;
    invlpg((uintptr_t)map->prv_CADDR);

    switch (size) {
        unsigned char s1;
        unsigned short s2;
    case 1:
        s1 = *(unsigned char *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
        result = s1;
        break;
    case 2:
        s2 = *(unsigned short *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
        result = s2;
        break;
    case 4:
    default:
        result = *(unsigned int *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
        break;
    }

    if (save != *map->prv_CMAP)
        panic("ml_phys_read_data CMAP changed");
    *map->prv_CMAP = 0;
    mp_enable_preemption();

    return result;
}

static unsigned long long
ml_phys_read_long_long(pmap_paddr_t paddr)
{
    mapwindow_t *map;
    unsigned long long result;
    pt_entry_t save;

    mp_disable_preemption();
    map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF));
    if (map == 0) {
        panic("ml_phys_read_long_long: CMAP busy");
    }

    save = *map->prv_CMAP;
    invlpg((uintptr_t)map->prv_CADDR);

    result = *(unsigned long long *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));

    if (save != *map->prv_CMAP)
        panic("ml_phys_read_long_long CMAP changed");
    *map->prv_CMAP = 0;
    mp_enable_preemption();

    return result;
}


unsigned int ml_phys_read(vm_offset_t paddr)
{
    return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr)
{
    return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
    return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
    return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
    return ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
    return ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
    return ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
    return ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
    return ml_phys_read_long_long((pmap_paddr_t)paddr);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr64)
{
    return ml_phys_read_long_long((pmap_paddr_t)paddr64);
}
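
/*
 * Illustrative note, not part of the original source: the wrappers
 * above differ only in operand width (1/2/4/8 bytes) and in whether the
 * physical address arrives as a 32-bit vm_offset_t or a 64-bit
 * addr64_t, e.g. (with 'pa' a hypothetical physical address):
 *
 *    unsigned int w = ml_phys_read_word(pa);   // 32-bit load
 *    unsigned int b = ml_phys_read_byte(pa);   // 8-bit load
 */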

/*
 * Write data to a physical address. Memory should not be cache inhibited.
 */

static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
{
    mapwindow_t *map;
    pt_entry_t save;

    mp_disable_preemption();
    map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
                                          INTEL_PTE_REF | INTEL_PTE_MOD));
    if (map == 0) {
        panic("ml_phys_write_data: CMAP busy");
    }

    save = *map->prv_CMAP;
    invlpg((uintptr_t)map->prv_CADDR);

    switch (size) {
    case 1:
        *(unsigned char *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = (unsigned char)data;
        break;
    case 2:
        *(unsigned short *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = (unsigned short)data;
        break;
    case 4:
    default:
        *(unsigned int *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = data;
        break;
    }

    if (save != *map->prv_CMAP)
        panic("ml_phys_write_data CMAP changed");
    *map->prv_CMAP = 0;

    mp_enable_preemption();
}

static void
ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
{
    mapwindow_t *map;
    pt_entry_t save;

    mp_disable_preemption();
    map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
                                          INTEL_PTE_REF | INTEL_PTE_MOD));
    if (map == 0) {
        panic("ml_phys_write_long_long: CMAP busy");
    }

    save = *map->prv_CMAP;
    invlpg((uintptr_t)map->prv_CADDR);

    *(unsigned long long *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = data;

    if (save != *map->prv_CMAP)
        panic("ml_phys_write_long_long CMAP changed");
    *map->prv_CMAP = 0;
    mp_enable_preemption();
}


void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
    ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
    ml_phys_write_long_long((pmap_paddr_t)paddr, data);
}

void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
    ml_phys_write_long_long((pmap_paddr_t)paddr64, data);
}
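
/*
 * Illustrative sketch, not part of the original file: a read-modify-
 * write of a 32-bit word at a physical address using the accessors
 * above; 'pa' and the function name are hypothetical.
 */
#if 0
static void
phys_set_bits_example(vm_offset_t pa, unsigned int bits)
{
    unsigned int v;

    v = ml_phys_read_word(pa);          /* fetch current contents */
    ml_phys_write_word(pa, v | bits);   /* store with bits set */
}
#endif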


/* PCI config cycle probing
 *
 * Read the memory location at physical address paddr.
 * This is a part of a device probe, so there is a good chance we will
 * have a machine check here. So we have to be able to handle that.
 * We assume that machine checks are enabled both in MSR and HIDs
 */

boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
    *val = ml_phys_read((pmap_paddr_t)paddr);

    return TRUE;
}

/*
 * Read the memory location at physical address paddr.
 * This is a part of a device probe, so there is a good chance we will
 * have a machine check here. So we have to be able to handle that.
 * We assume that machine checks are enabled both in MSR and HIDs
 */
boolean_t
ml_probe_read_64(addr64_t paddr64, unsigned int *val)
{
    *val = ml_phys_read_64((pmap_paddr_t)paddr64);

    return TRUE;
}


int bcmp(
    const void *pa,
    const void *pb,
    size_t len)
{
    const char *a = (const char *)pa;
    const char *b = (const char *)pb;

    if (len == 0)
        return 0;

    do
        if (*a++ != *b++)
            break;
    while (--len);

    return len;
}

int
memcmp(const void *s1, const void *s2, size_t n)
{
    if (n != 0) {
        const unsigned char *p1 = s1, *p2 = s2;

        do {
            if (*p1++ != *p2++)
                return (*--p1 - *--p2);
        } while (--n != 0);
    }
    return (0);
}

/*
 * Abstract:
 *      strlen returns the number of characters in "string" preceding
 *      the terminating null character.
 */

size_t
strlen(
    register const char *string)
{
    register const char *ret = string;

    while (*string++ != '\0')
        continue;
    return string - 1 - ret;
}

uint32_t
hw_atomic_add(
    uint32_t *dest,
    uint32_t delt)
{
    uint32_t oldValue;
    uint32_t newValue;

    do {
        oldValue = *dest;
        newValue = (oldValue + delt);
    } while (!OSCompareAndSwap((UInt32)oldValue,
                               (UInt32)newValue, (UInt32 *)dest));

    return newValue;
}

uint32_t
hw_atomic_sub(
    uint32_t *dest,
    uint32_t delt)
{
    uint32_t oldValue;
    uint32_t newValue;

    do {
        oldValue = *dest;
        newValue = (oldValue - delt);
    } while (!OSCompareAndSwap((UInt32)oldValue,
                               (UInt32)newValue, (UInt32 *)dest));

    return newValue;
}

uint32_t
hw_atomic_or(
    uint32_t *dest,
    uint32_t mask)
{
    uint32_t oldValue;
    uint32_t newValue;

    do {
        oldValue = *dest;
        newValue = (oldValue | mask);
    } while (!OSCompareAndSwap((UInt32)oldValue,
                               (UInt32)newValue, (UInt32 *)dest));

    return newValue;
}

uint32_t
hw_atomic_and(
    uint32_t *dest,
    uint32_t mask)
{
    uint32_t oldValue;
    uint32_t newValue;

    do {
        oldValue = *dest;
        newValue = (oldValue & mask);
    } while (!OSCompareAndSwap((UInt32)oldValue,
                               (UInt32)newValue, (UInt32 *)dest));

    return newValue;
}

uint32_t
hw_compare_and_store(
    uint32_t oldval,
    uint32_t newval,
    uint32_t *dest)
{
    return OSCompareAndSwap((UInt32)oldval, (UInt32)newval, (UInt32 *)dest);
}
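
/*
 * Illustrative sketch, not part of the original file: the compare-and-
 * swap loops above give lock-free counters and flag words; e.g. a
 * hypothetical statistics counter bumped from any context:
 */
#if 0
static uint32_t example_stat;

static void
bump_stat_example(void)
{
    /* hw_atomic_add retries internally until the CAS succeeds */
    (void) hw_atomic_add(&example_stat, 1);
}
#endif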

#if MACH_ASSERT

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void machine_callstack(
    __unused natural_t *buf,
    __unused vm_size_t callstack_max)
{
}

#endif  /* MACH_ASSERT */


void fillPage(ppnum_t pa, unsigned int fill)
{
    mapwindow_t *map;
    pmap_paddr_t src;
    int i;
    int cnt = PAGE_SIZE/sizeof(unsigned int);
    unsigned int *addr;

    mp_disable_preemption();
    src = i386_ptob(pa);
    map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
                                          INTEL_PTE_REF | INTEL_PTE_MOD));
    if (map == 0) {
        panic("fillPage: CMAP busy");
    }
    invlpg((uintptr_t)map->prv_CADDR);

    for (i = 0, addr = (unsigned int *)map->prv_CADDR; i < cnt; i++)
        *addr++ = fill;

    *map->prv_CMAP = 0;
    mp_enable_preemption();
}

static inline void __sfence(void)
{
    __asm__ volatile("sfence");
}
static inline void __mfence(void)
{
    __asm__ volatile("mfence");
}
static inline void __wbinvd(void)
{
    __asm__ volatile("wbinvd");
}
static inline void __clflush(void *ptr)
{
    __asm__ volatile("clflush (%0)" : : "r" (ptr));
}
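
/*
 * Illustrative note, not part of the original source: __clflush()
 * operates on a virtual address, so the flush routines below must map
 * each physical page through a map window before flushing its cache
 * lines, and they bracket the clflush loop with __mfence() to order it
 * against surrounding stores.
 */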

void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
    mapwindow_t *map;
    uint32_t linesize = cpuid_info()->cache_linesize;
    addr64_t addr;
    uint32_t offset, chunk;
    boolean_t istate;

    __mfence();

    istate = ml_set_interrupts_enabled(FALSE);

    offset = pa & (linesize - 1);
    addr = pa - offset;

    map = pmap_get_mapwindow((pt_entry_t)(i386_ptob(atop_64(addr)) | INTEL_PTE_VALID));
    if (map == 0) {
        panic("dcache_incoherent_io_store64: CMAP busy");
    }

    count += offset;
    offset = addr & ((addr64_t) (page_size - 1));
    chunk = page_size - offset;

    do
    {
        if (chunk > count)
            chunk = count;

        *map->prv_CMAP = (pt_entry_t)(i386_ptob(atop_64(addr)) | INTEL_PTE_VALID);
        invlpg((uintptr_t)map->prv_CADDR);

        for (; offset < chunk; offset += linesize)
            __clflush((void *)(((uintptr_t)map->prv_CADDR) + offset));

        count -= chunk;
        addr += chunk;
        chunk = page_size;
        offset = 0;
    }
    while (count);

    *map->prv_CMAP = 0;

    (void) ml_set_interrupts_enabled(istate);

    __mfence();
}

void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
    return(dcache_incoherent_io_store64(pa, count));
}
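
/*
 * Illustrative sketch, not part of the original file: a driver handing
 * a buffer to a bus-master device that bypasses the cache might write
 * the buffer back first; 'pa', 'len', and the function name are
 * hypothetical.
 */
#if 0
static void
prepare_dma_buffer_example(addr64_t pa, unsigned int len)
{
    /* force dirty cache lines covering [pa, pa+len) out to memory */
    dcache_incoherent_io_store64(pa, len);
}
#endif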

void
flush_dcache64(__unused addr64_t addr,
               __unused unsigned count,
               __unused int phys)
{
}

void
invalidate_icache64(__unused addr64_t addr,
                    __unused unsigned count,
                    __unused int phys)
{
}


addr64_t vm_last_addr;

void
mapping_set_mod(ppnum_t pn)
{
    pmap_set_modify(pn);
}

void
mapping_set_ref(ppnum_t pn)
{
    pmap_set_reference(pn);
}

void
cache_flush_page_phys(ppnum_t pa)
{
    mapwindow_t *map;
    boolean_t istate;
    int i;
    unsigned char *cacheline_addr;
    int cacheline_size = cpuid_info()->cache_linesize;
    int cachelines_in_page = PAGE_SIZE/cacheline_size;

    __mfence();

    istate = ml_set_interrupts_enabled(FALSE);

    map = pmap_get_mapwindow((pt_entry_t)(i386_ptob(pa) | INTEL_PTE_VALID));
    if (map == 0) {
        panic("cache_flush_page_phys: CMAP busy");
    }

    invlpg((uintptr_t)map->prv_CADDR);

    for (i = 0, cacheline_addr = (unsigned char *)map->prv_CADDR;
         i < cachelines_in_page;
         i++, cacheline_addr += cacheline_size) {
        __clflush((void *) cacheline_addr);
    }

    *map->prv_CMAP = 0;

    (void) ml_set_interrupts_enabled(istate);

    __mfence();
}
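
/*
 * Illustrative note, not part of the original source:
 * cache_flush_page_phys() flushes per line with __clflush() rather than
 * using the __wbinvd() helper above, since wbinvd writes back and
 * invalidates the entire cache hierarchy, which is far more expensive
 * than touching only the one page being flushed.
 */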


void exit_funnel_section(void)
{
    thread_t thread;

    thread = current_thread();

    if (thread->funnel_lock)
        (void) thread_funnel_set(thread->funnel_lock, FALSE);
}



/*
 * the copy engine has the following characteristics
 *   - copyio handles copies to/from user or kernel space
 *   - copypv deals with physical or virtual addresses
 *
 * implementation details as follows
 *   - a cache of up to NCOPY_WINDOWS is maintained per thread for
 *     access of user virtual space
 *   - the window size is determined by the amount of virtual space
 *     that can be mapped by a single page table
 *   - the mapping is done by copying the page table pointer from
 *     the user's directory entry corresponding to the window's
 *     address in user space to the directory entry corresponding
 *     to the window slot in the kernel's address space
 *   - the set of mappings is preserved across context switches,
 *     so the copy can run with pre-emption enabled
 *   - there is a gdt entry set up to anchor the kernel window on
 *     each processor
 *   - the copies are done using the selector corresponding to the
 *     gdt entry
 *   - the addresses corresponding to the user virtual address are
 *     relative to the beginning of the window being used to map
 *     that region... thus the thread can be pre-empted and switched
 *     to a different processor while in the midst of a copy
 *   - the window caches must be invalidated if the pmap changes out
 *     from under the thread... this can happen during vfork/exec...
 *     inval_copy_windows is the invalidation routine to be used
 *   - the copyio engine has 4 different states associated with it
 *     that allow for lazy tlb flushes and the ability to avoid
 *     a flush altogether if we've just come from user space
 *     the 4 states are as follows...
 *
 *      WINDOWS_OPENED - set by copyio to indicate to the context
 *        switch code that it is necessary to do a tlbflush after
 *        switching the windows since we're in the middle of a copy
 *
 *      WINDOWS_CLOSED - set by copyio to indicate that it's done
 *        using the windows, so that the context switch code need
 *        not do the tlbflush... instead it will set the state to...
 *
 *      WINDOWS_DIRTY - set by the context switch code to indicate
 *        to the copy engine that it is responsible for doing a
 *        tlbflush before using the windows again... it's also
 *        set by the inval_copy_windows routine to indicate the
 *        same responsibility.
 *
 *      WINDOWS_CLEAN - set by the return to user path to indicate
 *        that a tlbflush has happened and that there is no need
 *        for copyio to do another when it is entered next...
 *
 *   - a window for mapping single physical pages is provided for copypv
 *   - this window is maintained across context switches and has the
 *     same characteristics as the user space windows with respect to
 *     pre-emption
 */
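
/*
 * Illustrative summary, not part of the original comment: the
 * copyio_state transitions described above, by actor:
 *
 *    copyio entry        -> WINDOWS_OPENED
 *    copyio exit         -> WINDOWS_CLOSED
 *    context switch      -> WINDOWS_DIRTY  (skips the tlbflush if CLOSED)
 *    return to user      -> WINDOWS_CLEAN  (a tlbflush has been done)
 *    inval_copy_windows  -> WINDOWS_DIRTY
 */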

extern int copyout_user(const char *, vm_offset_t, vm_size_t);
extern int copyout_kern(const char *, vm_offset_t, vm_size_t);
extern int copyin_user(const vm_offset_t, char *, vm_size_t);
extern int copyin_kern(const vm_offset_t, char *, vm_size_t);
extern int copyoutphys_user(const char *, vm_offset_t, vm_size_t);
extern int copyoutphys_kern(const char *, vm_offset_t, vm_size_t);
extern int copyinphys_user(const vm_offset_t, char *, vm_size_t);
extern int copyinphys_kern(const vm_offset_t, char *, vm_size_t);
extern int copyinstr_user(const vm_offset_t, char *, vm_size_t, vm_size_t *);
extern int copyinstr_kern(const vm_offset_t, char *, vm_size_t, vm_size_t *);

static int copyio(int, user_addr_t, char *, vm_size_t, vm_size_t *, int);
static int copyio_phys(addr64_t, addr64_t, vm_size_t, int);


#define COPYIN          0
#define COPYOUT         1
#define COPYINSTR       2
#define COPYINPHYS      3
#define COPYOUTPHYS     4


void inval_copy_windows(thread_t thread)
{
    int i;

    for (i = 0; i < NCOPY_WINDOWS; i++) {
        thread->machine.copy_window[i].user_base = -1;
    }
    thread->machine.nxt_window = 0;
    thread->machine.copyio_state = WINDOWS_DIRTY;

    KERNEL_DEBUG(0xeff70058 | DBG_FUNC_NONE, (int)thread, (int)thread->map, 0, 0, 0);
}


static int
copyio(int copy_type, user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied, int use_kernel_map)
{
    thread_t thread;
    pmap_t pmap;
    pt_entry_t *updp;
    pt_entry_t *kpdp;
    user_addr_t user_base;
    vm_offset_t user_offset;
    vm_offset_t kern_vaddr;
    vm_size_t cnt;
    vm_size_t bytes_copied;
    int error = 0;
    int window_index;
    int copyio_state;
    boolean_t istate;
#if KDEBUG
    int debug_type = 0xeff70010;
    debug_type += (copy_type << 2);
#endif

    thread = current_thread();

    KERNEL_DEBUG(debug_type | DBG_FUNC_START, (int)(user_addr >> 32), (int)user_addr, (int)nbytes, thread->machine.copyio_state, 0);

    if (nbytes == 0) {
        KERNEL_DEBUG(debug_type | DBG_FUNC_END, (int)user_addr, (int)kernel_addr, (int)nbytes, 0, 0);
        return (0);
    }
    pmap = thread->map->pmap;

    if (pmap == kernel_pmap || use_kernel_map) {

        kern_vaddr = (vm_offset_t)user_addr;

        switch (copy_type) {

        case COPYIN:
            error = copyin_kern(kern_vaddr, kernel_addr, nbytes);
            break;

        case COPYOUT:
            error = copyout_kern(kernel_addr, kern_vaddr, nbytes);
            break;

        case COPYINSTR:
            error = copyinstr_kern(kern_vaddr, kernel_addr, nbytes, lencopied);
            break;

        case COPYINPHYS:
            error = copyinphys_kern(kern_vaddr, kernel_addr, nbytes);
            break;

        case COPYOUTPHYS:
            error = copyoutphys_kern(kernel_addr, kern_vaddr, nbytes);
            break;
        }
        KERNEL_DEBUG(debug_type | DBG_FUNC_END, (int)kern_vaddr, (int)kernel_addr, (int)nbytes, error | 0x80000000, 0);

        return (error);
    }
    user_base = user_addr & ~((user_addr_t)(NBPDE - 1));
    user_offset = user_addr & (NBPDE - 1);

    KERNEL_DEBUG(debug_type | DBG_FUNC_NONE, (int)(user_base >> 32), (int)user_base, (int)user_offset, 0, 0);

    cnt = NBPDE - user_offset;

    if (cnt > nbytes)
        cnt = nbytes;

    istate = ml_set_interrupts_enabled(FALSE);

    copyio_state = thread->machine.copyio_state;
    thread->machine.copyio_state = WINDOWS_OPENED;

    (void) ml_set_interrupts_enabled(istate);


    for (;;) {

        for (window_index = 0; window_index < NCOPY_WINDOWS; window_index++) {
            if (thread->machine.copy_window[window_index].user_base == user_base)
                break;
        }
        if (window_index >= NCOPY_WINDOWS) {

            window_index = thread->machine.nxt_window;
            thread->machine.nxt_window++;

            if (thread->machine.nxt_window >= NCOPY_WINDOWS)
                thread->machine.nxt_window = 0;
            thread->machine.copy_window[window_index].user_base = user_base;

            /*
             * it's necessary to disable pre-emption
             * since I have to compute the kernel descriptor pointer
             * for the new window
             */
            istate = ml_set_interrupts_enabled(FALSE);

            updp = pmap_pde(pmap, user_base);

            kpdp = current_cpu_datap()->cpu_copywindow_pdp;
            kpdp += window_index;

            pmap_store_pte(kpdp, updp ? *updp : 0);

            (void) ml_set_interrupts_enabled(istate);

            copyio_state = WINDOWS_DIRTY;

            KERNEL_DEBUG(0xeff70040 | DBG_FUNC_NONE, window_index, (int)user_base, (int)updp, (int)kpdp, 0);

        }
#if JOE_DEBUG
        else {
            istate = ml_set_interrupts_enabled(FALSE);

            updp = pmap_pde(pmap, user_base);

            kpdp = current_cpu_datap()->cpu_copywindow_pdp;

            kpdp += window_index;

            if ((*kpdp & PG_FRAME) != (*updp & PG_FRAME)) {
                panic("copyio: user pdp mismatch - kpdp = 0x%x, updp = 0x%x\n", kpdp, updp);
            }
            (void) ml_set_interrupts_enabled(istate);
        }
#endif
        if (copyio_state == WINDOWS_DIRTY) {
            flush_tlb();

            copyio_state = WINDOWS_CLEAN;

            KERNEL_DEBUG(0xeff70054 | DBG_FUNC_NONE, window_index, 0, 0, 0, 0);
        }
        user_offset += (window_index * NBPDE);

        KERNEL_DEBUG(0xeff70044 | DBG_FUNC_NONE, (int)user_offset, (int)kernel_addr, cnt, 0, 0);

        switch (copy_type) {

        case COPYIN:
            error = copyin_user(user_offset, kernel_addr, cnt);
            break;

        case COPYOUT:
            error = copyout_user(kernel_addr, user_offset, cnt);
            break;

        case COPYINPHYS:
            error = copyinphys_user(user_offset, kernel_addr, cnt);
            break;

        case COPYOUTPHYS:
            error = copyoutphys_user(kernel_addr, user_offset, cnt);
            break;

        case COPYINSTR:
            error = copyinstr_user(user_offset, kernel_addr, cnt, &bytes_copied);

            /*
             * lencopied should be updated on success
             * or ENAMETOOLONG... but not EFAULT
             */
            if (error != EFAULT)
                *lencopied += bytes_copied;

            /*
             * if we still have room, then the ENAMETOOLONG
             * is just an artifact of the buffer straddling
             * a window boundary and we should continue
             */
            if (error == ENAMETOOLONG && nbytes > cnt)
                error = 0;

            if (error) {
#if KDEBUG
                nbytes = *lencopied;
#endif
                break;
            }
            if (*(kernel_addr + bytes_copied - 1) == 0) {
                /*
                 * we found a NULL terminator... we're done
                 */
#if KDEBUG
                nbytes = *lencopied;
#endif
                goto done;
            }
            if (cnt == nbytes) {
                /*
                 * no more room in the buffer and we haven't
                 * yet come across a NULL terminator
                 */
#if KDEBUG
                nbytes = *lencopied;
#endif
                error = ENAMETOOLONG;
                break;
            }
            assert(cnt == bytes_copied);

            break;
        }
        if (error)
            break;
        if ((nbytes -= cnt) == 0)
            break;

        kernel_addr += cnt;
        user_base += NBPDE;
        user_offset = 0;

        if (nbytes > NBPDE)
            cnt = NBPDE;
        else
            cnt = nbytes;
    }
done:
    thread->machine.copyio_state = WINDOWS_CLOSED;

    KERNEL_DEBUG(debug_type | DBG_FUNC_END, (int)user_addr, (int)kernel_addr, (int)nbytes, error, 0);

    return (error);
}
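
/*
 * Illustrative note, not part of the original source: copyio() walks
 * the user range one NBPDE-sized window at a time, so a single copyin()
 * that spans a window boundary simply goes around the for (;;) loop
 * again, revalidating the window (and tlb-flushing if it was marked
 * WINDOWS_DIRTY) before copying the next chunk.
 */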


static int
copyio_phys(addr64_t source, addr64_t sink, vm_size_t csize, int which)
{
    pmap_paddr_t paddr;
    user_addr_t vaddr;
    char *window_offset;
    pt_entry_t pentry;
    int ctype;
    int retval;
    boolean_t istate;

    if (which & cppvPsnk) {
        paddr = (pmap_paddr_t)sink;
        vaddr = (user_addr_t)source;
        ctype = COPYINPHYS;
        pentry = (pt_entry_t)(INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_RW);
    } else {
        paddr = (pmap_paddr_t)source;
        vaddr = (user_addr_t)sink;
        ctype = COPYOUTPHYS;
        pentry = (pt_entry_t)(INTEL_PTE_VALID | (paddr & PG_FRAME));
    }
    window_offset = (char *)((uint32_t)paddr & (PAGE_SIZE - 1));

    if (current_thread()->machine.physwindow_busy) {
        pt_entry_t old_pentry;

        KERNEL_DEBUG(0xeff70048 | DBG_FUNC_NONE, paddr, csize, 0, -1, 0);
        /*
         * we had better be targeting wired memory at this point
         * we will not be able to handle a fault with interrupts
         * disabled... we disable them because we can't tolerate
         * being preempted during this nested use of the window
         */
        istate = ml_set_interrupts_enabled(FALSE);

        old_pentry = *(current_cpu_datap()->cpu_physwindow_ptep);
        pmap_store_pte((current_cpu_datap()->cpu_physwindow_ptep), pentry);

        invlpg((uintptr_t)current_cpu_datap()->cpu_physwindow_base);

        retval = copyio(ctype, vaddr, window_offset, csize, NULL, which & cppvKmap);

        pmap_store_pte((current_cpu_datap()->cpu_physwindow_ptep), old_pentry);

        invlpg((uintptr_t)current_cpu_datap()->cpu_physwindow_base);

        (void) ml_set_interrupts_enabled(istate);
    } else {
        /*
         * mark the window as in use... if an interrupt hits while we're
         * busy, or we trigger another copypv from the fault path into
         * the driver on a user address space page fault due to a copyin/out
         * then we need to save and restore the current window state instead
         * of caching the window preserving it across context switches
         */
        current_thread()->machine.physwindow_busy = 1;

        if (current_thread()->machine.physwindow_pte != pentry) {
            KERNEL_DEBUG(0xeff70048 | DBG_FUNC_NONE, paddr, csize, 0, 0, 0);

            current_thread()->machine.physwindow_pte = pentry;

            /*
             * preemption at this point would be bad since we
             * could end up on the other processor after we grabbed the
             * pointer to the current cpu data area, but before we finished
             * using it to stuff the page table entry since we would
             * be modifying a window that no longer belonged to us
             * the invlpg can be done unprotected since it only flushes
             * this page address from the tlb... if it flushes the wrong
             * one, no harm is done, and the context switch that moved us
             * to the other processor will have already taken care of
             * flushing the tlb after it reloaded the page table from machine.physwindow_pte
             */
            istate = ml_set_interrupts_enabled(FALSE);
            *(current_cpu_datap()->cpu_physwindow_ptep) = pentry;
            (void) ml_set_interrupts_enabled(istate);

            invlpg((uintptr_t)current_cpu_datap()->cpu_physwindow_base);
        }
#if JOE_DEBUG
        else {
            if (pentry !=
                (*(current_cpu_datap()->cpu_physwindow_ptep) & (INTEL_PTE_VALID | PG_FRAME | INTEL_PTE_RW)))
                panic("copyio_phys: pentry != *physwindow_ptep");
        }
#endif
        retval = copyio(ctype, vaddr, window_offset, csize, NULL, which & cppvKmap);

        current_thread()->machine.physwindow_busy = 0;
    }
    return (retval);
}
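
/*
 * Illustrative note, not part of the original source: the
 * physwindow_busy path above handles re-entry, e.g. when the copy
 * issued through the window faults and the fault path winds up back in
 * copyio_phys(); the nested use runs with interrupts disabled and
 * restores the previous window PTE before returning.
 */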

int
copyinmsg(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
    return (copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0));
}

int
copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
    return (copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0));
}

int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
    *lencopied = 0;

    return (copyio(COPYINSTR, user_addr, kernel_addr, nbytes, lencopied, 0));
}

int
copyoutmsg(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
    return (copyio(COPYOUT, user_addr, (char *)kernel_addr, nbytes, NULL, 0));
}

int
copyout(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
    return (copyio(COPYOUT, user_addr, (char *)kernel_addr, nbytes, NULL, 0));
}
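
/*
 * Illustrative sketch, not part of the original file: typical
 * syscall-style use of the wrappers above; 'uaddr' is a hypothetical
 * user pointer and the function name is invented.
 */
#if 0
static int
fetch_user_word_example(user_addr_t uaddr, uint32_t *out)
{
    /* returns 0 on success, EFAULT if the user range is bad */
    return (copyin(uaddr, (char *)out, sizeof (*out)));
}
#endif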


kern_return_t copypv(addr64_t src64, addr64_t snk64, unsigned int size, int which)
{
    unsigned int lop, csize;
    int bothphys = 0;


    KERNEL_DEBUG(0xeff7004c | DBG_FUNC_START, (int)src64, (int)snk64, size, which, 0);

    if ((which & (cppvPsrc | cppvPsnk)) == 0 )                  /* Make sure that only one is virtual */
        panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */

    if ((which & (cppvPsrc | cppvPsnk)) == (cppvPsrc | cppvPsnk))
        bothphys = 1;                                           /* both are physical */

    while (size) {

        if (bothphys) {
            lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));        /* Assume sink smallest */

            if (lop > (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1))))
                lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));    /* No, source is smaller */
        } else {
            /*
             * only need to compute the resid for the physical page
             * address... we don't care about where we start/finish in
             * the virtual since we just call the normal copyin/copyout
             */
            if (which & cppvPsrc)
                lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));
            else
                lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));
        }
        csize = size;                           /* Assume we can copy it all */
        if (lop < size)
            csize = lop;                        /* Nope, we can't do it all */
#if 0
        /*
         * flush_dcache64 is currently a nop on the i386...
         * it's used when copying to non-system memory such
         * as video capture cards... on PPC there was a need
         * to flush due to how we mapped this memory... not
         * sure if it's needed on i386.
         */
        if (which & cppvFsrc)
            flush_dcache64(src64, csize, 1);    /* If requested, flush source before move */
        if (which & cppvFsnk)
            flush_dcache64(snk64, csize, 1);    /* If requested, flush sink before move */
#endif
        if (bothphys)
            bcopy_phys(src64, snk64, csize);    /* Do a physical copy, virtually */
        else {
            if (copyio_phys(src64, snk64, csize, which))
                return (KERN_FAILURE);
        }
#if 0
        if (which & cppvFsrc)
            flush_dcache64(src64, csize, 1);    /* If requested, flush source after move */
        if (which & cppvFsnk)
            flush_dcache64(snk64, csize, 1);    /* If requested, flush sink after move */
#endif
        size -= csize;                          /* Calculate what is left */
        snk64 += csize;                         /* Bump sink to next physical address */
        src64 += csize;                         /* Bump source to next physical address */
    }
    KERNEL_DEBUG(0xeff7004c | DBG_FUNC_END, (int)src64, (int)snk64, size, which, 0);

    return KERN_SUCCESS;
}
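
/*
 * Illustrative sketch, not part of the original file: copying a user
 * buffer into a physical page with copypv(); the names below are
 * hypothetical.
 */
#if 0
static kern_return_t
user_to_phys_example(user_addr_t usrc, addr64_t pdst, unsigned int len)
{
    /* source is virtual, sink is physical (cppvPsnk) */
    return (copypv((addr64_t)usrc, pdst, len, cppvPsnk));
}
#endif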