/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
#include <mach_assert.h>

#include <string.h>
#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <sys/errno.h>
#include <i386/param.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/cpuid.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>

#if 0

#undef KERNEL_DEBUG
#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
#define KDEBUG 1

#endif

/* XXX - should be gone from here */
extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys);
extern void flush_dcache64(addr64_t addr, unsigned count, int phys);
extern boolean_t phys_page_exists(ppnum_t);
extern void bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes);
extern void pmap_set_reference(ppnum_t pn);
extern void mapping_set_mod(ppnum_t pa);
extern void mapping_set_ref(ppnum_t pn);

extern void ovbcopy(const char *from,
                    char *to,
                    vm_size_t nbytes);
void machine_callstack(natural_t *buf, vm_size_t callstack_max);


#define value_64bit(value)  ((value) & 0xFFFFFFFF00000000LL)
#define low32(x)            ((unsigned int)((x) & 0x00000000FFFFFFFFLL))

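/*
 * bzero_phys - zero a range of physical memory by mapping it through a
 * temporary map window with preemption disabled.  bzero_phys_nc is the
 * non-cached variant, which on i386 simply falls through to bzero_phys.
 */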
void
bzero_phys_nc(
        addr64_t src64,
        vm_size_t bytes)
{
        bzero_phys(src64, bytes);
}

void
bzero_phys(
        addr64_t src64,
        vm_size_t bytes)
{
        mapwindow_t *map;

        mp_disable_preemption();

        map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF | INTEL_PTE_MOD));

        bzero((void *)((uintptr_t)map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK)), bytes);

        pmap_put_mapwindow(map);

        mp_enable_preemption();
}


/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 */

void
bcopy_phys(
        addr64_t src64,
        addr64_t dst64,
        vm_size_t bytes)
{
        mapwindow_t *src_map, *dst_map;

        /* ensure we stay within a page */
        if ( ((((uint32_t)src64 & (NBPG-1)) + bytes) > NBPG) || ((((uint32_t)dst64 & (NBPG-1)) + bytes) > NBPG) ) {
                panic("bcopy_phys alignment");
        }
        mp_disable_preemption();

        src_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF));
        dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) |
                                                  INTEL_PTE_REF | INTEL_PTE_MOD));

        bcopy((void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK)),
              (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK)), bytes);

        pmap_put_mapwindow(src_map);
        pmap_put_mapwindow(dst_map);

        mp_enable_preemption();
}

/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 *           them correctly.
 */

void
ovbcopy(
        const char *from,
        char *to,
        vm_size_t bytes)        /* num bytes to copy */
{
        /* Assume that bcopy copies left-to-right (low addr first). */
        if (from + bytes <= to || to + bytes <= from || to == from)
                bcopy_no_overwrite(from, to, bytes);    /* non-overlapping or no-op */
        else if (from > to)
                bcopy_no_overwrite(from, to, bytes);    /* overlapping but OK */
        else {
                /* to > from: overlapping, and must copy right-to-left. */
                from += bytes - 1;
                to += bytes - 1;
                while (bytes-- > 0)
                        *to-- = *from--;
        }
}


/*
 * Read data from a physical address. Memory should not be cache inhibited.
 */

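/*
 * ml_phys_read_data - common helper for the ml_phys_read* entry points
 * below; size selects a 1, 2 or 4 byte access (anything else is treated
 * as 4 bytes).
 */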
static unsigned int
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
        mapwindow_t *map;
        unsigned int result;

        mp_disable_preemption();

        map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF));

        switch (size) {
                unsigned char s1;
                unsigned short s2;
        case 1:
                s1 = *(unsigned char *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
                result = s1;
                break;
        case 2:
                s2 = *(unsigned short *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
                result = s2;
                break;
        case 4:
        default:
                result = *(unsigned int *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
                break;
        }
        pmap_put_mapwindow(map);

        mp_enable_preemption();

        return result;
}

static unsigned long long
ml_phys_read_long_long(pmap_paddr_t paddr)
{
        mapwindow_t *map;
        unsigned long long result;

        mp_disable_preemption();

        map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF));

        result = *(unsigned long long *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));

        pmap_put_mapwindow(map);

        mp_enable_preemption();

        return result;
}



unsigned int ml_phys_read(vm_offset_t paddr)
{
        return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr)
{
        return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
        return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
        return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
        return ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
        return ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
        return ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
        return ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
        return ml_phys_read_long_long((pmap_paddr_t)paddr);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr64)
{
        return ml_phys_read_long_long((pmap_paddr_t)paddr64);
}



/*
 * Write data to a physical address. Memory should not be cache inhibited.
 */

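/*
 * ml_phys_write_data - common helper for the ml_phys_write* entry points
 * below; size selects a 1, 2 or 4 byte store (anything else is treated
 * as 4 bytes).
 */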
static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
{
        mapwindow_t *map;

        mp_disable_preemption();

        map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
                                              INTEL_PTE_REF | INTEL_PTE_MOD));

        switch (size) {
        case 1:
                *(unsigned char *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = (unsigned char)data;
                break;
        case 2:
                *(unsigned short *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = (unsigned short)data;
                break;
        case 4:
        default:
                *(unsigned int *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = data;
                break;
        }
        pmap_put_mapwindow(map);

        mp_enable_preemption();
}

static void
ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
{
        mapwindow_t *map;

        mp_disable_preemption();

        map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) |
                                              INTEL_PTE_REF | INTEL_PTE_MOD));

        *(unsigned long long *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = data;

        pmap_put_mapwindow(map);

        mp_enable_preemption();
}



void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
        ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
        ml_phys_write_long_long((pmap_paddr_t)paddr, data);
}

void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
        ml_phys_write_long_long((pmap_paddr_t)paddr64, data);
}


/* PCI config cycle probing
 *
 *
 *      Read the memory location at physical address paddr.
 *  This is a part of a device probe, so there is a good chance we will
 *  have a machine check here. So we have to be able to handle that.
 *  We assume that machine checks are enabled both in MSR and HIDs
 */

boolean_t
ml_probe_read(vm_offset_t paddr, unsigned int *val)
{
        if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4)
                return FALSE;

        *val = ml_phys_read((pmap_paddr_t)paddr);

        return TRUE;
}

/*
 *  Read the memory location at physical address paddr.
 *  This is a part of a device probe, so there is a good chance we will
 *  have a machine check here. So we have to be able to handle that.
 *  We assume that machine checks are enabled both in MSR and HIDs
 */
boolean_t
ml_probe_read_64(addr64_t paddr64, unsigned int *val)
{
        if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4)
                return FALSE;

        *val = ml_phys_read_64((pmap_paddr_t)paddr64);
        return TRUE;
}

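/*
 * bcmp - compare two byte ranges; returns 0 if they match.  Unlike memcmp,
 * a non-zero return is just the number of bytes left unexamined at the
 * first mismatch (counting the mismatching byte), not a signed difference.
 */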
int bcmp(
        const void *pa,
        const void *pb,
        size_t len)
{
        const char *a = (const char *)pa;
        const char *b = (const char *)pb;

        if (len == 0)
                return 0;

        do
                if (*a++ != *b++)
                        break;
        while (--len);

        return len;
}

int
memcmp(const void *s1, const void *s2, size_t n)
{
        if (n != 0) {
                const unsigned char *p1 = s1, *p2 = s2;

                do {
                        if (*p1++ != *p2++)
                                return (*--p1 - *--p2);
                } while (--n != 0);
        }
        return (0);
}

/*
 * Abstract:
 *      strlen returns the number of characters in "string" preceding
 *      the terminating null character.
 */

size_t
strlen(
        register const char *string)
{
        register const char *ret = string;

        while (*string++ != '\0')
                continue;
        return string - 1 - ret;
}

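/*
 * hw_compare_and_store - 32-bit atomic compare-and-swap; returns non-zero
 * if *dest contained oldval and was replaced with newval.
 */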
uint32_t
hw_compare_and_store(uint32_t oldval, uint32_t newval, volatile uint32_t *dest)
{
        return OSCompareAndSwap((UInt32)oldval,
                                (UInt32)newval,
                                (volatile UInt32 *)dest);
}

#if MACH_ASSERT

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void machine_callstack(
        __unused natural_t *buf,
        __unused vm_size_t callstack_max)
{
}

#endif  /* MACH_ASSERT */

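/*
 * fillPage - fill an entire physical page with the 32-bit pattern 'fill',
 * using a temporary map window with preemption disabled.
 */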
void fillPage(ppnum_t pa, unsigned int fill)
{
        mapwindow_t *map;
        pmap_paddr_t src;
        int i;
        int cnt = PAGE_SIZE / sizeof(unsigned int);
        unsigned int *addr;

        mp_disable_preemption();

        src = i386_ptob(pa);
        map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
                                              INTEL_PTE_REF | INTEL_PTE_MOD));

        for (i = 0, addr = (unsigned int *)map->prv_CADDR; i < cnt; i++)
                *addr++ = fill;

        pmap_put_mapwindow(map);

        mp_enable_preemption();
}

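/*
 * thin inline wrappers around the sfence/mfence/wbinvd/clflush
 * instructions; the cache maintenance routines below use __mfence
 * and __clflush
 */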
static inline void __sfence(void)
{
        __asm__ volatile("sfence");
}
static inline void __mfence(void)
{
        __asm__ volatile("mfence");
}
static inline void __wbinvd(void)
{
        __asm__ volatile("wbinvd");
}
static inline void __clflush(void *ptr)
{
        __asm__ volatile("clflush (%0)" : : "r" (ptr));
}

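/*
 * dcache_incoherent_io_store64 - flush 'count' bytes starting at physical
 * address 'pa' out of the data cache, one cache line at a time, walking
 * the range page by page through a map window with interrupts disabled.
 */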
void dcache_incoherent_io_store64(addr64_t pa, unsigned int count)
{
        mapwindow_t *map;
        uint32_t linesize = cpuid_info()->cache_linesize;
        addr64_t addr;
        uint32_t offset, chunk;
        boolean_t istate;

        __mfence();

        istate = ml_set_interrupts_enabled(FALSE);

        offset = pa & (linesize - 1);
        addr   = pa - offset;

        map = pmap_get_mapwindow((pt_entry_t)(i386_ptob(atop_64(addr)) | INTEL_PTE_VALID));

        count += offset;
        offset = addr & ((addr64_t) (page_size - 1));
        chunk  = page_size - offset;

        do
        {
                if (chunk > count)
                        chunk = count;

                for (; offset < chunk; offset += linesize)
                        __clflush((void *)(((uintptr_t)map->prv_CADDR) + offset));

                count -= chunk;
                addr  += chunk;
                chunk  = page_size;
                offset = 0;

                if (count) {
                        pmap_store_pte(map->prv_CMAP, (pt_entry_t)(i386_ptob(atop_64(addr)) | INTEL_PTE_VALID));
                        invlpg((uintptr_t)map->prv_CADDR);
                }
        }
        while (count);

        pmap_put_mapwindow(map);

        (void) ml_set_interrupts_enabled(istate);

        __mfence();
}

void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count)
{
        return(dcache_incoherent_io_store64(pa, count));
}

void
flush_dcache64(__unused addr64_t addr,
               __unused unsigned count,
               __unused int phys)
{
}

void
invalidate_icache64(__unused addr64_t addr,
                    __unused unsigned count,
                    __unused int phys)
{
}


addr64_t vm_last_addr;

void
mapping_set_mod(ppnum_t pn)
{
        pmap_set_modify(pn);
}

void
mapping_set_ref(ppnum_t pn)
{
        pmap_set_reference(pn);
}

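/*
 * cache_flush_page_phys - clflush every cache line of a single physical
 * page, with interrupts disabled for the duration.
 */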
void
cache_flush_page_phys(ppnum_t pa)
{
        mapwindow_t *map;
        boolean_t istate;
        int i;
        unsigned char *cacheline_addr;
        int cacheline_size = cpuid_info()->cache_linesize;
        int cachelines_in_page = PAGE_SIZE / cacheline_size;

        __mfence();

        istate = ml_set_interrupts_enabled(FALSE);

        map = pmap_get_mapwindow((pt_entry_t)(i386_ptob(pa) | INTEL_PTE_VALID));

        for (i = 0, cacheline_addr = (unsigned char *)map->prv_CADDR;
             i < cachelines_in_page;
             i++, cacheline_addr += cacheline_size) {
                __clflush((void *) cacheline_addr);
        }
        pmap_put_mapwindow(map);

        (void) ml_set_interrupts_enabled(istate);

        __mfence();
}


/*
 * the copy engine has the following characteristics
 *   - copyio handles copies to/from user or kernel space
 *   - copypv deals with physical or virtual addresses
 *
 * implementation details as follows
 *   - a cache of up to NCOPY_WINDOWS is maintained per thread for
 *     access of user virtual space
 *   - the window size is determined by the amount of virtual space
 *     that can be mapped by a single page table
 *   - the mapping is done by copying the page table pointer from
 *     the user's directory entry corresponding to the window's
 *     address in user space to the directory entry corresponding
 *     to the window slot in the kernel's address space
 *   - the set of mappings is preserved across context switches,
 *     so the copy can run with pre-emption enabled
 *   - there is a gdt entry set up to anchor the kernel window on
 *     each processor
 *   - the copies are done using the selector corresponding to the
 *     gdt entry
 *   - the addresses corresponding to the user virtual address are
 *     relative to the beginning of the window being used to map
 *     that region... thus the thread can be pre-empted and switched
 *     to a different processor while in the midst of a copy
 *   - the window caches must be invalidated if the pmap changes out
 *     from under the thread... this can happen during vfork/exec...
 *     inval_copy_windows is the invalidation routine to be used
 *   - the copyio engine has 4 different states associated with it
 *     that allow for lazy tlb flushes and the ability to avoid
 *     a flush altogether if we've just come from user space
 *     the 4 states are as follows...
 *
 *      WINDOWS_OPENED - set by copyio to indicate to the context
 *        switch code that it is necessary to do a tlbflush after
 *        switching the windows since we're in the middle of a copy
 *
 *      WINDOWS_CLOSED - set by copyio to indicate that it's done
 *        using the windows, so that the context switch code need
 *        not do the tlbflush... instead it will set the state to...
 *
 *      WINDOWS_DIRTY - set by the context switch code to indicate
 *        to the copy engine that it is responsible for doing a
 *        tlbflush before using the windows again... it's also
 *        set by the inval_copy_windows routine to indicate the
 *        same responsibility.
 *
 *      WINDOWS_CLEAN - set by the return to user path to indicate
 *        that a tlbflush has happened and that there is no need
 *        for copyio to do another when it is entered next...
 *
 *   - a window for mapping single physical pages is provided for copypv
 *   - this window is maintained across context switches and has the
 *     same characteristics as the user space windows w/r to pre-emption
 */

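/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a BSD syscall handler pulling a path string in from user space and
 * pushing a result structure back out would typically do
 *
 *      char       path[MAXPATHLEN];
 *      vm_size_t  done;
 *      int        err;
 *
 *      err = copyinstr(uap->path, path, sizeof(path), &done);
 *      if (err == 0)
 *              err = copyout(&result, uap->buf, sizeof(result));
 *
 * (uap->path, uap->buf and result are illustrative names only).  Both
 * calls resolve to copyio() below, which picks or loads a copy window
 * for the user range and flushes the tlb only when the window state
 * requires it.
 */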
extern int copyout_user(const char *, vm_offset_t, vm_size_t);
extern int copyout_kern(const char *, vm_offset_t, vm_size_t);
extern int copyin_user(const vm_offset_t, char *, vm_size_t);
extern int copyin_kern(const vm_offset_t, char *, vm_size_t);
extern int copyoutphys_user(const char *, vm_offset_t, vm_size_t);
extern int copyoutphys_kern(const char *, vm_offset_t, vm_size_t);
extern int copyinphys_user(const vm_offset_t, char *, vm_size_t);
extern int copyinphys_kern(const vm_offset_t, char *, vm_size_t);
extern int copyinstr_user(const vm_offset_t, char *, vm_size_t, vm_size_t *);
extern int copyinstr_kern(const vm_offset_t, char *, vm_size_t, vm_size_t *);

static int copyio(int, user_addr_t, char *, vm_size_t, vm_size_t *, int);
static int copyio_phys(addr64_t, addr64_t, vm_size_t, int);


#define COPYIN          0
#define COPYOUT         1
#define COPYINSTR       2
#define COPYINPHYS      3
#define COPYOUTPHYS     4


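/*
 * inval_copy_windows - invalidate every copy window cached by 'thread'
 * and mark the copyio state WINDOWS_DIRTY so the next copy reloads its
 * window and flushes the tlb; used when the thread's pmap changes out
 * from under it (e.g. vfork/exec).
 */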
void inval_copy_windows(thread_t thread)
{
        int i;

        for (i = 0; i < NCOPY_WINDOWS; i++) {
                thread->machine.copy_window[i].user_base = -1;
        }
        thread->machine.nxt_window = 0;
        thread->machine.copyio_state = WINDOWS_DIRTY;

        KERNEL_DEBUG(0xeff70058 | DBG_FUNC_NONE, (int)thread, (int)thread->map, 0, 0, 0);
}


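/*
 * copyio - workhorse for all of the copyin/copyout variants.
 *   copy_type       one of COPYIN, COPYOUT, COPYINSTR, COPYINPHYS, COPYOUTPHYS
 *   user_addr       user (or kernel) virtual address of the far side of the copy
 *   kernel_addr     kernel buffer, or the physwindow address for the *PHYS cases
 *   nbytes          number of bytes to move
 *   lencopied       for COPYINSTR, returns the number of bytes actually copied
 *   use_kernel_map  treat user_addr as a kernel virtual address
 * returns 0 on success or an errno-style value (e.g. EFAULT, ENAMETOOLONG)
 */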
static int
copyio(int copy_type, user_addr_t user_addr, char *kernel_addr,
       vm_size_t nbytes, vm_size_t *lencopied, int use_kernel_map)
{
        thread_t    thread;
        pmap_t      pmap;
        pt_entry_t  *updp;
        pt_entry_t  *kpdp;
        user_addr_t user_base;
        vm_offset_t user_offset;
        vm_offset_t kern_vaddr;
        vm_size_t   cnt;
        vm_size_t   bytes_copied;
        int         error = 0;
        int         window_index;
        int         copyio_state;
        boolean_t   istate;
#if KDEBUG
        int         debug_type = 0xeff70010;
        debug_type += (copy_type << 2);
#endif

        thread = current_thread();

        KERNEL_DEBUG(debug_type | DBG_FUNC_START, (int)(user_addr >> 32), (int)user_addr,
                     (int)nbytes, thread->machine.copyio_state, 0);

        if (nbytes == 0) {
                KERNEL_DEBUG(debug_type | DBG_FUNC_END, (unsigned)user_addr,
                             (unsigned)kernel_addr, (unsigned)nbytes, 0, 0);
                return (0);
        }
        pmap = thread->map->pmap;

#if CONFIG_DTRACE
        thread->machine.specFlags |= CopyIOActive;
#endif /* CONFIG_DTRACE */

        if (pmap == kernel_pmap || use_kernel_map) {

                kern_vaddr = (vm_offset_t)user_addr;

                switch (copy_type) {

                case COPYIN:
                        error = copyin_kern(kern_vaddr, kernel_addr, nbytes);
                        break;

                case COPYOUT:
                        error = copyout_kern(kernel_addr, kern_vaddr, nbytes);
                        break;

                case COPYINSTR:
                        error = copyinstr_kern(kern_vaddr, kernel_addr, nbytes, lencopied);
                        break;

                case COPYINPHYS:
                        error = copyinphys_kern(kern_vaddr, kernel_addr, nbytes);
                        break;

                case COPYOUTPHYS:
                        error = copyoutphys_kern(kernel_addr, kern_vaddr, nbytes);
                        break;
                }
                KERNEL_DEBUG(debug_type | DBG_FUNC_END, (unsigned)kern_vaddr,
                             (unsigned)kernel_addr, (unsigned)nbytes,
                             error | 0x80000000, 0);

#if CONFIG_DTRACE
                thread->machine.specFlags &= ~CopyIOActive;
#endif /* CONFIG_DTRACE */

                return (error);
        }
        user_base   = user_addr & ~((user_addr_t)(NBPDE - 1));
        user_offset = user_addr & (NBPDE - 1);

        KERNEL_DEBUG(debug_type | DBG_FUNC_NONE, (int)(user_base >> 32), (int)user_base,
                     (int)user_offset, 0, 0);

        cnt = NBPDE - user_offset;

        if (cnt > nbytes)
                cnt = nbytes;

        istate = ml_set_interrupts_enabled(FALSE);

        copyio_state = thread->machine.copyio_state;
        thread->machine.copyio_state = WINDOWS_OPENED;

        (void) ml_set_interrupts_enabled(istate);


        for (;;) {

                for (window_index = 0; window_index < NCOPY_WINDOWS; window_index++) {
                        if (thread->machine.copy_window[window_index].user_base == user_base)
                                break;
                }
                if (window_index >= NCOPY_WINDOWS) {

                        window_index = thread->machine.nxt_window;
                        thread->machine.nxt_window++;

                        if (thread->machine.nxt_window >= NCOPY_WINDOWS)
                                thread->machine.nxt_window = 0;
                        thread->machine.copy_window[window_index].user_base = user_base;

                        /*
                         * it's necessary to disable pre-emption
                         * since I have to compute the kernel descriptor pointer
                         * for the new window
                         */
                        istate = ml_set_interrupts_enabled(FALSE);

                        updp = pmap_pde(pmap, user_base);

                        kpdp = current_cpu_datap()->cpu_copywindow_pdp;
                        kpdp += window_index;

                        pmap_store_pte(kpdp, updp ? *updp : 0);

                        (void) ml_set_interrupts_enabled(istate);

                        copyio_state = WINDOWS_DIRTY;

                        KERNEL_DEBUG(0xeff70040 | DBG_FUNC_NONE, window_index,
                                     (unsigned)user_base, (unsigned)updp,
                                     (unsigned)kpdp, 0);

                }
#if JOE_DEBUG
                else {
                        istate = ml_set_interrupts_enabled(FALSE);

                        updp = pmap_pde(pmap, user_base);

                        kpdp = current_cpu_datap()->cpu_copywindow_pdp;

                        kpdp += window_index;

                        if ((*kpdp & PG_FRAME) != (*updp & PG_FRAME)) {
                                panic("copyio: user pdp mismatch - kpdp = 0x%x, updp = 0x%x\n", kpdp, updp);
                        }
                        (void) ml_set_interrupts_enabled(istate);
                }
#endif
                if (copyio_state == WINDOWS_DIRTY) {
                        flush_tlb();

                        copyio_state = WINDOWS_CLEAN;

                        KERNEL_DEBUG(0xeff70054 | DBG_FUNC_NONE, window_index, 0, 0, 0, 0);
                }
                user_offset += (window_index * NBPDE);

                KERNEL_DEBUG(0xeff70044 | DBG_FUNC_NONE, (unsigned)user_offset,
                             (unsigned)kernel_addr, cnt, 0, 0);

                switch (copy_type) {

                case COPYIN:
                        error = copyin_user(user_offset, kernel_addr, cnt);
                        break;

                case COPYOUT:
                        error = copyout_user(kernel_addr, user_offset, cnt);
                        break;

                case COPYINPHYS:
                        error = copyinphys_user(user_offset, kernel_addr, cnt);
                        break;

                case COPYOUTPHYS:
                        error = copyoutphys_user(kernel_addr, user_offset, cnt);
                        break;

                case COPYINSTR:
                        error = copyinstr_user(user_offset, kernel_addr, cnt, &bytes_copied);

                        /*
                         * lencopied should be updated on success
                         * or ENAMETOOLONG...  but not EFAULT
                         */
                        if (error != EFAULT)
                                *lencopied += bytes_copied;

                        /*
                         * if we still have room, then the ENAMETOOLONG
                         * is just an artifact of the buffer straddling
                         * a window boundary and we should continue
                         */
                        if (error == ENAMETOOLONG && nbytes > cnt)
                                error = 0;

                        if (error) {
#if KDEBUG
                                nbytes = *lencopied;
#endif
                                break;
                        }
                        if (*(kernel_addr + bytes_copied - 1) == 0) {
                                /*
                                 * we found a NULL terminator... we're done
                                 */
#if KDEBUG
                                nbytes = *lencopied;
#endif
                                goto done;
                        }
                        if (cnt == nbytes) {
                                /*
                                 * no more room in the buffer and we haven't
                                 * yet come across a NULL terminator
                                 */
#if KDEBUG
                                nbytes = *lencopied;
#endif
                                error = ENAMETOOLONG;
                                break;
                        }
                        assert(cnt == bytes_copied);

                        break;
                }
                if (error)
                        break;
                if ((nbytes -= cnt) == 0)
                        break;

                kernel_addr += cnt;
                user_base   += NBPDE;
                user_offset  = 0;

                if (nbytes > NBPDE)
                        cnt = NBPDE;
                else
                        cnt = nbytes;
        }
done:
        thread->machine.copyio_state = WINDOWS_CLOSED;

        KERNEL_DEBUG(debug_type | DBG_FUNC_END, (unsigned)user_addr,
                     (unsigned)kernel_addr, (unsigned)nbytes, error, 0);

#if CONFIG_DTRACE
        thread->machine.specFlags &= ~CopyIOActive;
#endif /* CONFIG_DTRACE */

        return (error);
}


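/*
 * copyio_phys - copy between a physical page and a user/kernel virtual
 * address by mapping the physical page through the per-cpu physwindow
 * and handing the transfer off to copyio(); 'which' carries the cppv*
 * flags (cppvPsnk selects a physical sink, cppvKmap a kernel map).
 */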
static int
copyio_phys(addr64_t source, addr64_t sink, vm_size_t csize, int which)
{
        pmap_paddr_t paddr;
        user_addr_t  vaddr;
        char         *window_offset;
        pt_entry_t   pentry;
        int          ctype;
        int          retval;
        boolean_t    istate;

        if (which & cppvPsnk) {
                paddr  = (pmap_paddr_t)sink;
                vaddr  = (user_addr_t)source;
                ctype  = COPYINPHYS;
                pentry = (pt_entry_t)(INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_RW);
        } else {
                paddr  = (pmap_paddr_t)source;
                vaddr  = (user_addr_t)sink;
                ctype  = COPYOUTPHYS;
                pentry = (pt_entry_t)(INTEL_PTE_VALID | (paddr & PG_FRAME));
        }
        window_offset = (char *)((uint32_t)paddr & (PAGE_SIZE - 1));

        if (current_thread()->machine.physwindow_busy) {
                pt_entry_t old_pentry;

                KERNEL_DEBUG(0xeff70048 | DBG_FUNC_NONE, paddr, csize, 0, -1, 0);
                /*
                 * we had better be targeting wired memory at this point
                 * we will not be able to handle a fault with interrupts
                 * disabled... we disable them because we can't tolerate
                 * being preempted during this nested use of the window
                 */
                istate = ml_set_interrupts_enabled(FALSE);

                old_pentry = *(current_cpu_datap()->cpu_physwindow_ptep);
                pmap_store_pte((current_cpu_datap()->cpu_physwindow_ptep), pentry);

                invlpg((uintptr_t)current_cpu_datap()->cpu_physwindow_base);

                retval = copyio(ctype, vaddr, window_offset, csize, NULL, which & cppvKmap);

                pmap_store_pte((current_cpu_datap()->cpu_physwindow_ptep), old_pentry);

                invlpg((uintptr_t)current_cpu_datap()->cpu_physwindow_base);

                (void) ml_set_interrupts_enabled(istate);
        } else {
                /*
                 * mark the window as in use... if an interrupt hits while we're
                 * busy, or we trigger another copypv from the fault path into
                 * the driver on a user address space page fault due to a copyin/out
                 * then we need to save and restore the current window state instead
                 * of caching the window preserving it across context switches
                 */
                current_thread()->machine.physwindow_busy = 1;

                if (current_thread()->machine.physwindow_pte != pentry) {
                        KERNEL_DEBUG(0xeff70048 | DBG_FUNC_NONE, paddr, csize, 0, 0, 0);

                        current_thread()->machine.physwindow_pte = pentry;

                        /*
                         * preemption at this point would be bad since we
                         * could end up on the other processor after we grabbed the
                         * pointer to the current cpu data area, but before we finished
                         * using it to stuff the page table entry since we would
                         * be modifying a window that no longer belonged to us
                         * the invlpg can be done unprotected since it only flushes
                         * this page address from the tlb... if it flushes the wrong
                         * one, no harm is done, and the context switch that moved us
                         * to the other processor will have already taken care of
                         * flushing the tlb after it reloaded the page table from machine.physwindow_pte
                         */
                        istate = ml_set_interrupts_enabled(FALSE);
                        pmap_store_pte((current_cpu_datap()->cpu_physwindow_ptep), pentry);
                        (void) ml_set_interrupts_enabled(istate);

                        invlpg((uintptr_t)current_cpu_datap()->cpu_physwindow_base);
                }
#if JOE_DEBUG
                else {
                        if (pentry !=
                            (*(current_cpu_datap()->cpu_physwindow_ptep) & (INTEL_PTE_VALID | PG_FRAME | INTEL_PTE_RW)))
                                panic("copyio_phys: pentry != *physwindow_ptep");
                }
#endif
                retval = copyio(ctype, vaddr, window_offset, csize, NULL, which & cppvKmap);

                current_thread()->machine.physwindow_busy = 0;
        }
        return (retval);
}

int
copyinmsg(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
        return (copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0));
}

int
copyin(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
        return (copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0));
}

int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
        *lencopied = 0;

        return (copyio(COPYINSTR, user_addr, kernel_addr, nbytes, lencopied, 0));
}

int
copyoutmsg(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
        return (copyio(COPYOUT, user_addr, kernel_addr, nbytes, NULL, 0));
}

int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
        return (copyio(COPYOUT, user_addr, kernel_addr, nbytes, NULL, 0));
}


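/*
 * copypv - copy 'size' bytes between a source and sink that may each be
 * physical or virtual, as selected by the cppvPsrc/cppvPsnk bits in
 * 'which'; physical-to-physical copies go through bcopy_phys, mixed
 * copies through copyio_phys, a page (or less) at a time.
 */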
kern_return_t
copypv(addr64_t src64, addr64_t snk64, unsigned int size, int which)
{
        unsigned int lop, csize;
        int bothphys = 0;

        KERNEL_DEBUG(0xeff7004c | DBG_FUNC_START, (unsigned)src64,
                     (unsigned)snk64, size, which, 0);

        if ((which & (cppvPsrc | cppvPsnk)) == 0)                               /* Make sure that only one is virtual */
                panic("copypv: no more than 1 parameter may be virtual\n");     /* Not allowed */

        if ((which & (cppvPsrc | cppvPsnk)) == (cppvPsrc | cppvPsnk))
                bothphys = 1;                                                   /* both are physical */

        while (size) {

                if (bothphys) {
                        lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));            /* Assume sink smallest */

                        if (lop > (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1))))
                                lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));    /* No, source is smaller */
                } else {
                        /*
                         * only need to compute the resid for the physical page
                         * address... we don't care about where we start/finish in
                         * the virtual since we just call the normal copyin/copyout
                         */
                        if (which & cppvPsrc)
                                lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)));
                        else
                                lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1)));
                }
                csize = size;                   /* Assume we can copy it all */
                if (lop < size)
                        csize = lop;            /* Nope, we can't do it all */
#if 0
                /*
                 * flush_dcache64 is currently a nop on the i386...
                 * it's used when copying to non-system memory such
                 * as video capture cards... on PPC there was a need
                 * to flush due to how we mapped this memory... not
                 * sure if it's needed on i386.
                 */
                if (which & cppvFsrc)
                        flush_dcache64(src64, csize, 1);        /* If requested, flush source before move */
                if (which & cppvFsnk)
                        flush_dcache64(snk64, csize, 1);        /* If requested, flush sink before move */
#endif
                if (bothphys)
                        bcopy_phys(src64, snk64, csize);        /* Do a physical copy, virtually */
                else {
                        if (copyio_phys(src64, snk64, csize, which))
                                return (KERN_FAILURE);
                }
#if 0
                if (which & cppvFsrc)
                        flush_dcache64(src64, csize, 1);        /* If requested, flush source after move */
                if (which & cppvFsnk)
                        flush_dcache64(snk64, csize, 1);        /* If requested, flush sink after move */
#endif
                size  -= csize;                 /* Calculate what is left */
                snk64 += csize;                 /* Bump sink to next physical address */
                src64 += csize;                 /* Bump source to next physical address */
        }
        KERNEL_DEBUG(0xeff7004c | DBG_FUNC_END, (unsigned)src64,
                     (unsigned)snk64, size, which, 0);

        return KERN_SUCCESS;
}

#if !MACH_KDP
void
kdp_register_callout(void)
{
}
#endif