]> git.saurik.com Git - apple/xnu.git/blob - osfmk/arm64/loose_ends.c
1eec5310480f2c12ed2d6083045eb5cd919e554b
[apple/xnu.git] / osfmk / arm64 / loose_ends.c
1 /*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach_assert.h>
30 #include <mach/vm_types.h>
31 #include <mach/mach_time.h>
32 #include <kern/timer.h>
33 #include <kern/clock.h>
34 #include <kern/machine.h>
35 #include <mach/machine.h>
36 #include <mach/machine/vm_param.h>
37 #include <mach_kdp.h>
38 #include <kdp/kdp_udp.h>
39 #if !MACH_KDP
40 #include <kdp/kdp_callout.h>
41 #endif /* !MACH_KDP */
42 #include <arm/cpu_data.h>
43 #include <arm/cpu_data_internal.h>
44 #include <arm/caches_internal.h>
45
46 #include <vm/vm_kern.h>
47 #include <vm/vm_map.h>
48 #include <vm/pmap.h>
49
50 #include <arm/misc_protos.h>
51
52 #include <sys/errno.h>
53
54 #define INT_SIZE (BYTE_SIZE * sizeof (int))
55
/*
 * bcopy_phys: copy 'bytes' from physical address 'src' to physical
 * address 'dst'.
 *
 * Fast paths copy through the physical aperture (phystokv) when both
 * addresses qualify; otherwise each side is mapped through a per-CPU
 * copy window carrying the page's existing cache attributes, which
 * restricts the copy to a single page on each side.
 */
void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
	unsigned int src_index;
	unsigned int dst_index;
	vm_offset_t src_offset;
	vm_offset_t dst_offset;
	unsigned int wimg_bits_src, wimg_bits_dst;
	unsigned int cpu_num = 0;
	ppnum_t pn_src = (ppnum_t)(src >> PAGE_SHIFT);
	ppnum_t pn_dst = (ppnum_t)(dst >> PAGE_SHIFT);

#ifdef __ARM_COHERENT_IO__
	/*
	 * Coherent-I/O configurations: copy directly through the kernel's
	 * physical aperture when both addresses are pmap-valid and the
	 * destination passes the write-protection preflight.
	 */
	if (pmap_valid_address(src) &&
	    pmap_valid_address(dst) &&
	    (mmu_kvtop_wpreflight(phystokv((pmap_paddr_t) dst)))) {
		bcopy((char *)phystokv((pmap_paddr_t) src), (char *)phystokv((pmap_paddr_t) dst), bytes);
		return;
	}
#endif

	wimg_bits_src = pmap_cache_attributes(pn_src);
	wimg_bits_dst = pmap_cache_attributes(pn_dst);

#ifndef __ARM_COHERENT_IO__
	if (((wimg_bits_src & VM_WIMG_MASK) == VM_WIMG_DEFAULT) &&
	    ((wimg_bits_dst & VM_WIMG_MASK) == VM_WIMG_DEFAULT) &&
	    (mmu_kvtop_wpreflight(phystokv((pmap_paddr_t) dst)))) {
		/* Fast path - dst is writable and both source and destination have default attributes */
		bcopy((char *)phystokv((pmap_paddr_t) src), (char *)phystokv((pmap_paddr_t) dst), bytes);
		return;
	}
#endif

	src_offset = src & PAGE_MASK;
	dst_offset = dst & PAGE_MASK;

	/* Each side maps exactly one page; a spanning copy cannot be honored. */
	if ((src_offset + bytes) > PAGE_SIZE || (dst_offset + bytes) > PAGE_SIZE)
		panic("bcopy extends beyond copy windows");

	/* Copy windows are per-CPU; preemption stays disabled while they are mapped. */
	mp_disable_preemption();
	cpu_num = cpu_number();
	src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
	dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ|VM_PROT_WRITE, wimg_bits_dst);

	bcopy((char *)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset),
	      (char *)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset),
	      bytes);

	pmap_unmap_cpu_windows_copy(src_index);
	pmap_unmap_cpu_windows_copy(dst_index);
	mp_enable_preemption();
}
109
/*
 * Zero bytes at a physical address, non-cached variant.
 * Currently delegates to bzero_phys(); there is no separate uncached
 * path on this configuration.
 */
void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}
115
116 /* Zero bytes starting at a physical address */
117 void
118 bzero_phys(addr64_t src, vm_size_t bytes)
119 {
120 unsigned int wimg_bits;
121 unsigned int cpu_num = cpu_number();
122 ppnum_t pn = (ppnum_t)(src >> PAGE_SHIFT);
123
124 #ifdef __ARM_COHERENT_IO__
125 if (pmap_valid_address(src)) {
126 bzero((char *)phystokv((pmap_paddr_t) src), bytes);
127 return;
128 }
129 #endif
130
131 wimg_bits = pmap_cache_attributes(pn);
132
133 #ifndef __ARM_COHERENT_IO__
134 if ((wimg_bits & VM_WIMG_MASK) == VM_WIMG_DEFAULT) {
135 /* Fast path - default attributes */
136 bzero((char *)phystokv((pmap_paddr_t) src), bytes);
137 return;
138 }
139 #endif
140
141 mp_disable_preemption();
142 cpu_num = cpu_number();
143
144 while (bytes > 0) {
145 vm_offset_t offset = src & PAGE_MASK;
146 uint64_t count = PAGE_SIZE - offset;
147
148 if (count > bytes)
149 count = bytes;
150
151 pn = (ppnum_t)(src >> PAGE_SHIFT);
152
153 unsigned int index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
154
155 bzero((char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset), count);
156
157 pmap_unmap_cpu_windows_copy(index);
158
159 src += count;
160 bytes -= count;
161 }
162
163 mp_enable_preemption();
164 }
165
/*
 * Read data from a physical address.
 *
 * ml_phys_read_data: read a 1-, 2-, 4-, or 8-byte quantity from
 * physical address 'paddr' and return it zero-extended to 64 bits.
 * Panics on any other 'size'.  Reads go through the physical aperture
 * when possible (coherent-I/O configurations, pmap-valid addresses),
 * otherwise through a per-CPU copy window mapped with the page's
 * cache attributes.
 */
static unsigned long long
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	unsigned int index;
	unsigned int wimg_bits;
	ppnum_t pn = (ppnum_t)(paddr >> PAGE_SHIFT);
	unsigned long long result = 0;
	vm_offset_t copywindow_vaddr = 0;
	unsigned char s1;
	unsigned short s2;
	unsigned int s4;

#ifdef __ARM_COHERENT_IO__
	if (pmap_valid_address(paddr)) {
		/* Direct volatile access through the physical aperture. */
		switch (size) {
		case 1:
			s1 = *(volatile unsigned char *)phystokv(paddr);
			result = s1;
			break;
		case 2:
			s2 = *(volatile unsigned short *)phystokv(paddr);
			result = s2;
			break;
		case 4:
			s4 = *(volatile unsigned int *)phystokv(paddr);
			result = s4;
			break;
		case 8:
			result = *(volatile unsigned long long *)phystokv(paddr);
			break;
		default:
			panic("Invalid size %d for ml_phys_read_data\n", size);
			break;
		}
		return result;
	}
#endif

	/* Copy windows are per-CPU; preemption stays disabled while mapped. */
	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)copywindow_vaddr;
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)copywindow_vaddr;
		result = s2;
		break;
	case 4:
		s4 = *(volatile unsigned int *)copywindow_vaddr;
		result = s4;
		break;
	case 8:
		result = *(volatile unsigned long long*)copywindow_vaddr;
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data\n", size);
		break;

	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();

	return result;
}
241
242 unsigned int ml_phys_read( vm_offset_t paddr)
243 {
244 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
245 }
246
247 unsigned int ml_phys_read_word(vm_offset_t paddr) {
248
249 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
250 }
251
252 unsigned int ml_phys_read_64(addr64_t paddr64)
253 {
254 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
255 }
256
257 unsigned int ml_phys_read_word_64(addr64_t paddr64)
258 {
259 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
260 }
261
262 unsigned int ml_phys_read_half(vm_offset_t paddr)
263 {
264 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2);
265 }
266
267 unsigned int ml_phys_read_half_64(addr64_t paddr64)
268 {
269 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2);
270 }
271
272 unsigned int ml_phys_read_byte(vm_offset_t paddr)
273 {
274 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1);
275 }
276
277 unsigned int ml_phys_read_byte_64(addr64_t paddr64)
278 {
279 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1);
280 }
281
282 unsigned long long ml_phys_read_double(vm_offset_t paddr)
283 {
284 return ml_phys_read_data((pmap_paddr_t)paddr, 8);
285 }
286
287 unsigned long long ml_phys_read_double_64(addr64_t paddr64)
288 {
289 return ml_phys_read_data((pmap_paddr_t)paddr64, 8);
290 }
291
292
293
/*
 * Write data to a physical address.
 *
 * ml_phys_write_data: store the low 1, 2, 4, or 8 bytes of 'data' at
 * physical address 'paddr'; panics on any other 'size'.  Writes go
 * through the physical aperture when possible (coherent-I/O
 * configurations, pmap-valid addresses), otherwise through a per-CPU
 * copy window mapped with the page's cache attributes.
 */
static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long long data, int size)
{
	unsigned int index;
	unsigned int wimg_bits;
	ppnum_t pn = (ppnum_t)(paddr >> PAGE_SHIFT);
	vm_offset_t copywindow_vaddr = 0;

#ifdef __ARM_COHERENT_IO__
	if (pmap_valid_address(paddr)) {
		/* Direct volatile store through the physical aperture. */
		switch (size) {
		case 1:
			*(volatile unsigned char *)phystokv(paddr) = (unsigned char)data;
			return;
		case 2:
			*(volatile unsigned short *)phystokv(paddr) = (unsigned short)data;
			return;
		case 4:
			*(volatile unsigned int *)phystokv(paddr) = (unsigned int)data;
			return;
		case 8:
			*(volatile unsigned long long *)phystokv(paddr) = data;
			return;
		default:
			panic("Invalid size %d for ml_phys_write_data\n", size);
		}
	}
#endif

	/* Copy windows are per-CPU; preemption stays disabled while mapped. */
	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ|VM_PROT_WRITE, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		*(volatile unsigned char *)(copywindow_vaddr) =
		    (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)(copywindow_vaddr) =
		    (unsigned short)data;
		break;
	case 4:
		*(volatile unsigned int *)(copywindow_vaddr) =
		    (uint32_t)data;
		break;
	case 8:
		*(volatile unsigned long long *)(copywindow_vaddr) =
		    (unsigned long long)data;
		break;
	default:
		panic("Invalid size %d for ml_phys_write_data\n", size);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}
357
358 void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
359 {
360 ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
361 }
362
363 void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
364 {
365 ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
366 }
367
368 void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
369 {
370 ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
371 }
372
373 void ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
374 {
375 ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
376 }
377
378 void ml_phys_write(vm_offset_t paddr, unsigned int data)
379 {
380 ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
381 }
382
383 void ml_phys_write_64(addr64_t paddr64, unsigned int data)
384 {
385 ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
386 }
387
388 void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
389 {
390 ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
391 }
392
393 void ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
394 {
395 ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
396 }
397
398 void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
399 {
400 ml_phys_write_data((pmap_paddr_t)paddr, data, 8);
401 }
402
403 void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
404 {
405 ml_phys_write_data((pmap_paddr_t)paddr64, data, 8);
406 }
407
408
409 /*
410 * Set indicated bit in bit string.
411 */
412 void
413 setbit(int bitno, int *s)
414 {
415 s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
416 }
417
418 /*
419 * Clear indicated bit in bit string.
420 */
421 void
422 clrbit(int bitno, int *s)
423 {
424 s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
425 }
426
427 /*
428 * Test if indicated bit is set in bit string.
429 */
430 int
431 testbit(int bitno, int *s)
432 {
433 return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
434 }
435
436 /*
437 * Find first bit set in bit string.
438 */
439 int
440 ffsbit(int *s)
441 {
442 int offset;
443
444 for (offset = 0; !*s; offset += INT_SIZE, ++s);
445 return offset + __builtin_ctz(*s);
446 }
447
/*
 * Find first (lowest, 1-based) set bit; 0 when no bit is set.
 */
int
ffs(unsigned int mask)
{
	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'; derive the answer from count-trailing-zeros instead.
	 */
	return (mask == 0) ? 0 : 1 + __builtin_ctz(mask);
}
460
/*
 * Find first (lowest, 1-based) set bit in a 64-bit mask; 0 when none.
 */
int
ffsll(unsigned long long mask)
{
	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'; derive the answer from count-trailing-zeros instead.
	 */
	return (mask == 0) ? 0 : 1 + __builtin_ctzll(mask);
}
473
/*
 * Find last (highest, 1-based) set bit; 0 when no bit is set.
 */
int
fls(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	/* Bit width of the mask minus the number of leading zeros. */
	return (int)(sizeof(mask) * 8) - __builtin_clz(mask);
}
485
/*
 * Find last (highest, 1-based) set bit in a 64-bit mask; 0 when none.
 */
int
flsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	/* Bit width of the mask minus the number of leading zeros. */
	return (int)(sizeof(mask) * 8) - __builtin_clzll(mask);
}
494
#undef bcmp
/*
 * Compare two byte strings: returns zero iff the first 'len' bytes are
 * identical.  On mismatch the (nonzero) count of bytes not yet fully
 * compared is returned, clamped so it cannot truncate to zero when
 * narrowed to int.
 */
int
bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char *a = (const char *) pa;
	const char *b = (const char *) pb;

	while (len != 0) {
		if (*a++ != *b++) {
			break;
		}
		len--;
	}

	/*
	 * Check for the overflow case but continue to handle the non-overflow
	 * case the same way just in case someone is using the return value
	 * as more than zero/non-zero
	 */
	if ((len & 0xFFFFFFFF00000000ULL) && !(len & 0x00000000FFFFFFFFULL)) {
		return 0xFFFFFFFFL;
	}
	return (int)len;
}
523
#undef memcmp
/*
 * Compare two memory regions byte by byte.  Returns zero when equal,
 * otherwise the difference between the first pair of mismatching bytes
 * (interpreted as unsigned char).
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char *p1 = s1;
	const unsigned char *p2 = s2;

	while (n-- != 0) {
		if (*p1 != *p2) {
			return *p1 - *p2;
		}
		p1++;
		p2++;
	}
	return 0;
}
538
/*
 * copypv: copy 'size' bytes between a source and sink, either of which
 * may be physical or virtual according to the cppv* flag bits in
 * 'which' (cppvPsrc/cppvPsnk mark a side physical, cppvKmap selects
 * the kernel map for a virtual side, cppvFsrc/cppvFsnk force a cache
 * flush on non-coherent configurations).  At least one side must be
 * physical.  Returns the copyin/copyout status for user-map copies,
 * KERN_SUCCESS otherwise.
 */
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	kern_return_t retval = KERN_SUCCESS;
	void *from, *to;
#ifndef __ARM_COHERENT_IO__
	unsigned int from_wimg_bits, to_wimg_bits;
#endif

	from = CAST_DOWN(void *, source);
	to = CAST_DOWN(void *, sink);

	if ((which & (cppvPsrc | cppvPsnk)) == 0) /* Make sure that only
						   * one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */

	/* Physical sides are accessed through the physical aperture. */
	if (which & cppvPsrc)
		from = (void *)phystokv(from);
	if (which & cppvPsnk)
		to = (void *)phystokv(to);

	if ((which & (cppvPsrc | cppvKmap)) == 0) /* Source is virtual in
						   * current map */
		retval = copyin((user_addr_t) from, to, size);
	else if ((which & (cppvPsnk | cppvKmap)) == 0) /* Sink is virtual in
						        * current map */
		retval = copyout(from, (user_addr_t) to, size);
	else /* both addresses are physical or kernel map */
		bcopy(from, to, size);

#ifndef __ARM_COHERENT_IO__
	/*
	 * Without coherent I/O, flush the data cache when explicitly
	 * requested, or when a physical side's cache attributes make the
	 * copy otherwise invisible to non-cache-coherent observers.
	 */
	if (which & cppvFsrc) {
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
	} else if (which & cppvPsrc) {
		from_wimg_bits = pmap_cache_attributes(source >> PAGE_SHIFT);
		if ((from_wimg_bits != VM_WIMG_COPYBACK) && (from_wimg_bits != VM_WIMG_WTHRU))
			flush_dcache64(source, size, TRUE);
	}

	if (which & cppvFsnk) {
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
	} else if (which & cppvPsnk) {
		to_wimg_bits = pmap_cache_attributes(sink >> PAGE_SHIFT);
		if (to_wimg_bits != VM_WIMG_COPYBACK)
			flush_dcache64(sink, size, TRUE);
	}
#endif
	return retval;
}
588
589
590 #if MACH_ASSERT
591
592 extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
593
/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 *
 * Captures the USER call stack of the current thread by walking the
 * saved frame-pointer chain with copyinframe(); handles both arm64 and
 * arm32 saved states.  Unused trailing slots (and the whole buffer when
 * no user state exists) are zero-filled.
 */
void
machine_callstack(
	uintptr_t * buf,
	vm_size_t callstack_max)
{
	/* Captures the USER call stack */
	uint32_t i=0;

	struct arm_saved_state *state = find_user_regs(current_thread());

	if (!state) {
		/* No user register state: report an empty (all-zero) stack. */
		while (i<callstack_max)
			buf[i++] = 0;
	} else {
		if (is_saved_state64(state)) {
			uint64_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			/* frame[0] = caller's fp, frame[1] = return address. */
			frame[0] = get_saved_state_fp(state);
			while (i<callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*) frame, TRUE))
					break;
				buf[i++] = (uintptr_t)frame[1];
			}
		}
		else {
			uint32_t frame[2];
			buf[i++] = (uintptr_t)get_saved_state_pc(state);
			frame[0] = (uint32_t)get_saved_state_fp(state);
			while (i<callstack_max && frame[0] != 0) {
				if (copyinframe(frame[0], (void*) frame, FALSE))
					break;
				buf[i++] = (uintptr_t)frame[1];
			}
		}

		/* Zero-fill whatever the walk did not reach. */
		while (i<callstack_max)
			buf[i++] = 0;
	}
}
637
638 #endif /* MACH_ASSERT */
639
/*
 * Unimplemented on this architecture; panics if ever called.
 */
int
clr_be_bit(void)
{
	panic("clr_be_bit");
	return 0;
}
646
/*
 * Stub: unimplemented on arm64; panics if called.  Parameters unused.
 */
boolean_t
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}
655
/*
 * Stub: unimplemented on arm64; panics if called.  Parameters unused.
 */
boolean_t
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}
664
665
/*
 * Intentional no-op on this architecture; all parameters are ignored.
 */
void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	// <rdar://problem/7141284>: Reduce print noise
	// kprintf("ml_thread_policy() unimplemented\n");
}
675
/*
 * Common trap for entry points that have no arm64 implementation yet.
 * Prototyped as (void): an empty parameter list declares a function
 * with no prototype, which is obsolescent C.
 */
void
panic_unimplemented(void)
{
	panic("Not yet implemented.");
}
681
/* ARM64_TODO <rdar://problem/9198953> */
void abort(void);

/*
 * Kernel abort(): panics.  The definition now matches the (void)
 * prototype above; an empty parameter list in the definition declares
 * an unprototyped function, which is obsolescent C.
 */
void
abort(void)
{
	panic("Abort.");
}
690
691
692 #if !MACH_KDP
/*
 * Stub registration hook: a no-op when the KDP debugger is not built
 * into the kernel (see the enclosing #if !MACH_KDP).
 */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
698 #endif
699