/*
 * (scrape artifact neutralized: gitweb blob header for
 *  apple/xnu xnu-4903.270.47 — osfmk/arm/loose_ends.c)
 */
1 /*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach_assert.h>
30 #include <mach/vm_types.h>
31 #include <mach/mach_time.h>
32 #include <kern/timer.h>
33 #include <kern/clock.h>
34 #include <kern/machine.h>
35 #include <mach/machine.h>
36 #include <mach/machine/vm_param.h>
37 #include <mach_kdp.h>
38 #include <kdp/kdp_udp.h>
39 #if !MACH_KDP
40 #include <kdp/kdp_callout.h>
41 #endif /* !MACH_KDP */
42 #include <arm/cpu_data.h>
43 #include <arm/cpu_data_internal.h>
44 #include <arm/caches_internal.h>
45
46 #include <vm/vm_kern.h>
47 #include <vm/vm_map.h>
48 #include <vm/pmap.h>
49
50 #include <arm/misc_protos.h>
51
52 #include <sys/errno.h>
53
54 #define INT_SIZE (BYTE_SIZE * sizeof (int))
55
56
/*
 * bcopy_phys: copy 'bytes' bytes from physical address 'src' to physical
 * address 'dst'.
 *
 * Fast path: when both pages carry default (cacheable) WIMG attributes and
 * the destination is writable through the kernel physical aperture, copy
 * directly via phystokv().
 *
 * Slow path: map the source and destination pages through per-CPU copy
 * windows.  Preemption is disabled so the windows (and cpu_number()) stay
 * valid for the duration; in this mode neither range may cross a page
 * boundary, since only one page per side is mapped.
 */
void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
	unsigned int src_index;
	unsigned int dst_index;
	vm_offset_t src_offset;
	vm_offset_t dst_offset;
	unsigned int cpu_num;
	unsigned int wimg_bits_src, wimg_bits_dst;
	ppnum_t pn_src = (src >> PAGE_SHIFT);
	ppnum_t pn_dst = (dst >> PAGE_SHIFT);

	wimg_bits_src = pmap_cache_attributes(pn_src);
	wimg_bits_dst = pmap_cache_attributes(pn_dst);

	/* Writability preflight only needs to check the destination mapping. */
	if (mmu_kvtop_wpreflight(phystokv((pmap_paddr_t) dst)) &&
	    ((wimg_bits_src & VM_WIMG_MASK) == VM_WIMG_DEFAULT) &&
	    ((wimg_bits_dst & VM_WIMG_MASK) == VM_WIMG_DEFAULT)) {
		/* Fast path - dst is writable and both source and destination have default attributes */
		bcopy((char *)phystokv((pmap_paddr_t) src), (char *)phystokv((pmap_paddr_t) dst), bytes);
		return;
	}

	src_offset = src & PAGE_MASK;
	dst_offset = dst & PAGE_MASK;

	/* One copy window per side: the transfer may not span a page on either end. */
	if ((src_offset + bytes) > PAGE_SIZE || (dst_offset + bytes) > PAGE_SIZE) {
		panic("bcopy extends beyond copy windows");
	}

	mp_disable_preemption();
	cpu_num = cpu_number();
	src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
	dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);

	bcopy((char *)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset),
	    (char *)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset),
	    bytes);

	pmap_unmap_cpu_windows_copy(src_index);
	pmap_unmap_cpu_windows_copy(dst_index);
	mp_enable_preemption();
}
100
/*
 * Zero a physical range, nominally non-cached.
 *
 * NOTE(review): despite the "_nc" (non-cached) suffix, this simply forwards
 * to bzero_phys() on this platform — there is no separate uncached path here.
 */
void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}
106
/*
 * Zero 'bytes' bytes starting at physical address 'src'.
 *
 * Fast path zeroes through the kernel physical aperture when the page has
 * default (cacheable) attributes; otherwise each page is mapped through a
 * per-CPU copy window (preemption disabled) and zeroed there.
 *
 * NOTE(review): the WIMG attributes are sampled once from the first page and
 * reused for every page in the range — confirm callers never span pages with
 * mixed cache attributes.
 */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
	unsigned int wimg_bits;
	ppnum_t pn = (src >> PAGE_SHIFT);

	wimg_bits = pmap_cache_attributes(pn);
	if ((wimg_bits & VM_WIMG_MASK) == VM_WIMG_DEFAULT) {
		/* Fast path - default attributes */
		bzero((char *)phystokv((pmap_paddr_t) src), bytes);
	} else {
		mp_disable_preemption();

		unsigned int cpu_num = cpu_number();

		/* Page-by-page: each iteration zeroes at most the remainder of
		 * the current page through a freshly mapped copy window. */
		while (bytes > 0) {
			vm_offset_t offset = src & PAGE_MASK;
			uint32_t count = PAGE_SIZE - offset;

			if (count > bytes) {
				count = bytes;
			}

			unsigned int index = pmap_map_cpu_windows_copy(src >> PAGE_SHIFT, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);

			bzero((char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset), count);

			pmap_unmap_cpu_windows_copy(index);

			src += count;
			bytes -= count;
		}

		mp_enable_preemption();
	}
}
144
145 /*
146 * Read data from a physical address.
147 */
148
149
150 static unsigned int
151 ml_phys_read_data(pmap_paddr_t paddr, int size)
152 {
153 unsigned int index;
154 unsigned int result;
155 unsigned int wimg_bits;
156 ppnum_t pn = (paddr >> PAGE_SHIFT);
157 unsigned char s1;
158 unsigned short s2;
159 vm_offset_t copywindow_vaddr = 0;
160
161 mp_disable_preemption();
162 wimg_bits = pmap_cache_attributes(pn);
163 index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
164 copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);;
165
166 switch (size) {
167 case 1:
168 s1 = *(volatile unsigned char *)(copywindow_vaddr);
169 result = s1;
170 break;
171 case 2:
172 s2 = *(volatile unsigned short *)(copywindow_vaddr);
173 result = s2;
174 break;
175 case 4:
176 default:
177 result = *(volatile unsigned int *)(copywindow_vaddr);
178 break;
179 }
180
181 pmap_unmap_cpu_windows_copy(index);
182 mp_enable_preemption();
183
184 return result;
185 }
186
187 static unsigned long long
188 ml_phys_read_long_long(pmap_paddr_t paddr)
189 {
190 unsigned int index;
191 unsigned int result;
192 unsigned int wimg_bits;
193 ppnum_t pn = (paddr >> PAGE_SHIFT);
194
195 mp_disable_preemption();
196 wimg_bits = pmap_cache_attributes(pn);
197 index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
198
199 result = *(volatile unsigned long long *)(pmap_cpu_windows_copy_addr(cpu_number(), index)
200 | ((uint32_t)paddr & PAGE_MASK));
201
202 pmap_unmap_cpu_windows_copy(index);
203 mp_enable_preemption();
204
205 return result;
206 }
207
/*
 * Physical-read accessor family.  Thin forwarders to ml_phys_read_data() /
 * ml_phys_read_long_long().  The "_64" suffix denotes a 64-bit *address*
 * argument (addr64_t), not a wider data access: the data width is fixed by
 * the function name (byte/half/word/double).
 */

/* Read a 32-bit word at a 32-bit physical address. */
unsigned int
ml_phys_read( vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

/* Read a 32-bit word at a 32-bit physical address. */
unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

/* Read a 32-bit word at a 64-bit physical address. */
unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

/* Read a 32-bit word at a 64-bit physical address. */
unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

/* Read a 16-bit halfword at a 32-bit physical address. */
unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

/* Read a 16-bit halfword at a 64-bit physical address. */
unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

/* Read an 8-bit byte at a 32-bit physical address. */
unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

/* Read an 8-bit byte at a 64-bit physical address. */
unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

/* Read a 64-bit doubleword at a 32-bit physical address. */
unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr);
}

/* Read a 64-bit doubleword at a 64-bit physical address. */
unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr64);
}
267
268
269
/*
 * Write a 1-, 2-, or 4-byte quantity to a physical address.
 *
 * The containing page is mapped read/write through a per-CPU copy window;
 * preemption is disabled so the window (and cpu_number()) remain valid
 * across the access.  'size' is 1, 2, or 4; any other value is treated
 * as 4.  'data' is truncated to the access width.
 */
static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
{
	unsigned int index;
	unsigned int wimg_bits;
	ppnum_t pn = (paddr >> PAGE_SHIFT);
	vm_offset_t copywindow_vaddr = 0;

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
	/* Window base for the page OR'd with the intra-page offset. */
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t) paddr & PAGE_MASK);

	switch (size) {
	case 1:
		*(volatile unsigned char *)(copywindow_vaddr) = (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)(copywindow_vaddr) = (unsigned short)data;
		break;
	case 4:
	default:
		*(volatile unsigned int *)(copywindow_vaddr) = (uint32_t)data;
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}
303
/*
 * Write an 8-byte quantity to a physical address through a per-CPU copy
 * window (preemption disabled while the window is mapped).  The access
 * must not cross a page boundary.
 */
static void
ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
{
	unsigned int index;
	unsigned int wimg_bits;
	ppnum_t pn = (paddr >> PAGE_SHIFT);

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);

	*(volatile unsigned long long *)(pmap_cpu_windows_copy_addr(cpu_number(), index)
	| ((uint32_t)paddr & PAGE_MASK)) = data;

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}
321
322
323
/*
 * Physical-write accessor family.  Thin forwarders to ml_phys_write_data() /
 * ml_phys_write_long_long().  As with the read accessors, the "_64" suffix
 * denotes a 64-bit *address* argument, not a wider data access; 'data' is
 * truncated to the named width (byte/half/word).
 */

/* Write the low 8 bits of 'data' at a 32-bit physical address. */
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

/* Write the low 8 bits of 'data' at a 64-bit physical address. */
void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

/* Write the low 16 bits of 'data' at a 32-bit physical address. */
void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

/* Write the low 16 bits of 'data' at a 64-bit physical address. */
void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

/* Write a 32-bit word at a 32-bit physical address. */
void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

/* Write a 32-bit word at a 64-bit physical address. */
void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

/* Write a 32-bit word at a 32-bit physical address. */
void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

/* Write a 32-bit word at a 64-bit physical address. */
void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

/* Write a 64-bit doubleword at a 32-bit physical address. */
void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr, data);
}

/* Write a 64-bit doubleword at a 64-bit physical address. */
void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr64, data);
}
383
384
385 /*
386 * Set indicated bit in bit string.
387 */
388 void
389 setbit(int bitno, int *s)
390 {
391 s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
392 }
393
394 /*
395 * Clear indicated bit in bit string.
396 */
397 void
398 clrbit(int bitno, int *s)
399 {
400 s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
401 }
402
403 /*
404 * Test if indicated bit is set in bit string.
405 */
406 int
407 testbit(int bitno, int *s)
408 {
409 return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
410 }
411
412 /*
413 * Find first bit set in bit string.
414 */
415 int
416 ffsbit(int *s)
417 {
418 int offset;
419
420 for (offset = 0; !*s; offset += INT_SIZE, ++s) {
421 ;
422 }
423 return offset + __builtin_ctz(*s);
424 }
425
/*
 * Find-first-set: return the 1-based index of the least significant set
 * bit in 'mask', or 0 if no bits are set.
 */
int
ffs(unsigned int mask)
{
	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return (mask == 0) ? 0 : 1 + __builtin_ctz(mask);
}
439
/*
 * 64-bit find-first-set: return the 1-based index of the least significant
 * set bit in 'mask', or 0 if no bits are set.
 */
int
ffsll(unsigned long long mask)
{
	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return (mask == 0) ? 0 : 1 + __builtin_ctzll(mask);
}
453
/*
 * Find-last-set: return the 1-based index of the most significant set bit
 * in 'mask', or 0 if no bits are set.
 */
int
fls(unsigned int mask)
{
	if (mask != 0) {
		/* bit width of the type minus the leading-zero count */
		return (int)(sizeof(mask) * 8) - __builtin_clz(mask);
	}
	return 0;
}
466
/*
 * 64-bit find-last-set: return the 1-based index of the most significant
 * set bit in 'mask', or 0 if no bits are set.
 */
int
flsll(unsigned long long mask)
{
	if (mask != 0) {
		/* bit width of the type minus the leading-zero count */
		return (int)(sizeof(mask) * 8) - __builtin_clzll(mask);
	}
	return 0;
}
476
/*
 * Historical byte comparison: returns 0 if the first 'len' bytes of 'pa'
 * and 'pb' are identical, and a non-zero value (the count of bytes not yet
 * consumed when the first mismatch was found) otherwise.
 */
int
bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char *a = (const char *) pa;
	const char *b = (const char *) pb;

	while (len != 0) {
		if (*a++ != *b++) {
			/* mismatch: 'len' still counts this byte */
			break;
		}
		len--;
	}

	return len;
}
498
/*
 * Standard memcmp: compare the first 'n' bytes of 's1' and 's2' as
 * unsigned chars.  Returns 0 if equal, otherwise the (signed) difference
 * of the first mismatching byte pair.
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char *a = s1;
	const unsigned char *b = s2;

	for (; n != 0; n--, a++, b++) {
		if (*a != *b) {
			return *a - *b;
		}
	}
	return 0;
}
513
/*
 * copypv: copy between a physical and a virtual address (or two physical /
 * kernel addresses), selected by the cppv* flag bits in 'which':
 *   cppvPsrc / cppvPsnk - source / sink is a physical address
 *   cppvKmap            - virtual addresses are kernel-map, not current map
 *   cppvFsrc / cppvFsnk - force a dcache flush of source / sink afterwards
 * At least one side must be physical.  After the copy, caches are flushed
 * either when forced or when a physical side has non-writeback/writethrough
 * attributes.
 */
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	kern_return_t retval = KERN_SUCCESS;
	void *from, *to;
	unsigned int from_wimg_bits, to_wimg_bits;

	from = CAST_DOWN(void *, source);
	to = CAST_DOWN(void *, sink);

	if ((which & (cppvPsrc | cppvPsnk)) == 0) {     /* Make sure that only
	                                                 * one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */
	}
	/* Translate physical sides through the kernel physical aperture. */
	if (which & cppvPsrc) {
		from = (void *)phystokv((pmap_paddr_t)from);
	}
	if (which & cppvPsnk) {
		to = (void *)phystokv((pmap_paddr_t)to);
	}

	if ((which & (cppvPsrc | cppvKmap)) == 0) {     /* Source is virtual in
	                                                 * current map */
		retval = copyin((user_addr_t) from, to, size);
	} else if ((which & (cppvPsnk | cppvKmap)) == 0) {      /* Sink is virtual in
	                                                         * current map */
		retval = copyout(from, (user_addr_t) to, size);
	} else {        /* both addresses are physical or kernel map */
		bcopy(from, to, size);
	}

	/* Source-side cache maintenance: forced, or required by attributes. */
	if (which & cppvFsrc) {
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
	} else if (which & cppvPsrc) {
		from_wimg_bits = pmap_cache_attributes(source >> PAGE_SHIFT);
		if ((from_wimg_bits != VM_WIMG_COPYBACK) && (from_wimg_bits != VM_WIMG_WTHRU)) {
			flush_dcache64(source, size, TRUE);
		}
	}

	/* Sink-side cache maintenance. */
	if (which & cppvFsnk) {
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
	} else if (which & cppvPsnk) {
		to_wimg_bits = pmap_cache_attributes(sink >> PAGE_SHIFT);
		if (to_wimg_bits != VM_WIMG_COPYBACK) {
			flush_dcache64(sink, size, TRUE);
		}
	}
	return retval;
}
564
/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.  (64 MiB.)
 */
const int copysize_limit_panic = (64 * 1024 * 1024);
574
/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 * Called when nbytes is "large" e.g. more than a page. Such sizes are
 * infrequent, and very large sizes are likely indications of attempts
 * to exploit kernel programming errors (bugs).
 *
 * Kernel-side violations (address outside the kernel range, or arithmetic
 * wrap of kernel_addr + nbytes) panic; user-side violations return EFAULT
 * so the caller can fail the copy gracefully; oversize transfers panic.
 */
static int
copy_validate(const user_addr_t user_addr,
    uintptr_t kernel_addr, vm_size_t nbytes)
{
	uintptr_t kernel_addr_last = kernel_addr + nbytes;

	/* kernel_addr_last < kernel_addr detects overflow of the addition. */
	if (__improbable(kernel_addr < VM_MIN_KERNEL_ADDRESS ||
	    kernel_addr > VM_MAX_KERNEL_ADDRESS ||
	    kernel_addr_last < kernel_addr ||
	    kernel_addr_last > VM_MAX_KERNEL_ADDRESS)) {
		panic("%s(%p, %p, %u) - kaddr not in kernel", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);
	}

	user_addr_t user_addr_last = user_addr + nbytes;

	/* User range must not wrap and must lie within the current map. */
	if (__improbable((user_addr_last < user_addr) || ((user_addr + nbytes) > vm_map_max(current_thread()->map)) ||
	    (user_addr < vm_map_min(current_thread()->map)))) {
		return EFAULT;
	}

	if (__improbable(nbytes > copysize_limit_panic)) {
		panic("%s(%p, %p, %u) - transfer too large", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);
	}

	return 0;
}
610
/* Validate a copyin request; returns 0 or EFAULT (panics on kernel-side abuse). */
int
copyin_validate(const user_addr_t ua, uintptr_t ka, vm_size_t nbytes)
{
	return copy_validate(ua, ka, nbytes);
}
616
/* Validate a copyout request; same checks as copyin_validate, args swapped. */
int
copyout_validate(uintptr_t ka, const user_addr_t ua, vm_size_t nbytes)
{
	return copy_validate(ua, ka, nbytes);
}
622
623 #if MACH_ASSERT
624
625 extern int copyinframe(vm_address_t fp, char *frame);
626
/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 *
 * Walks the *user* call stack of the current thread by following the ARM
 * frame-pointer chain (r7) with copyinframe(); unused slots are zero-filled.
 * If no user register state exists, the whole buffer is zeroed.
 */
void
machine_callstack(
	uintptr_t * buf,
	vm_size_t callstack_max)
{
	/* Captures the USER call stack */
	uint32_t i = 0;
	uint32_t frame[2];      /* frame[0] = saved fp, frame[1] = saved lr */

	struct arm_saved_state* state = find_user_regs(current_thread());

	if (!state) {
		while (i < callstack_max) {
			buf[i++] = 0;
		}
	} else {
		/* First entry is the interrupted pc itself. */
		buf[i++] = (uintptr_t)state->pc;
		frame[0] = state->r[7];

		/* Follow the fp chain until it ends, copyinframe fails, or
		 * the buffer is full. */
		while (i < callstack_max && frame[0] != 0) {
			if (copyinframe(frame[0], (void*) frame)) {
				break;
			}
			buf[i++] = (uintptr_t)frame[1];
		}

		/* Zero-fill any remaining slots. */
		while (i < callstack_max) {
			buf[i++] = 0;
		}
	}
}
662
663 #endif /* MACH_ASSERT */
664
/* Unimplemented on ARM: big-endian mode is not supported; always panics. */
int
clr_be_bit(void)
{
	panic("clr_be_bit");
	return 0;
}
671
/* Unimplemented on ARM: probing a physical read always panics. */
boolean_t
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}
680
/* Unimplemented on ARM: probing a 64-bit-address physical read always panics. */
boolean_t
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}
689
690
/* Intentional no-op on ARM: thread policy hints are ignored on this platform. */
void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	// <rdar://problem/7141284>: Reduce print noise
	// kprintf("ml_thread_policy() unimplemented\n");
}
700
#if !MACH_KDP
/* Stub: without the kernel debugger (MACH_KDP), callout registration is a no-op. */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif