/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <mach_kdp.h>
#include <kdp/kdp_udp.h>
#if !MACH_KDP
#include <kdp/kdp_callout.h>
#endif /* !MACH_KDP */
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/caches_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <arm/misc_protos.h>

#include <sys/errno.h>

#define INT_SIZE	(BYTE_SIZE * sizeof (int))

void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
	unsigned int src_index;
	unsigned int dst_index;
	vm_offset_t  src_offset;
	vm_offset_t  dst_offset;
	unsigned int cpu_num;
	unsigned int wimg_bits_src, wimg_bits_dst;
	ppnum_t      pn_src = (src >> PAGE_SHIFT);
	ppnum_t      pn_dst = (dst >> PAGE_SHIFT);

	wimg_bits_src = pmap_cache_attributes(pn_src);
	wimg_bits_dst = pmap_cache_attributes(pn_dst);

	if (mmu_kvtop_wpreflight(phystokv((pmap_paddr_t) dst)) &&
	    ((wimg_bits_src & VM_WIMG_MASK) == VM_WIMG_DEFAULT) &&
	    ((wimg_bits_dst & VM_WIMG_MASK) == VM_WIMG_DEFAULT)) {
		/* Fast path - dst is writable and both source and destination have default attributes */
		bcopy((char *)phystokv((pmap_paddr_t) src), (char *)phystokv((pmap_paddr_t) dst), bytes);
		return;
	}

	src_offset = src & PAGE_MASK;
	dst_offset = dst & PAGE_MASK;

	if ((src_offset + bytes) > PAGE_SIZE || (dst_offset + bytes) > PAGE_SIZE)
		panic("bcopy extends beyond copy windows");

	mp_disable_preemption();
	cpu_num = cpu_number();
	src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
	dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);

	bcopy((char *)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset),
	      (char *)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset),
	      bytes);

	pmap_unmap_cpu_windows_copy(src_index);
	pmap_unmap_cpu_windows_copy(dst_index);
	mp_enable_preemption();
}
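
/*
 * Illustrative sketch (not part of the original source): copying one
 * physical page with bcopy_phys. On the slow (copy-window) path neither
 * the source nor the destination range may cross a page boundary, so
 * page-at-a-time copies are the safe pattern. The names `pa_src' and
 * `pa_dst' are hypothetical; they are assumed to be valid, page-aligned
 * physical addresses owned by the caller.
 */
#if 0 /* example only */
static void
copy_one_page_example(pmap_paddr_t pa_src, pmap_paddr_t pa_dst)
{
	/* Both ranges start page-aligned, so a PAGE_SIZE copy cannot
	 * spill past a copy window. */
	bcopy_phys((addr64_t)pa_src, (addr64_t)pa_dst, PAGE_SIZE);
}
#endif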

void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}

/* Zero bytes starting at a physical address */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
	unsigned int wimg_bits;
	ppnum_t pn = (src >> PAGE_SHIFT);

	wimg_bits = pmap_cache_attributes(pn);
	if ((wimg_bits & VM_WIMG_MASK) == VM_WIMG_DEFAULT) {
		/* Fast path - default attributes */
		bzero((char *)phystokv((pmap_paddr_t) src), bytes);
	} else {
		mp_disable_preemption();

		unsigned int cpu_num = cpu_number();

		/*
		 * Note: the WIMG attributes looked up for the first page
		 * are applied to every page in the range.
		 */
		while (bytes > 0) {
			vm_offset_t offset = src & PAGE_MASK;
			uint32_t count = PAGE_SIZE - offset;

			if (count > bytes)
				count = bytes;

			unsigned int index = pmap_map_cpu_windows_copy(src >> PAGE_SHIFT, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);

			bzero((char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset), count);

			pmap_unmap_cpu_windows_copy(index);

			src += count;
			bytes -= count;
		}

		mp_enable_preemption();
	}
}
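
/*
 * Illustrative sketch (not part of the original source): unlike the
 * copy-window path of bcopy_phys, bzero_phys accepts ranges that span
 * page boundaries and loops page-by-page internally, so a multi-page
 * physical buffer can be scrubbed in one call. The names `pa' and
 * `len' are hypothetical and assumed valid and caller-owned.
 */
#if 0 /* example only */
static void
scrub_range_example(pmap_paddr_t pa, vm_size_t len)
{
	bzero_phys((addr64_t)pa, len);	/* may cover several pages */
}
#endif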

/*
 * Read data from a physical address.
 */

static unsigned int
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	unsigned int   index;
	unsigned int   result;
	unsigned int   wimg_bits;
	ppnum_t        pn = (paddr >> PAGE_SHIFT);
	unsigned char  s1;
	unsigned short s2;
	vm_offset_t    copywindow_vaddr = 0;

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)(copywindow_vaddr);
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)(copywindow_vaddr);
		result = s2;
		break;
	case 4:
	default:
		result = *(volatile unsigned int *)(copywindow_vaddr);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();

	return result;
}

static unsigned long long
ml_phys_read_long_long(pmap_paddr_t paddr)
{
	unsigned int       index;
	unsigned long long result;
	unsigned int       wimg_bits;
	ppnum_t            pn = (paddr >> PAGE_SHIFT);

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);

	result = *(volatile unsigned long long *)(pmap_cpu_windows_copy_addr(cpu_number(), index)
	    | ((uint32_t)paddr & PAGE_MASK));

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();

	return result;
}

unsigned int ml_phys_read(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_word(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int ml_phys_read_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_word_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int ml_phys_read_half(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int ml_phys_read_half_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int ml_phys_read_byte(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int ml_phys_read_byte_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

unsigned long long ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr);
}

unsigned long long ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr64);
}
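
/*
 * Illustrative sketch (not part of the original source): the _64 suffix
 * on these accessors refers to the width of the physical address
 * argument, not the access size - ml_phys_read_64 still performs a
 * 4-byte read, and only the _double variants read 8 bytes. The name
 * `pa' is hypothetical and assumed to be a valid physical address.
 */
#if 0 /* example only */
static void
phys_read_example(addr64_t pa)
{
	unsigned int       w = ml_phys_read_64(pa);        /* 4-byte read */
	unsigned long long d = ml_phys_read_double_64(pa); /* 8-byte read */
	(void)w;
	(void)d;
}
#endif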

/*
 * Write data to a physical address.
 */

static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
{
	unsigned int index;
	unsigned int wimg_bits;
	ppnum_t      pn = (paddr >> PAGE_SHIFT);
	vm_offset_t  copywindow_vaddr = 0;

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		*(volatile unsigned char *)(copywindow_vaddr) = (unsigned char)data;
		break;
	case 2:
		*(volatile unsigned short *)(copywindow_vaddr) = (unsigned short)data;
		break;
	case 4:
	default:
		*(volatile unsigned int *)(copywindow_vaddr) = (uint32_t)data;
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}

static void
ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
{
	unsigned int index;
	unsigned int wimg_bits;
	ppnum_t      pn = (paddr >> PAGE_SHIFT);

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);

	*(volatile unsigned long long *)(pmap_cpu_windows_copy_addr(cpu_number(), index)
	    | ((uint32_t)paddr & PAGE_MASK)) = data;

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
}

void ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr, data);
}

void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr64, data);
}
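
/*
 * Illustrative sketch (not part of the original source): a
 * read-modify-write of one 32-bit word in physical memory using the
 * accessors above. Each call maps and unmaps a per-CPU copy window, so
 * batching into a single bcopy_phys is cheaper for anything larger than
 * a few words. The names `pa' and `bit' are hypothetical.
 */
#if 0 /* example only */
static void
phys_set_bit_example(addr64_t pa, unsigned int bit)
{
	unsigned int word = ml_phys_read_64(pa);

	ml_phys_write_64(pa, word | (1u << bit));
}
#endif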

/*
 * Set indicated bit in bit string.
 */
void
setbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
}

/*
 * Clear indicated bit in bit string.
 */
void
clrbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
}

/*
 * Test if indicated bit is set in bit string.
 */
int
testbit(int bitno, int *s)
{
	return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
}
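
/*
 * Illustrative sketch (not part of the original source): the bit-string
 * helpers treat an int array as a flat bitmap, INT_SIZE bits per
 * element, so with 32-bit ints bit 37 lives in bit 5 of element 1.
 */
#if 0 /* example only */
static void
bitmap_example(void)
{
	int map[4] = { 0 };	/* 128-bit bitmap */

	setbit(37, map);	/* sets bit 5 of map[1] */
	assert(testbit(37, map));
	clrbit(37, map);
	assert(!testbit(37, map));
}
#endif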

/*
 * Find first bit set in bit string.
 */
int
ffsbit(int *s)
{
	int offset;

	/* Note: assumes at least one bit is set somewhere in the string;
	 * otherwise the scan runs past the end of it. */
	for (offset = 0; !*s; offset += INT_SIZE, ++s);
	return offset + __builtin_ctz(*s);
}

int
ffs(unsigned int mask)
{
	if (mask == 0)
		return 0;

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}

int
ffsll(unsigned long long mask)
{
	if (mask == 0)
		return 0;

	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return 1 + __builtin_ctzll(mask);
}

/*
 * Find last bit set in bit string.
 */
int
fls(unsigned int mask)
{
	if (mask == 0)
		return 0;

	return (sizeof (mask) << 3) - __builtin_clz(mask);
}

int
flsll(unsigned long long mask)
{
	if (mask == 0)
		return 0;

	return (sizeof (mask) << 3) - __builtin_clzll(mask);
}
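
/*
 * Illustrative sketch (not part of the original source): ffs/fls use
 * 1-based bit positions and return 0 for an empty mask. For
 * mask = 0x12 (binary 10010) the lowest set bit is bit 2 and the
 * highest is bit 5.
 */
#if 0 /* example only */
static void
ffs_fls_example(void)
{
	assert(ffs(0x12) == 2);
	assert(fls(0x12) == 5);
	assert(ffs(0) == 0 && fls(0) == 0);
	assert(ffsll(1ULL << 40) == 41 && flsll(1ULL << 40) == 41);
}
#endif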

int
bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char *a = (const char *) pa;
	const char *b = (const char *) pb;

	if (len == 0)
		return 0;

	do
		if (*a++ != *b++)
			break;
	while (--len);

	return len;
}

int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++)
				return (*--p1 - *--p2);
		} while (--n != 0);
	}
	return (0);
}
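
/*
 * Illustrative sketch (not part of the original source): bcmp only
 * answers "equal or not" - its nonzero result is the residual length at
 * the first mismatch, not an ordering - while memcmp reports byte-wise
 * ordering.
 */
#if 0 /* example only */
static void
compare_example(void)
{
	assert(bcmp("abc", "abc", 3) == 0);
	assert(bcmp("abc", "abd", 3) != 0);	/* nonzero; sign not meaningful */
	assert(memcmp("abc", "abd", 3) < 0);	/* 'c' < 'd' */
}
#endif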

kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	kern_return_t retval = KERN_SUCCESS;
	void          *from, *to;
	unsigned int  from_wimg_bits, to_wimg_bits;

	from = CAST_DOWN(void *, source);
	to = CAST_DOWN(void *, sink);

	if ((which & (cppvPsrc | cppvPsnk)) == 0)	/* Make sure that only
							 * one is virtual */
		panic("copypv: no more than 1 parameter may be virtual\n");	/* Not allowed */

	if (which & cppvPsrc)
		from = (void *)phystokv((pmap_paddr_t)from);
	if (which & cppvPsnk)
		to = (void *)phystokv((pmap_paddr_t)to);

	if ((which & (cppvPsrc | cppvKmap)) == 0)	/* Source is virtual in
							 * current map */
		retval = copyin((user_addr_t) from, to, size);
	else if ((which & (cppvPsnk | cppvKmap)) == 0)	/* Sink is virtual in
							 * current map */
		retval = copyout(from, (user_addr_t) to, size);
	else	/* both addresses are physical or kernel map */
		bcopy(from, to, size);

	if (which & cppvFsrc) {
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
	} else if (which & cppvPsrc) {
		from_wimg_bits = pmap_cache_attributes(source >> PAGE_SHIFT);
		if ((from_wimg_bits != VM_WIMG_COPYBACK) && (from_wimg_bits != VM_WIMG_WTHRU))
			flush_dcache64(source, size, TRUE);
	}

	if (which & cppvFsnk) {
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
	} else if (which & cppvPsnk) {
		to_wimg_bits = pmap_cache_attributes(sink >> PAGE_SHIFT);
		if (to_wimg_bits != VM_WIMG_COPYBACK)
			flush_dcache64(sink, size, TRUE);
	}
	return retval;
}
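
/*
 * Illustrative sketch (not part of the original source): copying from a
 * physical page into the current task's address space with copypv.
 * cppvPsrc marks the source as physical, so the sink is treated as a
 * user-virtual address in the current map and copied via copyout;
 * cppvFsnk additionally requests a dcache flush of the sink. The names
 * `pa' and `uva' are hypothetical and assumed valid.
 */
#if 0 /* example only */
static kern_return_t
phys_to_user_example(addr64_t pa, addr64_t uva, unsigned int size)
{
	return copypv(pa, uva, size, cppvPsrc | cppvFsnk);
}
#endif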

/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * 1024 * 1024);

/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 * Called when nbytes is "large" e.g. more than a page. Such sizes are
 * infrequent, and very large sizes are likely indications of attempts
 * to exploit kernel programming errors (bugs).
 */
static int
copy_validate(const user_addr_t user_addr,
    uintptr_t kernel_addr, vm_size_t nbytes)
{
	uintptr_t kernel_addr_last = kernel_addr + nbytes;

	if (__improbable(kernel_addr < VM_MIN_KERNEL_ADDRESS ||
	    kernel_addr > VM_MAX_KERNEL_ADDRESS ||
	    kernel_addr_last < kernel_addr ||
	    kernel_addr_last > VM_MAX_KERNEL_ADDRESS))
		panic("%s(%p, %p, %u) - kaddr not in kernel", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);

	user_addr_t user_addr_last = user_addr + nbytes;

	if (__improbable((user_addr_last < user_addr) ||
	    ((user_addr + nbytes) > vm_map_max(current_thread()->map)) ||
	    (user_addr < vm_map_min(current_thread()->map))))
		return (EFAULT);

	if (__improbable(nbytes > copysize_limit_panic))
		panic("%s(%p, %p, %u) - transfer too large", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);

	return (0);
}

int
copyin_validate(const user_addr_t ua, uintptr_t ka, vm_size_t nbytes)
{
	return (copy_validate(ua, ka, nbytes));
}

int
copyout_validate(uintptr_t ka, const user_addr_t ua, vm_size_t nbytes)
{
	return (copy_validate(ua, ka, nbytes));
}
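
/*
 * Illustrative sketch (not part of the original source): the validators
 * draw a sharp line between the two failure classes. A kernel address
 * outside the kernel range or an oversized transfer is treated as a
 * kernel bug and panics; a user range outside the current map is the
 * caller's problem and surfaces as EFAULT.
 */
#if 0 /* example only */
static int
validate_example(user_addr_t ua, void *kbuf, vm_size_t len)
{
	int err = copyin_validate(ua, (uintptr_t)kbuf, len);

	if (err)
		return err;	/* EFAULT: user range outside current map */
	/* the panic paths never return here */
	return 0;
}
#endif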

#if MACH_ASSERT

extern int copyinframe(vm_address_t fp, char *frame);

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void
machine_callstack(
	uintptr_t *buf,
	vm_size_t callstack_max)
{
	/* Captures the USER call stack */
	uint32_t i = 0;
	uint32_t frame[2];

	struct arm_saved_state *state = find_user_regs(current_thread());

	if (!state) {
		while (i < callstack_max)
			buf[i++] = 0;
	} else {
		buf[i++] = (uintptr_t)state->pc;
		frame[0] = state->r[7];

		while (i < callstack_max && frame[0] != 0) {
			if (copyinframe(frame[0], (void *) frame))
				break;
			buf[i++] = (uintptr_t)frame[1];
		}

		while (i < callstack_max)
			buf[i++] = 0;
	}
}
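
/*
 * Illustrative note (not part of the original source): on 32-bit ARM
 * with this ABI, r7 conventionally holds the frame pointer, and each
 * frame record is two words - frame[0] is the caller's frame pointer
 * and frame[1] the saved return address - which is why the walk above
 * seeds frame[0] from state->r[7] and records frame[1] into buf.
 */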

#endif /* MACH_ASSERT */

int
clr_be_bit(void)
{
	panic("clr_be_bit");
	return 0;
}

boolean_t
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}

boolean_t
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}

void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	// <rdar://problem/7141284>: Reduce print noise
	// kprintf("ml_thread_policy() unimplemented\n");
}

#if !MACH_KDP
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif