/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>
#include <mach-o/loader.h>

#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_protos.h>

#include <kdp/kdp_core.h>
#include <kdp/kdp_udp.h>
#include <kdp/kdp_internal.h>

#include <arm/misc_protos.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
50 boolean_t kdp_trans_off
;
51 boolean_t kdp_read_io
= 0;
53 pmap_paddr_t
kdp_vtophys(pmap_t pmap
, vm_offset_t va
);
65 /* Ensure that the provided va resides within the provided pmap range. */
66 if (!pmap
|| ((pmap
!= kernel_pmap
) && ((va
< pmap
->min
) || (va
>= pmap
->max
)))) {
67 #ifdef KDP_VTOPHYS_DEBUG
68 printf("kdp_vtophys(%08x, %016lx) not in range %08x .. %08x\n", (unsigned int) pmap
,
70 (unsigned int) (pmap
? pmap
->min
: 0),
71 (unsigned int) (pmap
? pmap
->max
: 0));
73 return 0; /* Just return if no translation */
76 pa
= pmap_find_pa(pmap
, va
); /* Get the physical address */
83 * Verify that src is valid, and physically copy len bytes from src to
84 * dst, translating if necessary. If translation is enabled
85 * (kdp_trans_off is 0), a non-zero kdp_pmap specifies the pmap to use
86 * when translating src.
90 kdp_machine_vm_read( mach_vm_address_t src
, caddr_t dst
, mach_vm_size_t len
)
92 addr64_t cur_virt_src
, cur_virt_dst
;
93 addr64_t cur_phys_src
, cur_phys_dst
;
94 mach_vm_size_t resid
, cnt
;
97 #ifdef KDP_VM_READ_DEBUG
98 kprintf("kdp_machine_vm_read1: src %x dst %x len %x - %08X %08X\n", src
, dst
, len
, ((unsigned long *) src
)[0], ((unsigned long *) src
)[1]);
101 cur_virt_src
= (addr64_t
) src
;
102 cur_virt_dst
= (addr64_t
) dst
;
105 kdp_readphysmem64_req_t rq
;
109 rq
.nbytes
= (uint32_t)len
;
110 ret
= kdp_machine_phys_read(&rq
, dst
, 0 /* unused */);
116 pmap
= kdp_pmap
; /* If special pmap, use it */
118 pmap
= kernel_pmap
; /* otherwise, use kernel's */
122 * Always translate the destination using the
125 if ((cur_phys_dst
= kdp_vtophys(kernel_pmap
, cur_virt_dst
)) == 0) {
129 if ((cur_phys_src
= kdp_vtophys(pmap
, cur_virt_src
)) == 0) {
133 /* Attempt to ensure that there are valid translations for src and dst. */
134 if (!kdp_read_io
&& ((!pmap_valid_address(cur_phys_dst
)) || (!pmap_valid_address(cur_phys_src
)))) {
138 cnt
= ARM_PGBYTES
- (cur_virt_src
& PAGE_MASK
); /* Get length left on
140 if (cnt
> (ARM_PGBYTES
- (cur_virt_dst
& PAGE_MASK
))) {
141 cnt
= ARM_PGBYTES
- (cur_virt_dst
& PAGE_MASK
);
148 #ifdef KDP_VM_READ_DEBUG
149 kprintf("kdp_machine_vm_read2: pmap %08X, virt %016LLX, phys %016LLX\n",
150 pmap
, cur_virt_src
, cur_phys_src
);
152 bcopy_phys(cur_phys_src
, cur_phys_dst
, cnt
);
160 #ifdef KDP_VM_READ_DEBUG
161 kprintf("kdp_machine_vm_read: ret %08X\n", len
- resid
);
167 kdp_machine_phys_read(kdp_readphysmem64_req_t
*rq
, caddr_t dst
, uint16_t lcpu __unused
)
169 mach_vm_address_t src
= rq
->address
;
170 mach_vm_size_t len
= rq
->nbytes
;
172 addr64_t cur_virt_dst
;
173 addr64_t cur_phys_src
, cur_phys_dst
;
174 mach_vm_size_t resid
= len
;
175 mach_vm_size_t cnt
= 0, cnt_src
, cnt_dst
;
177 #ifdef KDP_VM_READ_DEBUG
178 kprintf("kdp_phys_read src %x dst %p len %x\n", src
, dst
, len
);
181 cur_virt_dst
= (addr64_t
) dst
;
182 cur_phys_src
= (addr64_t
) src
;
185 if ((cur_phys_dst
= kdp_vtophys(kernel_pmap
, cur_virt_dst
)) == 0) {
189 /* Get length left on page */
191 cnt_src
= ARM_PGBYTES
- (cur_phys_src
& PAGE_MASK
);
192 cnt_dst
= ARM_PGBYTES
- (cur_phys_dst
& PAGE_MASK
);
193 if (cnt_src
> cnt_dst
) {
202 bcopy_phys(cur_phys_src
, cur_phys_dst
, cnt
); /* Copy stuff over */
216 kdp_machine_vm_write( caddr_t src
, mach_vm_address_t dst
, mach_vm_size_t len
)
218 addr64_t cur_virt_src
, cur_virt_dst
;
219 addr64_t cur_phys_src
, cur_phys_dst
;
220 mach_vm_size_t resid
, cnt
, cnt_src
, cnt_dst
;
222 #ifdef KDP_VM_WRITE_DEBUG
223 printf("kdp_vm_write: src %x dst %x len %x - %08X %08X\n", src
, dst
, len
, ((unsigned long *) src
)[0], ((unsigned long *) src
)[1]);
226 cur_virt_src
= (addr64_t
) src
;
227 cur_virt_dst
= (addr64_t
) dst
;
232 if ((cur_phys_dst
= kdp_vtophys(kernel_pmap
, cur_virt_dst
)) == 0) {
236 if ((cur_phys_src
= kdp_vtophys(kernel_pmap
, cur_virt_src
)) == 0) {
240 /* Attempt to ensure that there are valid translations for src and dst. */
241 /* No support for enabling writes for an invalid translation at the moment. */
242 if ((!pmap_valid_address(cur_phys_dst
)) || (!pmap_valid_address(cur_phys_src
))) {
246 cnt_src
= ((cur_phys_src
+ ARM_PGBYTES
) & (-ARM_PGBYTES
)) - cur_phys_src
;
247 cnt_dst
= ((cur_phys_dst
+ ARM_PGBYTES
) & (-ARM_PGBYTES
)) - cur_phys_dst
;
249 if (cnt_src
> cnt_dst
) {
258 #ifdef KDP_VM_WRITE_DEBUG
259 printf("kdp_vm_write: cur_phys_src %x cur_phys_src %x len %x - %08X %08X\n", src
, dst
, cnt
);
261 bcopy_phys(cur_phys_src
, cur_phys_dst
, cnt
); /* Copy stuff over */
262 flush_dcache64(cur_phys_dst
, (unsigned int)cnt
, TRUE
);
263 invalidate_icache64(cur_phys_dst
, (unsigned int)cnt
, TRUE
);
274 kdp_machine_phys_write(kdp_writephysmem64_req_t
*rq __unused
, caddr_t src __unused
,
275 uint16_t lcpu __unused
)
277 return 0; /* unimplemented */
281 kern_collectth_state_size(uint64_t * tstate_count
, uint64_t * tstate_size
)
283 uint64_t count
= ml_get_max_cpu_number() + 1;
285 *tstate_count
= count
;
286 *tstate_size
= sizeof(struct thread_command
)
287 + (sizeof(arm_state_hdr_t
)
288 #if defined(__arm64__)
289 + ARM_THREAD_STATE64_COUNT
* sizeof(uint32_t));
291 + ARM_THREAD_STATE32_COUNT
* sizeof(uint32_t));
296 kern_collectth_state(thread_t thread __unused
, void *buffer
, uint64_t size
, void ** iter
)
298 cpu_data_entry_t
*cpuentryp
= *iter
;
299 if (cpuentryp
== NULL
) {
300 cpuentryp
= &CpuDataEntries
[0];
303 if (cpuentryp
== &CpuDataEntries
[ml_get_max_cpu_number()]) {
306 *iter
= cpuentryp
+ 1;
309 struct cpu_data
*cpudatap
= cpuentryp
->cpu_data_vaddr
;
311 struct thread_command
*tc
= (struct thread_command
*)buffer
;
312 arm_state_hdr_t
*hdr
= (arm_state_hdr_t
*)(void *)(tc
+ 1);
313 #if defined(__arm64__)
314 hdr
->flavor
= ARM_THREAD_STATE64
;
315 hdr
->count
= ARM_THREAD_STATE64_COUNT
;
316 arm_thread_state64_t
*state
= (arm_thread_state64_t
*)(void *)(hdr
+ 1);
318 hdr
->flavor
= ARM_THREAD_STATE
;
319 hdr
->count
= ARM_THREAD_STATE_COUNT
;
320 arm_thread_state_t
*state
= (arm_thread_state_t
*)(void *)(hdr
+ 1);
324 tc
->cmdsize
= (uint32_t) size
;
326 if ((cpudatap
!= NULL
) && (cpudatap
->halt_status
== CPU_HALTED_WITH_STATE
)) {
327 *state
= cpudatap
->halt_state
;
331 processor_t processor
= PERCPU_GET_RELATIVE(processor
, cpu_data
, cpudatap
);
332 if ((cpudatap
== NULL
) || (processor
->active_thread
== NULL
)) {
333 bzero(state
, hdr
->count
* sizeof(uint32_t));
337 #if defined(__arm64__)
338 void *kpcb
= processor
->active_thread
->machine
.kpcb
;
340 arm_saved_state_t
*saved_state
= (arm_saved_state_t
*)kpcb
;
342 state
->fp
= saved_state
->ss_64
.fp
;
343 state
->lr
= saved_state
->ss_64
.lr
;
344 state
->sp
= saved_state
->ss_64
.sp
;
345 state
->pc
= saved_state
->ss_64
.pc
;
346 state
->cpsr
= saved_state
->ss_64
.cpsr
;
347 bcopy(&saved_state
->ss_64
.x
[0], &state
->x
[0], sizeof(state
->x
));
349 vm_offset_t kstackptr
= (vm_offset_t
) processor
->active_thread
->machine
.kstackptr
;
350 arm_kernel_saved_state_t
*saved_state
= (arm_kernel_saved_state_t
*) kstackptr
;
352 state
->fp
= saved_state
->fp
;
353 state
->lr
= saved_state
->lr
;
354 state
->sp
= saved_state
->sp
;
355 state
->pc
= saved_state
->pc
;
356 state
->cpsr
= saved_state
->cpsr
;
359 #else /* __arm64__ */
360 vm_offset_t kstackptr
= (vm_offset_t
) processor
->active_thread
->machine
.kstackptr
;
361 arm_saved_state_t
*saved_state
= (arm_saved_state_t
*) kstackptr
;
363 state
->lr
= saved_state
->lr
;
364 state
->sp
= saved_state
->sp
;
365 state
->pc
= saved_state
->pc
;
366 state
->cpsr
= saved_state
->cpsr
;
367 bcopy(&saved_state
->r
[0], &state
->r
[0], sizeof(state
->r
));
369 #endif /* !__arm64__ */
373 * kdp_core_start_addr
375 * return the address where the kernel core file starts
377 * The kernel start address is VM_MIN_KERNEL_AND_KEXT_ADDRESS
378 * unless the physical aperture has been relocated below
379 * VM_MIN_KERNEL_AND_KEXT_ADDRESS as in the case of
380 * ARM_LARGE_MEMORY systems
384 kdp_core_start_addr()
386 #if defined(__arm64__)
387 extern const vm_map_address_t physmap_base
;
388 return MIN(physmap_base
, VM_MIN_KERNEL_AND_KEXT_ADDRESS
);
389 #else /* !defined(__arm64__) */
390 return VM_MIN_KERNEL_AND_KEXT_ADDRESS
;
391 #endif /* !defined(__arm64__) */