#include <ppc/mem.h>
#include <ppc/pmap.h>
#include <ppc/mappings.h>
+#include <ppc/cpu_data.h>
#include <mach/thread_status.h>
#include <mach-o/loader.h>
#include <mach/vm_statistics.h>
#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_protos.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_udp.h>
#include <kdp/kdp_internal.h>
+#include <ppc/misc_protos.h>
+#include <mach/vm_map.h>
+
pmap_t kdp_pmap=0;
boolean_t kdp_trans_off=0;
unsigned kdp_vm_read( caddr_t, caddr_t, unsigned);
unsigned kdp_vm_write( caddr_t, caddr_t, unsigned);
+extern vm_offset_t sectTEXTB, sectDATAB, sectLINKB, sectPRELINKB;
+extern int sectSizeTEXT, sectSizeDATA, sectSizeLINK, sectSizePRELINK;
+
+/* XXX prototypes which should be in a common header file */
+addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);
+int kern_dump(void);
+int kdp_dump_trap(int type, struct savearea *regs);
+/*
+ * XXX the following prototype doesn't match the declaration because the
+ * XXX actual declaration is wrong.
+ */
+extern int kdp_send_panic_packets(unsigned int request, char *corename,
+ unsigned int length, caddr_t txstart);
+
+
+
typedef struct {
int flavor; /* the number for this flavor */
char command_buffer[512];
-static struct vm_object test_object;
+// XXX static struct vm_object test_object;
/*
 * Translate a virtual address to a physical address using the given pmap.
 */
-addr64_t kdp_vtophys(
+addr64_t
+kdp_vtophys(
pmap_t pmap,
addr64_t va)
{
pa = ((addr64_t)pp << 12) | (va & 0x0000000000000FFFULL); /* Shove in the page offset */
return(pa);
}
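+
+/*
+ * Illustrative arithmetic for the computation above (hypothetical page
+ * number): with a 4K page size, a physical page number of 0x12345 and a
+ * va whose low bits are 0xFEDC give
+ *
+ *	pa = (0x12345 << 12) | 0xEDC = 0x12345EDC
+ *
+ * i.e. the page frame supplies bits 12 and up, and the low 12 bits are
+ * carried over from the virtual address unchanged.
+ */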
-
-/*
- * Note that kdp_vm_read() does not translate the destination address.Therefore
- * there's an implicit assumption that the destination will be a statically
- * allocated structure, since those map to the same phys. and virt. addresses
+/* Verify that src is valid, and physically copy len bytes from src to
+ * dst, translating if necessary. If translation is enabled
+ * (kdp_trans_off is 0), a non-zero kdp_pmap specifies the pmap to use
+ * when translating src.
*/
+
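+/*
+ * Illustrative usage sketch ("task_pmap" and "uaddr" are hypothetical,
+ * and the return value is assumed to be the number of bytes copied): to
+ * read through another address space, leave kdp_trans_off at 0 and point
+ * kdp_pmap at that pmap for the duration of the copy.
+ *
+ *	unsigned int word;
+ *
+ *	kdp_pmap = task_pmap;
+ *	if (kdp_vm_read((caddr_t)uaddr, (caddr_t)&word, sizeof (word)) != sizeof (word))
+ *		printf("short read: source page not mapped\n");
+ *	kdp_pmap = 0;
+ */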
unsigned kdp_vm_read(
caddr_t src,
caddr_t dst,
unsigned len)
{
addr64_t cur_virt_src, cur_virt_dst;
- addr64_t cur_phys_src;
+ addr64_t cur_phys_src, cur_phys_dst;
unsigned resid, cnt;
unsigned int dummy;
pmap_t pmap;
while (resid != 0) {
+ if((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
+ goto exit;
+
if(kdp_read_io == 0)
if(!mapping_phys_lookup((ppnum_t)(cur_virt_src >> 12), &dummy)) return 0; /* Can't read where there's not any memory */
cnt = 4096 - (cur_virt_src & 0xFFF); /* Get length left on page */
+ if (cnt > (4096 - (cur_virt_dst & 0xFFF)))
+ cnt = 4096 - (cur_virt_dst & 0xFFF);
if (cnt > resid) cnt = resid;
- bcopy_phys(cur_virt_src, cur_virt_dst, cnt); /* Copy stuff over */
+ bcopy_phys(cur_virt_src, cur_phys_dst, cnt); /* Copy stuff over */
cur_virt_src += cnt;
cur_virt_dst += cnt;
else pmap = kernel_pmap; /* otherwise, use kernel's */
while (resid != 0) {
+/* Always translate the destination using the kernel_pmap. */
+ if((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
+ goto exit;
+
+ if((cur_phys_src = kdp_vtophys(pmap, cur_virt_src)) == 0)
+ goto exit;
- if((cur_phys_src = kdp_vtophys(pmap, cur_virt_src)) == 0) goto exit;
if(kdp_read_io == 0)
if(!mapping_phys_lookup((ppnum_t)(cur_phys_src >> 12), &dummy)) goto exit; /* Can't read where there's not any memory */
cnt = 4096 - (cur_virt_src & 0xFFF); /* Get length left on page */
+ if (cnt > (4096 - (cur_virt_dst & 0xFFF)))
+ cnt = 4096 - (cur_virt_dst & 0xFFF);
+
if (cnt > resid) cnt = resid;
#ifdef KDP_VM_READ_DEBUG
pmap, cur_virt_src, cur_phys_src);
#endif
- bcopy_phys(cur_phys_src, cur_virt_dst, cnt); /* Copy stuff over */
+ bcopy_phys(cur_phys_src, cur_phys_dst, cnt); /* Copy stuff over */
cur_virt_src +=cnt;
cur_virt_dst +=cnt;
while (resid != 0) {
if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0)
goto exit;
+
if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0)
goto exit;
static void
-kern_collectth_state(thread_act_t th_act, tir_t *t)
+kern_collectth_state(thread_t thread, tir_t *t)
{
vm_offset_t header;
int hoffset, i ;
flavors[i];
hoffset += sizeof(mythread_state_flavor_t);
- if (machine_thread_get_kern_state(th_act, flavors[i].flavor,
+ if (machine_thread_get_kern_state(thread, flavors[i].flavor,
(thread_state_t) (header+hoffset),
&flavors[i].count) != KERN_SUCCESS)
printf ("Failure in machine_thread_get_kern_state()\n");
int
kdp_dump_trap(
int type,
- struct savearea *regs)
+ __unused struct savearea *regs)
{
- extern int kdp_flag;
-
printf ("An unexpected trap (type %d) occurred during the kernel dump, terminating.\n", type);
kdp_send_panic_pkt (KDP_EOF, NULL, 0, ((void *) 0));
abort_panic_transfer();
kdp_reset();
kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state);
- return;
+ return( 0 );
}
+/*
+ * Kernel dump (limited to currently executing 32 bit mach_kernel only)
+ */
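+/*
+ * Rough shape of the transfer (illustrative sketch; KDP_DATA is assumed
+ * to be the bulk-data request of the same core protocol as the KDP_SEEK
+ * and KDP_EOF requests used below):
+ *
+ *	kdp_send_panic_pkt(KDP_SEEK, NULL, sizeof (foffset), &foffset);
+ *	kdp_send_panic_packets(KDP_DATA, NULL, size, txstart);
+ *	...
+ *	kdp_send_panic_pkt(KDP_EOF, NULL, 0, ((void *) 0));
+ *
+ * i.e. seek the remote core server to a file offset, push the header and
+ * segment data in chunks, and finish with an EOF request.  A trap taken
+ * while copying ends up in kdp_dump_trap() above, which terminates the
+ * transfer.
+ */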
int
-kern_dump()
+kern_dump(void)
{
int error = 0;
vm_map_t map;
unsigned int hoffset = 0, foffset = 0, nfoffset = 0, vmoffset = 0;
unsigned int max_header_size = 0;
vm_offset_t header;
- struct machine_slot *ms;
struct mach_header *mh;
struct segment_command *sc;
- struct thread_command *tc;
vm_size_t size;
vm_prot_t prot = 0;
vm_prot_t maxprot = 0;
vm_inherit_t inherit = 0;
- vm_offset_t offset;
- int error1;
+ int error1 = 0;
mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
vm_size_t nflavors;
- int i;
+ vm_size_t i;
int nesting_depth = 0;
- kern_return_t kret;
+ kern_return_t kret = 0;
struct vm_region_submap_info_64 vbr;
int vbrcount = 0;
tir_t tir1;
unsigned int mach_section_count = 4;
unsigned int num_sects_txed = 0;
-
- extern int SEGSIZE;
-
- extern vm_offset_t sectTEXTB, sectDATAB, sectLINKB, sectPRELINKB;
- extern int sectSizeTEXT, sectSizeDATA, sectSizeLINK, sectSizePRELINK;
-
map = kernel_map;
not_in_kdp = 0; /* Tell vm functions not to acquire locks */
header = (vm_offset_t) command_buffer;
/*
- * Set up Mach-O header.
+ * Set up Mach-O header for currently executing 32 bit kernel.
*/
printf ("Generated Mach-O header size was %d\n", header_size);
mh = (struct mach_header *) header;
- ms = &machine_slot[cpu_number()];
mh->magic = MH_MAGIC;
- mh->cputype = ms->cpu_type;
- mh->cpusubtype = ms->cpu_subtype;
+ mh->cputype = cpu_type();
+ mh->cpusubtype = cpu_subtype(); /* XXX incorrect; should match kernel */
mh->filetype = MH_CORE;
mh->ncmds = segment_count + thread_count + mach_section_count;
mh->sizeofcmds = command_size;
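+	/*
+	 * Illustrative sizing with hypothetical counts: 30 VM regions,
+	 * one thread and the 4 fixed kernel sections would give
+	 *
+	 *	ncmds = 30 + 1 + 4 = 35
+	 *
+	 * with sizeofcmds covering one segment_command per region and
+	 * section plus a thread_command (and its saved register state)
+	 * per thread.
+	 */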
vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
if((kret = vm_region_recurse_64(map,
&vmoffset, &size, &nesting_depth,
- &vbr, &vbrcount)) != KERN_SUCCESS) {
+ (vm_region_recurse_info_t)&vbr,
+ &vbrcount)) != KERN_SUCCESS) {
break;
}
* not followed by a normal VM region; i.e. there will be no hole that
* reaches to the end of the core file.
*/
- kern_collectth_state (current_act(), &tir1);
+ kern_collectth_state (current_thread(), &tir1);
if ((panic_error = kdp_send_panic_pkt (KDP_SEEK, NULL, sizeof(hoffset) , &hoffset)) < 0) {
printf ("kdp_send_panic_pkt failed with error %d\n", panic_error);
return (-1) ;
}
- out:
if (error == 0)
error = error1;
return (error);