+extern pmap_t kdp_pmap;
+
+#define RETURN_OFFSET 4
+/*
+ * Walk the saved-EBP frame chain of 'thread' and record up to 'nframes'
+ * (PC, frame-pointer) samples into the buffer starting at 'tracepos',
+ * never writing at or past 'tracebound'.
+ *
+ * For a user thread (user_p) the walk starts from the thread's saved
+ * user-mode register state, and kdp_pmap is pointed at the task's pmap
+ * (presumably so kdp_vm_read translates user virtual addresses — confirm
+ * against kdp_vm_read).  For a kernel thread the walk starts from the
+ * saved state at the base of the kernel stack.
+ *
+ * Returns the number of bytes written into the trace buffer.
+ */
+int
+machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p)
+{
+ uint32_t *tracebuf = (uint32_t *)tracepos;
+ uint32_t fence = 0;          /* frame-chain terminator value (never changed) */
+ uint32_t stackptr = 0;
+ uint32_t stacklimit = 0xfc000000;  /* upper bound on valid kernel frame pointers */
+ int framecount = 0;
+ uint32_t init_eip = 0;
+ uint32_t prevsp = 0;
+ uint32_t framesize = 2 * sizeof(vm_offset_t);  /* saved EBP + return address per frame */
+
+ if (user_p) {
+ x86_saved_state32_t *iss32;
+
+ iss32 = USER_REGS32(thread);
+
+ init_eip = iss32->eip;
+ stackptr = iss32->ebp;
+
+ /* This bound isn't useful, but it doesn't hinder us*/
+ stacklimit = 0xffffffff;
+ kdp_pmap = thread->task->map->pmap;
+ }
+ else {
+ /*Examine the i386_saved_state at the base of the kernel stack*/
+ stackptr = STACK_IKS(thread->kernel_stack)->k_ebp;
+ init_eip = STACK_IKS(thread->kernel_stack)->k_eip;
+ }
+
+ /* First entry is the starting PC; the loop below emits (EBP, return PC) pairs. */
+ *tracebuf++ = init_eip;
+
+ for (framecount = 0; framecount < nframes; framecount++) {
+
+ /*
+ * Stop when fewer than 4 frames' worth of space remains.
+ * NOTE(review): the subtraction is unsigned — if tracebuf ever exceeded
+ * tracebound this would underflow and pass the check; presumably callers
+ * guarantee tracebound > tracepos with adequate slack — verify.
+ */
+ if ((tracebound - ((uint32_t) tracebuf)) < (4 * framesize)) {
+ tracebuf--;
+ break;
+ }
+
+ *tracebuf++ = stackptr;
+/* Invalid frame, or hit fence (fence is always 0 here, so this matches !stackptr) */
+ if (!stackptr || (stackptr == fence)) {
+ break;
+ }
+ /* Stack grows downward, so each saved EBP must be above the previous one */
+ if (stackptr < prevsp) {
+ break;
+ }
+ /* Unaligned frame: frame pointers must be 4-byte aligned (mask == 3) */
+ if (stackptr & 0x0000003) {
+ break;
+ }
+ if (stackptr > stacklimit) {
+ break;
+ }
+
+ /* Return address sits RETURN_OFFSET (4) bytes above the saved EBP. */
+ if (kdp_vm_read((caddr_t) (stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(caddr_t)) != sizeof(caddr_t)) {
+ break;
+ }
+ tracebuf++;
+
+ prevsp = stackptr;
+ /* Follow the saved-EBP link to the caller's frame; on failure emit a 0 sentinel. */
+ if (kdp_vm_read((caddr_t) stackptr, (caddr_t) &stackptr, sizeof(caddr_t)) != sizeof(caddr_t)) {
+ *tracebuf++ = 0;
+ break;
+ }
+ }
+
+ /* Restore default address translation for subsequent kdp_vm_read calls. */
+ kdp_pmap = 0;
+
+ return ((uint32_t) tracebuf - tracepos);
+}
+
+/* This is a stub until the x86 64-bit model becomes clear */
+/* Always reports zero bytes of trace data; no state is touched. */
+int
+machine_trace_thread64(__unused thread_t thread, __unused uint32_t tracepos, __unused uint32_t tracebound, __unused int nframes, __unused boolean_t user_p) {
+	return 0;
+}