/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/* #pragma ident "@(#)fbt.c 1.15 05/09/19 SMI" */

#ifdef KERNEL
#ifndef _KERNEL
#define _KERNEL /* Solaris vs. Darwin */
#endif
#endif

#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from
					* mach/ppc/thread_status.h */
#include <kern/thread.h>
#include <mach/thread_status.h>
#include <arm/proc_reg.h>
#include <arm/caches_internal.h>
#include <arm/thread.h>

#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <libkern/kernel_mach_header.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <miscfs/devfs/devfs.h>

#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <sys/fbt.h>

#include <sys/dtrace_glue.h>

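/*
 * Values stored in fbtp_rval to classify each patch point: entry probes
 * replace the prologue "push {..., lr}", return probes replace the
 * epilogue "pop {..., pc}" (or the bx that follows a pop of r7/lr).
 */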
#define DTRACE_INVOP_PUSH_LR 8
#define DTRACE_INVOP_BL 9
#define DTRACE_INVOP_POP_PC 10

#define DTRACE_INVOP_THUMB_NOP_SKIP 2
#define DTRACE_INVOP_POP_PC_SKIP 2
#define DTRACE_INVOP_THUMB_SET_R7_SKIP 2
#define DTRACE_INVOP_THUMB_MOV_SP_TO_R7_SKIP 2

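/*
 * The following macros match prologue/epilogue instructions by their Thumb
 * encodings: push {..., lr}, pop of a register list that includes r7,
 * pop {..., pc}, "add r7, sp, #imm", "mov r7, sp", pc-relative ldr
 * (16-bit and the 32-bit ldr.w literal form), and "bx <reg>".
 * FBT_THUMB_STACK_REGS extracts the low-register list (r0-r7) from a
 * push/pop encoding.
 */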
#define FBT_IS_THUMB_PUSH_LR(x)		(((x) & 0x0000ff00) == 0x0000b500)
#define FBT_IS_THUMB_POP_R7(x)		(((x) & 0x0000ff80) == 0x0000bc80)
#define FBT_IS_THUMB32_POP_R7LR(x,y)	(((x) == 0x0000e8bd) && (((y) & 0x00004080) == 0x00004080))
#define FBT_IS_THUMB_POP_PC(x)		(((x) & 0x0000ff00) == 0x0000bd00)
#define FBT_IS_THUMB_SET_R7(x)		(((x) & 0x0000ff00) == 0x0000af00)
#define FBT_IS_THUMB_MOV_SP_TO_R7(x)	(((x) & 0x0000ffff) == 0x0000466f)
#define FBT_THUMB_SET_R7_OFFSET(x)	(((x) & 0x000000ff) << 2)
#define FBT_IS_THUMB_LDR_PC(x)		(((x) & 0x0000f800) == 0x00004800)
#define FBT_IS_THUMB32_LDR_PC(x,y)	((x) == 0x0000f8df) /* Only for positive offset PC relative loads */
#define FBT_THUMB_STACK_REGS(x)		((x) & 0x00FF)
#define FBT_IS_THUMB_BX_REG(x)		(((x) & 0x0000ff87) == 0x00004700)

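/*
 * The patch value is a permanently undefined Thumb encoding (UDF #0xfc).
 * Writing it over a prologue/epilogue instruction raises an undefined
 * instruction exception, which is fielded by fbt_perfCallback() below.
 */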
#define FBT_PATCHVAL		0xdefc
#define FBT_AFRAMES_ENTRY	8
#define FBT_AFRAMES_RETURN	6

#define FBT_ENTRY	"entry"
#define FBT_RETURN	"return"
#define FBT_ADDR2NDX(addr)	((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask)

#define VFPSAVE_ALIGN_DTRACE	16 /* This value should come from VFPSAVE_ALIGN */

extern dtrace_provider_id_t fbt_id;
extern fbt_probe_t **fbt_probetab;
extern int fbt_probetab_mask;

kern_return_t fbt_perfCallback(int, struct arm_saved_state *, __unused int, __unused int);

static int fbt_uninstrumented_arm = 0;
static const int fbt_log_uninstrumented = 0;

extern int dtrace_arm_condition_true(int cond, int cpsr);


/* Calculate the address of the ldr. (From the ARM Architecture reference) */
/* Does not check to see if it's really a load instruction, caller must do that */

static uint32_t thumb_ldr_pc_address(uint32_t address)
{
	return (address & 0xFFFFFFFC) + (*(uint16_t*) address & 0xFF) * 4 + 4;
}

static uint32_t thumb32_ldr_pc_address(uint32_t address)
{
	return (address & 0xFFFFFFFC) + (*(uint16_t*) (address+2) & 0xFFF) + 4;
}

/* Extract the current ITSTATE from the CPSR */
static uint32_t get_itstate(uint32_t cpsr)
{
	return
		((cpsr & 0x06000000) >> 25) |
		((cpsr & 0x0000FC00) >> 8);
}

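/*
 * Clear the IT bits so that, once the patched instruction has been handled,
 * execution does not resume as though it were still inside an IT block.
 */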
static void clear_itstate(uint32_t* cpsr)
{
	*cpsr &= ~0x0600FC00;
}

int
fbt_invop(uintptr_t addr, uintptr_t * stack, uintptr_t rval)
{
	fbt_probe_t *fbt = fbt_probetab[FBT_ADDR2NDX(addr)];

	for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
		if ((uintptr_t) fbt->fbtp_patchpoint == addr) {
			if (0 == CPU->cpu_dtrace_invop_underway) {
				CPU->cpu_dtrace_invop_underway = 1; /* Race not possible on
								     * this per-cpu state */

				struct arm_saved_state* regs = (struct arm_saved_state*) stack;
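				/*
				 * r0-r3 carry the first four arguments; the fifth was
				 * spilled to the stack by the caller, so fetch it here
				 * for dtrace_probe()'s last argument.
				 */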
				uintptr_t stack4 = *((uintptr_t*) regs->sp);

				if ((regs->cpsr & PSR_MODE_MASK) == PSR_FIQ_MODE) {
					/*
					 * We do not handle probes firing from FIQ context. We used to
					 * try to undo the patch and rerun the instruction, but
					 * most of the time we can't do that successfully anyway.
					 * Instead, we just panic now so we fail fast.
					 */
					panic("dtrace: fbt: The probe at %08x was called from FIQ_MODE",(unsigned) addr);
				}

				/*
				 * If we are inside an IT block but not executing its last
				 * instruction, that is an instrumentation error or a code-gen
				 * error. Either way, we panic.
				 */
				uint32_t itstate = get_itstate(regs->cpsr);
				if ((itstate & 0x7) != 0) {
					panic("dtrace: fbt: Instruction stream error: Middle of IT block at %08x",(unsigned) addr);
				}

				if (fbt->fbtp_roffset == 0) {
					/*
					   We need the frames to set up the backtrace, but we won't have the frame pointers
					   until after the instruction is emulated. So here we calculate the address of the
					   frame pointer from the saved instruction and put it in the stack. Yes, we end up
					   repeating this work again when we emulate the instruction.

					   This assumes that the frame area is immediately after the saved reg storage!
					 */
					uint32_t offset = ((uint32_t) regs) + sizeof(struct arm_saved_state);
#if __ARM_VFP__
					/* Match the stack alignment required for arm_vfpsaved_state */
					offset &= ~(VFPSAVE_ALIGN_DTRACE - 1);
					offset += VFPSAVE_ALIGN_DTRACE + sizeof(struct arm_vfpsaved_state);
#endif /* __ARM_VFP__ */
					if (FBT_IS_THUMB_SET_R7(fbt->fbtp_savedval))
						*((uint32_t*) offset) = regs->sp + FBT_THUMB_SET_R7_OFFSET(fbt->fbtp_savedval);
					else
						*((uint32_t*) offset) = regs->sp;

					CPU->cpu_dtrace_caller = regs->lr;
					dtrace_probe(fbt->fbtp_id, regs->r[0], regs->r[1], regs->r[2], regs->r[3], stack4);
					CPU->cpu_dtrace_caller = 0;
				} else {
					/* Check to see if we're in the middle of an IT block. */
					if (itstate != 0) {
						/*
						 * We've already checked previously to see how far we are in the IT block.
						 * Here we must be getting ready to execute the last instruction.
						 */
						int condition_it = (itstate & 0xF0) >> 4;

						if (dtrace_arm_condition_true(condition_it, regs->cpsr) == 0) {
							/* Condition wasn't true, so becomes a nop. */
							clear_itstate(&regs->cpsr);
							CPU->cpu_dtrace_invop_underway = 0;
							return DTRACE_INVOP_NOP;
						}
					}

					dtrace_probe(fbt->fbtp_id, fbt->fbtp_roffset, rval, 0, 0, 0);
					CPU->cpu_dtrace_caller = 0;

					/* The dtrace script may access cpsr, so make sure to clear only after probe fired. */
					clear_itstate(&regs->cpsr);
				}
				CPU->cpu_dtrace_invop_underway = 0;
			}

			/*
			   On other architectures, we return a DTRACE constant to let the callback function
			   know what was replaced. On the ARM, since the function prologue/epilogue machine code
			   can vary, we need the actual bytes of the instruction, so return the savedval instead.
			 */
			return (fbt->fbtp_savedval);
		}
	}

	return (0);
}

#define IS_USER_TRAP(regs) (((regs)->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)
#define T_INVALID_OPCODE EXC_BAD_INSTRUCTION
#define FBT_EXCEPTION_CODE T_INVALID_OPCODE

kern_return_t
fbt_perfCallback(
	int trapno,
	struct arm_saved_state * regs,
	__unused int unused1,
	__unused int unused2)
{
#pragma unused (unused1)
#pragma unused (unused2)
	kern_return_t retval = KERN_FAILURE;

	if (FBT_EXCEPTION_CODE == trapno && !IS_USER_TRAP(regs)) {
		boolean_t oldlevel = 0;
		machine_inst_t emul = 0;

		oldlevel = ml_set_interrupts_enabled(FALSE);

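		/*
		 * The labels emitted by these two asm blocks bracket the call to
		 * dtrace_invop(); the stack walking code uses
		 * dtrace_invop_callsite_pre/_post to recognize frames created by
		 * this probe dispatch.
		 */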
		__asm__ volatile(
			"Ldtrace_invop_callsite_pre_label:\n"
			".data\n"
			".private_extern _dtrace_invop_callsite_pre\n"
			"_dtrace_invop_callsite_pre:\n"
			" .long Ldtrace_invop_callsite_pre_label\n"
			".text\n"
		);

		emul = dtrace_invop(regs->pc, (uintptr_t*) regs, regs->r[0]);

		__asm__ volatile(
			"Ldtrace_invop_callsite_post_label:\n"
			".data\n"
			".private_extern _dtrace_invop_callsite_post\n"
			"_dtrace_invop_callsite_post:\n"
			" .long Ldtrace_invop_callsite_post_label\n"
			".text\n"
		);

		/*
		 * The following emulation code does not execute properly if we are in the middle of
		 * an IT block. IT blocks need to be handled in the dtrace_invop function. If we do
		 * manage to get here and we are inside an IT block, then we missed a case somewhere
		 * prior to this point.
		 */
		uint32_t itstate = get_itstate(regs->cpsr);
		if (itstate != 0) {
			panic("dtrace: fbt: Not emulated: Middle of IT block at %08x",(unsigned) regs->pc);
		}

		if (emul == DTRACE_INVOP_NOP) {
			regs->pc += DTRACE_INVOP_THUMB_NOP_SKIP;
			retval = KERN_SUCCESS;
		} else if (FBT_IS_THUMB_SET_R7(emul)) {
			regs->r[7] = regs->sp + FBT_THUMB_SET_R7_OFFSET(emul);
			regs->pc += DTRACE_INVOP_THUMB_SET_R7_SKIP;
			retval = KERN_SUCCESS;
		} else if (FBT_IS_THUMB_MOV_SP_TO_R7(emul)) {
			regs->r[7] = regs->sp;
			regs->pc += DTRACE_INVOP_THUMB_MOV_SP_TO_R7_SKIP;
			retval = KERN_SUCCESS;
		} else if (FBT_IS_THUMB_POP_PC(emul)) {
			uintptr_t* sp = (uintptr_t*) regs->sp;

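			/*
			 * Thumb POP encodes r0-r7 in bits [7:0] and the PC in bit 8;
			 * restore each listed low register, then pop the return address.
			 */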
			machine_inst_t mask = 0x0001;
			int regnum = 0;
			while (mask & 0x00ff) {
				if (emul & mask) {
					/* Pop this register */
					regs->r[regnum] = *sp++;
				}
				mask <<= 1;
				regnum++;
			}

			regs->pc = *sp++;
			regs->sp = (uintptr_t) sp;
			if (regs->pc & 1) {
				regs->cpsr |= PSR_TF;
			} else {
				regs->cpsr &= ~PSR_TF;
			}

			retval = KERN_SUCCESS;
		} else if (FBT_IS_THUMB_BX_REG(emul)) {
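			/* BX <reg>: bits [6:3] name the register; bit 0 of the target selects Thumb state. */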
			regs->pc = regs->r[(emul >> 3) & 0xF];

			if (regs->pc & 1) {
				regs->cpsr |= PSR_TF;
			} else {
				regs->cpsr &= ~PSR_TF;
			}

			retval = KERN_SUCCESS;
		} else if (emul == FBT_PATCHVAL) {
			/* Means we encountered an error but handled it, try same inst again */
			retval = KERN_SUCCESS;
		} else {
			retval = KERN_FAILURE;
		}

		ml_set_interrupts_enabled(oldlevel);
	}

	return retval;
}

void
fbt_provide_probe(struct modctl *ctl, uintptr_t instrLow, uintptr_t instrHigh, char *modname, char* symbolName, machine_inst_t* symbolStart)
{
	unsigned int j;
	int doenable = 0;
	dtrace_id_t thisid;

	fbt_probe_t *newfbt, *retfbt, *entryfbt;
	machine_inst_t *instr, *pushinstr = NULL, *limit, theInstr;
	int foundPushLR, savedRegs;

	/*
	 * Guard against null symbols
	 */
	if (!symbolStart || !instrLow || !instrHigh) {
		kprintf("dtrace: %s has an invalid address\n", symbolName);
		return;
	}

	/*
	 * Assume the compiler doesn't schedule instructions in the prologue.
	 */
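	/*
	 * The expected Thumb frame setup is:
	 *     push {..., lr}
	 *     add  r7, sp, #imm        (or: mov r7, sp)
	 * Scan the first few instructions (at most eight) for that pair.
	 */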
	foundPushLR = 0;
	savedRegs = -1;
	limit = (machine_inst_t *)instrHigh;
	for (j = 0, instr = symbolStart, theInstr = 0;
	     (j < 8) && ((uintptr_t)instr >= instrLow) && (instrHigh > (uintptr_t)(instr)); j++, instr++)
	{
		theInstr = *instr;
		if (FBT_IS_THUMB_PUSH_LR(theInstr)) {
			foundPushLR = 1;
			/* Keep track of what registers we pushed. Compare this against the pop later. */
			savedRegs = FBT_THUMB_STACK_REGS(theInstr);
			pushinstr = instr;
		}
		if (foundPushLR && (FBT_IS_THUMB_SET_R7(theInstr) || FBT_IS_THUMB_MOV_SP_TO_R7(theInstr)))
			/* Guard against a random setting of r7 from sp, we make sure we found the push first */
			break;
		if (FBT_IS_THUMB_BX_REG(theInstr)) /* We've gone too far, bail. */
			break;
		if (FBT_IS_THUMB_POP_PC(theInstr)) /* We've gone too far, bail. */
			break;

		/* Check for 4 byte thumb instruction */
		if (dtrace_instr_size(theInstr,1) == 4)
			instr++;
	}

	if (!(foundPushLR && (FBT_IS_THUMB_SET_R7(theInstr) || FBT_IS_THUMB_MOV_SP_TO_R7(theInstr)))) {
		return;
	}

	thisid = dtrace_probe_lookup(fbt_id, modname, symbolName, FBT_ENTRY);
	newfbt = kmem_zalloc(sizeof(fbt_probe_t), KM_SLEEP);
	newfbt->fbtp_next = NULL;
	strlcpy( (char *)&(newfbt->fbtp_name), symbolName, MAX_FBTP_NAME_CHARS );

	if (thisid != 0) {
		/*
		 * The dtrace_probe previously existed, so we have to hook
		 * the newfbt entry onto the end of the existing fbt's
		 * chain.
		 * If we find an fbt entry that was previously patched to
		 * fire, (as indicated by the current patched value), then
		 * we want to enable this newfbt on the spot.
		 */
		entryfbt = dtrace_probe_arg (fbt_id, thisid);
		ASSERT (entryfbt != NULL);
		for(; entryfbt != NULL; entryfbt = entryfbt->fbtp_next) {
			if (entryfbt->fbtp_currentval == entryfbt->fbtp_patchval)
				doenable++;

			if (entryfbt->fbtp_next == NULL) {
				entryfbt->fbtp_next = newfbt;
				newfbt->fbtp_id = entryfbt->fbtp_id;
				break;
			}
		}
	}
	else {
		/*
		 * The dtrace_probe did not previously exist, so we
		 * create it and hook in the newfbt. Since the probe is
		 * new, we obviously do not need to enable it on the spot.
		 */
		newfbt->fbtp_id = dtrace_probe_create(fbt_id, modname, symbolName, FBT_ENTRY, FBT_AFRAMES_ENTRY, newfbt);
		doenable = 0;
	}

	newfbt->fbtp_patchpoint = instr;
	newfbt->fbtp_ctl = ctl;
	newfbt->fbtp_loadcnt = ctl->mod_loadcnt;
	newfbt->fbtp_rval = DTRACE_INVOP_PUSH_LR;
	newfbt->fbtp_savedval = theInstr;
	newfbt->fbtp_patchval = FBT_PATCHVAL;
	newfbt->fbtp_currentval = 0;
	newfbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = newfbt;

	if (doenable)
		fbt_enable(NULL, newfbt->fbtp_id, newfbt);

	/*
	 * The fbt entry chain is in place, one entry point per symbol.
	 * The fbt return chain can have multiple return points per
	 * symbol.
	 * Here we find the end of the fbt return chain.
	 */

	doenable=0;

	thisid = dtrace_probe_lookup(fbt_id, modname, symbolName, FBT_RETURN);

	if (thisid != 0) {
		/* The dtrace_probe previously existed, so we have to
		 * find the end of the existing fbt chain. If we find
		 * an fbt return that was previously patched to fire,
		 * (as indicated by the current patched value), then
		 * we want to enable any new fbts on the spot.
		 */
		retfbt = dtrace_probe_arg (fbt_id, thisid);
		ASSERT(retfbt != NULL);
		for (; retfbt != NULL; retfbt = retfbt->fbtp_next) {
			if (retfbt->fbtp_currentval == retfbt->fbtp_patchval)
				doenable++;
			if(retfbt->fbtp_next == NULL)
				break;
		}
	}
	else {
		doenable = 0;
		retfbt = NULL;
	}

	/*
	 * Go back to the start of the function, in case
	 * the compiler emitted pcrel data loads
	 * before R7 was adjusted.
	 */
	instr = pushinstr + 1;
again:
	if (instr >= limit)
		return;

	/*
	 * We (desperately) want to avoid erroneously instrumenting a
	 * jump table. To determine if we're looking at a true instruction
	 * or an inline jump table that happens to contain the same
	 * byte sequences, we resort to some heuristic sleaze: we
	 * treat this instruction as being contained within a pointer,
	 * and see if that pointer points to within the body of the
	 * function. If it does, we refuse to instrument it.
	 */
	if (((uintptr_t)instr & 0x3) == 0) {
		machine_inst_t *ptr = *(machine_inst_t **)(void *)instr;

		if (ptr >= (machine_inst_t *)symbolStart && ptr < limit) {
			/* kprintf("dtrace: fbt: Found jump table in %s, at %08x\n",symbolName,(unsigned)instr); */
			instr++;
			goto again;
		}
	}

	/*
	 * OK, it's an instruction.
	 */
	theInstr = *instr;

	/* Walked onto the start of the next routine? If so, bail out from this function */
	if (FBT_IS_THUMB_PUSH_LR(theInstr)) {
		if (!retfbt)
			kprintf("dtrace: fbt: No return probe for %s, walked to next routine at %08x\n",symbolName,(unsigned)instr);
		return;
	}

	/* The PC relative data should be stored after the end of the function. If
	 * we see a PC relative load, assume the address to load from is the new end
	 * of the function. */
	if (FBT_IS_THUMB_LDR_PC(theInstr)) {
		uint32_t newlimit = thumb_ldr_pc_address((uint32_t) instr);
		if (newlimit < (uint32_t) limit)
			limit = (machine_inst_t*) newlimit;
	}
	if ((instr+1) < limit && FBT_IS_THUMB32_LDR_PC(*instr,*(instr+1))) {
		uint32_t newlimit = thumb32_ldr_pc_address((uint32_t) instr);
		if (newlimit < (uint32_t) limit)
			limit = (machine_inst_t*) newlimit;
	}

	/* Look for the 1. pop { ..., pc } or 2. pop { ..., r7 } ... bx reg or 3. ldmia.w sp!, { ..., r7, lr } ... bx reg */
	if (!FBT_IS_THUMB_POP_PC(theInstr) &&
	    !FBT_IS_THUMB_POP_R7(theInstr) &&
	    !FBT_IS_THUMB32_POP_R7LR(theInstr,*(instr+1))) {
		instr++;
		if (dtrace_instr_size(theInstr,1) == 4)
			instr++;
		goto again;
	}

	if (FBT_IS_THUMB_POP_PC(theInstr)) {
		if (savedRegs != FBT_THUMB_STACK_REGS(theInstr)) {
			/* What we're popping doesn't match what we're pushing, assume that we've
			 * gone too far in the function. Bail.
			 */
			kprintf("dtrace: fbt: No return probe for %s, popped regs don't match at %08x\n",symbolName,(unsigned)instr);
			return;
		}
	} else {
		/* Scan ahead for the bx */
		for (j = 0; (j < 4) && (instr < limit); j++, instr++) {
			theInstr = *instr;
			if (FBT_IS_THUMB_BX_REG(theInstr))
				break;
			if (dtrace_instr_size(theInstr,1) == 4)
				instr++;
		}

		if (!FBT_IS_THUMB_BX_REG(theInstr))
			return;
	}

	/*
	 * pop { ..., pc}, bx reg -- We have a winner!
	 */

	newfbt = kmem_zalloc(sizeof(fbt_probe_t), KM_SLEEP);
	newfbt->fbtp_next = NULL;
	strlcpy( (char *)&(newfbt->fbtp_name), symbolName, MAX_FBTP_NAME_CHARS );

	if (retfbt == NULL) {
		newfbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
		    symbolName, FBT_RETURN, FBT_AFRAMES_RETURN, newfbt);
	} else {
		retfbt->fbtp_next = newfbt;
		newfbt->fbtp_id = retfbt->fbtp_id;
	}

	retfbt = newfbt;
	newfbt->fbtp_patchpoint = instr;
	newfbt->fbtp_ctl = ctl;
	newfbt->fbtp_loadcnt = ctl->mod_loadcnt;

	ASSERT(FBT_IS_THUMB_POP_PC(theInstr) || FBT_IS_THUMB_BX_REG(theInstr));
	newfbt->fbtp_rval = DTRACE_INVOP_POP_PC;
	newfbt->fbtp_roffset =
	    (uintptr_t) ((uint8_t*) instr - (uint8_t *)symbolStart);
	newfbt->fbtp_savedval = theInstr;
	newfbt->fbtp_patchval = FBT_PATCHVAL;
	newfbt->fbtp_currentval = 0;
	newfbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = newfbt;

	if (doenable)
		fbt_enable(NULL, newfbt->fbtp_id, newfbt);

	instr++;
	goto again;
}

void
fbt_provide_module_kernel_syms(struct modctl *ctl)
{
	kernel_mach_header_t *mh;
	struct load_command *cmd;
	kernel_segment_command_t *orig_ts = NULL, *orig_le = NULL;
	struct symtab_command *orig_st = NULL;
	kernel_nlist_t *sym = NULL;
	char *strings;
	uintptr_t instrLow, instrHigh;
	char *modname;
	unsigned int i;

	mh = (kernel_mach_header_t *)(ctl->mod_address);
	modname = ctl->mod_modname;

	/*
	 * Employees of dtrace and their families are ineligible. Void
	 * where prohibited.
	 */

	if (mh->magic != MH_MAGIC_KERNEL)
		return;

	cmd = (struct load_command *) &mh[1];
	for (i = 0; i < mh->ncmds; i++) {
		if (cmd->cmd == LC_SEGMENT_KERNEL) {
			kernel_segment_command_t *orig_sg = (kernel_segment_command_t *) cmd;

			if (LIT_STRNEQL(orig_sg->segname, SEG_TEXT))
				orig_ts = orig_sg;
			else if (LIT_STRNEQL(orig_sg->segname, SEG_LINKEDIT))
				orig_le = orig_sg;
			else if (LIT_STRNEQL(orig_sg->segname, ""))
				orig_ts = orig_sg; /* kexts have a single
						    * unnamed segment */
		} else if (cmd->cmd == LC_SYMTAB)
			orig_st = (struct symtab_command *) cmd;

		cmd = (struct load_command *) ((caddr_t) cmd + cmd->cmdsize);
	}

	if ((orig_ts == NULL) || (orig_st == NULL) || (orig_le == NULL))
		return;

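	/*
	 * The symbol and string tables live in __LINKEDIT; convert their file
	 * offsets into addresses using the segment's vmaddr/fileoff delta.
	 */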
	sym = (kernel_nlist_t *)(orig_le->vmaddr + orig_st->symoff - orig_le->fileoff);
	strings = (char *)(orig_le->vmaddr + orig_st->stroff - orig_le->fileoff);

	/* Find extent of the TEXT section */
	instrLow = (uintptr_t) orig_ts->vmaddr;
	instrHigh = (uintptr_t) (orig_ts->vmaddr + orig_ts->vmsize);

	for (i = 0; i < orig_st->nsyms; i++) {
		uint8_t n_type = sym[i].n_type & (N_TYPE | N_EXT);
		char *name = strings + sym[i].n_un.n_strx;

		/* Check that the symbol is a global and that it has a name. */
		if (((N_SECT | N_EXT) != n_type && (N_ABS | N_EXT) != n_type))
			continue;

		if (0 == sym[i].n_un.n_strx) /* iff a null, "", name. */
			continue;

		/* Lop off omnipresent leading underscore. */
		if (*name == '_')
			name += 1;


		if (sym[i].n_sect == 1 && !(sym[i].n_desc & N_ARM_THUMB_DEF)) {
			/* A function but not a Thumb function */
			fbt_uninstrumented_arm++;
			if (fbt_log_uninstrumented)
				kprintf("dtrace: fbt: Skipping ARM mode function %s at %08x\n",name,(unsigned)sym[i].n_value);

			continue;
		}

		/*
		 * We're only blacklisting functions in the kernel for now.
		 */
		if (MOD_IS_MACH_KERNEL(ctl) && fbt_excluded(name))
			continue;

		fbt_provide_probe(ctl, instrLow, instrHigh, modname, name, (machine_inst_t*)sym[i].n_value);
	}
}