+/*
+ * Return the program counter that the PMI interrupted, unslid if it is a
+ * kernel address, and set *kernel_out to whether the interrupted context was
+ * running in the kernel.  Returns 0 if no saved interrupt state is available.
+ */
+static uintptr_t
+get_interrupted_pc(bool *kernel_out)
+{
+	x86_saved_state_t *state = current_cpu_datap()->cpu_int_state;
+	if (!state) {
+		/* No saved interrupt state; leave *kernel_out at the caller's default. */
+		return 0;
+	}
+
+	bool state_64 = is_saved_state64(state);
+	uint64_t cs;
+	if (state_64) {
+		cs = saved_state64(state)->isf.cs;
+	} else {
+		cs = saved_state32(state)->cs;
+	}
+	/* The privilege level of the interrupted code segment selector tells us
+	 * whether the sample landed in kernel or user code. */
+	bool kernel = (cs & SEL_PL) != SEL_PL_U;
+	*kernel_out = kernel;
+
+	uintptr_t pc = 0;
+	if (state_64) {
+		pc = saved_state64(state)->isf.rip;
+	} else {
+		pc = saved_state32(state)->eip;
+	}
+	if (kernel) {
+		/* Remove the KASLR slide from kernel addresses. */
+		pc = VM_KERNEL_UNSLIDE(pc);
+	}
+	return pc;
+}
+
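+/*
+ * Report a PMI on counter `ctr` to kperf, tagging the sample with the
+ * interrupted PC and with which privilege levels the counter was configured
+ * to count in.
+ */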
+static void
+kpc_sample_kperf_x86(uint32_t ctr, uint32_t actionid, uint64_t count,
+    uint64_t config)
+{
+	bool kernel = false;
+	uintptr_t pc = get_interrupted_pc(&kernel);
+	kperf_kpc_flags_t flags = kernel ? KPC_KERNEL_PC : 0;
+	/* Note which privilege levels the counter was configured to count in. */
+	if (config & IA32_PERFEVT_USER_EN) {
+		flags |= KPC_USER_COUNTING;
+	}
+	if (config & IA32_PERFEVT_OS_EN) {
+		flags |= KPC_KERNEL_COUNTING;
+	}
+	kpc_sample_kperf(actionid, ctr,
+	    config & 0xffff /* just the event number and umask */, count, pc, flags);
+}
+
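+/*
+ * Handle a performance monitoring interrupt from the PMU.
+ */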
+void
+kpc_pmi_handler(void)