author     Yunhong Jiang <yunhong.jiang@intel.com>    2015-08-04 12:17:53 -0700
committer  Yunhong Jiang <yunhong.jiang@intel.com>    2015-08-04 15:44:42 -0700
commit     9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00
tree       1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/arch/x86/oprofile/op_model_ppro.c
parent     98260f3884f4a202f9ca5eabed40b1354c489b29
Add the rt linux 4.1.3-rt3 as base
Import rt Linux 4.1.3-rt3 as the OPNFV KVM base.
It is taken from the linux-4.1.y-rt branch of
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git, and
the base is:
commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat Jul 25 12:13:34 2015 +0200
Prepare v4.1.3-rt3
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
We lose all of the git history this way, which is not ideal; we
should switch to a separate OPNFV project repo in the future.
Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/arch/x86/oprofile/op_model_ppro.c')
-rw-r--r--   kernel/arch/x86/oprofile/op_model_ppro.c   245
1 file changed, 245 insertions, 0 deletions
diff --git a/kernel/arch/x86/oprofile/op_model_ppro.c b/kernel/arch/x86/oprofile/op_model_ppro.c
new file mode 100644
index 000000000..d90528ea5
--- /dev/null
+++ b/kernel/arch/x86/oprofile/op_model_ppro.c
@@ -0,0 +1,245 @@
+/*
+ * @file op_model_ppro.h
+ * Family 6 perfmon and architectural perfmon MSR operations
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2008 Intel Corporation
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ * @author Philippe Elie
+ * @author Graydon Hoare
+ * @author Andi Kleen
+ * @author Robert Richter <robert.richter@amd.com>
+ */
+
+#include <linux/oprofile.h>
+#include <linux/slab.h>
+#include <asm/ptrace.h>
+#include <asm/msr.h>
+#include <asm/apic.h>
+#include <asm/nmi.h>
+
+#include "op_x86_model.h"
+#include "op_counter.h"
+
+static int num_counters = 2;
+static int counter_width = 32;
+
+#define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21))
+
+static u64 reset_value[OP_MAX_COUNTER];
+
+static void ppro_shutdown(struct op_msrs const * const msrs)
+{
+	int i;
+
+	for (i = 0; i < num_counters; ++i) {
+		if (!msrs->counters[i].addr)
+			continue;
+		release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
+		release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
+	}
+}
+
+static int ppro_fill_in_addresses(struct op_msrs * const msrs)
+{
+	int i;
+
+	for (i = 0; i < num_counters; i++) {
+		if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
+			goto fail;
+		if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
+			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
+			goto fail;
+		}
+		/* both registers must be reserved */
+		msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
+		msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
+		continue;
+	fail:
+		if (!counter_config[i].enabled)
+			continue;
+		op_x86_warn_reserved(i);
+		ppro_shutdown(msrs);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+
+static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
+			    struct op_msrs const * const msrs)
+{
+	u64 val;
+	int i;
+
+	if (cpu_has_arch_perfmon) {
+		union cpuid10_eax eax;
+		eax.full = cpuid_eax(0xa);
+
+		/*
+		 * For Core2 (family 6, model 15), don't reset the
+		 * counter width:
+		 */
+		if (!(eax.split.version_id == 0 &&
+			__this_cpu_read(cpu_info.x86) == 6 &&
+			__this_cpu_read(cpu_info.x86_model) == 15)) {
+
+			if (counter_width < eax.split.bit_width)
+				counter_width = eax.split.bit_width;
+		}
+	}
+
+	/* clear all counters */
+	for (i = 0; i < num_counters; ++i) {
+		if (!msrs->controls[i].addr)
+			continue;
+		rdmsrl(msrs->controls[i].addr, val);
+		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
+			op_x86_warn_in_use(i);
+		val &= model->reserved;
+		wrmsrl(msrs->controls[i].addr, val);
+		/*
+		 * avoid a false detection of ctr overflows in NMI *
+		 * handler
+		 */
+		wrmsrl(msrs->counters[i].addr, -1LL);
+	}
+
+	/* enable active counters */
+	for (i = 0; i < num_counters; ++i) {
+		if (counter_config[i].enabled && msrs->counters[i].addr) {
+			reset_value[i] = counter_config[i].count;
+			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
+			rdmsrl(msrs->controls[i].addr, val);
+			val &= model->reserved;
+			val |= op_x86_get_ctrl(model, &counter_config[i]);
+			wrmsrl(msrs->controls[i].addr, val);
+		} else {
+			reset_value[i] = 0;
+		}
+	}
+}
+
+
+static int ppro_check_ctrs(struct pt_regs * const regs,
+			   struct op_msrs const * const msrs)
+{
+	u64 val;
+	int i;
+
+	for (i = 0; i < num_counters; ++i) {
+		if (!reset_value[i])
+			continue;
+		rdmsrl(msrs->counters[i].addr, val);
+		if (val & (1ULL << (counter_width - 1)))
+			continue;
+		oprofile_add_sample(regs, i);
+		wrmsrl(msrs->counters[i].addr, -reset_value[i]);
+	}
+
+	/* Only P6 based Pentium M need to re-unmask the apic vector but it
+	 * doesn't hurt other P6 variant */
+	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
+
+	/* We can't work out if we really handled an interrupt. We
+	 * might have caught a *second* counter just after overflowing
+	 * the interrupt for this counter then arrives
+	 * and we don't find a counter that's overflowed, so we
+	 * would return 0 and get dazed + confused. Instead we always
+	 * assume we found an overflow. This sucks.
+	 */
+	return 1;
+}
+
+
+static void ppro_start(struct op_msrs const * const msrs)
+{
+	u64 val;
+	int i;
+
+	for (i = 0; i < num_counters; ++i) {
+		if (reset_value[i]) {
+			rdmsrl(msrs->controls[i].addr, val);
+			val |= ARCH_PERFMON_EVENTSEL_ENABLE;
+			wrmsrl(msrs->controls[i].addr, val);
+		}
+	}
+}
+
+
+static void ppro_stop(struct op_msrs const * const msrs)
+{
+	u64 val;
+	int i;
+
+	for (i = 0; i < num_counters; ++i) {
+		if (!reset_value[i])
+			continue;
+		rdmsrl(msrs->controls[i].addr, val);
+		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+		wrmsrl(msrs->controls[i].addr, val);
+	}
+}
+
+struct op_x86_model_spec op_ppro_spec = {
+	.num_counters = 2,
+	.num_controls = 2,
+	.reserved = MSR_PPRO_EVENTSEL_RESERVED,
+	.fill_in_addresses = &ppro_fill_in_addresses,
+	.setup_ctrs = &ppro_setup_ctrs,
+	.check_ctrs = &ppro_check_ctrs,
+	.start = &ppro_start,
+	.stop = &ppro_stop,
+	.shutdown = &ppro_shutdown
+};
+
+/*
+ * Architectural performance monitoring.
+ *
+ * Newer Intel CPUs (Core1+) have support for architectural
+ * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details.
+ * The advantage of this is that it can be done without knowing about
+ * the specific CPU.
+ */
+
+static void arch_perfmon_setup_counters(void)
+{
+	union cpuid10_eax eax;
+
+	eax.full = cpuid_eax(0xa);
+
+	/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
+	if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
+		__this_cpu_read(cpu_info.x86_model) == 15) {
+		eax.split.version_id = 2;
+		eax.split.num_counters = 2;
+		eax.split.bit_width = 40;
+	}
+
+	num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
+
+	op_arch_perfmon_spec.num_counters = num_counters;
+	op_arch_perfmon_spec.num_controls = num_counters;
+}
+
+static int arch_perfmon_init(struct oprofile_operations *ignore)
+{
+	arch_perfmon_setup_counters();
+	return 0;
+}
+
+struct op_x86_model_spec op_arch_perfmon_spec = {
+	.reserved = MSR_PPRO_EVENTSEL_RESERVED,
+	.init = &arch_perfmon_init,
+	/* num_counters/num_controls filled in at runtime */
+	.fill_in_addresses = &ppro_fill_in_addresses,
+	/* user space does the cpuid check for available events */
+	.setup_ctrs = &ppro_setup_ctrs,
+	.check_ctrs = &ppro_check_ctrs,
+	.start = &ppro_start,
+	.stop = &ppro_stop,
+	.shutdown = &ppro_shutdown
+};