perf_counter, x86: rework counter disable functions
author		Robert Richter <robert.richter@amd.com>
		Wed, 29 Apr 2009 10:47:19 +0000 (12:47 +0200)
committer	Ingo Molnar <mingo@elte.hu>
		Wed, 29 Apr 2009 12:51:11 +0000 (14:51 +0200)
As with the enable function, this patch reworks the disable functions
and introduces x86_pmu_disable_counter(). The internal function
interface in struct x86_pmu has changed too (a standalone sketch of the
new interface follows the sign-offs below).

[ Impact: refactor and generalize code ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-23-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
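
For readers outside the kernel tree, here is a minimal userspace sketch of
the reworked dispatch, assuming stub types and a printf() in place of the
real MSR write; the names x86_pmu_disable_counter(), hw_perf_counter and
config_base follow the patch, everything else is scaffolding:

	/* Standalone sketch (not kernel code): models the reworked disable
	 * dispatch. Types are stubbed and checking_wrmsrl() just prints. */
	#include <stdio.h>
	#include <stdint.h>

	struct hw_perf_counter {
		uint64_t	config;		/* eventsel value, without the enable bit */
		unsigned	config_base;	/* base MSR of the per-vendor eventsel bank */
	};

	struct x86_pmu {
		/* new interface: takes the counter itself instead of (idx, config) */
		void	(*disable)(struct hw_perf_counter *, int);
	};

	/* stub: the kernel helper writes the MSR and checks for faults */
	static int checking_wrmsrl(unsigned msr, uint64_t val)
	{
		printf("wrmsrl(0x%x, 0x%llx)\n", msr, (unsigned long long)val);
		return 0;
	}

	/*
	 * Generalized helper: enable writes config | ENABLE-bit, so writing
	 * plain hwc->config clears the enable bit. It works for either
	 * vendor because the counter carries its own config_base.
	 */
	static void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
	{
		checking_wrmsrl(hwc->config_base + idx, hwc->config);
	}

	static void amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
	{
		x86_pmu_disable_counter(hwc, idx);
	}

	static struct x86_pmu x86_pmu = { .disable = amd_pmu_disable_counter };

	int main(void)
	{
		/* 0xc0010000 is MSR_K7_EVNTSEL0; event 0x76 is illustrative */
		struct hw_perf_counter hwc = { .config = 0x76, .config_base = 0xc0010000 };

		x86_pmu.disable(&hwc, 2);	/* disable counter 2 via the method table */
		return 0;
	}

Because the counter now carries its own config_base, one helper serves both
vendors; that is what lets amd_pmu_disable_counter() collapse to a single
call and the call sites switch to x86_pmu.disable(hwc, idx).
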
arch/x86/kernel/cpu/perf_counter.c

index ae55933ce79c8e06567532c9c8caaf975008a9f1..df9012bbd211aec838a145b6f50af87a4c197d5c 100644 (file)
@@ -45,7 +45,7 @@ struct x86_pmu {
        u64             (*save_disable_all)(void);
        void            (*restore_all)(u64);
        void            (*enable)(struct hw_perf_counter *, int);
-       void            (*disable)(int, u64);
+       void            (*disable)(struct hw_perf_counter *, int);
        unsigned        eventsel;
        unsigned        perfctr;
        u64             (*event_map)(int);
@@ -425,28 +425,19 @@ static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
                              hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
-static void intel_pmu_disable_counter(int idx, u64 config)
+static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-       wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config);
-}
-
-static void amd_pmu_disable_counter(int idx, u64 config)
-{
-       wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
-
-}
+       int err;
 
-static void hw_perf_disable(int idx, u64 config)
-{
        if (unlikely(!perf_counters_initialized))
                return;
 
-       x86_pmu.disable(idx, config);
+       err = checking_wrmsrl(hwc->config_base + idx,
+                             hwc->config);
 }
 
 static inline void
-__pmc_fixed_disable(struct perf_counter *counter,
-                   struct hw_perf_counter *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;
@@ -460,13 +451,20 @@ __pmc_fixed_disable(struct perf_counter *counter,
 }
 
 static inline void
-__x86_pmu_disable(struct perf_counter *counter,
-                 struct hw_perf_counter *hwc, int idx)
+intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-       if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
-               __pmc_fixed_disable(counter, hwc, idx);
-       else
-               hw_perf_disable(idx, hwc->config);
+       if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+               intel_pmu_disable_fixed(hwc, idx);
+               return;
+       }
+
+       x86_pmu_disable_counter(hwc, idx);
+}
+
+static inline void
+amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+       x86_pmu_disable_counter(hwc, idx);
 }
 
 static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
@@ -551,7 +549,7 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
        if (cpuc->enabled)
                x86_pmu_enable_counter(hwc, idx);
        else
-               amd_pmu_disable_counter(idx, hwc->config);
+               x86_pmu_disable_counter(hwc, idx);
 }
 
 static int
@@ -622,7 +620,7 @@ try_generic:
 
        perf_counters_lapic_init(hwc->nmi);
 
-       __x86_pmu_disable(counter, hwc, idx);
+       x86_pmu.disable(hwc, idx);
 
        cpuc->counters[idx] = counter;
        set_bit(idx, cpuc->active);
@@ -694,7 +692,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
         * could reenable again:
         */
        clear_bit(idx, cpuc->active);
-       __x86_pmu_disable(counter, hwc, idx);
+       x86_pmu.disable(hwc, idx);
 
        /*
         * Make sure the cleared pointer becomes visible before we
@@ -762,7 +760,7 @@ again:
 
                intel_pmu_save_and_restart(counter);
                if (perf_counter_overflow(counter, nmi, regs, 0))
-                       __x86_pmu_disable(counter, &counter->hw, bit);
+                       intel_pmu_disable_counter(&counter->hw, bit);
        }
 
        intel_pmu_ack_status(ack);
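
The fixed-counter branch routed to intel_pmu_disable_fixed() works
differently from the generic path: all fixed counters share one control MSR
(MSR_ARCH_PERFMON_FIXED_CTR_CTRL) with a 4-bit enable field per counter, so
disabling is a read-modify-write that clears the counter's nibble. The hunk
above only shows the start of that body; the following is a hedged
standalone model of the nibble-clearing scheme, assuming the architectural
4-bits-per-counter layout, with the MSR modeled as a plain variable:

	/* Standalone model of the fixed-counter disable path; the shared
	 * control MSR is stubbed as a variable. */
	#include <stdio.h>
	#include <stdint.h>

	#define X86_PMC_IDX_FIXED 32	/* fixed counters start at index 32 */

	static uint64_t fixed_ctr_ctrl = 0x0000000000000bbbULL; /* three counters enabled */

	static void intel_pmu_disable_fixed(int __idx)
	{
		int idx = __idx - X86_PMC_IDX_FIXED;
		uint64_t mask = 0xfULL << (idx * 4);	/* this counter's 4-bit field */

		fixed_ctr_ctrl &= ~mask;	/* read-modify-write, clearing it */
	}

	int main(void)
	{
		intel_pmu_disable_fixed(X86_PMC_IDX_FIXED + 1); /* fixed counter 1 */
		printf("FIXED_CTR_CTRL = 0x%llx\n",
		       (unsigned long long)fixed_ctr_ctrl);	/* prints 0xb0b */
		return 0;
	}

This is why intel_pmu_disable_counter() must keep the config_base check: a
fixed counter cannot be disabled by writing its own eventsel MSR, only by
masking its field in the shared control register.
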