git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
perf_counter: x86: Remove interrupt throttle
author: Peter Zijlstra <a.p.zijlstra@chello.nl>
Mon, 25 May 2009 15:39:04 +0000 (17:39 +0200)
committer: Ingo Molnar <mingo@elte.hu>
Mon, 25 May 2009 19:41:12 +0000 (21:41 +0200)
Remove the x86-specific interrupt throttle.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090525153931.616671838@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/perf_counter.c
include/linux/perf_counter.h

index b4f64402a82a4714d4b7e087b021de98da23cfa6..89b63b5fad3371daf9fd97906b69e40e9aad404d 100644 (file)
@@ -763,8 +763,6 @@ static void local_apic_timer_interrupt(void)
        inc_irq_stat(apic_timer_irqs);
 
        evt->event_handler(evt);
-
-       perf_counter_unthrottle();
 }
 
 /*
index c14437faf5d29efb47f63f2529440521f288a3dd..8c8177f859fe145488f8633025bc31dbf2b2cef2 100644 (file)
@@ -718,11 +718,6 @@ static void intel_pmu_save_and_restart(struct perf_counter *counter)
                intel_pmu_enable_counter(hwc, idx);
 }
 
-/*
- * Maximum interrupt frequency of 100KHz per CPU
- */
-#define PERFMON_MAX_INTERRUPTS (100000/HZ)
-
 /*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
  * rules apply:
@@ -775,15 +770,14 @@ again:
        if (status)
                goto again;
 
-       if (++cpuc->interrupts != PERFMON_MAX_INTERRUPTS)
-               perf_enable();
+       perf_enable();
 
        return 1;
 }
 
 static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 {
-       int cpu, idx, throttle = 0, handled = 0;
+       int cpu, idx, handled = 0;
        struct cpu_hw_counters *cpuc;
        struct perf_counter *counter;
        struct hw_perf_counter *hwc;
@@ -792,16 +786,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);
 
-       if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) {
-               throttle = 1;
-               __perf_disable();
-               cpuc->enabled = 0;
-               barrier();
-       }
-
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               int disable = 0;
-
                if (!test_bit(idx, cpuc->active_mask))
                        continue;
 
@@ -809,45 +794,23 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
                hwc = &counter->hw;
 
                if (counter->hw_event.nmi != nmi)
-                       goto next;
+                       continue;
 
                val = x86_perf_counter_update(counter, hwc, idx);
                if (val & (1ULL << (x86_pmu.counter_bits - 1)))
-                       goto next;
+                       continue;
 
                /* counter overflow */
                x86_perf_counter_set_period(counter, hwc, idx);
                handled = 1;
                inc_irq_stat(apic_perf_irqs);
-               disable = perf_counter_overflow(counter, nmi, regs, 0);
-
-next:
-               if (disable || throttle)
+               if (perf_counter_overflow(counter, nmi, regs, 0))
                        amd_pmu_disable_counter(hwc, idx);
        }
 
        return handled;
 }
 
-void perf_counter_unthrottle(void)
-{
-       struct cpu_hw_counters *cpuc;
-
-       if (!x86_pmu_initialized())
-               return;
-
-       cpuc = &__get_cpu_var(cpu_hw_counters);
-       if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
-               /*
-                * Clear them before re-enabling irqs/NMIs again:
-                */
-               cpuc->interrupts = 0;
-               perf_enable();
-       } else {
-               cpuc->interrupts = 0;
-       }
-}
-
 void smp_perf_counter_interrupt(struct pt_regs *regs)
 {
        irq_enter();
index d3e85de9bf1e15921612c5a80c3ef74ad0794cae..0c160be2078f19207061d467f04fdd5fa18f7f9e 100644 (file)
@@ -570,7 +570,6 @@ extern int perf_counter_init_task(struct task_struct *child);
 extern void perf_counter_exit_task(struct task_struct *child);
 extern void perf_counter_do_pending(void);
 extern void perf_counter_print_debug(void);
-extern void perf_counter_unthrottle(void);
 extern void __perf_disable(void);
 extern bool __perf_enable(void);
 extern void perf_disable(void);
@@ -635,7 +634,6 @@ static inline int perf_counter_init_task(struct task_struct *child) { }
 static inline void perf_counter_exit_task(struct task_struct *child)   { }
 static inline void perf_counter_do_pending(void)                       { }
 static inline void perf_counter_print_debug(void)                      { }
-static inline void perf_counter_unthrottle(void)                       { }
 static inline void perf_disable(void)                                  { }
 static inline void perf_enable(void)                                   { }
 static inline int perf_counter_task_disable(void)      { return -EINVAL; }