git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'x86/cleanups' into perf/uprobes
author     Ingo Molnar <mingo@elte.hu>
           Tue, 13 Mar 2012 15:32:54 +0000 (16:32 +0100)
committer  Ingo Molnar <mingo@elte.hu>
           Tue, 13 Mar 2012 15:33:03 +0000 (16:33 +0100)
Merge reason: We want to merge a dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/powerpc/kernel/perf_event.c
arch/x86/Kconfig
arch/x86/include/asm/perf_event.h
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
kernel/events/core.c
kernel/events/hw_breakpoint.c
tools/perf/util/evlist.c
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c

index d614ab57ccca8a9891d3ae30f5f4caa3dcdbc580,64483fde95c62cfa0cc5411e2c2cc6fce5cab453..f04c2301725e843e24d1c0a81b9962bc6d6de2f2
@@@ -865,6 -865,7 +865,7 @@@ static void power_pmu_start(struct perf
  {
        unsigned long flags;
        s64 left;
+       unsigned long val;
  
        if (!event->hw.idx || !event->hw.sample_period)
                return;
  
        event->hw.state = 0;
        left = local64_read(&event->hw.period_left);
-       write_pmc(event->hw.idx, left);
+       val = 0;
+       if (left < 0x80000000L)
+               val = 0x80000000L - left;
+       write_pmc(event->hw.idx, val);
  
        perf_event_update_userpage(event);
        perf_pmu_enable(event->pmu);
@@@ -1187,11 -1193,6 +1193,11 @@@ static int power_pmu_event_init(struct 
        return err;
  }
  
 +static int power_pmu_event_idx(struct perf_event *event)
 +{
 +      return event->hw.idx;
 +}
 +
  struct pmu power_pmu = {
        .pmu_enable     = power_pmu_enable,
        .pmu_disable    = power_pmu_disable,
        .start_txn      = power_pmu_start_txn,
        .cancel_txn     = power_pmu_cancel_txn,
        .commit_txn     = power_pmu_commit_txn,
 +      .event_idx      = power_pmu_event_idx,
  };
  
  /*
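Note on the powerpc hunk above (editorial, not part of the patch): the PMCs raise their overflow interrupt when the top bit of the 32-bit counter becomes set, so to get a sample after `left` more events the counter is reloaded with 0x80000000 - left, clamped to 0 when `left` does not fit in 31 bits. A stand-alone arithmetic check of that reload value, as an illustrative sketch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int64_t left = 1000;                    /* events remaining until the next sample */
            uint32_t val = 0;

            if (left < 0x80000000L)
                    val = 0x80000000L - left;       /* counter reaches 0x80000000 after 'left' increments */

            printf("reload value: 0x%08x\n", val);
            printf("value after %lld increments: 0x%08llx (top bit set -> overflow)\n",
                   (long long)left, (unsigned long long)val + left);
            return 0;
    }

With left = 1000 this prints a reload value of 0x7ffffc18, which overflows to exactly 0x80000000 after 1000 increments; the old code wrote `left` directly, which is the bug this hunk fixes.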
diff --combined arch/x86/Kconfig
index 481dbfcf14edb4a56e505062a4741e941146cbb6,e2b38b4bffdce8f0576efb241a0b2bd13026ba23..d2a540f7d6cbb27dfd0af40a002a5b8979d325a1
@@@ -84,7 -84,7 +84,7 @@@ config X8
        select GENERIC_IOMAP
  
  config INSTRUCTION_DECODER
 -      def_bool (KPROBES || PERF_EVENTS)
 +      def_bool (KPROBES || PERF_EVENTS || UPROBES)
  
  config OUTPUT_FORMAT
        string
@@@ -240,9 -240,6 +240,9 @@@ config ARCH_CPU_PROBE_RELEAS
        def_bool y
        depends on HOTPLUG_CPU
  
 +config ARCH_SUPPORTS_UPROBES
 +      def_bool y
 +
  source "init/Kconfig"
  source "kernel/Kconfig.freezer"
  
@@@ -2168,9 -2165,9 +2168,9 @@@ config IA32_EMULATIO
        depends on X86_64
        select COMPAT_BINFMT_ELF
        ---help---
-         Include code to run 32-bit programs under a 64-bit kernel. You should
-         likely turn this on, unless you're 100% sure that you don't have any
-         32-bit programs left.
+         Include code to run legacy 32-bit programs under a
+         64-bit kernel. You should likely turn this on, unless you're
+         100% sure that you don't have any 32-bit programs left.
  
  config IA32_AOUT
        tristate "IA32 a.out support"
        ---help---
          Support old a.out binaries in the 32bit emulation.
  
+ config X86_X32
+       bool "x32 ABI for 64-bit mode (EXPERIMENTAL)"
+       depends on X86_64 && IA32_EMULATION && EXPERIMENTAL
+       ---help---
+         Include code to run binaries for the x32 native 32-bit ABI
+         for 64-bit processors.  An x32 process gets access to the
+         full 64-bit register file and wide data path while leaving
+         pointers at 32 bits for smaller memory footprint.
+         You will need a recent binutils (2.22 or later) with
+         elf32_x86_64 support enabled to compile a kernel with this
+         option set.
  config COMPAT
        def_bool y
-       depends on IA32_EMULATION
+       depends on IA32_EMULATION || X86_X32
  
  config COMPAT_FOR_U64_ALIGNMENT
        def_bool COMPAT
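The X86_X32 help text above describes the defining property of the x32 ABI: the full 64-bit register file and data path, but 32-bit pointers. As a rough illustration (not part of the patch, and assuming an x32-capable toolchain, e.g. gcc -mx32 on top of binutils 2.22 or later), a trivial program shows the narrower pointer and long sizes:

    /* build: gcc -mx32 -o sizes sizes.c   (x32-enabled toolchain assumed) */
    #include <stdio.h>

    int main(void)
    {
            printf("sizeof(void *)    = %zu\n", sizeof(void *));    /* 4 on x32, 8 on x86-64 */
            printf("sizeof(long)      = %zu\n", sizeof(long));      /* 4 on x32, 8 on x86-64 */
            printf("sizeof(long long) = %zu\n", sizeof(long long)); /* 8 on both */
            return 0;
    }

A plain x86-64 build of the same program prints 8 for both pointer and long; that difference is the memory-footprint saving the help text refers to.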
index 9b922c13625425c484798624f4003f84b8aa8a34,461ce432b1c2755c6d0f70e00e3ac9f3993ecba1..e8fb2c7a5f4ff1792c16dc1c8df498a0801b8eed
@@@ -188,6 -188,8 +188,6 @@@ extern u32 get_ibs_caps(void)
  #ifdef CONFIG_PERF_EVENTS
  extern void perf_events_lapic_init(void);
  
 -#define PERF_EVENT_INDEX_OFFSET                       0
 -
  /*
   * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
   * This flag is otherwise unused and ABI specified to be 0, so nobody should
@@@ -240,4 -242,12 +240,12 @@@ static inline void perf_get_x86_pmu_cap
  static inline void perf_events_lapic_init(void)       { }
  #endif
  
+ #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
+  extern void amd_pmu_enable_virt(void);
+  extern void amd_pmu_disable_virt(void);
+ #else
+  static inline void amd_pmu_enable_virt(void) { }
+  static inline void amd_pmu_disable_virt(void) { }
+ #endif
  #endif /* _ASM_X86_PERF_EVENT_H */
index 3c44b712380c6dde2abde60d49600da115cae5ae,63c0e058a40503f6a7d8f60a0b50471e7b6e437f..1c52bdbb9b8befffabb37679b17a2fbd3af676a0
  #include <linux/slab.h>
  #include <linux/cpu.h>
  #include <linux/bitops.h>
 +#include <linux/device.h>
  
  #include <asm/apic.h>
  #include <asm/stacktrace.h>
  #include <asm/nmi.h>
- #include <asm/compat.h>
  #include <asm/smp.h>
  #include <asm/alternative.h>
 +#include <asm/timer.h>
  
  #include "perf_event.h"
  
@@@ -988,6 -985,9 +987,9 @@@ static void x86_pmu_start(struct perf_e
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = event->hw.idx;
  
+       if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+               return;
        if (WARN_ON_ONCE(idx == -1))
                return;
  
@@@ -1209,8 -1209,6 +1211,8 @@@ x86_pmu_notifier(struct notifier_block 
                break;
  
        case CPU_STARTING:
 +              if (x86_pmu.attr_rdpmc)
 +                      set_in_cr4(X86_CR4_PCE);
                if (x86_pmu.cpu_starting)
                        x86_pmu.cpu_starting(cpu);
                break;
@@@ -1320,8 -1318,6 +1322,8 @@@ static int __init init_hw_perf_events(v
                }
        }
  
 +      x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
 +
        pr_info("... version:                %d\n",     x86_pmu.version);
        pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
        pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
@@@ -1545,71 -1541,10 +1547,71 @@@ static int x86_pmu_event_init(struct pe
        return err;
  }
  
 +static int x86_pmu_event_idx(struct perf_event *event)
 +{
 +      int idx = event->hw.idx;
 +
 +      if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
 +              idx -= X86_PMC_IDX_FIXED;
 +              idx |= 1 << 30;
 +      }
 +
 +      return idx + 1;
 +}
 +
 +static ssize_t get_attr_rdpmc(struct device *cdev,
 +                            struct device_attribute *attr,
 +                            char *buf)
 +{
 +      return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
 +}
 +
 +static void change_rdpmc(void *info)
 +{
 +      bool enable = !!(unsigned long)info;
 +
 +      if (enable)
 +              set_in_cr4(X86_CR4_PCE);
 +      else
 +              clear_in_cr4(X86_CR4_PCE);
 +}
 +
 +static ssize_t set_attr_rdpmc(struct device *cdev,
 +                            struct device_attribute *attr,
 +                            const char *buf, size_t count)
 +{
 +      unsigned long val = simple_strtoul(buf, NULL, 0);
 +
 +      if (!!val != !!x86_pmu.attr_rdpmc) {
 +              x86_pmu.attr_rdpmc = !!val;
 +              smp_call_function(change_rdpmc, (void *)val, 1);
 +      }
 +
 +      return count;
 +}
 +
 +static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
 +
 +static struct attribute *x86_pmu_attrs[] = {
 +      &dev_attr_rdpmc.attr,
 +      NULL,
 +};
 +
 +static struct attribute_group x86_pmu_attr_group = {
 +      .attrs = x86_pmu_attrs,
 +};
 +
 +static const struct attribute_group *x86_pmu_attr_groups[] = {
 +      &x86_pmu_attr_group,
 +      NULL,
 +};
 +
  static struct pmu pmu = {
        .pmu_enable     = x86_pmu_enable,
        .pmu_disable    = x86_pmu_disable,
  
 +      .attr_groups    = x86_pmu_attr_groups,
 +
        .event_init     = x86_pmu_event_init,
  
        .add            = x86_pmu_add,
        .start_txn      = x86_pmu_start_txn,
        .cancel_txn     = x86_pmu_cancel_txn,
        .commit_txn     = x86_pmu_commit_txn,
 +
 +      .event_idx      = x86_pmu_event_idx,
  };
  
 +void perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
 +{
 +      if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
 +              return;
 +
 +      if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 +              return;
 +
 +      userpg->time_mult = this_cpu_read(cyc2ns);
 +      userpg->time_shift = CYC2NS_SCALE_FACTOR;
 +      userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
 +}
 +
  /*
   * callchain support
   */
@@@ -1674,6 -1594,9 +1676,9 @@@ perf_callchain_kernel(struct perf_callc
  }
  
  #ifdef CONFIG_COMPAT
+ #include <asm/compat.h>
  static inline int
  perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
  {
index 513d617b93c43f7145db0e9be5e62ba0d467461f,c30c807ddc7236e444508357e8186bd4d504bd6f..82db83b5c3bc1671448db3f03a82f68b039854c4
@@@ -147,7 -147,9 +147,9 @@@ struct cpu_hw_events 
        /*
         * AMD specific bits
         */
-       struct amd_nb           *amd_nb;
+       struct amd_nb                   *amd_nb;
+       /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
+       u64                             perf_ctr_virt_mask;
  
        void                            *kfree_on_online;
  };
@@@ -307,14 -309,6 +309,14 @@@ struct x86_pmu 
        struct x86_pmu_quirk *quirks;
        int             perfctr_second_write;
  
 +      /*
 +       * sysfs attrs
 +       */
 +      int             attr_rdpmc;
 +
 +      /*
 +       * CPU Hotplug hooks
 +       */
        int             (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
@@@ -425,9 -419,11 +427,11 @@@ void x86_pmu_disable_all(void)
  static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
                                          u64 enable_mask)
  {
+       u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
        if (hwc->extra_reg.reg)
                wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
-       wrmsrl(hwc->config_base, hwc->config | enable_mask);
+       wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
  }
  
  void x86_pmu_enable_all(int added);
diff --combined kernel/events/core.c
index 7c3b9de55f6b90d39d8a998e9193b4b83383b566,1b5c081d8b9f9c8ea05f1a8ecaf861f80ab7ba1c..94afe5b91c6a5f4bb0c7df0d58c4c1cf1be97733
@@@ -2303,7 -2303,7 +2303,7 @@@ do {                                    
  static DEFINE_PER_CPU(int, perf_throttled_count);
  static DEFINE_PER_CPU(u64, perf_throttled_seq);
  
- static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
  {
        struct hw_perf_event *hwc = &event->hw;
        s64 period, sample_period;
        hwc->sample_period = sample_period;
  
        if (local64_read(&hwc->period_left) > 8*sample_period) {
-               event->pmu->stop(event, PERF_EF_UPDATE);
+               if (disable)
+                       event->pmu->stop(event, PERF_EF_UPDATE);
                local64_set(&hwc->period_left, 0);
-               event->pmu->start(event, PERF_EF_RELOAD);
+               if (disable)
+                       event->pmu->start(event, PERF_EF_RELOAD);
        }
  }
  
@@@ -2350,6 -2354,7 +2354,7 @@@ static void perf_adjust_freq_unthr_cont
                return;
  
        raw_spin_lock(&ctx->lock);
+       perf_pmu_disable(ctx->pmu);
  
        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                if (event->state != PERF_EVENT_STATE_ACTIVE)
                /*
                 * restart the event
                 * reload only if value has changed
+                * we have stopped the event so tell that
+                * to perf_adjust_period() to avoid stopping it
+                * twice.
                 */
                if (delta > 0)
-                       perf_adjust_period(event, period, delta);
+                       perf_adjust_period(event, period, delta, false);
  
                event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
        }
  
+       perf_pmu_enable(ctx->pmu);
        raw_spin_unlock(&ctx->lock);
  }
  
@@@ -3229,6 -3238,10 +3238,6 @@@ int perf_event_task_disable(void
        return 0;
  }
  
 -#ifndef PERF_EVENT_INDEX_OFFSET
 -# define PERF_EVENT_INDEX_OFFSET 0
 -#endif
 -
  static int perf_event_index(struct perf_event *event)
  {
        if (event->hw.state & PERF_HES_STOPPED)
        if (event->state != PERF_EVENT_STATE_ACTIVE)
                return 0;
  
 -      return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
 +      return event->pmu->event_idx(event);
  }
  
  static void calc_timer_values(struct perf_event *event,
 +                              u64 *now,
                                u64 *enabled,
                                u64 *running)
  {
 -      u64 now, ctx_time;
 +      u64 ctx_time;
  
 -      now = perf_clock();
 -      ctx_time = event->shadow_ctx_time + now;
 +      *now = perf_clock();
 +      ctx_time = event->shadow_ctx_time + *now;
        *enabled = ctx_time - event->tstamp_enabled;
        *running = ctx_time - event->tstamp_running;
  }
  
 +void __weak perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
 +{
 +}
 +
  /*
   * Callers need to ensure there can be no nesting of this function, otherwise
   * the seqlock logic goes bad. We can not serialize this because the arch
@@@ -3266,7 -3274,7 +3275,7 @@@ void perf_event_update_userpage(struct 
  {
        struct perf_event_mmap_page *userpg;
        struct ring_buffer *rb;
 -      u64 enabled, running;
 +      u64 enabled, running, now;
  
        rcu_read_lock();
        /*
         * because of locking issue as we can be called in
         * NMI context
         */
 -      calc_timer_values(event, &enabled, &running);
 +      calc_timer_values(event, &now, &enabled, &running);
        rb = rcu_dereference(event->rb);
        if (!rb)
                goto unlock;
        barrier();
        userpg->index = perf_event_index(event);
        userpg->offset = perf_event_count(event);
 -      if (event->state == PERF_EVENT_STATE_ACTIVE)
 +      if (userpg->index)
                userpg->offset -= local64_read(&event->hw.prev_count);
  
        userpg->time_enabled = enabled +
        userpg->time_running = running +
                        atomic64_read(&event->child_total_time_running);
  
 +      perf_update_user_clock(userpg, now);
 +
        barrier();
        ++userpg->lock;
        preempt_enable();
@@@ -3562,8 -3568,6 +3571,8 @@@ static int perf_mmap(struct file *file
        event->mmap_user = get_current_user();
        vma->vm_mm->pinned_vm += event->mmap_locked;
  
 +      perf_event_update_userpage(event);
 +
  unlock:
        if (!ret)
                atomic_inc(&event->mmap_count);
@@@ -3795,7 -3799,7 +3804,7 @@@ static void perf_output_read_group(stru
  static void perf_output_read(struct perf_output_handle *handle,
                             struct perf_event *event)
  {
 -      u64 enabled = 0, running = 0;
 +      u64 enabled = 0, running = 0, now;
        u64 read_format = event->attr.read_format;
  
        /*
         * NMI context
         */
        if (read_format & PERF_FORMAT_TOTAL_TIMES)
 -              calc_timer_values(event, &enabled, &running);
 +              calc_timer_values(event, &now, &enabled, &running);
  
        if (event->attr.read_format & PERF_FORMAT_GROUP)
                perf_output_read_group(handle, event, enabled, running);
@@@ -4567,7 -4571,7 +4576,7 @@@ static int __perf_event_overflow(struc
                hwc->freq_time_stamp = now;
  
                if (delta > 0 && delta < 2*TICK_NSEC)
-                       perf_adjust_period(event, delta, hwc->last_period);
+                       perf_adjust_period(event, delta, hwc->last_period, true);
        }
  
        /*
@@@ -5027,11 -5031,6 +5036,11 @@@ static int perf_swevent_init(struct per
        return 0;
  }
  
 +static int perf_swevent_event_idx(struct perf_event *event)
 +{
 +      return 0;
 +}
 +
  static struct pmu perf_swevent = {
        .task_ctx_nr    = perf_sw_context,
  
        .start          = perf_swevent_start,
        .stop           = perf_swevent_stop,
        .read           = perf_swevent_read,
 +
 +      .event_idx      = perf_swevent_event_idx,
  };
  
  #ifdef CONFIG_EVENT_TRACING
@@@ -5129,8 -5126,6 +5138,8 @@@ static struct pmu perf_tracepoint = 
        .start          = perf_swevent_start,
        .stop           = perf_swevent_stop,
        .read           = perf_swevent_read,
 +
 +      .event_idx      = perf_swevent_event_idx,
  };
  
  static inline void perf_tp_register(void)
@@@ -5350,8 -5345,6 +5359,8 @@@ static struct pmu perf_cpu_clock = 
        .start          = cpu_clock_event_start,
        .stop           = cpu_clock_event_stop,
        .read           = cpu_clock_event_read,
 +
 +      .event_idx      = perf_swevent_event_idx,
  };
  
  /*
@@@ -5424,8 -5417,6 +5433,8 @@@ static struct pmu perf_task_clock = 
        .start          = task_clock_event_start,
        .stop           = task_clock_event_stop,
        .read           = task_clock_event_read,
 +
 +      .event_idx      = perf_swevent_event_idx,
  };
  
  static void perf_pmu_nop_void(struct pmu *pmu)
@@@ -5453,11 -5444,6 +5462,11 @@@ static void perf_pmu_cancel_txn(struct 
        perf_pmu_enable(pmu);
  }
  
 +static int perf_event_idx_default(struct perf_event *event)
 +{
 +      return event->hw.idx + 1;
 +}
 +
  /*
   * Ensures all contexts with the same task_ctx_nr have the same
   * pmu_cpu_context too.
@@@ -5544,7 -5530,6 +5553,7 @@@ static int pmu_dev_alloc(struct pmu *pm
        if (!pmu->dev)
                goto out;
  
 +      pmu->dev->groups = pmu->attr_groups;
        device_initialize(pmu->dev);
        ret = dev_set_name(pmu->dev, "%s", pmu->name);
        if (ret)
@@@ -5648,9 -5633,6 +5657,9 @@@ got_cpu_context
                pmu->pmu_disable = perf_pmu_nop_void;
        }
  
 +      if (!pmu->event_idx)
 +              pmu->event_idx = perf_event_idx_default;
 +
        list_add_rcu(&pmu->entry, &pmus);
        ret = 0;
  unlock:
index b0309f76d7775ed0d0229d67aded9317fe202ffb,ee706ce44aa0232301ae4fbd636f063789689e31..3330022a7ac1123f87939daab6a7b4b589accd80
@@@ -613,11 -613,6 +613,11 @@@ static void hw_breakpoint_stop(struct p
        bp->hw.state = PERF_HES_STOPPED;
  }
  
 +static int hw_breakpoint_event_idx(struct perf_event *bp)
 +{
 +      return 0;
 +}
 +
  static struct pmu perf_breakpoint = {
        .task_ctx_nr    = perf_sw_context, /* could eventually get its own */
  
        .start          = hw_breakpoint_start,
        .stop           = hw_breakpoint_stop,
        .read           = hw_breakpoint_pmu_read,
 +
 +      .event_idx      = hw_breakpoint_event_idx,
  };
  
  int __init init_hw_breakpoint(void)
  
   err_alloc:
        for_each_possible_cpu(err_cpu) {
-               if (err_cpu == cpu)
-                       break;
                for (i = 0; i < TYPE_MAX; i++)
                        kfree(per_cpu(nr_task_bp_pinned[i], cpu));
+               if (err_cpu == cpu)
+                       break;
        }
  
        return -ENOMEM;
diff --combined tools/perf/util/evlist.c
index 5c61dc57d7c7852fcb272ad97e999e7cee3afdec,ea32a061f1c88e9ee4cf63d9019dc50451a43e0a..f8da9fada0029e07cb54951d71d42310863f26d1
@@@ -97,9 -97,9 +97,9 @@@ void perf_evlist__add(struct perf_evlis
        ++evlist->nr_entries;
  }
  
 -static void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
 -                                        struct list_head *list,
 -                                        int nr_entries)
 +void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
 +                                 struct list_head *list,
 +                                 int nr_entries)
  {
        list_splice_tail(list, &evlist->entries);
        evlist->nr_entries += nr_entries;
@@@ -349,6 -349,10 +349,10 @@@ struct perf_evsel *perf_evlist__id2evse
        hlist_for_each_entry(sid, pos, head, node)
                if (sid->id == id)
                        return sid->evsel;
+       if (!perf_evlist__sample_id_all(evlist))
+               return list_entry(evlist->entries.next, struct perf_evsel, node);
        return NULL;
  }
  
@@@ -593,15 -597,15 +597,15 @@@ int perf_evlist__mmap(struct perf_evlis
        return perf_evlist__mmap_per_cpu(evlist, prot, mask);
  }
  
 -int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
 -                           pid_t target_tid, const char *cpu_list)
 +int perf_evlist__create_maps(struct perf_evlist *evlist, const char *target_pid,
 +                           const char *target_tid, uid_t uid, const char *cpu_list)
  {
 -      evlist->threads = thread_map__new(target_pid, target_tid);
 +      evlist->threads = thread_map__new_str(target_pid, target_tid, uid);
  
        if (evlist->threads == NULL)
                return -1;
  
 -      if (cpu_list == NULL && target_tid != -1)
 +      if (uid != UINT_MAX || (cpu_list == NULL && target_tid))
                evlist->cpus = cpu_map__dummy_new();
        else
                evlist->cpus = cpu_map__new(cpu_list);
@@@ -820,7 -824,7 +824,7 @@@ int perf_evlist__prepare_workload(struc
                exit(-1);
        }
  
 -      if (!opts->system_wide && opts->target_tid == -1 && opts->target_pid == -1)
 +      if (!opts->system_wide && !opts->target_tid && !opts->target_pid)
                evlist->threads->map[0] = evlist->workload.pid;
  
        close(child_ready_pipe[1]);
index c1a513e567640518cc7f7ac8ec59e5424d427d6d,e33554a562b36fb59c95d935258d525a0e810dac..15f9bb1b5f0af8aa599efce1d0b4f3941d4abe83
@@@ -273,10 -273,10 +273,10 @@@ static int add_module_to_probe_trace_ev
  /* Try to find perf_probe_event with debuginfo */
  static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
                                          struct probe_trace_event **tevs,
 -                                        int max_tevs, const char *module)
 +                                        int max_tevs, const char *target)
  {
        bool need_dwarf = perf_probe_event_need_dwarf(pev);
 -      struct debuginfo *dinfo = open_debuginfo(module);
 +      struct debuginfo *dinfo = open_debuginfo(target);
        int ntevs, ret = 0;
  
        if (!dinfo) {
  
        if (ntevs > 0) {        /* Succeeded to find trace events */
                pr_debug("find %d probe_trace_events.\n", ntevs);
 -              if (module)
 +              if (target)
                        ret = add_module_to_probe_trace_events(*tevs, ntevs,
 -                                                             module);
 +                                                             target);
                return ret < 0 ? ret : ntevs;
        }
  
@@@ -1729,7 -1729,7 +1729,7 @@@ static int __add_probe_trace_events(str
        }
  
        ret = 0;
 -      printf("Add new event%s\n", (ntevs > 1) ? "s:" : ":");
 +      printf("Added new event%s\n", (ntevs > 1) ? "s:" : ":");
        for (i = 0; i < ntevs; i++) {
                tev = &tevs[i];
                if (pev->event)
  
        if (ret >= 0) {
                /* Show how to use the event. */
 -              printf("\nYou can now use it on all perf tools, such as:\n\n");
 +              printf("\nYou can now use it in all perf tools, such as:\n\n");
                printf("\tperf record -e %s:%s -aR sleep 1\n\n", tev->group,
                         tev->event);
        }
  
  static int convert_to_probe_trace_events(struct perf_probe_event *pev,
                                          struct probe_trace_event **tevs,
 -                                        int max_tevs, const char *module)
 +                                        int max_tevs, const char *target)
  {
        struct symbol *sym;
        int ret = 0, i;
        struct probe_trace_event *tev;
  
        /* Convert perf_probe_event with debuginfo */
 -      ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, module);
 +      ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, target);
        if (ret != 0)
                return ret;     /* Found in debuginfo or got an error */
  
                goto error;
        }
  
 -      if (module) {
 -              tev->point.module = strdup(module);
 +      if (target) {
 +              tev->point.module = strdup(target);
                if (tev->point.module == NULL) {
                        ret = -ENOMEM;
                        goto error;
                           tev->point.symbol);
                ret = -ENOENT;
                goto error;
+       } else if (tev->point.offset > sym->end - sym->start) {
+               pr_warning("Offset specified is greater than size of %s\n",
+                          tev->point.symbol);
+               ret = -ENOENT;
+               goto error;
        }
  
        return 1;
@@@ -1884,7 -1890,7 +1890,7 @@@ struct __event_package 
  };
  
  int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
 -                        int max_tevs, const char *module, bool force_add)
 +                        int max_tevs, const char *target, bool force_add)
  {
        int i, j, ret;
        struct __event_package *pkgs;
                ret  = convert_to_probe_trace_events(pkgs[i].pev,
                                                     &pkgs[i].tevs,
                                                     max_tevs,
 -                                                   module);
 +                                                   target);
                if (ret < 0)
                        goto end;
                pkgs[i].ntevs = ret;
@@@ -1959,7 -1965,7 +1965,7 @@@ static int __del_trace_probe_event(int 
                goto error;
        }
  
 -      printf("Remove event: %s\n", ent->s);
 +      printf("Removed event: %s\n", ent->s);
        return 0;
  error:
        pr_warning("Failed to delete event: %s\n", strerror(-ret));
@@@ -2063,7 -2069,7 +2069,7 @@@ static int filter_available_functions(s
        return 1;
  }
  
 -int show_available_funcs(const char *module, struct strfilter *_filter)
 +int show_available_funcs(const char *target, struct strfilter *_filter)
  {
        struct map *map;
        int ret;
        if (ret < 0)
                return ret;
  
 -      map = kernel_get_module_map(module);
 +      map = kernel_get_module_map(target);
        if (!map) {
 -              pr_err("Failed to find %s map.\n", (module) ? : "kernel");
 +              pr_err("Failed to find %s map.\n", (target) ? : "kernel");
                return -EINVAL;
        }
        available_func_filter = _filter;
index 67dc4aed721cb0c384f5c36023c5884a7752d224,74bd2e63c4b4a7458989a08cc731c15edfb99c98..2cc162d3b78c3b8ed720bff80a3b2171a8673397
@@@ -30,6 -30,7 +30,6 @@@
  #include <stdlib.h>
  #include <string.h>
  #include <stdarg.h>
 -#include <ctype.h>
  #include <dwarf-regs.h>
  
  #include <linux/bitops.h>
@@@ -671,7 -672,7 +671,7 @@@ static int find_variable(Dwarf_Die *sc_
  static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr,
                                  bool retprobe, struct probe_trace_point *tp)
  {
-       Dwarf_Addr eaddr;
+       Dwarf_Addr eaddr, highaddr;
        const char *name;
  
        /* Copy the name of probe point */
                                   dwarf_diename(sp_die));
                        return -ENOENT;
                }
+               if (dwarf_highpc(sp_die, &highaddr) != 0) {
+                       pr_warning("Failed to get end address of %s\n",
+                                  dwarf_diename(sp_die));
+                       return -ENOENT;
+               }
+               if (paddr > highaddr) {
+                       pr_warning("Offset specified is greater than size of %s\n",
+                                  dwarf_diename(sp_die));
+                       return -EINVAL;
+               }
                tp->symbol = strdup(name);
                if (tp->symbol == NULL)
                        return -ENOMEM;