From e09b41010ba33a20a87472ee821fa407a5b8da36 Mon Sep 17 00:00:00 2001
From: José Pekkarinen
Date: Mon, 11 Apr 2016 10:41:07 +0300
Subject: These changes are the raw update to linux-4.4.6-rt14. Kernel sources
 are taken from kernel.org, and the rt patch from the rt wiki download page.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

During the rebase, the following patch collided:

    Force tick interrupt and get rid of softirq magic (I70131fb85).

The conflicting hunks were dropped, since their logic was already present
in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen
---
 kernel/tools/perf/util/Build | 31 +-
 kernel/tools/perf/util/annotate.c | 219 +-
 kernel/tools/perf/util/annotate.h | 28 +-
 kernel/tools/perf/util/auxtrace.c | 1392 ++++++++++++
 kernel/tools/perf/util/auxtrace.h | 650 ++++++
 kernel/tools/perf/util/bpf-loader.c | 457 ++++
 kernel/tools/perf/util/bpf-loader.h | 118 +
 kernel/tools/perf/util/build-id.c | 105 +-
 kernel/tools/perf/util/build-id.h | 6 +-
 kernel/tools/perf/util/cache.h | 1 -
 kernel/tools/perf/util/callchain.c | 135 +-
 kernel/tools/perf/util/callchain.h | 33 +-
 kernel/tools/perf/util/cgroup.c | 10 +-
 kernel/tools/perf/util/cgroup.h | 4 +-
 kernel/tools/perf/util/cloexec.h | 2 +-
 kernel/tools/perf/util/color.c | 21 +-
 kernel/tools/perf/util/color.h | 1 -
 kernel/tools/perf/util/comm.c | 13 +-
 kernel/tools/perf/util/config.c | 4 +
 kernel/tools/perf/util/counts.c | 52 +
 kernel/tools/perf/util/counts.h | 37 +
 kernel/tools/perf/util/cpumap.c | 121 +-
 kernel/tools/perf/util/cpumap.h | 16 +-
 kernel/tools/perf/util/data-convert-bt.c | 410 +++-
 kernel/tools/perf/util/db-export.c | 31 +-
 kernel/tools/perf/util/debug.c | 5 +
 kernel/tools/perf/util/debug.h | 1 +
 kernel/tools/perf/util/dso.c | 351 ++-
 kernel/tools/perf/util/dso.h | 54 +-
 kernel/tools/perf/util/dwarf-aux.c | 241 +-
 kernel/tools/perf/util/dwarf-aux.h | 13 +-
 kernel/tools/perf/util/env.c | 86 +
 kernel/tools/perf/util/env.h | 44 +
 kernel/tools/perf/util/environment.c | 1 -
 kernel/tools/perf/util/event.c | 175 +-
 kernel/tools/perf/util/event.h | 120 +-
 kernel/tools/perf/util/evlist.c | 289 ++-
 kernel/tools/perf/util/evlist.h | 35 +-
 kernel/tools/perf/util/evsel.c | 280 ++-
 kernel/tools/perf/util/evsel.h | 98 +-
 kernel/tools/perf/util/header.c | 231 +-
 kernel/tools/perf/util/header.h | 29 +-
 kernel/tools/perf/util/hist.c | 177 +-
 kernel/tools/perf/util/hist.h | 18 +-
 kernel/tools/perf/util/include/dwarf-regs.h | 8 +
 kernel/tools/perf/util/include/linux/kernel.h | 107 -
 kernel/tools/perf/util/include/linux/list.h | 29 -
 kernel/tools/perf/util/include/linux/poison.h | 1 -
 kernel/tools/perf/util/include/linux/rbtree.h | 2 -
 .../perf/util/include/linux/rbtree_augmented.h | 2 -
 kernel/tools/perf/util/intel-bts.c | 933 ++++++++
 kernel/tools/perf/util/intel-bts.h | 43 +
 kernel/tools/perf/util/intel-pt-decoder/Build | 23 +
 .../util/intel-pt-decoder/gen-insn-attr-x86.awk | 386 ++++
 kernel/tools/perf/util/intel-pt-decoder/inat.c | 96 +
 kernel/tools/perf/util/intel-pt-decoder/inat.h | 221 ++
 .../tools/perf/util/intel-pt-decoder/inat_types.h | 29 +
 kernel/tools/perf/util/intel-pt-decoder/insn.c | 594 +++++
 kernel/tools/perf/util/intel-pt-decoder/insn.h | 201 ++
 .../perf/util/intel-pt-decoder/intel-pt-decoder.c | 2345 ++++++++++++++++++++
 .../perf/util/intel-pt-decoder/intel-pt-decoder.h | 109 +
 .../util/intel-pt-decoder/intel-pt-insn-decoder.c | 249 +++
 .../util/intel-pt-decoder/intel-pt-insn-decoder.h | 65 +
 .../perf/util/intel-pt-decoder/intel-pt-log.c | 156 ++
 .../perf/util/intel-pt-decoder/intel-pt-log.h | 78 +
 .../util/intel-pt-decoder/intel-pt-pkt-decoder.c | 518 +++++
 .../util/intel-pt-decoder/intel-pt-pkt-decoder.h | 70 +
 .../perf/util/intel-pt-decoder/x86-opcode-map.txt | 984 ++++++++
 kernel/tools/perf/util/intel-pt.c | 2164 ++++++++++++++++++
 kernel/tools/perf/util/intel-pt.h | 56 +
 kernel/tools/perf/util/llvm-utils.c | 430 ++++
 kernel/tools/perf/util/llvm-utils.h | 49 +
 kernel/tools/perf/util/machine.c | 357 ++-
 kernel/tools/perf/util/machine.h | 47 +-
 kernel/tools/perf/util/map.c | 311 +--
 kernel/tools/perf/util/map.h | 59 +-
 kernel/tools/perf/util/ordered-events.c | 3 +
 kernel/tools/perf/util/pager.c | 5 -
 kernel/tools/perf/util/parse-branch-options.c | 95 +
 kernel/tools/perf/util/parse-branch-options.h | 5 +
 kernel/tools/perf/util/parse-events.c | 740 +++++-
 kernel/tools/perf/util/parse-events.h | 63 +-
 kernel/tools/perf/util/parse-events.l | 60 +-
 kernel/tools/perf/util/parse-events.y | 120 +-
 kernel/tools/perf/util/parse-options.c | 156 +-
 kernel/tools/perf/util/parse-options.h | 9 +
 kernel/tools/perf/util/parse-regs-options.c | 71 +
 kernel/tools/perf/util/parse-regs-options.h | 5 +
 kernel/tools/perf/util/perf_regs.c | 6 +
 kernel/tools/perf/util/perf_regs.h | 10 +
 kernel/tools/perf/util/pmu.c | 158 +-
 kernel/tools/perf/util/pmu.h | 7 +-
 kernel/tools/perf/util/probe-event.c | 1350 +++++------
 kernel/tools/perf/util/probe-event.h | 48 +-
 kernel/tools/perf/util/probe-file.c | 327 +++
 kernel/tools/perf/util/probe-file.h | 22 +
 kernel/tools/perf/util/probe-finder.c | 266 ++-
 kernel/tools/perf/util/probe-finder.h | 10 +-
 kernel/tools/perf/util/pstack.c | 7 +
 kernel/tools/perf/util/pstack.h | 1 +
 kernel/tools/perf/util/python-ext-sources | 6 +-
 kernel/tools/perf/util/python.c | 203 +-
 kernel/tools/perf/util/record.c | 53 +-
 .../perf/util/scripting-engines/trace-event-perl.c | 5 +-
 .../util/scripting-engines/trace-event-python.c | 7 +-
 kernel/tools/perf/util/session.c | 379 +++-
 kernel/tools/perf/util/session.h | 8 +
 kernel/tools/perf/util/sort.c | 162 +-
 kernel/tools/perf/util/sort.h | 47 +-
 kernel/tools/perf/util/srcline.c | 35 +-
 kernel/tools/perf/util/stat-shadow.c | 437 ++++
 kernel/tools/perf/util/stat.c | 281 ++-
 kernel/tools/perf/util/stat.h | 68 +
 kernel/tools/perf/util/strbuf.c | 22 +-
 kernel/tools/perf/util/strbuf.h | 2 +
 kernel/tools/perf/util/strfilter.c | 107 +
 kernel/tools/perf/util/strfilter.h | 35 +
 kernel/tools/perf/util/string.c | 39 +
 kernel/tools/perf/util/strlist.c | 43 +-
 kernel/tools/perf/util/strlist.h | 9 +-
 kernel/tools/perf/util/svghelper.c | 2 +-
 kernel/tools/perf/util/symbol-elf.c | 69 +-
 kernel/tools/perf/util/symbol-minimal.c | 2 +-
 kernel/tools/perf/util/symbol.c | 208 +-
 kernel/tools/perf/util/symbol.h | 21 +-
 kernel/tools/perf/util/thread-stack.c | 18 +-
 kernel/tools/perf/util/thread-stack.h | 1 +
 kernel/tools/perf/util/thread.c | 18 +-
 kernel/tools/perf/util/thread.h | 5 +-
 kernel/tools/perf/util/thread_map.c | 161 +-
 kernel/tools/perf/util/thread_map.h | 31 +-
 kernel/tools/perf/util/tool.h | 14 +-
 kernel/tools/perf/util/trace-event-info.c | 24 +-
 kernel/tools/perf/util/trace-event-parse.c | 32 +-
 kernel/tools/perf/util/trace-event-read.c | 28 +-
 kernel/tools/perf/util/trace-event.c | 58 +-
 kernel/tools/perf/util/trace-event.h | 4 +
 kernel/tools/perf/util/unwind-libunwind.c | 30 +-
 kernel/tools/perf/util/usage.c | 5 +
 kernel/tools/perf/util/util.c | 364 +--
 kernel/tools/perf/util/util.h | 41 +-
 kernel/tools/perf/util/vdso.c | 58 +-
 kernel/tools/perf/util/vdso.h | 4 +-
 kernel/tools/perf/util/xyarray.c | 8 +
 kernel/tools/perf/util/xyarray.h | 2 +
 145 files changed, 20923 insertions(+), 2644 deletions(-)
 create mode 100644 kernel/tools/perf/util/auxtrace.c
 create mode 100644 kernel/tools/perf/util/auxtrace.h
 create mode 100644 kernel/tools/perf/util/bpf-loader.c
 create mode 100644 kernel/tools/perf/util/bpf-loader.h
 create mode 100644 kernel/tools/perf/util/counts.c
 create mode 100644 kernel/tools/perf/util/counts.h
 create mode 100644 kernel/tools/perf/util/env.c
 create mode 100644 kernel/tools/perf/util/env.h
 delete mode 100644 kernel/tools/perf/util/include/linux/kernel.h
 delete mode 100644 kernel/tools/perf/util/include/linux/list.h
 delete mode 100644 kernel/tools/perf/util/include/linux/poison.h
 delete mode 100644 kernel/tools/perf/util/include/linux/rbtree.h
 delete mode 100644 kernel/tools/perf/util/include/linux/rbtree_augmented.h
 create mode 100644 kernel/tools/perf/util/intel-bts.c
 create mode 100644 kernel/tools/perf/util/intel-bts.h
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/Build
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/inat.c
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/inat.h
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/inat_types.h
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/insn.c
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/insn.h
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/intel-pt-log.c
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/intel-pt-log.h
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h
 create mode 100644 kernel/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
 create mode 100644 kernel/tools/perf/util/intel-pt.c
 create mode 100644 kernel/tools/perf/util/intel-pt.h
 create mode 100644 kernel/tools/perf/util/llvm-utils.c
 create mode 100644 kernel/tools/perf/util/llvm-utils.h
 create mode 100644 kernel/tools/perf/util/parse-branch-options.c
 create mode 100644 kernel/tools/perf/util/parse-branch-options.h
 create mode 100644 kernel/tools/perf/util/parse-regs-options.c
 create mode 100644 kernel/tools/perf/util/parse-regs-options.h
 create mode 100644 kernel/tools/perf/util/probe-file.c
 create mode 100644 kernel/tools/perf/util/probe-file.h
 create mode 100644 kernel/tools/perf/util/stat-shadow.c

diff --git a/kernel/tools/perf/util/Build b/kernel/tools/perf/util/Build
index 797490a40..591b3fe3e 100644
--- a/kernel/tools/perf/util/Build
+++ b/kernel/tools/perf/util/Build
@@ -5,6 +5,7 @@ libperf-y += build-id.o
 libperf-y += config.o
 libperf-y += ctype.o
 libperf-y += db-export.o
+libperf-y += env.o
 libperf-y += environment.o
 libperf-y += event.o
 libperf-y += evlist.o
@@ -14,8 +15,10 @@ libperf-y += find_next_bit.o
 libperf-y += help.o
 libperf-y += kallsyms.o
 libperf-y += levenshtein.o
+libperf-y += llvm-utils.o
 libperf-y += parse-options.o
 libperf-y += parse-events.o
+libperf-y += perf_regs.o
 libperf-y += path.o
 libperf-y += rbtree.o
 libperf-y += bitmap.o
@@ -67,15 +70,26 @@ libperf-y += target.o
 libperf-y +=
rblist.o libperf-y += intlist.o libperf-y += vdso.o +libperf-y += counts.o libperf-y += stat.o +libperf-y += stat-shadow.o libperf-y += record.o libperf-y += srcline.o libperf-y += data.o libperf-$(CONFIG_X86) += tsc.o +libperf-$(CONFIG_AUXTRACE) += tsc.o libperf-y += cloexec.o libperf-y += thread-stack.o - +libperf-$(CONFIG_AUXTRACE) += auxtrace.o +libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/ +libperf-$(CONFIG_AUXTRACE) += intel-pt.o +libperf-$(CONFIG_AUXTRACE) += intel-bts.o +libperf-y += parse-branch-options.o +libperf-y += parse-regs-options.o + +libperf-$(CONFIG_LIBBPF) += bpf-loader.o libperf-$(CONFIG_LIBELF) += symbol-elf.o +libperf-$(CONFIG_LIBELF) += probe-file.o libperf-$(CONFIG_LIBELF) += probe-event.o ifndef CONFIG_LIBELF @@ -92,7 +106,6 @@ libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o libperf-y += scripting-engines/ -libperf-$(CONFIG_PERF_REGS) += perf_regs.o libperf-$(CONFIG_ZLIB) += zlib.o libperf-$(CONFIG_LZMA) += lzma.o @@ -101,23 +114,23 @@ CFLAGS_exec_cmd.o += -DPERF_EXEC_PATH="BUILD_STR($(perfexecdir_SQ))" -DPREFIX="B $(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c $(call rule_mkdir) - @$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) util/parse-events.l + $(Q)$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) util/parse-events.l $(OUTPUT)util/parse-events-bison.c: util/parse-events.y $(call rule_mkdir) - @$(call echo-cmd,bison)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $@ -p parse_events_ + $(Q)$(call echo-cmd,bison)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $@ -p parse_events_ $(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c $(call rule_mkdir) - @$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/pmu-flex.h util/pmu.l + $(Q)$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/pmu-flex.h util/pmu.l $(OUTPUT)util/pmu-bison.c: util/pmu.y $(call rule_mkdir) - @$(call echo-cmd,bison)$(BISON) -v util/pmu.y -d -o $@ -p perf_pmu_ + $(Q)$(call echo-cmd,bison)$(BISON) -v util/pmu.y -d -o $@ -p perf_pmu_ CFLAGS_parse-events-flex.o += -w CFLAGS_pmu-flex.o += -w -CFLAGS_parse-events-bison.o += -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w +CFLAGS_parse-events-bison.o += -DYYENABLE_NLS=0 -w CFLAGS_pmu-bison.o += -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w $(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c @@ -136,10 +149,10 @@ $(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c FORCE $(call rule_mkdir) $(call if_changed_dep,cc_o_c) -$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c FORCE +$(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE $(call rule_mkdir) $(call if_changed_dep,cc_o_c) -$(OUTPUT)util/hweight.o: ../../lib/hweight.c FORCE +$(OUTPUT)util/hweight.o: ../lib/hweight.c FORCE $(call rule_mkdir) $(call if_changed_dep,cc_o_c) diff --git a/kernel/tools/perf/util/annotate.c b/kernel/tools/perf/util/annotate.c index 7f5bdfc9b..1dd1949b0 100644 --- a/kernel/tools/perf/util/annotate.c +++ b/kernel/tools/perf/util/annotate.c @@ -473,17 +473,73 @@ int symbol__alloc_hist(struct symbol *sym) return 0; } +/* The cycles histogram is lazily allocated. 
*/ +static int symbol__alloc_hist_cycles(struct symbol *sym) +{ + struct annotation *notes = symbol__annotation(sym); + const size_t size = symbol__size(sym); + + notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist)); + if (notes->src->cycles_hist == NULL) + return -1; + return 0; +} + void symbol__annotate_zero_histograms(struct symbol *sym) { struct annotation *notes = symbol__annotation(sym); pthread_mutex_lock(¬es->lock); - if (notes->src != NULL) + if (notes->src != NULL) { memset(notes->src->histograms, 0, notes->src->nr_histograms * notes->src->sizeof_sym_hist); + if (notes->src->cycles_hist) + memset(notes->src->cycles_hist, 0, + symbol__size(sym) * sizeof(struct cyc_hist)); + } pthread_mutex_unlock(¬es->lock); } +static int __symbol__account_cycles(struct annotation *notes, + u64 start, + unsigned offset, unsigned cycles, + unsigned have_start) +{ + struct cyc_hist *ch; + + ch = notes->src->cycles_hist; + /* + * For now we can only account one basic block per + * final jump. But multiple could be overlapping. + * Always account the longest one. So when + * a shorter one has been already seen throw it away. + * + * We separately always account the full cycles. + */ + ch[offset].num_aggr++; + ch[offset].cycles_aggr += cycles; + + if (!have_start && ch[offset].have_start) + return 0; + if (ch[offset].num) { + if (have_start && (!ch[offset].have_start || + ch[offset].start > start)) { + ch[offset].have_start = 0; + ch[offset].cycles = 0; + ch[offset].num = 0; + if (ch[offset].reset < 0xffff) + ch[offset].reset++; + } else if (have_start && + ch[offset].start < start) + return 0; + } + ch[offset].have_start = have_start; + ch[offset].start = start; + ch[offset].cycles += cycles; + ch[offset].num++; + return 0; +} + static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map, struct annotation *notes, int evidx, u64 addr) { @@ -492,8 +548,11 @@ static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map, pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr)); - if (addr < sym->start || addr >= sym->end) + if (addr < sym->start || addr >= sym->end) { + pr_debug("%s(%d): ERANGE! 
sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n", + __func__, __LINE__, sym->name, sym->start, addr, sym->end); return -ERANGE; + } offset = addr - sym->start; h = annotation__histogram(notes, evidx); @@ -506,6 +565,21 @@ static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map, return 0; } +static struct annotation *symbol__get_annotation(struct symbol *sym, bool cycles) +{ + struct annotation *notes = symbol__annotation(sym); + + if (notes->src == NULL) { + if (symbol__alloc_hist(sym) < 0) + return NULL; + } + if (!notes->src->cycles_hist && cycles) { + if (symbol__alloc_hist_cycles(sym) < 0) + return NULL; + } + return notes; +} + static int symbol__inc_addr_samples(struct symbol *sym, struct map *map, int evidx, u64 addr) { @@ -513,14 +587,71 @@ static int symbol__inc_addr_samples(struct symbol *sym, struct map *map, if (sym == NULL) return 0; + notes = symbol__get_annotation(sym, false); + if (notes == NULL) + return -ENOMEM; + return __symbol__inc_addr_samples(sym, map, notes, evidx, addr); +} - notes = symbol__annotation(sym); - if (notes->src == NULL) { - if (symbol__alloc_hist(sym) < 0) - return -ENOMEM; +static int symbol__account_cycles(u64 addr, u64 start, + struct symbol *sym, unsigned cycles) +{ + struct annotation *notes; + unsigned offset; + + if (sym == NULL) + return 0; + notes = symbol__get_annotation(sym, true); + if (notes == NULL) + return -ENOMEM; + if (addr < sym->start || addr >= sym->end) + return -ERANGE; + + if (start) { + if (start < sym->start || start >= sym->end) + return -ERANGE; + if (start >= addr) + start = 0; } + offset = addr - sym->start; + return __symbol__account_cycles(notes, + start ? start - sym->start : 0, + offset, cycles, + !!start); +} - return __symbol__inc_addr_samples(sym, map, notes, evidx, addr); +int addr_map_symbol__account_cycles(struct addr_map_symbol *ams, + struct addr_map_symbol *start, + unsigned cycles) +{ + u64 saddr = 0; + int err; + + if (!cycles) + return 0; + + /* + * Only set start when IPC can be computed. We can only + * compute it when the basic block is completely in a single + * function. + * Special case the case when the jump is elsewhere, but + * it starts on the function start. + */ + if (start && + (start->sym == ams->sym || + (ams->sym && + start->addr == ams->sym->start + ams->map->start))) + saddr = start->al_addr; + if (saddr == 0) + pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n", + ams->addr, + start ? start->addr : 0, + ams->sym ? 
ams->sym->start + ams->map->start : 0, + saddr); + err = symbol__account_cycles(ams->al_addr, saddr, ams->sym, cycles); + if (err) + pr_debug2("account_cycles failed %d\n", err); + return err; } int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx) @@ -647,14 +778,15 @@ struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disa } double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset, - s64 end, const char **path) + s64 end, const char **path, u64 *nr_samples) { struct source_line *src_line = notes->src->lines; double percent = 0.0; + *nr_samples = 0; if (src_line) { size_t sizeof_src_line = sizeof(*src_line) + - sizeof(src_line->p) * (src_line->nr_pcnt - 1); + sizeof(src_line->samples) * (src_line->nr_pcnt - 1); while (offset < end) { src_line = (void *)notes->src->lines + @@ -663,7 +795,8 @@ double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset, if (*path == NULL) *path = src_line->path; - percent += src_line->p[evidx].percent; + percent += src_line->samples[evidx].percent; + *nr_samples += src_line->samples[evidx].nr; offset++; } } else { @@ -673,8 +806,10 @@ double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset, while (offset < end) hits += h->addr[offset++]; - if (h->sum) + if (h->sum) { + *nr_samples = hits; percent = 100.0 * hits / h->sum; + } } return percent; @@ -689,8 +824,10 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st if (dl->offset != -1) { const char *path = NULL; + u64 nr_samples; double percent, max_percent = 0.0; double *ppercents = &percent; + u64 *psamples = &nr_samples; int i, nr_percent = 1; const char *color; struct annotation *notes = symbol__annotation(sym); @@ -703,8 +840,10 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st if (perf_evsel__is_group_event(evsel)) { nr_percent = evsel->nr_members; ppercents = calloc(nr_percent, sizeof(double)); - if (ppercents == NULL) + psamples = calloc(nr_percent, sizeof(u64)); + if (ppercents == NULL || psamples == NULL) { return -1; + } } for (i = 0; i < nr_percent; i++) { @@ -712,9 +851,10 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st notes->src->lines ? i : evsel->idx + i, offset, next ? 
next->offset : (s64) len, - &path); + &path, &nr_samples); ppercents[i] = percent; + psamples[i] = nr_samples; if (percent > max_percent) max_percent = percent; } @@ -752,8 +892,14 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st for (i = 0; i < nr_percent; i++) { percent = ppercents[i]; + nr_samples = psamples[i]; color = get_percent_color(percent); - color_fprintf(stdout, color, " %7.2f", percent); + + if (symbol_conf.show_total_period) + color_fprintf(stdout, color, " %7" PRIu64, + nr_samples); + else + color_fprintf(stdout, color, " %7.2f", percent); } printf(" : "); @@ -763,6 +909,9 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st if (ppercents != &percent) free(ppercents); + if (psamples != &nr_samples) + free(psamples); + } else if (max_lines && printed >= max_lines) return 1; else { @@ -935,6 +1084,7 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize) struct kcore_extract kce; bool delete_extract = false; int lineno = 0; + int nline; if (filename) symbol__join_symfs(symfs_filename, filename); @@ -980,6 +1130,7 @@ fallback: dso->annotate_warned = 1; pr_err("Can't annotate %s:\n\n" "No vmlinux file%s\nwas found in the path.\n\n" + "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n" "Please use:\n\n" " perf buildid-cache -vu vmlinux\n\n" "or:\n\n" @@ -1029,6 +1180,9 @@ fallback: ret = decompress_to_file(m.ext, symfs_filename, fd); + if (ret) + pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename); + free(m.ext); close(fd); @@ -1054,13 +1208,25 @@ fallback: pr_debug("Executing: %s\n", command); file = popen(command, "r"); - if (!file) + if (!file) { + pr_err("Failure running %s\n", command); + /* + * If we were using debug info should retry with + * original binary. + */ goto out_remove_tmp; + } - while (!feof(file)) + nline = 0; + while (!feof(file)) { if (symbol__parse_objdump_line(sym, map, file, privsize, &lineno) < 0) break; + nline++; + } + + if (nline == 0) + pr_err("No output from %s\n", command); /* * kallsyms does not have symbol sizes so there may a nop at the end. 
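
[The hunks above make symbol__annotate() count the lines parsed from the
objdump pipe (nline) and report when the pipeline produced no output,
since popen() succeeds even if the spawned command itself fails later.
A minimal standalone sketch of that pattern, with an illustrative command
line and buffer size that are assumptions of this sketch, not code from
the patch:]

/*
 * Sketch: run a command through popen() and detect the "pipe opened
 * but produced nothing" case, which a bare feof() loop hides.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *command = "objdump -dS /bin/true"; /* illustrative target */
	char line[1024];
	int nline = 0;
	FILE *file;

	file = popen(command, "r");
	if (!file) {
		fprintf(stderr, "Failure running %s\n", command);
		return EXIT_FAILURE;
	}

	while (fgets(line, sizeof(line), file))
		nline++;	/* the real code parses each disassembly line here */

	if (nline == 0)
		fprintf(stderr, "No output from %s\n", command);

	return pclose(file) ? EXIT_FAILURE : EXIT_SUCCESS;
}

[Counting parsed lines rather than testing feof() alone distinguishes
"objdump ran and we annotated nothing" from a normal end of output.]
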
@@ -1096,7 +1262,7 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin ret = strcmp(iter->path, src_line->path); if (ret == 0) { for (i = 0; i < src_line->nr_pcnt; i++) - iter->p[i].percent_sum += src_line->p[i].percent; + iter->samples[i].percent_sum += src_line->samples[i].percent; return; } @@ -1107,7 +1273,7 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin } for (i = 0; i < src_line->nr_pcnt; i++) - src_line->p[i].percent_sum = src_line->p[i].percent; + src_line->samples[i].percent_sum = src_line->samples[i].percent; rb_link_node(&src_line->node, parent, p); rb_insert_color(&src_line->node, root); @@ -1118,9 +1284,9 @@ static int cmp_source_line(struct source_line *a, struct source_line *b) int i; for (i = 0; i < a->nr_pcnt; i++) { - if (a->p[i].percent_sum == b->p[i].percent_sum) + if (a->samples[i].percent_sum == b->samples[i].percent_sum) continue; - return a->p[i].percent_sum > b->p[i].percent_sum; + return a->samples[i].percent_sum > b->samples[i].percent_sum; } return 0; @@ -1172,7 +1338,7 @@ static void symbol__free_source_line(struct symbol *sym, int len) int i; sizeof_src_line = sizeof(*src_line) + - (sizeof(src_line->p) * (src_line->nr_pcnt - 1)); + (sizeof(src_line->samples) * (src_line->nr_pcnt - 1)); for (i = 0; i < len; i++) { free_srcline(src_line->path); @@ -1204,7 +1370,7 @@ static int symbol__get_source_line(struct symbol *sym, struct map *map, h_sum += h->sum; } nr_pcnt = evsel->nr_members; - sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->p); + sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->samples); } if (!h_sum) @@ -1224,10 +1390,10 @@ static int symbol__get_source_line(struct symbol *sym, struct map *map, for (k = 0; k < nr_pcnt; k++) { h = annotation__histogram(notes, evidx + k); - src_line->p[k].percent = 100.0 * h->addr[i] / h->sum; + src_line->samples[k].percent = 100.0 * h->addr[i] / h->sum; - if (src_line->p[k].percent > percent_max) - percent_max = src_line->p[k].percent; + if (src_line->samples[k].percent > percent_max) + percent_max = src_line->samples[k].percent; } if (percent_max <= 0.5) @@ -1267,7 +1433,7 @@ static void print_summary(struct rb_root *root, const char *filename) src_line = rb_entry(node, struct source_line, node); for (i = 0; i < src_line->nr_pcnt; i++) { - percent = src_line->p[i].percent_sum; + percent = src_line->samples[i].percent_sum; color = get_percent_color(percent); color_fprintf(stdout, color, " %7.2f", percent); @@ -1454,6 +1620,7 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, len = symbol__size(sym); if (print_lines) { + srcline_full_filename = full_paths; symbol__get_source_line(sym, map, evsel, &source_line, len); print_summary(&source_line, dso->long_name); } diff --git a/kernel/tools/perf/util/annotate.h b/kernel/tools/perf/util/annotate.h index cadbdc90a..cea323d9e 100644 --- a/kernel/tools/perf/util/annotate.h +++ b/kernel/tools/perf/util/annotate.h @@ -59,6 +59,8 @@ struct disasm_line { char *name; struct ins *ins; int line_nr; + float ipc; + u64 cycles; struct ins_operands ops; }; @@ -72,23 +74,35 @@ struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disa int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw); size_t disasm__fprintf(struct list_head *head, FILE *fp); double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset, - s64 end, const char **path); + s64 end, const char **path, u64 *nr_samples); struct sym_hist { u64 sum; u64 addr[0]; }; -struct 
source_line_percent { +struct cyc_hist { + u64 start; + u64 cycles; + u64 cycles_aggr; + u32 num; + u32 num_aggr; + u8 have_start; + /* 1 byte padding */ + u16 reset; +}; + +struct source_line_samples { double percent; double percent_sum; + double nr; }; struct source_line { struct rb_node node; char *path; int nr_pcnt; - struct source_line_percent p[1]; + struct source_line_samples samples[1]; }; /** struct annotated_source - symbols with hits have this attached as in sannotation @@ -96,6 +110,7 @@ struct source_line { * @histogram: Array of addr hit histograms per event being monitored * @lines: If 'print_lines' is specified, per source code line percentages * @source: source parsed from a disassembler like objdump -dS + * @cyc_hist: Average cycles per basic block * * lines is allocated, percentages calculated and all sorted by percentage * when the annotation is about to be presented, so the percentages are for @@ -107,7 +122,8 @@ struct annotated_source { struct list_head source; struct source_line *lines; int nr_histograms; - int sizeof_sym_hist; + size_t sizeof_sym_hist; + struct cyc_hist *cycles_hist; struct sym_hist histograms[0]; }; @@ -129,6 +145,10 @@ static inline struct annotation *symbol__annotation(struct symbol *sym) int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx); +int addr_map_symbol__account_cycles(struct addr_map_symbol *ams, + struct addr_map_symbol *start, + unsigned cycles); + int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 addr); int symbol__alloc_hist(struct symbol *sym); diff --git a/kernel/tools/perf/util/auxtrace.c b/kernel/tools/perf/util/auxtrace.c new file mode 100644 index 000000000..7f10430af --- /dev/null +++ b/kernel/tools/perf/util/auxtrace.c @@ -0,0 +1,1392 @@ +/* + * auxtrace.c: AUX area trace support + * Copyright (c) 2013-2015, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "../perf.h" +#include "util.h" +#include "evlist.h" +#include "cpumap.h" +#include "thread_map.h" +#include "asm/bug.h" +#include "auxtrace.h" + +#include + +#include "event.h" +#include "session.h" +#include "debug.h" +#include "parse-options.h" + +#include "intel-pt.h" +#include "intel-bts.h" + +int auxtrace_mmap__mmap(struct auxtrace_mmap *mm, + struct auxtrace_mmap_params *mp, + void *userpg, int fd) +{ + struct perf_event_mmap_page *pc = userpg; + + WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n"); + + mm->userpg = userpg; + mm->mask = mp->mask; + mm->len = mp->len; + mm->prev = 0; + mm->idx = mp->idx; + mm->tid = mp->tid; + mm->cpu = mp->cpu; + + if (!mp->len) { + mm->base = NULL; + return 0; + } + +#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT) + pr_err("Cannot use AUX area tracing mmaps\n"); + return -1; +#endif + + pc->aux_offset = mp->offset; + pc->aux_size = mp->len; + + mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset); + if (mm->base == MAP_FAILED) { + pr_debug2("failed to mmap AUX area\n"); + mm->base = NULL; + return -1; + } + + return 0; +} + +void auxtrace_mmap__munmap(struct auxtrace_mmap *mm) +{ + if (mm->base) { + munmap(mm->base, mm->len); + mm->base = NULL; + } +} + +void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp, + off_t auxtrace_offset, + unsigned int auxtrace_pages, + bool auxtrace_overwrite) +{ + if (auxtrace_pages) { + mp->offset = auxtrace_offset; + mp->len = auxtrace_pages * (size_t)page_size; + mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0; + mp->prot = PROT_READ | (auxtrace_overwrite ? 
0 : PROT_WRITE); + pr_debug2("AUX area mmap length %zu\n", mp->len); + } else { + mp->len = 0; + } +} + +void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp, + struct perf_evlist *evlist, int idx, + bool per_cpu) +{ + mp->idx = idx; + + if (per_cpu) { + mp->cpu = evlist->cpus->map[idx]; + if (evlist->threads) + mp->tid = thread_map__pid(evlist->threads, 0); + else + mp->tid = -1; + } else { + mp->cpu = -1; + mp->tid = thread_map__pid(evlist->threads, idx); + } +} + +#define AUXTRACE_INIT_NR_QUEUES 32 + +static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues) +{ + struct auxtrace_queue *queue_array; + unsigned int max_nr_queues, i; + + max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue); + if (nr_queues > max_nr_queues) + return NULL; + + queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue)); + if (!queue_array) + return NULL; + + for (i = 0; i < nr_queues; i++) { + INIT_LIST_HEAD(&queue_array[i].head); + queue_array[i].priv = NULL; + } + + return queue_array; +} + +int auxtrace_queues__init(struct auxtrace_queues *queues) +{ + queues->nr_queues = AUXTRACE_INIT_NR_QUEUES; + queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues); + if (!queues->queue_array) + return -ENOMEM; + return 0; +} + +static int auxtrace_queues__grow(struct auxtrace_queues *queues, + unsigned int new_nr_queues) +{ + unsigned int nr_queues = queues->nr_queues; + struct auxtrace_queue *queue_array; + unsigned int i; + + if (!nr_queues) + nr_queues = AUXTRACE_INIT_NR_QUEUES; + + while (nr_queues && nr_queues < new_nr_queues) + nr_queues <<= 1; + + if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues) + return -EINVAL; + + queue_array = auxtrace_alloc_queue_array(nr_queues); + if (!queue_array) + return -ENOMEM; + + for (i = 0; i < queues->nr_queues; i++) { + list_splice_tail(&queues->queue_array[i].head, + &queue_array[i].head); + queue_array[i].priv = queues->queue_array[i].priv; + } + + queues->nr_queues = nr_queues; + queues->queue_array = queue_array; + + return 0; +} + +static void *auxtrace_copy_data(u64 size, struct perf_session *session) +{ + int fd = perf_data_file__fd(session->file); + void *p; + ssize_t ret; + + if (size > SSIZE_MAX) + return NULL; + + p = malloc(size); + if (!p) + return NULL; + + ret = readn(fd, p, size); + if (ret != (ssize_t)size) { + free(p); + return NULL; + } + + return p; +} + +static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues, + unsigned int idx, + struct auxtrace_buffer *buffer) +{ + struct auxtrace_queue *queue; + int err; + + if (idx >= queues->nr_queues) { + err = auxtrace_queues__grow(queues, idx + 1); + if (err) + return err; + } + + queue = &queues->queue_array[idx]; + + if (!queue->set) { + queue->set = true; + queue->tid = buffer->tid; + queue->cpu = buffer->cpu; + } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) { + pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n", + queue->cpu, queue->tid, buffer->cpu, buffer->tid); + return -EINVAL; + } + + buffer->buffer_nr = queues->next_buffer_nr++; + + list_add_tail(&buffer->list, &queue->head); + + queues->new_data = true; + queues->populated = true; + + return 0; +} + +/* Limit buffers to 32MiB on 32-bit */ +#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024) + +static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues, + unsigned int idx, + struct auxtrace_buffer *buffer) +{ + u64 sz = buffer->size; + bool consecutive = false; + struct auxtrace_buffer *b; + int err; + + while (sz > 
BUFFER_LIMIT_FOR_32_BIT) {
+		b = memdup(buffer, sizeof(struct auxtrace_buffer));
+		if (!b)
+			return -ENOMEM;
+		b->size = BUFFER_LIMIT_FOR_32_BIT;
+		b->consecutive = consecutive;
+		err = auxtrace_queues__add_buffer(queues, idx, b);
+		if (err) {
+			auxtrace_buffer__free(b);
+			return err;
+		}
+		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
+		sz -= BUFFER_LIMIT_FOR_32_BIT;
+		consecutive = true;
+	}
+
+	buffer->size = sz;
+	buffer->consecutive = consecutive;
+
+	return 0;
+}
+
+static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
+					     struct perf_session *session,
+					     unsigned int idx,
+					     struct auxtrace_buffer *buffer)
+{
+	if (session->one_mmap) {
+		buffer->data = buffer->data_offset - session->one_mmap_offset +
+			       session->one_mmap_addr;
+	} else if (perf_data_file__is_pipe(session->file)) {
+		buffer->data = auxtrace_copy_data(buffer->size, session);
+		if (!buffer->data)
+			return -ENOMEM;
+		buffer->data_needs_freeing = true;
+	} else if (BITS_PER_LONG == 32 &&
+		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
+		int err;
+
+		err = auxtrace_queues__split_buffer(queues, idx, buffer);
+		if (err)
+			return err;
+	}
+
+	return auxtrace_queues__add_buffer(queues, idx, buffer);
+}
+
+int auxtrace_queues__add_event(struct auxtrace_queues *queues,
+			       struct perf_session *session,
+			       union perf_event *event, off_t data_offset,
+			       struct auxtrace_buffer **buffer_ptr)
+{
+	struct auxtrace_buffer *buffer;
+	unsigned int idx;
+	int err;
+
+	buffer = zalloc(sizeof(struct auxtrace_buffer));
+	if (!buffer)
+		return -ENOMEM;
+
+	buffer->pid = -1;
+	buffer->tid = event->auxtrace.tid;
+	buffer->cpu = event->auxtrace.cpu;
+	buffer->data_offset = data_offset;
+	buffer->offset = event->auxtrace.offset;
+	buffer->reference = event->auxtrace.reference;
+	buffer->size = event->auxtrace.size;
+	idx = event->auxtrace.idx;
+
+	err = auxtrace_queues__add_event_buffer(queues, session, idx, buffer);
+	if (err)
+		goto out_err;
+
+	if (buffer_ptr)
+		*buffer_ptr = buffer;
+
+	return 0;
+
+out_err:
+	auxtrace_buffer__free(buffer);
+	return err;
+}
+
+static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
+					      struct perf_session *session,
+					      off_t file_offset, size_t sz)
+{
+	union perf_event *event;
+	int err;
+	char buf[PERF_SAMPLE_MAX_SIZE];
+
+	err = perf_session__peek_event(session, file_offset, buf,
+				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
+	if (err)
+		return err;
+
+	if (event->header.type == PERF_RECORD_AUXTRACE) {
+		if (event->header.size < sizeof(struct auxtrace_event) ||
+		    event->header.size != sz) {
+			err = -EINVAL;
+			goto out;
+		}
+		file_offset += event->header.size;
+		err = auxtrace_queues__add_event(queues, session, event,
+						 file_offset, NULL);
+	}
+out:
+	return err;
+}
+
+void auxtrace_queues__free(struct auxtrace_queues *queues)
+{
+	unsigned int i;
+
+	for (i = 0; i < queues->nr_queues; i++) {
+		while (!list_empty(&queues->queue_array[i].head)) {
+			struct auxtrace_buffer *buffer;
+
+			buffer = list_entry(queues->queue_array[i].head.next,
+					    struct auxtrace_buffer, list);
+			list_del(&buffer->list);
+			auxtrace_buffer__free(buffer);
+		}
+	}
+
+	zfree(&queues->queue_array);
+	queues->nr_queues = 0;
+}
+
+static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
+			     unsigned int pos, unsigned int queue_nr,
+			     u64 ordinal)
+{
+	unsigned int parent;
+
+	while (pos) {
+		parent = (pos - 1) >> 1;
+		if (heap_array[parent].ordinal <= ordinal)
+			break;
+		heap_array[pos] = heap_array[parent];
+		pos = parent;
+	}
+	heap_array[pos].queue_nr = queue_nr;
+	heap_array[pos].ordinal = ordinal;
+}
+
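
[auxtrace_heapify() above is the sift-up step of an array-backed binary
min-heap keyed by ordinal (a timestamp), which auxtrace_heap__add() and
auxtrace_heap__pop() below use to pick, across per-cpu/per-thread queues,
the buffer with the earliest time. A self-contained sketch of the same
invariant, with the kernel types simplified to plain C; this is an
illustration, not code from the patch:]

/*
 * Sketch: min-heap sift-up over an array using the same
 * parent = (pos - 1) / 2 layout as auxtrace_heapify().
 */
#include <assert.h>

struct item {
	unsigned int queue_nr;
	unsigned long long ordinal;
};

static void sift_up(struct item *heap, unsigned int pos,
		    unsigned int queue_nr, unsigned long long ordinal)
{
	while (pos) {
		unsigned int parent = (pos - 1) >> 1;

		if (heap[parent].ordinal <= ordinal)
			break;			/* parent is earlier: stop */
		heap[pos] = heap[parent];	/* pull parent down */
		pos = parent;
	}
	heap[pos].queue_nr = queue_nr;
	heap[pos].ordinal = ordinal;
}

int main(void)
{
	struct item heap[8];
	unsigned int cnt = 0;

	/* insert out of time order ... */
	sift_up(heap, cnt++, 0, 300);
	sift_up(heap, cnt++, 1, 100);
	sift_up(heap, cnt++, 2, 200);

	/* ... and the root is always the earliest ordinal */
	assert(heap[0].ordinal == 100 && heap[0].queue_nr == 1);
	return 0;
}
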
+int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr, + u64 ordinal) +{ + struct auxtrace_heap_item *heap_array; + + if (queue_nr >= heap->heap_sz) { + unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES; + + while (heap_sz <= queue_nr) + heap_sz <<= 1; + heap_array = realloc(heap->heap_array, + heap_sz * sizeof(struct auxtrace_heap_item)); + if (!heap_array) + return -ENOMEM; + heap->heap_array = heap_array; + heap->heap_sz = heap_sz; + } + + auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal); + + return 0; +} + +void auxtrace_heap__free(struct auxtrace_heap *heap) +{ + zfree(&heap->heap_array); + heap->heap_cnt = 0; + heap->heap_sz = 0; +} + +void auxtrace_heap__pop(struct auxtrace_heap *heap) +{ + unsigned int pos, last, heap_cnt = heap->heap_cnt; + struct auxtrace_heap_item *heap_array; + + if (!heap_cnt) + return; + + heap->heap_cnt -= 1; + + heap_array = heap->heap_array; + + pos = 0; + while (1) { + unsigned int left, right; + + left = (pos << 1) + 1; + if (left >= heap_cnt) + break; + right = left + 1; + if (right >= heap_cnt) { + heap_array[pos] = heap_array[left]; + return; + } + if (heap_array[left].ordinal < heap_array[right].ordinal) { + heap_array[pos] = heap_array[left]; + pos = left; + } else { + heap_array[pos] = heap_array[right]; + pos = right; + } + } + + last = heap_cnt - 1; + auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr, + heap_array[last].ordinal); +} + +size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr) +{ + if (itr) + return itr->info_priv_size(itr); + return 0; +} + +static int auxtrace_not_supported(void) +{ + pr_err("AUX area tracing is not supported on this architecture\n"); + return -EINVAL; +} + +int auxtrace_record__info_fill(struct auxtrace_record *itr, + struct perf_session *session, + struct auxtrace_info_event *auxtrace_info, + size_t priv_size) +{ + if (itr) + return itr->info_fill(itr, session, auxtrace_info, priv_size); + return auxtrace_not_supported(); +} + +void auxtrace_record__free(struct auxtrace_record *itr) +{ + if (itr) + itr->free(itr); +} + +int auxtrace_record__snapshot_start(struct auxtrace_record *itr) +{ + if (itr && itr->snapshot_start) + return itr->snapshot_start(itr); + return 0; +} + +int auxtrace_record__snapshot_finish(struct auxtrace_record *itr) +{ + if (itr && itr->snapshot_finish) + return itr->snapshot_finish(itr); + return 0; +} + +int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx, + struct auxtrace_mmap *mm, + unsigned char *data, u64 *head, u64 *old) +{ + if (itr && itr->find_snapshot) + return itr->find_snapshot(itr, idx, mm, data, head, old); + return 0; +} + +int auxtrace_record__options(struct auxtrace_record *itr, + struct perf_evlist *evlist, + struct record_opts *opts) +{ + if (itr) + return itr->recording_options(itr, evlist, opts); + return 0; +} + +u64 auxtrace_record__reference(struct auxtrace_record *itr) +{ + if (itr) + return itr->reference(itr); + return 0; +} + +int auxtrace_parse_snapshot_options(struct auxtrace_record *itr, + struct record_opts *opts, const char *str) +{ + if (!str) + return 0; + + if (itr) + return itr->parse_snapshot_options(itr, opts, str); + + pr_err("No AUX area tracing to snapshot\n"); + return -EINVAL; +} + +struct auxtrace_record *__weak +auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, int *err) +{ + *err = 0; + return NULL; +} + +static int auxtrace_index__alloc(struct list_head *head) +{ + struct auxtrace_index *auxtrace_index; + + auxtrace_index = malloc(sizeof(struct 
auxtrace_index)); + if (!auxtrace_index) + return -ENOMEM; + + auxtrace_index->nr = 0; + INIT_LIST_HEAD(&auxtrace_index->list); + + list_add_tail(&auxtrace_index->list, head); + + return 0; +} + +void auxtrace_index__free(struct list_head *head) +{ + struct auxtrace_index *auxtrace_index, *n; + + list_for_each_entry_safe(auxtrace_index, n, head, list) { + list_del(&auxtrace_index->list); + free(auxtrace_index); + } +} + +static struct auxtrace_index *auxtrace_index__last(struct list_head *head) +{ + struct auxtrace_index *auxtrace_index; + int err; + + if (list_empty(head)) { + err = auxtrace_index__alloc(head); + if (err) + return NULL; + } + + auxtrace_index = list_entry(head->prev, struct auxtrace_index, list); + + if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) { + err = auxtrace_index__alloc(head); + if (err) + return NULL; + auxtrace_index = list_entry(head->prev, struct auxtrace_index, + list); + } + + return auxtrace_index; +} + +int auxtrace_index__auxtrace_event(struct list_head *head, + union perf_event *event, off_t file_offset) +{ + struct auxtrace_index *auxtrace_index; + size_t nr; + + auxtrace_index = auxtrace_index__last(head); + if (!auxtrace_index) + return -ENOMEM; + + nr = auxtrace_index->nr; + auxtrace_index->entries[nr].file_offset = file_offset; + auxtrace_index->entries[nr].sz = event->header.size; + auxtrace_index->nr += 1; + + return 0; +} + +static int auxtrace_index__do_write(int fd, + struct auxtrace_index *auxtrace_index) +{ + struct auxtrace_index_entry ent; + size_t i; + + for (i = 0; i < auxtrace_index->nr; i++) { + ent.file_offset = auxtrace_index->entries[i].file_offset; + ent.sz = auxtrace_index->entries[i].sz; + if (writen(fd, &ent, sizeof(ent)) != sizeof(ent)) + return -errno; + } + return 0; +} + +int auxtrace_index__write(int fd, struct list_head *head) +{ + struct auxtrace_index *auxtrace_index; + u64 total = 0; + int err; + + list_for_each_entry(auxtrace_index, head, list) + total += auxtrace_index->nr; + + if (writen(fd, &total, sizeof(total)) != sizeof(total)) + return -errno; + + list_for_each_entry(auxtrace_index, head, list) { + err = auxtrace_index__do_write(fd, auxtrace_index); + if (err) + return err; + } + + return 0; +} + +static int auxtrace_index__process_entry(int fd, struct list_head *head, + bool needs_swap) +{ + struct auxtrace_index *auxtrace_index; + struct auxtrace_index_entry ent; + size_t nr; + + if (readn(fd, &ent, sizeof(ent)) != sizeof(ent)) + return -1; + + auxtrace_index = auxtrace_index__last(head); + if (!auxtrace_index) + return -1; + + nr = auxtrace_index->nr; + if (needs_swap) { + auxtrace_index->entries[nr].file_offset = + bswap_64(ent.file_offset); + auxtrace_index->entries[nr].sz = bswap_64(ent.sz); + } else { + auxtrace_index->entries[nr].file_offset = ent.file_offset; + auxtrace_index->entries[nr].sz = ent.sz; + } + + auxtrace_index->nr = nr + 1; + + return 0; +} + +int auxtrace_index__process(int fd, u64 size, struct perf_session *session, + bool needs_swap) +{ + struct list_head *head = &session->auxtrace_index; + u64 nr; + + if (readn(fd, &nr, sizeof(u64)) != sizeof(u64)) + return -1; + + if (needs_swap) + nr = bswap_64(nr); + + if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size) + return -1; + + while (nr--) { + int err; + + err = auxtrace_index__process_entry(fd, head, needs_swap); + if (err) + return -1; + } + + return 0; +} + +static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues, + struct perf_session *session, + struct auxtrace_index_entry *ent) +{ + 
return auxtrace_queues__add_indexed_event(queues, session, + ent->file_offset, ent->sz); +} + +int auxtrace_queues__process_index(struct auxtrace_queues *queues, + struct perf_session *session) +{ + struct auxtrace_index *auxtrace_index; + struct auxtrace_index_entry *ent; + size_t i; + int err; + + list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) { + for (i = 0; i < auxtrace_index->nr; i++) { + ent = &auxtrace_index->entries[i]; + err = auxtrace_queues__process_index_entry(queues, + session, + ent); + if (err) + return err; + } + } + return 0; +} + +struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue, + struct auxtrace_buffer *buffer) +{ + if (buffer) { + if (list_is_last(&buffer->list, &queue->head)) + return NULL; + return list_entry(buffer->list.next, struct auxtrace_buffer, + list); + } else { + if (list_empty(&queue->head)) + return NULL; + return list_entry(queue->head.next, struct auxtrace_buffer, + list); + } +} + +void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd) +{ + size_t adj = buffer->data_offset & (page_size - 1); + size_t size = buffer->size + adj; + off_t file_offset = buffer->data_offset - adj; + void *addr; + + if (buffer->data) + return buffer->data; + + addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset); + if (addr == MAP_FAILED) + return NULL; + + buffer->mmap_addr = addr; + buffer->mmap_size = size; + + buffer->data = addr + adj; + + return buffer->data; +} + +void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer) +{ + if (!buffer->data || !buffer->mmap_addr) + return; + munmap(buffer->mmap_addr, buffer->mmap_size); + buffer->mmap_addr = NULL; + buffer->mmap_size = 0; + buffer->data = NULL; + buffer->use_data = NULL; +} + +void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer) +{ + auxtrace_buffer__put_data(buffer); + if (buffer->data_needs_freeing) { + buffer->data_needs_freeing = false; + zfree(&buffer->data); + buffer->use_data = NULL; + buffer->size = 0; + } +} + +void auxtrace_buffer__free(struct auxtrace_buffer *buffer) +{ + auxtrace_buffer__drop_data(buffer); + free(buffer); +} + +void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type, + int code, int cpu, pid_t pid, pid_t tid, u64 ip, + const char *msg) +{ + size_t size; + + memset(auxtrace_error, 0, sizeof(struct auxtrace_error_event)); + + auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR; + auxtrace_error->type = type; + auxtrace_error->code = code; + auxtrace_error->cpu = cpu; + auxtrace_error->pid = pid; + auxtrace_error->tid = tid; + auxtrace_error->ip = ip; + strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG); + + size = (void *)auxtrace_error->msg - (void *)auxtrace_error + + strlen(auxtrace_error->msg) + 1; + auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64)); +} + +int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr, + struct perf_tool *tool, + struct perf_session *session, + perf_event__handler_t process) +{ + union perf_event *ev; + size_t priv_size; + int err; + + pr_debug2("Synthesizing auxtrace information\n"); + priv_size = auxtrace_record__info_priv_size(itr); + ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size); + if (!ev) + return -ENOMEM; + + ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO; + ev->auxtrace_info.header.size = sizeof(struct auxtrace_info_event) + + priv_size; + err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info, + priv_size); + if (err) + goto out_free; + + err = process(tool, ev, NULL, 
NULL); +out_free: + free(ev); + return err; +} + +static bool auxtrace__dont_decode(struct perf_session *session) +{ + return !session->itrace_synth_opts || + session->itrace_synth_opts->dont_decode; +} + +int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_session *session) +{ + enum auxtrace_type type = event->auxtrace_info.type; + + if (dump_trace) + fprintf(stdout, " type: %u\n", type); + + switch (type) { + case PERF_AUXTRACE_INTEL_PT: + return intel_pt_process_auxtrace_info(event, session); + case PERF_AUXTRACE_INTEL_BTS: + return intel_bts_process_auxtrace_info(event, session); + case PERF_AUXTRACE_UNKNOWN: + default: + return -EINVAL; + } +} + +s64 perf_event__process_auxtrace(struct perf_tool *tool, + union perf_event *event, + struct perf_session *session) +{ + s64 err; + + if (dump_trace) + fprintf(stdout, " size: %#"PRIx64" offset: %#"PRIx64" ref: %#"PRIx64" idx: %u tid: %d cpu: %d\n", + event->auxtrace.size, event->auxtrace.offset, + event->auxtrace.reference, event->auxtrace.idx, + event->auxtrace.tid, event->auxtrace.cpu); + + if (auxtrace__dont_decode(session)) + return event->auxtrace.size; + + if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE) + return -EINVAL; + + err = session->auxtrace->process_auxtrace_event(session, event, tool); + if (err < 0) + return err; + + return event->auxtrace.size; +} + +#define PERF_ITRACE_DEFAULT_PERIOD_TYPE PERF_ITRACE_PERIOD_NANOSECS +#define PERF_ITRACE_DEFAULT_PERIOD 100000 +#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ 16 +#define PERF_ITRACE_MAX_CALLCHAIN_SZ 1024 +#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ 64 +#define PERF_ITRACE_MAX_LAST_BRANCH_SZ 1024 + +void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts) +{ + synth_opts->instructions = true; + synth_opts->branches = true; + synth_opts->transactions = true; + synth_opts->errors = true; + synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE; + synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD; + synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ; + synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ; +} + +/* + * Please check tools/perf/Documentation/perf-script.txt for information + * about the options parsed here, which is introduced after this cset, + * when support in 'perf script' for these options is introduced. 
+ */ +int itrace_parse_synth_opts(const struct option *opt, const char *str, + int unset) +{ + struct itrace_synth_opts *synth_opts = opt->value; + const char *p; + char *endptr; + bool period_type_set = false; + bool period_set = false; + + synth_opts->set = true; + + if (unset) { + synth_opts->dont_decode = true; + return 0; + } + + if (!str) { + itrace_synth_opts__set_default(synth_opts); + return 0; + } + + for (p = str; *p;) { + switch (*p++) { + case 'i': + synth_opts->instructions = true; + while (*p == ' ' || *p == ',') + p += 1; + if (isdigit(*p)) { + synth_opts->period = strtoull(p, &endptr, 10); + period_set = true; + p = endptr; + while (*p == ' ' || *p == ',') + p += 1; + switch (*p++) { + case 'i': + synth_opts->period_type = + PERF_ITRACE_PERIOD_INSTRUCTIONS; + period_type_set = true; + break; + case 't': + synth_opts->period_type = + PERF_ITRACE_PERIOD_TICKS; + period_type_set = true; + break; + case 'm': + synth_opts->period *= 1000; + /* Fall through */ + case 'u': + synth_opts->period *= 1000; + /* Fall through */ + case 'n': + if (*p++ != 's') + goto out_err; + synth_opts->period_type = + PERF_ITRACE_PERIOD_NANOSECS; + period_type_set = true; + break; + case '\0': + goto out; + default: + goto out_err; + } + } + break; + case 'b': + synth_opts->branches = true; + break; + case 'x': + synth_opts->transactions = true; + break; + case 'e': + synth_opts->errors = true; + break; + case 'd': + synth_opts->log = true; + break; + case 'c': + synth_opts->branches = true; + synth_opts->calls = true; + break; + case 'r': + synth_opts->branches = true; + synth_opts->returns = true; + break; + case 'g': + synth_opts->callchain = true; + synth_opts->callchain_sz = + PERF_ITRACE_DEFAULT_CALLCHAIN_SZ; + while (*p == ' ' || *p == ',') + p += 1; + if (isdigit(*p)) { + unsigned int val; + + val = strtoul(p, &endptr, 10); + p = endptr; + if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ) + goto out_err; + synth_opts->callchain_sz = val; + } + break; + case 'l': + synth_opts->last_branch = true; + synth_opts->last_branch_sz = + PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ; + while (*p == ' ' || *p == ',') + p += 1; + if (isdigit(*p)) { + unsigned int val; + + val = strtoul(p, &endptr, 10); + p = endptr; + if (!val || + val > PERF_ITRACE_MAX_LAST_BRANCH_SZ) + goto out_err; + synth_opts->last_branch_sz = val; + } + break; + case ' ': + case ',': + break; + default: + goto out_err; + } + } +out: + if (synth_opts->instructions) { + if (!period_type_set) + synth_opts->period_type = + PERF_ITRACE_DEFAULT_PERIOD_TYPE; + if (!period_set) + synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD; + } + + return 0; + +out_err: + pr_err("Bad Instruction Tracing options '%s'\n", str); + return -EINVAL; +} + +static const char * const auxtrace_error_type_name[] = { + [PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace", +}; + +static const char *auxtrace_error_name(int type) +{ + const char *error_type_name = NULL; + + if (type < PERF_AUXTRACE_ERROR_MAX) + error_type_name = auxtrace_error_type_name[type]; + if (!error_type_name) + error_type_name = "unknown AUX"; + return error_type_name; +} + +size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp) +{ + struct auxtrace_error_event *e = &event->auxtrace_error; + int ret; + + ret = fprintf(fp, " %s error type %u", + auxtrace_error_name(e->type), e->type); + ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n", + e->cpu, e->pid, e->tid, e->ip, e->code, e->msg); + return ret; +} + +void perf_session__auxtrace_error_inc(struct perf_session 
*session, + union perf_event *event) +{ + struct auxtrace_error_event *e = &event->auxtrace_error; + + if (e->type < PERF_AUXTRACE_ERROR_MAX) + session->evlist->stats.nr_auxtrace_errors[e->type] += 1; +} + +void events_stats__auxtrace_error_warn(const struct events_stats *stats) +{ + int i; + + for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) { + if (!stats->nr_auxtrace_errors[i]) + continue; + ui__warning("%u %s errors\n", + stats->nr_auxtrace_errors[i], + auxtrace_error_name(i)); + } +} + +int perf_event__process_auxtrace_error(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_session *session) +{ + if (auxtrace__dont_decode(session)) + return 0; + + perf_event__fprintf_auxtrace_error(event, stdout); + return 0; +} + +static int __auxtrace_mmap__read(struct auxtrace_mmap *mm, + struct auxtrace_record *itr, + struct perf_tool *tool, process_auxtrace_t fn, + bool snapshot, size_t snapshot_size) +{ + u64 head, old = mm->prev, offset, ref; + unsigned char *data = mm->base; + size_t size, head_off, old_off, len1, len2, padding; + union perf_event ev; + void *data1, *data2; + + if (snapshot) { + head = auxtrace_mmap__read_snapshot_head(mm); + if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data, + &head, &old)) + return -1; + } else { + head = auxtrace_mmap__read_head(mm); + } + + if (old == head) + return 0; + + pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n", + mm->idx, old, head, head - old); + + if (mm->mask) { + head_off = head & mm->mask; + old_off = old & mm->mask; + } else { + head_off = head % mm->len; + old_off = old % mm->len; + } + + if (head_off > old_off) + size = head_off - old_off; + else + size = mm->len - (old_off - head_off); + + if (snapshot && size > snapshot_size) + size = snapshot_size; + + ref = auxtrace_record__reference(itr); + + if (head > old || size <= head || mm->mask) { + offset = head - size; + } else { + /* + * When the buffer size is not a power of 2, 'head' wraps at the + * highest multiple of the buffer size, so we have to subtract + * the remainder here. + */ + u64 rem = (0ULL - mm->len) % mm->len; + + offset = head - size - rem; + } + + if (size > head_off) { + len1 = size - head_off; + data1 = &data[mm->len - len1]; + len2 = head_off; + data2 = &data[0]; + } else { + len1 = size; + data1 = &data[head_off - len1]; + len2 = 0; + data2 = NULL; + } + + if (itr->alignment) { + unsigned int unwanted = len1 % itr->alignment; + + len1 -= unwanted; + size -= unwanted; + } + + /* padding must be written by fn() e.g. 
record__process_auxtrace() */ + padding = size & 7; + if (padding) + padding = 8 - padding; + + memset(&ev, 0, sizeof(ev)); + ev.auxtrace.header.type = PERF_RECORD_AUXTRACE; + ev.auxtrace.header.size = sizeof(ev.auxtrace); + ev.auxtrace.size = size + padding; + ev.auxtrace.offset = offset; + ev.auxtrace.reference = ref; + ev.auxtrace.idx = mm->idx; + ev.auxtrace.tid = mm->tid; + ev.auxtrace.cpu = mm->cpu; + + if (fn(tool, &ev, data1, len1, data2, len2)) + return -1; + + mm->prev = head; + + if (!snapshot) { + auxtrace_mmap__write_tail(mm, head); + if (itr->read_finish) { + int err; + + err = itr->read_finish(itr, mm->idx); + if (err < 0) + return err; + } + } + + return 1; +} + +int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr, + struct perf_tool *tool, process_auxtrace_t fn) +{ + return __auxtrace_mmap__read(mm, itr, tool, fn, false, 0); +} + +int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm, + struct auxtrace_record *itr, + struct perf_tool *tool, process_auxtrace_t fn, + size_t snapshot_size) +{ + return __auxtrace_mmap__read(mm, itr, tool, fn, true, snapshot_size); +} + +/** + * struct auxtrace_cache - hash table to implement a cache + * @hashtable: the hashtable + * @sz: hashtable size (number of hlists) + * @entry_size: size of an entry + * @limit: limit the number of entries to this maximum, when reached the cache + * is dropped and caching begins again with an empty cache + * @cnt: current number of entries + * @bits: hashtable size (@sz = 2^@bits) + */ +struct auxtrace_cache { + struct hlist_head *hashtable; + size_t sz; + size_t entry_size; + size_t limit; + size_t cnt; + unsigned int bits; +}; + +struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size, + unsigned int limit_percent) +{ + struct auxtrace_cache *c; + struct hlist_head *ht; + size_t sz, i; + + c = zalloc(sizeof(struct auxtrace_cache)); + if (!c) + return NULL; + + sz = 1UL << bits; + + ht = calloc(sz, sizeof(struct hlist_head)); + if (!ht) + goto out_free; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); + + c->hashtable = ht; + c->sz = sz; + c->entry_size = entry_size; + c->limit = (c->sz * limit_percent) / 100; + c->bits = bits; + + return c; + +out_free: + free(c); + return NULL; +} + +static void auxtrace_cache__drop(struct auxtrace_cache *c) +{ + struct auxtrace_cache_entry *entry; + struct hlist_node *tmp; + size_t i; + + if (!c) + return; + + for (i = 0; i < c->sz; i++) { + hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) { + hlist_del(&entry->hash); + auxtrace_cache__free_entry(c, entry); + } + } + + c->cnt = 0; +} + +void auxtrace_cache__free(struct auxtrace_cache *c) +{ + if (!c) + return; + + auxtrace_cache__drop(c); + free(c->hashtable); + free(c); +} + +void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c) +{ + return malloc(c->entry_size); +} + +void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused, + void *entry) +{ + free(entry); +} + +int auxtrace_cache__add(struct auxtrace_cache *c, u32 key, + struct auxtrace_cache_entry *entry) +{ + if (c->limit && ++c->cnt > c->limit) + auxtrace_cache__drop(c); + + entry->key = key; + hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]); + + return 0; +} + +void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key) +{ + struct auxtrace_cache_entry *entry; + struct hlist_head *hlist; + + if (!c) + return NULL; + + hlist = &c->hashtable[hash_32(key, c->bits)]; + hlist_for_each_entry(entry, hlist, hash) { + if (entry->key == key) + return 
entry;
+	}
+
+	return NULL;
+}
diff --git a/kernel/tools/perf/util/auxtrace.h b/kernel/tools/perf/util/auxtrace.h
new file mode 100644
index 000000000..b86f90db1
--- /dev/null
+++ b/kernel/tools/perf/util/auxtrace.h
@@ -0,0 +1,650 @@
+/*
+ * auxtrace.h: AUX area trace support
+ * Copyright (c) 2013-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __PERF_AUXTRACE_H
+#define __PERF_AUXTRACE_H
+
+#include <sys/types.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <linux/list.h>
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+#include "../perf.h"
+#include "event.h"
+#include "session.h"
+#include "debug.h"
+
+union perf_event;
+struct perf_session;
+struct perf_evlist;
+struct perf_tool;
+struct option;
+struct record_opts;
+struct auxtrace_info_event;
+struct events_stats;
+
+enum auxtrace_type {
+	PERF_AUXTRACE_UNKNOWN,
+	PERF_AUXTRACE_INTEL_PT,
+	PERF_AUXTRACE_INTEL_BTS,
+};
+
+enum itrace_period_type {
+	PERF_ITRACE_PERIOD_INSTRUCTIONS,
+	PERF_ITRACE_PERIOD_TICKS,
+	PERF_ITRACE_PERIOD_NANOSECS,
+};
+
+/**
+ * struct itrace_synth_opts - AUX area tracing synthesis options.
+ * @set: indicates whether or not options have been set
+ * @inject: indicates the event (not just the sample) must be fully synthesized
+ *          because 'perf inject' will write it out
+ * @instructions: whether to synthesize 'instructions' events
+ * @branches: whether to synthesize 'branches' events
+ * @transactions: whether to synthesize events for transactions
+ * @errors: whether to synthesize decoder error events
+ * @dont_decode: whether to skip decoding entirely
+ * @log: write a decoding log
+ * @calls: limit branch samples to calls (can be combined with @returns)
+ * @returns: limit branch samples to returns (can be combined with @calls)
+ * @callchain: add callchain to 'instructions' events
+ * @last_branch: add branch context to 'instructions' events
+ * @callchain_sz: maximum callchain size
+ * @last_branch_sz: branch context size
+ * @period: 'instructions' events period
+ * @period_type: 'instructions' events period type
+ */
+struct itrace_synth_opts {
+	bool			set;
+	bool			inject;
+	bool			instructions;
+	bool			branches;
+	bool			transactions;
+	bool			errors;
+	bool			dont_decode;
+	bool			log;
+	bool			calls;
+	bool			returns;
+	bool			callchain;
+	bool			last_branch;
+	unsigned int		callchain_sz;
+	unsigned int		last_branch_sz;
+	unsigned long long	period;
+	enum itrace_period_type	period_type;
+};
+
+/**
+ * struct auxtrace_index_entry - indexes an AUX area tracing event within a
+ *                               perf.data file.
+ * @file_offset: offset within the perf.data file
+ * @sz: size of the event
+ */
+struct auxtrace_index_entry {
+	u64			file_offset;
+	u64			sz;
+};
+
+#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256
+
+/**
+ * struct auxtrace_index - index of AUX area tracing events within a perf.data
+ *                         file.
+ * @list: linking a number of arrays of entries
+ * @nr: number of entries
+ * @entries: array of entries
+ */
+struct auxtrace_index {
+	struct list_head	list;
+	size_t			nr;
+	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
+};
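A quick illustration of how these options get filled in (an editorial note, not
part of the patch itself; the option string is an arbitrary example). Tracing
through itrace_parse_synth_opts() earlier in this patch:

	/*
	 * Example: for the --itrace string "i100usg16e" the parser sets:
	 *
	 *	synth_opts->instructions = true;	// 'i'
	 *	synth_opts->period	 = 100000;	// "100us", scaled to ns
	 *	synth_opts->period_type	 = PERF_ITRACE_PERIOD_NANOSECS;
	 *	synth_opts->callchain	 = true;	// 'g'
	 *	synth_opts->callchain_sz = 16;		// "16"
	 *	synth_opts->errors	 = true;	// 'e'
	 */

+/**
+ * struct auxtrace - session callbacks to allow AUX area data decoding.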
+ * @process_event: lets the decoder see all session events + * @flush_events: process any remaining data + * @free_events: free resources associated with event processing + * @free: free resources associated with the session + */ +struct auxtrace { + int (*process_event)(struct perf_session *session, + union perf_event *event, + struct perf_sample *sample, + struct perf_tool *tool); + int (*process_auxtrace_event)(struct perf_session *session, + union perf_event *event, + struct perf_tool *tool); + int (*flush_events)(struct perf_session *session, + struct perf_tool *tool); + void (*free_events)(struct perf_session *session); + void (*free)(struct perf_session *session); +}; + +/** + * struct auxtrace_buffer - a buffer containing AUX area tracing data. + * @list: buffers are queued in a list held by struct auxtrace_queue + * @size: size of the buffer in bytes + * @pid: in per-thread mode, the pid this buffer is associated with + * @tid: in per-thread mode, the tid this buffer is associated with + * @cpu: in per-cpu mode, the cpu this buffer is associated with + * @data: actual buffer data (can be null if the data has not been loaded) + * @data_offset: file offset at which the buffer can be read + * @mmap_addr: mmap address at which the buffer can be read + * @mmap_size: size of the mmap at @mmap_addr + * @data_needs_freeing: @data was malloc'd so free it when it is no longer + * needed + * @consecutive: the original data was split up and this buffer is consecutive + * to the previous buffer + * @offset: offset as determined by aux_head / aux_tail members of struct + * perf_event_mmap_page + * @reference: an implementation-specific reference determined when the data is + * recorded + * @buffer_nr: used to number each buffer + * @use_size: implementation actually only uses this number of bytes + * @use_data: implementation actually only uses data starting at this address + */ +struct auxtrace_buffer { + struct list_head list; + size_t size; + pid_t pid; + pid_t tid; + int cpu; + void *data; + off_t data_offset; + void *mmap_addr; + size_t mmap_size; + bool data_needs_freeing; + bool consecutive; + u64 offset; + u64 reference; + u64 buffer_nr; + size_t use_size; + void *use_data; +}; + +/** + * struct auxtrace_queue - a queue of AUX area tracing data buffers. + * @head: head of buffer list + * @tid: in per-thread mode, the tid this queue is associated with + * @cpu: in per-cpu mode, the cpu this queue is associated with + * @set: %true once this queue has been dedicated to a specific thread or cpu + * @priv: implementation-specific data + */ +struct auxtrace_queue { + struct list_head head; + pid_t tid; + int cpu; + bool set; + void *priv; +}; + +/** + * struct auxtrace_queues - an array of AUX area tracing queues. + * @queue_array: array of queues + * @nr_queues: number of queues + * @new_data: set whenever new data is queued + * @populated: queues have been fully populated using the auxtrace_index + * @next_buffer_nr: used to number each buffer + */ +struct auxtrace_queues { + struct auxtrace_queue *queue_array; + unsigned int nr_queues; + bool new_data; + bool populated; + u64 next_buffer_nr; +}; + +/** + * struct auxtrace_heap_item - element of struct auxtrace_heap. + * @queue_nr: queue number + * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected + * to be a timestamp + */ +struct auxtrace_heap_item { + unsigned int queue_nr; + u64 ordinal; +}; + +/** + * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues. 
+ * @heap_array: the heap + * @heap_cnt: the number of elements in the heap + * @heap_sz: maximum number of elements (grows as needed) + */ +struct auxtrace_heap { + struct auxtrace_heap_item *heap_array; + unsigned int heap_cnt; + unsigned int heap_sz; +}; + +/** + * struct auxtrace_mmap - records an mmap of the auxtrace buffer. + * @base: address of mapped area + * @userpg: pointer to buffer's perf_event_mmap_page + * @mask: %0 if @len is not a power of two, otherwise (@len - %1) + * @len: size of mapped area + * @prev: previous aux_head + * @idx: index of this mmap + * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu + * mmap) otherwise %0 + * @cpu: cpu number for a per-cpu mmap otherwise %-1 + */ +struct auxtrace_mmap { + void *base; + void *userpg; + size_t mask; + size_t len; + u64 prev; + int idx; + pid_t tid; + int cpu; +}; + +/** + * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap. + * @mask: %0 if @len is not a power of two, otherwise (@len - %1) + * @offset: file offset of mapped area + * @len: size of mapped area + * @prot: mmap memory protection + * @idx: index of this mmap + * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu + * mmap) otherwise %0 + * @cpu: cpu number for a per-cpu mmap otherwise %-1 + */ +struct auxtrace_mmap_params { + size_t mask; + off_t offset; + size_t len; + int prot; + int idx; + pid_t tid; + int cpu; +}; + +/** + * struct auxtrace_record - callbacks for recording AUX area data. + * @recording_options: validate and process recording options + * @info_priv_size: return the size of the private data in auxtrace_info_event + * @info_fill: fill-in the private data in auxtrace_info_event + * @free: free this auxtrace record structure + * @snapshot_start: starting a snapshot + * @snapshot_finish: finishing a snapshot + * @find_snapshot: find data to snapshot within auxtrace mmap + * @parse_snapshot_options: parse snapshot options + * @reference: provide a 64-bit reference number for auxtrace_event + * @read_finish: called after reading from an auxtrace mmap + */ +struct auxtrace_record { + int (*recording_options)(struct auxtrace_record *itr, + struct perf_evlist *evlist, + struct record_opts *opts); + size_t (*info_priv_size)(struct auxtrace_record *itr); + int (*info_fill)(struct auxtrace_record *itr, + struct perf_session *session, + struct auxtrace_info_event *auxtrace_info, + size_t priv_size); + void (*free)(struct auxtrace_record *itr); + int (*snapshot_start)(struct auxtrace_record *itr); + int (*snapshot_finish)(struct auxtrace_record *itr); + int (*find_snapshot)(struct auxtrace_record *itr, int idx, + struct auxtrace_mmap *mm, unsigned char *data, + u64 *head, u64 *old); + int (*parse_snapshot_options)(struct auxtrace_record *itr, + struct record_opts *opts, + const char *str); + u64 (*reference)(struct auxtrace_record *itr); + int (*read_finish)(struct auxtrace_record *itr, int idx); + unsigned int alignment; +}; + +#ifdef HAVE_AUXTRACE_SUPPORT + +/* + * In snapshot mode the mmapped page is read-only which makes using + * __sync_val_compare_and_swap() problematic. However, snapshot mode expects + * the buffer is not updated while the snapshot is made (e.g. Intel PT disables + * the event) so there is not a race anyway. 
+ */ +static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm) +{ + struct perf_event_mmap_page *pc = mm->userpg; + u64 head = ACCESS_ONCE(pc->aux_head); + + /* Ensure all reads are done after we read the head */ + rmb(); + return head; +} + +static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm) +{ + struct perf_event_mmap_page *pc = mm->userpg; +#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT) + u64 head = ACCESS_ONCE(pc->aux_head); +#else + u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0); +#endif + + /* Ensure all reads are done after we read the head */ + rmb(); + return head; +} + +static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail) +{ + struct perf_event_mmap_page *pc = mm->userpg; +#if BITS_PER_LONG != 64 && defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT) + u64 old_tail; +#endif + + /* Ensure all reads are done before we write the tail out */ + mb(); +#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT) + pc->aux_tail = tail; +#else + do { + old_tail = __sync_val_compare_and_swap(&pc->aux_tail, 0, 0); + } while (!__sync_bool_compare_and_swap(&pc->aux_tail, old_tail, tail)); +#endif +} + +int auxtrace_mmap__mmap(struct auxtrace_mmap *mm, + struct auxtrace_mmap_params *mp, + void *userpg, int fd); +void auxtrace_mmap__munmap(struct auxtrace_mmap *mm); +void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp, + off_t auxtrace_offset, + unsigned int auxtrace_pages, + bool auxtrace_overwrite); +void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp, + struct perf_evlist *evlist, int idx, + bool per_cpu); + +typedef int (*process_auxtrace_t)(struct perf_tool *tool, + union perf_event *event, void *data1, + size_t len1, void *data2, size_t len2); + +int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr, + struct perf_tool *tool, process_auxtrace_t fn); + +int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm, + struct auxtrace_record *itr, + struct perf_tool *tool, process_auxtrace_t fn, + size_t snapshot_size); + +int auxtrace_queues__init(struct auxtrace_queues *queues); +int auxtrace_queues__add_event(struct auxtrace_queues *queues, + struct perf_session *session, + union perf_event *event, off_t data_offset, + struct auxtrace_buffer **buffer_ptr); +void auxtrace_queues__free(struct auxtrace_queues *queues); +int auxtrace_queues__process_index(struct auxtrace_queues *queues, + struct perf_session *session); +struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue, + struct auxtrace_buffer *buffer); +void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd); +void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer); +void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer); +void auxtrace_buffer__free(struct auxtrace_buffer *buffer); + +int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr, + u64 ordinal); +void auxtrace_heap__pop(struct auxtrace_heap *heap); +void auxtrace_heap__free(struct auxtrace_heap *heap); + +struct auxtrace_cache_entry { + struct hlist_node hash; + u32 key; +}; + +struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size, + unsigned int limit_percent); +void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache); +void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c); +void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry); +int auxtrace_cache__add(struct auxtrace_cache *c, u32 key, + struct 
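The mmap read API above is driven by a caller-supplied process_auxtrace_t
callback. A minimal sketch follows (not part of the patch; the 'output' FILE
handle and fwrite-based persistence are assumptions for illustration). It
shows the contract __auxtrace_mmap__read() in auxtrace.c expects: the callback
must also write the alignment padding counted into event->auxtrace.size.

	static FILE *output;	/* hypothetical destination for the trace data */

	static int example_process_auxtrace(struct perf_tool *tool __maybe_unused,
					    union perf_event *event,
					    void *data1, size_t len1,
					    void *data2, size_t len2)
	{
		static const unsigned char zeros[8];
		/* auxtrace.size includes padding; the data fragments do not */
		size_t padding = event->auxtrace.size - (len1 + len2);

		/* the PERF_RECORD_AUXTRACE header first ... */
		if (fwrite(event, event->header.size, 1, output) != 1)
			return -1;
		/* ... then the data, which may wrap, hence two fragments ... */
		if (len1 && fwrite(data1, len1, 1, output) != 1)
			return -1;
		if (len2 && fwrite(data2, len2, 1, output) != 1)
			return -1;
		/* ... then the zero padding (always less than 8 bytes) */
		if (padding && fwrite(zeros, padding, 1, output) != 1)
			return -1;
		return 0;
	}

A tool would then drain the AUX buffer, and implicitly update aux_tail, with
auxtrace_mmap__read(mm, itr, tool, example_process_auxtrace).

+int auxtrace_queues__init(struct auxtrace_queues *queues);
+int auxtrace_queues__add_event(struct auxtrace_queues *queues,
+			       struct perf_session *session,
+			       union perf_event *event, off_t data_offset,
+			       struct auxtrace_buffer **buffer_ptr);
+void auxtrace_queues__free(struct auxtrace_queues *queues);
+int auxtrace_queues__process_index(struct auxtrace_queues *queues,
+				   struct perf_session *session);
+struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
+					      struct auxtrace_buffer *buffer);
+void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
+void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
+void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
+void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
+
+int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
+		       u64 ordinal);
+void auxtrace_heap__pop(struct auxtrace_heap *heap);
+void auxtrace_heap__free(struct auxtrace_heap *heap);
+
+struct auxtrace_cache_entry {
+	struct hlist_node hash;
+	u32 key;
+};
+
+struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
+					   unsigned int limit_percent);
+void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
+void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
+void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
+int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
+			struct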
auxtrace_cache_entry *entry); +void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key); + +struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist, + int *err); + +int auxtrace_parse_snapshot_options(struct auxtrace_record *itr, + struct record_opts *opts, + const char *str); +int auxtrace_record__options(struct auxtrace_record *itr, + struct perf_evlist *evlist, + struct record_opts *opts); +size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr); +int auxtrace_record__info_fill(struct auxtrace_record *itr, + struct perf_session *session, + struct auxtrace_info_event *auxtrace_info, + size_t priv_size); +void auxtrace_record__free(struct auxtrace_record *itr); +int auxtrace_record__snapshot_start(struct auxtrace_record *itr); +int auxtrace_record__snapshot_finish(struct auxtrace_record *itr); +int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx, + struct auxtrace_mmap *mm, + unsigned char *data, u64 *head, u64 *old); +u64 auxtrace_record__reference(struct auxtrace_record *itr); + +int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event, + off_t file_offset); +int auxtrace_index__write(int fd, struct list_head *head); +int auxtrace_index__process(int fd, u64 size, struct perf_session *session, + bool needs_swap); +void auxtrace_index__free(struct list_head *head); + +void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type, + int code, int cpu, pid_t pid, pid_t tid, u64 ip, + const char *msg); + +int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr, + struct perf_tool *tool, + struct perf_session *session, + perf_event__handler_t process); +int perf_event__process_auxtrace_info(struct perf_tool *tool, + union perf_event *event, + struct perf_session *session); +s64 perf_event__process_auxtrace(struct perf_tool *tool, + union perf_event *event, + struct perf_session *session); +int perf_event__process_auxtrace_error(struct perf_tool *tool, + union perf_event *event, + struct perf_session *session); +int itrace_parse_synth_opts(const struct option *opt, const char *str, + int unset); +void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts); + +size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp); +void perf_session__auxtrace_error_inc(struct perf_session *session, + union perf_event *event); +void events_stats__auxtrace_error_warn(const struct events_stats *stats); + +static inline int auxtrace__process_event(struct perf_session *session, + union perf_event *event, + struct perf_sample *sample, + struct perf_tool *tool) +{ + if (!session->auxtrace) + return 0; + + return session->auxtrace->process_event(session, event, sample, tool); +} + +static inline int auxtrace__flush_events(struct perf_session *session, + struct perf_tool *tool) +{ + if (!session->auxtrace) + return 0; + + return session->auxtrace->flush_events(session, tool); +} + +static inline void auxtrace__free_events(struct perf_session *session) +{ + if (!session->auxtrace) + return; + + return session->auxtrace->free_events(session); +} + +static inline void auxtrace__free(struct perf_session *session) +{ + if (!session->auxtrace) + return; + + return session->auxtrace->free(session); +} + +#else + +static inline struct auxtrace_record * +auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, + int *err __maybe_unused) +{ + *err = 0; + return NULL; +} + +static inline +void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused) +{ +} + +static 
inline int
+perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr __maybe_unused,
+				     struct perf_tool *tool __maybe_unused,
+				     struct perf_session *session __maybe_unused,
+				     perf_event__handler_t process __maybe_unused)
+{
+	return -EINVAL;
+}
+
+static inline
+int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
+			     struct perf_evlist *evlist __maybe_unused,
+			     struct record_opts *opts __maybe_unused)
+{
+	return 0;
+}
+
+#define perf_event__process_auxtrace_info 0
+#define perf_event__process_auxtrace 0
+#define perf_event__process_auxtrace_error 0
+
+static inline
+void perf_session__auxtrace_error_inc(struct perf_session *session
+				      __maybe_unused,
+				      union perf_event *event
+				      __maybe_unused)
+{
+}
+
+static inline
+void events_stats__auxtrace_error_warn(const struct events_stats *stats
+				       __maybe_unused)
+{
+}
+
+static inline
+int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
+			    const char *str __maybe_unused,
+			    int unset __maybe_unused)
+{
+	pr_err("AUX area tracing not supported\n");
+	return -EINVAL;
+}
+
+static inline
+int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
+				    struct record_opts *opts __maybe_unused,
+				    const char *str)
+{
+	if (!str)
+		return 0;
+	pr_err("AUX area tracing not supported\n");
+	return -EINVAL;
+}
+
+static inline
+int auxtrace__process_event(struct perf_session *session __maybe_unused,
+			    union perf_event *event __maybe_unused,
+			    struct perf_sample *sample __maybe_unused,
+			    struct perf_tool *tool __maybe_unused)
+{
+	return 0;
+}
+
+static inline
+int auxtrace__flush_events(struct perf_session *session __maybe_unused,
+			   struct perf_tool *tool __maybe_unused)
+{
+	return 0;
+}
+
+static inline
+void auxtrace__free_events(struct perf_session *session __maybe_unused)
+{
+}
+
+static inline
+void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
+{
+}
+
+static inline
+void auxtrace__free(struct perf_session *session __maybe_unused)
+{
+}
+
+static inline
+int auxtrace_index__write(int fd __maybe_unused,
+			  struct list_head *head __maybe_unused)
+{
+	return -EINVAL;
+}
+
+static inline
+int auxtrace_index__process(int fd __maybe_unused,
+			    u64 size __maybe_unused,
+			    struct perf_session *session __maybe_unused,
+			    bool needs_swap __maybe_unused)
+{
+	return -EINVAL;
+}
+
+static inline
+void auxtrace_index__free(struct list_head *head __maybe_unused)
+{
+}
+
+int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
+			struct auxtrace_mmap_params *mp,
+			void *userpg, int fd);
+void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
+void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
+				off_t auxtrace_offset,
+				unsigned int auxtrace_pages,
+				bool auxtrace_overwrite);
+void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
+				   struct perf_evlist *evlist, int idx,
+				   bool per_cpu);
+
+#endif
+
+#endif
diff --git a/kernel/tools/perf/util/bpf-loader.c b/kernel/tools/perf/util/bpf-loader.c
new file mode 100644
index 000000000..4c5041137
--- /dev/null
+++ b/kernel/tools/perf/util/bpf-loader.c
@@ -0,0 +1,457 @@
+/*
+ * bpf-loader.c
+ *
+ * Copyright (C) 2015 Wang Nan
+ * Copyright (C) 2015 Huawei Inc.
+ */
+
+#include <linux/bpf.h>
+#include <bpf/libbpf.h>
+#include "perf.h"
+#include "debug.h"
+#include "bpf-loader.h"
+#include "probe-event.h"
+#include "probe-finder.h" // for MAX_PROBES
+#include "llvm-utils.h"
+
+#define DEFINE_PRINT_FN(name, level) \
+static int libbpf_##name(const char *fmt, ...)
\
+{							\
+	va_list args;					\
+	int ret;					\
+							\
+	va_start(args, fmt);				\
+	ret = veprintf(level, verbose, pr_fmt(fmt), args);\
+	va_end(args);					\
+	return ret;					\
+}
+
+DEFINE_PRINT_FN(warning, 1)
+DEFINE_PRINT_FN(info, 1)
+DEFINE_PRINT_FN(debug, 1)
+
+struct bpf_prog_priv {
+	struct perf_probe_event pev;
+};
+
+static bool libbpf_initialized;
+
+struct bpf_object *
+bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
+{
+	struct bpf_object *obj;
+
+	if (!libbpf_initialized) {
+		libbpf_set_print(libbpf_warning,
+				 libbpf_info,
+				 libbpf_debug);
+		libbpf_initialized = true;
+	}
+
+	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
+	if (IS_ERR(obj)) {
+		pr_debug("bpf: failed to load buffer\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return obj;
+}
+
+struct bpf_object *bpf__prepare_load(const char *filename, bool source)
+{
+	struct bpf_object *obj;
+
+	if (!libbpf_initialized) {
+		libbpf_set_print(libbpf_warning,
+				 libbpf_info,
+				 libbpf_debug);
+		libbpf_initialized = true;
+	}
+
+	if (source) {
+		int err;
+		void *obj_buf;
+		size_t obj_buf_sz;
+
+		err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
+		if (err)
+			return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
+		obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
+		free(obj_buf);
+	} else
+		obj = bpf_object__open(filename);
+
+	if (IS_ERR(obj)) {
+		pr_debug("bpf: failed to load %s\n", filename);
+		return obj;
+	}
+
+	return obj;
+}
+
+void bpf__clear(void)
+{
+	struct bpf_object *obj, *tmp;
+
+	bpf_object__for_each_safe(obj, tmp) {
+		bpf__unprobe(obj);
+		bpf_object__close(obj);
+	}
+}
+
+static void
+bpf_prog_priv__clear(struct bpf_program *prog __maybe_unused,
+		     void *_priv)
+{
+	struct bpf_prog_priv *priv = _priv;
+
+	cleanup_perf_probe_events(&priv->pev, 1);
+	free(priv);
+}
+
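For context (an editorial sketch, not part of the patch): the "config string"
parsed below is the ELF section name of each eBPF program, written in perf-probe
syntax. A hypothetical scriptlet — the probed function, event name and argument
are illustrative only — would look like:

	/* bpf-scriptlet.c (hypothetical): event "open" attached to
	 * do_sys_open, collecting its 'filename' argument; the event is
	 * filed under the "perf_bpf_probe" group by config_bpf_program(). */
	__attribute__((section("open=do_sys_open filename"), used))
	int example_open_probe(void *ctx)
	{
		return 0;	/* filtering logic would go here */
	}

	char _license[] __attribute__((section("license"), used)) = "GPL";

+static int
+config_bpf_program(struct bpf_program *prog)
+{
+	struct perf_probe_event *pev = NULL;
+	struct bpf_prog_priv *priv = NULL;
+	const char *config_str;
+	int err;
+
+	config_str = bpf_program__title(prog, false);
+	if (IS_ERR(config_str)) {
+		pr_debug("bpf: unable to get title for program\n");
+		return PTR_ERR(config_str);
+	}
+
+	priv = calloc(sizeof(*priv), 1);
+	if (!priv) {
+		pr_debug("bpf: failed to alloc priv\n");
+		return -ENOMEM;
+	}
+	pev = &priv->pev;
+
+	pr_debug("bpf: config program '%s'\n", config_str);
+	err = parse_perf_probe_command(config_str, pev);
+	if (err < 0) {
+		pr_debug("bpf: '%s' is not a valid config string\n",
+			 config_str);
+		err = -BPF_LOADER_ERRNO__CONFIG;
+		goto errout;
+	}
+
+	if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
+		pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
+			 config_str, PERF_BPF_PROBE_GROUP);
+		err = -BPF_LOADER_ERRNO__GROUP;
+		goto errout;
+	} else if (!pev->group)
+		pev->group = strdup(PERF_BPF_PROBE_GROUP);
+
+	if (!pev->group) {
+		pr_debug("bpf: strdup failed\n");
+		err = -ENOMEM;
+		goto errout;
+	}
+
+	if (!pev->event) {
+		pr_debug("bpf: '%s': event name is missing.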
Section name should be 'key=value'\n",
+			 config_str);
+		err = -BPF_LOADER_ERRNO__EVENTNAME;
+		goto errout;
+	}
+	pr_debug("bpf: config '%s' is ok\n", config_str);
+
+	err = bpf_program__set_private(prog, priv, bpf_prog_priv__clear);
+	if (err) {
+		pr_debug("Failed to set priv for program '%s'\n", config_str);
+		goto errout;
+	}
+
+	return 0;
+
+errout:
+	if (pev)
+		clear_perf_probe_event(pev);
+	free(priv);
+	return err;
+}
+
+static int bpf__prepare_probe(void)
+{
+	static int err = 0;
+	static bool initialized = false;
+
+	/*
+	 * Make err static, so that if init fails the first time,
+	 * bpf__prepare_probe() fails each subsequent time as well, without
+	 * calling init_probe_symbol_maps() multiple times.
+	 */
+	if (initialized)
+		return err;
+
+	initialized = true;
+	err = init_probe_symbol_maps(false);
+	if (err < 0)
+		pr_debug("Failed to init_probe_symbol_maps\n");
+	probe_conf.max_probes = MAX_PROBES;
+	return err;
+}
+
+int bpf__probe(struct bpf_object *obj)
+{
+	int err = 0;
+	struct bpf_program *prog;
+	struct bpf_prog_priv *priv;
+	struct perf_probe_event *pev;
+
+	err = bpf__prepare_probe();
+	if (err) {
+		pr_debug("bpf__prepare_probe failed\n");
+		return err;
+	}
+
+	bpf_object__for_each_program(prog, obj) {
+		err = config_bpf_program(prog);
+		if (err)
+			goto out;
+
+		err = bpf_program__get_private(prog, (void **)&priv);
+		if (err || !priv)
+			goto out;
+		pev = &priv->pev;
+
+		err = convert_perf_probe_events(pev, 1);
+		if (err < 0) {
+			pr_debug("bpf_probe: failed to convert perf probe events\n");
+			goto out;
+		}
+
+		err = apply_perf_probe_events(pev, 1);
+		if (err < 0) {
+			pr_debug("bpf_probe: failed to apply perf probe events\n");
+			goto out;
+		}
+	}
+out:
+	return err < 0 ? err : 0;
+}
+
+#define EVENTS_WRITE_BUFSIZE  4096
+int bpf__unprobe(struct bpf_object *obj)
+{
+	int err, ret = 0;
+	struct bpf_program *prog;
+	struct bpf_prog_priv *priv;
+
+	bpf_object__for_each_program(prog, obj) {
+		int i;
+
+		err = bpf_program__get_private(prog, (void **)&priv);
+		if (err || !priv)
+			continue;
+
+		for (i = 0; i < priv->pev.ntevs; i++) {
+			struct probe_trace_event *tev = &priv->pev.tevs[i];
+			char name_buf[EVENTS_WRITE_BUFSIZE];
+			struct strfilter *delfilter;
+
+			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
+				 "%s:%s", tev->group, tev->event);
+			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';
+
+			delfilter = strfilter__new(name_buf, NULL);
+			if (!delfilter) {
+				pr_debug("Failed to create filter for unprobing\n");
+				ret = -ENOMEM;
+				continue;
+			}
+
+			err = del_perf_probe_events(delfilter);
+			strfilter__delete(delfilter);
+			if (err) {
+				pr_debug("Failed to delete %s\n", name_buf);
+				ret = err;
+				continue;
+			}
+		}
+	}
+	return ret;
+}
+
+int bpf__load(struct bpf_object *obj)
+{
+	int err;
+
+	err = bpf_object__load(obj);
+	if (err) {
+		pr_debug("bpf: load objects failed\n");
+		return err;
+	}
+	return 0;
+}
+
+int bpf__foreach_tev(struct bpf_object *obj,
+		     bpf_prog_iter_callback_t func,
+		     void *arg)
+{
+	struct bpf_program *prog;
+	int err;
+
+	bpf_object__for_each_program(prog, obj) {
+		struct probe_trace_event *tev;
+		struct perf_probe_event *pev;
+		struct bpf_prog_priv *priv;
+		int i, fd;
+
+		err = bpf_program__get_private(prog,
+					       (void **)&priv);
+		if (err || !priv) {
+			pr_debug("bpf: failed to get private field\n");
+			return -BPF_LOADER_ERRNO__INTERNAL;
+		}
+
+		pev = &priv->pev;
+		for (i = 0; i < pev->ntevs; i++) {
+			tev = &pev->tevs[i];
+
+			fd = bpf_program__fd(prog);
+			if (fd < 0) {
+				pr_debug("bpf: failed to get file descriptor\n");
+				return fd;
+			}
+
+			err = (*func)(tev, fd, arg);
+			if (err) {
+				pr_debug("bpf: callback
failed, stop iterating\n");
+				return err;
+			}
+		}
+	}
+	return 0;
+}
+
+#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
+#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
+#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
+
+static const char *bpf_loader_strerror_table[NR_ERRNO] = {
+	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
+	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
+	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
+	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
+	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
+};
+
+static int
+bpf_loader_strerror(int err, char *buf, size_t size)
+{
+	char sbuf[STRERR_BUFSIZE];
+	const char *msg;
+
+	if (!buf || !size)
+		return -1;
+
+	err = err > 0 ? err : -err;
+
+	if (err >= __LIBBPF_ERRNO__START)
+		return libbpf_strerror(err, buf, size);
+
+	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
+		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
+		snprintf(buf, size, "%s", msg);
+		buf[size - 1] = '\0';
+		return 0;
+	}
+
+	if (err >= __BPF_LOADER_ERRNO__END)
+		snprintf(buf, size, "Unknown bpf loader error %d", err);
+	else
+		snprintf(buf, size, "%s",
+			 strerror_r(err, sbuf, sizeof(sbuf)));
+
+	buf[size - 1] = '\0';
+	return -1;
+}
+
+#define bpf__strerror_head(err, buf, size) \
+	char sbuf[STRERR_BUFSIZE], *emsg;\
+	if (!size)\
+		return 0;\
+	if (err < 0)\
+		err = -err;\
+	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
+	emsg = sbuf;\
+	switch (err) {\
+	default:\
+		scnprintf(buf, size, "%s", emsg);\
+		break;
+
+#define bpf__strerror_entry(val, fmt...)\
+	case val: {\
+		scnprintf(buf, size, fmt);\
+		break;\
+	}
+
+#define bpf__strerror_end(buf, size)\
+	}\
+	buf[size - 1] = '\0';
+
+int bpf__strerror_prepare_load(const char *filename, bool source,
+			       int err, char *buf, size_t size)
+{
+	size_t n;
+	int ret;
+
+	n = snprintf(buf, size, "Failed to load %s%s: ",
+		     filename, source ? " from source" : "");
+	if (n >= size) {
+		buf[size - 1] = '\0';
+		return 0;
+	}
+	buf += n;
+	size -= n;
+
+	ret = bpf_loader_strerror(err, buf, size);
+	buf[size - 1] = '\0';
+	return ret;
+}
+
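A sketch of the intended calling sequence for this API (not part of the patch;
the object path, buffer size and error handling are illustrative only):

	static int example_bpf_workflow(const char *path)
	{
		char msg[128];		/* illustrative buffer size */
		struct bpf_object *obj;
		int err;

		obj = bpf__prepare_load(path, false);	/* false: pre-compiled object */
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			bpf__strerror_prepare_load(path, false, err, msg, sizeof(msg));
			pr_err("%s\n", msg);
			return err;
		}

		err = bpf__probe(obj);		/* create the probe points */
		if (err) {
			bpf__strerror_probe(obj, err, msg, sizeof(msg));
			pr_err("%s\n", msg);
			goto out;
		}

		err = bpf__load(obj);		/* load programs into the kernel */
		if (err) {
			bpf__strerror_load(obj, err, msg, sizeof(msg));
			pr_err("%s\n", msg);
		}
	out:
		bpf__clear();	/* unprobes and closes all objects; shown
				 * unconditionally here for brevity */
		return err;
	}

+int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
+			int err, char *buf, size_t size)
+{
+	bpf__strerror_head(err, buf, size);
+	bpf__strerror_entry(EEXIST, "Probe point already exists.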
Try 'perf probe -d \"*\"'");
+	bpf__strerror_entry(EACCES, "You need to be root");
+	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
+	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
+	bpf__strerror_end(buf, size);
+	return 0;
+}
+
+int bpf__strerror_load(struct bpf_object *obj,
+		       int err, char *buf, size_t size)
+{
+	bpf__strerror_head(err, buf, size);
+	case LIBBPF_ERRNO__KVER: {
+		unsigned int obj_kver = bpf_object__get_kversion(obj);
+		unsigned int real_kver;
+
+		if (fetch_kernel_version(&real_kver, NULL, 0)) {
+			scnprintf(buf, size, "Unable to fetch kernel version");
+			break;
+		}
+
+		if (obj_kver != real_kver) {
+			scnprintf(buf, size,
+				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
+				  KVER_PARAM(obj_kver),
+				  KVER_PARAM(real_kver));
+			break;
+		}
+
+		scnprintf(buf, size, "Failed to load program for unknown reason");
+		break;
+	}
+	bpf__strerror_end(buf, size);
+	return 0;
+}
diff --git a/kernel/tools/perf/util/bpf-loader.h b/kernel/tools/perf/util/bpf-loader.h
new file mode 100644
index 000000000..9caf3ae4a
--- /dev/null
+++ b/kernel/tools/perf/util/bpf-loader.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2015, Wang Nan
+ * Copyright (C) 2015, Huawei Inc.
+ */
+#ifndef __BPF_LOADER_H
+#define __BPF_LOADER_H
+
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <string.h>
+#include <bpf/libbpf.h>
+#include "probe-event.h"
+#include "debug.h"
+
+enum bpf_loader_errno {
+	__BPF_LOADER_ERRNO__START = __LIBBPF_ERRNO__START - 100,
+	/* Invalid config string */
+	BPF_LOADER_ERRNO__CONFIG = __BPF_LOADER_ERRNO__START,
+	BPF_LOADER_ERRNO__GROUP,	/* Invalid group name */
+	BPF_LOADER_ERRNO__EVENTNAME,	/* Event name is missing */
+	BPF_LOADER_ERRNO__INTERNAL,	/* BPF loader internal error */
+	BPF_LOADER_ERRNO__COMPILE,	/* Error when compiling BPF scriptlet */
+	__BPF_LOADER_ERRNO__END,
+};
+
+struct bpf_object;
+#define PERF_BPF_PROBE_GROUP "perf_bpf_probe"
+
+typedef int (*bpf_prog_iter_callback_t)(struct probe_trace_event *tev,
+					int fd, void *arg);
+
+#ifdef HAVE_LIBBPF_SUPPORT
+struct bpf_object *bpf__prepare_load(const char *filename, bool source);
+int bpf__strerror_prepare_load(const char *filename, bool source,
+			       int err, char *buf, size_t size);
+
+struct bpf_object *bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz,
+					    const char *name);
+
+void bpf__clear(void);
+
+int bpf__probe(struct bpf_object *obj);
+int bpf__unprobe(struct bpf_object *obj);
+int bpf__strerror_probe(struct bpf_object *obj, int err,
+			char *buf, size_t size);
+
+int bpf__load(struct bpf_object *obj);
+int bpf__strerror_load(struct bpf_object *obj, int err,
+		       char *buf, size_t size);
+int bpf__foreach_tev(struct bpf_object *obj,
+		     bpf_prog_iter_callback_t func, void *arg);
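For illustration (not part of the patch), a bpf_prog_iter_callback_t
implementation has this shape; actually wiring the program fd to a perf event
is elided, and the function name is hypothetical:

	static int example_attach_tev(struct probe_trace_event *tev, int fd,
				      void *arg __maybe_unused)
	{
		/* 'fd' is the loaded program's file descriptor; attaching it
		 * to the probe event created for tev is left out here. */
		pr_debug("would attach program fd %d to %s:%s\n",
			 fd, tev->group, tev->event);
		return 0;	/* returning non-zero stops bpf__foreach_tev() */
	}

+#else
+static inline struct bpf_object *
+bpf__prepare_load(const char *filename __maybe_unused,
+		  bool source __maybe_unused)
+{
+	pr_debug("ERROR: eBPF object loading is disabled during compiling.\n");
+	return ERR_PTR(-ENOTSUP);
+}
+
+static inline struct bpf_object *
+bpf__prepare_load_buffer(void *obj_buf __maybe_unused,
+			 size_t obj_buf_sz __maybe_unused)
+{
+	return ERR_PTR(-ENOTSUP);
+}
+
+static inline void bpf__clear(void) { }
+
+static inline int bpf__probe(struct bpf_object *obj __maybe_unused) { return 0;}
+static inline int bpf__unprobe(struct bpf_object *obj __maybe_unused) { return 0;}
+static inline int bpf__load(struct bpf_object *obj __maybe_unused) { return 0; }
+
+static inline int
+bpf__foreach_tev(struct bpf_object *obj __maybe_unused,
+		 bpf_prog_iter_callback_t func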
__maybe_unused, + void *arg __maybe_unused) +{ + return 0; +} + +static inline int +__bpf_strerror(char *buf, size_t size) +{ + if (!size) + return 0; + strncpy(buf, + "ERROR: eBPF object loading is disabled during compiling.\n", + size); + buf[size - 1] = '\0'; + return 0; +} + +static inline +int bpf__strerror_prepare_load(const char *filename __maybe_unused, + bool source __maybe_unused, + int err __maybe_unused, + char *buf, size_t size) +{ + return __bpf_strerror(buf, size); +} + +static inline int +bpf__strerror_probe(struct bpf_object *obj __maybe_unused, + int err __maybe_unused, + char *buf, size_t size) +{ + return __bpf_strerror(buf, size); +} + +static inline int bpf__strerror_load(struct bpf_object *obj __maybe_unused, + int err __maybe_unused, + char *buf, size_t size) +{ + return __bpf_strerror(buf, size); +} +#endif +#endif diff --git a/kernel/tools/perf/util/build-id.c b/kernel/tools/perf/util/build-id.c index 61867dff5..6a7e273a5 100644 --- a/kernel/tools/perf/util/build-id.c +++ b/kernel/tools/perf/util/build-id.c @@ -43,6 +43,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused, if (al.map != NULL) al.map->dso->hit = 1; + thread__put(thread); return 0; } @@ -59,8 +60,10 @@ static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused, dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid, event->fork.ppid, event->fork.ptid); - if (thread) + if (thread) { machine__remove_thread(machine, thread); + thread__put(thread); + } return 0; } @@ -73,6 +76,7 @@ struct perf_tool build_id__mark_dso_hit_ops = { .exit = perf_event__exit_del_thread, .attr = perf_event__process_attr, .build_id = perf_event__process_build_id, + .ordered_events = true, }; int build_id__sprintf(const u8 *build_id, int len, char *bf) @@ -87,7 +91,39 @@ int build_id__sprintf(const u8 *build_id, int len, char *bf) bid += 2; } - return raw - build_id; + return (bid - bf) + 1; +} + +int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id) +{ + char notes[PATH_MAX]; + u8 build_id[BUILD_ID_SIZE]; + int ret; + + if (!root_dir) + root_dir = ""; + + scnprintf(notes, sizeof(notes), "%s/sys/kernel/notes", root_dir); + + ret = sysfs__read_build_id(notes, build_id, sizeof(build_id)); + if (ret < 0) + return ret; + + return build_id__sprintf(build_id, sizeof(build_id), sbuild_id); +} + +int filename__sprintf_build_id(const char *pathname, char *sbuild_id) +{ + u8 build_id[BUILD_ID_SIZE]; + int ret; + + ret = filename__read_build_id(pathname, build_id, sizeof(build_id)); + if (ret < 0) + return ret; + else if (ret != sizeof(build_id)) + return -EINVAL; + + return build_id__sprintf(build_id, sizeof(build_id), sbuild_id); } /* asnprintf consolidates asprintf and snprintf */ @@ -121,7 +157,7 @@ static char *build_id__filename(const char *sbuild_id, char *bf, size_t size) char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size) { - char build_id_hex[BUILD_ID_SIZE * 2 + 1]; + char build_id_hex[SBUILD_ID_SIZE]; if (!dso->has_build_id) return NULL; @@ -159,15 +195,20 @@ static int write_buildid(const char *name, size_t name_len, u8 *build_id, return write_padded(fd, name, name_len + 1, len); } -static int __dsos__write_buildid_table(struct list_head *head, - struct machine *machine, - pid_t pid, u16 misc, int fd) +static int machine__write_buildid_table(struct machine *machine, int fd) { + int err = 0; char nm[PATH_MAX]; struct dso *pos; + u16 kmisc = PERF_RECORD_MISC_KERNEL, + umisc = PERF_RECORD_MISC_USER; + + if (!machine__is_host(machine)) { + kmisc = 
PERF_RECORD_MISC_GUEST_KERNEL; + umisc = PERF_RECORD_MISC_GUEST_USER; + } - dsos__for_each_with_build_id(pos, head) { - int err; + dsos__for_each_with_build_id(pos, &machine->dsos.head) { const char *name; size_t name_len; @@ -186,32 +227,12 @@ static int __dsos__write_buildid_table(struct list_head *head, name_len = pos->long_name_len + 1; } - err = write_buildid(name, name_len, pos->build_id, - pid, misc, fd); + err = write_buildid(name, name_len, pos->build_id, machine->pid, + pos->kernel ? kmisc : umisc, fd); if (err) - return err; - } - - return 0; -} - -static int machine__write_buildid_table(struct machine *machine, int fd) -{ - int err; - u16 kmisc = PERF_RECORD_MISC_KERNEL, - umisc = PERF_RECORD_MISC_USER; - - if (!machine__is_host(machine)) { - kmisc = PERF_RECORD_MISC_GUEST_KERNEL; - umisc = PERF_RECORD_MISC_GUEST_USER; + break; } - err = __dsos__write_buildid_table(&machine->kernel_dsos.head, machine, - machine->pid, kmisc, fd); - if (err == 0) - err = __dsos__write_buildid_table(&machine->user_dsos.head, - machine, machine->pid, umisc, - fd); return err; } @@ -244,13 +265,7 @@ static int __dsos__hit_all(struct list_head *head) static int machine__hit_all_dsos(struct machine *machine) { - int err; - - err = __dsos__hit_all(&machine->kernel_dsos.head); - if (err) - return err; - - return __dsos__hit_all(&machine->user_dsos.head); + return __dsos__hit_all(&machine->dsos.head); } int dsos__hit_all(struct perf_session *session) @@ -309,7 +324,7 @@ int build_id_cache__list_build_ids(const char *pathname, struct dirent *d; int ret = 0; - list = strlist__new(true, NULL); + list = strlist__new(NULL, NULL); dir_name = build_id_cache__dirname_from_path(pathname, false, false); if (!list || !dir_name) { ret = -ENOMEM; @@ -402,7 +417,7 @@ static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, const char *name, bool is_kallsyms, bool is_vdso) { - char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + char sbuild_id[SBUILD_ID_SIZE]; build_id__sprintf(build_id, build_id_size, sbuild_id); @@ -490,9 +505,7 @@ static int __dsos__cache_build_ids(struct list_head *head, static int machine__cache_build_ids(struct machine *machine) { - int ret = __dsos__cache_build_ids(&machine->kernel_dsos.head, machine); - ret |= __dsos__cache_build_ids(&machine->user_dsos.head, machine); - return ret; + return __dsos__cache_build_ids(&machine->dsos.head, machine); } int perf_session__cache_build_ids(struct perf_session *session) @@ -517,11 +530,7 @@ int perf_session__cache_build_ids(struct perf_session *session) static bool machine__read_build_ids(struct machine *machine, bool with_hits) { - bool ret; - - ret = __dsos__read_build_ids(&machine->kernel_dsos.head, with_hits); - ret |= __dsos__read_build_ids(&machine->user_dsos.head, with_hits); - return ret; + return __dsos__read_build_ids(&machine->dsos.head, with_hits); } bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) diff --git a/kernel/tools/perf/util/build-id.h b/kernel/tools/perf/util/build-id.h index 85011222c..27a14a8a9 100644 --- a/kernel/tools/perf/util/build-id.h +++ b/kernel/tools/perf/util/build-id.h @@ -1,7 +1,8 @@ #ifndef PERF_BUILD_ID_H_ #define PERF_BUILD_ID_H_ 1 -#define BUILD_ID_SIZE 20 +#define BUILD_ID_SIZE 20 +#define SBUILD_ID_SIZE (BUILD_ID_SIZE * 2 + 1) #include "tool.h" #include "strlist.h" @@ -11,6 +12,9 @@ extern struct perf_tool build_id__mark_dso_hit_ops; struct dso; int build_id__sprintf(const u8 *build_id, int len, char *bf); +int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id); 
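A small usage sketch for the helpers declared here (not part of the patch; the
file path is arbitrary). SBUILD_ID_SIZE is sized for two hex digits per
build-id byte plus the terminating NUL, and on success both helpers return the
value of build_id__sprintf(), i.e. the string length including the NUL:

	void example_print_build_ids(void)
	{
		char sbuild_id[SBUILD_ID_SIZE];	/* 2 * BUILD_ID_SIZE + 1 = 41 bytes */

		/* NULL root_dir means the running system's /sys/kernel/notes */
		if (sysfs__sprintf_build_id(NULL, sbuild_id) > 0)
			printf("kernel build-id: %s\n", sbuild_id);

		if (filename__sprintf_build_id("/bin/true", sbuild_id) > 0)
			printf("build-id of /bin/true: %s\n", sbuild_id);
	}
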
+int filename__sprintf_build_id(const char *pathname, char *sbuild_id); + char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size); int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event, diff --git a/kernel/tools/perf/util/cache.h b/kernel/tools/perf/util/cache.h index fbcca21d6..c861373aa 100644 --- a/kernel/tools/perf/util/cache.h +++ b/kernel/tools/perf/util/cache.h @@ -30,7 +30,6 @@ extern const char *perf_config_dirname(const char *, const char *); /* pager.c */ extern void setup_pager(void); -extern const char *pager_program; extern int pager_in_use(void); extern int pager_use_color; diff --git a/kernel/tools/perf/util/callchain.c b/kernel/tools/perf/util/callchain.c index 9f643ee77..735ad48e1 100644 --- a/kernel/tools/perf/util/callchain.c +++ b/kernel/tools/perf/util/callchain.c @@ -25,96 +25,9 @@ __thread struct callchain_cursor callchain_cursor; -#ifdef HAVE_DWARF_UNWIND_SUPPORT -static int get_stack_size(const char *str, unsigned long *_size) -{ - char *endptr; - unsigned long size; - unsigned long max_size = round_down(USHRT_MAX, sizeof(u64)); - - size = strtoul(str, &endptr, 0); - - do { - if (*endptr) - break; - - size = round_up(size, sizeof(u64)); - if (!size || size > max_size) - break; - - *_size = size; - return 0; - - } while (0); - - pr_err("callchain: Incorrect stack dump size (max %ld): %s\n", - max_size, str); - return -1; -} -#endif /* HAVE_DWARF_UNWIND_SUPPORT */ - -int parse_callchain_record_opt(const char *arg) +int parse_callchain_record_opt(const char *arg, struct callchain_param *param) { - char *tok, *name, *saveptr = NULL; - char *buf; - int ret = -1; - - /* We need buffer that we know we can write to. */ - buf = malloc(strlen(arg) + 1); - if (!buf) - return -ENOMEM; - - strcpy(buf, arg); - - tok = strtok_r((char *)buf, ",", &saveptr); - name = tok ? 
: (char *)buf; - - do { - /* Framepointer style */ - if (!strncmp(name, "fp", sizeof("fp"))) { - if (!strtok_r(NULL, ",", &saveptr)) { - callchain_param.record_mode = CALLCHAIN_FP; - ret = 0; - } else - pr_err("callchain: No more arguments " - "needed for --call-graph fp\n"); - break; - -#ifdef HAVE_DWARF_UNWIND_SUPPORT - /* Dwarf style */ - } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) { - const unsigned long default_stack_dump_size = 8192; - - ret = 0; - callchain_param.record_mode = CALLCHAIN_DWARF; - callchain_param.dump_size = default_stack_dump_size; - - tok = strtok_r(NULL, ",", &saveptr); - if (tok) { - unsigned long size = 0; - - ret = get_stack_size(tok, &size); - callchain_param.dump_size = size; - } -#endif /* HAVE_DWARF_UNWIND_SUPPORT */ - } else if (!strncmp(name, "lbr", sizeof("lbr"))) { - if (!strtok_r(NULL, ",", &saveptr)) { - callchain_param.record_mode = CALLCHAIN_LBR; - ret = 0; - } else - pr_err("callchain: No more arguments " - "needed for --call-graph lbr\n"); - break; - } else { - pr_err("callchain: Unknown --call-graph option " - "value: %s\n", arg); - break; - } - - } while (0); - - free(buf); - return ret; + return parse_callchain_record(arg, param); } static int parse_callchain_mode(const char *value) @@ -138,10 +51,12 @@ static int parse_callchain_order(const char *value) { if (!strncmp(value, "caller", strlen(value))) { callchain_param.order = ORDER_CALLER; + callchain_param.order_set = true; return 0; } if (!strncmp(value, "callee", strlen(value))) { callchain_param.order = ORDER_CALLEE; + callchain_param.order_set = true; return 0; } return -1; @@ -164,12 +79,14 @@ static int parse_callchain_sort_key(const char *value) return -1; } -int -parse_callchain_report_opt(const char *arg) +static int +__parse_callchain_report_opt(const char *arg, bool allow_record_opt) { char *tok; char *endptr; bool minpcnt_set = false; + bool record_opt_set = false; + bool try_stack_size = false; symbol_conf.use_callchain = true; @@ -187,6 +104,28 @@ parse_callchain_report_opt(const char *arg) !parse_callchain_order(tok) || !parse_callchain_sort_key(tok)) { /* parsing ok - move on to the next */ + try_stack_size = false; + goto next; + } else if (allow_record_opt && !record_opt_set) { + if (parse_callchain_record(tok, &callchain_param)) + goto try_numbers; + + /* assume that number followed by 'dwarf' is stack size */ + if (callchain_param.record_mode == CALLCHAIN_DWARF) + try_stack_size = true; + + record_opt_set = true; + goto next; + } + +try_numbers: + if (try_stack_size) { + unsigned long size = 0; + + if (get_stack_size(tok, &size) < 0) + return -1; + callchain_param.dump_size = size; + try_stack_size = false; } else if (!minpcnt_set) { /* try to get the min percent */ callchain_param.min_percent = strtod(tok, &endptr); @@ -199,7 +138,7 @@ parse_callchain_report_opt(const char *arg) if (tok == endptr) return -1; } - +next: arg = NULL; } @@ -210,6 +149,16 @@ parse_callchain_report_opt(const char *arg) return 0; } +int parse_callchain_report_opt(const char *arg) +{ + return __parse_callchain_report_opt(arg, false); +} + +int parse_callchain_top_opt(const char *arg) +{ + return __parse_callchain_report_opt(arg, true); +} + int perf_callchain_config(const char *var, const char *value) { char *endptr; @@ -219,7 +168,7 @@ int perf_callchain_config(const char *var, const char *value) var += sizeof("call-graph.") - 1; if (!strcmp(var, "record-mode")) - return parse_callchain_record_opt(value); + return parse_callchain_record_opt(value, &callchain_param); #ifdef 
HAVE_DWARF_UNWIND_SUPPORT
 	if (!strcmp(var, "dump-size")) {
 		unsigned long size = 0;
diff --git a/kernel/tools/perf/util/callchain.h b/kernel/tools/perf/util/callchain.h
index 6033a0a21..fce8161e5 100644
--- a/kernel/tools/perf/util/callchain.h
+++ b/kernel/tools/perf/util/callchain.h
@@ -7,6 +7,30 @@
 #include "event.h"
 #include "symbol.h"
 
+#define HELP_PAD "\t\t\t\t"
+
+#define CALLCHAIN_HELP "set up and enable call-graph (stack chain/backtrace):\n\n"
+
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+# define RECORD_MODE_HELP  HELP_PAD "record_mode:\tcall graph recording mode (fp|dwarf|lbr)\n"
+#else
+# define RECORD_MODE_HELP  HELP_PAD "record_mode:\tcall graph recording mode (fp|lbr)\n"
+#endif
+
+#define RECORD_SIZE_HELP						\
+	HELP_PAD "record_size:\tif record_mode is 'dwarf', max size of stack recording (<bytes>)\n" \
+	HELP_PAD "\t\tdefault: 8192 (bytes)\n"
+
+#define CALLCHAIN_RECORD_HELP  CALLCHAIN_HELP RECORD_MODE_HELP RECORD_SIZE_HELP
+
+#define CALLCHAIN_REPORT_HELP						\
+	HELP_PAD "print_type:\tcall graph printing style (graph|flat|fractal|none)\n" \
+	HELP_PAD "threshold:\tminimum call graph inclusion threshold (<percent>)\n" \
+	HELP_PAD "print_limit:\tmaximum number of call graph entries (<number>)\n" \
+	HELP_PAD "order:\t\tcall graph order (caller|callee)\n" \
+	HELP_PAD "sort_key:\tcall graph sort key (function|address)\n" \
+	HELP_PAD "branch:\t\tinclude last branch info to call graph (branch)\n"
+
 enum perf_call_graph_mode {
 	CALLCHAIN_NONE,
 	CALLCHAIN_FP,
@@ -63,6 +87,7 @@ struct callchain_param {
 	double			min_percent;
 	sort_chain_func_t	sort;
 	enum chain_order	order;
+	bool			order_set;
 	enum chain_key		key;
 	bool			branch_callstack;
 };
@@ -72,6 +97,10 @@ extern struct callchain_param callchain_param;
 struct callchain_list {
 	u64			ip;
 	struct map_symbol	ms;
+	struct /* for TUI */ {
+		bool		unfolded;
+		bool		has_children;
+	};
 	char		       *srcline;
 	struct list_head	list;
 };
@@ -173,8 +202,10 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *
 			bool hide_unresolved);
 
 extern const char record_callchain_help[];
-int parse_callchain_record_opt(const char *arg);
+extern int parse_callchain_record(const char *arg, struct callchain_param *param);
+int parse_callchain_record_opt(const char *arg, struct callchain_param *param);
 int parse_callchain_report_opt(const char *arg);
+int parse_callchain_top_opt(const char *arg);
 int perf_callchain_config(const char *var, const char *value);
 
 static inline void callchain_cursor_snapshot(struct callchain_cursor *dest,
diff --git a/kernel/tools/perf/util/cgroup.c b/kernel/tools/perf/util/cgroup.c
index 88f7be399..32e12ecfe 100644
--- a/kernel/tools/perf/util/cgroup.c
+++ b/kernel/tools/perf/util/cgroup.c
@@ -115,23 +115,19 @@ static int add_cgroup(struct perf_evlist *evlist, char *str)
 			goto found;
 		n++;
 	}
-	if (cgrp->refcnt == 0)
+	if (atomic_read(&cgrp->refcnt) == 0)
 		free(cgrp);
 
 	return -1;
 found:
-	cgrp->refcnt++;
+	atomic_inc(&cgrp->refcnt);
 	counter->cgrp = cgrp;
 	return 0;
 }
 
 void close_cgroup(struct cgroup_sel *cgrp)
 {
-	if (!cgrp)
-		return;
-
-	/* XXX: not reentrant */
-	if (--cgrp->refcnt == 0) {
+	if (cgrp && atomic_dec_and_test(&cgrp->refcnt)) {
 		close(cgrp->fd);
 		zfree(&cgrp->name);
 		free(cgrp);
diff --git a/kernel/tools/perf/util/cgroup.h b/kernel/tools/perf/util/cgroup.h
index 89acd6deb..b4b8cb42f 100644
--- a/kernel/tools/perf/util/cgroup.h
+++ b/kernel/tools/perf/util/cgroup.h
@@ -1,12 +1,14 @@
 #ifndef __CGROUP_H__
 #define __CGROUP_H__
 
+#include <linux/atomic.h>
+
 struct option;
 
 struct cgroup_sel {
 	char *name;
 	int fd;
-	int refcnt;
+	atomic_t refcnt;
 };
 
diff --git
a/kernel/tools/perf/util/cloexec.h b/kernel/tools/perf/util/cloexec.h index 68888c29b..3bee6773d 100644 --- a/kernel/tools/perf/util/cloexec.h +++ b/kernel/tools/perf/util/cloexec.h @@ -4,7 +4,7 @@ unsigned long perf_event_open_cloexec_flag(void); #ifdef __GLIBC_PREREQ -#if !__GLIBC_PREREQ(2, 6) +#if !__GLIBC_PREREQ(2, 6) && !defined(__UCLIBC__) extern int sched_getcpu(void) __THROW; #endif #endif diff --git a/kernel/tools/perf/util/color.c b/kernel/tools/perf/util/color.c index 55355b3d4..9b9565416 100644 --- a/kernel/tools/perf/util/color.c +++ b/kernel/tools/perf/util/color.c @@ -67,8 +67,9 @@ static int __color_vsnprintf(char *bf, size_t size, const char *color, return r; } +/* Colors are not included in return value */ static int __color_vfprintf(FILE *fp, const char *color, const char *fmt, - va_list args, const char *trail) + va_list args) { int r = 0; @@ -83,12 +84,10 @@ static int __color_vfprintf(FILE *fp, const char *color, const char *fmt, } if (perf_use_color_default && *color) - r += fprintf(fp, "%s", color); + fprintf(fp, "%s", color); r += vfprintf(fp, fmt, args); if (perf_use_color_default && *color) - r += fprintf(fp, "%s", PERF_COLOR_RESET); - if (trail) - r += fprintf(fp, "%s", trail); + fprintf(fp, "%s", PERF_COLOR_RESET); return r; } @@ -100,7 +99,7 @@ int color_vsnprintf(char *bf, size_t size, const char *color, int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args) { - return __color_vfprintf(fp, color, fmt, args, NULL); + return __color_vfprintf(fp, color, fmt, args); } int color_snprintf(char *bf, size_t size, const char *color, @@ -126,16 +125,6 @@ int color_fprintf(FILE *fp, const char *color, const char *fmt, ...) return r; } -int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...) -{ - va_list args; - int r; - va_start(args, fmt); - r = __color_vfprintf(fp, color, fmt, args, "\n"); - va_end(args); - return r; -} - /* * This function splits the buffer by newlines and colors the lines individually. 
 *
diff --git a/kernel/tools/perf/util/color.h b/kernel/tools/perf/util/color.h
index 38146f922..a93997f16 100644
--- a/kernel/tools/perf/util/color.h
+++ b/kernel/tools/perf/util/color.h
@@ -35,7 +35,6 @@ int color_vsnprintf(char *bf, size_t size, const char *color,
 int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args);
 int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
 int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...);
-int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
 int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
 int value_color_snprintf(char *bf, size_t size, const char *fmt, double value);
 int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...);
diff --git a/kernel/tools/perf/util/comm.c b/kernel/tools/perf/util/comm.c
index b2bb59df6..21b7ff382 100644
--- a/kernel/tools/perf/util/comm.c
+++ b/kernel/tools/perf/util/comm.c
@@ -2,24 +2,27 @@
 #include "util.h"
 #include <stdlib.h>
 #include <stdio.h>
+#include <linux/atomic.h>
 
 struct comm_str {
 	char *str;
 	struct rb_node rb_node;
-	int ref;
+	atomic_t refcnt;
 };
 
 /* Should perhaps be moved to struct machine */
 static struct rb_root comm_str_root;
 
-static void comm_str__get(struct comm_str *cs)
+static struct comm_str *comm_str__get(struct comm_str *cs)
 {
-	cs->ref++;
+	if (cs)
+		atomic_inc(&cs->refcnt);
+	return cs;
 }
 
 static void comm_str__put(struct comm_str *cs)
 {
-	if (!--cs->ref) {
+	if (cs && atomic_dec_and_test(&cs->refcnt)) {
 		rb_erase(&cs->rb_node, &comm_str_root);
 		zfree(&cs->str);
 		free(cs);
@@ -40,6 +43,8 @@ static struct comm_str *comm_str__alloc(const char *str)
 		return NULL;
 	}
 
+	atomic_set(&cs->refcnt, 0);
+
 	return cs;
 }
 
diff --git a/kernel/tools/perf/util/config.c b/kernel/tools/perf/util/config.c
index e18f653cd..2e452ac13 100644
--- a/kernel/tools/perf/util/config.c
+++ b/kernel/tools/perf/util/config.c
@@ -12,6 +12,7 @@
 #include "cache.h"
 #include "exec_cmd.h"
 #include "util/hist.h"  /* perf_hist_config */
+#include "util/llvm-utils.h"   /* perf_llvm_config */
 
 #define MAXNAME (256)
 
@@ -408,6 +409,9 @@ int perf_default_config(const char *var, const char *value,
 	if (!prefixcmp(var, "call-graph."))
 		return perf_callchain_config(var, value);
 
+	if (!prefixcmp(var, "llvm."))
+		return perf_llvm_config(var, value);
+
 	/* Add other config variables here. */
 	return 0;
 }
diff --git a/kernel/tools/perf/util/counts.c b/kernel/tools/perf/util/counts.c
new file mode 100644
index 000000000..e3fde313d
--- /dev/null
+++ b/kernel/tools/perf/util/counts.c
@@ -0,0 +1,52 @@
+#include <stdlib.h>
+#include "evsel.h"
+#include "counts.h"
+
+struct perf_counts *perf_counts__new(int ncpus, int nthreads)
+{
+	struct perf_counts *counts = zalloc(sizeof(*counts));
+
+	if (counts) {
+		struct xyarray *values;
+
+		values = xyarray__new(ncpus, nthreads, sizeof(struct perf_counts_values));
+		if (!values) {
+			free(counts);
+			return NULL;
+		}
+
+		counts->values = values;
+	}
+
+	return counts;
+}
+
+void perf_counts__delete(struct perf_counts *counts)
+{
+	if (counts) {
+		xyarray__delete(counts->values);
+		free(counts);
+	}
+}
+
+static void perf_counts__reset(struct perf_counts *counts)
+{
+	xyarray__reset(counts->values);
+}
+
+void perf_evsel__reset_counts(struct perf_evsel *evsel)
+{
+	perf_counts__reset(evsel->counts);
+}
+
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+	evsel->counts = perf_counts__new(ncpus, nthreads);
+	return evsel->counts != NULL ? 0 : -ENOMEM;
0 : -ENOMEM; +} + +void perf_evsel__free_counts(struct perf_evsel *evsel) +{ + perf_counts__delete(evsel->counts); + evsel->counts = NULL; +} diff --git a/kernel/tools/perf/util/counts.h b/kernel/tools/perf/util/counts.h new file mode 100644 index 000000000..34d8baaf5 --- /dev/null +++ b/kernel/tools/perf/util/counts.h @@ -0,0 +1,37 @@ +#ifndef __PERF_COUNTS_H +#define __PERF_COUNTS_H + +#include "xyarray.h" + +struct perf_counts_values { + union { + struct { + u64 val; + u64 ena; + u64 run; + }; + u64 values[3]; + }; +}; + +struct perf_counts { + s8 scaled; + struct perf_counts_values aggr; + struct xyarray *values; +}; + + +static inline struct perf_counts_values* +perf_counts(struct perf_counts *counts, int cpu, int thread) +{ + return xyarray__entry(counts->values, cpu, thread); +} + +struct perf_counts *perf_counts__new(int ncpus, int nthreads); +void perf_counts__delete(struct perf_counts *counts); + +void perf_evsel__reset_counts(struct perf_evsel *evsel); +int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus, int nthreads); +void perf_evsel__free_counts(struct perf_evsel *evsel); + +#endif /* __PERF_COUNTS_H */ diff --git a/kernel/tools/perf/util/cpumap.c b/kernel/tools/perf/util/cpumap.c index c4e55b710..10af1e752 100644 --- a/kernel/tools/perf/util/cpumap.c +++ b/kernel/tools/perf/util/cpumap.c @@ -5,6 +5,7 @@ #include #include #include +#include "asm/bug.h" static struct cpu_map *cpu_map__default_new(void) { @@ -22,6 +23,7 @@ static struct cpu_map *cpu_map__default_new(void) cpus->map[i] = i; cpus->nr = nr_cpus; + atomic_set(&cpus->refcnt, 1); } return cpus; @@ -35,6 +37,7 @@ static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus) if (cpus != NULL) { cpus->nr = nr_cpus; memcpy(cpus->map, tmp_cpus, payload_size); + atomic_set(&cpus->refcnt, 1); } return cpus; @@ -194,42 +197,77 @@ struct cpu_map *cpu_map__dummy_new(void) if (cpus != NULL) { cpus->nr = 1; cpus->map[0] = -1; + atomic_set(&cpus->refcnt, 1); } return cpus; } -void cpu_map__delete(struct cpu_map *map) +struct cpu_map *cpu_map__empty_new(int nr) { - free(map); + struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr); + + if (cpus != NULL) { + int i; + + cpus->nr = nr; + for (i = 0; i < nr; i++) + cpus->map[i] = -1; + + atomic_set(&cpus->refcnt, 1); + } + + return cpus; } -int cpu_map__get_socket(struct cpu_map *map, int idx) +static void cpu_map__delete(struct cpu_map *map) { - FILE *fp; - const char *mnt; - char path[PATH_MAX]; - int cpu, ret; + if (map) { + WARN_ONCE(atomic_read(&map->refcnt) != 0, + "cpu_map refcnt unbalanced\n"); + free(map); + } +} - if (idx > map->nr) - return -1; +struct cpu_map *cpu_map__get(struct cpu_map *map) +{ + if (map) + atomic_inc(&map->refcnt); + return map; +} - cpu = map->map[idx]; +void cpu_map__put(struct cpu_map *map) +{ + if (map && atomic_dec_and_test(&map->refcnt)) + cpu_map__delete(map); +} - mnt = sysfs__mountpoint(); - if (!mnt) - return -1; +static int cpu__get_topology_int(int cpu, const char *name, int *value) +{ + char path[PATH_MAX]; snprintf(path, PATH_MAX, - "%s/devices/system/cpu/cpu%d/topology/physical_package_id", - mnt, cpu); + "devices/system/cpu/cpu%d/topology/%s", cpu, name); - fp = fopen(path, "r"); - if (!fp) + return sysfs__read_int(path, value); +} + +int cpu_map__get_socket_id(int cpu) +{ + int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value); + return ret ?: value; +} + +int cpu_map__get_socket(struct cpu_map *map, int idx, void *data __maybe_unused) +{ + int cpu; + + if (idx > map->nr) return -1; - 
ret = fscanf(fp, "%d", &cpu); - fclose(fp); - return ret == 1 ? cpu : -1; + + cpu = map->map[idx]; + + return cpu_map__get_socket_id(cpu); } static int cmp_ids(const void *a, const void *b) @@ -237,8 +275,9 @@ static int cmp_ids(const void *a, const void *b) return *(int *)a - *(int *)b; } -static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res, - int (*f)(struct cpu_map *map, int cpu)) +int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res, + int (*f)(struct cpu_map *map, int cpu, void *data), + void *data) { struct cpu_map *c; int nr = cpus->nr; @@ -250,7 +289,7 @@ static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res, return -1; for (cpu = 0; cpu < nr; cpu++) { - s1 = f(cpus, cpu); + s1 = f(cpus, cpu, data); for (s2 = 0; s2 < c->nr; s2++) { if (s1 == c->map[s2]) break; @@ -263,39 +302,29 @@ static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res, /* ensure we process id in increasing order */ qsort(c->map, c->nr, sizeof(int), cmp_ids); + atomic_set(&c->refcnt, 1); *res = c; return 0; } -int cpu_map__get_core(struct cpu_map *map, int idx) +int cpu_map__get_core_id(int cpu) { - FILE *fp; - const char *mnt; - char path[PATH_MAX]; - int cpu, ret, s; + int value, ret = cpu__get_topology_int(cpu, "core_id", &value); + return ret ?: value; +} + +int cpu_map__get_core(struct cpu_map *map, int idx, void *data) +{ + int cpu, s; if (idx > map->nr) return -1; cpu = map->map[idx]; - mnt = sysfs__mountpoint(); - if (!mnt) - return -1; - - snprintf(path, PATH_MAX, - "%s/devices/system/cpu/cpu%d/topology/core_id", - mnt, cpu); - - fp = fopen(path, "r"); - if (!fp) - return -1; - ret = fscanf(fp, "%d", &cpu); - fclose(fp); - if (ret != 1) - return -1; + cpu = cpu_map__get_core_id(cpu); - s = cpu_map__get_socket(map, idx); + s = cpu_map__get_socket(map, idx, data); if (s == -1) return -1; @@ -310,12 +339,12 @@ int cpu_map__get_core(struct cpu_map *map, int idx) int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp) { - return cpu_map__build_map(cpus, sockp, cpu_map__get_socket); + return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL); } int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep) { - return cpu_map__build_map(cpus, corep, cpu_map__get_core); + return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL); } /* setup simple routines to easily access node numbers given a cpu number */ diff --git a/kernel/tools/perf/util/cpumap.h b/kernel/tools/perf/util/cpumap.h index 61a654849..85f777245 100644 --- a/kernel/tools/perf/util/cpumap.h +++ b/kernel/tools/perf/util/cpumap.h @@ -3,25 +3,32 @@ #include #include +#include <linux/atomic.h> #include "perf.h" #include "util/debug.h" struct cpu_map { + atomic_t refcnt; int nr; int map[]; }; struct cpu_map *cpu_map__new(const char *cpu_list); +struct cpu_map *cpu_map__empty_new(int nr); struct cpu_map *cpu_map__dummy_new(void); -void cpu_map__delete(struct cpu_map *map); struct cpu_map *cpu_map__read(FILE *file); size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp); -int cpu_map__get_socket(struct cpu_map *map, int idx); -int cpu_map__get_core(struct cpu_map *map, int idx); +int cpu_map__get_socket_id(int cpu); +int cpu_map__get_socket(struct cpu_map *map, int idx, void *data); +int cpu_map__get_core_id(int cpu); +int cpu_map__get_core(struct cpu_map *map, int idx, void *data); int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp); int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep); +struct cpu_map 
*cpu_map__get(struct cpu_map *map); +void cpu_map__put(struct cpu_map *map); + static inline int cpu_map__socket(struct cpu_map *sock, int s) { if (!sock || s > sock->nr || s < 0) @@ -81,4 +88,7 @@ static inline int cpu__get_node(int cpu) return cpunode_map[cpu]; } +int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res, + int (*f)(struct cpu_map *map, int cpu, void *data), + void *data); #endif /* __PERF_CPUMAP_H */ diff --git a/kernel/tools/perf/util/data-convert-bt.c b/kernel/tools/perf/util/data-convert-bt.c index dd17c9a32..5bfc1198a 100644 --- a/kernel/tools/perf/util/data-convert-bt.c +++ b/kernel/tools/perf/util/data-convert-bt.c @@ -14,6 +14,7 @@ #include #include #include +#include <babeltrace/ctf-ir/utils.h> #include #include #include "asm/bug.h" @@ -38,12 +39,21 @@ struct evsel_priv { struct bt_ctf_event_class *event_class; }; +#define MAX_CPUS 4096 + +struct ctf_stream { + struct bt_ctf_stream *stream; + int cpu; + u32 count; +}; + struct ctf_writer { /* writer primitives */ - struct bt_ctf_writer *writer; - struct bt_ctf_stream *stream; - struct bt_ctf_stream_class *stream_class; - struct bt_ctf_clock *clock; + struct bt_ctf_writer *writer; + struct ctf_stream **stream; + int stream_cnt; + struct bt_ctf_stream_class *stream_class; + struct bt_ctf_clock *clock; /* data types */ union { @@ -65,6 +75,9 @@ struct convert { u64 events_size; u64 events_count; + + /* Ordered events configured queue size. */ + u64 queue_size; }; static int value_set(struct bt_ctf_field_type *type, @@ -153,6 +166,43 @@ get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field) return cw->data.u32; } +static unsigned long long adjust_signedness(unsigned long long value_int, int size) +{ + unsigned long long value_mask; + + /* + * value_mask = (1 << (size * 8 - 1)) - 1. + * Directly set value_mask for code readers. + */ + switch (size) { + case 1: + value_mask = 0x7fULL; + break; + case 2: + value_mask = 0x7fffULL; + break; + case 4: + value_mask = 0x7fffffffULL; + break; + case 8: + /* + * For a 64 bit value, return it as is. There is no need + * to fill the high bits. + */ + /* Fall through */ + default: + /* BUG! */ + return value_int; + } + + /* If it is a positive value, don't adjust. */ + if ((value_int & (~0ULL - value_mask)) == 0) + return value_int; + + /* Fill the upper part of value_int with ones to make it a negative long long. 
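For illustration, a minimal standalone sketch of the sign-extension trick that adjust_signedness() implements above (hypothetical values, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		/* A 16-bit -2 read into a u64 without sign extension. */
		unsigned long long raw  = 0xfffeULL;
		unsigned long long mask = 0x7fffULL;	/* (1 << 15) - 1 */
		long long val;

		if ((raw & ~mask) == 0)		/* sign bit clear: positive, keep as is */
			val = (long long)raw;
		else				/* sign bit set: fill the upper bits with ones */
			val = (long long)((raw & mask) | ~mask);

		printf("%lld\n", val);		/* prints -2 */
		return 0;
	}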
*/ + return (value_int & value_mask) | ~value_mask; +} + static int add_tracepoint_field_value(struct ctf_writer *cw, struct bt_ctf_event_class *event_class, struct bt_ctf_event *event, @@ -164,7 +214,6 @@ static int add_tracepoint_field_value(struct ctf_writer *cw, struct bt_ctf_field *field; const char *name = fmtf->name; void *data = sample->raw_data; - unsigned long long value_int; unsigned long flags = fmtf->flags; unsigned int n_items; unsigned int i; @@ -172,6 +221,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw, unsigned int len; int ret; + name = fmtf->alias; offset = fmtf->offset; len = fmtf->size; if (flags & FIELD_IS_STRING) @@ -208,11 +258,6 @@ static int add_tracepoint_field_value(struct ctf_writer *cw, type = get_tracepoint_field_type(cw, fmtf); for (i = 0; i < n_items; i++) { - if (!(flags & FIELD_IS_STRING)) - value_int = pevent_read_number( - fmtf->event->pevent, - data + offset + i * len, len); - if (flags & FIELD_IS_ARRAY) field = bt_ctf_field_array_get_field(array_field, i); else @@ -226,12 +271,21 @@ static int add_tracepoint_field_value(struct ctf_writer *cw, if (flags & FIELD_IS_STRING) ret = bt_ctf_field_string_set_value(field, data + offset + i * len); - else if (!(flags & FIELD_IS_SIGNED)) - ret = bt_ctf_field_unsigned_integer_set_value( - field, value_int); - else - ret = bt_ctf_field_signed_integer_set_value( - field, value_int); + else { + unsigned long long value_int; + + value_int = pevent_read_number( + fmtf->event->pevent, + data + offset + i * len, len); + + if (!(flags & FIELD_IS_SIGNED)) + ret = bt_ctf_field_unsigned_integer_set_value( + field, value_int); + else + ret = bt_ctf_field_signed_integer_set_value( + field, adjust_signedness(value_int, len)); + } + if (ret) { pr_err("failed to set file value %s\n", name); goto err_put_field; @@ -346,12 +400,6 @@ static int add_generic_values(struct ctf_writer *cw, return -1; } - if (type & PERF_SAMPLE_CPU) { - ret = value_set_u32(cw, event, "perf_cpu", sample->cpu); - if (ret) - return -1; - } - if (type & PERF_SAMPLE_PERIOD) { ret = value_set_u64(cw, event, "perf_period", sample->period); if (ret) @@ -381,6 +429,129 @@ static int add_generic_values(struct ctf_writer *cw, return 0; } +static int ctf_stream__flush(struct ctf_stream *cs) +{ + int err = 0; + + if (cs) { + err = bt_ctf_stream_flush(cs->stream); + if (err) + pr_err("CTF stream %d flush failed\n", cs->cpu); + + pr("Flush stream for cpu %d (%u samples)\n", + cs->cpu, cs->count); + + cs->count = 0; + } + + return err; +} + +static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu) +{ + struct ctf_stream *cs; + struct bt_ctf_field *pkt_ctx = NULL; + struct bt_ctf_field *cpu_field = NULL; + struct bt_ctf_stream *stream = NULL; + int ret; + + cs = zalloc(sizeof(*cs)); + if (!cs) { + pr_err("Failed to allocate ctf stream\n"); + return NULL; + } + + stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class); + if (!stream) { + pr_err("Failed to create CTF stream\n"); + goto out; + } + + pkt_ctx = bt_ctf_stream_get_packet_context(stream); + if (!pkt_ctx) { + pr_err("Failed to obtain packet context\n"); + goto out; + } + + cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id"); + bt_ctf_field_put(pkt_ctx); + if (!cpu_field) { + pr_err("Failed to obtain cpu field\n"); + goto out; + } + + ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu); + if (ret) { + pr_err("Failed to update CPU number\n"); + goto out; + } + + bt_ctf_field_put(cpu_field); + + cs->cpu = cpu; + cs->stream = stream; + 
return cs; + +out: + if (cpu_field) + bt_ctf_field_put(cpu_field); + if (stream) + bt_ctf_stream_put(stream); + + free(cs); + return NULL; +} + +static void ctf_stream__delete(struct ctf_stream *cs) +{ + if (cs) { + bt_ctf_stream_put(cs->stream); + free(cs); + } +} + +static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu) +{ + struct ctf_stream *cs = cw->stream[cpu]; + + if (!cs) { + cs = ctf_stream__create(cw, cpu); + cw->stream[cpu] = cs; + } + + return cs; +} + +static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample, + struct perf_evsel *evsel) +{ + int cpu = 0; + + if (evsel->attr.sample_type & PERF_SAMPLE_CPU) + cpu = sample->cpu; + + if (cpu > cw->stream_cnt) { + pr_err("Event was recorded for CPU %d, limit is at %d.\n", + cpu, cw->stream_cnt); + cpu = 0; + } + + return cpu; +} + +#define STREAM_FLUSH_COUNT 100000 + +/* + * Currently we have no other way to determine the + * time for the stream flush other than keeping track + * of the number of events and checking it against the + * threshold. + */ +static bool is_flush_needed(struct ctf_stream *cs) +{ + return cs->count >= STREAM_FLUSH_COUNT; +} + static int process_sample_event(struct perf_tool *tool, union perf_event *_event __maybe_unused, struct perf_sample *sample, @@ -390,6 +561,7 @@ static int process_sample_event(struct perf_tool *tool, struct convert *c = container_of(tool, struct convert, tool); struct evsel_priv *priv = evsel->priv; struct ctf_writer *cw = &c->writer; + struct ctf_stream *cs; struct bt_ctf_event_class *event_class; struct bt_ctf_event *event; int ret; @@ -424,9 +596,93 @@ static int process_sample_event(struct perf_tool *tool, return -1; } - bt_ctf_stream_append_event(cw->stream, event); + cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel)); + if (cs) { + if (is_flush_needed(cs)) + ctf_stream__flush(cs); + + cs->count++; + bt_ctf_stream_append_event(cs->stream, event); + } + bt_ctf_event_put(event); - return 0; + return cs ? 0 : -1; +} + +/* If dup < 0, add a prefix. Else, add _dupl_X suffix. */ +static char *change_name(char *name, char *orig_name, int dup) +{ + char *new_name = NULL; + size_t len; + + if (!name) + name = orig_name; + + if (dup >= 10) + goto out; + /* + * Add '_' prefix to potential keyword. According to + * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652), + * further CTF spec updates may require us to use '$'. + */ + if (dup < 0) + len = strlen(name) + sizeof("_"); + else + len = strlen(orig_name) + sizeof("_dupl_X"); + + new_name = malloc(len); + if (!new_name) + goto out; + + if (dup < 0) + snprintf(new_name, len, "_%s", name); + else + snprintf(new_name, len, "%s_dupl_%d", orig_name, dup); + +out: + if (name != orig_name) + free(name); + return new_name; +} + +static int event_class_add_field(struct bt_ctf_event_class *event_class, + struct bt_ctf_field_type *type, + struct format_field *field) +{ + struct bt_ctf_field_type *t = NULL; + char *name; + int dup = 1; + int ret; + + /* alias was already assigned */ + if (field->alias != field->name) + return bt_ctf_event_class_add_field(event_class, type, + (char *)field->alias); + + name = field->name; + + /* If 'name' is a keyword, add prefix. 
*/ + if (bt_ctf_validate_identifier(name)) + name = change_name(name, field->name, -1); + + if (!name) { + pr_err("Failed to fix invalid identifier."); + return -1; + } + while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) { + bt_ctf_field_type_put(t); + name = change_name(name, field->name, dup++); + if (!name) { + pr_err("Failed to create dup name for '%s'\n", field->name); + return -1; + } + } + + ret = bt_ctf_event_class_add_field(event_class, type, name); + if (!ret) + field->alias = name; + + return ret; } static int add_tracepoint_fields_types(struct ctf_writer *cw, @@ -457,14 +713,14 @@ static int add_tracepoint_fields_types(struct ctf_writer *cw, if (flags & FIELD_IS_ARRAY) type = bt_ctf_field_type_array_create(type, field->arraylen); - ret = bt_ctf_event_class_add_field(event_class, type, - field->name); + ret = event_class_add_field(event_class, type, field); if (flags & FIELD_IS_ARRAY) bt_ctf_field_type_put(type); if (ret) { - pr_err("Failed to add field '%s\n", field->name); + pr_err("Failed to add field '%s': %d\n", + field->name, ret); return -1; } } @@ -508,7 +764,7 @@ static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel, do { \ pr2(" field '%s'\n", n); \ if (bt_ctf_event_class_add_field(cl, t, n)) { \ - pr_err("Failed to add field '%s;\n", n); \ + pr_err("Failed to add field '%s';\n", n); \ return -1; \ } \ } while (0) @@ -528,9 +784,6 @@ static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel, if (type & PERF_SAMPLE_STREAM_ID) ADD_FIELD(event_class, cw->data.u64, "perf_stream_id"); - if (type & PERF_SAMPLE_CPU) - ADD_FIELD(event_class, cw->data.u32, "perf_cpu"); - if (type & PERF_SAMPLE_PERIOD) ADD_FIELD(event_class, cw->data.u64, "perf_period"); @@ -604,6 +857,39 @@ static int setup_events(struct ctf_writer *cw, struct perf_session *session) return 0; } +static int setup_streams(struct ctf_writer *cw, struct perf_session *session) +{ + struct ctf_stream **stream; + struct perf_header *ph = &session->header; + int ncpus; + + /* + * Try to get the number of cpus used in the data file, + * if not present, fall back to MAX_CPUS. 
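As a rough standalone sketch of the lazy per-CPU slot scheme that setup_streams() and ctf_stream() use above (simplified types and hypothetical names, not the real API):

	#include <stdlib.h>

	struct stream { int cpu; };

	static struct stream **slots;	/* one slot per possible CPU, all NULL initially */
	static int nslots;

	/* Create the stream for @cpu on first use, reuse it afterwards. */
	static struct stream *stream_for_cpu(int cpu)
	{
		if (cpu < 0 || cpu >= nslots)
			return NULL;
		if (!slots[cpu]) {
			slots[cpu] = calloc(1, sizeof(*slots[cpu]));
			if (slots[cpu])
				slots[cpu]->cpu = cpu;
		}
		return slots[cpu];
	}

	int main(void)
	{
		nslots = 8;	/* hypothetical: nr_cpus_avail, or MAX_CPUS as fallback */
		slots = calloc(nslots, sizeof(*slots));
		return stream_for_cpu(3) ? 0 : 1;
	}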
+ */ + ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS; + + stream = zalloc(sizeof(*stream) * ncpus); + if (!stream) { + pr_err("Failed to allocate streams.\n"); + return -ENOMEM; + } + + cw->stream = stream; + cw->stream_cnt = ncpus; + return 0; +} + +static void free_streams(struct ctf_writer *cw) +{ + int cpu; + + for (cpu = 0; cpu < cw->stream_cnt; cpu++) + ctf_stream__delete(cw->stream[cpu]); + + free(cw->stream); +} + static int ctf_writer__setup_env(struct ctf_writer *cw, struct perf_session *session) { @@ -713,7 +999,7 @@ static void ctf_writer__cleanup(struct ctf_writer *cw) ctf_writer__cleanup_data(cw); bt_ctf_clock_put(cw->clock); - bt_ctf_stream_put(cw->stream); + free_streams(cw); bt_ctf_stream_class_put(cw->stream_class); bt_ctf_writer_put(cw->writer); @@ -725,8 +1011,9 @@ static int ctf_writer__init(struct ctf_writer *cw, const char *path) { struct bt_ctf_writer *writer; struct bt_ctf_stream_class *stream_class; - struct bt_ctf_stream *stream; struct bt_ctf_clock *clock; + struct bt_ctf_field_type *pkt_ctx_type; + int ret; /* CTF writer */ writer = bt_ctf_writer_create(path); @@ -767,14 +1054,15 @@ static int ctf_writer__init(struct ctf_writer *cw, const char *path) if (ctf_writer__init_data(cw)) goto err_cleanup; - /* CTF stream instance */ - stream = bt_ctf_writer_create_stream(writer, stream_class); - if (!stream) { - pr("Failed to create CTF stream.\n"); + /* Add cpu_id for packet context */ + pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class); + if (!pkt_ctx_type) goto err_cleanup; - } - cw->stream = stream; + ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id"); + bt_ctf_field_type_put(pkt_ctx_type); + if (ret) + goto err_cleanup; /* CTF clock writer setup */ if (bt_ctf_writer_add_clock(writer, clock)) { @@ -791,6 +1079,28 @@ err: return -1; } +static int ctf_writer__flush_streams(struct ctf_writer *cw) +{ + int cpu, ret = 0; + + for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++) + ret = ctf_stream__flush(cw->stream[cpu]); + + return ret; +} + +static int convert__config(const char *var, const char *value, void *cb) +{ + struct convert *c = cb; + + if (!strcmp(var, "convert.queue-size")) { + c->queue_size = perf_config_u64(var, value); + return 0; + } + + return perf_default_config(var, value, cb); +} + int bt_convert__perf2ctf(const char *input, const char *path, bool force) { struct perf_session *session; @@ -817,6 +1127,8 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force) struct ctf_writer *cw = &c.writer; int err = -1; + perf_config(convert__config, &c); + /* CTF writer */ if (ctf_writer__init(cw, path)) return -1; @@ -826,6 +1138,11 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force) if (!session) goto free_writer; + if (c.queue_size) { + ordered_events__set_alloc_size(&session->ordered_events, + c.queue_size); + } + /* CTF writer env/clock setup */ if (ctf_writer__setup_env(cw, session)) goto free_session; @@ -834,9 +1151,14 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force) if (setup_events(cw, session)) goto free_session; + if (setup_streams(cw, session)) + goto free_session; + err = perf_session__process_events(session); if (!err) - err = bt_ctf_stream_flush(cw->stream); + err = ctf_writer__flush_streams(cw); + else + pr_err("Error during conversion.\n"); fprintf(stderr, "[ perf data convert: Converted '%s' into CTF data '%s' ]\n", @@ -847,11 +1169,15 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force) (double) 
c.events_size / 1024.0 / 1024.0, c.events_count); - /* its all good */ -free_session: perf_session__delete(session); + ctf_writer__cleanup(cw); + + return err; +free_session: + perf_session__delete(session); free_writer: ctf_writer__cleanup(cw); + pr_err("Error during conversion setup.\n"); return err; } diff --git a/kernel/tools/perf/util/db-export.c b/kernel/tools/perf/util/db-export.c index bb39a3ffc..1c9689e4c 100644 --- a/kernel/tools/perf/util/db-export.c +++ b/kernel/tools/perf/util/db-export.c @@ -122,6 +122,7 @@ int db_export__machine(struct db_export *dbe, struct machine *machine) int db_export__thread(struct db_export *dbe, struct thread *thread, struct machine *machine, struct comm *comm) { + struct thread *main_thread; u64 main_thread_db_id = 0; int err; @@ -131,8 +132,6 @@ int db_export__thread(struct db_export *dbe, struct thread *thread, thread->db_id = ++dbe->thread_last_db_id; if (thread->pid_ != -1) { - struct thread *main_thread; - if (thread->pid_ == thread->tid) { main_thread = thread; } else { @@ -144,14 +143,16 @@ int db_export__thread(struct db_export *dbe, struct thread *thread, err = db_export__thread(dbe, main_thread, machine, comm); if (err) - return err; + goto out_put; if (comm) { err = db_export__comm_thread(dbe, comm, thread); if (err) - return err; + goto out_put; } } main_thread_db_id = main_thread->db_id; + if (main_thread != thread) + thread__put(main_thread); } if (dbe->export_thread) @@ -159,6 +160,10 @@ int db_export__thread(struct db_export *dbe, struct thread *thread, machine); return 0; + +out_put: + thread__put(main_thread); + return err; } int db_export__comm(struct db_export *dbe, struct comm *comm, @@ -229,7 +234,7 @@ int db_export__symbol(struct db_export *dbe, struct symbol *sym, static struct thread *get_main_thread(struct machine *machine, struct thread *thread) { if (thread->pid_ == thread->tid) - return thread; + return thread__get(thread); if (thread->pid_ == -1) return NULL; @@ -309,12 +314,12 @@ int db_export__sample(struct db_export *dbe, union perf_event *event, err = db_export__thread(dbe, thread, al->machine, comm); if (err) - return err; + goto out_put; if (comm) { err = db_export__comm(dbe, comm, main_thread); if (err) - return err; + goto out_put; es.comm_db_id = comm->db_id; } @@ -322,7 +327,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event, err = db_ids_from_al(dbe, al, &es.dso_db_id, &es.sym_db_id, &es.offset); if (err) - return err; + goto out_put; if ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) && sample_addr_correlates_sym(&evsel->attr)) { @@ -332,20 +337,22 @@ int db_export__sample(struct db_export *dbe, union perf_event *event, err = db_ids_from_al(dbe, &addr_al, &es.addr_dso_db_id, &es.addr_sym_db_id, &es.addr_offset); if (err) - return err; + goto out_put; if (dbe->crp) { err = thread_stack__process(thread, comm, sample, al, &addr_al, es.db_id, dbe->crp); if (err) - return err; + goto out_put; } } if (dbe->export_sample) - return dbe->export_sample(dbe, &es); + err = dbe->export_sample(dbe, &es); - return 0; +out_put: + thread__put(main_thread); + return err; } static struct { diff --git a/kernel/tools/perf/util/debug.c b/kernel/tools/perf/util/debug.c index 2da5581ec..86d9c7302 100644 --- a/kernel/tools/perf/util/debug.c +++ b/kernel/tools/perf/util/debug.c @@ -36,6 +36,11 @@ static int _eprintf(int level, int var, const char *fmt, va_list args) return ret; } +int veprintf(int level, int var, const char *fmt, va_list args) +{ + return _eprintf(level, var, fmt, args); +} + int eprintf(int 
level, int var, const char *fmt, ...) { va_list args; diff --git a/kernel/tools/perf/util/debug.h b/kernel/tools/perf/util/debug.h index caac2fdc6..8b9a088c3 100644 --- a/kernel/tools/perf/util/debug.h +++ b/kernel/tools/perf/util/debug.h @@ -50,6 +50,7 @@ void pr_stat(const char *fmt, ...); int eprintf(int level, int var, const char *fmt, ...) __attribute__((format(printf, 3, 4))); int eprintf_time(int level, int var, u64 t, const char *fmt, ...) __attribute__((format(printf, 4, 5))); +int veprintf(int level, int var, const char *fmt, va_list args); int perf_debug_option(const char *str); diff --git a/kernel/tools/perf/util/dso.c b/kernel/tools/perf/util/dso.c index fc0ddd579..425df5c86 100644 --- a/kernel/tools/perf/util/dso.c +++ b/kernel/tools/perf/util/dso.c @@ -4,6 +4,7 @@ #include "symbol.h" #include "dso.h" #include "machine.h" +#include "auxtrace.h" #include "util.h" #include "debug.h" @@ -165,12 +166,28 @@ bool is_supported_compression(const char *ext) return false; } -bool is_kernel_module(const char *pathname) +bool is_kernel_module(const char *pathname, int cpumode) { struct kmod_path m; - - if (kmod_path__parse(&m, pathname)) - return NULL; + int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK; + + WARN_ONCE(mode != cpumode, + "Internal error: passing unmasked cpumode (%x) to is_kernel_module", + cpumode); + + switch (mode) { + case PERF_RECORD_MISC_USER: + case PERF_RECORD_MISC_HYPERVISOR: + case PERF_RECORD_MISC_GUEST_USER: + return false; + /* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */ + default: + if (kmod_path__parse(&m, pathname)) { + pr_err("Failed to check whether %s is a kernel module or not. Assume it is.", + pathname); + return true; + } + } return m.kmod; } @@ -214,12 +231,33 @@ int __kmod_path__parse(struct kmod_path *m, const char *path, { const char *name = strrchr(path, '/'); const char *ext = strrchr(path, '.'); + bool is_simple_name = false; memset(m, 0x0, sizeof(*m)); name = name ? name + 1 : path; + /* + * '.' is also a valid character for module name. For example: + * [aaa.bbb] is a valid module name. '[' should have higher + * priority than '.ko' suffix. + * + * The kernel names are from machine__mmap_name. Such + * name should belong to kernel itself, not kernel module. + */ + if (name[0] == '[') { + is_simple_name = true; + if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) || + (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) || + (strncmp(name, "[vdso]", 6) == 0) || + (strncmp(name, "[vsyscall]", 10) == 0)) { + m->kmod = false; + + } else + m->kmod = true; + } + /* No extension, just return name. */ - if (ext == NULL) { + if ((ext == NULL) || is_simple_name) { if (alloc_name) { m->name = strdup(name); return m->name ? 0 : -ENOMEM; @@ -264,6 +302,7 @@ int __kmod_path__parse(struct kmod_path *m, const char *path, */ static LIST_HEAD(dso__data_open); static long dso__data_open_cnt; +static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER; static void dso__list_add(struct dso *dso) { @@ -433,18 +472,12 @@ static void check_data_close(void) */ void dso__data_close(struct dso *dso) { + pthread_mutex_lock(&dso__data_open_lock); close_dso(dso); + pthread_mutex_unlock(&dso__data_open_lock); } -/** - * dso__data_fd - Get dso's data file descriptor - * @dso: dso object - * @machine: machine object - * - * External interface to find dso's file, open it and - * returns file descriptor. 
- */ -int dso__data_fd(struct dso *dso, struct machine *machine) +static void try_to_open_dso(struct dso *dso, struct machine *machine) { enum dso_binary_type binary_type_data[] = { DSO_BINARY_TYPE__BUILD_ID_CACHE, DSO_BINARY_TYPE__SYSTEM_PATH_DSO, DSO_BINARY_TYPE__NOT_FOUND, }; int i = 0; - if (dso->data.status == DSO_DATA_STATUS_ERROR) - return -1; - if (dso->data.fd >= 0) - goto out; + return; if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) { dso->data.fd = open_dso(dso, machine); @@ -477,10 +507,38 @@ out: dso->data.status = DSO_DATA_STATUS_OK; else dso->data.status = DSO_DATA_STATUS_ERROR; +} + +/** + * dso__data_get_fd - Get dso's data file descriptor + * @dso: dso object + * @machine: machine object + * + * External interface to find dso's file, open it and + * return the file descriptor. It should be paired with + * dso__data_put_fd() if it returns a non-negative value. + */ +int dso__data_get_fd(struct dso *dso, struct machine *machine) +{ + if (dso->data.status == DSO_DATA_STATUS_ERROR) + return -1; + + if (pthread_mutex_lock(&dso__data_open_lock) < 0) + return -1; + + try_to_open_dso(dso, machine); + + if (dso->data.fd < 0) + pthread_mutex_unlock(&dso__data_open_lock); return dso->data.fd; } +void dso__data_put_fd(struct dso *dso __maybe_unused) +{ + pthread_mutex_unlock(&dso__data_open_lock); +} + bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by) { u32 flag = 1 << by; @@ -494,10 +552,12 @@ bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by) } static void -dso_cache__free(struct rb_root *root) +dso_cache__free(struct dso *dso) { + struct rb_root *root = &dso->data.cache; struct rb_node *next = rb_first(root); + pthread_mutex_lock(&dso->lock); while (next) { struct dso_cache *cache; @@ -506,10 +566,12 @@ dso_cache__free(struct rb_root *root) rb_erase(&cache->rb_node, root); free(cache); } + pthread_mutex_unlock(&dso->lock); } -static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset) +static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset) { + const struct rb_root *root = &dso->data.cache; struct rb_node * const *p = &root->rb_node; const struct rb_node *parent = NULL; struct dso_cache *cache; @@ -528,17 +590,20 @@ static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset) else return cache; } + return NULL; } -static void -dso_cache__insert(struct rb_root *root, struct dso_cache *new) +static struct dso_cache * +dso_cache__insert(struct dso *dso, struct dso_cache *new) { + struct rb_root *root = &dso->data.cache; struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct dso_cache *cache; u64 offset = new->offset; + pthread_mutex_lock(&dso->lock); while (*p != NULL) { u64 end; @@ -550,10 +615,17 @@ dso_cache__insert(struct rb_root *root, struct dso_cache *new) p = &(*p)->rb_left; else if (offset >= end) p = &(*p)->rb_right; + else + goto out; } rb_link_node(&new->rb_node, parent, p); rb_insert_color(&new->rb_node, root); + + cache = NULL; +out: + pthread_mutex_unlock(&dso->lock); + return cache; } static ssize_t @@ -568,19 +640,33 @@ dso_cache__memcpy(struct dso_cache *cache, u64 offset, } static ssize_t -dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size) +dso_cache__read(struct dso *dso, struct machine *machine, + u64 offset, u8 *data, ssize_t size) { struct dso_cache *cache; struct dso_cache *old; ssize_t ret; do { u64 cache_offset; - ret = -ENOMEM; - cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE); if (!cache) + 
return -ENOMEM; + + pthread_mutex_lock(&dso__data_open_lock); + + /* + * dso->data.fd might be closed if another thread opened another + * file (dso) due to open file limit (RLIMIT_NOFILE). + */ + try_to_open_dso(dso, machine); + + if (dso->data.fd < 0) { + ret = -errno; + dso->data.status = DSO_DATA_STATUS_ERROR; break; + } cache_offset = offset & DSO__DATA_CACHE_MASK; @@ -590,11 +676,20 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size) cache->offset = cache_offset; cache->size = ret; - dso_cache__insert(&dso->data.cache, cache); + } while (0); - ret = dso_cache__memcpy(cache, offset, data, size); + pthread_mutex_unlock(&dso__data_open_lock); - } while (0); + if (ret > 0) { + old = dso_cache__insert(dso, cache); + if (old) { + /* we lose the race */ + free(cache); + cache = old; + } + + ret = dso_cache__memcpy(cache, offset, data, size); + } if (ret <= 0) free(cache); @@ -602,16 +697,16 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size) return ret; } -static ssize_t dso_cache_read(struct dso *dso, u64 offset, - u8 *data, ssize_t size) +static ssize_t dso_cache_read(struct dso *dso, struct machine *machine, + u64 offset, u8 *data, ssize_t size) { struct dso_cache *cache; - cache = dso_cache__find(&dso->data.cache, offset); + cache = dso_cache__find(dso, offset); if (cache) return dso_cache__memcpy(cache, offset, data, size); else - return dso_cache__read(dso, offset, data, size); + return dso_cache__read(dso, machine, offset, data, size); } /* @@ -619,7 +714,8 @@ static ssize_t dso_cache_read(struct dso *dso, u64 offset, * in the rb_tree. Any read to already cached data is served * by cached data. */ -static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size) +static ssize_t cached_read(struct dso *dso, struct machine *machine, + u64 offset, u8 *data, ssize_t size) { ssize_t r = 0; u8 *p = data; @@ -627,7 +723,7 @@ static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size) do { ssize_t ret; - ret = dso_cache_read(dso, offset, p, size); + ret = dso_cache_read(dso, machine, offset, p, size); if (ret < 0) return ret; @@ -647,21 +743,44 @@ static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size) return r; } -static int data_file_size(struct dso *dso) +static int data_file_size(struct dso *dso, struct machine *machine) { + int ret = 0; struct stat st; char sbuf[STRERR_BUFSIZE]; - if (!dso->data.file_size) { - if (fstat(dso->data.fd, &st)) { - pr_err("dso mmap failed, fstat: %s\n", - strerror_r(errno, sbuf, sizeof(sbuf))); - return -1; - } - dso->data.file_size = st.st_size; + if (dso->data.file_size) + return 0; + + if (dso->data.status == DSO_DATA_STATUS_ERROR) + return -1; + + pthread_mutex_lock(&dso__data_open_lock); + + /* + * dso->data.fd might be closed if another thread opened another + * file (dso) due to open file limit (RLIMIT_NOFILE). 
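The fill-outside-the-lock, insert-or-lose pattern that dso_cache__read() and dso_cache__insert() use above, reduced to a minimal sketch (an illustrative linked list stands in for the rbtree; names are hypothetical):

	#include <pthread.h>
	#include <stdlib.h>

	struct node { long key; struct node *next; };

	static struct node *head;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Insert @new unless a node with the same key already exists; return the winner. */
	static struct node *insert_or_existing(struct node *new)
	{
		struct node *n;

		pthread_mutex_lock(&lock);
		for (n = head; n; n = n->next)
			if (n->key == new->key)
				break;		/* lost the race: another thread inserted it first */
		if (!n) {
			new->next = head;
			head = new;
			n = new;
		}
		pthread_mutex_unlock(&lock);

		if (n != new)
			free(new);		/* the loser frees its duplicate */
		return n;
	}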
+ */ + try_to_open_dso(dso, machine); + + if (dso->data.fd < 0) { + ret = -errno; + dso->data.status = DSO_DATA_STATUS_ERROR; + goto out; } - return 0; + if (fstat(dso->data.fd, &st) < 0) { + ret = -errno; + pr_err("dso cache fstat failed: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); + dso->data.status = DSO_DATA_STATUS_ERROR; + goto out; + } + dso->data.file_size = st.st_size; + +out: + pthread_mutex_unlock(&dso__data_open_lock); + return ret; } /** @@ -673,23 +792,17 @@ static int data_file_size(struct dso *dso) */ off_t dso__data_size(struct dso *dso, struct machine *machine) { - int fd; - - fd = dso__data_fd(dso, machine); - if (fd < 0) - return fd; - - if (data_file_size(dso)) + if (data_file_size(dso, machine)) return -1; /* For now just estimate dso data size is close to file size */ return dso->data.file_size; } -static ssize_t data_read_offset(struct dso *dso, u64 offset, - u8 *data, ssize_t size) +static ssize_t data_read_offset(struct dso *dso, struct machine *machine, + u64 offset, u8 *data, ssize_t size) { - if (data_file_size(dso)) + if (data_file_size(dso, machine)) return -1; /* Check the offset sanity. */ @@ -699,7 +812,7 @@ static ssize_t data_read_offset(struct dso *dso, u64 offset, if (offset + size < offset) return -1; - return cached_read(dso, offset, data, size); + return cached_read(dso, machine, offset, data, size); } /** @@ -716,10 +829,10 @@ static ssize_t data_read_offset(struct dso *dso, u64 offset, ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, u64 offset, u8 *data, ssize_t size) { - if (dso__data_fd(dso, machine) < 0) + if (dso->data.status == DSO_DATA_STATUS_ERROR) return -1; - return data_read_offset(dso, offset, data, size); + return data_read_offset(dso, machine, offset, data, size); } /** @@ -751,13 +864,13 @@ struct map *dso__new_map(const char *name) return map; } -struct dso *dso__kernel_findnew(struct machine *machine, const char *name, - const char *short_name, int dso_type) +struct dso *machine__findnew_kernel(struct machine *machine, const char *name, + const char *short_name, int dso_type) { /* * The kernel dso could be created by build_id processing. */ - struct dso *dso = __dsos__findnew(&machine->kernel_dsos, name); + struct dso *dso = machine__findnew_dso(machine, name); /* * We need to run this in all cases, since during the build_id @@ -776,8 +889,8 @@ struct dso *dso__kernel_findnew(struct machine *machine, const char *name, * Either one of the dso or name parameter must be non-NULL or the * function will not work. 
*/ -static struct dso *dso__findlink_by_longname(struct rb_root *root, - struct dso *dso, const char *name) +static struct dso *__dso__findlink_by_longname(struct rb_root *root, + struct dso *dso, const char *name) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; @@ -820,27 +933,43 @@ static struct dso *dso__findlink_by_longname(struct rb_root *root, /* Add new node and rebalance tree */ rb_link_node(&dso->rb_node, parent, p); rb_insert_color(&dso->rb_node, root); + dso->root = root; } return NULL; } -static inline struct dso * -dso__find_by_longname(const struct rb_root *root, const char *name) +static inline struct dso *__dso__find_by_longname(struct rb_root *root, + const char *name) { - return dso__findlink_by_longname((struct rb_root *)root, NULL, name); + return __dso__findlink_by_longname(root, NULL, name); } void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) { + struct rb_root *root = dso->root; + if (name == NULL) return; if (dso->long_name_allocated) free((char *)dso->long_name); + if (root) { + rb_erase(&dso->rb_node, root); + /* + * __dso__findlink_by_longname() isn't guaranteed to add it + * back, so a clean removal is required here. + */ + RB_CLEAR_NODE(&dso->rb_node); + dso->root = NULL; + } + dso->long_name = name; dso->long_name_len = strlen(name); dso->long_name_allocated = name_allocated; + + if (root) + __dso__findlink_by_longname(root, dso, NULL); } void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) @@ -933,8 +1062,11 @@ struct dso *dso__new(const char *name) dso->kernel = DSO_TYPE_USER; dso->needs_swap = DSO_SWAP__UNSET; RB_CLEAR_NODE(&dso->rb_node); + dso->root = NULL; INIT_LIST_HEAD(&dso->node); INIT_LIST_HEAD(&dso->data.open_entry); + pthread_mutex_init(&dso->lock, NULL); + atomic_set(&dso->refcnt, 1); } return dso; @@ -961,12 +1093,27 @@ void dso__delete(struct dso *dso) } dso__data_close(dso); - dso_cache__free(&dso->data.cache); + auxtrace_cache__free(dso->auxtrace_cache); + dso_cache__free(dso); dso__free_a2l(dso); zfree(&dso->symsrc_filename); + pthread_mutex_destroy(&dso->lock); free(dso); } +struct dso *dso__get(struct dso *dso) +{ + if (dso) + atomic_inc(&dso->refcnt); + return dso; +} + +void dso__put(struct dso *dso) +{ + if (dso && atomic_dec_and_test(&dso->refcnt)) + dso__delete(dso); +} + void dso__set_build_id(struct dso *dso, void *build_id) { memcpy(dso->build_id, build_id, sizeof(dso->build_id)); @@ -1033,14 +1180,41 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits) return have_build_id; } -void dsos__add(struct dsos *dsos, struct dso *dso) +void __dsos__add(struct dsos *dsos, struct dso *dso) { list_add_tail(&dso->node, &dsos->head); - dso__findlink_by_longname(&dsos->root, dso, NULL); + __dso__findlink_by_longname(&dsos->root, dso, NULL); + /* + * It is now in the linked list, grab a reference, then garbage collect + * this when needing memory, by looking at LRU dso instances in the + * list with atomic_read(&dso->refcnt) == 1, i.e. no references + * anywhere besides the one for the list, do, under a lock for the + * list: remove it from the list, then a dso__put(), that probably will + * be the last and will then call dso__delete(), end of life. + * + * That, or at the end of the 'struct machine' lifetime, when all + * 'struct dso' instances will be removed from the list, in + * dsos__exit(), if they have no other reference from some other data + * structure. 
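The get/put discipline this comment describes, as a minimal sketch using C11 atomics in place of tools/perf's atomic_t (illustrative only, not the patch's code):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct obj { atomic_int refcnt; };

	static struct obj *obj__get(struct obj *o)
	{
		if (o)
			atomic_fetch_add(&o->refcnt, 1);
		return o;
	}

	static void obj__put(struct obj *o)
	{
		/* atomic_fetch_sub() returns the old value: 1 means we dropped the last reference. */
		if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
			free(o);
	}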
+ * + * E.g.: after processing a 'perf.data' file and storing references + * to objects instantiated while processing events, we will have + * references to the 'thread', 'map', 'dso' structs all from 'struct + * hist_entry' instances, but we may not need anything not referenced, + * so we might as well call machines__exit()/machines__delete() and + * garbage collect it. + */ + dso__get(dso); } -struct dso *dsos__find(const struct dsos *dsos, const char *name, - bool cmp_short) +void dsos__add(struct dsos *dsos, struct dso *dso) +{ + pthread_rwlock_wrlock(&dsos->lock); + __dsos__add(dsos, dso); + pthread_rwlock_unlock(&dsos->lock); +} + +struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short) { struct dso *pos; @@ -1050,15 +1224,24 @@ struct dso *dsos__find(const struct dsos *dsos, const char *name, return pos; return NULL; } - return dso__find_by_longname(&dsos->root, name); + return __dso__find_by_longname(&dsos->root, name); +} + +struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short) +{ + struct dso *dso; + pthread_rwlock_rdlock(&dsos->lock); + dso = __dsos__find(dsos, name, cmp_short); + pthread_rwlock_unlock(&dsos->lock); + return dso; } -struct dso *dsos__addnew(struct dsos *dsos, const char *name) +struct dso *__dsos__addnew(struct dsos *dsos, const char *name) { struct dso *dso = dso__new(name); if (dso != NULL) { - dsos__add(dsos, dso); + __dsos__add(dsos, dso); dso__set_basename(dso); } return dso; @@ -1066,9 +1249,18 @@ struct dso *dsos__addnew(struct dsos *dsos, const char *name) struct dso *__dsos__findnew(struct dsos *dsos, const char *name) { - struct dso *dso = dsos__find(dsos, name, false); + struct dso *dso = __dsos__find(dsos, name, false); - return dso ? dso : dsos__addnew(dsos, name); + return dso ? 
dso : __dsos__addnew(dsos, name); +} + +struct dso *dsos__findnew(struct dsos *dsos, const char *name) +{ + struct dso *dso; + pthread_rwlock_wrlock(&dsos->lock); + dso = dso__get(__dsos__findnew(dsos, name)); + pthread_rwlock_unlock(&dsos->lock); + return dso; } size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, @@ -1130,12 +1322,15 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp) enum dso_type dso__type(struct dso *dso, struct machine *machine) { int fd; + enum dso_type type = DSO__TYPE_UNKNOWN; - fd = dso__data_fd(dso, machine); - if (fd < 0) - return DSO__TYPE_UNKNOWN; + fd = dso__data_get_fd(dso, machine); + if (fd >= 0) { + type = dso__type_fd(fd); + dso__data_put_fd(dso); + } - return dso__type_fd(fd); + return type; } int dso__strerror_load(struct dso *dso, char *buf, size_t buflen) diff --git a/kernel/tools/perf/util/dso.h b/kernel/tools/perf/util/dso.h index e0901b4ed..45ec4d0a5 100644 --- a/kernel/tools/perf/util/dso.h +++ b/kernel/tools/perf/util/dso.h @@ -1,9 +1,11 @@ #ifndef __PERF_DSO #define __PERF_DSO +#include <linux/atomic.h> #include #include #include +#include <pthread.h> #include #include #include "map.h" @@ -124,13 +126,22 @@ struct dso_cache { struct dsos { struct list_head head; struct rb_root root; /* rbtree root sorted by long name */ + pthread_rwlock_t lock; }; +struct auxtrace_cache; + struct dso { + pthread_mutex_t lock; struct list_head node; struct rb_node rb_node; /* rbtree node sorted by long name */ + struct rb_root *root; /* root of rbtree that rb_node is in */ struct rb_root symbols[MAP__NR_TYPES]; struct rb_root symbol_names[MAP__NR_TYPES]; + struct { + u64 addr; + struct symbol *symbol; + } last_find_result[MAP__NR_TYPES]; void *a2l; char *symsrc_filename; unsigned int a2l_fails; @@ -156,6 +167,7 @@ struct dso { u16 long_name_len; u16 short_name_len; void *dwfl; /* DWARF debug info */ + struct auxtrace_cache *auxtrace_cache; /* dso data file */ struct { @@ -173,7 +185,7 @@ struct dso { void *priv; u64 db_id; }; - + atomic_t refcnt; char name[0]; }; @@ -200,6 +212,17 @@ void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated); int dso__name_len(const struct dso *dso); +struct dso *dso__get(struct dso *dso); +void dso__put(struct dso *dso); + +static inline void __dso__zput(struct dso **dso) +{ + dso__put(*dso); + *dso = NULL; +} + +#define dso__zput(dso) __dso__zput(&dso) + bool dso__loaded(const struct dso *dso, enum map_type type); bool dso__sorted_by_name(const struct dso *dso, enum map_type type); @@ -216,7 +239,7 @@ char dso__symtab_origin(const struct dso *dso); int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type, char *root_dir, char *filename, size_t size); bool is_supported_compression(const char *ext); -bool is_kernel_module(const char *pathname); +bool is_kernel_module(const char *pathname, int cpumode); bool decompress_to_file(const char *ext, const char *filename, int output_fd); bool dso__needs_decompress(struct dso *dso); @@ -236,7 +259,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path, /* * The dso__data_* external interface provides following functions: - * dso__data_fd + * dso__data_get_fd + * dso__data_put_fd * dso__data_close * dso__data_size * dso__data_read_offset @@ -253,8 +277,11 @@ int __kmod_path__parse(struct kmod_path *m, const char *path, * The current usage of the dso__data_* interface is as follows: * * Get DSO's fd: - * int fd = dso__data_fd(dso, machine); - * USE 'fd' SOMEHOW + * int fd = dso__data_get_fd(dso, machine); + * if (fd >= 0) { + * 
USE 'fd' SOMEHOW + * dso__data_put_fd(dso); + * } * * Read DSO's data: * n = dso__data_read_offset(dso_0, &machine, 0, buf, BUFSIZE); @@ -273,7 +300,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path, * * TODO */ -int dso__data_fd(struct dso *dso, struct machine *machine); +int dso__data_get_fd(struct dso *dso, struct machine *machine); +void dso__data_put_fd(struct dso *dso __maybe_unused); void dso__data_close(struct dso *dso); off_t dso__data_size(struct dso *dso, struct machine *machine); @@ -285,16 +313,20 @@ ssize_t dso__data_read_addr(struct dso *dso, struct map *map, bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by); struct map *dso__new_map(const char *name); -struct dso *dso__kernel_findnew(struct machine *machine, const char *name, - const char *short_name, int dso_type); +struct dso *machine__findnew_kernel(struct machine *machine, const char *name, + const char *short_name, int dso_type); +void __dsos__add(struct dsos *dsos, struct dso *dso); void dsos__add(struct dsos *dsos, struct dso *dso); -struct dso *dsos__addnew(struct dsos *dsos, const char *name); -struct dso *dsos__find(const struct dsos *dsos, const char *name, - bool cmp_short); +struct dso *__dsos__addnew(struct dsos *dsos, const char *name); +struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short); +struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short); struct dso *__dsos__findnew(struct dsos *dsos, const char *name); +struct dso *dsos__findnew(struct dsos *dsos, const char *name); bool __dsos__read_build_ids(struct list_head *head, bool with_hits); +void dso__reset_find_symbol_cache(struct dso *dso); + size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, bool (skip)(struct dso *dso, int parm), int parm); size_t __dsos__fprintf(struct list_head *head, FILE *fp); diff --git a/kernel/tools/perf/util/dwarf-aux.c b/kernel/tools/perf/util/dwarf-aux.c index c34e02402..a509aa843 100644 --- a/kernel/tools/perf/util/dwarf-aux.c +++ b/kernel/tools/perf/util/dwarf-aux.c @@ -139,10 +139,26 @@ int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr, bool die_compare_name(Dwarf_Die *dw_die, const char *tname) { const char *name; + name = dwarf_diename(dw_die); return name ? (strcmp(tname, name) == 0) : false; } +/** + * die_match_name - Match diename and glob + * @dw_die: a DIE + * @glob: a string of target glob pattern + * + * Match the name of @dw_die against the glob pattern @glob. Return false if matching fails. + */ +bool die_match_name(Dwarf_Die *dw_die, const char *glob) +{ + const char *name; + + name = dwarf_diename(dw_die); + return name ? strglobmatch(name, glob) : false; +} + /** * die_get_call_lineno - Get callsite line number of inline-function instance * @in_die: a DIE of an inlined function instance @@ -417,6 +433,43 @@ struct __addr_die_search_param { Dwarf_Die *die_mem; }; +static int __die_search_func_tail_cb(Dwarf_Die *fn_die, void *data) +{ + struct __addr_die_search_param *ad = data; + Dwarf_Addr addr = 0; + + if (dwarf_tag(fn_die) == DW_TAG_subprogram && + !dwarf_highpc(fn_die, &addr) && + addr == ad->addr) { + memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die)); + return DWARF_CB_ABORT; + } + return DWARF_CB_OK; +} + +/** + * die_find_tailfunc - Search for a non-inlined function with tail call at + * given address + * @cu_die: a CU DIE which includes @addr + * @addr: target address + * @die_mem: a buffer for result DIE + * + * Search for a non-inlined function DIE with tail call at @addr. 
Stores the + * DIE to @die_mem and returns it if found. Returns NULL on failure. + */ +Dwarf_Die *die_find_tailfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, + Dwarf_Die *die_mem) +{ + struct __addr_die_search_param ad; + ad.addr = addr; + ad.die_mem = die_mem; + /* dwarf_getscopes can't find subprogram. */ + if (!dwarf_getfuncs(cu_die, __die_search_func_tail_cb, &ad, 0)) + return NULL; + else + return die_mem; +} + /* die_find callback for non-inlined function search */ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data) { @@ -681,15 +734,18 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data) Dwarf_Lines *lines; Dwarf_Line *line; Dwarf_Addr addr; - const char *fname; + const char *fname, *decf = NULL; int lineno, ret = 0; + int decl = 0, inl; Dwarf_Die die_mem, *cu_die; size_t nlines, i; /* Get the CU die */ - if (dwarf_tag(rt_die) != DW_TAG_compile_unit) + if (dwarf_tag(rt_die) != DW_TAG_compile_unit) { cu_die = dwarf_diecu(rt_die, &die_mem, NULL, NULL); - else + dwarf_decl_line(rt_die, &decl); + decf = dwarf_decl_file(rt_die); + } else cu_die = rt_die; if (!cu_die) { pr_debug2("Failed to get CU from given DIE.\n"); @@ -714,15 +770,21 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data) continue; } /* Filter lines based on address */ - if (rt_die != cu_die) + if (rt_die != cu_die) { /* * Address filtering * The line is included in given function, and * no inline block includes it. */ - if (!dwarf_haspc(rt_die, addr) || - die_find_inlinefunc(rt_die, addr, &die_mem)) + if (!dwarf_haspc(rt_die, addr)) continue; + if (die_find_inlinefunc(rt_die, addr, &die_mem)) { + dwarf_decl_line(&die_mem, &inl); + if (inl != decl || + decf != dwarf_decl_file(&die_mem)) + continue; + } + } /* Get source line */ fname = dwarf_linesrc(line, NULL, NULL); @@ -832,19 +894,17 @@ Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name, /** * die_get_typename - Get the name of given variable DIE * @vr_die: a variable DIE - * @buf: a buffer for result type name - * @len: a max-length of @buf + * @buf: a strbuf for result type name * - * Get the name of @vr_die and stores it to @buf. Return the actual length - * of type name if succeeded. Return -E2BIG if @len is not enough long, and - * Return -ENOENT if failed to find type name. + * Get the name of @vr_die and store it to @buf. Return 0 if it succeeded, + * or -ENOENT if it failed to find the type name. * Note that the result will store the typedef name if possible, and store * "*(function_type)" if the type is a function pointer. */ -int die_get_typename(Dwarf_Die *vr_die, char *buf, int len) +int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf) { Dwarf_Die type; - int tag, ret, ret2; + int tag, ret; const char *tmp = ""; if (__die_get_real_type(vr_die, &type) == NULL) @@ -855,8 +915,8 @@ int die_get_typename(Dwarf_Die *vr_die, char *buf, int len) tmp = "*"; else if (tag == DW_TAG_subroutine_type) { /* Function pointer */ - ret = snprintf(buf, len, "(function_type)"); - return (ret >= len) ? -E2BIG : ret; + strbuf_addf(buf, "(function_type)"); + return 0; } else { if (!dwarf_diename(&type)) return -ENOENT; @@ -867,39 +927,156 @@ int die_get_typename(Dwarf_Die *vr_die, char *buf, int len) else if (tag == DW_TAG_enumeration_type) tmp = "enum "; /* Write a base name */ - ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type)); - return (ret >= len) ? 
-E2BIG : ret; - } - ret = die_get_typename(&type, buf, len); - if (ret > 0) { - ret2 = snprintf(buf + ret, len - ret, "%s", tmp); - ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret; + strbuf_addf(buf, "%s%s", tmp, dwarf_diename(&type)); + return 0; } + ret = die_get_typename(&type, buf); + if (ret == 0) + strbuf_addf(buf, "%s", tmp); + return ret; } /** * die_get_varname - Get the name and type of given variable DIE * @vr_die: a variable DIE - * @buf: a buffer for type and variable name - * @len: the max-length of @buf + * @buf: a strbuf for type and variable name * * Get the name and type of @vr_die and stores it in @buf as "type\tname". */ -int die_get_varname(Dwarf_Die *vr_die, char *buf, int len) +int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf) { - int ret, ret2; + int ret; - ret = die_get_typename(vr_die, buf, len); + ret = die_get_typename(vr_die, buf); if (ret < 0) { pr_debug("Failed to get type, make it unknown.\n"); - ret = snprintf(buf, len, "(unknown_type)"); + strbuf_addf(buf, "(unknown_type)"); } - if (ret > 0) { - ret2 = snprintf(buf + ret, len - ret, "\t%s", - dwarf_diename(vr_die)); - ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret; + + strbuf_addf(buf, "\t%s", dwarf_diename(vr_die)); + + return 0; +} + +/** + * die_get_var_innermost_scope - Get innermost scope range of given variable DIE + * @sp_die: a subprogram DIE + * @vr_die: a variable DIE + * @buf: a strbuf for variable byte offset range + * + * Get the innermost scope range of @vr_die and stores it in @buf as + * "@<function_name+[NN-NN,NN-NN]>". + */ +static int die_get_var_innermost_scope(Dwarf_Die *sp_die, Dwarf_Die *vr_die, + struct strbuf *buf) +{ + Dwarf_Die *scopes; + int count; + size_t offset = 0; + Dwarf_Addr base; + Dwarf_Addr start, end; + Dwarf_Addr entry; + int ret; + bool first = true; + const char *name; + + ret = dwarf_entrypc(sp_die, &entry); + if (ret) + return ret; + + name = dwarf_diename(sp_die); + if (!name) + return -ENOENT; + + count = dwarf_getscopes_die(vr_die, &scopes); + + /* (*SCOPES)[1] is the DIE for the scope containing that scope */ + if (count <= 1) { + ret = -EINVAL; + goto out; } + + while ((offset = dwarf_ranges(&scopes[1], offset, &base, + &start, &end)) > 0) { + start -= entry; + end -= entry; + + if (first) { + strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64, + name, start, end); + first = false; + } else { + strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64, + start, end); + } + } + + if (!first) + strbuf_addf(buf, "]>"); + +out: + free(scopes); return ret; } +/** + * die_get_var_range - Get byte offset range of given variable DIE + * @sp_die: a subprogram DIE + * @vr_die: a variable DIE + * @buf: a strbuf for type and variable name and byte offset range + * + * Get the byte offset range of @vr_die and stores it in @buf as + * "@<function_name+[NN-NN,NN-NN]>". 
+ */ +int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf) +{ + int ret = 0; + Dwarf_Addr base; + Dwarf_Addr start, end; + Dwarf_Addr entry; + Dwarf_Op *op; + size_t nops; + size_t offset = 0; + Dwarf_Attribute attr; + bool first = true; + const char *name; + + ret = dwarf_entrypc(sp_die, &entry); + if (ret) + return ret; + + name = dwarf_diename(sp_die); + if (!name) + return -ENOENT; + + if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL) + return -EINVAL; + + while ((offset = dwarf_getlocations( + &attr, offset, &base, + &start, &end, &op, &nops)) > 0) { + if (start == 0) { + /* Single Location Descriptions */ + ret = die_get_var_innermost_scope(sp_die, vr_die, buf); + return ret; + } + + /* Location Lists */ + start -= entry; + end -= entry; + if (first) { + strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64, + name, start, end); + first = false; + } else { + strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64, + start, end); + } + } + + if (!first) + strbuf_addf(buf, "]>"); + + return ret; +} diff --git a/kernel/tools/perf/util/dwarf-aux.h b/kernel/tools/perf/util/dwarf-aux.h index af7dbcd5f..c42ec366f 100644 --- a/kernel/tools/perf/util/dwarf-aux.h +++ b/kernel/tools/perf/util/dwarf-aux.h @@ -47,6 +47,9 @@ extern bool die_is_func_instance(Dwarf_Die *dw_die); /* Compare diename and tname */ extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname); +/* Matching diename with glob pattern */ +extern bool die_match_name(Dwarf_Die *dw_die, const char *glob); + /* Get callsite line number of inline-function instance */ extern int die_get_call_lineno(Dwarf_Die *in_die); @@ -82,6 +85,10 @@ extern Dwarf_Die *die_find_child(Dwarf_Die *rt_die, extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, Dwarf_Die *die_mem); +/* Search a non-inlined function with tail call at given address */ +Dwarf_Die *die_find_tailfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, + Dwarf_Die *die_mem); + /* Search the top inlined function including given address */ extern Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, Dwarf_Die *die_mem); @@ -114,8 +121,10 @@ extern Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name, Dwarf_Die *die_mem); /* Get the name of given variable DIE */ -extern int die_get_typename(Dwarf_Die *vr_die, char *buf, int len); +extern int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf); /* Get the name and type of given variable DIE, stored as "type\tname" */ -extern int die_get_varname(Dwarf_Die *vr_die, char *buf, int len); +extern int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf); +extern int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, + struct strbuf *buf); #endif diff --git a/kernel/tools/perf/util/env.c b/kernel/tools/perf/util/env.c new file mode 100644 index 000000000..6af4f7c36 --- /dev/null +++ b/kernel/tools/perf/util/env.c @@ -0,0 +1,86 @@ +#include "cpumap.h" +#include "env.h" +#include "util.h" + +struct perf_env perf_env; + +void perf_env__exit(struct perf_env *env) +{ + zfree(&env->hostname); + zfree(&env->os_release); + zfree(&env->version); + zfree(&env->arch); + zfree(&env->cpu_desc); + zfree(&env->cpuid); + zfree(&env->cmdline); + zfree(&env->cmdline_argv); + zfree(&env->sibling_cores); + zfree(&env->sibling_threads); + zfree(&env->numa_nodes); + zfree(&env->pmu_mappings); + zfree(&env->cpu); +} + +int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]) +{ + int i; + + /* + * If env->cmdline_argv has already been set, do not override it. 
This allows + * a command to set the cmdline, parse args and then call another + * builtin function that implements a command -- e.g, cmd_kvm calling + * cmd_record. + */ + if (env->cmdline_argv != NULL) + return 0; + + /* do not include NULL termination */ + env->cmdline_argv = calloc(argc, sizeof(char *)); + if (env->cmdline_argv == NULL) + goto out_enomem; + + /* + * Must copy argv contents because it gets moved around during option + * parsing: + */ + for (i = 0; i < argc ; i++) { + env->cmdline_argv[i] = argv[i]; + if (env->cmdline_argv[i] == NULL) + goto out_free; + } + + env->nr_cmdline = argc; + + return 0; +out_free: + zfree(&env->cmdline_argv); +out_enomem: + return -ENOMEM; +} + +int perf_env__read_cpu_topology_map(struct perf_env *env) +{ + int cpu, nr_cpus; + + if (env->cpu != NULL) + return 0; + + if (env->nr_cpus_avail == 0) + env->nr_cpus_avail = sysconf(_SC_NPROCESSORS_CONF); + + nr_cpus = env->nr_cpus_avail; + if (nr_cpus == -1) + return -EINVAL; + + env->cpu = calloc(nr_cpus, sizeof(env->cpu[0])); + if (env->cpu == NULL) + return -ENOMEM; + + for (cpu = 0; cpu < nr_cpus; ++cpu) { + env->cpu[cpu].core_id = cpu_map__get_core_id(cpu); + env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu); + } + + env->nr_cpus_avail = nr_cpus; + return 0; +} diff --git a/kernel/tools/perf/util/env.h b/kernel/tools/perf/util/env.h new file mode 100644 index 000000000..0132b9557 --- /dev/null +++ b/kernel/tools/perf/util/env.h @@ -0,0 +1,44 @@ +#ifndef __PERF_ENV_H +#define __PERF_ENV_H + +struct cpu_topology_map { + int socket_id; + int core_id; +}; + +struct perf_env { + char *hostname; + char *os_release; + char *version; + char *arch; + int nr_cpus_online; + int nr_cpus_avail; + char *cpu_desc; + char *cpuid; + unsigned long long total_mem; + unsigned int msr_pmu_type; + + int nr_cmdline; + int nr_sibling_cores; + int nr_sibling_threads; + int nr_numa_nodes; + int nr_pmu_mappings; + int nr_groups; + char *cmdline; + const char **cmdline_argv; + char *sibling_cores; + char *sibling_threads; + char *numa_nodes; + char *pmu_mappings; + struct cpu_topology_map *cpu; +}; + +extern struct perf_env perf_env; + +void perf_env__exit(struct perf_env *env); + +int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]); + +int perf_env__read_cpu_topology_map(struct perf_env *env); + +#endif /* __PERF_ENV_H */ diff --git a/kernel/tools/perf/util/environment.c b/kernel/tools/perf/util/environment.c index 275b0ee34..740512369 100644 --- a/kernel/tools/perf/util/environment.c +++ b/kernel/tools/perf/util/environment.c @@ -5,5 +5,4 @@ */ #include "cache.h" -const char *pager_program; int pager_use_color = 1; diff --git a/kernel/tools/perf/util/event.c b/kernel/tools/perf/util/event.c index ff866c4d2..8b10621b4 100644 --- a/kernel/tools/perf/util/event.c +++ b/kernel/tools/perf/util/event.c @@ -23,12 +23,20 @@ static const char *perf_event__names[] = { [PERF_RECORD_FORK] = "FORK", [PERF_RECORD_READ] = "READ", [PERF_RECORD_SAMPLE] = "SAMPLE", + [PERF_RECORD_AUX] = "AUX", + [PERF_RECORD_ITRACE_START] = "ITRACE_START", + [PERF_RECORD_LOST_SAMPLES] = "LOST_SAMPLES", + [PERF_RECORD_SWITCH] = "SWITCH", + [PERF_RECORD_SWITCH_CPU_WIDE] = "SWITCH_CPU_WIDE", [PERF_RECORD_HEADER_ATTR] = "ATTR", [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE", [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA", [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID", [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND", [PERF_RECORD_ID_INDEX] = "ID_INDEX", + [PERF_RECORD_AUXTRACE_INFO] = "AUXTRACE_INFO", + [PERF_RECORD_AUXTRACE] = 
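
As a usage sketch for the perf_env API just added (cpu_to_socket() is illustrative only, not part of the patch), the topology map lets a consumer translate a sample's CPU number into a socket id:

#include "env.h"

static int cpu_to_socket(struct perf_env *env, int cpu)
{
	/* perf_env__read_cpu_topology_map() lazily builds env->cpu[]. */
	if (cpu < 0 || perf_env__read_cpu_topology_map(env) < 0)
		return -1;
	if (cpu >= env->nr_cpus_avail)
		return -1;
	return env->cpu[cpu].socket_id;
}
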
"AUXTRACE", + [PERF_RECORD_AUXTRACE_ERROR] = "AUXTRACE_ERROR", }; const char *perf_event__name(unsigned int id) @@ -59,7 +67,8 @@ static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len, char filename[PATH_MAX]; char bf[4096]; int fd; - size_t size = 0, n; + size_t size = 0; + ssize_t n; char *nl, *name, *tgids, *ppids; *tgid = -1; @@ -159,7 +168,7 @@ static int perf_event__prepare_comm(union perf_event *event, pid_t pid, return 0; } -static pid_t perf_event__synthesize_comm(struct perf_tool *tool, +pid_t perf_event__synthesize_comm(struct perf_tool *tool, union perf_event *event, pid_t pid, perf_event__handler_t process, struct machine *machine) @@ -212,10 +221,14 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine, - bool mmap_data) + bool mmap_data, + unsigned int proc_map_timeout) { char filename[PATH_MAX]; FILE *fp; + unsigned long long t; + bool truncation = false; + unsigned long long timeout = proc_map_timeout * 1000000ULL; int rc = 0; if (machine__is_default_guest(machine)) @@ -234,6 +247,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, } event->header.type = PERF_RECORD_MMAP2; + t = rdclock(); while (1) { char bf[BUFSIZ]; @@ -247,6 +261,15 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, if (fgets(bf, sizeof(bf), fp) == NULL) break; + if ((rdclock() - t) > timeout) { + pr_warning("Reading %s time out. " + "You may want to increase " + "the time limit by --proc-map-timeout\n", + filename); + truncation = true; + goto out; + } + /* ensure null termination since stack will be reused. */ strcpy(execname, ""); @@ -295,6 +318,10 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; } +out: + if (truncation) + event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT; + if (!strcmp(execname, "")) strcpy(execname, anonstr); @@ -313,6 +340,9 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, rc = -1; break; } + + if (truncation) + break; } fclose(fp); @@ -324,8 +354,9 @@ int perf_event__synthesize_modules(struct perf_tool *tool, struct machine *machine) { int rc = 0; - struct rb_node *nd; + struct map *pos; struct map_groups *kmaps = &machine->kmaps; + struct maps *maps = &kmaps->maps[MAP__FUNCTION]; union perf_event *event = zalloc((sizeof(event->mmap) + machine->id_hdr_size)); if (event == NULL) { @@ -345,12 +376,10 @@ int perf_event__synthesize_modules(struct perf_tool *tool, else event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; - for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]); - nd; nd = rb_next(nd)) { + for (pos = maps__first(maps); pos; pos = map__next(pos)) { size_t size; - struct map *pos = rb_entry(nd, struct map, rb_node); - if (pos->dso->kernel) + if (__map__is_kernel(pos)) continue; size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); @@ -381,7 +410,9 @@ static int __event__synthesize_thread(union perf_event *comm_event, pid_t pid, int full, perf_event__handler_t process, struct perf_tool *tool, - struct machine *machine, bool mmap_data) + struct machine *machine, + bool mmap_data, + unsigned int proc_map_timeout) { char filename[PATH_MAX]; DIR *tasks; @@ -398,7 +429,8 @@ static int __event__synthesize_thread(union perf_event *comm_event, return -1; return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, - process, machine, mmap_data); + process, machine, mmap_data, + proc_map_timeout); } if (machine__is_default_guest(machine)) @@ 
-439,7 +471,7 @@ static int __event__synthesize_thread(union perf_event *comm_event, if (_pid == pid) { /* process the parent's maps too */ rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, - process, machine, mmap_data); + process, machine, mmap_data, proc_map_timeout); if (rc) break; } @@ -453,7 +485,8 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, struct thread_map *threads, perf_event__handler_t process, struct machine *machine, - bool mmap_data) + bool mmap_data, + unsigned int proc_map_timeout) { union perf_event *comm_event, *mmap_event, *fork_event; int err = -1, thread, j; @@ -474,9 +507,9 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, for (thread = 0; thread < threads->nr; ++thread) { if (__event__synthesize_thread(comm_event, mmap_event, fork_event, - threads->map[thread], 0, + thread_map__pid(threads, thread), 0, process, tool, machine, - mmap_data)) { + mmap_data, proc_map_timeout)) { err = -1; break; } @@ -485,12 +518,12 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, * comm.pid is set to thread group id by * perf_event__synthesize_comm */ - if ((int) comm_event->comm.pid != threads->map[thread]) { + if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) { bool need_leader = true; /* is thread group leader in thread_map? */ for (j = 0; j < threads->nr; ++j) { - if ((int) comm_event->comm.pid == threads->map[j]) { + if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) { need_leader = false; break; } @@ -502,7 +535,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, fork_event, comm_event->comm.pid, 0, process, tool, machine, - mmap_data)) { + mmap_data, proc_map_timeout)) { err = -1; break; } @@ -519,7 +552,9 @@ out: int perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, - struct machine *machine, bool mmap_data) + struct machine *machine, + bool mmap_data, + unsigned int proc_map_timeout) { DIR *proc; char proc_path[PATH_MAX]; @@ -559,7 +594,8 @@ int perf_event__synthesize_threads(struct perf_tool *tool, * one thread couldn't be synthesized. 
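
The new proc_map_timeout argument bounds how long /proc/<pid>/maps parsing may run before the resulting MMAP event is flagged as truncated. A hedged usage sketch, passing 500ms, which appears to be the default perf record uses for --proc-map-timeout:

	err = perf_event__synthesize_threads(tool, perf_event__process,
					     machine, false /* mmap_data */,
					     500 /* proc map timeout, ms */);
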
*/ __event__synthesize_thread(comm_event, mmap_event, fork_event, pid, - 1, process, tool, machine, mmap_data); + 1, process, tool, machine, mmap_data, + proc_map_timeout); } err = 0; @@ -614,12 +650,12 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, size_t size; const char *mmap_name; char name_buff[PATH_MAX]; - struct map *map; + struct map *map = machine__kernel_map(machine); struct kmap *kmap; int err; union perf_event *event; - if (machine->vmlinux_maps[0] == NULL) + if (map == NULL) return -1; /* @@ -645,7 +681,6 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; } - map = machine->vmlinux_maps[MAP__FUNCTION]; kmap = map__kmap(map); size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1; @@ -692,6 +727,38 @@ int perf_event__process_lost(struct perf_tool *tool __maybe_unused, return machine__process_lost_event(machine, event, sample); } +int perf_event__process_aux(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample __maybe_unused, + struct machine *machine) +{ + return machine__process_aux_event(machine, event); +} + +int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample __maybe_unused, + struct machine *machine) +{ + return machine__process_itrace_start_event(machine, event); +} + +int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + return machine__process_lost_samples_event(machine, event, sample); +} + +int perf_event__process_switch(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample __maybe_unused, + struct machine *machine) +{ + return machine__process_switch_event(machine, event); +} + size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) { return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n", @@ -755,6 +822,35 @@ int perf_event__process_exit(struct perf_tool *tool __maybe_unused, return machine__process_exit_event(machine, event, sample); } +size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp) +{ + return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n", + event->aux.aux_offset, event->aux.aux_size, + event->aux.flags, + event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "", + event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : ""); +} + +size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp) +{ + return fprintf(fp, " pid: %u tid: %u\n", + event->itrace_start.pid, event->itrace_start.tid); +} + +size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp) +{ + bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; + const char *in_out = out ? "OUT" : "IN "; + + if (event->header.type == PERF_RECORD_SWITCH) + return fprintf(fp, " %s\n", in_out); + + return fprintf(fp, " %s %s pid/tid: %5u/%-5u\n", + in_out, out ? 
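
A consumer opts into the record types handled above by wiring the corresponding perf_tool callbacks; a minimal sketch, assuming struct perf_tool carries matching members (my_sample() is hypothetical):

struct perf_tool tool = {
	.sample		= my_sample,
	.aux		= perf_event__process_aux,
	.itrace_start	= perf_event__process_itrace_start,
	.lost_samples	= perf_event__process_lost_samples,
	.context_switch	= perf_event__process_switch,
};
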
"next" : "prev", + event->context_switch.next_prev_pid, + event->context_switch.next_prev_tid); +} + size_t perf_event__fprintf(union perf_event *event, FILE *fp) { size_t ret = fprintf(fp, "PERF_RECORD_%s", @@ -774,6 +870,16 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp) case PERF_RECORD_MMAP2: ret += perf_event__fprintf_mmap2(event, fp); break; + case PERF_RECORD_AUX: + ret += perf_event__fprintf_aux(event, fp); + break; + case PERF_RECORD_ITRACE_START: + ret += perf_event__fprintf_itrace_start(event, fp); + break; + case PERF_RECORD_SWITCH: + case PERF_RECORD_SWITCH_CPU_WIDE: + ret += perf_event__fprintf_switch(event, fp); + break; default: ret += fprintf(fp, "\n"); } @@ -877,6 +983,10 @@ void thread__find_addr_location(struct thread *thread, al->sym = NULL; } +/* + * Callers need to drop the reference to al->thread, obtained in + * machine__findnew_thread() + */ int perf_event__preprocess_sample(const union perf_event *event, struct machine *machine, struct addr_location *al, @@ -898,7 +1008,7 @@ int perf_event__preprocess_sample(const union perf_event *event, * it now. */ if (cpumode == PERF_RECORD_MISC_KERNEL && - machine->vmlinux_maps[MAP__FUNCTION] == NULL) + machine__kernel_map(machine) == NULL) machine__create_kernel_maps(machine); thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al); @@ -911,6 +1021,14 @@ int perf_event__preprocess_sample(const union perf_event *event, al->sym = NULL; al->cpu = sample->cpu; + al->socket = -1; + + if (al->cpu >= 0) { + struct perf_env *env = machine->env; + + if (env && env->cpu) + al->socket = env->cpu[al->cpu].socket_id; + } if (al->map) { struct dso *dso = al->map->dso; @@ -937,6 +1055,17 @@ int perf_event__preprocess_sample(const union perf_event *event, return 0; } +/* + * The preprocess_sample method will return with reference counts for the + * in it, when done using (and perhaps getting ref counts if needing to + * keep a pointer to one of those entries) it must be paired with + * addr_location__put(), so that the refcounts can be decremented. + */ +void addr_location__put(struct addr_location *al) +{ + thread__zput(al->thread); +} + bool is_bts_event(struct perf_event_attr *attr) { return attr->type == PERF_TYPE_HARDWARE && diff --git a/kernel/tools/perf/util/event.h b/kernel/tools/perf/util/event.h index 09b9e8d3f..a0dbcbd4f 100644 --- a/kernel/tools/perf/util/event.h +++ b/kernel/tools/perf/util/event.h @@ -52,6 +52,11 @@ struct lost_event { u64 lost; }; +struct lost_samples_event { + struct perf_event_header header; + u64 lost; +}; + /* * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID */ @@ -129,7 +134,8 @@ struct branch_flags { u64 predicted:1; u64 in_tx:1; u64 abort:1; - u64 reserved:60; + u64 cycles:16; + u64 reserved:44; }; struct branch_entry { @@ -157,6 +163,8 @@ enum { PERF_IP_FLAG_IN_TX = 1ULL << 10, }; +#define PERF_IP_FLAG_CHARS "bcrosyiABEx" + #define PERF_BRANCH_MASK (\ PERF_IP_FLAG_BRANCH |\ PERF_IP_FLAG_CALL |\ @@ -215,9 +223,17 @@ enum perf_user_event_type { /* above any possible kernel type */ PERF_RECORD_HEADER_BUILD_ID = 67, PERF_RECORD_FINISHED_ROUND = 68, PERF_RECORD_ID_INDEX = 69, + PERF_RECORD_AUXTRACE_INFO = 70, + PERF_RECORD_AUXTRACE = 71, + PERF_RECORD_AUXTRACE_ERROR = 72, PERF_RECORD_HEADER_MAX }; +enum auxtrace_error_type { + PERF_AUXTRACE_ERROR_ITRACE = 1, + PERF_AUXTRACE_ERROR_MAX +}; + /* * The kernel collects the number of events it couldn't send in a stretch and * when possible sends this number in a PERF_RECORD_LOST event. 
The number of @@ -225,6 +241,12 @@ enum perf_user_event_type { /* above any possible kernel type */ * total_lost tells exactly how many events the kernel in fact lost, i.e. it is * the sum of all struct lost_event.lost fields reported. * + * The kernel discards mixed up samples and sends the number in a + * PERF_RECORD_LOST_SAMPLES event. The number of lost-samples events is stored + * in .nr_events[PERF_RECORD_LOST_SAMPLES] while total_lost_samples tells + * exactly how many samples the kernel in fact dropped, i.e. it is the sum of + * all struct lost_samples_event.lost fields reported. + * * The total_period is needed because by default auto-freq is used, so * multipling nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get * the total number of low level events, it is necessary to to sum all struct @@ -234,6 +256,8 @@ struct events_stats { u64 total_period; u64 total_non_filtered_period; u64 total_lost; + u64 total_lost_samples; + u64 total_aux_lost; u64 total_invalid_chains; u32 nr_events[PERF_RECORD_HEADER_MAX]; u32 nr_non_filtered_samples; @@ -242,6 +266,8 @@ struct events_stats { u32 nr_invalid_chains; u32 nr_unknown_id; u32 nr_unprocessable_samples; + u32 nr_auxtrace_errors[PERF_AUXTRACE_ERROR_MAX]; + u32 nr_proc_map_timeout; }; struct attr_event { @@ -280,6 +306,56 @@ struct id_index_event { struct id_index_entry entries[0]; }; +struct auxtrace_info_event { + struct perf_event_header header; + u32 type; + u32 reserved__; /* For alignment */ + u64 priv[]; +}; + +struct auxtrace_event { + struct perf_event_header header; + u64 size; + u64 offset; + u64 reference; + u32 idx; + u32 tid; + u32 cpu; + u32 reserved__; /* For alignment */ +}; + +#define MAX_AUXTRACE_ERROR_MSG 64 + +struct auxtrace_error_event { + struct perf_event_header header; + u32 type; + u32 code; + u32 cpu; + u32 pid; + u32 tid; + u32 reserved__; /* For alignment */ + u64 ip; + char msg[MAX_AUXTRACE_ERROR_MSG]; +}; + +struct aux_event { + struct perf_event_header header; + u64 aux_offset; + u64 aux_size; + u64 flags; +}; + +struct itrace_start_event { + struct perf_event_header header; + u32 pid, tid; +}; + +struct context_switch_event { + struct perf_event_header header; + u32 next_prev_pid; + u32 next_prev_tid; +}; + union perf_event { struct perf_event_header header; struct mmap_event mmap; @@ -287,6 +363,7 @@ union perf_event { struct comm_event comm; struct fork_event fork; struct lost_event lost; + struct lost_samples_event lost_samples; struct read_event read; struct throttle_event throttle; struct sample_event sample; @@ -295,6 +372,12 @@ union perf_event { struct tracing_data_event tracing_data; struct build_id_event build_id; struct id_index_event id_index; + struct auxtrace_info_event auxtrace_info; + struct auxtrace_event auxtrace; + struct auxtrace_error_event auxtrace_error; + struct aux_event aux; + struct itrace_start_event itrace_start; + struct context_switch_event context_switch; }; void perf_event__print_totals(void); @@ -310,10 +393,12 @@ typedef int (*perf_event__handler_t)(struct perf_tool *tool, int perf_event__synthesize_thread_map(struct perf_tool *tool, struct thread_map *threads, perf_event__handler_t process, - struct machine *machine, bool mmap_data); + struct machine *machine, bool mmap_data, + unsigned int proc_map_timeout); int perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, - struct machine *machine, bool mmap_data); + struct machine *machine, bool mmap_data, + unsigned int proc_map_timeout); int 
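
Since all of the new records are reachable through union perf_event, a reader can dispatch on header.type; a sketch (handle_aux() is hypothetical):

switch (event->header.type) {
case PERF_RECORD_AUX:
	/* PERF_AUX_FLAG_TRUNCATED: the AUX buffer ran out of space */
	handle_aux(event->aux.aux_offset, event->aux.aux_size,
		   event->aux.flags & PERF_AUX_FLAG_TRUNCATED);
	break;
case PERF_RECORD_SWITCH_CPU_WIDE:
	pr_debug("switch next/prev pid/tid: %u/%u\n",
		 event->context_switch.next_prev_pid,
		 event->context_switch.next_prev_tid);
	break;
default:
	break;
}
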
perf_event__synthesize_kernel_mmap(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine); @@ -330,6 +415,22 @@ int perf_event__process_lost(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine); +int perf_event__process_lost_samples(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process_aux(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process_itrace_start(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process_switch(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); int perf_event__process_mmap(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, @@ -358,6 +459,8 @@ int perf_event__preprocess_sample(const union perf_event *event, struct addr_location *al, struct perf_sample *sample); +void addr_location__put(struct addr_location *al); + struct thread; bool is_bts_event(struct perf_event_attr *attr); @@ -376,17 +479,26 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, const struct perf_sample *sample, bool swapped); +pid_t perf_event__synthesize_comm(struct perf_tool *tool, + union perf_event *event, pid_t pid, + perf_event__handler_t process, + struct machine *machine); + int perf_event__synthesize_mmap_events(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine, - bool mmap_data); + bool mmap_data, + unsigned int proc_map_timeout); size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp); size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp); size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp); size_t perf_event__fprintf_task(union perf_event *event, FILE *fp); +size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp); +size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp); +size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp); size_t perf_event__fprintf(union perf_event *event, FILE *fp); u64 kallsyms__get_function_start(const char *kallsyms_filename, diff --git a/kernel/tools/perf/util/evlist.c b/kernel/tools/perf/util/evlist.c index 080be93ee..d1392194a 100644 --- a/kernel/tools/perf/util/evlist.c +++ b/kernel/tools/perf/util/evlist.c @@ -25,6 +25,7 @@ #include #include #include +#include static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx); static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx); @@ -98,6 +99,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist) evlist__for_each_safe(evlist, n, pos) { list_del_init(&pos->node); + pos->evlist = NULL; perf_evsel__delete(pos); } @@ -114,8 +116,8 @@ void perf_evlist__delete(struct perf_evlist *evlist) { perf_evlist__munmap(evlist); perf_evlist__close(evlist); - cpu_map__delete(evlist->cpus); - thread_map__delete(evlist->threads); + cpu_map__put(evlist->cpus); + thread_map__put(evlist->threads); evlist->cpus = NULL; evlist->threads = NULL; perf_evlist__purge(evlist); @@ -123,26 +125,62 @@ void perf_evlist__delete(struct perf_evlist *evlist) free(evlist); } +static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, + struct perf_evsel *evsel) +{ + /* + * We already have cpus for evsel (via PMU sysfs) so + * keep it, if 
there's no target cpu list defined. + */ + if (!evsel->own_cpus || evlist->has_user_cpus) { + cpu_map__put(evsel->cpus); + evsel->cpus = cpu_map__get(evlist->cpus); + } else if (evsel->cpus != evsel->own_cpus) { + cpu_map__put(evsel->cpus); + evsel->cpus = cpu_map__get(evsel->own_cpus); + } + + thread_map__put(evsel->threads); + evsel->threads = thread_map__get(evlist->threads); +} + +static void perf_evlist__propagate_maps(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) + __perf_evlist__propagate_maps(evlist, evsel); +} + void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) { + entry->evlist = evlist; list_add_tail(&entry->node, &evlist->entries); entry->idx = evlist->nr_entries; entry->tracking = !entry->idx; if (!evlist->nr_entries++) perf_evlist__set_id_pos(evlist); + + __perf_evlist__propagate_maps(evlist, entry); +} + +void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel) +{ + evsel->evlist = NULL; + list_del_init(&evsel->node); + evlist->nr_entries -= 1; } void perf_evlist__splice_list_tail(struct perf_evlist *evlist, - struct list_head *list, - int nr_entries) + struct list_head *list) { - bool set_id_pos = !evlist->nr_entries; + struct perf_evsel *evsel, *temp; - list_splice_tail(list, &evlist->entries); - evlist->nr_entries += nr_entries; - if (set_id_pos) - perf_evlist__set_id_pos(evlist); + __evlist__for_each_safe(list, temp, evsel) { + list_del_init(&evsel->node); + perf_evlist__add(evlist, evsel); + } } void __perf_evlist__set_leader(struct list_head *list) @@ -167,6 +205,20 @@ void perf_evlist__set_leader(struct perf_evlist *evlist) } } +void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr) +{ + attr->precise_ip = 3; + + while (attr->precise_ip != 0) { + int fd = sys_perf_event_open(attr, 0, -1, -1, 0); + if (fd != -1) { + close(fd); + break; + } + --attr->precise_ip; + } +} + int perf_evlist__add_default(struct perf_evlist *evlist) { struct perf_event_attr attr = { @@ -177,13 +229,15 @@ int perf_evlist__add_default(struct perf_evlist *evlist) event_attr_init(&attr); + perf_event_attr__set_max_precise_ip(&attr); + evsel = perf_evsel__new(&attr); if (evsel == NULL) goto error; - /* use strdup() because free(evsel) assumes name is allocated */ - evsel->name = strdup("cycles"); - if (!evsel->name) + /* use asprintf() because free(evsel) assumes name is allocated */ + if (asprintf(&evsel->name, "cycles%.*s", + attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0) goto error_free; perf_evlist__add(evlist, evsel); @@ -208,7 +262,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist, list_add_tail(&evsel->node, &head); } - perf_evlist__splice_list_tail(evlist, &head, nr_attrs); + perf_evlist__splice_list_tail(evlist, &head); return 0; @@ -263,7 +317,7 @@ int perf_evlist__add_newtp(struct perf_evlist *evlist, { struct perf_evsel *evsel = perf_evsel__newtp(sys, name); - if (evsel == NULL) + if (IS_ERR(evsel)) return -1; evsel->handler = handler; @@ -297,6 +351,8 @@ void perf_evlist__disable(struct perf_evlist *evlist) PERF_EVENT_IOC_DISABLE, 0); } } + + evlist->enabled = false; } void perf_evlist__enable(struct perf_evlist *evlist) @@ -316,6 +372,13 @@ void perf_evlist__enable(struct perf_evlist *evlist) PERF_EVENT_IOC_ENABLE, 0); } } + + evlist->enabled = true; +} + +void perf_evlist__toggle_enable(struct perf_evlist *evlist) +{ + (evlist->enabled ? 
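
perf_event_attr__set_max_precise_ip() probes downward from the most precise level by attempting real perf_event_open() calls, and the asprintf() above encodes the probed level as a :p suffix. The same trick in isolation, as a sketch:

struct perf_event_attr attr = {
	.type	= PERF_TYPE_HARDWARE,
	.config	= PERF_COUNT_HW_CPU_CYCLES,
};
char *name = NULL;

perf_event_attr__set_max_precise_ip(&attr);	/* e.g. leaves precise_ip == 2 */
/* "%.*s" keeps precise_ip + 1 bytes of ":ppp", so 2 yields "cycles:pp" */
if (asprintf(&name, "cycles%.*s",
	     attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
	name = NULL;
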
perf_evlist__disable : perf_evlist__enable)(evlist); } int perf_evlist__disable_event(struct perf_evlist *evlist, @@ -539,7 +602,7 @@ static void perf_evlist__set_sid_idx(struct perf_evlist *evlist, else sid->cpu = -1; if (!evsel->system_wide && evlist->threads && thread >= 0) - sid->tid = evlist->threads->map[thread]; + sid->tid = thread_map__pid(evlist->threads, thread); else sid->tid = -1; } @@ -564,7 +627,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) { struct perf_sample_id *sid; - if (evlist->nr_entries == 1) + if (evlist->nr_entries == 1 || !id) return perf_evlist__first(evlist); sid = perf_evlist__id2sid(evlist, id); @@ -577,6 +640,21 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) return NULL; } +struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist, + u64 id) +{ + struct perf_sample_id *sid; + + if (!id) + return NULL; + + sid = perf_evlist__id2sid(evlist, id); + if (sid) + return sid->evsel; + + return NULL; +} + static int perf_evlist__event2id(struct perf_evlist *evlist, union perf_event *event, u64 *id) { @@ -634,11 +712,18 @@ static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) { struct perf_mmap *md = &evlist->mmap[idx]; - u64 head = perf_mmap__read_head(md); + u64 head; u64 old = md->prev; unsigned char *data = md->base + page_size; union perf_event *event = NULL; + /* + * Check if event was unmapped due to a POLLHUP/POLLERR. + */ + if (!atomic_read(&md->refcnt)) + return NULL; + + head = perf_mmap__read_head(md); if (evlist->overwrite) { /* * If we're further behind than half the buffer, there's a chance @@ -695,19 +780,19 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) static bool perf_mmap__empty(struct perf_mmap *md) { - return perf_mmap__read_head(md) == md->prev; + return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base; } static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx) { - ++evlist->mmap[idx].refcnt; + atomic_inc(&evlist->mmap[idx].refcnt); } static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx) { - BUG_ON(evlist->mmap[idx].refcnt == 0); + BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0); - if (--evlist->mmap[idx].refcnt == 0) + if (atomic_dec_and_test(&evlist->mmap[idx].refcnt)) __perf_evlist__munmap(evlist, idx); } @@ -721,17 +806,46 @@ void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx) perf_mmap__write_tail(md, old); } - if (md->refcnt == 1 && perf_mmap__empty(md)) + if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md)) perf_evlist__mmap_put(evlist, idx); } +int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused, + struct auxtrace_mmap_params *mp __maybe_unused, + void *userpg __maybe_unused, + int fd __maybe_unused) +{ + return 0; +} + +void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused) +{ +} + +void __weak auxtrace_mmap_params__init( + struct auxtrace_mmap_params *mp __maybe_unused, + off_t auxtrace_offset __maybe_unused, + unsigned int auxtrace_pages __maybe_unused, + bool auxtrace_overwrite __maybe_unused) +{ +} + +void __weak auxtrace_mmap_params__set_idx( + struct auxtrace_mmap_params *mp __maybe_unused, + struct perf_evlist *evlist __maybe_unused, + int idx __maybe_unused, + bool per_cpu __maybe_unused) +{ +} + static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx) { if (evlist->mmap[idx].base != NULL) 
{ munmap(evlist->mmap[idx].base, evlist->mmap_len); evlist->mmap[idx].base = NULL; - evlist->mmap[idx].refcnt = 0; + atomic_set(&evlist->mmap[idx].refcnt, 0); } + auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap); } void perf_evlist__munmap(struct perf_evlist *evlist) @@ -759,6 +873,7 @@ static int perf_evlist__alloc_mmap(struct perf_evlist *evlist) struct mmap_params { int prot; int mask; + struct auxtrace_mmap_params auxtrace_mp; }; static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx, @@ -777,7 +892,7 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx, * evlist layer can't just drop it when filtering events in * perf_evlist__filter_pollfd(). */ - evlist->mmap[idx].refcnt = 2; + atomic_set(&evlist->mmap[idx].refcnt, 2); evlist->mmap[idx].prev = 0; evlist->mmap[idx].mask = mp->mask; evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot, @@ -789,6 +904,10 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx, return -1; } + if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap, + &mp->auxtrace_mp, evlist->mmap[idx].base, fd)) + return -1; + return 0; } @@ -853,6 +972,9 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, for (cpu = 0; cpu < nr_cpus; cpu++) { int output = -1; + auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu, + true); + for (thread = 0; thread < nr_threads; thread++) { if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu, thread, &output)) @@ -878,6 +1000,9 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, for (thread = 0; thread < nr_threads; thread++) { int output = -1; + auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread, + false); + if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, &output)) goto out_unmap; @@ -960,10 +1085,8 @@ static long parse_pages_arg(const char *str, unsigned long min, return pages; } -int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, - int unset __maybe_unused) +int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str) { - unsigned int *mmap_pages = opt->value; unsigned long max = UINT_MAX; long pages; @@ -980,20 +1103,32 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, return 0; } +int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, + int unset __maybe_unused) +{ + return __perf_evlist__parse_mmap_pages(opt->value, str); +} + /** - * perf_evlist__mmap - Create mmaps to receive events. + * perf_evlist__mmap_ex - Create mmaps to receive events. * @evlist: list of events * @pages: map length in pages * @overwrite: overwrite older events? + * @auxtrace_pages - auxtrace map length in pages + * @auxtrace_overwrite - overwrite older auxtrace data? * * If @overwrite is %false the user needs to signal event consumption using * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this * automatically. * + * Similarly, if @auxtrace_overwrite is %false the user needs to signal data + * consumption using auxtrace_mmap__write_tail(). + * * Return: %0 on success, negative error code otherwise. 
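
A sketch of mapping both buffers at once with the extended call: 128 data pages plus 1024 AUX pages (4MiB with 4KiB pages), neither in overwrite mode:

	if (perf_evlist__mmap_ex(evlist, 128, false, 1024, false) < 0)
		pr_err("failed to mmap events\n");
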
*/ -int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, - bool overwrite) +int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, + bool overwrite, unsigned int auxtrace_pages, + bool auxtrace_overwrite) { struct perf_evsel *evsel; const struct cpu_map *cpus = evlist->cpus; @@ -1013,6 +1148,9 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, pr_debug("mmap size %zuB\n", evlist->mmap_len); mp.mask = evlist->mmap_len - page_size - 1; + auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len, + auxtrace_pages, auxtrace_overwrite); + evlist__for_each(evlist, evsel) { if ((evsel->attr.read_format & PERF_FORMAT_ID) && evsel->sample_id == NULL && @@ -1026,30 +1164,64 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, return perf_evlist__mmap_per_cpu(evlist, &mp); } +int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, + bool overwrite) +{ + return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false); +} + int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) { - evlist->threads = thread_map__new_str(target->pid, target->tid, - target->uid); + struct cpu_map *cpus; + struct thread_map *threads; + + threads = thread_map__new_str(target->pid, target->tid, target->uid); - if (evlist->threads == NULL) + if (!threads) return -1; if (target__uses_dummy_map(target)) - evlist->cpus = cpu_map__dummy_new(); + cpus = cpu_map__dummy_new(); else - evlist->cpus = cpu_map__new(target->cpu_list); + cpus = cpu_map__new(target->cpu_list); - if (evlist->cpus == NULL) + if (!cpus) goto out_delete_threads; + evlist->has_user_cpus = !!target->cpu_list; + + perf_evlist__set_maps(evlist, cpus, threads); + return 0; out_delete_threads: - thread_map__delete(evlist->threads); - evlist->threads = NULL; + thread_map__put(threads); return -1; } +void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, + struct thread_map *threads) +{ + /* + * Allow for the possibility that one or another of the maps isn't being + * changed i.e. don't put it. Note we are assuming the maps that are + * being applied are brand new and evlist is taking ownership of the + * original reference count of 1. If that is not the case it is up to + * the caller to increase the reference count. + */ + if (cpus != evlist->cpus) { + cpu_map__put(evlist->cpus); + evlist->cpus = cpus; + } + + if (threads != evlist->threads) { + thread_map__put(evlist->threads); + evlist->threads = threads; + } + + perf_evlist__propagate_maps(evlist); +} + int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel) { struct perf_evsel *evsel; @@ -1061,7 +1233,11 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **e if (evsel->filter == NULL) continue; - err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter); + /* + * filters only work for tracepoint event, which doesn't have cpu limit. + * So evlist and evsel should always be same. 
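
Per the ownership rule spelled out in the comment above, callers hand brand-new maps (reference count 1) to the evlist and must not put them afterwards; a sketch:

struct cpu_map *cpus = cpu_map__new(NULL);		/* refcount == 1 */
struct thread_map *threads = thread_map__new_dummy();	/* refcount == 1 */

if (cpus && threads) {
	perf_evlist__set_maps(evlist, cpus, threads);	/* evlist owns both now */
} else {
	cpu_map__put(cpus);		/* both put helpers accept NULL */
	thread_map__put(threads);
}
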
+ */ + err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter); if (err) { *err_evsel = evsel; break; @@ -1075,11 +1251,9 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) { struct perf_evsel *evsel; int err = 0; - const int ncpus = cpu_map__nr(evlist->cpus), - nthreads = thread_map__nr(evlist->threads); evlist__for_each(evlist, evsel) { - err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter); + err = perf_evsel__set_filter(evsel, filter); if (err) break; } @@ -1157,6 +1331,16 @@ u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist) return __perf_evlist__combined_sample_type(evlist); } +u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + u64 branch_type = 0; + + evlist__for_each(evlist, evsel) + branch_type |= evsel->attr.branch_sample_type; + return branch_type; +} + bool perf_evlist__valid_read_format(struct perf_evlist *evlist) { struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; @@ -1255,6 +1439,8 @@ void perf_evlist__close(struct perf_evlist *evlist) static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) { + struct cpu_map *cpus; + struct thread_map *threads; int err = -ENOMEM; /* @@ -1266,20 +1452,19 @@ static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) * error, and we may not want to do that fallback to a * default cpu identity map :-\ */ - evlist->cpus = cpu_map__new(NULL); - if (evlist->cpus == NULL) + cpus = cpu_map__new(NULL); + if (!cpus) goto out; - evlist->threads = thread_map__new_dummy(); - if (evlist->threads == NULL) - goto out_free_cpus; + threads = thread_map__new_dummy(); + if (!threads) + goto out_put; - err = 0; + perf_evlist__set_maps(evlist, cpus, threads); out: return err; -out_free_cpus: - cpu_map__delete(evlist->cpus); - evlist->cpus = NULL; +out_put: + cpu_map__put(cpus); goto out; } @@ -1400,7 +1585,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *tar __func__, __LINE__); goto out_close_pipes; } - evlist->threads->map[0] = evlist->workload.pid; + thread_map__set_pid(evlist->threads, 0, evlist->workload.pid); } close(child_ready_pipe[1]); diff --git a/kernel/tools/perf/util/evlist.h b/kernel/tools/perf/util/evlist.h index b5cce95d6..a459fe71b 100644 --- a/kernel/tools/perf/util/evlist.h +++ b/kernel/tools/perf/util/evlist.h @@ -1,6 +1,7 @@ #ifndef __PERF_EVLIST_H #define __PERF_EVLIST_H 1 +#include #include #include #include @@ -8,6 +9,7 @@ #include "event.h" #include "evsel.h" #include "util.h" +#include "auxtrace.h" #include struct pollfd; @@ -26,8 +28,9 @@ struct record_opts; struct perf_mmap { void *base; int mask; - int refcnt; + atomic_t refcnt; u64 prev; + struct auxtrace_mmap auxtrace_mmap; char event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8))); }; @@ -37,6 +40,9 @@ struct perf_evlist { int nr_entries; int nr_groups; int nr_mmaps; + bool overwrite; + bool enabled; + bool has_user_cpus; size_t mmap_len; int id_pos; int is_pos; @@ -45,13 +51,13 @@ struct perf_evlist { int cork_fd; pid_t pid; } workload; - bool overwrite; struct fdarray pollfd; struct perf_mmap *mmap; struct thread_map *threads; struct cpu_map *cpus; struct perf_evsel *selected; struct events_stats stats; + struct perf_env *env; }; struct perf_evsel_str_handler { @@ -67,6 +73,7 @@ void perf_evlist__exit(struct perf_evlist *evlist); void perf_evlist__delete(struct perf_evlist *evlist); void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); +void 
perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel); int perf_evlist__add_default(struct perf_evlist *evlist); int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs); @@ -98,6 +105,8 @@ int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mas int perf_evlist__poll(struct perf_evlist *evlist, int timeout); struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); +struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist, + u64 id); struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id); @@ -110,6 +119,8 @@ void perf_evlist__close(struct perf_evlist *evlist); void perf_evlist__set_id_pos(struct perf_evlist *evlist); bool perf_can_sample_identifier(void); +bool perf_can_record_switch_events(void); +bool perf_can_record_cpu_wide(void); void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts); int record_opts__config(struct record_opts *opts); @@ -122,16 +133,21 @@ int perf_evlist__start_workload(struct perf_evlist *evlist); struct option; +int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str); int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset); +int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, + bool overwrite, unsigned int auxtrace_pages, + bool auxtrace_overwrite); int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, bool overwrite); void perf_evlist__munmap(struct perf_evlist *evlist); void perf_evlist__disable(struct perf_evlist *evlist); void perf_evlist__enable(struct perf_evlist *evlist); +void perf_evlist__toggle_enable(struct perf_evlist *evlist); int perf_evlist__disable_event(struct perf_evlist *evlist, struct perf_evsel *evsel); @@ -143,14 +159,8 @@ int perf_evlist__enable_event_idx(struct perf_evlist *evlist, void perf_evlist__set_selected(struct perf_evlist *evlist, struct perf_evsel *evsel); -static inline void perf_evlist__set_maps(struct perf_evlist *evlist, - struct cpu_map *cpus, - struct thread_map *threads) -{ - evlist->cpus = cpus; - evlist->threads = threads; -} - +void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, + struct thread_map *threads); int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target); int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel); @@ -160,6 +170,7 @@ void perf_evlist__set_leader(struct perf_evlist *evlist); u64 perf_evlist__read_format(struct perf_evlist *evlist); u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist); u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist); +u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist); bool perf_evlist__sample_id_all(struct perf_evlist *evlist); u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist); @@ -171,8 +182,7 @@ bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist); bool perf_evlist__valid_read_format(struct perf_evlist *evlist); void perf_evlist__splice_list_tail(struct perf_evlist *evlist, - struct list_head *list, - int nr_entries); + struct list_head *list); static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist) { @@ -281,4 +291,5 @@ void perf_evlist__to_front(struct perf_evlist *evlist, void perf_evlist__set_tracking_event(struct perf_evlist *evlist, struct perf_evsel *tracking_evsel); +void perf_event_attr__set_max_precise_ip(struct 
perf_event_attr *attr); #endif /* __PERF_EVLIST_H */ diff --git a/kernel/tools/perf/util/evsel.c b/kernel/tools/perf/util/evsel.c index 33e3fd8c2..397fb4ed3 100644 --- a/kernel/tools/perf/util/evsel.c +++ b/kernel/tools/perf/util/evsel.c @@ -9,10 +9,11 @@ #include <byteswap.h> #include <linux/bitops.h> -#include <api/fs/debugfs.h> +#include <api/fs/tracing_path.h> #include <traceevent/event-parse.h> #include <linux/hw_breakpoint.h> #include <linux/perf_event.h> +#include <linux/err.h> #include <sys/resource.h> #include "asm/bug.h" #include "callchain.h" @@ -26,6 +27,7 @@ #include "perf_regs.h" #include "debug.h" #include "trace-event.h" +#include "stat.h" static struct { bool sample_id_all; @@ -205,10 +207,14 @@ void perf_evsel__init(struct perf_evsel *evsel, evsel->leader = evsel; evsel->unit = ""; evsel->scale = 1.0; + evsel->evlist = NULL; + evsel->bpf_fd = -1; INIT_LIST_HEAD(&evsel->node); + INIT_LIST_HEAD(&evsel->config_terms); perf_evsel__object.init(evsel); evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); perf_evsel__calc_id_pos(evsel); + evsel->cmdline_group_boundary = false; } struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx) @@ -221,11 +227,17 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx) return evsel; } +/* + * Returns pointer with encoded error via <linux/err.h> interface. + */ struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx) { struct perf_evsel *evsel = zalloc(perf_evsel__object.size); + int err = -ENOMEM; - if (evsel != NULL) { + if (evsel == NULL) { + goto out_err; + } else { struct perf_event_attr attr = { .type = PERF_TYPE_TRACEPOINT, .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | @@ -236,8 +248,10 @@ struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int goto out_free; evsel->tp_format = trace_event__tp_format(sys, name); - if (evsel->tp_format == NULL) + if (IS_ERR(evsel->tp_format)) { + err = PTR_ERR(evsel->tp_format); goto out_free; + } event_attr_init(&attr); attr.config = evsel->tp_format->id; @@ -250,7 +264,8 @@ struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int out_free: zfree(&evsel->name); free(evsel); - return NULL; +out_err: + return ERR_PTR(err); } const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = { @@ -542,14 +557,15 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size) static void perf_evsel__config_callgraph(struct perf_evsel *evsel, - struct record_opts *opts) + struct record_opts *opts, + struct callchain_param *param) { bool function = perf_evsel__is_function_event(evsel); struct perf_event_attr *attr = &evsel->attr; perf_evsel__set_sample_bit(evsel, CALLCHAIN); - if (callchain_param.record_mode == CALLCHAIN_LBR) { + if (param->record_mode == CALLCHAIN_LBR) { if (!opts->branch_stack) { if (attr->exclude_user) { pr_warning("LBR callstack option is only available " @@ -565,12 +581,12 @@ perf_evsel__config_callgraph(struct perf_evsel *evsel, "Falling back to framepointers.\n"); } - if (callchain_param.record_mode == CALLCHAIN_DWARF) { + if (param->record_mode == CALLCHAIN_DWARF) { if (!function) { perf_evsel__set_sample_bit(evsel, REGS_USER); perf_evsel__set_sample_bit(evsel, STACK_USER); attr->sample_regs_user = PERF_REGS_MASK; - attr->sample_stack_user = callchain_param.dump_size; + attr->sample_stack_user = param->dump_size; attr->exclude_callchain_user = 1; } else { pr_info("Cannot use DWARF unwind for function trace event," @@ -584,6 +600,106 @@ perf_evsel__config_callgraph(struct perf_evsel *evsel, } } +static void +perf_evsel__reset_callgraph(struct perf_evsel *evsel, + struct callchain_param *param) +{ + struct perf_event_attr *attr
= &evsel->attr; + + perf_evsel__reset_sample_bit(evsel, CALLCHAIN); + if (param->record_mode == CALLCHAIN_LBR) { + perf_evsel__reset_sample_bit(evsel, BRANCH_STACK); + attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER | + PERF_SAMPLE_BRANCH_CALL_STACK); + } + if (param->record_mode == CALLCHAIN_DWARF) { + perf_evsel__reset_sample_bit(evsel, REGS_USER); + perf_evsel__reset_sample_bit(evsel, STACK_USER); + } +} + +static void apply_config_terms(struct perf_evsel *evsel, + struct record_opts *opts) +{ + struct perf_evsel_config_term *term; + struct list_head *config_terms = &evsel->config_terms; + struct perf_event_attr *attr = &evsel->attr; + struct callchain_param param; + u32 dump_size = 0; + char *callgraph_buf = NULL; + + /* callgraph default */ + param.record_mode = callchain_param.record_mode; + + list_for_each_entry(term, config_terms, list) { + switch (term->type) { + case PERF_EVSEL__CONFIG_TERM_PERIOD: + attr->sample_period = term->val.period; + attr->freq = 0; + break; + case PERF_EVSEL__CONFIG_TERM_FREQ: + attr->sample_freq = term->val.freq; + attr->freq = 1; + break; + case PERF_EVSEL__CONFIG_TERM_TIME: + if (term->val.time) + perf_evsel__set_sample_bit(evsel, TIME); + else + perf_evsel__reset_sample_bit(evsel, TIME); + break; + case PERF_EVSEL__CONFIG_TERM_CALLGRAPH: + callgraph_buf = term->val.callgraph; + break; + case PERF_EVSEL__CONFIG_TERM_STACK_USER: + dump_size = term->val.stack_user; + break; + case PERF_EVSEL__CONFIG_TERM_INHERIT: + /* + * attr->inherit should has already been set by + * perf_evsel__config. If user explicitly set + * inherit using config terms, override global + * opt->no_inherit setting. + */ + attr->inherit = term->val.inherit ? 1 : 0; + break; + default: + break; + } + } + + /* User explicitly set per-event callgraph, clear the old setting and reset. */ + if ((callgraph_buf != NULL) || (dump_size > 0)) { + + /* parse callgraph parameters */ + if (callgraph_buf != NULL) { + if (!strcmp(callgraph_buf, "no")) { + param.enabled = false; + param.record_mode = CALLCHAIN_NONE; + } else { + param.enabled = true; + if (parse_callchain_record(callgraph_buf, ¶m)) { + pr_err("per-event callgraph setting for %s failed. 
" + "Apply callgraph global setting for it\n", + evsel->name); + return; + } + } + } + if (dump_size > 0) { + dump_size = round_up(dump_size, sizeof(u64)); + param.dump_size = dump_size; + } + + /* If global callgraph set, clear it */ + if (callchain_param.enabled) + perf_evsel__reset_callgraph(evsel, &callchain_param); + + /* set perf-event callgraph */ + if (param.enabled) + perf_evsel__config_callgraph(evsel, opts, ¶m); + } +} + /* * The enable_on_exec/disabled value strategy: * @@ -688,10 +804,10 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts) evsel->attr.exclude_callchain_user = 1; if (callchain_param.enabled && !evsel->no_aux_samples) - perf_evsel__config_callgraph(evsel, opts); + perf_evsel__config_callgraph(evsel, opts, &callchain_param); if (opts->sample_intr_regs) { - attr->sample_regs_intr = PERF_REGS_MASK; + attr->sample_regs_intr = opts->sample_intr_regs; perf_evsel__set_sample_bit(evsel, REGS_INTR); } @@ -706,7 +822,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts) */ if (opts->sample_time && (!perf_missing_features.sample_id_all && - (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu))) + (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu || + opts->sample_time_set))) perf_evsel__set_sample_bit(evsel, TIME); if (opts->raw_samples && !evsel->no_aux_samples) { @@ -735,6 +852,9 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts) attr->mmap2 = track && !perf_missing_features.mmap2; attr->comm = track; + if (opts->record_switch_events) + attr->context_switch = track; + if (opts->sample_transaction) perf_evsel__set_sample_bit(evsel, TRANSACTION); @@ -771,6 +891,15 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts) attr->use_clockid = 1; attr->clockid = opts->clockid; } + + if (evsel->precise_max) + perf_event_attr__set_max_precise_ip(attr); + + /* + * Apply event specific term settings, + * it overloads any global configuration. 
+ */ + apply_config_terms(evsel, opts); } static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) @@ -814,14 +943,44 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthrea return 0; } -int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, - const char *filter) +int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads, + const char *filter) { return perf_evsel__run_ioctl(evsel, ncpus, nthreads, PERF_EVENT_IOC_SET_FILTER, (void *)filter); } +int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter) +{ + char *new_filter = strdup(filter); + + if (new_filter != NULL) { + free(evsel->filter); + evsel->filter = new_filter; + return 0; + } + + return -1; +} + +int perf_evsel__append_filter(struct perf_evsel *evsel, + const char *op, const char *filter) +{ + char *new_filter; + + if (evsel->filter == NULL) + return perf_evsel__set_filter(evsel, filter); + + if (asprintf(&new_filter,"(%s) %s (%s)", evsel->filter, op, filter) > 0) { + free(evsel->filter); + evsel->filter = new_filter; + return 0; + } + + return -1; +} + int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads) { return perf_evsel__run_ioctl(evsel, ncpus, nthreads, @@ -851,19 +1010,6 @@ int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) return 0; } -void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus) -{ - memset(evsel->counts, 0, (sizeof(*evsel->counts) + - (ncpus * sizeof(struct perf_counts_values)))); -} - -int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus) -{ - evsel->counts = zalloc((sizeof(*evsel->counts) + - (ncpus * sizeof(struct perf_counts_values)))); - return evsel->counts != NULL ? 0 : -ENOMEM; -} - static void perf_evsel__free_fd(struct perf_evsel *evsel) { xyarray__delete(evsel->fd); @@ -877,6 +1023,16 @@ static void perf_evsel__free_id(struct perf_evsel *evsel) zfree(&evsel->id); } +static void perf_evsel__free_config_terms(struct perf_evsel *evsel) +{ + struct perf_evsel_config_term *term, *h; + + list_for_each_entry_safe(term, h, &evsel->config_terms, list) { + list_del(&term->list); + free(term); + } +} + void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) { int cpu, thread; @@ -891,17 +1047,17 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) } } -void perf_evsel__free_counts(struct perf_evsel *evsel) -{ - zfree(&evsel->counts); -} - void perf_evsel__exit(struct perf_evsel *evsel) { assert(list_empty(&evsel->node)); + assert(evsel->evlist == NULL); perf_evsel__free_fd(evsel); perf_evsel__free_id(evsel); + perf_evsel__free_config_terms(evsel); close_cgroup(evsel->cgrp); + cpu_map__put(evsel->cpus); + cpu_map__put(evsel->own_cpus); + thread_map__put(evsel->threads); zfree(&evsel->group_name); zfree(&evsel->name); perf_evsel__object.fini(evsel); @@ -913,7 +1069,7 @@ void perf_evsel__delete(struct perf_evsel *evsel) free(evsel); } -void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, +void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread, struct perf_counts_values *count) { struct perf_counts_values tmp; @@ -925,8 +1081,8 @@ void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, tmp = evsel->prev_raw_counts->aggr; evsel->prev_raw_counts->aggr = *count; } else { - tmp = evsel->prev_raw_counts->cpu[cpu]; - evsel->prev_raw_counts->cpu[cpu] = *count; + tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread); + 
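
perf_evsel__set_filter() now only stashes a copy of the filter string (it is applied later through perf_evsel__apply_filter(), see perf_evlist__apply_filters() above), and perf_evsel__append_filter() combines filters with an operator; a sketch:

	if (perf_evsel__set_filter(evsel, "common_pid != 42"))
		return -1;
	if (perf_evsel__append_filter(evsel, "&&", "prev_state == 0"))
		return -1;
	/* evsel->filter is now "(common_pid != 42) && (prev_state == 0)" */
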
*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count; } count->val = count->val - tmp.val; @@ -954,20 +1110,18 @@ void perf_counts_values__scale(struct perf_counts_values *count, *pscaled = scaled; } -int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread, - perf_evsel__read_cb_t cb) +int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, + struct perf_counts_values *count) { - struct perf_counts_values count; - - memset(&count, 0, sizeof(count)); + memset(count, 0, sizeof(*count)); if (FD(evsel, cpu, thread) < 0) return -EINVAL; - if (readn(FD(evsel, cpu, thread), &count, sizeof(count)) < 0) + if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0) return -errno; - return cb(evsel, cpu, thread, &count); + return 0; } int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, @@ -979,15 +1133,15 @@ int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, if (FD(evsel, cpu, thread) < 0) return -EINVAL; - if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0) + if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0) return -ENOMEM; if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0) return -errno; - perf_evsel__compute_deltas(evsel, cpu, &count); + perf_evsel__compute_deltas(evsel, cpu, thread, &count); perf_counts_values__scale(&count, scale, NULL); - evsel->counts->cpu[cpu] = count; + *perf_counts(evsel->counts, cpu, thread) = count; return 0; } @@ -1037,7 +1191,7 @@ static void __p_sample_type(char *buf, size_t size, u64 value) bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU), bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW), bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER), - bit_name(IDENTIFIER), bit_name(REGS_INTR), + bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC), { .name = NULL, } }; #undef bit_name @@ -1058,7 +1212,7 @@ static void __p_read_format(char *buf, size_t size, u64 value) #define BUF_SIZE 1024 -#define p_hex(val) snprintf(buf, BUF_SIZE, "%"PRIx64, (uint64_t)(val)) +#define p_hex(val) snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val)) #define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val)) #define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val)) #define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val) @@ -1112,15 +1266,18 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr, PRINT_ATTRf(mmap2, p_unsigned); PRINT_ATTRf(comm_exec, p_unsigned); PRINT_ATTRf(use_clockid, p_unsigned); + PRINT_ATTRf(context_switch, p_unsigned); PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned); PRINT_ATTRf(bp_type, p_unsigned); PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex); PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex); + PRINT_ATTRf(branch_sample_type, p_unsigned); PRINT_ATTRf(sample_regs_user, p_hex); PRINT_ATTRf(sample_stack_user, p_unsigned); PRINT_ATTRf(clockid, p_signed); PRINT_ATTRf(sample_regs_intr, p_hex); + PRINT_ATTRf(aux_watermark, p_unsigned); return ret; } @@ -1183,7 +1340,7 @@ retry_sample_id: int group_fd; if (!evsel->cgrp && !evsel->system_wide) - pid = threads->map[thread]; + pid = thread_map__pid(threads, thread); group_fd = get_group_fd(evsel, cpu, thread); retry_open: @@ -1200,6 +1357,22 @@ retry_open: err); goto try_fallback; } + + if (evsel->bpf_fd >= 0) { + int evt_fd = FD(evsel, cpu, thread); + int bpf_fd = evsel->bpf_fd; + + err = ioctl(evt_fd, + PERF_EVENT_IOC_SET_BPF, + bpf_fd); + if (err && errno != EEXIST) { + 
pr_err("failed to attach bpf fd %d: %s\n", + bpf_fd, strerror(errno)); + err = -EINVAL; + goto out_close; + } + } + set_rlimit = NO_CHANGE; /* @@ -2091,8 +2264,13 @@ int perf_evsel__fprintf(struct perf_evsel *evsel, printed += perf_event_attr__fprintf(fp, &evsel->attr, __print_attr__fprintf, &first); } else if (details->freq) { - printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64, - (u64)evsel->attr.sample_freq); + const char *term = "sample_freq"; + + if (!evsel->attr.freq) + term = "sample_period"; + + printed += comma_fprintf(fp, &first, " %s=%" PRIu64, + term, (u64)evsel->attr.sample_freq); } out: fputc('\n', fp); @@ -2148,7 +2326,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, case EMFILE: return scnprintf(msg, size, "%s", "Too many events are opened.\n" - "Try again after reducing the number of events."); + "Probably the maximum number of open file descriptors has been reached.\n" + "Hint: Try again after reducing the number of events.\n" + "Hint: Try increasing the limit with 'ulimit -n '"); case ENODEV: if (target->cpu_list) return scnprintf(msg, size, "%s", diff --git a/kernel/tools/perf/util/evsel.h b/kernel/tools/perf/util/evsel.h index e486151b0..0e49bd742 100644 --- a/kernel/tools/perf/util/evsel.h +++ b/kernel/tools/perf/util/evsel.h @@ -8,23 +8,8 @@ #include #include "xyarray.h" #include "symbol.h" - -struct perf_counts_values { - union { - struct { - u64 val; - u64 ena; - u64 run; - }; - u64 values[3]; - }; -}; - -struct perf_counts { - s8 scaled; - struct perf_counts_values aggr; - struct perf_counts_values cpu[]; -}; +#include "cpumap.h" +#include "counts.h" struct perf_evsel; @@ -46,8 +31,40 @@ struct perf_sample_id { struct cgroup_sel; +/* + * The 'struct perf_evsel_config_term' is used to pass event + * specific configuration data to perf_evsel__config routine. + * It is allocated within event parsing and attached to + * perf_evsel::config_terms list head. +*/ +enum { + PERF_EVSEL__CONFIG_TERM_PERIOD, + PERF_EVSEL__CONFIG_TERM_FREQ, + PERF_EVSEL__CONFIG_TERM_TIME, + PERF_EVSEL__CONFIG_TERM_CALLGRAPH, + PERF_EVSEL__CONFIG_TERM_STACK_USER, + PERF_EVSEL__CONFIG_TERM_INHERIT, + PERF_EVSEL__CONFIG_TERM_MAX, +}; + +struct perf_evsel_config_term { + struct list_head list; + int type; + union { + u64 period; + u64 freq; + bool time; + char *callgraph; + u64 stack_user; + bool inherit; + } val; +}; + /** struct perf_evsel - event selector * + * @evlist - evlist this evsel is in, if it is in one. + * @node - To insert it into evlist->entries or in other list_heads, say in + * the event parsing routines. * @name - Can be set to retain the original event name passed by the user, * so that when showing results in tools such as 'perf stat', we * show the name used, not some alias. 
@@ -61,6 +78,7 @@ struct cgroup_sel; */ struct perf_evsel { struct list_head node; + struct perf_evlist *evlist; struct perf_event_attr attr; char *filter; struct xyarray *fd; @@ -73,19 +91,21 @@ struct perf_evsel { char *name; double scale; const char *unit; - bool snapshot; struct event_format *tp_format; + off_t id_offset; union { void *priv; - off_t id_offset; u64 db_id; }; struct cgroup_sel *cgrp; void *handler; struct cpu_map *cpus; + struct cpu_map *own_cpus; + struct thread_map *threads; unsigned int sample_size; int id_pos; int is_pos; + bool snapshot; bool supported; bool needs_swap; bool no_aux_samples; @@ -93,13 +113,17 @@ struct perf_evsel { bool system_wide; bool tracking; bool per_pkg; - unsigned long *per_pkg_mask; + bool precise_max; /* parse modifier helper */ int exclude_GH; int nr_members; int sample_read; + unsigned long *per_pkg_mask; struct perf_evsel *leader; char *group_name; + bool cmdline_group_boundary; + struct list_head config_terms; + int bpf_fd; }; union u64_swap { @@ -110,13 +134,22 @@ union u64_swap { struct cpu_map; struct target; struct thread_map; -struct perf_evlist; struct record_opts; +static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel) +{ + return evsel->cpus; +} + +static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel) +{ + return perf_evsel__cpus(evsel)->nr; +} + void perf_counts_values__scale(struct perf_counts_values *count, bool scale, s8 *pscaled); -void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, +void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread, struct perf_counts_values *count); int perf_evsel__object_config(size_t object_size, @@ -132,6 +165,9 @@ static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr) struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx); +/* + * Returns pointer with encoded error via <linux/err.h> interface.
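+ * A hypothetical caller (illustration only) checks the result with the
+ * IS_ERR()/PTR_ERR() helpers, e.g.:
+ *
+ *	evsel = perf_evsel__newtp("sched", "sched_switch");
+ *	if (IS_ERR(evsel))
+ *		return PTR_ERR(evsel);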
+ */ static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name) { return perf_evsel__newtp_idx(sys, name, 0); @@ -170,9 +206,6 @@ const char *perf_evsel__group_name(struct perf_evsel *evsel); int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size); int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads); -int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus); -void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus); -void perf_evsel__free_counts(struct perf_evsel *evsel); void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); void __perf_evsel__set_sample_bit(struct perf_evsel *evsel, @@ -189,8 +222,11 @@ void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, void perf_evsel__set_sample_id(struct perf_evsel *evsel, bool use_sample_identifier); -int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, - const char *filter); +int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter); +int perf_evsel__append_filter(struct perf_evsel *evsel, + const char *op, const char *filter); +int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads, + const char *filter); int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads); int perf_evsel__open_per_cpu(struct perf_evsel *evsel, @@ -236,12 +272,8 @@ static inline bool perf_evsel__match2(struct perf_evsel *e1, (a)->attr.type == (b)->attr.type && \ (a)->attr.config == (b)->attr.config) -typedef int (perf_evsel__read_cb_t)(struct perf_evsel *evsel, - int cpu, int thread, - struct perf_counts_values *count); - -int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread, - perf_evsel__read_cb_t cb); +int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, + struct perf_counts_values *count); int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, int cpu, int thread, bool scale); diff --git a/kernel/tools/perf/util/header.c b/kernel/tools/perf/util/header.c index 918fd8ae2..43838003c 100644 --- a/kernel/tools/perf/util/header.c +++ b/kernel/tools/perf/util/header.c @@ -24,9 +24,6 @@ #include "build-id.h" #include "data.h" -static u32 header_argc; -static const char **header_argv; - /* * magic2 = "PERFILE2" * must be a numerical value to let the endianness @@ -88,6 +85,9 @@ int write_padded(int fd, const void *bf, size_t count, size_t count_aligned) return err; } +#define string_size(str) \ + (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32)) + static int do_write_string(int fd, const char *str) { u32 len, olen; @@ -135,37 +135,6 @@ static char *do_read_string(int fd, struct perf_header *ph) return NULL; } -int -perf_header__set_cmdline(int argc, const char **argv) -{ - int i; - - /* - * If header_argv has already been set, do not override it. - * This allows a command to set the cmdline, parse args and - * then call another builtin function that implements a - * command -- e.g, cmd_kvm calling cmd_record. 
- */ - if (header_argv) - return 0; - - header_argc = (u32)argc; - - /* do not include NULL termination */ - header_argv = calloc(argc, sizeof(char *)); - if (!header_argv) - return -ENOMEM; - - /* - * must copy argv contents because it gets moved - * around during option parsing - */ - for (i = 0; i < argc ; i++) - header_argv[i] = argv[i]; - - return 0; -} - static int write_tracing_data(int fd, struct perf_header *h __maybe_unused, struct perf_evlist *evlist) { @@ -402,8 +371,8 @@ static int write_cmdline(int fd, struct perf_header *h __maybe_unused, { char buf[MAXPATHLEN]; char proc[32]; - u32 i, n; - int ret; + u32 n; + int i, ret; /* * actual path to perf binary @@ -417,7 +386,7 @@ static int write_cmdline(int fd, struct perf_header *h __maybe_unused, buf[ret] = '\0'; /* account for binary path */ - n = header_argc + 1; + n = perf_env.nr_cmdline + 1; ret = do_write(fd, &n, sizeof(n)); if (ret < 0) @@ -427,8 +396,8 @@ if (ret < 0) return ret; - for (i = 0 ; i < header_argc; i++) { - ret = do_write_string(fd, header_argv[i]); + for (i = 0 ; i < perf_env.nr_cmdline; i++) { + ret = do_write_string(fd, perf_env.cmdline_argv[i]); if (ret < 0) return ret; } @@ -441,6 +410,7 @@ "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list" struct cpu_topo { + u32 cpu_nr; u32 core_sib; u32 thread_sib; char **core_siblings; @@ -551,7 +521,7 @@ return NULL; tp = addr; - + tp->cpu_nr = nr; addr += sizeof(*tp); tp->core_siblings = addr; addr += sz; @@ -574,7 +544,7 @@ { struct cpu_topo *tp; u32 i; - int ret; + int ret, j; tp = build_cpu_topology(); if (!tp) @@ -598,6 +568,21 @@ if (ret < 0) break; } + + ret = perf_env__read_cpu_topology_map(&perf_env); + if (ret < 0) + goto done; + + for (j = 0; j < perf_env.nr_cpus_avail; j++) { + ret = do_write(fd, &perf_env.cpu[j].core_id, + sizeof(perf_env.cpu[j].core_id)); + if (ret < 0) + return ret; + ret = do_write(fd, &perf_env.cpu[j].socket_id, + sizeof(perf_env.cpu[j].socket_id)); + if (ret < 0) + return ret; + } done: free_cpu_topo(tp); return ret; @@ -869,6 +854,20 @@ return 0; } +static int write_auxtrace(int fd, struct perf_header *h, + struct perf_evlist *evlist __maybe_unused) +{ + struct perf_session *session; + int err; + + session = container_of(h, struct perf_session, header); + + err = auxtrace_index__write(fd, &session->auxtrace_index); + if (err < 0) + pr_err("Failed to write auxtrace index\n"); + return err; +} + static void print_hostname(struct perf_header *ph, int fd __maybe_unused, FILE *fp) { @@ -909,17 +908,13 @@ FILE *fp) { int nr, i; - char *str; nr = ph->env.nr_cmdline; - str = ph->env.cmdline; fprintf(fp, "# cmdline : "); - for (i = 0; i < nr; i++) { - fprintf(fp, "%s ", str); - str += strlen(str) + 1; - } + for (i = 0; i < nr; i++) + fprintf(fp, "%s ", ph->env.cmdline_argv[i]); fputc('\n', fp); } @@ -928,6 +923,7 @@ { int nr, i; char *str; + int cpu_nr = ph->env.nr_cpus_online; nr = ph->env.nr_sibling_cores; str = ph->env.sibling_cores; @@ -944,6 +940,13 @@ static void 
print_cpu_topology(struct perf_header *ph, int fd __maybe_unused, fprintf(fp, "# sibling threads : %s\n", str); str += strlen(str) + 1; } + + if (ph->env.cpu != NULL) { + for (i = 0; i < cpu_nr; i++) + fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i, + ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id); + } else + fprintf(fp, "# Core ID and Socket ID information is not available\n"); } static void free_event_desc(struct perf_evsel *events) @@ -1049,8 +1052,7 @@ out: free(buf); return events; error: - if (events) - free_event_desc(events); + free_event_desc(events); events = NULL; goto out; } @@ -1151,6 +1153,12 @@ static void print_branch_stack(struct perf_header *ph __maybe_unused, fprintf(fp, "# contains samples with branch stack\n"); } +static void print_auxtrace(struct perf_header *ph __maybe_unused, + int fd __maybe_unused, FILE *fp) +{ + fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n"); +} + static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused, FILE *fp) { @@ -1218,9 +1226,8 @@ static int __event_process_build_id(struct build_id_event *bev, struct perf_session *session) { int err = -1; - struct dsos *dsos; struct machine *machine; - u16 misc; + u16 cpumode; struct dso *dso; enum dso_kernel_type dso_type; @@ -1228,39 +1235,37 @@ static int __event_process_build_id(struct build_id_event *bev, if (!machine) goto out; - misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - switch (misc) { + switch (cpumode) { case PERF_RECORD_MISC_KERNEL: dso_type = DSO_TYPE_KERNEL; - dsos = &machine->kernel_dsos; break; case PERF_RECORD_MISC_GUEST_KERNEL: dso_type = DSO_TYPE_GUEST_KERNEL; - dsos = &machine->kernel_dsos; break; case PERF_RECORD_MISC_USER: case PERF_RECORD_MISC_GUEST_USER: dso_type = DSO_TYPE_USER; - dsos = &machine->user_dsos; break; default: goto out; } - dso = __dsos__findnew(dsos, filename); + dso = machine__findnew_dso(machine, filename); if (dso != NULL) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; dso__set_build_id(dso, &bev->build_id); - if (!is_kernel_module(filename)) + if (!is_kernel_module(filename, cpumode)) dso->kernel = dso_type; build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); pr_debug("build id event received for %s: %s\n", dso->long_name, sbuild_id); + dso__put(dso); } err = 0; @@ -1426,7 +1431,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused, if (ph->needs_swap) nr = bswap_32(nr); - ph->env.nr_cpus_online = nr; + ph->env.nr_cpus_avail = nr; ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) @@ -1435,7 +1440,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused, if (ph->needs_swap) nr = bswap_32(nr); - ph->env.nr_cpus_avail = nr; + ph->env.nr_cpus_online = nr; return 0; } @@ -1525,14 +1530,13 @@ process_event_desc(struct perf_file_section *section __maybe_unused, return 0; } -static int process_cmdline(struct perf_file_section *section __maybe_unused, +static int process_cmdline(struct perf_file_section *section, struct perf_header *ph, int fd, void *data __maybe_unused) { ssize_t ret; - char *str; - u32 nr, i; - struct strbuf sb; + char *str, *cmdline = NULL, **argv = NULL; + u32 nr, i, len = 0; ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) @@ -1542,26 +1546,36 @@ static int process_cmdline(struct perf_file_section *section __maybe_unused, nr = bswap_32(nr); ph->env.nr_cmdline = nr; - strbuf_init(&sb, 128); + + cmdline = zalloc(section->size + nr + 1); + if (!cmdline) + 
return -1; + + argv = zalloc(sizeof(char *) * (nr + 1)); + if (!argv) + goto error; for (i = 0; i < nr; i++) { str = do_read_string(fd, ph); if (!str) goto error; - /* include a NULL character at the end */ - strbuf_add(&sb, str, strlen(str) + 1); + argv[i] = cmdline + len; + memcpy(argv[i], str, strlen(str) + 1); + len += strlen(str) + 1; free(str); } - ph->env.cmdline = strbuf_detach(&sb, NULL); + ph->env.cmdline = cmdline; + ph->env.cmdline_argv = (const char **) argv; return 0; error: - strbuf_release(&sb); + free(argv); + free(cmdline); return -1; } -static int process_cpu_topology(struct perf_file_section *section __maybe_unused, +static int process_cpu_topology(struct perf_file_section *section, struct perf_header *ph, int fd, void *data __maybe_unused) { @@ -1569,15 +1583,22 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused u32 nr, i; char *str; struct strbuf sb; + int cpu_nr = ph->env.nr_cpus_online; + u64 size = 0; + + ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu)); + if (!ph->env.cpu) + return -1; ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) - return -1; + goto free_cpu; if (ph->needs_swap) nr = bswap_32(nr); ph->env.nr_sibling_cores = nr; + size += sizeof(u32); strbuf_init(&sb, 128); for (i = 0; i < nr; i++) { @@ -1587,6 +1608,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused /* include a NULL character at the end */ strbuf_add(&sb, str, strlen(str) + 1); + size += string_size(str); free(str); } ph->env.sibling_cores = strbuf_detach(&sb, NULL); @@ -1599,6 +1621,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused nr = bswap_32(nr); ph->env.nr_sibling_threads = nr; + size += sizeof(u32); for (i = 0; i < nr; i++) { str = do_read_string(fd, ph); @@ -1607,13 +1630,57 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused /* include a NULL character at the end */ strbuf_add(&sb, str, strlen(str) + 1); + size += string_size(str); free(str); } ph->env.sibling_threads = strbuf_detach(&sb, NULL); + + /* + * The header may be from old perf, + * which doesn't include core id and socket id information. + */ + if (section->size <= size) { + zfree(&ph->env.cpu); + return 0; + } + + for (i = 0; i < (u32)cpu_nr; i++) { + ret = readn(fd, &nr, sizeof(nr)); + if (ret != sizeof(nr)) + goto free_cpu; + + if (ph->needs_swap) + nr = bswap_32(nr); + + if (nr > (u32)cpu_nr) { + pr_debug("core_id number is too big." + "You may need to upgrade the perf tool.\n"); + goto free_cpu; + } + ph->env.cpu[i].core_id = nr; + + ret = readn(fd, &nr, sizeof(nr)); + if (ret != sizeof(nr)) + goto free_cpu; + + if (ph->needs_swap) + nr = bswap_32(nr); + + if (nr > (u32)cpu_nr) { + pr_debug("socket_id number is too big." 
+ "You may need to upgrade the perf tool.\n"); + goto free_cpu; + } + + ph->env.cpu[i].socket_id = nr; + } + return 0; error: strbuf_release(&sb); +free_cpu: + zfree(&ph->env.cpu); return -1; } @@ -1716,6 +1783,9 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused /* include a NULL character at the end */ strbuf_add(&sb, "", 1); + if (!strcmp(name, "msr")) + ph->env.msr_pmu_type = type; + free(name); pmu_num--; } @@ -1821,6 +1891,22 @@ out_free: return ret; } +static int process_auxtrace(struct perf_file_section *section, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + struct perf_session *session; + int err; + + session = container_of(ph, struct perf_session, header); + + err = auxtrace_index__process(fd, section->size, session, + ph->needs_swap); + if (err < 0) + pr_err("Failed to process auxtrace index\n"); + return err; +} + struct feature_ops { int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); void (*print)(struct perf_header *h, int fd, FILE *fp); @@ -1861,6 +1947,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { FEAT_OPA(HEADER_BRANCH_STACK, branch_stack), FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings), FEAT_OPP(HEADER_GROUP_DESC, group_desc), + FEAT_OPP(HEADER_AUXTRACE, auxtrace), }; struct header_print_data { @@ -2476,6 +2563,8 @@ int perf_session__read_header(struct perf_session *session) if (session->evlist == NULL) return -ENOMEM; + session->evlist->env = &header->env; + session->machines.host.env = &header->env; if (perf_data_file__is_pipe(file)) return perf_header__read_pipe(session); diff --git a/kernel/tools/perf/util/header.h b/kernel/tools/perf/util/header.h index 3bb90ac17..05f27cb6b 100644 --- a/kernel/tools/perf/util/header.h +++ b/kernel/tools/perf/util/header.h @@ -7,7 +7,7 @@ #include #include #include "event.h" - +#include "env.h" enum { HEADER_RESERVED = 0, /* always cleared */ @@ -30,6 +30,7 @@ enum { HEADER_BRANCH_STACK, HEADER_PMU_MAPPINGS, HEADER_GROUP_DESC, + HEADER_AUXTRACE, HEADER_LAST_FEATURE, HEADER_FEAT_BITS = 256, }; @@ -65,30 +66,6 @@ struct perf_header; int perf_file_header__read(struct perf_file_header *header, struct perf_header *ph, int fd); -struct perf_session_env { - char *hostname; - char *os_release; - char *version; - char *arch; - int nr_cpus_online; - int nr_cpus_avail; - char *cpu_desc; - char *cpuid; - unsigned long long total_mem; - - int nr_cmdline; - int nr_sibling_cores; - int nr_sibling_threads; - int nr_numa_nodes; - int nr_pmu_mappings; - int nr_groups; - char *cmdline; - char *sibling_cores; - char *sibling_threads; - char *numa_nodes; - char *pmu_mappings; -}; - struct perf_header { enum perf_header_version version; bool needs_swap; @@ -96,7 +73,7 @@ struct perf_header { u64 data_size; u64 feat_offset; DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); - struct perf_session_env env; + struct perf_env env; }; struct perf_evlist; diff --git a/kernel/tools/perf/util/hist.c b/kernel/tools/perf/util/hist.c index cc22b9158..4fd37d670 100644 --- a/kernel/tools/perf/util/hist.c +++ b/kernel/tools/perf/util/hist.c @@ -15,6 +15,8 @@ static bool hists__filter_entry_by_thread(struct hists *hists, struct hist_entry *he); static bool hists__filter_entry_by_symbol(struct hists *hists, struct hist_entry *he); +static bool hists__filter_entry_by_socket(struct hists *hists, + struct hist_entry *he); u16 hists__col_len(struct hists *hists, enum hist_column col) { @@ -130,6 +132,18 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h) 
hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen); } + + if (h->mem_info->iaddr.sym) { + symlen = (int)h->mem_info->iaddr.sym->namelen + 4 + + unresolved_col_width + 2; + hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, + symlen); + } else { + symlen = unresolved_col_width + 4 + 2; + hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, + symlen); + } + if (h->mem_info->daddr.map) { symlen = dso__name_len(h->mem_info->daddr.map->dso); hists__new_col_len(hists, HISTC_MEM_DADDR_DSO, @@ -141,9 +155,12 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h) } else { symlen = unresolved_col_width + 4 + 2; hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen); + hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen); hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO); } + hists__new_col_len(hists, HISTC_CPU, 3); + hists__new_col_len(hists, HISTC_SOCKET, 6); hists__new_col_len(hists, HISTC_MEM_LOCKED, 6); hists__new_col_len(hists, HISTC_MEM_TLB, 22); hists__new_col_len(hists, HISTC_MEM_SNOOP, 12); @@ -151,6 +168,12 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h) hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12); hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12); + if (h->srcline) + hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline)); + + if (h->srcfile) + hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile)); + if (h->transaction) hists__new_col_len(hists, HISTC_TRANSACTION, hist_entry__transaction_len()); @@ -313,8 +336,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template, memset(&he->stat, 0, sizeof(he->stat)); } - if (he->ms.map) - he->ms.map->referenced = true; + map__get(he->ms.map); if (he->branch_info) { /* @@ -324,6 +346,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template, */ he->branch_info = malloc(sizeof(*he->branch_info)); if (he->branch_info == NULL) { + map__zput(he->ms.map); free(he->stat_acc); free(he); return NULL; @@ -332,17 +355,13 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template, memcpy(he->branch_info, template->branch_info, sizeof(*he->branch_info)); - if (he->branch_info->from.map) - he->branch_info->from.map->referenced = true; - if (he->branch_info->to.map) - he->branch_info->to.map->referenced = true; + map__get(he->branch_info->from.map); + map__get(he->branch_info->to.map); } if (he->mem_info) { - if (he->mem_info->iaddr.map) - he->mem_info->iaddr.map->referenced = true; - if (he->mem_info->daddr.map) - he->mem_info->daddr.map->referenced = true; + map__get(he->mem_info->iaddr.map); + map__get(he->mem_info->daddr.map); } if (symbol_conf.use_callchain) @@ -362,10 +381,10 @@ static u8 symbol__parent_filter(const struct symbol *parent) return 0; } -static struct hist_entry *add_hist_entry(struct hists *hists, - struct hist_entry *entry, - struct addr_location *al, - bool sample_self) +static struct hist_entry *hists__findnew_entry(struct hists *hists, + struct hist_entry *entry, + struct addr_location *al, + bool sample_self) { struct rb_node **p; struct rb_node *parent = NULL; @@ -407,9 +426,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists, * the history counter to increment. 
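* (map__put() drops the reference on the stale map; map__get() takes one on its replacement.)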
*/ if (he->ms.map != entry->ms.map) { - he->ms.map = entry->ms.map; - if (he->ms.map) - he->ms.map->referenced = true; + map__put(he->ms.map); + he->ms.map = map__get(entry->ms.map); } goto out; } @@ -451,6 +469,7 @@ struct hist_entry *__hists__add_entry(struct hists *hists, .map = al->map, .sym = al->sym, }, + .socket = al->socket, .cpu = al->cpu, .cpumode = al->cpumode, .ip = al->addr, @@ -468,7 +487,7 @@ struct hist_entry *__hists__add_entry(struct hists *hists, .transaction = transaction, }; - return add_hist_entry(hists, &entry, al, sample_self); + return hists__findnew_entry(hists, &entry, al, sample_self); } static int @@ -548,9 +567,9 @@ iter_finish_mem_entry(struct hist_entry_iter *iter, out: /* - * We don't need to free iter->priv (mem_info) here since - * the mem info was either already freed in add_hist_entry() or - * passed to a new hist entry by hist_entry__new(). + * We don't need to free iter->priv (mem_info) here since the mem info + * was either already freed in hists__findnew_entry() or passed to a + * new hist entry by hist_entry__new(). */ iter->priv = NULL; @@ -623,7 +642,8 @@ iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *a * and not events sampled. Thus we use a pseudo period of 1. */ he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL, - 1, 1, 0, true); + 1, bi->flags.cycles ? bi->flags.cycles : 1, + 0, true); if (he == NULL) return -ENOMEM; @@ -688,7 +708,7 @@ iter_finish_normal_entry(struct hist_entry_iter *iter, } static int -iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused, +iter_prepare_cumulative_entry(struct hist_entry_iter *iter, struct addr_location *al __maybe_unused) { struct hist_entry **he_cache; @@ -700,7 +720,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused, * cumulated only one time to prevent entries more than 100% * overhead. 
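* With iter->max_stack == PERF_MAX_STACK_DEPTH (127), for example, the cache is bounded at 128 entries: the sample itself plus one per callchain level.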
*/ - he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1)); + he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1)); if (he_cache == NULL) return -ENOMEM; @@ -765,6 +785,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter, struct hist_entry **he_cache = iter->priv; struct hist_entry *he; struct hist_entry he_tmp = { + .hists = evsel__hists(evsel), .cpu = al->cpu, .thread = al->thread, .comm = thread__comm(al->thread), @@ -851,18 +872,16 @@ const struct hist_iter_ops hist_iter_cumulative = { }; int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, - struct perf_evsel *evsel, struct perf_sample *sample, int max_stack_depth, void *arg) { int err, err2; - err = sample__resolve_callchain(sample, &iter->parent, evsel, al, - max_stack_depth); + err = sample__resolve_callchain(iter->sample, &iter->parent, + iter->evsel, al, max_stack_depth); if (err) return err; - iter->evsel = evsel; - iter->sample = sample; + iter->max_stack = max_stack_depth; err = iter->ops->prepare_entry(iter, al); if (err) @@ -937,10 +956,24 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) void hist_entry__delete(struct hist_entry *he) { thread__zput(he->thread); - zfree(&he->branch_info); - zfree(&he->mem_info); + map__zput(he->ms.map); + + if (he->branch_info) { + map__zput(he->branch_info->from.map); + map__zput(he->branch_info->to.map); + zfree(&he->branch_info); + } + + if (he->mem_info) { + map__zput(he->mem_info->iaddr.map); + map__zput(he->mem_info->daddr.map); + zfree(&he->mem_info); + } + zfree(&he->stat_acc); free_srcline(he->srcline); + if (he->srcfile && he->srcfile[0]) + free(he->srcfile); free_callchain(he->callchain); free(he); } @@ -1011,6 +1044,7 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he) hists__filter_entry_by_dso(hists, he); hists__filter_entry_by_thread(hists, he); hists__filter_entry_by_symbol(hists, he); + hists__filter_entry_by_socket(hists, he); } void hists__collapse_resort(struct hists *hists, struct ui_progress *prog) @@ -1096,13 +1130,14 @@ void hists__inc_stats(struct hists *hists, struct hist_entry *h) static void __hists__insert_output_entry(struct rb_root *entries, struct hist_entry *he, - u64 min_callchain_hits) + u64 min_callchain_hits, + bool use_callchain) { struct rb_node **p = &entries->rb_node; struct rb_node *parent = NULL; struct hist_entry *iter; - if (symbol_conf.use_callchain) + if (use_callchain) callchain_param.sort(&he->sorted_chain, he->callchain, min_callchain_hits, &callchain_param); @@ -1126,6 +1161,13 @@ void hists__output_resort(struct hists *hists, struct ui_progress *prog) struct rb_node *next; struct hist_entry *n; u64 min_callchain_hits; + struct perf_evsel *evsel = hists_to_evsel(hists); + bool use_callchain; + + if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph) + use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN; + else + use_callchain = symbol_conf.use_callchain; min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100); @@ -1144,7 +1186,7 @@ void hists__output_resort(struct hists *hists, struct ui_progress *prog) n = rb_entry(next, struct hist_entry, rb_node_in); next = rb_next(&n->rb_node_in); - __hists__insert_output_entry(&hists->entries, n, min_callchain_hits); + __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain); hists__inc_stats(hists, n); if (!n->filtered) @@ -1163,7 +1205,7 @@ static void hists__remove_entry_filter(struct hists *hists, 
struct hist_entry *h return; /* force fold unfiltered entry for simplicity */ - h->ms.unfolded = false; + h->unfolded = false; h->row_offset = 0; h->nr_rows = 0; @@ -1271,6 +1313,37 @@ void hists__filter_by_symbol(struct hists *hists) } } +static bool hists__filter_entry_by_socket(struct hists *hists, + struct hist_entry *he) +{ + if ((hists->socket_filter > -1) && + (he->socket != hists->socket_filter)) { + he->filtered |= (1 << HIST_FILTER__SOCKET); + return true; + } + + return false; +} + +void hists__filter_by_socket(struct hists *hists) +{ + struct rb_node *nd; + + hists->stats.nr_non_filtered_samples = 0; + + hists__reset_filter_stats(hists); + hists__reset_col_len(hists); + + for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { + struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); + + if (hists__filter_entry_by_socket(hists, h)) + continue; + + hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET); + } +} + void events_stats__inc(struct events_stats *stats, u32 type) { ++stats->nr_events[0]; @@ -1411,6 +1484,39 @@ int hists__link(struct hists *leader, struct hists *other) return 0; } +void hist__account_cycles(struct branch_stack *bs, struct addr_location *al, + struct perf_sample *sample, bool nonany_branch_mode) +{ + struct branch_info *bi; + + /* If we have branch cycles always annotate them. */ + if (bs && bs->nr && bs->entries[0].flags.cycles) { + int i; + + bi = sample__resolve_bstack(sample, al); + if (bi) { + struct addr_map_symbol *prev = NULL; + + /* + * Ignore errors, still want to process the + * other entries. + * + * For non standard branch modes always + * force no IPC (prev == NULL) + * + * Note that perf stores branches reversed from + * program order! + */ + for (i = bs->nr - 1; i >= 0; i--) { + addr_map_symbol__account_cycles(&bi[i].from, + nonany_branch_mode ? 
NULL : prev, + bi[i].flags.cycles); + prev = &bi[i].to; + } + free(bi); + } + } +} size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp) { @@ -1463,6 +1569,7 @@ static int hists_evsel__init(struct perf_evsel *evsel) hists->entries_collapsed = RB_ROOT; hists->entries = RB_ROOT; pthread_mutex_init(&hists->lock, NULL); + hists->socket_filter = -1; return 0; } diff --git a/kernel/tools/perf/util/hist.h b/kernel/tools/perf/util/hist.h index 9f31b89a5..a48a2078d 100644 --- a/kernel/tools/perf/util/hist.h +++ b/kernel/tools/perf/util/hist.h @@ -20,6 +20,7 @@ enum hist_filter { HIST_FILTER__SYMBOL, HIST_FILTER__GUEST, HIST_FILTER__HOST, + HIST_FILTER__SOCKET, }; enum hist_column { @@ -29,7 +30,9 @@ enum hist_column { HISTC_COMM, HISTC_PARENT, HISTC_CPU, + HISTC_SOCKET, HISTC_SRCLINE, + HISTC_SRCFILE, HISTC_MISPREDICT, HISTC_IN_TX, HISTC_ABORT, @@ -46,7 +49,9 @@ enum hist_column { HISTC_MEM_LVL, HISTC_MEM_SNOOP, HISTC_MEM_DCACHELINE, + HISTC_MEM_IADDR_SYMBOL, HISTC_TRANSACTION, + HISTC_CYCLES, HISTC_NR_COLS, /* Last entry */ }; @@ -68,6 +73,7 @@ struct hists { struct events_stats stats; u64 event_stream; u16 col_len[HISTC_NR_COLS]; + int socket_filter; }; struct hist_entry_iter; @@ -85,6 +91,7 @@ struct hist_entry_iter { int curr; bool hide_unresolved; + int max_stack; struct perf_evsel *evsel; struct perf_sample *sample; @@ -111,7 +118,6 @@ struct hist_entry *__hists__add_entry(struct hists *hists, u64 weight, u64 transaction, bool sample_self); int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, - struct perf_evsel *evsel, struct perf_sample *sample, int max_stack_depth, void *arg); int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right); @@ -143,11 +149,12 @@ size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp); void hists__filter_by_dso(struct hists *hists); void hists__filter_by_thread(struct hists *hists); void hists__filter_by_symbol(struct hists *hists); +void hists__filter_by_socket(struct hists *hists); static inline bool hists__has_filter(struct hists *hists) { return hists->thread_filter || hists->dso_filter || - hists->symbol_filter_str; + hists->symbol_filter_str || (hists->socket_filter > -1); } u16 hists__col_len(struct hists *hists, enum hist_column col); @@ -312,7 +319,7 @@ int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel, int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, struct hist_browser_timer *hbt, float min_pcnt, - struct perf_session_env *env); + struct perf_env *env); int script_browse(const char *script_opt); #else static inline @@ -320,7 +327,7 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused, const char *help __maybe_unused, struct hist_browser_timer *hbt __maybe_unused, float min_pcnt __maybe_unused, - struct perf_session_env *env __maybe_unused) + struct perf_env *env __maybe_unused) { return 0; } @@ -350,6 +357,9 @@ static inline int script_browse(const char *script_opt __maybe_unused) unsigned int hists__sort_list_width(struct hists *hists); +void hist__account_cycles(struct branch_stack *bs, struct addr_location *al, + struct perf_sample *sample, bool nonany_branch_mode); + struct option; int parse_filter_percentage(const struct option *opt __maybe_unused, const char *arg, int unset __maybe_unused); diff --git a/kernel/tools/perf/util/include/dwarf-regs.h b/kernel/tools/perf/util/include/dwarf-regs.h index 8f149655f..07c644ed6 100644 --- a/kernel/tools/perf/util/include/dwarf-regs.h +++ 
b/kernel/tools/perf/util/include/dwarf-regs.h @@ -5,4 +5,12 @@ const char *get_arch_regstr(unsigned int n); #endif +#ifdef HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET +/* + * Arch should support fetching the offset of a register in pt_regs + * by its name. See kernel's regs_query_register_offset in + * arch/xxx/kernel/ptrace.c. + */ +int regs_query_register_offset(const char *name); +#endif #endif diff --git a/kernel/tools/perf/util/include/linux/kernel.h b/kernel/tools/perf/util/include/linux/kernel.h deleted file mode 100644 index 09e8e7aea..000000000 --- a/kernel/tools/perf/util/include/linux/kernel.h +++ /dev/null @@ -1,107 +0,0 @@ -#ifndef PERF_LINUX_KERNEL_H_ -#define PERF_LINUX_KERNEL_H_ - -#include -#include -#include -#include - -#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) - -#define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1) -#define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) - -#ifndef offsetof -#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) -#endif - -#ifndef container_of -/** - * container_of - cast a member of a structure out to the containing structure - * @ptr: the pointer to the member. - * @type: the type of the container struct this is embedded in. - * @member: the name of the member within the struct. - * - */ -#define container_of(ptr, type, member) ({ \ - const typeof(((type *)0)->member) * __mptr = (ptr); \ - (type *)((char *)__mptr - offsetof(type, member)); }) -#endif - -#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) - -#ifndef max -#define max(x, y) ({ \ - typeof(x) _max1 = (x); \ - typeof(y) _max2 = (y); \ - (void) (&_max1 == &_max2); \ - _max1 > _max2 ? _max1 : _max2; }) -#endif - -#ifndef min -#define min(x, y) ({ \ - typeof(x) _min1 = (x); \ - typeof(y) _min2 = (y); \ - (void) (&_min1 == &_min2); \ - _min1 < _min2 ? _min1 : _min2; }) -#endif - -#ifndef roundup -#define roundup(x, y) ( \ -{ \ - const typeof(y) __y = y; \ - (((x) + (__y - 1)) / __y) * __y; \ -} \ -) -#endif - -#ifndef BUG_ON -#ifdef NDEBUG -#define BUG_ON(cond) do { if (cond) {} } while (0) -#else -#define BUG_ON(cond) assert(!(cond)) -#endif -#endif - -/* - * Both need more care to handle endianness - * (Don't use bitmap_copy_le() for now) - */ -#define cpu_to_le64(x) (x) -#define cpu_to_le32(x) (x) - -static inline int -vscnprintf(char *buf, size_t size, const char *fmt, va_list args) -{ - int i; - ssize_t ssize = size; - - i = vsnprintf(buf, size, fmt, args); - - return (i >= ssize) ? (ssize - 1) : i; -} - -static inline int scnprintf(char * buf, size_t size, const char * fmt, ...) -{ - va_list args; - ssize_t ssize = size; - int i; - - va_start(args, fmt); - i = vsnprintf(buf, size, fmt, args); - va_end(args); - - return (i >= ssize) ? (ssize - 1) : i; -} - -/* - * This looks more complex than it should be. But we need to - * get the type for the ~ right in round_down (it needs to be - * as wide as the result!), and we want to evaluate the macro - * arguments just once each. 
- */ -#define __round_mask(x, y) ((__typeof__(x))((y)-1)) -#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) -#define round_down(x, y) ((x) & ~__round_mask(x, y)) - -#endif diff --git a/kernel/tools/perf/util/include/linux/list.h b/kernel/tools/perf/util/include/linux/list.h deleted file mode 100644 index 76ddbc726..000000000 --- a/kernel/tools/perf/util/include/linux/list.h +++ /dev/null @@ -1,29 +0,0 @@ -#include -#include - -#include "../../../../include/linux/list.h" - -#ifndef PERF_LIST_H -#define PERF_LIST_H -/** - * list_del_range - deletes range of entries from list. - * @begin: first element in the range to delete from the list. - * @end: last element in the range to delete from the list. - * Note: list_empty on the range of entries does not return true after this, - * the entries is in an undefined state. - */ -static inline void list_del_range(struct list_head *begin, - struct list_head *end) -{ - begin->prev->next = end->next; - end->next->prev = begin->prev; -} - -/** - * list_for_each_from - iterate over a list from one of its nodes - * @pos: the &struct list_head to use as a loop cursor, from where to start - * @head: the head for your list. - */ -#define list_for_each_from(pos, head) \ - for (; pos != (head); pos = pos->next) -#endif diff --git a/kernel/tools/perf/util/include/linux/poison.h b/kernel/tools/perf/util/include/linux/poison.h deleted file mode 100644 index fef6dbc9c..000000000 --- a/kernel/tools/perf/util/include/linux/poison.h +++ /dev/null @@ -1 +0,0 @@ -#include "../../../../include/linux/poison.h" diff --git a/kernel/tools/perf/util/include/linux/rbtree.h b/kernel/tools/perf/util/include/linux/rbtree.h deleted file mode 100644 index 2a030c5af..000000000 --- a/kernel/tools/perf/util/include/linux/rbtree.h +++ /dev/null @@ -1,2 +0,0 @@ -#include -#include "../../../../include/linux/rbtree.h" diff --git a/kernel/tools/perf/util/include/linux/rbtree_augmented.h b/kernel/tools/perf/util/include/linux/rbtree_augmented.h deleted file mode 100644 index 9d6fcdf17..000000000 --- a/kernel/tools/perf/util/include/linux/rbtree_augmented.h +++ /dev/null @@ -1,2 +0,0 @@ -#include -#include "../../../../include/linux/rbtree_augmented.h" diff --git a/kernel/tools/perf/util/intel-bts.c b/kernel/tools/perf/util/intel-bts.c new file mode 100644 index 000000000..eb0e7f8bf --- /dev/null +++ b/kernel/tools/perf/util/intel-bts.c @@ -0,0 +1,933 @@ +/* + * intel-bts.c: Intel Processor Trace support + * Copyright (c) 2013-2015, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +#include <endian.h> +#include <byteswap.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/log2.h> + +#include "cpumap.h" +#include "color.h" +#include "evsel.h" +#include "evlist.h" +#include "machine.h" +#include "session.h" +#include "util.h" +#include "thread.h" +#include "thread-stack.h" +#include "debug.h" +#include "tsc.h" +#include "auxtrace.h" +#include "intel-pt-decoder/intel-pt-insn-decoder.h" +#include "intel-bts.h" + +#define MAX_TIMESTAMP (~0ULL) + +#define INTEL_BTS_ERR_NOINSN 5 +#define INTEL_BTS_ERR_LOST 9 + +#if __BYTE_ORDER == __BIG_ENDIAN +#define le64_to_cpu bswap_64 +#else +#define le64_to_cpu +#endif + +struct intel_bts { + struct auxtrace auxtrace; + struct auxtrace_queues queues; + struct auxtrace_heap heap; + u32 auxtrace_type; + struct perf_session *session; + struct machine *machine; + bool sampling_mode; + bool snapshot_mode; + bool data_queued; + u32 pmu_type; + struct perf_tsc_conversion tc; + bool cap_user_time_zero; + struct itrace_synth_opts synth_opts; + bool sample_branches; + u32 branches_filter; + u64 branches_sample_type; + u64 branches_id; + size_t branches_event_size; + bool synth_needs_swap; +}; + +struct intel_bts_queue { + struct intel_bts *bts; + unsigned int queue_nr; + struct auxtrace_buffer *buffer; + bool on_heap; + bool done; + pid_t pid; + pid_t tid; + int cpu; + u64 time; + struct intel_pt_insn intel_pt_insn; + u32 sample_flags; +}; + +struct branch { + u64 from; + u64 to; + u64 misc; +}; + +static void intel_bts_dump(struct intel_bts *bts __maybe_unused, + unsigned char *buf, size_t len) +{ + struct branch *branch; + size_t i, pos = 0, br_sz = sizeof(struct branch), sz; + const char *color = PERF_COLOR_BLUE; + + color_fprintf(stdout, color, + ". ... Intel BTS data: size %zu bytes\n", + len); + + while (len) { + if (len >= br_sz) + sz = br_sz; + else + sz = len; + printf("."); + color_fprintf(stdout, color, " %08x: ", pos); + for (i = 0; i < sz; i++) + color_fprintf(stdout, color, " %02x", buf[i]); + for (; i < br_sz; i++) + color_fprintf(stdout, color, " "); + if (len >= br_sz) { + branch = (struct branch *)buf; + color_fprintf(stdout, color, " %"PRIx64" -> %"PRIx64" %s\n", + le64_to_cpu(branch->from), + le64_to_cpu(branch->to), + le64_to_cpu(branch->misc) & 0x10 ? 
+ "pred" : "miss"); + } else { + color_fprintf(stdout, color, " Bad record!\n"); + } + pos += sz; + buf += sz; + len -= sz; + } +} + +static void intel_bts_dump_event(struct intel_bts *bts, unsigned char *buf, + size_t len) +{ + printf(".\n"); + intel_bts_dump(bts, buf, len); +} + +static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample) +{ + union perf_event event; + int err; + + auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, + INTEL_BTS_ERR_LOST, sample->cpu, sample->pid, + sample->tid, 0, "Lost trace data"); + + err = perf_session__deliver_synth_event(bts->session, &event, NULL); + if (err) + pr_err("Intel BTS: failed to deliver error event, error %d\n", + err); + + return err; +} + +static struct intel_bts_queue *intel_bts_alloc_queue(struct intel_bts *bts, + unsigned int queue_nr) +{ + struct intel_bts_queue *btsq; + + btsq = zalloc(sizeof(struct intel_bts_queue)); + if (!btsq) + return NULL; + + btsq->bts = bts; + btsq->queue_nr = queue_nr; + btsq->pid = -1; + btsq->tid = -1; + btsq->cpu = -1; + + return btsq; +} + +static int intel_bts_setup_queue(struct intel_bts *bts, + struct auxtrace_queue *queue, + unsigned int queue_nr) +{ + struct intel_bts_queue *btsq = queue->priv; + + if (list_empty(&queue->head)) + return 0; + + if (!btsq) { + btsq = intel_bts_alloc_queue(bts, queue_nr); + if (!btsq) + return -ENOMEM; + queue->priv = btsq; + + if (queue->cpu != -1) + btsq->cpu = queue->cpu; + btsq->tid = queue->tid; + } + + if (bts->sampling_mode) + return 0; + + if (!btsq->on_heap && !btsq->buffer) { + int ret; + + btsq->buffer = auxtrace_buffer__next(queue, NULL); + if (!btsq->buffer) + return 0; + + ret = auxtrace_heap__add(&bts->heap, queue_nr, + btsq->buffer->reference); + if (ret) + return ret; + btsq->on_heap = true; + } + + return 0; +} + +static int intel_bts_setup_queues(struct intel_bts *bts) +{ + unsigned int i; + int ret; + + for (i = 0; i < bts->queues.nr_queues; i++) { + ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i], + i); + if (ret) + return ret; + } + return 0; +} + +static inline int intel_bts_update_queues(struct intel_bts *bts) +{ + if (bts->queues.new_data) { + bts->queues.new_data = false; + return intel_bts_setup_queues(bts); + } + return 0; +} + +static unsigned char *intel_bts_find_overlap(unsigned char *buf_a, size_t len_a, + unsigned char *buf_b, size_t len_b) +{ + size_t offs, len; + + if (len_a > len_b) + offs = len_a - len_b; + else + offs = 0; + + for (; offs < len_a; offs += sizeof(struct branch)) { + len = len_a - offs; + if (!memcmp(buf_a + offs, buf_b, len)) + return buf_b + len; + } + + return buf_b; +} + +static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue, + struct auxtrace_buffer *b) +{ + struct auxtrace_buffer *a; + void *start; + + if (b->list.prev == &queue->head) + return 0; + a = list_entry(b->list.prev, struct auxtrace_buffer, list); + start = intel_bts_find_overlap(a->data, a->size, b->data, b->size); + if (!start) + return -EINVAL; + b->use_size = b->data + b->size - start; + b->use_data = start; + return 0; +} + +static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq, + struct branch *branch) +{ + int ret; + struct intel_bts *bts = btsq->bts; + union perf_event event; + struct perf_sample sample = { .ip = 0, }; + + event.sample.header.type = PERF_RECORD_SAMPLE; + event.sample.header.misc = PERF_RECORD_MISC_USER; + event.sample.header.size = sizeof(struct perf_event_header); + + sample.ip = le64_to_cpu(branch->from); + sample.pid = btsq->pid; + sample.tid 
= btsq->tid; + sample.addr = le64_to_cpu(branch->to); + sample.id = btsq->bts->branches_id; + sample.stream_id = btsq->bts->branches_id; + sample.period = 1; + sample.cpu = btsq->cpu; + sample.flags = btsq->sample_flags; + sample.insn_len = btsq->intel_pt_insn.length; + + if (bts->synth_opts.inject) { + event.sample.header.size = bts->branches_event_size; + ret = perf_event__synthesize_sample(&event, + bts->branches_sample_type, + 0, &sample, + bts->synth_needs_swap); + if (ret) + return ret; + } + + ret = perf_session__deliver_synth_event(bts->session, &event, &sample); + if (ret) + pr_err("Intel BTS: failed to deliver branch event, error %d\n", + ret); + + return ret; +} + +static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip) +{ + struct machine *machine = btsq->bts->machine; + struct thread *thread; + struct addr_location al; + unsigned char buf[1024]; + size_t bufsz; + ssize_t len; + int x86_64; + uint8_t cpumode; + int err = -1; + + bufsz = intel_pt_insn_max_size(); + + if (machine__kernel_ip(machine, ip)) + cpumode = PERF_RECORD_MISC_KERNEL; + else + cpumode = PERF_RECORD_MISC_USER; + + thread = machine__find_thread(machine, -1, btsq->tid); + if (!thread) + return -1; + + thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al); + if (!al.map || !al.map->dso) + goto out_put; + + len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf, bufsz); + if (len <= 0) + goto out_put; + + /* Load maps to ensure dso->is_64_bit has been updated */ + map__load(al.map, machine->symbol_filter); + + x86_64 = al.map->dso->is_64_bit; + + if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn)) + goto out_put; + + err = 0; +out_put: + thread__put(thread); + return err; +} + +static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid, + pid_t tid, u64 ip) +{ + union perf_event event; + int err; + + auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, + INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip, + "Failed to get instruction"); + + err = perf_session__deliver_synth_event(bts->session, &event, NULL); + if (err) + pr_err("Intel BTS: failed to deliver error event, error %d\n", + err); + + return err; +} + +static int intel_bts_get_branch_type(struct intel_bts_queue *btsq, + struct branch *branch) +{ + int err; + + if (!branch->from) { + if (branch->to) + btsq->sample_flags = PERF_IP_FLAG_BRANCH | + PERF_IP_FLAG_TRACE_BEGIN; + else + btsq->sample_flags = 0; + btsq->intel_pt_insn.length = 0; + } else if (!branch->to) { + btsq->sample_flags = PERF_IP_FLAG_BRANCH | + PERF_IP_FLAG_TRACE_END; + btsq->intel_pt_insn.length = 0; + } else { + err = intel_bts_get_next_insn(btsq, branch->from); + if (err) { + btsq->sample_flags = 0; + btsq->intel_pt_insn.length = 0; + if (!btsq->bts->synth_opts.errors) + return 0; + err = intel_bts_synth_error(btsq->bts, btsq->cpu, + btsq->pid, btsq->tid, + branch->from); + return err; + } + btsq->sample_flags = intel_pt_insn_type(btsq->intel_pt_insn.op); + /* Check for an async branch into the kernel */ + if (!machine__kernel_ip(btsq->bts->machine, branch->from) && + machine__kernel_ip(btsq->bts->machine, branch->to) && + btsq->sample_flags != (PERF_IP_FLAG_BRANCH | + PERF_IP_FLAG_CALL | + PERF_IP_FLAG_SYSCALLRET)) + btsq->sample_flags = PERF_IP_FLAG_BRANCH | + PERF_IP_FLAG_CALL | + PERF_IP_FLAG_ASYNC | + PERF_IP_FLAG_INTERRUPT; + } + + return 0; +} + +static int intel_bts_process_buffer(struct intel_bts_queue *btsq, + struct auxtrace_buffer *buffer) +{ + struct branch *branch; + size_t sz, bsz = sizeof(struct 
branch); + u32 filter = btsq->bts->branches_filter; + int err = 0; + + if (buffer->use_data) { + sz = buffer->use_size; + branch = buffer->use_data; + } else { + sz = buffer->size; + branch = buffer->data; + } + + if (!btsq->bts->sample_branches) + return 0; + + for (; sz > bsz; branch += 1, sz -= bsz) { + if (!branch->from && !branch->to) + continue; + intel_bts_get_branch_type(btsq, branch); + if (filter && !(filter & btsq->sample_flags)) + continue; + err = intel_bts_synth_branch_sample(btsq, branch); + if (err) + break; + } + return err; +} + +static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp) +{ + struct auxtrace_buffer *buffer = btsq->buffer, *old_buffer = buffer; + struct auxtrace_queue *queue; + struct thread *thread; + int err; + + if (btsq->done) + return 1; + + if (btsq->pid == -1) { + thread = machine__find_thread(btsq->bts->machine, -1, + btsq->tid); + if (thread) + btsq->pid = thread->pid_; + } else { + thread = machine__findnew_thread(btsq->bts->machine, btsq->pid, + btsq->tid); + } + + queue = &btsq->bts->queues.queue_array[btsq->queue_nr]; + + if (!buffer) + buffer = auxtrace_buffer__next(queue, NULL); + + if (!buffer) { + if (!btsq->bts->sampling_mode) + btsq->done = 1; + err = 1; + goto out_put; + } + + /* Currently there is no support for split buffers */ + if (buffer->consecutive) { + err = -EINVAL; + goto out_put; + } + + if (!buffer->data) { + int fd = perf_data_file__fd(btsq->bts->session->file); + + buffer->data = auxtrace_buffer__get_data(buffer, fd); + if (!buffer->data) { + err = -ENOMEM; + goto out_put; + } + } + + if (btsq->bts->snapshot_mode && !buffer->consecutive && + intel_bts_do_fix_overlap(queue, buffer)) { + err = -ENOMEM; + goto out_put; + } + + if (!btsq->bts->synth_opts.callchain && thread && + (!old_buffer || btsq->bts->sampling_mode || + (btsq->bts->snapshot_mode && !buffer->consecutive))) + thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1); + + err = intel_bts_process_buffer(btsq, buffer); + + auxtrace_buffer__drop_data(buffer); + + btsq->buffer = auxtrace_buffer__next(queue, buffer); + if (btsq->buffer) { + if (timestamp) + *timestamp = btsq->buffer->reference; + } else { + if (!btsq->bts->sampling_mode) + btsq->done = 1; + } +out_put: + thread__put(thread); + return err; +} + +static int intel_bts_flush_queue(struct intel_bts_queue *btsq) +{ + u64 ts = 0; + int ret; + + while (1) { + ret = intel_bts_process_queue(btsq, &ts); + if (ret < 0) + return ret; + if (ret) + break; + } + return 0; +} + +static int intel_bts_process_tid_exit(struct intel_bts *bts, pid_t tid) +{ + struct auxtrace_queues *queues = &bts->queues; + unsigned int i; + + for (i = 0; i < queues->nr_queues; i++) { + struct auxtrace_queue *queue = &bts->queues.queue_array[i]; + struct intel_bts_queue *btsq = queue->priv; + + if (btsq && btsq->tid == tid) + return intel_bts_flush_queue(btsq); + } + return 0; +} + +static int intel_bts_process_queues(struct intel_bts *bts, u64 timestamp) +{ + while (1) { + unsigned int queue_nr; + struct auxtrace_queue *queue; + struct intel_bts_queue *btsq; + u64 ts = 0; + int ret; + + if (!bts->heap.heap_cnt) + return 0; + + if (bts->heap.heap_array[0].ordinal > timestamp) + return 0; + + queue_nr = bts->heap.heap_array[0].queue_nr; + queue = &bts->queues.queue_array[queue_nr]; + btsq = queue->priv; + + auxtrace_heap__pop(&bts->heap); + + ret = intel_bts_process_queue(btsq, &ts); + if (ret < 0) { + auxtrace_heap__add(&bts->heap, queue_nr, ts); + return ret; + } + + if (!ret) { + ret = 
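/* more data remains in this queue: requeue it at its next buffer's timestamp */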
auxtrace_heap__add(&bts->heap, queue_nr, ts); + if (ret < 0) + return ret; + } else { + btsq->on_heap = false; + } + } + + return 0; +} + +static int intel_bts_process_event(struct perf_session *session, + union perf_event *event, + struct perf_sample *sample, + struct perf_tool *tool) +{ + struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts, + auxtrace); + u64 timestamp; + int err; + + if (dump_trace) + return 0; + + if (!tool->ordered_events) { + pr_err("Intel BTS requires ordered events\n"); + return -EINVAL; + } + + if (sample->time && sample->time != (u64)-1) + timestamp = perf_time_to_tsc(sample->time, &bts->tc); + else + timestamp = 0; + + err = intel_bts_update_queues(bts); + if (err) + return err; + + err = intel_bts_process_queues(bts, timestamp); + if (err) + return err; + if (event->header.type == PERF_RECORD_EXIT) { + err = intel_bts_process_tid_exit(bts, event->fork.tid); + if (err) + return err; + } + + if (event->header.type == PERF_RECORD_AUX && + (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) && + bts->synth_opts.errors) + err = intel_bts_lost(bts, sample); + + return err; +} + +static int intel_bts_process_auxtrace_event(struct perf_session *session, + union perf_event *event, + struct perf_tool *tool __maybe_unused) +{ + struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts, + auxtrace); + + if (bts->sampling_mode) + return 0; + + if (!bts->data_queued) { + struct auxtrace_buffer *buffer; + off_t data_offset; + int fd = perf_data_file__fd(session->file); + int err; + + if (perf_data_file__is_pipe(session->file)) { + data_offset = 0; + } else { + data_offset = lseek(fd, 0, SEEK_CUR); + if (data_offset == -1) + return -errno; + } + + err = auxtrace_queues__add_event(&bts->queues, session, event, + data_offset, &buffer); + if (err) + return err; + + /* Dump here now we have copied a piped trace out of the pipe */ + if (dump_trace) { + if (auxtrace_buffer__get_data(buffer, fd)) { + intel_bts_dump_event(bts, buffer->data, + buffer->size); + auxtrace_buffer__put_data(buffer); + } + } + } + + return 0; +} + +static int intel_bts_flush(struct perf_session *session __maybe_unused, + struct perf_tool *tool __maybe_unused) +{ + struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts, + auxtrace); + int ret; + + if (dump_trace || bts->sampling_mode) + return 0; + + if (!tool->ordered_events) + return -EINVAL; + + ret = intel_bts_update_queues(bts); + if (ret < 0) + return ret; + + return intel_bts_process_queues(bts, MAX_TIMESTAMP); +} + +static void intel_bts_free_queue(void *priv) +{ + struct intel_bts_queue *btsq = priv; + + if (!btsq) + return; + free(btsq); +} + +static void intel_bts_free_events(struct perf_session *session) +{ + struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts, + auxtrace); + struct auxtrace_queues *queues = &bts->queues; + unsigned int i; + + for (i = 0; i < queues->nr_queues; i++) { + intel_bts_free_queue(queues->queue_array[i].priv); + queues->queue_array[i].priv = NULL; + } + auxtrace_queues__free(queues); +} + +static void intel_bts_free(struct perf_session *session) +{ + struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts, + auxtrace); + + auxtrace_heap__free(&bts->heap); + intel_bts_free_events(session); + session->auxtrace = NULL; + free(bts); +} + +struct intel_bts_synth { + struct perf_tool dummy_tool; + struct perf_session *session; +}; + +static int intel_bts_event_synth(struct perf_tool *tool, + union perf_event *event, + struct perf_sample 
*sample __maybe_unused, + struct machine *machine __maybe_unused) +{ + struct intel_bts_synth *intel_bts_synth = + container_of(tool, struct intel_bts_synth, dummy_tool); + + return perf_session__deliver_synth_event(intel_bts_synth->session, + event, NULL); +} + +static int intel_bts_synth_event(struct perf_session *session, + struct perf_event_attr *attr, u64 id) +{ + struct intel_bts_synth intel_bts_synth; + + memset(&intel_bts_synth, 0, sizeof(struct intel_bts_synth)); + intel_bts_synth.session = session; + + return perf_event__synthesize_attr(&intel_bts_synth.dummy_tool, attr, 1, + &id, intel_bts_event_synth); +} + +static int intel_bts_synth_events(struct intel_bts *bts, + struct perf_session *session) +{ + struct perf_evlist *evlist = session->evlist; + struct perf_evsel *evsel; + struct perf_event_attr attr; + bool found = false; + u64 id; + int err; + + evlist__for_each(evlist, evsel) { + if (evsel->attr.type == bts->pmu_type && evsel->ids) { + found = true; + break; + } + } + + if (!found) { + pr_debug("There are no selected events with Intel BTS data\n"); + return 0; + } + + memset(&attr, 0, sizeof(struct perf_event_attr)); + attr.size = sizeof(struct perf_event_attr); + attr.type = PERF_TYPE_HARDWARE; + attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK; + attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID | + PERF_SAMPLE_PERIOD; + attr.sample_type &= ~(u64)PERF_SAMPLE_TIME; + attr.sample_type &= ~(u64)PERF_SAMPLE_CPU; + attr.exclude_user = evsel->attr.exclude_user; + attr.exclude_kernel = evsel->attr.exclude_kernel; + attr.exclude_hv = evsel->attr.exclude_hv; + attr.exclude_host = evsel->attr.exclude_host; + attr.exclude_guest = evsel->attr.exclude_guest; + attr.sample_id_all = evsel->attr.sample_id_all; + attr.read_format = evsel->attr.read_format; + + id = evsel->id[0] + 1000000000; + if (!id) + id = 1; + + if (bts->synth_opts.branches) { + attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS; + attr.sample_period = 1; + attr.sample_type |= PERF_SAMPLE_ADDR; + pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n", + id, (u64)attr.sample_type); + err = intel_bts_synth_event(session, &attr, id); + if (err) { + pr_err("%s: failed to synthesize 'branches' event type\n", + __func__); + return err; + } + bts->sample_branches = true; + bts->branches_sample_type = attr.sample_type; + bts->branches_id = id; + /* + * We only use sample types from PERF_SAMPLE_MASK so we can use + * __perf_evsel__sample_size() here. 
+ */ + bts->branches_event_size = sizeof(struct sample_event) + + __perf_evsel__sample_size(attr.sample_type); + } + + bts->synth_needs_swap = evsel->needs_swap; + + return 0; +} + +static const char * const intel_bts_info_fmts[] = { + [INTEL_BTS_PMU_TYPE] = " PMU Type %"PRId64"\n", + [INTEL_BTS_TIME_SHIFT] = " Time Shift %"PRIu64"\n", + [INTEL_BTS_TIME_MULT] = " Time Multiplier %"PRIu64"\n", + [INTEL_BTS_TIME_ZERO] = " Time Zero %"PRIu64"\n", + [INTEL_BTS_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n", + [INTEL_BTS_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n", +}; + +static void intel_bts_print_info(u64 *arr, int start, int finish) +{ + int i; + + if (!dump_trace) + return; + + for (i = start; i <= finish; i++) + fprintf(stdout, intel_bts_info_fmts[i], arr[i]); +} + +u64 intel_bts_auxtrace_info_priv[INTEL_BTS_AUXTRACE_PRIV_SIZE]; + +int intel_bts_process_auxtrace_info(union perf_event *event, + struct perf_session *session) +{ + struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info; + size_t min_sz = sizeof(u64) * INTEL_BTS_SNAPSHOT_MODE; + struct intel_bts *bts; + int err; + + if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) + + min_sz) + return -EINVAL; + + bts = zalloc(sizeof(struct intel_bts)); + if (!bts) + return -ENOMEM; + + err = auxtrace_queues__init(&bts->queues); + if (err) + goto err_free; + + bts->session = session; + bts->machine = &session->machines.host; /* No KVM support */ + bts->auxtrace_type = auxtrace_info->type; + bts->pmu_type = auxtrace_info->priv[INTEL_BTS_PMU_TYPE]; + bts->tc.time_shift = auxtrace_info->priv[INTEL_BTS_TIME_SHIFT]; + bts->tc.time_mult = auxtrace_info->priv[INTEL_BTS_TIME_MULT]; + bts->tc.time_zero = auxtrace_info->priv[INTEL_BTS_TIME_ZERO]; + bts->cap_user_time_zero = + auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO]; + bts->snapshot_mode = auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE]; + + bts->sampling_mode = false; + + bts->auxtrace.process_event = intel_bts_process_event; + bts->auxtrace.process_auxtrace_event = intel_bts_process_auxtrace_event; + bts->auxtrace.flush_events = intel_bts_flush; + bts->auxtrace.free_events = intel_bts_free_events; + bts->auxtrace.free = intel_bts_free; + session->auxtrace = &bts->auxtrace; + + intel_bts_print_info(&auxtrace_info->priv[0], INTEL_BTS_PMU_TYPE, + INTEL_BTS_SNAPSHOT_MODE); + + if (dump_trace) + return 0; + + if (session->itrace_synth_opts && session->itrace_synth_opts->set) + bts->synth_opts = *session->itrace_synth_opts; + else + itrace_synth_opts__set_default(&bts->synth_opts); + + if (bts->synth_opts.calls) + bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC | + PERF_IP_FLAG_TRACE_END; + if (bts->synth_opts.returns) + bts->branches_filter |= PERF_IP_FLAG_RETURN | + PERF_IP_FLAG_TRACE_BEGIN; + + err = intel_bts_synth_events(bts, session); + if (err) + goto err_free_queues; + + err = auxtrace_queues__process_index(&bts->queues, session); + if (err) + goto err_free_queues; + + if (bts->queues.populated) + bts->data_queued = true; + + return 0; + +err_free_queues: + auxtrace_queues__free(&bts->queues); + session->auxtrace = NULL; +err_free: + free(bts); + return err; +} diff --git a/kernel/tools/perf/util/intel-bts.h b/kernel/tools/perf/util/intel-bts.h new file mode 100644 index 000000000..ca65e21b3 --- /dev/null +++ b/kernel/tools/perf/util/intel-bts.h @@ -0,0 +1,43 @@ +/* + * intel-bts.h: Intel Processor Trace support + * Copyright (c) 2013-2014, Intel Corporation.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef INCLUDE__PERF_INTEL_BTS_H__ +#define INCLUDE__PERF_INTEL_BTS_H__ + +#define INTEL_BTS_PMU_NAME "intel_bts" + +enum { + INTEL_BTS_PMU_TYPE, + INTEL_BTS_TIME_SHIFT, + INTEL_BTS_TIME_MULT, + INTEL_BTS_TIME_ZERO, + INTEL_BTS_CAP_USER_TIME_ZERO, + INTEL_BTS_SNAPSHOT_MODE, + INTEL_BTS_AUXTRACE_PRIV_MAX, +}; + +#define INTEL_BTS_AUXTRACE_PRIV_SIZE (INTEL_BTS_AUXTRACE_PRIV_MAX * sizeof(u64)) + +struct auxtrace_record; +struct perf_tool; +union perf_event; +struct perf_session; + +struct auxtrace_record *intel_bts_recording_init(int *err); + +int intel_bts_process_auxtrace_info(union perf_event *event, + struct perf_session *session); + +#endif diff --git a/kernel/tools/perf/util/intel-pt-decoder/Build b/kernel/tools/perf/util/intel-pt-decoder/Build new file mode 100644 index 000000000..0611d619a --- /dev/null +++ b/kernel/tools/perf/util/intel-pt-decoder/Build @@ -0,0 +1,23 @@ +libperf-$(CONFIG_AUXTRACE) += intel-pt-pkt-decoder.o intel-pt-insn-decoder.o intel-pt-log.o intel-pt-decoder.o + +inat_tables_script = util/intel-pt-decoder/gen-insn-attr-x86.awk +inat_tables_maps = util/intel-pt-decoder/x86-opcode-map.txt + +$(OUTPUT)util/intel-pt-decoder/inat-tables.c: $(inat_tables_script) $(inat_tables_maps) + $(call rule_mkdir) + @$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@ + +$(OUTPUT)util/intel-pt-decoder/intel-pt-insn-decoder.o: util/intel-pt-decoder/intel-pt-insn-decoder.c util/intel-pt-decoder/inat.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c + @(test -d ../../kernel -a -d ../../tools -a -d ../perf && (( \ + diff -B -I'^#include' util/intel-pt-decoder/insn.c ../../arch/x86/lib/insn.c >/dev/null && \ + diff -B -I'^#include' util/intel-pt-decoder/inat.c ../../arch/x86/lib/inat.c >/dev/null && \ + diff -B util/intel-pt-decoder/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \ + diff -B util/intel-pt-decoder/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \ + diff -B -I'^#include' util/intel-pt-decoder/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \ + diff -B -I'^#include' util/intel-pt-decoder/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \ + diff -B -I'^#include' util/intel-pt-decoder/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \ + || echo "Warning: Intel PT: x86 instruction decoder differs from kernel" >&2 )) || true + $(call rule_mkdir) + $(call if_changed_dep,cc_o_c) + +CFLAGS_intel-pt-insn-decoder.o += -I$(OUTPUT)util/intel-pt-decoder -Wno-override-init diff --git a/kernel/tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk b/kernel/tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk new file mode 100644 index 000000000..517567347 --- /dev/null +++ b/kernel/tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk @@ -0,0 +1,386 @@ +#!/bin/awk -f +# gen-insn-attr-x86.awk: Instruction attribute table generator +# Written by Masami Hiramatsu +# +# Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c + +# Awk implementation sanity check +function check_awk_implement() { 
+ if (sprintf("%x", 0) != "0") + return "Your awk has a printf-format problem." + return "" +} + +# Clear working vars +function clear_vars() { + delete table + delete lptable2 + delete lptable1 + delete lptable3 + eid = -1 # escape id + gid = -1 # group id + aid = -1 # AVX id + tname = "" +} + +BEGIN { + # Implementation error checking + awkchecked = check_awk_implement() + if (awkchecked != "") { + print "Error: " awkchecked > "/dev/stderr" + print "Please try to use gawk." > "/dev/stderr" + exit 1 + } + + # Setup generating tables + print "/* x86 opcode map generated from x86-opcode-map.txt */" + print "/* Do not change this code. */\n" + ggid = 1 + geid = 1 + gaid = 0 + delete etable + delete gtable + delete atable + + opnd_expr = "^[A-Za-z/]" + ext_expr = "^\\(" + sep_expr = "^\\|$" + group_expr = "^Grp[0-9A-Za-z]+" + + imm_expr = "^[IJAOL][a-z]" + imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" + imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" + imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)" + imm_flag["Id"] = "INAT_MAKE_IMM(INAT_IMM_DWORD)" + imm_flag["Iq"] = "INAT_MAKE_IMM(INAT_IMM_QWORD)" + imm_flag["Ap"] = "INAT_MAKE_IMM(INAT_IMM_PTR)" + imm_flag["Iz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)" + imm_flag["Jz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)" + imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)" + imm_flag["Ob"] = "INAT_MOFFSET" + imm_flag["Ov"] = "INAT_MOFFSET" + imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" + + modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])" + force64_expr = "\\([df]64\\)" + rex_expr = "^REX(\\.[XRWB]+)*" + fpu_expr = "^ESC" # TODO + + lprefix1_expr = "\\((66|!F3)\\)" + lprefix2_expr = "\\(F3\\)" + lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)" + lprefix_expr = "\\((66|F2|F3)\\)" + max_lprefix = 4 + + # All opcodes starting with lower-case 'v' or with (v1) superscript + # accepts VEX prefix + vexok_opcode_expr = "^v.*" + vexok_expr = "\\(v1\\)" + # All opcodes with (v) superscript supports *only* VEX prefix + vexonly_expr = "\\(v\\)" + + prefix_expr = "\\(Prefix\\)" + prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ" + prefix_num["REPNE"] = "INAT_PFX_REPNE" + prefix_num["REP/REPE"] = "INAT_PFX_REPE" + prefix_num["XACQUIRE"] = "INAT_PFX_REPNE" + prefix_num["XRELEASE"] = "INAT_PFX_REPE" + prefix_num["LOCK"] = "INAT_PFX_LOCK" + prefix_num["SEG=CS"] = "INAT_PFX_CS" + prefix_num["SEG=DS"] = "INAT_PFX_DS" + prefix_num["SEG=ES"] = "INAT_PFX_ES" + prefix_num["SEG=FS"] = "INAT_PFX_FS" + prefix_num["SEG=GS"] = "INAT_PFX_GS" + prefix_num["SEG=SS"] = "INAT_PFX_SS" + prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ" + prefix_num["VEX+1byte"] = "INAT_PFX_VEX2" + prefix_num["VEX+2byte"] = "INAT_PFX_VEX3" + + clear_vars() +} + +function semantic_error(msg) { + print "Semantic error at " NR ": " msg > "/dev/stderr" + exit 1 +} + +function debug(msg) { + print "DEBUG: " msg +} + +function array_size(arr, i,c) { + c = 0 + for (i in arr) + c++ + return c +} + +/^Table:/ { + print "/* " $0 " */" + if (tname != "") + semantic_error("Hit Table: before EndTable:."); +} + +/^Referrer:/ { + if (NF != 1) { + # escape opcode table + ref = "" + for (i = 2; i <= NF; i++) + ref = ref $i + eid = escape[ref] + tname = sprintf("inat_escape_table_%d", eid) + } +} + +/^AVXcode:/ { + if (NF != 1) { + # AVX/escape opcode table + aid = $2 + if (gaid <= aid) + gaid = aid + 1 + if (tname == "") # AVX only opcode table + tname = sprintf("inat_avx_table_%d", $2) + } + if (aid == -1 && eid == -1) # primary opcode table + tname = "inat_primary_table" +} + +/^GrpTable:/ { + print "/* " $0 " */" + if (!($2 in group)) 
+ semantic_error("No group: " $2 ) + gid = group[$2] + tname = "inat_group_table_" gid +} + +function print_table(tbl,name,fmt,n) +{ + print "const insn_attr_t " name " = {" + for (i = 0; i < n; i++) { + id = sprintf(fmt, i) + if (tbl[id]) + print " [" id "] = " tbl[id] "," + } + print "};" +} + +/^EndTable/ { + if (gid != -1) { + # print group tables + if (array_size(table) != 0) { + print_table(table, tname "[INAT_GROUP_TABLE_SIZE]", + "0x%x", 8) + gtable[gid,0] = tname + } + if (array_size(lptable1) != 0) { + print_table(lptable1, tname "_1[INAT_GROUP_TABLE_SIZE]", + "0x%x", 8) + gtable[gid,1] = tname "_1" + } + if (array_size(lptable2) != 0) { + print_table(lptable2, tname "_2[INAT_GROUP_TABLE_SIZE]", + "0x%x", 8) + gtable[gid,2] = tname "_2" + } + if (array_size(lptable3) != 0) { + print_table(lptable3, tname "_3[INAT_GROUP_TABLE_SIZE]", + "0x%x", 8) + gtable[gid,3] = tname "_3" + } + } else { + # print primary/escaped tables + if (array_size(table) != 0) { + print_table(table, tname "[INAT_OPCODE_TABLE_SIZE]", + "0x%02x", 256) + etable[eid,0] = tname + if (aid >= 0) + atable[aid,0] = tname + } + if (array_size(lptable1) != 0) { + print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]", + "0x%02x", 256) + etable[eid,1] = tname "_1" + if (aid >= 0) + atable[aid,1] = tname "_1" + } + if (array_size(lptable2) != 0) { + print_table(lptable2,tname "_2[INAT_OPCODE_TABLE_SIZE]", + "0x%02x", 256) + etable[eid,2] = tname "_2" + if (aid >= 0) + atable[aid,2] = tname "_2" + } + if (array_size(lptable3) != 0) { + print_table(lptable3,tname "_3[INAT_OPCODE_TABLE_SIZE]", + "0x%02x", 256) + etable[eid,3] = tname "_3" + if (aid >= 0) + atable[aid,3] = tname "_3" + } + } + print "" + clear_vars() +} + +function add_flags(old,new) { + if (old && new) + return old " | " new + else if (old) + return old + else + return new +} + +# convert operands to flags. 
+function convert_operands(count,opnd, i,j,imm,mod) +{ + imm = null + mod = null + for (j = 1; j <= count; j++) { + i = opnd[j] + if (match(i, imm_expr) == 1) { + if (!imm_flag[i]) + semantic_error("Unknown imm opnd: " i) + if (imm) { + if (i != "Ib") + semantic_error("Second IMM error") + imm = add_flags(imm, "INAT_SCNDIMM") + } else + imm = imm_flag[i] + } else if (match(i, modrm_expr)) + mod = "INAT_MODRM" + } + return add_flags(imm, mod) +} + +/^[0-9a-f]+\:/ { + if (NR == 1) + next + # get index + idx = "0x" substr($1, 1, index($1,":") - 1) + if (idx in table) + semantic_error("Redefine " idx " in " tname) + + # check if escaped opcode + if ("escape" == $2) { + if ($3 != "#") + semantic_error("No escaped name") + ref = "" + for (i = 4; i <= NF; i++) + ref = ref $i + if (ref in escape) + semantic_error("Redefine escape (" ref ")") + escape[ref] = geid + geid++ + table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")" + next + } + + variant = null + # converts + i = 2 + while (i <= NF) { + opcode = $(i++) + delete opnds + ext = null + flags = null + opnd = null + # parse one opcode + if (match($i, opnd_expr)) { + opnd = $i + count = split($(i++), opnds, ",") + flags = convert_operands(count, opnds) + } + if (match($i, ext_expr)) + ext = $(i++) + if (match($i, sep_expr)) + i++ + else if (i < NF) + semantic_error($i " is not a separator") + + # check if group opcode + if (match(opcode, group_expr)) { + if (!(opcode in group)) { + group[opcode] = ggid + ggid++ + } + flags = add_flags(flags, "INAT_MAKE_GROUP(" group[opcode] ")") + } + # check force(or default) 64bit + if (match(ext, force64_expr)) + flags = add_flags(flags, "INAT_FORCE64") + + # check REX prefix + if (match(opcode, rex_expr)) + flags = add_flags(flags, "INAT_MAKE_PREFIX(INAT_PFX_REX)") + + # check coprocessor escape : TODO + if (match(opcode, fpu_expr)) + flags = add_flags(flags, "INAT_MODRM") + + # check VEX codes + if (match(ext, vexonly_expr)) + flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY") + else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr)) + flags = add_flags(flags, "INAT_VEXOK") + + # check prefixes + if (match(ext, prefix_expr)) { + if (!prefix_num[opcode]) + semantic_error("Unknown prefix: " opcode) + flags = add_flags(flags, "INAT_MAKE_PREFIX(" prefix_num[opcode] ")") + } + if (length(flags) == 0) + continue + # check if last prefix + if (match(ext, lprefix1_expr)) { + lptable1[idx] = add_flags(lptable1[idx],flags) + variant = "INAT_VARIANT" + } + if (match(ext, lprefix2_expr)) { + lptable2[idx] = add_flags(lptable2[idx],flags) + variant = "INAT_VARIANT" + } + if (match(ext, lprefix3_expr)) { + lptable3[idx] = add_flags(lptable3[idx],flags) + variant = "INAT_VARIANT" + } + if (!match(ext, lprefix_expr)){ + table[idx] = add_flags(table[idx],flags) + } + } + if (variant) + table[idx] = add_flags(table[idx],variant) +} + +END { + if (awkchecked != "") + exit 1 + # print escape opcode map's array + print "/* Escape opcode map array */" + print "const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \ + "[INAT_LSTPFX_MAX + 1] = {" + for (i = 0; i < geid; i++) + for (j = 0; j < max_lprefix; j++) + if (etable[i,j]) + print " ["i"]["j"] = "etable[i,j]"," + print "};\n" + # print group opcode map's array + print "/* Group opcode map array */" + print "const insn_attr_t * const inat_group_tables[INAT_GRP_MAX + 1]"\ + "[INAT_LSTPFX_MAX + 1] = {" + for (i = 0; i < ggid; i++) + for (j = 0; j < max_lprefix; j++) + if (gtable[i,j]) + print " ["i"]["j"] = "gtable[i,j]"," + print "};\n" + # print AVX 
opcode map's array + print "/* AVX opcode map array */" + print "const insn_attr_t * const inat_avx_tables[X86_VEX_M_MAX + 1]"\ + "[INAT_LSTPFX_MAX + 1] = {" + for (i = 0; i < gaid; i++) + for (j = 0; j < max_lprefix; j++) + if (atable[i,j]) + print " ["i"]["j"] = "atable[i,j]"," + print "};" +} diff --git a/kernel/tools/perf/util/intel-pt-decoder/inat.c b/kernel/tools/perf/util/intel-pt-decoder/inat.c new file mode 100644 index 000000000..906d94aa0 --- /dev/null +++ b/kernel/tools/perf/util/intel-pt-decoder/inat.c @@ -0,0 +1,96 @@ +/* + * x86 instruction attribute tables + * + * Written by Masami Hiramatsu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ +#include "insn.h" + +/* Attribute tables are generated from opcode map */ +#include "inat-tables.c" + +/* Attribute search APIs */ +insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode) +{ + return inat_primary_table[opcode]; +} + +int inat_get_last_prefix_id(insn_byte_t last_pfx) +{ + insn_attr_t lpfx_attr; + + lpfx_attr = inat_get_opcode_attribute(last_pfx); + return inat_last_prefix_id(lpfx_attr); +} + +insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id, + insn_attr_t esc_attr) +{ + const insn_attr_t *table; + int n; + + n = inat_escape_id(esc_attr); + + table = inat_escape_tables[n][0]; + if (!table) + return 0; + if (inat_has_variant(table[opcode]) && lpfx_id) { + table = inat_escape_tables[n][lpfx_id]; + if (!table) + return 0; + } + return table[opcode]; +} + +insn_attr_t inat_get_group_attribute(insn_byte_t modrm, int lpfx_id, + insn_attr_t grp_attr) +{ + const insn_attr_t *table; + int n; + + n = inat_group_id(grp_attr); + + table = inat_group_tables[n][0]; + if (!table) + return inat_group_common_attribute(grp_attr); + if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) { + table = inat_group_tables[n][lpfx_id]; + if (!table) + return inat_group_common_attribute(grp_attr); + } + return table[X86_MODRM_REG(modrm)] | + inat_group_common_attribute(grp_attr); +} + +insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m, + insn_byte_t vex_p) +{ + const insn_attr_t *table; + if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX) + return 0; + /* At first, this checks the master table */ + table = inat_avx_tables[vex_m][0]; + if (!table) + return 0; + if (!inat_is_group(table[opcode]) && vex_p) { + /* If this is not a group, get attribute directly */ + table = inat_avx_tables[vex_m][vex_p]; + if (!table) + return 0; + } + return table[opcode]; +} diff --git a/kernel/tools/perf/util/intel-pt-decoder/inat.h b/kernel/tools/perf/util/intel-pt-decoder/inat.h new file mode 100644 index 000000000..611645e90 --- /dev/null +++ b/kernel/tools/perf/util/intel-pt-decoder/inat.h @@ -0,0 +1,221 @@ +#ifndef _ASM_X86_INAT_H +#define _ASM_X86_INAT_H +/* + * x86 instruction attributes + * + * Written by Masami Hiramatsu + * + 
* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ +#include "inat_types.h" + +/* + * Internal bits. Don't use bitmasks directly, because these bits are + * unstable. You should use checking functions. + */ + +#define INAT_OPCODE_TABLE_SIZE 256 +#define INAT_GROUP_TABLE_SIZE 8 + +/* Legacy last prefixes */ +#define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */ +#define INAT_PFX_REPE 2 /* 0xF3 */ /* LPFX2 */ +#define INAT_PFX_REPNE 3 /* 0xF2 */ /* LPFX3 */ +/* Other Legacy prefixes */ +#define INAT_PFX_LOCK 4 /* 0xF0 */ +#define INAT_PFX_CS 5 /* 0x2E */ +#define INAT_PFX_DS 6 /* 0x3E */ +#define INAT_PFX_ES 7 /* 0x26 */ +#define INAT_PFX_FS 8 /* 0x64 */ +#define INAT_PFX_GS 9 /* 0x65 */ +#define INAT_PFX_SS 10 /* 0x36 */ +#define INAT_PFX_ADDRSZ 11 /* 0x67 */ +/* x86-64 REX prefix */ +#define INAT_PFX_REX 12 /* 0x4X */ +/* AVX VEX prefixes */ +#define INAT_PFX_VEX2 13 /* 2-bytes VEX prefix */ +#define INAT_PFX_VEX3 14 /* 3-bytes VEX prefix */ + +#define INAT_LSTPFX_MAX 3 +#define INAT_LGCPFX_MAX 11 + +/* Immediate size */ +#define INAT_IMM_BYTE 1 +#define INAT_IMM_WORD 2 +#define INAT_IMM_DWORD 3 +#define INAT_IMM_QWORD 4 +#define INAT_IMM_PTR 5 +#define INAT_IMM_VWORD32 6 +#define INAT_IMM_VWORD 7 + +/* Legacy prefix */ +#define INAT_PFX_OFFS 0 +#define INAT_PFX_BITS 4 +#define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1) +#define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS) +/* Escape opcodes */ +#define INAT_ESC_OFFS (INAT_PFX_OFFS + INAT_PFX_BITS) +#define INAT_ESC_BITS 2 +#define INAT_ESC_MAX ((1 << INAT_ESC_BITS) - 1) +#define INAT_ESC_MASK (INAT_ESC_MAX << INAT_ESC_OFFS) +/* Group opcodes (1-16) */ +#define INAT_GRP_OFFS (INAT_ESC_OFFS + INAT_ESC_BITS) +#define INAT_GRP_BITS 5 +#define INAT_GRP_MAX ((1 << INAT_GRP_BITS) - 1) +#define INAT_GRP_MASK (INAT_GRP_MAX << INAT_GRP_OFFS) +/* Immediates */ +#define INAT_IMM_OFFS (INAT_GRP_OFFS + INAT_GRP_BITS) +#define INAT_IMM_BITS 3 +#define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS) +/* Flags */ +#define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS) +#define INAT_MODRM (1 << (INAT_FLAG_OFFS)) +#define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 1)) +#define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 2)) +#define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3)) +#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4)) +#define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5)) +#define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6)) +/* Attribute making macros for attribute tables */ +#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS) +#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS) +#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) +#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) + +/* Attribute search APIs */ +extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); +extern int inat_get_last_prefix_id(insn_byte_t last_pfx); +extern insn_attr_t 
inat_get_escape_attribute(insn_byte_t opcode, + int lpfx_id, + insn_attr_t esc_attr); +extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm, + int lpfx_id, + insn_attr_t esc_attr); +extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, + insn_byte_t vex_m, + insn_byte_t vex_pp); + +/* Attribute checking functions */ +static inline int inat_is_legacy_prefix(insn_attr_t attr) +{ + attr &= INAT_PFX_MASK; + return attr && attr <= INAT_LGCPFX_MAX; +} + +static inline int inat_is_address_size_prefix(insn_attr_t attr) +{ + return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ; +} + +static inline int inat_is_operand_size_prefix(insn_attr_t attr) +{ + return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ; +} + +static inline int inat_is_rex_prefix(insn_attr_t attr) +{ + return (attr & INAT_PFX_MASK) == INAT_PFX_REX; +} + +static inline int inat_last_prefix_id(insn_attr_t attr) +{ + if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX) + return 0; + else + return attr & INAT_PFX_MASK; +} + +static inline int inat_is_vex_prefix(insn_attr_t attr) +{ + attr &= INAT_PFX_MASK; + return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3; +} + +static inline int inat_is_vex3_prefix(insn_attr_t attr) +{ + return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3; +} + +static inline int inat_is_escape(insn_attr_t attr) +{ + return attr & INAT_ESC_MASK; +} + +static inline int inat_escape_id(insn_attr_t attr) +{ + return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS; +} + +static inline int inat_is_group(insn_attr_t attr) +{ + return attr & INAT_GRP_MASK; +} + +static inline int inat_group_id(insn_attr_t attr) +{ + return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS; +} + +static inline int inat_group_common_attribute(insn_attr_t attr) +{ + return attr & ~INAT_GRP_MASK; +} + +static inline int inat_has_immediate(insn_attr_t attr) +{ + return attr & INAT_IMM_MASK; +} + +static inline int inat_immediate_size(insn_attr_t attr) +{ + return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS; +} + +static inline int inat_has_modrm(insn_attr_t attr) +{ + return attr & INAT_MODRM; +} + +static inline int inat_is_force64(insn_attr_t attr) +{ + return attr & INAT_FORCE64; +} + +static inline int inat_has_second_immediate(insn_attr_t attr) +{ + return attr & INAT_SCNDIMM; +} + +static inline int inat_has_moffset(insn_attr_t attr) +{ + return attr & INAT_MOFFSET; +} + +static inline int inat_has_variant(insn_attr_t attr) +{ + return attr & INAT_VARIANT; +} + +static inline int inat_accept_vex(insn_attr_t attr) +{ + return attr & INAT_VEXOK; +} + +static inline int inat_must_vex(insn_attr_t attr) +{ + return attr & INAT_VEXONLY; +} +#endif diff --git a/kernel/tools/perf/util/intel-pt-decoder/inat_types.h b/kernel/tools/perf/util/intel-pt-decoder/inat_types.h new file mode 100644 index 000000000..cb3c20ce3 --- /dev/null +++ b/kernel/tools/perf/util/intel-pt-decoder/inat_types.h @@ -0,0 +1,29 @@ +#ifndef _ASM_X86_INAT_TYPES_H +#define _ASM_X86_INAT_TYPES_H +/* + * x86 instruction attributes + * + * Written by Masami Hiramatsu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +/* Instruction attributes */ +typedef unsigned int insn_attr_t; +typedef unsigned char insn_byte_t; +typedef signed int insn_value_t; + +#endif diff --git a/kernel/tools/perf/util/intel-pt-decoder/insn.c b/kernel/tools/perf/util/intel-pt-decoder/insn.c new file mode 100644 index 000000000..47314a643 --- /dev/null +++ b/kernel/tools/perf/util/intel-pt-decoder/insn.c @@ -0,0 +1,594 @@ +/* + * x86 instruction analysis + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2002, 2004, 2009 + */ + +#ifdef __KERNEL__ +#include <linux/string.h> +#else +#include <string.h> +#endif +#include "inat.h" +#include "insn.h" + +/* Verify next sizeof(t) bytes can be on the same instruction */ +#define validate_next(t, insn, n) \ + ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr) + +#define __get_next(t, insn) \ + ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; }) + +#define __peek_nbyte_next(t, insn, n) \ + ({ t r = *(t*)((insn)->next_byte + n); r; }) + +#define get_next(t, insn) \ + ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); }) + +#define peek_nbyte_next(t, insn, n) \ + ({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); }) + +#define peek_next(t, insn) peek_nbyte_next(t, insn, 0) + +/** + * insn_init() - initialize struct insn + * @insn: &struct insn to be initialized + * @kaddr: address (in kernel memory) of instruction (or copy thereof) + * @buf_len: length of the buffer pointed to by @kaddr + * @x86_64: !0 for 64-bit kernel or 64-bit app + */ +void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64) +{ + /* + * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid + * even if the input buffer is long enough to hold them. + */ + if (buf_len > MAX_INSN_SIZE) + buf_len = MAX_INSN_SIZE; + + memset(insn, 0, sizeof(*insn)); + insn->kaddr = kaddr; + insn->end_kaddr = kaddr + buf_len; + insn->next_byte = kaddr; + insn->x86_64 = x86_64 ? 1 : 0; + insn->opnd_bytes = 4; + if (x86_64) + insn->addr_bytes = 8; + else + insn->addr_bytes = 4; +} + +/** + * insn_get_prefixes - scan x86 instruction prefix bytes + * @insn: &struct insn containing instruction + * + * Populates the @insn->prefixes bitmap, and updates @insn->next_byte + * to point to the (first) opcode. No effect if @insn->prefixes.got + * is already set.
+ */ +void insn_get_prefixes(struct insn *insn) +{ + struct insn_field *prefixes = &insn->prefixes; + insn_attr_t attr; + insn_byte_t b, lb; + int i, nb; + + if (prefixes->got) + return; + + nb = 0; + lb = 0; + b = peek_next(insn_byte_t, insn); + attr = inat_get_opcode_attribute(b); + while (inat_is_legacy_prefix(attr)) { + /* Skip if same prefix */ + for (i = 0; i < nb; i++) + if (prefixes->bytes[i] == b) + goto found; + if (nb == 4) + /* Invalid instruction */ + break; + prefixes->bytes[nb++] = b; + if (inat_is_address_size_prefix(attr)) { + /* address size switches 2/4 or 4/8 */ + if (insn->x86_64) + insn->addr_bytes ^= 12; + else + insn->addr_bytes ^= 6; + } else if (inat_is_operand_size_prefix(attr)) { + /* operand size switches 2/4 */ + insn->opnd_bytes ^= 6; + } +found: + prefixes->nbytes++; + insn->next_byte++; + lb = b; + b = peek_next(insn_byte_t, insn); + attr = inat_get_opcode_attribute(b); + } + /* Set the last prefix */ + if (lb && lb != insn->prefixes.bytes[3]) { + if (unlikely(insn->prefixes.bytes[3])) { + /* Swap the last prefix */ + b = insn->prefixes.bytes[3]; + for (i = 0; i < nb; i++) + if (prefixes->bytes[i] == lb) + prefixes->bytes[i] = b; + } + insn->prefixes.bytes[3] = lb; + } + + /* Decode REX prefix */ + if (insn->x86_64) { + b = peek_next(insn_byte_t, insn); + attr = inat_get_opcode_attribute(b); + if (inat_is_rex_prefix(attr)) { + insn->rex_prefix.value = b; + insn->rex_prefix.nbytes = 1; + insn->next_byte++; + if (X86_REX_W(b)) + /* REX.W overrides opnd_size */ + insn->opnd_bytes = 8; + } + } + insn->rex_prefix.got = 1; + + /* Decode VEX prefix */ + b = peek_next(insn_byte_t, insn); + attr = inat_get_opcode_attribute(b); + if (inat_is_vex_prefix(attr)) { + insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1); + if (!insn->x86_64) { + /* + * In 32-bit mode, if the [7:6] bits (mod bits of + * ModRM) on the second byte are not 11b, it is + * LDS or LES. + */ + if (X86_MODRM_MOD(b2) != 3) + goto vex_end; + } + insn->vex_prefix.bytes[0] = b; + insn->vex_prefix.bytes[1] = b2; + if (inat_is_vex3_prefix(attr)) { + b2 = peek_nbyte_next(insn_byte_t, insn, 2); + insn->vex_prefix.bytes[2] = b2; + insn->vex_prefix.nbytes = 3; + insn->next_byte += 3; + if (insn->x86_64 && X86_VEX_W(b2)) + /* VEX.W overrides opnd_size */ + insn->opnd_bytes = 8; + } else { + /* + * For VEX2, fake VEX3-like byte#2. + * Makes it easier to decode vex.W, vex.vvvv, + * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0. + */ + insn->vex_prefix.bytes[2] = b2 & 0x7f; + insn->vex_prefix.nbytes = 2; + insn->next_byte += 2; + } + } +vex_end: + insn->vex_prefix.got = 1; + + prefixes->got = 1; + +err_out: + return; +} + +/** + * insn_get_opcode - collect opcode(s) + * @insn: &struct insn containing instruction + * + * Populates @insn->opcode, updates @insn->next_byte to point past the + * opcode byte(s), and sets @insn->attr (except for groups). + * If necessary, first collects any preceding (prefix) bytes. + * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got + * is already 1.
+ */ +void insn_get_opcode(struct insn *insn) +{ + struct insn_field *opcode = &insn->opcode; + insn_byte_t op; + int pfx_id; + if (opcode->got) + return; + if (!insn->prefixes.got) + insn_get_prefixes(insn); + + /* Get first opcode */ + op = get_next(insn_byte_t, insn); + opcode->bytes[0] = op; + opcode->nbytes = 1; + + /* Check if there is VEX prefix or not */ + if (insn_is_avx(insn)) { + insn_byte_t m, p; + m = insn_vex_m_bits(insn); + p = insn_vex_p_bits(insn); + insn->attr = inat_get_avx_attribute(op, m, p); + if (!inat_accept_vex(insn->attr) && !inat_is_group(insn->attr)) + insn->attr = 0; /* This instruction is bad */ + goto end; /* VEX has only 1 byte for opcode */ + } + + insn->attr = inat_get_opcode_attribute(op); + while (inat_is_escape(insn->attr)) { + /* Get escaped opcode */ + op = get_next(insn_byte_t, insn); + opcode->bytes[opcode->nbytes++] = op; + pfx_id = insn_last_prefix_id(insn); + insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr); + } + if (inat_must_vex(insn->attr)) + insn->attr = 0; /* This instruction is bad */ +end: + opcode->got = 1; + +err_out: + return; +} + +/** + * insn_get_modrm - collect ModRM byte, if any + * @insn: &struct insn containing instruction + * + * Populates @insn->modrm and updates @insn->next_byte to point past the + * ModRM byte, if any. If necessary, first collects the preceding bytes + * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1. + */ +void insn_get_modrm(struct insn *insn) +{ + struct insn_field *modrm = &insn->modrm; + insn_byte_t pfx_id, mod; + if (modrm->got) + return; + if (!insn->opcode.got) + insn_get_opcode(insn); + + if (inat_has_modrm(insn->attr)) { + mod = get_next(insn_byte_t, insn); + modrm->value = mod; + modrm->nbytes = 1; + if (inat_is_group(insn->attr)) { + pfx_id = insn_last_prefix_id(insn); + insn->attr = inat_get_group_attribute(mod, pfx_id, + insn->attr); + if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) + insn->attr = 0; /* This is bad */ + } + } + + if (insn->x86_64 && inat_is_force64(insn->attr)) + insn->opnd_bytes = 8; + modrm->got = 1; + +err_out: + return; +} + + +/** + * insn_rip_relative() - Does instruction use RIP-relative addressing mode? + * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * ModRM byte. No effect if @insn->x86_64 is 0. + */ +int insn_rip_relative(struct insn *insn) +{ + struct insn_field *modrm = &insn->modrm; + + if (!insn->x86_64) + return 0; + if (!modrm->got) + insn_get_modrm(insn); + /* + * For rip-relative instructions, the mod field (top 2 bits) + * is zero and the r/m field (bottom 3 bits) is 0x5. + */ + return (modrm->nbytes && (modrm->value & 0xc7) == 0x5); +} + +/** + * insn_get_sib() - Get the SIB byte of instruction + * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * ModRM byte. 
+ */ +void insn_get_sib(struct insn *insn) +{ + insn_byte_t modrm; + + if (insn->sib.got) + return; + if (!insn->modrm.got) + insn_get_modrm(insn); + if (insn->modrm.nbytes) { + modrm = (insn_byte_t)insn->modrm.value; + if (insn->addr_bytes != 2 && + X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) { + insn->sib.value = get_next(insn_byte_t, insn); + insn->sib.nbytes = 1; + } + } + insn->sib.got = 1; + +err_out: + return; +} + + +/** + * insn_get_displacement() - Get the displacement of instruction + * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * SIB byte. + * The displacement value is sign-extended. + */ +void insn_get_displacement(struct insn *insn) +{ + insn_byte_t mod, rm, base; + + if (insn->displacement.got) + return; + if (!insn->sib.got) + insn_get_sib(insn); + if (insn->modrm.nbytes) { + /* + * Interpreting the modrm byte: + * mod = 00 - no displacement fields (exceptions below) + * mod = 01 - 1-byte displacement field + * mod = 10 - displacement field is 4 bytes, or 2 bytes if + * address size = 2 (0x67 prefix in 32-bit mode) + * mod = 11 - no memory operand + * + * If address size = 2... + * mod = 00, r/m = 110 - displacement field is 2 bytes + * + * If address size != 2... + * mod != 11, r/m = 100 - SIB byte exists + * mod = 00, SIB base = 101 - displacement field is 4 bytes + * mod = 00, r/m = 101 - rip-relative addressing, displacement + * field is 4 bytes + */ + mod = X86_MODRM_MOD(insn->modrm.value); + rm = X86_MODRM_RM(insn->modrm.value); + base = X86_SIB_BASE(insn->sib.value); + if (mod == 3) + goto out; + if (mod == 1) { + insn->displacement.value = get_next(char, insn); + insn->displacement.nbytes = 1; + } else if (insn->addr_bytes == 2) { + if ((mod == 0 && rm == 6) || mod == 2) { + insn->displacement.value = + get_next(short, insn); + insn->displacement.nbytes = 2; + } + } else { + if ((mod == 0 && rm == 5) || mod == 2 || + (mod == 0 && base == 5)) { + insn->displacement.value = get_next(int, insn); + insn->displacement.nbytes = 4; + } + } + } +out: + insn->displacement.got = 1; + +err_out: + return; +} + +/* Decode moffset16/32/64. Return 0 if failed */ +static int __get_moffset(struct insn *insn) +{ + switch (insn->addr_bytes) { + case 2: + insn->moffset1.value = get_next(short, insn); + insn->moffset1.nbytes = 2; + break; + case 4: + insn->moffset1.value = get_next(int, insn); + insn->moffset1.nbytes = 4; + break; + case 8: + insn->moffset1.value = get_next(int, insn); + insn->moffset1.nbytes = 4; + insn->moffset2.value = get_next(int, insn); + insn->moffset2.nbytes = 4; + break; + default: /* opnd_bytes must be modified manually */ + goto err_out; + } + insn->moffset1.got = insn->moffset2.got = 1; + + return 1; + +err_out: + return 0; +} + +/* Decode imm v32(Iz).
Return 0 if failed */ +static int __get_immv32(struct insn *insn) +{ + switch (insn->opnd_bytes) { + case 2: + insn->immediate.value = get_next(short, insn); + insn->immediate.nbytes = 2; + break; + case 4: + case 8: + insn->immediate.value = get_next(int, insn); + insn->immediate.nbytes = 4; + break; + default: /* opnd_bytes must be modified manually */ + goto err_out; + } + + return 1; + +err_out: + return 0; +} + +/* Decode imm v64(Iv/Ov). Return 0 if failed */ +static int __get_immv(struct insn *insn) +{ + switch (insn->opnd_bytes) { + case 2: + insn->immediate1.value = get_next(short, insn); + insn->immediate1.nbytes = 2; + break; + case 4: + insn->immediate1.value = get_next(int, insn); + insn->immediate1.nbytes = 4; + break; + case 8: + insn->immediate1.value = get_next(int, insn); + insn->immediate1.nbytes = 4; + insn->immediate2.value = get_next(int, insn); + insn->immediate2.nbytes = 4; + break; + default: /* opnd_bytes must be modified manually */ + goto err_out; + } + insn->immediate1.got = insn->immediate2.got = 1; + + return 1; +err_out: + return 0; +} + +/* Decode ptr16:16/32(Ap) */ +static int __get_immptr(struct insn *insn) +{ + switch (insn->opnd_bytes) { + case 2: + insn->immediate1.value = get_next(short, insn); + insn->immediate1.nbytes = 2; + break; + case 4: + insn->immediate1.value = get_next(int, insn); + insn->immediate1.nbytes = 4; + break; + case 8: + /* ptr16:64 does not exist (no segment) */ + return 0; + default: /* opnd_bytes must be modified manually */ + goto err_out; + } + insn->immediate2.value = get_next(unsigned short, insn); + insn->immediate2.nbytes = 2; + insn->immediate1.got = insn->immediate2.got = 1; + + return 1; +err_out: + return 0; +} + +/** + * insn_get_immediate() - Get the immediates of instruction + * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * displacement bytes. + * Basically, most immediates are sign-extended.
The unsigned value can be + * obtained by bit masking with ((1 << (nbytes * 8)) - 1). + */ +void insn_get_immediate(struct insn *insn) +{ + if (insn->immediate.got) + return; + if (!insn->displacement.got) + insn_get_displacement(insn); + + if (inat_has_moffset(insn->attr)) { + if (!__get_moffset(insn)) + goto err_out; + goto done; + } + + if (!inat_has_immediate(insn->attr)) + /* no immediates */ + goto done; + + switch (inat_immediate_size(insn->attr)) { + case INAT_IMM_BYTE: + insn->immediate.value = get_next(char, insn); + insn->immediate.nbytes = 1; + break; + case INAT_IMM_WORD: + insn->immediate.value = get_next(short, insn); + insn->immediate.nbytes = 2; + break; + case INAT_IMM_DWORD: + insn->immediate.value = get_next(int, insn); + insn->immediate.nbytes = 4; + break; + case INAT_IMM_QWORD: + insn->immediate1.value = get_next(int, insn); + insn->immediate1.nbytes = 4; + insn->immediate2.value = get_next(int, insn); + insn->immediate2.nbytes = 4; + break; + case INAT_IMM_PTR: + if (!__get_immptr(insn)) + goto err_out; + break; + case INAT_IMM_VWORD32: + if (!__get_immv32(insn)) + goto err_out; + break; + case INAT_IMM_VWORD: + if (!__get_immv(insn)) + goto err_out; + break; + default: + /* Here, insn must have an immediate, but failed */ + goto err_out; + } + if (inat_has_second_immediate(insn->attr)) { + insn->immediate2.value = get_next(char, insn); + insn->immediate2.nbytes = 1; + } +done: + insn->immediate.got = 1; + +err_out: + return; +} + +/** + * insn_get_length() - Get the length of instruction + * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * immediate bytes. + */ +void insn_get_length(struct insn *insn) +{ + if (insn->length) + return; + if (!insn->immediate.got) + insn_get_immediate(insn); + insn->length = (unsigned char)((unsigned long)insn->next_byte + - (unsigned long)insn->kaddr); +} diff --git a/kernel/tools/perf/util/intel-pt-decoder/insn.h b/kernel/tools/perf/util/intel-pt-decoder/insn.h new file mode 100644 index 000000000..dd12da0f4 --- /dev/null +++ b/kernel/tools/perf/util/intel-pt-decoder/insn.h @@ -0,0 +1,201 @@ +#ifndef _ASM_X86_INSN_H +#define _ASM_X86_INSN_H +/* + * x86 instruction analysis + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * + * Copyright (C) IBM Corporation, 2009 + */ + +/* insn_attr_t is defined in inat.h */ +#include "inat.h" + +struct insn_field { + union { + insn_value_t value; + insn_byte_t bytes[4]; + }; + /* !0 if we've run insn_get_xxx() for this field */ + unsigned char got; + unsigned char nbytes; +}; + +struct insn { + struct insn_field prefixes; /* + * Prefixes + * prefixes.bytes[3]: last prefix + */ + struct insn_field rex_prefix; /* REX prefix */ + struct insn_field vex_prefix; /* VEX prefix */ + struct insn_field opcode; /* + * opcode.bytes[0]: opcode1 + * opcode.bytes[1]: opcode2 + * opcode.bytes[2]: opcode3 + */ + struct insn_field modrm; + struct insn_field sib; + struct insn_field displacement; + union { + struct insn_field immediate; + struct insn_field moffset1; /* for 64bit MOV */ + struct insn_field immediate1; /* for 64bit imm or off16/32 */ + }; + union { + struct insn_field moffset2; /* for 64bit MOV */ + struct insn_field immediate2; /* for 64bit imm or seg16 */ + }; + + insn_attr_t attr; + unsigned char opnd_bytes; + unsigned char addr_bytes; + unsigned char length; + unsigned char x86_64; + + const insn_byte_t *kaddr; /* kernel address of insn to analyze */ + const insn_byte_t *end_kaddr; /* kernel address of last insn in buffer */ + const insn_byte_t *next_byte; +}; + +#define MAX_INSN_SIZE 15 + +#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6) +#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3) +#define X86_MODRM_RM(modrm) ((modrm) & 0x07) + +#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6) +#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3) +#define X86_SIB_BASE(sib) ((sib) & 0x07) + +#define X86_REX_W(rex) ((rex) & 8) +#define X86_REX_R(rex) ((rex) & 4) +#define X86_REX_X(rex) ((rex) & 2) +#define X86_REX_B(rex) ((rex) & 1) + +/* VEX bit flags */ +#define X86_VEX_W(vex) ((vex) & 0x80) /* VEX3 Byte2 */ +#define X86_VEX_R(vex) ((vex) & 0x80) /* VEX2/3 Byte1 */ +#define X86_VEX_X(vex) ((vex) & 0x40) /* VEX3 Byte1 */ +#define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */ +#define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */ +/* VEX bit fields */ +#define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */ +#define X86_VEX2_M 1 /* VEX2.M always 1 */ +#define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */ +#define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */ +#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */ + +extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64); +extern void insn_get_prefixes(struct insn *insn); +extern void insn_get_opcode(struct insn *insn); +extern void insn_get_modrm(struct insn *insn); +extern void insn_get_sib(struct insn *insn); +extern void insn_get_displacement(struct insn *insn); +extern void insn_get_immediate(struct insn *insn); +extern void insn_get_length(struct insn *insn); + +/* Attribute will be determined after getting ModRM (for opcode groups) */ +static inline void insn_get_attribute(struct insn *insn) +{ + insn_get_modrm(insn); +} + +/* Instruction uses RIP-relative addressing */ +extern int insn_rip_relative(struct insn *insn); + +/* Init insn for kernel text */ +static inline void kernel_insn_init(struct insn *insn, + const void *kaddr, int buf_len) +{ +#ifdef CONFIG_X86_64 + insn_init(insn, kaddr, buf_len, 1); +#else /* CONFIG_X86_32 */ + insn_init(insn, kaddr, buf_len, 0); +#endif +} + +static inline int insn_is_avx(struct insn *insn) +{ + if (!insn->prefixes.got) + insn_get_prefixes(insn); + return (insn->vex_prefix.value != 0); +} + +/* Ensure this 
instruction is decoded completely */ +static inline int insn_complete(struct insn *insn) +{ + return insn->opcode.got && insn->modrm.got && insn->sib.got && + insn->displacement.got && insn->immediate.got; +} + +static inline insn_byte_t insn_vex_m_bits(struct insn *insn) +{ + if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */ + return X86_VEX2_M; + else + return X86_VEX3_M(insn->vex_prefix.bytes[1]); +} + +static inline insn_byte_t insn_vex_p_bits(struct insn *insn) +{ + if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */ + return X86_VEX_P(insn->vex_prefix.bytes[1]); + else + return X86_VEX_P(insn->vex_prefix.bytes[2]); +} + +/* Get the last prefix id from last prefix or VEX prefix */ +static inline int insn_last_prefix_id(struct insn *insn) +{ + if (insn_is_avx(insn)) + return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */ + + if (insn->prefixes.bytes[3]) + return inat_get_last_prefix_id(insn->prefixes.bytes[3]); + + return 0; +} + +/* Offset of each field from kaddr */ +static inline int insn_offset_rex_prefix(struct insn *insn) +{ + return insn->prefixes.nbytes; +} +static inline int insn_offset_vex_prefix(struct insn *insn) +{ + return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes; +} +static inline int insn_offset_opcode(struct insn *insn) +{ + return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes; +} +static inline int insn_offset_modrm(struct insn *insn) +{ + return insn_offset_opcode(insn) + insn->opcode.nbytes; +} +static inline int insn_offset_sib(struct insn *insn) +{ + return insn_offset_modrm(insn) + insn->modrm.nbytes; +} +static inline int insn_offset_displacement(struct insn *insn) +{ + return insn_offset_sib(insn) + insn->sib.nbytes; +} +static inline int insn_offset_immediate(struct insn *insn) +{ + return insn_offset_displacement(insn) + insn->displacement.nbytes; +} + +#endif /* _ASM_X86_INSN_H */ diff --git a/kernel/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c new file mode 100644 index 000000000..9409d014b --- /dev/null +++ b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -0,0 +1,2345 @@ +/* + * intel_pt_decoder.c: Intel Processor Trace support + * Copyright (c) 2013-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include +#include +#include +#include +#include +#include + +#include "../cache.h" +#include "../util.h" + +#include "intel-pt-insn-decoder.h" +#include "intel-pt-pkt-decoder.h" +#include "intel-pt-decoder.h" +#include "intel-pt-log.h" + +#define INTEL_PT_BLK_SIZE 1024 + +#define BIT63 (((uint64_t)1 << 63)) + +#define INTEL_PT_RETURN 1 + +/* Maximum number of loops with no packets consumed i.e. 
stuck in a loop */ +#define INTEL_PT_MAX_LOOPS 10000 + +struct intel_pt_blk { + struct intel_pt_blk *prev; + uint64_t ip[INTEL_PT_BLK_SIZE]; +}; + +struct intel_pt_stack { + struct intel_pt_blk *blk; + struct intel_pt_blk *spare; + int pos; +}; + +enum intel_pt_pkt_state { + INTEL_PT_STATE_NO_PSB, + INTEL_PT_STATE_NO_IP, + INTEL_PT_STATE_ERR_RESYNC, + INTEL_PT_STATE_IN_SYNC, + INTEL_PT_STATE_TNT, + INTEL_PT_STATE_TIP, + INTEL_PT_STATE_TIP_PGD, + INTEL_PT_STATE_FUP, + INTEL_PT_STATE_FUP_NO_TIP, +}; + +#ifdef INTEL_PT_STRICT +#define INTEL_PT_STATE_ERR1 INTEL_PT_STATE_NO_PSB +#define INTEL_PT_STATE_ERR2 INTEL_PT_STATE_NO_PSB +#define INTEL_PT_STATE_ERR3 INTEL_PT_STATE_NO_PSB +#define INTEL_PT_STATE_ERR4 INTEL_PT_STATE_NO_PSB +#else +#define INTEL_PT_STATE_ERR1 (decoder->pkt_state) +#define INTEL_PT_STATE_ERR2 INTEL_PT_STATE_NO_IP +#define INTEL_PT_STATE_ERR3 INTEL_PT_STATE_ERR_RESYNC +#define INTEL_PT_STATE_ERR4 INTEL_PT_STATE_IN_SYNC +#endif + +struct intel_pt_decoder { + int (*get_trace)(struct intel_pt_buffer *buffer, void *data); + int (*walk_insn)(struct intel_pt_insn *intel_pt_insn, + uint64_t *insn_cnt_ptr, uint64_t *ip, uint64_t to_ip, + uint64_t max_insn_cnt, void *data); + void *data; + struct intel_pt_state state; + const unsigned char *buf; + size_t len; + bool return_compression; + bool mtc_insn; + bool pge; + bool have_tma; + bool have_cyc; + uint64_t pos; + uint64_t last_ip; + uint64_t ip; + uint64_t cr3; + uint64_t timestamp; + uint64_t tsc_timestamp; + uint64_t ref_timestamp; + uint64_t ret_addr; + uint64_t ctc_timestamp; + uint64_t ctc_delta; + uint64_t cycle_cnt; + uint64_t cyc_ref_timestamp; + uint32_t last_mtc; + uint32_t tsc_ctc_ratio_n; + uint32_t tsc_ctc_ratio_d; + uint32_t tsc_ctc_mult; + uint32_t tsc_slip; + uint32_t ctc_rem_mask; + int mtc_shift; + struct intel_pt_stack stack; + enum intel_pt_pkt_state pkt_state; + struct intel_pt_pkt packet; + struct intel_pt_pkt tnt; + int pkt_step; + int pkt_len; + int last_packet_type; + unsigned int cbr; + unsigned int max_non_turbo_ratio; + double max_non_turbo_ratio_fp; + double cbr_cyc_to_tsc; + double calc_cyc_to_tsc; + bool have_calc_cyc_to_tsc; + int exec_mode; + unsigned int insn_bytes; + uint64_t sign_bit; + uint64_t sign_bits; + uint64_t period; + enum intel_pt_period_type period_type; + uint64_t tot_insn_cnt; + uint64_t period_insn_cnt; + uint64_t period_mask; + uint64_t period_ticks; + uint64_t last_masked_timestamp; + bool continuous_period; + bool overflow; + bool set_fup_tx_flags; + unsigned int fup_tx_flags; + unsigned int tx_flags; + uint64_t timestamp_insn_cnt; + uint64_t stuck_ip; + int no_progress; + int stuck_ip_prd; + int stuck_ip_cnt; + const unsigned char *next_buf; + size_t next_len; + unsigned char temp_buf[INTEL_PT_PKT_MAX_SZ]; +}; + +static uint64_t intel_pt_lower_power_of_2(uint64_t x) +{ + int i; + + for (i = 0; x != 1; i++) + x >>= 1; + + return x << i; +} + +static void intel_pt_setup_period(struct intel_pt_decoder *decoder) +{ + if (decoder->period_type == INTEL_PT_PERIOD_TICKS) { + uint64_t period; + + period = intel_pt_lower_power_of_2(decoder->period); + decoder->period_mask = ~(period - 1); + decoder->period_ticks = period; + } +} + +static uint64_t multdiv(uint64_t t, uint32_t n, uint32_t d) +{ + if (!d) + return 0; + return (t / d) * n + ((t % d) * n) / d; +} + +struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params) +{ + struct intel_pt_decoder *decoder; + + if (!params->get_trace || !params->walk_insn) + return NULL; + + decoder = zalloc(sizeof(struct 
intel_pt_decoder)); + if (!decoder) + return NULL; + + decoder->get_trace = params->get_trace; + decoder->walk_insn = params->walk_insn; + decoder->data = params->data; + decoder->return_compression = params->return_compression; + + decoder->sign_bit = (uint64_t)1 << 47; + decoder->sign_bits = ~(((uint64_t)1 << 48) - 1); + + decoder->period = params->period; + decoder->period_type = params->period_type; + + decoder->max_non_turbo_ratio = params->max_non_turbo_ratio; + decoder->max_non_turbo_ratio_fp = params->max_non_turbo_ratio; + + intel_pt_setup_period(decoder); + + decoder->mtc_shift = params->mtc_period; + decoder->ctc_rem_mask = (1 << decoder->mtc_shift) - 1; + + decoder->tsc_ctc_ratio_n = params->tsc_ctc_ratio_n; + decoder->tsc_ctc_ratio_d = params->tsc_ctc_ratio_d; + + if (!decoder->tsc_ctc_ratio_n) + decoder->tsc_ctc_ratio_d = 0; + + if (decoder->tsc_ctc_ratio_d) { + if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d)) + decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n / + decoder->tsc_ctc_ratio_d; + + /* + * Allow for timestamps appearing to go backwards because a TSC + * packet has slipped past an MTC packet, so allow 2 MTC ticks + * or ... + */ + decoder->tsc_slip = multdiv(2 << decoder->mtc_shift, + decoder->tsc_ctc_ratio_n, + decoder->tsc_ctc_ratio_d); + } + /* ... or 0x100 paranoia */ + if (decoder->tsc_slip < 0x100) + decoder->tsc_slip = 0x100; + + intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift); + intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n); + intel_pt_log("timestamp: tsc_ctc_ratio_d %u\n", decoder->tsc_ctc_ratio_d); + intel_pt_log("timestamp: tsc_ctc_mult %u\n", decoder->tsc_ctc_mult); + intel_pt_log("timestamp: tsc_slip %#x\n", decoder->tsc_slip); + + return decoder; +} + +static void intel_pt_pop_blk(struct intel_pt_stack *stack) +{ + struct intel_pt_blk *blk = stack->blk; + + stack->blk = blk->prev; + if (!stack->spare) + stack->spare = blk; + else + free(blk); +} + +static uint64_t intel_pt_pop(struct intel_pt_stack *stack) +{ + if (!stack->pos) { + if (!stack->blk) + return 0; + intel_pt_pop_blk(stack); + if (!stack->blk) + return 0; + stack->pos = INTEL_PT_BLK_SIZE; + } + return stack->blk->ip[--stack->pos]; +} + +static int intel_pt_alloc_blk(struct intel_pt_stack *stack) +{ + struct intel_pt_blk *blk; + + if (stack->spare) { + blk = stack->spare; + stack->spare = NULL; + } else { + blk = malloc(sizeof(struct intel_pt_blk)); + if (!blk) + return -ENOMEM; + } + + blk->prev = stack->blk; + stack->blk = blk; + stack->pos = 0; + return 0; +} + +static int intel_pt_push(struct intel_pt_stack *stack, uint64_t ip) +{ + int err; + + if (!stack->blk || stack->pos == INTEL_PT_BLK_SIZE) { + err = intel_pt_alloc_blk(stack); + if (err) + return err; + } + + stack->blk->ip[stack->pos++] = ip; + return 0; +} + +static void intel_pt_clear_stack(struct intel_pt_stack *stack) +{ + while (stack->blk) + intel_pt_pop_blk(stack); + stack->pos = 0; +} + +static void intel_pt_free_stack(struct intel_pt_stack *stack) +{ + intel_pt_clear_stack(stack); + zfree(&stack->blk); + zfree(&stack->spare); +} + +void intel_pt_decoder_free(struct intel_pt_decoder *decoder) +{ + intel_pt_free_stack(&decoder->stack); + free(decoder); +} + +static int intel_pt_ext_err(int code) +{ + switch (code) { + case -ENOMEM: + return INTEL_PT_ERR_NOMEM; + case -ENOSYS: + return INTEL_PT_ERR_INTERN; + case -EBADMSG: + return INTEL_PT_ERR_BADPKT; + case -ENODATA: + return INTEL_PT_ERR_NODATA; + case -EILSEQ: + return INTEL_PT_ERR_NOINSN; + case -ENOENT: + return
INTEL_PT_ERR_MISMAT; + case -EOVERFLOW: + return INTEL_PT_ERR_OVR; + case -ENOSPC: + return INTEL_PT_ERR_LOST; + case -ELOOP: + return INTEL_PT_ERR_NELOOP; + default: + return INTEL_PT_ERR_UNK; + } +} + +static const char *intel_pt_err_msgs[] = { + [INTEL_PT_ERR_NOMEM] = "Memory allocation failed", + [INTEL_PT_ERR_INTERN] = "Internal error", + [INTEL_PT_ERR_BADPKT] = "Bad packet", + [INTEL_PT_ERR_NODATA] = "No more data", + [INTEL_PT_ERR_NOINSN] = "Failed to get instruction", + [INTEL_PT_ERR_MISMAT] = "Trace doesn't match instruction", + [INTEL_PT_ERR_OVR] = "Overflow packet", + [INTEL_PT_ERR_LOST] = "Lost trace data", + [INTEL_PT_ERR_UNK] = "Unknown error!", + [INTEL_PT_ERR_NELOOP] = "Never-ending loop", +}; + +int intel_pt__strerror(int code, char *buf, size_t buflen) +{ + if (code < 1 || code >= INTEL_PT_ERR_MAX) + code = INTEL_PT_ERR_UNK; + strlcpy(buf, intel_pt_err_msgs[code], buflen); + return 0; +} + +static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder, + const struct intel_pt_pkt *packet, + uint64_t last_ip) +{ + uint64_t ip; + + switch (packet->count) { + case 2: + ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) | + packet->payload; + break; + case 4: + ip = (last_ip & (uint64_t)0xffffffff00000000ULL) | + packet->payload; + break; + case 6: + ip = packet->payload; + break; + default: + return 0; + } + + if (ip & decoder->sign_bit) + return ip | decoder->sign_bits; + + return ip; +} + +static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder) +{ + decoder->last_ip = intel_pt_calc_ip(decoder, &decoder->packet, + decoder->last_ip); +} + +static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder) +{ + intel_pt_set_last_ip(decoder); + decoder->ip = decoder->last_ip; +} + +static void intel_pt_decoder_log_packet(struct intel_pt_decoder *decoder) +{ + intel_pt_log_packet(&decoder->packet, decoder->pkt_len, decoder->pos, + decoder->buf); +} + +static int intel_pt_bug(struct intel_pt_decoder *decoder) +{ + intel_pt_log("ERROR: Internal error\n"); + decoder->pkt_state = INTEL_PT_STATE_NO_PSB; + return -ENOSYS; +} + +static inline void intel_pt_clear_tx_flags(struct intel_pt_decoder *decoder) +{ + decoder->tx_flags = 0; +} + +static inline void intel_pt_update_in_tx(struct intel_pt_decoder *decoder) +{ + decoder->tx_flags = decoder->packet.payload & INTEL_PT_IN_TX; +} + +static int intel_pt_bad_packet(struct intel_pt_decoder *decoder) +{ + intel_pt_clear_tx_flags(decoder); + decoder->have_tma = false; + decoder->pkt_len = 1; + decoder->pkt_step = 1; + intel_pt_decoder_log_packet(decoder); + if (decoder->pkt_state != INTEL_PT_STATE_NO_PSB) { + intel_pt_log("ERROR: Bad packet\n"); + decoder->pkt_state = INTEL_PT_STATE_ERR1; + } + return -EBADMSG; +} + +static int intel_pt_get_data(struct intel_pt_decoder *decoder) +{ + struct intel_pt_buffer buffer = { .buf = 0, }; + int ret; + + decoder->pkt_step = 0; + + intel_pt_log("Getting more data\n"); + ret = decoder->get_trace(&buffer, decoder->data); + if (ret) + return ret; + decoder->buf = buffer.buf; + decoder->len = buffer.len; + if (!decoder->len) { + intel_pt_log("No more data\n"); + return -ENODATA; + } + if (!buffer.consecutive) { + decoder->ip = 0; + decoder->pkt_state = INTEL_PT_STATE_NO_PSB; + decoder->ref_timestamp = buffer.ref_timestamp; + decoder->timestamp = 0; + decoder->have_tma = false; + decoder->state.trace_nr = buffer.trace_nr; + intel_pt_log("Reference timestamp 0x%" PRIx64 "\n", + decoder->ref_timestamp); + return -ENOLINK; + } + + return 0; +} + +static int intel_pt_get_next_data(struct 
intel_pt_decoder *decoder) +{ + if (!decoder->next_buf) + return intel_pt_get_data(decoder); + + decoder->buf = decoder->next_buf; + decoder->len = decoder->next_len; + decoder->next_buf = 0; + decoder->next_len = 0; + return 0; +} + +static int intel_pt_get_split_packet(struct intel_pt_decoder *decoder) +{ + unsigned char *buf = decoder->temp_buf; + size_t old_len, len, n; + int ret; + + old_len = decoder->len; + len = decoder->len; + memcpy(buf, decoder->buf, len); + + ret = intel_pt_get_data(decoder); + if (ret) { + decoder->pos += old_len; + return ret < 0 ? ret : -EINVAL; + } + + n = INTEL_PT_PKT_MAX_SZ - len; + if (n > decoder->len) + n = decoder->len; + memcpy(buf + len, decoder->buf, n); + len += n; + + ret = intel_pt_get_packet(buf, len, &decoder->packet); + if (ret < (int)old_len) { + decoder->next_buf = decoder->buf; + decoder->next_len = decoder->len; + decoder->buf = buf; + decoder->len = old_len; + return intel_pt_bad_packet(decoder); + } + + decoder->next_buf = decoder->buf + (ret - old_len); + decoder->next_len = decoder->len - (ret - old_len); + + decoder->buf = buf; + decoder->len = ret; + + return ret; +} + +struct intel_pt_pkt_info { + struct intel_pt_decoder *decoder; + struct intel_pt_pkt packet; + uint64_t pos; + int pkt_len; + int last_packet_type; + void *data; +}; + +typedef int (*intel_pt_pkt_cb_t)(struct intel_pt_pkt_info *pkt_info); + +/* Lookahead packets in current buffer */ +static int intel_pt_pkt_lookahead(struct intel_pt_decoder *decoder, + intel_pt_pkt_cb_t cb, void *data) +{ + struct intel_pt_pkt_info pkt_info; + const unsigned char *buf = decoder->buf; + size_t len = decoder->len; + int ret; + + pkt_info.decoder = decoder; + pkt_info.pos = decoder->pos; + pkt_info.pkt_len = decoder->pkt_step; + pkt_info.last_packet_type = decoder->last_packet_type; + pkt_info.data = data; + + while (1) { + do { + pkt_info.pos += pkt_info.pkt_len; + buf += pkt_info.pkt_len; + len -= pkt_info.pkt_len; + + if (!len) + return INTEL_PT_NEED_MORE_BYTES; + + ret = intel_pt_get_packet(buf, len, &pkt_info.packet); + if (!ret) + return INTEL_PT_NEED_MORE_BYTES; + if (ret < 0) + return ret; + + pkt_info.pkt_len = ret; + } while (pkt_info.packet.type == INTEL_PT_PAD); + + ret = cb(&pkt_info); + if (ret) + return 0; + + pkt_info.last_packet_type = pkt_info.packet.type; + } +} + +struct intel_pt_calc_cyc_to_tsc_info { + uint64_t cycle_cnt; + unsigned int cbr; + uint32_t last_mtc; + uint64_t ctc_timestamp; + uint64_t ctc_delta; + uint64_t tsc_timestamp; + uint64_t timestamp; + bool have_tma; + bool from_mtc; + double cbr_cyc_to_tsc; +}; + +static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info) +{ + struct intel_pt_decoder *decoder = pkt_info->decoder; + struct intel_pt_calc_cyc_to_tsc_info *data = pkt_info->data; + uint64_t timestamp; + double cyc_to_tsc; + unsigned int cbr; + uint32_t mtc, mtc_delta, ctc, fc, ctc_rem; + + switch (pkt_info->packet.type) { + case INTEL_PT_TNT: + case INTEL_PT_TIP_PGE: + case INTEL_PT_TIP: + case INTEL_PT_FUP: + case INTEL_PT_PSB: + case INTEL_PT_PIP: + case INTEL_PT_MODE_EXEC: + case INTEL_PT_MODE_TSX: + case INTEL_PT_PSBEND: + case INTEL_PT_PAD: + case INTEL_PT_VMCS: + case INTEL_PT_MNT: + return 0; + + case INTEL_PT_MTC: + if (!data->have_tma) + return 0; + + mtc = pkt_info->packet.payload; + if (mtc > data->last_mtc) + mtc_delta = mtc - data->last_mtc; + else + mtc_delta = mtc + 256 - data->last_mtc; + data->ctc_delta += mtc_delta << decoder->mtc_shift; + data->last_mtc = mtc; + + if (decoder->tsc_ctc_mult) { + timestamp = 
data->ctc_timestamp + + data->ctc_delta * decoder->tsc_ctc_mult; + } else { + timestamp = data->ctc_timestamp + + multdiv(data->ctc_delta, + decoder->tsc_ctc_ratio_n, + decoder->tsc_ctc_ratio_d); + } + + if (timestamp < data->timestamp) + return 1; + + if (pkt_info->last_packet_type != INTEL_PT_CYC) { + data->timestamp = timestamp; + return 0; + } + + break; + + case INTEL_PT_TSC: + timestamp = pkt_info->packet.payload | + (data->timestamp & (0xffULL << 56)); + if (data->from_mtc && timestamp < data->timestamp && + data->timestamp - timestamp < decoder->tsc_slip) + return 1; + if (timestamp < data->timestamp) + timestamp += (1ULL << 56); + if (pkt_info->last_packet_type != INTEL_PT_CYC) { + if (data->from_mtc) + return 1; + data->tsc_timestamp = timestamp; + data->timestamp = timestamp; + return 0; + } + break; + + case INTEL_PT_TMA: + if (data->from_mtc) + return 1; + + if (!decoder->tsc_ctc_ratio_d) + return 0; + + ctc = pkt_info->packet.payload; + fc = pkt_info->packet.count; + ctc_rem = ctc & decoder->ctc_rem_mask; + + data->last_mtc = (ctc >> decoder->mtc_shift) & 0xff; + + data->ctc_timestamp = data->tsc_timestamp - fc; + if (decoder->tsc_ctc_mult) { + data->ctc_timestamp -= ctc_rem * decoder->tsc_ctc_mult; + } else { + data->ctc_timestamp -= + multdiv(ctc_rem, decoder->tsc_ctc_ratio_n, + decoder->tsc_ctc_ratio_d); + } + + data->ctc_delta = 0; + data->have_tma = true; + + return 0; + + case INTEL_PT_CYC: + data->cycle_cnt += pkt_info->packet.payload; + return 0; + + case INTEL_PT_CBR: + cbr = pkt_info->packet.payload; + if (data->cbr && data->cbr != cbr) + return 1; + data->cbr = cbr; + data->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr; + return 0; + + case INTEL_PT_TIP_PGD: + case INTEL_PT_TRACESTOP: + case INTEL_PT_OVF: + case INTEL_PT_BAD: /* Does not happen */ + default: + return 1; + } + + if (!data->cbr && decoder->cbr) { + data->cbr = decoder->cbr; + data->cbr_cyc_to_tsc = decoder->cbr_cyc_to_tsc; + } + + if (!data->cycle_cnt) + return 1; + + cyc_to_tsc = (double)(timestamp - decoder->timestamp) / data->cycle_cnt; + + if (data->cbr && cyc_to_tsc > data->cbr_cyc_to_tsc && + cyc_to_tsc / data->cbr_cyc_to_tsc > 1.25) { + intel_pt_log("Timestamp: calculated %g TSC ticks per cycle too big (c.f. CBR-based value %g), pos " x64_fmt "\n", + cyc_to_tsc, data->cbr_cyc_to_tsc, pkt_info->pos); + return 1; + } + + decoder->calc_cyc_to_tsc = cyc_to_tsc; + decoder->have_calc_cyc_to_tsc = true; + + if (data->cbr) { + intel_pt_log("Timestamp: calculated %g TSC ticks per cycle c.f. CBR-based value %g, pos " x64_fmt "\n", + cyc_to_tsc, data->cbr_cyc_to_tsc, pkt_info->pos); + } else { + intel_pt_log("Timestamp: calculated %g TSC ticks per cycle c.f. 
unknown CBR-based value, pos " x64_fmt "\n", + cyc_to_tsc, pkt_info->pos); + } + + return 1; +} + +static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder, + bool from_mtc) +{ + struct intel_pt_calc_cyc_to_tsc_info data = { + .cycle_cnt = 0, + .cbr = 0, + .last_mtc = decoder->last_mtc, + .ctc_timestamp = decoder->ctc_timestamp, + .ctc_delta = decoder->ctc_delta, + .tsc_timestamp = decoder->tsc_timestamp, + .timestamp = decoder->timestamp, + .have_tma = decoder->have_tma, + .from_mtc = from_mtc, + .cbr_cyc_to_tsc = 0, + }; + + intel_pt_pkt_lookahead(decoder, intel_pt_calc_cyc_cb, &data); +} + +static int intel_pt_get_next_packet(struct intel_pt_decoder *decoder) +{ + int ret; + + decoder->last_packet_type = decoder->packet.type; + + do { + decoder->pos += decoder->pkt_step; + decoder->buf += decoder->pkt_step; + decoder->len -= decoder->pkt_step; + + if (!decoder->len) { + ret = intel_pt_get_next_data(decoder); + if (ret) + return ret; + } + + ret = intel_pt_get_packet(decoder->buf, decoder->len, + &decoder->packet); + if (ret == INTEL_PT_NEED_MORE_BYTES && + decoder->len < INTEL_PT_PKT_MAX_SZ && !decoder->next_buf) { + ret = intel_pt_get_split_packet(decoder); + if (ret < 0) + return ret; + } + if (ret <= 0) + return intel_pt_bad_packet(decoder); + + decoder->pkt_len = ret; + decoder->pkt_step = ret; + intel_pt_decoder_log_packet(decoder); + } while (decoder->packet.type == INTEL_PT_PAD); + + return 0; +} + +static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder) +{ + uint64_t timestamp, masked_timestamp; + + timestamp = decoder->timestamp + decoder->timestamp_insn_cnt; + masked_timestamp = timestamp & decoder->period_mask; + if (decoder->continuous_period) { + if (masked_timestamp != decoder->last_masked_timestamp) + return 1; + } else { + timestamp += 1; + masked_timestamp = timestamp & decoder->period_mask; + if (masked_timestamp != decoder->last_masked_timestamp) { + decoder->last_masked_timestamp = masked_timestamp; + decoder->continuous_period = true; + } + } + return decoder->period_ticks - (timestamp - masked_timestamp); +} + +static uint64_t intel_pt_next_sample(struct intel_pt_decoder *decoder) +{ + switch (decoder->period_type) { + case INTEL_PT_PERIOD_INSTRUCTIONS: + return decoder->period - decoder->period_insn_cnt; + case INTEL_PT_PERIOD_TICKS: + return intel_pt_next_period(decoder); + case INTEL_PT_PERIOD_NONE: + case INTEL_PT_PERIOD_MTC: + default: + return 0; + } +} + +static void intel_pt_sample_insn(struct intel_pt_decoder *decoder) +{ + uint64_t timestamp, masked_timestamp; + + switch (decoder->period_type) { + case INTEL_PT_PERIOD_INSTRUCTIONS: + decoder->period_insn_cnt = 0; + break; + case INTEL_PT_PERIOD_TICKS: + timestamp = decoder->timestamp + decoder->timestamp_insn_cnt; + masked_timestamp = timestamp & decoder->period_mask; + decoder->last_masked_timestamp = masked_timestamp; + break; + case INTEL_PT_PERIOD_NONE: + case INTEL_PT_PERIOD_MTC: + default: + break; + } + + decoder->state.type |= INTEL_PT_INSTRUCTION; +} + +static int intel_pt_walk_insn(struct intel_pt_decoder *decoder, + struct intel_pt_insn *intel_pt_insn, uint64_t ip) +{ + uint64_t max_insn_cnt, insn_cnt = 0; + int err; + + if (!decoder->mtc_insn) + decoder->mtc_insn = true; + + max_insn_cnt = intel_pt_next_sample(decoder); + + err = decoder->walk_insn(intel_pt_insn, &insn_cnt, &decoder->ip, ip, + max_insn_cnt, decoder->data); + + decoder->tot_insn_cnt += insn_cnt; + decoder->timestamp_insn_cnt += insn_cnt; + decoder->period_insn_cnt += insn_cnt; + + if (err) { + 
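/* walk_insn() could not fetch or decode an instruction at decoder->ip */ +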
decoder->no_progress = 0; + decoder->pkt_state = INTEL_PT_STATE_ERR2; + intel_pt_log_at("ERROR: Failed to get instruction", + decoder->ip); + if (err == -ENOENT) + return -ENOLINK; + return -EILSEQ; + } + + if (ip && decoder->ip == ip) { + err = -EAGAIN; + goto out; + } + + if (max_insn_cnt && insn_cnt >= max_insn_cnt) + intel_pt_sample_insn(decoder); + + if (intel_pt_insn->branch == INTEL_PT_BR_NO_BRANCH) { + decoder->state.type = INTEL_PT_INSTRUCTION; + decoder->state.from_ip = decoder->ip; + decoder->state.to_ip = 0; + decoder->ip += intel_pt_insn->length; + err = INTEL_PT_RETURN; + goto out; + } + + if (intel_pt_insn->op == INTEL_PT_OP_CALL) { + /* Zero-length calls are excluded */ + if (intel_pt_insn->branch != INTEL_PT_BR_UNCONDITIONAL || + intel_pt_insn->rel) { + err = intel_pt_push(&decoder->stack, decoder->ip + + intel_pt_insn->length); + if (err) + goto out; + } + } else if (intel_pt_insn->op == INTEL_PT_OP_RET) { + decoder->ret_addr = intel_pt_pop(&decoder->stack); + } + + if (intel_pt_insn->branch == INTEL_PT_BR_UNCONDITIONAL) { + int cnt = decoder->no_progress++; + + decoder->state.from_ip = decoder->ip; + decoder->ip += intel_pt_insn->length + + intel_pt_insn->rel; + decoder->state.to_ip = decoder->ip; + err = INTEL_PT_RETURN; + + /* + * Check for being stuck in a loop. This can happen if a + * decoder error results in the decoder erroneously setting the + * ip to an address that is itself in an infinite loop that + * consumes no packets. When that happens, there must be an + * unconditional branch. + */ + if (cnt) { + if (cnt == 1) { + decoder->stuck_ip = decoder->state.to_ip; + decoder->stuck_ip_prd = 1; + decoder->stuck_ip_cnt = 1; + } else if (cnt > INTEL_PT_MAX_LOOPS || + decoder->state.to_ip == decoder->stuck_ip) { + intel_pt_log_at("ERROR: Never-ending loop", + decoder->state.to_ip); + decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC; + err = -ELOOP; + goto out; + } else if (!--decoder->stuck_ip_cnt) { + decoder->stuck_ip_prd += 1; + decoder->stuck_ip_cnt = decoder->stuck_ip_prd; + decoder->stuck_ip = decoder->state.to_ip; + } + } + goto out_no_progress; + } +out: + decoder->no_progress = 0; +out_no_progress: + decoder->state.insn_op = intel_pt_insn->op; + decoder->state.insn_len = intel_pt_insn->length; + + if (decoder->tx_flags & INTEL_PT_IN_TX) + decoder->state.flags |= INTEL_PT_IN_TX; + + return err; +} + +static int intel_pt_walk_fup(struct intel_pt_decoder *decoder) +{ + struct intel_pt_insn intel_pt_insn; + uint64_t ip; + int err; + + ip = decoder->last_ip; + + while (1) { + err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip); + if (err == INTEL_PT_RETURN) + return 0; + if (err == -EAGAIN) { + if (decoder->set_fup_tx_flags) { + decoder->set_fup_tx_flags = false; + decoder->tx_flags = decoder->fup_tx_flags; + decoder->state.type = INTEL_PT_TRANSACTION; + decoder->state.from_ip = decoder->ip; + decoder->state.to_ip = 0; + decoder->state.flags = decoder->fup_tx_flags; + return 0; + } + return err; + } + decoder->set_fup_tx_flags = false; + if (err) + return err; + + if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) { + intel_pt_log_at("ERROR: Unexpected indirect branch", + decoder->ip); + decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC; + return -ENOENT; + } + + if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) { + intel_pt_log_at("ERROR: Unexpected conditional branch", + decoder->ip); + decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC; + return -ENOENT; + } + + intel_pt_bug(decoder); + } +} + +static int intel_pt_walk_tip(struct intel_pt_decoder *decoder) +{ + 
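/* Walk forward to the branch whose target the pending TIP packet supplies */ +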
struct intel_pt_insn intel_pt_insn; + int err; + + err = intel_pt_walk_insn(decoder, &intel_pt_insn, 0); + if (err == INTEL_PT_RETURN) + return 0; + if (err) + return err; + + if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) { + if (decoder->pkt_state == INTEL_PT_STATE_TIP_PGD) { + decoder->pge = false; + decoder->continuous_period = false; + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; + decoder->state.from_ip = decoder->ip; + decoder->state.to_ip = 0; + if (decoder->packet.count != 0) + decoder->ip = decoder->last_ip; + } else { + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; + decoder->state.from_ip = decoder->ip; + if (decoder->packet.count == 0) { + decoder->state.to_ip = 0; + } else { + decoder->state.to_ip = decoder->last_ip; + decoder->ip = decoder->last_ip; + } + } + return 0; + } + + if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) { + intel_pt_log_at("ERROR: Conditional branch when expecting indirect branch", + decoder->ip); + decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC; + return -ENOENT; + } + + return intel_pt_bug(decoder); +} + +static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder) +{ + struct intel_pt_insn intel_pt_insn; + int err; + + while (1) { + err = intel_pt_walk_insn(decoder, &intel_pt_insn, 0); + if (err == INTEL_PT_RETURN) + return 0; + if (err) + return err; + + if (intel_pt_insn.op == INTEL_PT_OP_RET) { + if (!decoder->return_compression) { + intel_pt_log_at("ERROR: RET when expecting conditional branch", + decoder->ip); + decoder->pkt_state = INTEL_PT_STATE_ERR3; + return -ENOENT; + } + if (!decoder->ret_addr) { + intel_pt_log_at("ERROR: Bad RET compression (stack empty)", + decoder->ip); + decoder->pkt_state = INTEL_PT_STATE_ERR3; + return -ENOENT; + } + if (!(decoder->tnt.payload & BIT63)) { + intel_pt_log_at("ERROR: Bad RET compression (TNT=N)", + decoder->ip); + decoder->pkt_state = INTEL_PT_STATE_ERR3; + return -ENOENT; + } + decoder->tnt.count -= 1; + if (!decoder->tnt.count) + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; + decoder->tnt.payload <<= 1; + decoder->state.from_ip = decoder->ip; + decoder->ip = decoder->ret_addr; + decoder->state.to_ip = decoder->ip; + return 0; + } + + if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) { + /* Handle deferred TIPs */ + err = intel_pt_get_next_packet(decoder); + if (err) + return err; + if (decoder->packet.type != INTEL_PT_TIP || + decoder->packet.count == 0) { + intel_pt_log_at("ERROR: Missing deferred TIP for indirect branch", + decoder->ip); + decoder->pkt_state = INTEL_PT_STATE_ERR3; + decoder->pkt_step = 0; + return -ENOENT; + } + intel_pt_set_last_ip(decoder); + decoder->state.from_ip = decoder->ip; + decoder->state.to_ip = decoder->last_ip; + decoder->ip = decoder->last_ip; + return 0; + } + + if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) { + decoder->tnt.count -= 1; + if (!decoder->tnt.count) + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; + if (decoder->tnt.payload & BIT63) { + decoder->tnt.payload <<= 1; + decoder->state.from_ip = decoder->ip; + decoder->ip += intel_pt_insn.length + + intel_pt_insn.rel; + decoder->state.to_ip = decoder->ip; + return 0; + } + /* Instruction sample for a non-taken branch */ + if (decoder->state.type & INTEL_PT_INSTRUCTION) { + decoder->tnt.payload <<= 1; + decoder->state.type = INTEL_PT_INSTRUCTION; + decoder->state.from_ip = decoder->ip; + decoder->state.to_ip = 0; + decoder->ip += intel_pt_insn.length; + return 0; + } + decoder->ip += intel_pt_insn.length; + if (!decoder->tnt.count) + return -EAGAIN; + decoder->tnt.payload <<= 1; + continue; + } + + 
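/* RET, indirect and conditional branches are all handled above, so any other branch class reaching this point is an internal decoder error */ +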
return intel_pt_bug(decoder); + } +} + +static int intel_pt_mode_tsx(struct intel_pt_decoder *decoder, bool *no_tip) +{ + unsigned int fup_tx_flags; + int err; + + fup_tx_flags = decoder->packet.payload & + (INTEL_PT_IN_TX | INTEL_PT_ABORT_TX); + err = intel_pt_get_next_packet(decoder); + if (err) + return err; + if (decoder->packet.type == INTEL_PT_FUP) { + decoder->fup_tx_flags = fup_tx_flags; + decoder->set_fup_tx_flags = true; + if (!(decoder->fup_tx_flags & INTEL_PT_ABORT_TX)) + *no_tip = true; + } else { + intel_pt_log_at("ERROR: Missing FUP after MODE.TSX", + decoder->pos); + intel_pt_update_in_tx(decoder); + } + return 0; +} + +static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder *decoder) +{ + uint64_t timestamp; + + decoder->have_tma = false; + + if (decoder->ref_timestamp) { + timestamp = decoder->packet.payload | + (decoder->ref_timestamp & (0xffULL << 56)); + if (timestamp < decoder->ref_timestamp) { + if (decoder->ref_timestamp - timestamp > (1ULL << 55)) + timestamp += (1ULL << 56); + } else { + if (timestamp - decoder->ref_timestamp > (1ULL << 55)) + timestamp -= (1ULL << 56); + } + decoder->tsc_timestamp = timestamp; + decoder->timestamp = timestamp; + decoder->ref_timestamp = 0; + decoder->timestamp_insn_cnt = 0; + } else if (decoder->timestamp) { + timestamp = decoder->packet.payload | + (decoder->timestamp & (0xffULL << 56)); + decoder->tsc_timestamp = timestamp; + if (timestamp < decoder->timestamp && + decoder->timestamp - timestamp < decoder->tsc_slip) { + intel_pt_log_to("Suppressing backwards timestamp", + timestamp); + timestamp = decoder->timestamp; + } + if (timestamp < decoder->timestamp) { + intel_pt_log_to("Wraparound timestamp", timestamp); + timestamp += (1ULL << 56); + decoder->tsc_timestamp = timestamp; + } + decoder->timestamp = timestamp; + decoder->timestamp_insn_cnt = 0; + } + + if (decoder->last_packet_type == INTEL_PT_CYC) { + decoder->cyc_ref_timestamp = decoder->timestamp; + decoder->cycle_cnt = 0; + decoder->have_calc_cyc_to_tsc = false; + intel_pt_calc_cyc_to_tsc(decoder, false); + } + + intel_pt_log_to("Setting timestamp", decoder->timestamp); +} + +static int intel_pt_overflow(struct intel_pt_decoder *decoder) +{ + intel_pt_log("ERROR: Buffer overflow\n"); + intel_pt_clear_tx_flags(decoder); + decoder->have_tma = false; + decoder->cbr = 0; + decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC; + decoder->overflow = true; + return -EOVERFLOW; +} + +static void intel_pt_calc_tma(struct intel_pt_decoder *decoder) +{ + uint32_t ctc = decoder->packet.payload; + uint32_t fc = decoder->packet.count; + uint32_t ctc_rem = ctc & decoder->ctc_rem_mask; + + if (!decoder->tsc_ctc_ratio_d) + return; + + decoder->last_mtc = (ctc >> decoder->mtc_shift) & 0xff; + decoder->ctc_timestamp = decoder->tsc_timestamp - fc; + if (decoder->tsc_ctc_mult) { + decoder->ctc_timestamp -= ctc_rem * decoder->tsc_ctc_mult; + } else { + decoder->ctc_timestamp -= multdiv(ctc_rem, + decoder->tsc_ctc_ratio_n, + decoder->tsc_ctc_ratio_d); + } + decoder->ctc_delta = 0; + decoder->have_tma = true; + intel_pt_log("CTC timestamp " x64_fmt " last MTC %#x CTC rem %#x\n", + decoder->ctc_timestamp, decoder->last_mtc, ctc_rem); +} + +static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder) +{ + uint64_t timestamp; + uint32_t mtc, mtc_delta; + + if (!decoder->have_tma) + return; + + mtc = decoder->packet.payload; + + if (mtc > decoder->last_mtc) + mtc_delta = mtc - decoder->last_mtc; + else + mtc_delta = mtc + 256 - decoder->last_mtc; + + decoder->ctc_delta += 
mtc_delta << decoder->mtc_shift; + + if (decoder->tsc_ctc_mult) { + timestamp = decoder->ctc_timestamp + + decoder->ctc_delta * decoder->tsc_ctc_mult; + } else { + timestamp = decoder->ctc_timestamp + + multdiv(decoder->ctc_delta, + decoder->tsc_ctc_ratio_n, + decoder->tsc_ctc_ratio_d); + } + + if (timestamp < decoder->timestamp) + intel_pt_log("Suppressing MTC timestamp " x64_fmt " less than current timestamp " x64_fmt "\n", + timestamp, decoder->timestamp); + else + decoder->timestamp = timestamp; + + decoder->timestamp_insn_cnt = 0; + decoder->last_mtc = mtc; + + if (decoder->last_packet_type == INTEL_PT_CYC) { + decoder->cyc_ref_timestamp = decoder->timestamp; + decoder->cycle_cnt = 0; + decoder->have_calc_cyc_to_tsc = false; + intel_pt_calc_cyc_to_tsc(decoder, true); + } +} + +static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder) +{ + unsigned int cbr = decoder->packet.payload; + + if (decoder->cbr == cbr) + return; + + decoder->cbr = cbr; + decoder->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr; +} + +static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder) +{ + uint64_t timestamp = decoder->cyc_ref_timestamp; + + decoder->have_cyc = true; + + decoder->cycle_cnt += decoder->packet.payload; + + if (!decoder->cyc_ref_timestamp) + return; + + if (decoder->have_calc_cyc_to_tsc) + timestamp += decoder->cycle_cnt * decoder->calc_cyc_to_tsc; + else if (decoder->cbr) + timestamp += decoder->cycle_cnt * decoder->cbr_cyc_to_tsc; + else + return; + + if (timestamp < decoder->timestamp) + intel_pt_log("Suppressing CYC timestamp " x64_fmt " less than current timestamp " x64_fmt "\n", + timestamp, decoder->timestamp); + else + decoder->timestamp = timestamp; +} + +/* Walk PSB+ packets when already in sync. */ +static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder) +{ + int err; + + while (1) { + err = intel_pt_get_next_packet(decoder); + if (err) + return err; + + switch (decoder->packet.type) { + case INTEL_PT_PSBEND: + return 0; + + case INTEL_PT_TIP_PGD: + case INTEL_PT_TIP_PGE: + case INTEL_PT_TIP: + case INTEL_PT_TNT: + case INTEL_PT_TRACESTOP: + case INTEL_PT_BAD: + case INTEL_PT_PSB: + decoder->have_tma = false; + intel_pt_log("ERROR: Unexpected packet\n"); + return -EAGAIN; + + case INTEL_PT_OVF: + return intel_pt_overflow(decoder); + + case INTEL_PT_TSC: + intel_pt_calc_tsc_timestamp(decoder); + break; + + case INTEL_PT_TMA: + intel_pt_calc_tma(decoder); + break; + + case INTEL_PT_CBR: + intel_pt_calc_cbr(decoder); + break; + + case INTEL_PT_MODE_EXEC: + decoder->exec_mode = decoder->packet.payload; + break; + + case INTEL_PT_PIP: + decoder->cr3 = decoder->packet.payload & (BIT63 - 1); + break; + + case INTEL_PT_FUP: + decoder->pge = true; + intel_pt_set_last_ip(decoder); + break; + + case INTEL_PT_MODE_TSX: + intel_pt_update_in_tx(decoder); + break; + + case INTEL_PT_MTC: + intel_pt_calc_mtc_timestamp(decoder); + if (decoder->period_type == INTEL_PT_PERIOD_MTC) + decoder->state.type |= INTEL_PT_INSTRUCTION; + break; + + case INTEL_PT_CYC: + case INTEL_PT_VMCS: + case INTEL_PT_MNT: + case INTEL_PT_PAD: + default: + break; + } + } +} + +static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder) +{ + int err; + + if (decoder->tx_flags & INTEL_PT_ABORT_TX) { + decoder->tx_flags = 0; + decoder->state.flags &= ~INTEL_PT_IN_TX; + decoder->state.flags |= INTEL_PT_ABORT_TX; + } else { + decoder->state.flags |= INTEL_PT_ASYNC; + } + + while (1) { + err = intel_pt_get_next_packet(decoder); + if (err) + return err; + + switch 
(decoder->packet.type) { + case INTEL_PT_TNT: + case INTEL_PT_FUP: + case INTEL_PT_TRACESTOP: + case INTEL_PT_PSB: + case INTEL_PT_TSC: + case INTEL_PT_TMA: + case INTEL_PT_CBR: + case INTEL_PT_MODE_TSX: + case INTEL_PT_BAD: + case INTEL_PT_PSBEND: + intel_pt_log("ERROR: Missing TIP after FUP\n"); + decoder->pkt_state = INTEL_PT_STATE_ERR3; + return -ENOENT; + + case INTEL_PT_OVF: + return intel_pt_overflow(decoder); + + case INTEL_PT_TIP_PGD: + decoder->state.from_ip = decoder->ip; + decoder->state.to_ip = 0; + if (decoder->packet.count != 0) { + intel_pt_set_ip(decoder); + intel_pt_log("Omitting PGD ip " x64_fmt "\n", + decoder->ip); + } + decoder->pge = false; + decoder->continuous_period = false; + return 0; + + case INTEL_PT_TIP_PGE: + decoder->pge = true; + intel_pt_log("Omitting PGE ip " x64_fmt "\n", + decoder->ip); + decoder->state.from_ip = 0; + if (decoder->packet.count == 0) { + decoder->state.to_ip = 0; + } else { + intel_pt_set_ip(decoder); + decoder->state.to_ip = decoder->ip; + } + return 0; + + case INTEL_PT_TIP: + decoder->state.from_ip = decoder->ip; + if (decoder->packet.count == 0) { + decoder->state.to_ip = 0; + } else { + intel_pt_set_ip(decoder); + decoder->state.to_ip = decoder->ip; + } + return 0; + + case INTEL_PT_PIP: + decoder->cr3 = decoder->packet.payload & (BIT63 - 1); + break; + + case INTEL_PT_MTC: + intel_pt_calc_mtc_timestamp(decoder); + if (decoder->period_type == INTEL_PT_PERIOD_MTC) + decoder->state.type |= INTEL_PT_INSTRUCTION; + break; + + case INTEL_PT_CYC: + intel_pt_calc_cyc_timestamp(decoder); + break; + + case INTEL_PT_MODE_EXEC: + decoder->exec_mode = decoder->packet.payload; + break; + + case INTEL_PT_VMCS: + case INTEL_PT_MNT: + case INTEL_PT_PAD: + break; + + default: + return intel_pt_bug(decoder); + } + } +} + +static int intel_pt_walk_trace(struct intel_pt_decoder *decoder) +{ + bool no_tip = false; + int err; + + while (1) { + err = intel_pt_get_next_packet(decoder); + if (err) + return err; +next: + switch (decoder->packet.type) { + case INTEL_PT_TNT: + if (!decoder->packet.count) + break; + decoder->tnt = decoder->packet; + decoder->pkt_state = INTEL_PT_STATE_TNT; + err = intel_pt_walk_tnt(decoder); + if (err == -EAGAIN) + break; + return err; + + case INTEL_PT_TIP_PGD: + if (decoder->packet.count != 0) + intel_pt_set_last_ip(decoder); + decoder->pkt_state = INTEL_PT_STATE_TIP_PGD; + return intel_pt_walk_tip(decoder); + + case INTEL_PT_TIP_PGE: { + decoder->pge = true; + if (decoder->packet.count == 0) { + intel_pt_log_at("Skipping zero TIP.PGE", + decoder->pos); + break; + } + intel_pt_set_ip(decoder); + decoder->state.from_ip = 0; + decoder->state.to_ip = decoder->ip; + return 0; + } + + case INTEL_PT_OVF: + return intel_pt_overflow(decoder); + + case INTEL_PT_TIP: + if (decoder->packet.count != 0) + intel_pt_set_last_ip(decoder); + decoder->pkt_state = INTEL_PT_STATE_TIP; + return intel_pt_walk_tip(decoder); + + case INTEL_PT_FUP: + if (decoder->packet.count == 0) { + intel_pt_log_at("Skipping zero FUP", + decoder->pos); + no_tip = false; + break; + } + intel_pt_set_last_ip(decoder); + err = intel_pt_walk_fup(decoder); + if (err != -EAGAIN) { + if (err) + return err; + if (no_tip) + decoder->pkt_state = + INTEL_PT_STATE_FUP_NO_TIP; + else + decoder->pkt_state = INTEL_PT_STATE_FUP; + return 0; + } + if (no_tip) { + no_tip = false; + break; + } + return intel_pt_walk_fup_tip(decoder); + + case INTEL_PT_TRACESTOP: + decoder->pge = false; + decoder->continuous_period = false; + intel_pt_clear_tx_flags(decoder); + decoder->have_tma = 
false; + break; + + case INTEL_PT_PSB: + intel_pt_clear_stack(&decoder->stack); + err = intel_pt_walk_psbend(decoder); + if (err == -EAGAIN) + goto next; + if (err) + return err; + break; + + case INTEL_PT_PIP: + decoder->cr3 = decoder->packet.payload & (BIT63 - 1); + break; + + case INTEL_PT_MTC: + intel_pt_calc_mtc_timestamp(decoder); + if (decoder->period_type != INTEL_PT_PERIOD_MTC) + break; + /* + * Ensure that there has been an instruction since the + * last MTC. + */ + if (!decoder->mtc_insn) + break; + decoder->mtc_insn = false; + /* Ensure that there is a timestamp */ + if (!decoder->timestamp) + break; + decoder->state.type = INTEL_PT_INSTRUCTION; + decoder->state.from_ip = decoder->ip; + decoder->state.to_ip = 0; + decoder->mtc_insn = false; + return 0; + + case INTEL_PT_TSC: + intel_pt_calc_tsc_timestamp(decoder); + break; + + case INTEL_PT_TMA: + intel_pt_calc_tma(decoder); + break; + + case INTEL_PT_CYC: + intel_pt_calc_cyc_timestamp(decoder); + break; + + case INTEL_PT_CBR: + intel_pt_calc_cbr(decoder); + break; + + case INTEL_PT_MODE_EXEC: + decoder->exec_mode = decoder->packet.payload; + break; + + case INTEL_PT_MODE_TSX: + /* MODE_TSX need not be followed by FUP */ + if (!decoder->pge) { + intel_pt_update_in_tx(decoder); + break; + } + err = intel_pt_mode_tsx(decoder, &no_tip); + if (err) + return err; + goto next; + + case INTEL_PT_BAD: /* Does not happen */ + return intel_pt_bug(decoder); + + case INTEL_PT_PSBEND: + case INTEL_PT_VMCS: + case INTEL_PT_MNT: + case INTEL_PT_PAD: + break; + + default: + return intel_pt_bug(decoder); + } + } +} + +/* Walk PSB+ packets to get in sync. */ +static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) +{ + int err; + + while (1) { + err = intel_pt_get_next_packet(decoder); + if (err) + return err; + + switch (decoder->packet.type) { + case INTEL_PT_TIP_PGD: + decoder->continuous_period = false; + case INTEL_PT_TIP_PGE: + case INTEL_PT_TIP: + intel_pt_log("ERROR: Unexpected packet\n"); + return -ENOENT; + + case INTEL_PT_FUP: + decoder->pge = true; + if (decoder->last_ip || decoder->packet.count == 6 || + decoder->packet.count == 0) { + uint64_t current_ip = decoder->ip; + + intel_pt_set_ip(decoder); + if (current_ip) + intel_pt_log_to("Setting IP", + decoder->ip); + } + break; + + case INTEL_PT_MTC: + intel_pt_calc_mtc_timestamp(decoder); + break; + + case INTEL_PT_TSC: + intel_pt_calc_tsc_timestamp(decoder); + break; + + case INTEL_PT_TMA: + intel_pt_calc_tma(decoder); + break; + + case INTEL_PT_CYC: + intel_pt_calc_cyc_timestamp(decoder); + break; + + case INTEL_PT_CBR: + intel_pt_calc_cbr(decoder); + break; + + case INTEL_PT_PIP: + decoder->cr3 = decoder->packet.payload & (BIT63 - 1); + break; + + case INTEL_PT_MODE_EXEC: + decoder->exec_mode = decoder->packet.payload; + break; + + case INTEL_PT_MODE_TSX: + intel_pt_update_in_tx(decoder); + break; + + case INTEL_PT_TRACESTOP: + decoder->pge = false; + decoder->continuous_period = false; + intel_pt_clear_tx_flags(decoder); + case INTEL_PT_TNT: + decoder->have_tma = false; + intel_pt_log("ERROR: Unexpected packet\n"); + if (decoder->ip) + decoder->pkt_state = INTEL_PT_STATE_ERR4; + else + decoder->pkt_state = INTEL_PT_STATE_ERR3; + return -ENOENT; + + case INTEL_PT_BAD: /* Does not happen */ + return intel_pt_bug(decoder); + + case INTEL_PT_OVF: + return intel_pt_overflow(decoder); + + case INTEL_PT_PSBEND: + return 0; + + case INTEL_PT_PSB: + case INTEL_PT_VMCS: + case INTEL_PT_MNT: + case INTEL_PT_PAD: + default: + break; + } + } +} + +static int intel_pt_walk_to_ip(struct 
intel_pt_decoder *decoder) +{ + int err; + + while (1) { + err = intel_pt_get_next_packet(decoder); + if (err) + return err; + + switch (decoder->packet.type) { + case INTEL_PT_TIP_PGD: + decoder->continuous_period = false; + case INTEL_PT_TIP_PGE: + case INTEL_PT_TIP: + decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD; + if (decoder->last_ip || decoder->packet.count == 6 || + decoder->packet.count == 0) + intel_pt_set_ip(decoder); + if (decoder->ip) + return 0; + break; + + case INTEL_PT_FUP: + if (decoder->overflow) { + if (decoder->last_ip || + decoder->packet.count == 6 || + decoder->packet.count == 0) + intel_pt_set_ip(decoder); + if (decoder->ip) + return 0; + } + if (decoder->packet.count) + intel_pt_set_last_ip(decoder); + break; + + case INTEL_PT_MTC: + intel_pt_calc_mtc_timestamp(decoder); + break; + + case INTEL_PT_TSC: + intel_pt_calc_tsc_timestamp(decoder); + break; + + case INTEL_PT_TMA: + intel_pt_calc_tma(decoder); + break; + + case INTEL_PT_CYC: + intel_pt_calc_cyc_timestamp(decoder); + break; + + case INTEL_PT_CBR: + intel_pt_calc_cbr(decoder); + break; + + case INTEL_PT_PIP: + decoder->cr3 = decoder->packet.payload & (BIT63 - 1); + break; + + case INTEL_PT_MODE_EXEC: + decoder->exec_mode = decoder->packet.payload; + break; + + case INTEL_PT_MODE_TSX: + intel_pt_update_in_tx(decoder); + break; + + case INTEL_PT_OVF: + return intel_pt_overflow(decoder); + + case INTEL_PT_BAD: /* Does not happen */ + return intel_pt_bug(decoder); + + case INTEL_PT_TRACESTOP: + decoder->pge = false; + decoder->continuous_period = false; + intel_pt_clear_tx_flags(decoder); + decoder->have_tma = false; + break; + + case INTEL_PT_PSB: + err = intel_pt_walk_psb(decoder); + if (err) + return err; + if (decoder->ip) { + /* Do not have a sample */ + decoder->state.type = 0; + return 0; + } + break; + + case INTEL_PT_TNT: + case INTEL_PT_PSBEND: + case INTEL_PT_VMCS: + case INTEL_PT_MNT: + case INTEL_PT_PAD: + default: + break; + } + } +} + +static int intel_pt_sync_ip(struct intel_pt_decoder *decoder) +{ + int err; + + intel_pt_log("Scanning for full IP\n"); + err = intel_pt_walk_to_ip(decoder); + if (err) + return err; + + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; + decoder->overflow = false; + + decoder->state.from_ip = 0; + decoder->state.to_ip = decoder->ip; + intel_pt_log_to("Setting IP", decoder->ip); + + return 0; +} + +static int intel_pt_part_psb(struct intel_pt_decoder *decoder) +{ + const unsigned char *end = decoder->buf + decoder->len; + size_t i; + + for (i = INTEL_PT_PSB_LEN - 1; i; i--) { + if (i > decoder->len) + continue; + if (!memcmp(end - i, INTEL_PT_PSB_STR, i)) + return i; + } + return 0; +} + +static int intel_pt_rest_psb(struct intel_pt_decoder *decoder, int part_psb) +{ + size_t rest_psb = INTEL_PT_PSB_LEN - part_psb; + const char *psb = INTEL_PT_PSB_STR; + + if (rest_psb > decoder->len || + memcmp(decoder->buf, psb + part_psb, rest_psb)) + return 0; + + return rest_psb; +} + +static int intel_pt_get_split_psb(struct intel_pt_decoder *decoder, + int part_psb) +{ + int rest_psb, ret; + + decoder->pos += decoder->len; + decoder->len = 0; + + ret = intel_pt_get_next_data(decoder); + if (ret) + return ret; + + rest_psb = intel_pt_rest_psb(decoder, part_psb); + if (!rest_psb) + return 0; + + decoder->pos -= part_psb; + decoder->next_buf = decoder->buf + rest_psb; + decoder->next_len = decoder->len - rest_psb; + memcpy(decoder->temp_buf, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN); + decoder->buf = decoder->temp_buf; + decoder->len = INTEL_PT_PSB_LEN; + + return 0; +} + 
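+/*
+ * Illustrative aside, not part of the original patch: intel_pt_part_psb()
+ * above finds the longest prefix of the 16-byte PSB pattern left dangling
+ * at the end of the current buffer, and intel_pt_rest_psb() checks whether
+ * the next buffer begins with the remaining bytes.  A minimal standalone
+ * sketch of the same idea, assuming the PSB pattern of repeating 0x02 0x82
+ * bytes that INTEL_PT_PSB_STR/INTEL_PT_PSB_LEN denote in
+ * intel-pt-pkt-decoder.h (all names below are local to the sketch):
+ *
+ *    #include <stdio.h>
+ *    #include <string.h>
+ *
+ *    #define PSB_LEN 16
+ *    static const unsigned char psb[PSB_LEN] = {
+ *        0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82,
+ *        0x02, 0x82, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82,
+ *    };
+ *
+ *    // Longest PSB prefix that ends the buffer (cf. intel_pt_part_psb)
+ *    static size_t part_psb(const unsigned char *buf, size_t len)
+ *    {
+ *        size_t i;
+ *
+ *        for (i = PSB_LEN - 1; i; i--)
+ *            if (i <= len && !memcmp(buf + len - i, psb, i))
+ *                return i;
+ *        return 0;
+ *    }
+ *
+ *    // Remaining PSB bytes found at the start of the next buffer,
+ *    // or 0 if it was not a PSB after all (cf. intel_pt_rest_psb)
+ *    static size_t rest_psb(const unsigned char *buf, size_t len,
+ *                           size_t part)
+ *    {
+ *        size_t rest = PSB_LEN - part;
+ *
+ *        if (rest > len || memcmp(buf, psb + part, rest))
+ *            return 0;
+ *        return rest;
+ *    }
+ *
+ *    int main(void)
+ *    {
+ *        // A PSB split 6 bytes / 10 bytes across two buffers
+ *        unsigned char a[] = { 0x00, 0x02, 0x82, 0x02, 0x82, 0x02, 0x82 };
+ *        unsigned char b[] = { 0x02, 0x82, 0x02, 0x82, 0x02, 0x82,
+ *                              0x02, 0x82, 0x02, 0x82, 0x00 };
+ *        size_t part = part_psb(a, sizeof(a));
+ *
+ *        printf("partial PSB at end of a: %zu byte(s)\n", part);
+ *        printf("completed in b: %s\n",
+ *               part && rest_psb(b, sizeof(b), part) ? "yes" : "no");
+ *        return 0;
+ *    }
+ *
+ * On these buffers part_psb() reports 6 and rest_psb() confirms the
+ * remaining 10 bytes: exactly the case intel_pt_get_split_psb() stitches
+ * together through decoder->temp_buf before scanning resumes.
+ */
+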
+static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder) +{ + unsigned char *next; + int ret; + + intel_pt_log("Scanning for PSB\n"); + while (1) { + if (!decoder->len) { + ret = intel_pt_get_next_data(decoder); + if (ret) + return ret; + } + + next = memmem(decoder->buf, decoder->len, INTEL_PT_PSB_STR, + INTEL_PT_PSB_LEN); + if (!next) { + int part_psb; + + part_psb = intel_pt_part_psb(decoder); + if (part_psb) { + ret = intel_pt_get_split_psb(decoder, part_psb); + if (ret) + return ret; + } else { + decoder->pos += decoder->len; + decoder->len = 0; + } + continue; + } + + decoder->pkt_step = next - decoder->buf; + return intel_pt_get_next_packet(decoder); + } +} + +static int intel_pt_sync(struct intel_pt_decoder *decoder) +{ + int err; + + decoder->pge = false; + decoder->continuous_period = false; + decoder->last_ip = 0; + decoder->ip = 0; + intel_pt_clear_stack(&decoder->stack); + + err = intel_pt_scan_for_psb(decoder); + if (err) + return err; + + decoder->pkt_state = INTEL_PT_STATE_NO_IP; + + err = intel_pt_walk_psb(decoder); + if (err) + return err; + + if (decoder->ip) { + decoder->state.type = 0; /* Do not have a sample */ + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; + } else { + return intel_pt_sync_ip(decoder); + } + + return 0; +} + +static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder) +{ + uint64_t est = decoder->timestamp_insn_cnt << 1; + + if (!decoder->cbr || !decoder->max_non_turbo_ratio) + goto out; + + est *= decoder->max_non_turbo_ratio; + est /= decoder->cbr; +out: + return decoder->timestamp + est; +} + +const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) +{ + int err; + + do { + decoder->state.type = INTEL_PT_BRANCH; + decoder->state.flags = 0; + + switch (decoder->pkt_state) { + case INTEL_PT_STATE_NO_PSB: + err = intel_pt_sync(decoder); + break; + case INTEL_PT_STATE_NO_IP: + decoder->last_ip = 0; + /* Fall through */ + case INTEL_PT_STATE_ERR_RESYNC: + err = intel_pt_sync_ip(decoder); + break; + case INTEL_PT_STATE_IN_SYNC: + err = intel_pt_walk_trace(decoder); + break; + case INTEL_PT_STATE_TNT: + err = intel_pt_walk_tnt(decoder); + if (err == -EAGAIN) + err = intel_pt_walk_trace(decoder); + break; + case INTEL_PT_STATE_TIP: + case INTEL_PT_STATE_TIP_PGD: + err = intel_pt_walk_tip(decoder); + break; + case INTEL_PT_STATE_FUP: + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; + err = intel_pt_walk_fup(decoder); + if (err == -EAGAIN) + err = intel_pt_walk_fup_tip(decoder); + else if (!err) + decoder->pkt_state = INTEL_PT_STATE_FUP; + break; + case INTEL_PT_STATE_FUP_NO_TIP: + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; + err = intel_pt_walk_fup(decoder); + if (err == -EAGAIN) + err = intel_pt_walk_trace(decoder); + break; + default: + err = intel_pt_bug(decoder); + break; + } + } while (err == -ENOLINK); + + decoder->state.err = err ? intel_pt_ext_err(err) : 0; + decoder->state.timestamp = decoder->timestamp; + decoder->state.est_timestamp = intel_pt_est_timestamp(decoder); + decoder->state.cr3 = decoder->cr3; + decoder->state.tot_insn_cnt = decoder->tot_insn_cnt; + + if (err) + decoder->state.from_ip = decoder->ip; + + return &decoder->state; +} + +static bool intel_pt_at_psb(unsigned char *buf, size_t len) +{ + if (len < INTEL_PT_PSB_LEN) + return false; + return memmem(buf, INTEL_PT_PSB_LEN, INTEL_PT_PSB_STR, + INTEL_PT_PSB_LEN); +} + +/** + * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet. 
+ * @buf: pointer to buffer pointer + * @len: size of buffer + * + * Updates the buffer pointer to point to the start of the next PSB packet if + * there is one, otherwise the buffer pointer is unchanged. If @buf is updated, + * @len is adjusted accordingly. + * + * Return: %true if a PSB packet is found, %false otherwise. + */ +static bool intel_pt_next_psb(unsigned char **buf, size_t *len) +{ + unsigned char *next; + + next = memmem(*buf, *len, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN); + if (next) { + *len -= next - *buf; + *buf = next; + return true; + } + return false; +} + +/** + * intel_pt_step_psb - move buffer pointer to the start of the following PSB + * packet. + * @buf: pointer to buffer pointer + * @len: size of buffer + * + * Updates the buffer pointer to point to the start of the following PSB packet + * (skipping the PSB at @buf itself) if there is one, otherwise the buffer + * pointer is unchanged. If @buf is updated, @len is adjusted accordingly. + * + * Return: %true if a PSB packet is found, %false otherwise. + */ +static bool intel_pt_step_psb(unsigned char **buf, size_t *len) +{ + unsigned char *next; + + if (!*len) + return false; + + next = memmem(*buf + 1, *len - 1, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN); + if (next) { + *len -= next - *buf; + *buf = next; + return true; + } + return false; +} + +/** + * intel_pt_last_psb - find the last PSB packet in a buffer. + * @buf: buffer + * @len: size of buffer + * + * This function finds the last PSB in a buffer. + * + * Return: A pointer to the last PSB in @buf if found, %NULL otherwise. + */ +static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len) +{ + const char *n = INTEL_PT_PSB_STR; + unsigned char *p; + size_t k; + + if (len < INTEL_PT_PSB_LEN) + return NULL; + + k = len - INTEL_PT_PSB_LEN + 1; + while (1) { + p = memrchr(buf, n[0], k); + if (!p) + return NULL; + if (!memcmp(p + 1, n + 1, INTEL_PT_PSB_LEN - 1)) + return p; + k = p - buf; + if (!k) + return NULL; + } +} + +/** + * intel_pt_next_tsc - find and return next TSC. + * @buf: buffer + * @len: size of buffer + * @tsc: TSC value returned + * + * Find a TSC packet in @buf and return the TSC value. This function assumes + * that @buf starts at a PSB and that PSB+ will contain a TSC and so stops if a + * PSBEND packet is found. + * + * Return: %true if TSC is found, %false otherwise. + */ +static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc) +{ + struct intel_pt_pkt packet; + int ret; + + while (len) { + ret = intel_pt_get_packet(buf, len, &packet); + if (ret <= 0) + return false; + if (packet.type == INTEL_PT_TSC) { + *tsc = packet.payload; + return true; + } + if (packet.type == INTEL_PT_PSBEND) + return false; + buf += ret; + len -= ret; + } + return false; +} + +/** + * intel_pt_tsc_cmp - compare 7-byte TSCs. + * @tsc1: first TSC to compare + * @tsc2: second TSC to compare + * + * This function compares 7-byte TSC values allowing for the possibility that + * TSC wrapped around. Generally it is not possible to know if TSC has wrapped + * around, so for that purpose this function assumes the absolute difference is + * less than half the maximum difference. + * + * Return: %-1 if @tsc1 is before @tsc2, %0 if @tsc1 == @tsc2, %1 if @tsc1 is + * after @tsc2. 
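+ * + * For example, tsc1 = 0xffffffffffffff and tsc2 = 0x1 differ by more than + * half of the 56-bit range, so @tsc1 is deemed to be before @tsc2, i.e. + * @tsc2 is read as @tsc1 plus 2 after a wraparound.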
+ */ +static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2) +{ + const uint64_t halfway = (1ULL << 55); + + if (tsc1 == tsc2) + return 0; + + if (tsc1 < tsc2) { + if (tsc2 - tsc1 < halfway) + return -1; + else + return 1; + } else { + if (tsc1 - tsc2 < halfway) + return 1; + else + return -1; + } +} + +/** + * intel_pt_find_overlap_tsc - determine start of non-overlapped trace data + * using TSC. + * @buf_a: first buffer + * @len_a: size of first buffer + * @buf_b: second buffer + * @len_b: size of second buffer + * + * If the trace contains TSC we can look at the last TSC of @buf_a and the + * first TSC of @buf_b in order to determine if the buffers overlap, and then + * walk forward in @buf_b until a later TSC is found. A precondition is that + * @buf_a and @buf_b are positioned at a PSB. + * + * Return: A pointer into @buf_b from where non-overlapped data starts, or + * @buf_b + @len_b if there is no non-overlapped data. + */ +static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a, + size_t len_a, + unsigned char *buf_b, + size_t len_b) +{ + uint64_t tsc_a, tsc_b; + unsigned char *p; + size_t len; + + p = intel_pt_last_psb(buf_a, len_a); + if (!p) + return buf_b; /* No PSB in buf_a => no overlap */ + + len = len_a - (p - buf_a); + if (!intel_pt_next_tsc(p, len, &tsc_a)) { + /* The last PSB+ in buf_a is incomplete, so go back one more */ + len_a -= len; + p = intel_pt_last_psb(buf_a, len_a); + if (!p) + return buf_b; /* No full PSB+ => assume no overlap */ + len = len_a - (p - buf_a); + if (!intel_pt_next_tsc(p, len, &tsc_a)) + return buf_b; /* No TSC in buf_a => assume no overlap */ + } + + while (1) { + /* Ignore PSB+ with no TSC */ + if (intel_pt_next_tsc(buf_b, len_b, &tsc_b) && + intel_pt_tsc_cmp(tsc_a, tsc_b) < 0) + return buf_b; /* tsc_a < tsc_b => no overlap */ + + if (!intel_pt_step_psb(&buf_b, &len_b)) + return buf_b + len_b; /* No PSB in buf_b => no data */ + } +} + +/** + * intel_pt_find_overlap - determine start of non-overlapped trace data. + * @buf_a: first buffer + * @len_a: size of first buffer + * @buf_b: second buffer + * @len_b: size of second buffer + * @have_tsc: can use TSC packets to detect overlap + * + * When trace samples or snapshots are recorded there is the possibility that + * the data overlaps. Note that, for the purposes of decoding, data is only + * useful if it begins with a PSB packet. + * + * Return: A pointer into @buf_b from where non-overlapped data starts, or + * @buf_b + @len_b if there is no non-overlapped data. + */ +unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a, + unsigned char *buf_b, size_t len_b, + bool have_tsc) +{ + unsigned char *found; + + /* Buffer 'b' must start at PSB so throw away everything before that */ + if (!intel_pt_next_psb(&buf_b, &len_b)) + return buf_b + len_b; /* No PSB */ + + if (!intel_pt_next_psb(&buf_a, &len_a)) + return buf_b; /* No overlap */ + + if (have_tsc) { + found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b); + if (found) + return found; + } + + /* + * Buffer 'b' cannot end within buffer 'a' so, for comparison purposes, + * we can ignore the first part of buffer 'a'. 
+ */ + while (len_b < len_a) { + if (!intel_pt_step_psb(&buf_a, &len_a)) + return buf_b; /* No overlap */ + } + + /* Now len_b >= len_a */ + if (len_b > len_a) { + /* The leftover buffer 'b' must start at a PSB */ + while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) { + if (!intel_pt_step_psb(&buf_a, &len_a)) + return buf_b; /* No overlap */ + } + } + + while (1) { + /* Potential overlap so check the bytes */ + found = memmem(buf_a, len_a, buf_b, len_a); + if (found) + return buf_b + len_a; + + /* Try again at next PSB in buffer 'a' */ + if (!intel_pt_step_psb(&buf_a, &len_a)) + return buf_b; /* No overlap */ + + /* The leftover buffer 'b' must start at a PSB */ + while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) { + if (!intel_pt_step_psb(&buf_a, &len_a)) + return buf_b; /* No overlap */ + } + } +} diff --git a/kernel/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h new file mode 100644 index 000000000..02c38fec1 --- /dev/null +++ b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h @@ -0,0 +1,109 @@ +/* + * intel_pt_decoder.h: Intel Processor Trace support + * Copyright (c) 2013-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef INCLUDE__INTEL_PT_DECODER_H__ +#define INCLUDE__INTEL_PT_DECODER_H__ + +#include <stdint.h> +#include <stddef.h> +#include <stdbool.h> + +#include "intel-pt-insn-decoder.h" + +#define INTEL_PT_IN_TX (1 << 0) +#define INTEL_PT_ABORT_TX (1 << 1) +#define INTEL_PT_ASYNC (1 << 2) + +enum intel_pt_sample_type { + INTEL_PT_BRANCH = 1 << 0, + INTEL_PT_INSTRUCTION = 1 << 1, + INTEL_PT_TRANSACTION = 1 << 2, +}; + +enum intel_pt_period_type { + INTEL_PT_PERIOD_NONE, + INTEL_PT_PERIOD_INSTRUCTIONS, + INTEL_PT_PERIOD_TICKS, + INTEL_PT_PERIOD_MTC, +}; + +enum { + INTEL_PT_ERR_NOMEM = 1, + INTEL_PT_ERR_INTERN, + INTEL_PT_ERR_BADPKT, + INTEL_PT_ERR_NODATA, + INTEL_PT_ERR_NOINSN, + INTEL_PT_ERR_MISMAT, + INTEL_PT_ERR_OVR, + INTEL_PT_ERR_LOST, + INTEL_PT_ERR_UNK, + INTEL_PT_ERR_NELOOP, + INTEL_PT_ERR_MAX, +}; + +struct intel_pt_state { + enum intel_pt_sample_type type; + int err; + uint64_t from_ip; + uint64_t to_ip; + uint64_t cr3; + uint64_t tot_insn_cnt; + uint64_t timestamp; + uint64_t est_timestamp; + uint64_t trace_nr; + uint32_t flags; + enum intel_pt_insn_op insn_op; + int insn_len; +}; + +struct intel_pt_insn; + +struct intel_pt_buffer { + const unsigned char *buf; + size_t len; + bool consecutive; + uint64_t ref_timestamp; + uint64_t trace_nr; +}; + +struct intel_pt_params { + int (*get_trace)(struct intel_pt_buffer *buffer, void *data); + int (*walk_insn)(struct intel_pt_insn *intel_pt_insn, + uint64_t *insn_cnt_ptr, uint64_t *ip, uint64_t to_ip, + uint64_t max_insn_cnt, void *data); + void *data; + bool return_compression; + uint64_t period; + enum intel_pt_period_type period_type; + unsigned max_non_turbo_ratio; + unsigned int mtc_period; + uint32_t tsc_ctc_ratio_n; + uint32_t tsc_ctc_ratio_d; +}; + +struct intel_pt_decoder; + +struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params); +void intel_pt_decoder_free(struct intel_pt_decoder *decoder); + +const 
struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder); + +unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a, + unsigned char *buf_b, size_t len_b, + bool have_tsc); + +int intel_pt__strerror(int code, char *buf, size_t buflen); + +#endif diff --git a/kernel/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c new file mode 100644 index 000000000..d23138c06 --- /dev/null +++ b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c @@ -0,0 +1,249 @@ +/* + * intel_pt_insn_decoder.c: Intel Processor Trace support + * Copyright (c) 2013-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <stdio.h> +#include <string.h> +#include <endian.h> +#include <byteswap.h> + +#include "event.h" + +#include "insn.h" + +#include "inat.c" +#include "insn.c" + +#include "intel-pt-insn-decoder.h" + +/* Based on branch_type() from perf_event_intel_lbr.c */ +static void intel_pt_insn_decoder(struct insn *insn, + struct intel_pt_insn *intel_pt_insn) +{ + enum intel_pt_insn_op op = INTEL_PT_OP_OTHER; + enum intel_pt_insn_branch branch = INTEL_PT_BR_NO_BRANCH; + int ext; + + if (insn_is_avx(insn)) { + intel_pt_insn->op = INTEL_PT_OP_OTHER; + intel_pt_insn->branch = INTEL_PT_BR_NO_BRANCH; + intel_pt_insn->length = insn->length; + return; + } + + switch (insn->opcode.bytes[0]) { + case 0xf: + switch (insn->opcode.bytes[1]) { + case 0x05: /* syscall */ + case 0x34: /* sysenter */ + op = INTEL_PT_OP_SYSCALL; + branch = INTEL_PT_BR_INDIRECT; + break; + case 0x07: /* sysret */ + case 0x35: /* sysexit */ + op = INTEL_PT_OP_SYSRET; + branch = INTEL_PT_BR_INDIRECT; + break; + case 0x80 ... 0x8f: /* jcc */ + op = INTEL_PT_OP_JCC; + branch = INTEL_PT_BR_CONDITIONAL; + break; + default: + break; + } + break; + case 0x70 ... 0x7f: /* jcc */ + op = INTEL_PT_OP_JCC; + branch = INTEL_PT_BR_CONDITIONAL; + break; + case 0xc2: /* near ret */ + case 0xc3: /* near ret */ + case 0xca: /* far ret */ + case 0xcb: /* far ret */ + op = INTEL_PT_OP_RET; + branch = INTEL_PT_BR_INDIRECT; + break; + case 0xcf: /* iret */ + op = INTEL_PT_OP_IRET; + branch = INTEL_PT_BR_INDIRECT; + break; + case 0xcc ... 0xce: /* int */ + op = INTEL_PT_OP_INT; + branch = INTEL_PT_BR_INDIRECT; + break; + case 0xe8: /* call near rel */ + op = INTEL_PT_OP_CALL; + branch = INTEL_PT_BR_UNCONDITIONAL; + break; + case 0x9a: /* call far absolute */ + op = INTEL_PT_OP_CALL; + branch = INTEL_PT_BR_INDIRECT; + break; + case 0xe0 ... 
0xe2: /* loop */ + op = INTEL_PT_OP_LOOP; + branch = INTEL_PT_BR_CONDITIONAL; + break; + case 0xe3: /* jcc */ + op = INTEL_PT_OP_JCC; + branch = INTEL_PT_BR_CONDITIONAL; + break; + case 0xe9: /* jmp */ + case 0xeb: /* jmp */ + op = INTEL_PT_OP_JMP; + branch = INTEL_PT_BR_UNCONDITIONAL; + break; + case 0xea: /* far jmp */ + op = INTEL_PT_OP_JMP; + branch = INTEL_PT_BR_INDIRECT; + break; + case 0xff: /* call near absolute, call far absolute ind */ + ext = (insn->modrm.bytes[0] >> 3) & 0x7; + switch (ext) { + case 2: /* near ind call */ + case 3: /* far ind call */ + op = INTEL_PT_OP_CALL; + branch = INTEL_PT_BR_INDIRECT; + break; + case 4: + case 5: + op = INTEL_PT_OP_JMP; + branch = INTEL_PT_BR_INDIRECT; + break; + default: + break; + } + break; + default: + break; + } + + intel_pt_insn->op = op; + intel_pt_insn->branch = branch; + intel_pt_insn->length = insn->length; + + if (branch == INTEL_PT_BR_CONDITIONAL || + branch == INTEL_PT_BR_UNCONDITIONAL) { +#if __BYTE_ORDER == __BIG_ENDIAN + switch (insn->immediate.nbytes) { + case 1: + intel_pt_insn->rel = insn->immediate.value; + break; + case 2: + intel_pt_insn->rel = + bswap_16((short)insn->immediate.value); + break; + case 4: + intel_pt_insn->rel = bswap_32(insn->immediate.value); + break; + default: + intel_pt_insn->rel = 0; + break; + } +#else + intel_pt_insn->rel = insn->immediate.value; +#endif + } +} + +int intel_pt_get_insn(const unsigned char *buf, size_t len, int x86_64, + struct intel_pt_insn *intel_pt_insn) +{ + struct insn insn; + + insn_init(&insn, buf, len, x86_64); + insn_get_length(&insn); + if (!insn_complete(&insn) || insn.length > len) + return -1; + intel_pt_insn_decoder(&insn, intel_pt_insn); + if (insn.length < INTEL_PT_INSN_DBG_BUF_SZ) + memcpy(intel_pt_insn->buf, buf, insn.length); + else + memcpy(intel_pt_insn->buf, buf, INTEL_PT_INSN_DBG_BUF_SZ); + return 0; +} + +const char *branch_name[] = { + [INTEL_PT_OP_OTHER] = "Other", + [INTEL_PT_OP_CALL] = "Call", + [INTEL_PT_OP_RET] = "Ret", + [INTEL_PT_OP_JCC] = "Jcc", + [INTEL_PT_OP_JMP] = "Jmp", + [INTEL_PT_OP_LOOP] = "Loop", + [INTEL_PT_OP_IRET] = "IRet", + [INTEL_PT_OP_INT] = "Int", + [INTEL_PT_OP_SYSCALL] = "Syscall", + [INTEL_PT_OP_SYSRET] = "Sysret", +}; + +const char *intel_pt_insn_name(enum intel_pt_insn_op op) +{ + return branch_name[op]; +} + +int intel_pt_insn_desc(const struct intel_pt_insn *intel_pt_insn, char *buf, + size_t buf_len) +{ + switch (intel_pt_insn->branch) { + case INTEL_PT_BR_CONDITIONAL: + case INTEL_PT_BR_UNCONDITIONAL: + return snprintf(buf, buf_len, "%s %s%d", + intel_pt_insn_name(intel_pt_insn->op), + intel_pt_insn->rel > 0 ? 
"+" : "", + intel_pt_insn->rel); + case INTEL_PT_BR_NO_BRANCH: + case INTEL_PT_BR_INDIRECT: + return snprintf(buf, buf_len, "%s", + intel_pt_insn_name(intel_pt_insn->op)); + default: + break; + } + return 0; +} + +size_t intel_pt_insn_max_size(void) +{ + return MAX_INSN_SIZE; +} + +int intel_pt_insn_type(enum intel_pt_insn_op op) +{ + switch (op) { + case INTEL_PT_OP_OTHER: + return 0; + case INTEL_PT_OP_CALL: + return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL; + case INTEL_PT_OP_RET: + return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN; + case INTEL_PT_OP_JCC: + return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL; + case INTEL_PT_OP_JMP: + return PERF_IP_FLAG_BRANCH; + case INTEL_PT_OP_LOOP: + return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL; + case INTEL_PT_OP_IRET: + return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | + PERF_IP_FLAG_INTERRUPT; + case INTEL_PT_OP_INT: + return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | + PERF_IP_FLAG_INTERRUPT; + case INTEL_PT_OP_SYSCALL: + return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | + PERF_IP_FLAG_SYSCALLRET; + case INTEL_PT_OP_SYSRET: + return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | + PERF_IP_FLAG_SYSCALLRET; + default: + return 0; + } +} diff --git a/kernel/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h new file mode 100644 index 000000000..b0adbf373 --- /dev/null +++ b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h @@ -0,0 +1,65 @@ +/* + * intel_pt_insn_decoder.h: Intel Processor Trace support + * Copyright (c) 2013-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef INCLUDE__INTEL_PT_INSN_DECODER_H__ +#define INCLUDE__INTEL_PT_INSN_DECODER_H__ + +#include +#include + +#define INTEL_PT_INSN_DESC_MAX 32 +#define INTEL_PT_INSN_DBG_BUF_SZ 16 + +enum intel_pt_insn_op { + INTEL_PT_OP_OTHER, + INTEL_PT_OP_CALL, + INTEL_PT_OP_RET, + INTEL_PT_OP_JCC, + INTEL_PT_OP_JMP, + INTEL_PT_OP_LOOP, + INTEL_PT_OP_IRET, + INTEL_PT_OP_INT, + INTEL_PT_OP_SYSCALL, + INTEL_PT_OP_SYSRET, +}; + +enum intel_pt_insn_branch { + INTEL_PT_BR_NO_BRANCH, + INTEL_PT_BR_INDIRECT, + INTEL_PT_BR_CONDITIONAL, + INTEL_PT_BR_UNCONDITIONAL, +}; + +struct intel_pt_insn { + enum intel_pt_insn_op op; + enum intel_pt_insn_branch branch; + int length; + int32_t rel; + unsigned char buf[INTEL_PT_INSN_DBG_BUF_SZ]; +}; + +int intel_pt_get_insn(const unsigned char *buf, size_t len, int x86_64, + struct intel_pt_insn *intel_pt_insn); + +const char *intel_pt_insn_name(enum intel_pt_insn_op op); + +int intel_pt_insn_desc(const struct intel_pt_insn *intel_pt_insn, char *buf, + size_t buf_len); + +size_t intel_pt_insn_max_size(void); + +int intel_pt_insn_type(enum intel_pt_insn_op op); + +#endif diff --git a/kernel/tools/perf/util/intel-pt-decoder/intel-pt-log.c b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-log.c new file mode 100644 index 000000000..319bef33a --- /dev/null +++ b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-log.c @@ -0,0 +1,156 @@ +/* + * intel_pt_log.c: Intel Processor Trace support + * Copyright (c) 2013-2014, Intel Corporation. 
diff --git a/kernel/tools/perf/util/intel-pt-decoder/intel-pt-log.c b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-log.c
new file mode 100644
index 000000000..319bef33a
--- /dev/null
+++ b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-log.c
@@ -0,0 +1,156 @@
+/*
+ * intel_pt_log.c: Intel Processor Trace support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "intel-pt-log.h"
+#include "intel-pt-insn-decoder.h"
+
+#include "intel-pt-pkt-decoder.h"
+
+#define MAX_LOG_NAME 256
+
+static FILE *f;
+static char log_name[MAX_LOG_NAME];
+bool intel_pt_enable_logging;
+
+void intel_pt_log_enable(void)
+{
+	intel_pt_enable_logging = true;
+}
+
+void intel_pt_log_disable(void)
+{
+	if (f)
+		fflush(f);
+	intel_pt_enable_logging = false;
+}
+
+void intel_pt_log_set_name(const char *name)
+{
+	strncpy(log_name, name, MAX_LOG_NAME - 5);
+	strcat(log_name, ".log");
+}
+
+static void intel_pt_print_data(const unsigned char *buf, int len, uint64_t pos,
+				int indent)
+{
+	int i;
+
+	for (i = 0; i < indent; i++)
+		fprintf(f, " ");
+
+	fprintf(f, "  %08" PRIx64 ": ", pos);
+	for (i = 0; i < len; i++)
+		fprintf(f, " %02x", buf[i]);
+	for (; i < 16; i++)
+		fprintf(f, "   ");
+	fprintf(f, " ");
+}
+
+static void intel_pt_print_no_data(uint64_t pos, int indent)
+{
+	int i;
+
+	for (i = 0; i < indent; i++)
+		fprintf(f, " ");
+
+	fprintf(f, "  %08" PRIx64 ": ", pos);
+	for (i = 0; i < 16; i++)
+		fprintf(f, "   ");
+	fprintf(f, " ");
+}
+
+static int intel_pt_log_open(void)
+{
+	if (!intel_pt_enable_logging)
+		return -1;
+
+	if (f)
+		return 0;
+
+	if (!log_name[0])
+		return -1;
+
+	f = fopen(log_name, "w+");
+	if (!f) {
+		intel_pt_enable_logging = false;
+		return -1;
+	}
+
+	return 0;
+}
+
+void __intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len,
+			   uint64_t pos, const unsigned char *buf)
+{
+	char desc[INTEL_PT_PKT_DESC_MAX];
+
+	if (intel_pt_log_open())
+		return;
+
+	intel_pt_print_data(buf, pkt_len, pos, 0);
+	intel_pt_pkt_desc(packet, desc, INTEL_PT_PKT_DESC_MAX);
+	fprintf(f, "%s\n", desc);
+}
+
+void __intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip)
+{
+	char desc[INTEL_PT_INSN_DESC_MAX];
+	size_t len = intel_pt_insn->length;
+
+	if (intel_pt_log_open())
+		return;
+
+	if (len > INTEL_PT_INSN_DBG_BUF_SZ)
+		len = INTEL_PT_INSN_DBG_BUF_SZ;
+	intel_pt_print_data(intel_pt_insn->buf, len, ip, 8);
+	if (intel_pt_insn_desc(intel_pt_insn, desc, INTEL_PT_INSN_DESC_MAX) > 0)
+		fprintf(f, "%s\n", desc);
+	else
+		fprintf(f, "Bad instruction!\n");
+}
+
+void __intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn,
+				 uint64_t ip)
+{
+	char desc[INTEL_PT_INSN_DESC_MAX];
+
+	if (intel_pt_log_open())
+		return;
+
+	intel_pt_print_no_data(ip, 8);
+	if (intel_pt_insn_desc(intel_pt_insn, desc, INTEL_PT_INSN_DESC_MAX) > 0)
+		fprintf(f, "%s\n", desc);
+	else
+		fprintf(f, "Bad instruction!\n");
+}
+
+void __intel_pt_log(const char *fmt, ...)
+{
+	va_list args;
+
+	if (intel_pt_log_open())
+		return;
+
+	va_start(args, fmt);
+	vfprintf(f, fmt, args);
+	va_end(args);
+}
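All of the logging state above is file-local and lazily initialized: nothing is opened or written until a log name has been set and logging has been enabled. A hypothetical caller (a sketch, not taken from the patch) would wire it up like this:

#include "intel-pt-decoder/intel-pt-log.h"

static void trace_decode_session(void)
{
	intel_pt_log_set_name("intel_pt");	/* output goes to "intel_pt.log" */
	intel_pt_log_enable();
	intel_pt_log("decode resumed at " x64_fmt "\n", (uint64_t)0x401000);
	intel_pt_log_disable();			/* flushes any buffered output */
}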
diff --git a/kernel/tools/perf/util/intel-pt-decoder/intel-pt-log.h b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-log.h
new file mode 100644
index 000000000..debe751dc
--- /dev/null
+++ b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-log.h
@@ -0,0 +1,78 @@
+/*
+ * intel_pt_log.h: Intel Processor Trace support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef INCLUDE__INTEL_PT_LOG_H__
+#define INCLUDE__INTEL_PT_LOG_H__
+
+#include <stdint.h>
+#include <inttypes.h>
+
+struct intel_pt_pkt;
+
+void intel_pt_log_enable(void);
+void intel_pt_log_disable(void);
+void intel_pt_log_set_name(const char *name);
+
+void __intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len,
+			   uint64_t pos, const unsigned char *buf);
+
+struct intel_pt_insn;
+
+void __intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip);
+void __intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn,
+				 uint64_t ip);
+
+__attribute__((format(printf, 1, 2)))
+void __intel_pt_log(const char *fmt, ...);
+
+#define intel_pt_log(fmt, ...) \
+	do { \
+		if (intel_pt_enable_logging) \
+			__intel_pt_log(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define intel_pt_log_packet(arg, ...) \
+	do { \
+		if (intel_pt_enable_logging) \
+			__intel_pt_log_packet(arg, ##__VA_ARGS__); \
+	} while (0)
+
+#define intel_pt_log_insn(arg, ...) \
+	do { \
+		if (intel_pt_enable_logging) \
+			__intel_pt_log_insn(arg, ##__VA_ARGS__); \
+	} while (0)
+
+#define intel_pt_log_insn_no_data(arg, ...) \
+	do { \
+		if (intel_pt_enable_logging) \
+			__intel_pt_log_insn_no_data(arg, ##__VA_ARGS__); \
+	} while (0)
+
+#define x64_fmt "0x%" PRIx64
+
+extern bool intel_pt_enable_logging;
+
+static inline void intel_pt_log_at(const char *msg, uint64_t u)
+{
+	intel_pt_log("%s at " x64_fmt "\n", msg, u);
+}
+
+static inline void intel_pt_log_to(const char *msg, uint64_t u)
+{
+	intel_pt_log("%s to " x64_fmt "\n", msg, u);
+}
+
+#endif
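Note the wrapper-macro pattern above: each intel_pt_log_*() call site tests intel_pt_enable_logging before calling into the __intel_pt_log*() helpers, so with logging disabled the format arguments are never even evaluated. A sketch of what that buys (expensive_summary() and decoder are hypothetical names, not from the patch):

/* With the macro, expensive_summary() only runs when logging is on: */
intel_pt_log("state: %s\n", expensive_summary(decoder));

/* Calling the helper directly would always evaluate the argument: */
__intel_pt_log("state: %s\n", expensive_summary(decoder));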
diff --git a/kernel/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
new file mode 100644
index 000000000..b1257c816
--- /dev/null
+++ b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -0,0 +1,518 @@
+/*
+ * intel_pt_pkt_decoder.c: Intel Processor Trace support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <endian.h>
+#include <byteswap.h>
+
+#include "intel-pt-pkt-decoder.h"
+
+#define BIT(n)		(1 << (n))
+
+#define BIT63		((uint64_t)1 << 63)
+
+#define NR_FLAG		BIT63
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define le16_to_cpu bswap_16
+#define le32_to_cpu bswap_32
+#define le64_to_cpu bswap_64
+#define memcpy_le64(d, s, n) do { \
+	memcpy((d), (s), (n));    \
+	*(d) = le64_to_cpu(*(d)); \
+} while (0)
+#else
+#define le16_to_cpu
+#define le32_to_cpu
+#define le64_to_cpu
+#define memcpy_le64 memcpy
+#endif
+
+static const char * const packet_name[] = {
+	[INTEL_PT_BAD]		= "Bad Packet!",
+	[INTEL_PT_PAD]		= "PAD",
+	[INTEL_PT_TNT]		= "TNT",
+	[INTEL_PT_TIP_PGD]	= "TIP.PGD",
+	[INTEL_PT_TIP_PGE]	= "TIP.PGE",
+	[INTEL_PT_TSC]		= "TSC",
+	[INTEL_PT_TMA]		= "TMA",
+	[INTEL_PT_MODE_EXEC]	= "MODE.Exec",
+	[INTEL_PT_MODE_TSX]	= "MODE.TSX",
+	[INTEL_PT_MTC]		= "MTC",
+	[INTEL_PT_TIP]		= "TIP",
+	[INTEL_PT_FUP]		= "FUP",
+	[INTEL_PT_CYC]		= "CYC",
+	[INTEL_PT_VMCS]		= "VMCS",
+	[INTEL_PT_PSB]		= "PSB",
+	[INTEL_PT_PSBEND]	= "PSBEND",
+	[INTEL_PT_CBR]		= "CBR",
+	[INTEL_PT_TRACESTOP]	= "TraceSTOP",
+	[INTEL_PT_PIP]		= "PIP",
+	[INTEL_PT_OVF]		= "OVF",
+	[INTEL_PT_MNT]		= "MNT",
+};
+
+const char *intel_pt_pkt_name(enum intel_pt_pkt_type type)
+{
+	return packet_name[type];
+}
+
+static int intel_pt_get_long_tnt(const unsigned char *buf, size_t len,
+				 struct intel_pt_pkt *packet)
+{
+	uint64_t payload;
+	int count;
+
+	if (len < 8)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	payload = le64_to_cpu(*(uint64_t *)buf);
+
+	for (count = 47; count; count--) {
+		if (payload & BIT63)
+			break;
+		payload <<= 1;
+	}
+
+	packet->type = INTEL_PT_TNT;
+	packet->count = count;
+	packet->payload = payload << 1;
+	return 8;
+}
+
+static int intel_pt_get_pip(const unsigned char *buf, size_t len,
+			    struct intel_pt_pkt *packet)
+{
+	uint64_t payload = 0;
+
+	if (len < 8)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	packet->type = INTEL_PT_PIP;
+	memcpy_le64(&payload, buf + 2, 6);
+	packet->payload = payload >> 1;
+	if (payload & 1)
+		packet->payload |= NR_FLAG;
+
+	return 8;
+}
+
+static int intel_pt_get_tracestop(struct intel_pt_pkt *packet)
+{
+	packet->type = INTEL_PT_TRACESTOP;
+	return 2;
+}
+
+static int intel_pt_get_cbr(const unsigned char *buf, size_t len,
+			    struct intel_pt_pkt *packet)
+{
+	if (len < 4)
+		return INTEL_PT_NEED_MORE_BYTES;
+	packet->type = INTEL_PT_CBR;
+	packet->payload = buf[2];
+	return 4;
+}
+
+static int intel_pt_get_vmcs(const unsigned char *buf, size_t len,
+			     struct intel_pt_pkt *packet)
+{
+	unsigned int count = (52 - 5) >> 3;
+
+	if (count < 1 || count > 7)
+		return INTEL_PT_BAD_PACKET;
+
+	if (len < count + 2)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	packet->type = INTEL_PT_VMCS;
+	packet->count = count;
+	memcpy_le64(&packet->payload, buf + 2, count);
+
+	return count + 2;
+}
+
+static int intel_pt_get_ovf(struct intel_pt_pkt *packet)
+{
+	packet->type = INTEL_PT_OVF;
+	return 2;
+}
+
+static int intel_pt_get_psb(const unsigned char *buf, size_t len,
+			    struct intel_pt_pkt *packet)
+{
+	int i;
+
+	if (len < 16)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	for (i = 2; i < 16; i += 2) {
+		if (buf[i] != 2 || buf[i + 1] != 0x82)
+			return INTEL_PT_BAD_PACKET;
+	}
+
+	packet->type = INTEL_PT_PSB;
+	return 16;
+}
+
+static int intel_pt_get_psbend(struct intel_pt_pkt *packet)
+{
+	packet->type = INTEL_PT_PSBEND;
+	return 2;
+}
+
+static int intel_pt_get_tma(const unsigned char *buf, size_t len,
+			    struct intel_pt_pkt *packet)
+{
+	if (len < 7)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	packet->type = INTEL_PT_TMA;
+	packet->payload = buf[2] | (buf[3] << 8);
+	packet->count = buf[5] | ((buf[6] & BIT(0)) << 8);
+	return 7;
+}
+
+static int intel_pt_get_pad(struct intel_pt_pkt *packet)
+{
+	packet->type = INTEL_PT_PAD;
+	return 1;
+}
+
+static int intel_pt_get_mnt(const unsigned char *buf, size_t len,
+			    struct intel_pt_pkt *packet)
+{
+	if (len < 11)
+		return INTEL_PT_NEED_MORE_BYTES;
+	packet->type = INTEL_PT_MNT;
+	memcpy_le64(&packet->payload, buf + 3, 8);
+	return 11;
+}
+
+static int intel_pt_get_3byte(const unsigned char *buf, size_t len,
+			      struct intel_pt_pkt *packet)
+{
+	if (len < 3)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	switch (buf[2]) {
+	case 0x88: /* MNT */
+		return intel_pt_get_mnt(buf, len, packet);
+	default:
+		return INTEL_PT_BAD_PACKET;
+	}
+}
+
+static int intel_pt_get_ext(const unsigned char *buf, size_t len,
+			    struct intel_pt_pkt *packet)
+{
+	if (len < 2)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	switch (buf[1]) {
+	case 0xa3: /* Long TNT */
+		return intel_pt_get_long_tnt(buf, len, packet);
+	case 0x43: /* PIP */
+		return intel_pt_get_pip(buf, len, packet);
+	case 0x83: /* TraceStop */
+		return intel_pt_get_tracestop(packet);
+	case 0x03: /* CBR */
+		return intel_pt_get_cbr(buf, len, packet);
+	case 0xc8: /* VMCS */
+		return intel_pt_get_vmcs(buf, len, packet);
+	case 0xf3: /* OVF */
+		return intel_pt_get_ovf(packet);
+	case 0x82: /* PSB */
+		return intel_pt_get_psb(buf, len, packet);
+	case 0x23: /* PSBEND */
+		return intel_pt_get_psbend(packet);
+	case 0x73: /* TMA */
+		return intel_pt_get_tma(buf, len, packet);
+	case 0xC3: /* 3-byte header */
+		return intel_pt_get_3byte(buf, len, packet);
+	default:
+		return INTEL_PT_BAD_PACKET;
+	}
+}
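+
+/*
+ * Worked example for the short TNT decode below (editor illustration,
+ * not in the original patch): byte 0x1a = 00011010b.  The loop shifts
+ * left until bit 7 holds the stop bit, which takes 3 shifts, so
+ * count = 6 - 3 = 3 taken/not-taken bits.  The << 57 then drops the
+ * stop bit and leaves the TNT bits left-justified at bit 63: read from
+ * the top they are T, N, T, which intel_pt_pkt_desc() prints as
+ * "TNT TNT (3)".
+ */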
+
+static int intel_pt_get_short_tnt(unsigned int byte,
+				  struct intel_pt_pkt *packet)
+{
+	int count;
+
+	for (count = 6; count; count--) {
+		if (byte & BIT(7))
+			break;
+		byte <<= 1;
+	}
+
+	packet->type = INTEL_PT_TNT;
+	packet->count = count;
+	packet->payload = (uint64_t)byte << 57;
+
+	return 1;
+}
+
+static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf,
+			    size_t len, struct intel_pt_pkt *packet)
+{
+	unsigned int offs = 1, shift;
+	uint64_t payload = byte >> 3;
+
+	byte >>= 2;
+	len -= 1;
+	for (shift = 5; byte & 1; shift += 7) {
+		if (offs > 9)
+			return INTEL_PT_BAD_PACKET;
+		if (len < offs)
+			return INTEL_PT_NEED_MORE_BYTES;
+		byte = buf[offs++];
+		payload |= (byte >> 1) << shift;
+	}
+
+	packet->type = INTEL_PT_CYC;
+	packet->payload = payload;
+	return offs;
+}
+
+static int intel_pt_get_ip(enum intel_pt_pkt_type type, unsigned int byte,
+			   const unsigned char *buf, size_t len,
+			   struct intel_pt_pkt *packet)
+{
+	switch (byte >> 5) {
+	case 0:
+		packet->count = 0;
+		break;
+	case 1:
+		if (len < 3)
+			return INTEL_PT_NEED_MORE_BYTES;
+		packet->count = 2;
+		packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1));
+		break;
+	case 2:
+		if (len < 5)
+			return INTEL_PT_NEED_MORE_BYTES;
+		packet->count = 4;
+		packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1));
+		break;
+	case 3:
+	case 6:
+		if (len < 7)
+			return INTEL_PT_NEED_MORE_BYTES;
+		packet->count = 6;
+		memcpy_le64(&packet->payload, buf + 1, 6);
+		break;
+	default:
+		return INTEL_PT_BAD_PACKET;
+	}
+
+	packet->type = type;
+
+	return packet->count + 1;
+}
+
+static int intel_pt_get_mode(const unsigned char *buf, size_t len,
+			     struct intel_pt_pkt *packet)
+{
+	if (len < 2)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	switch (buf[1] >> 5) {
+	case 0:
+		packet->type = INTEL_PT_MODE_EXEC;
+		switch (buf[1] & 3) {
+		case 0:
+			packet->payload = 16;
+			break;
+		case 1:
packet->payload = 64; + break; + case 2: + packet->payload = 32; + break; + default: + return INTEL_PT_BAD_PACKET; + } + break; + case 1: + packet->type = INTEL_PT_MODE_TSX; + if ((buf[1] & 3) == 3) + return INTEL_PT_BAD_PACKET; + packet->payload = buf[1] & 3; + break; + default: + return INTEL_PT_BAD_PACKET; + } + + return 2; +} + +static int intel_pt_get_tsc(const unsigned char *buf, size_t len, + struct intel_pt_pkt *packet) +{ + if (len < 8) + return INTEL_PT_NEED_MORE_BYTES; + packet->type = INTEL_PT_TSC; + memcpy_le64(&packet->payload, buf + 1, 7); + return 8; +} + +static int intel_pt_get_mtc(const unsigned char *buf, size_t len, + struct intel_pt_pkt *packet) +{ + if (len < 2) + return INTEL_PT_NEED_MORE_BYTES; + packet->type = INTEL_PT_MTC; + packet->payload = buf[1]; + return 2; +} + +static int intel_pt_do_get_packet(const unsigned char *buf, size_t len, + struct intel_pt_pkt *packet) +{ + unsigned int byte; + + memset(packet, 0, sizeof(struct intel_pt_pkt)); + + if (!len) + return INTEL_PT_NEED_MORE_BYTES; + + byte = buf[0]; + if (!(byte & BIT(0))) { + if (byte == 0) + return intel_pt_get_pad(packet); + if (byte == 2) + return intel_pt_get_ext(buf, len, packet); + return intel_pt_get_short_tnt(byte, packet); + } + + if ((byte & 2)) + return intel_pt_get_cyc(byte, buf, len, packet); + + switch (byte & 0x1f) { + case 0x0D: + return intel_pt_get_ip(INTEL_PT_TIP, byte, buf, len, packet); + case 0x11: + return intel_pt_get_ip(INTEL_PT_TIP_PGE, byte, buf, len, + packet); + case 0x01: + return intel_pt_get_ip(INTEL_PT_TIP_PGD, byte, buf, len, + packet); + case 0x1D: + return intel_pt_get_ip(INTEL_PT_FUP, byte, buf, len, packet); + case 0x19: + switch (byte) { + case 0x99: + return intel_pt_get_mode(buf, len, packet); + case 0x19: + return intel_pt_get_tsc(buf, len, packet); + case 0x59: + return intel_pt_get_mtc(buf, len, packet); + default: + return INTEL_PT_BAD_PACKET; + } + default: + return INTEL_PT_BAD_PACKET; + } +} + +int intel_pt_get_packet(const unsigned char *buf, size_t len, + struct intel_pt_pkt *packet) +{ + int ret; + + ret = intel_pt_do_get_packet(buf, len, packet); + if (ret > 0) { + while (ret < 8 && len > (size_t)ret && !buf[ret]) + ret += 1; + } + return ret; +} + +int intel_pt_pkt_desc(const struct intel_pt_pkt *packet, char *buf, + size_t buf_len) +{ + int ret, i, nr; + unsigned long long payload = packet->payload; + const char *name = intel_pt_pkt_name(packet->type); + + switch (packet->type) { + case INTEL_PT_BAD: + case INTEL_PT_PAD: + case INTEL_PT_PSB: + case INTEL_PT_PSBEND: + case INTEL_PT_TRACESTOP: + case INTEL_PT_OVF: + return snprintf(buf, buf_len, "%s", name); + case INTEL_PT_TNT: { + size_t blen = buf_len; + + ret = snprintf(buf, blen, "%s ", name); + if (ret < 0) + return ret; + buf += ret; + blen -= ret; + for (i = 0; i < packet->count; i++) { + if (payload & BIT63) + ret = snprintf(buf, blen, "T"); + else + ret = snprintf(buf, blen, "N"); + if (ret < 0) + return ret; + buf += ret; + blen -= ret; + payload <<= 1; + } + ret = snprintf(buf, blen, " (%d)", packet->count); + if (ret < 0) + return ret; + blen -= ret; + return buf_len - blen; + } + case INTEL_PT_TIP_PGD: + case INTEL_PT_TIP_PGE: + case INTEL_PT_TIP: + case INTEL_PT_FUP: + if (!(packet->count)) + return snprintf(buf, buf_len, "%s no ip", name); + case INTEL_PT_CYC: + case INTEL_PT_VMCS: + case INTEL_PT_MTC: + case INTEL_PT_MNT: + case INTEL_PT_CBR: + case INTEL_PT_TSC: + return snprintf(buf, buf_len, "%s 0x%llx", name, payload); + case INTEL_PT_TMA: + return snprintf(buf, buf_len, "%s CTC 
0x%x FC 0x%x", name,
+			(unsigned)payload, packet->count);
+	case INTEL_PT_MODE_EXEC:
+		return snprintf(buf, buf_len, "%s %lld", name, payload);
+	case INTEL_PT_MODE_TSX:
+		return snprintf(buf, buf_len, "%s TXAbort:%u InTX:%u",
+				name, (unsigned)(payload >> 1) & 1,
+				(unsigned)payload & 1);
+	case INTEL_PT_PIP:
+		nr = packet->payload & NR_FLAG ? 1 : 0;
+		payload &= ~NR_FLAG;
+		ret = snprintf(buf, buf_len, "%s 0x%llx (NR=%d)",
+			       name, payload, nr);
+		return ret;
+	default:
+		break;
+	}
+	return snprintf(buf, buf_len, "%s 0x%llx (%d)",
+			name, payload, packet->count);
+}
diff --git a/kernel/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h
new file mode 100644
index 000000000..781bb7988
--- /dev/null
+++ b/kernel/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h
@@ -0,0 +1,70 @@
+/*
+ * intel_pt_pkt_decoder.h: Intel Processor Trace support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef INCLUDE__INTEL_PT_PKT_DECODER_H__
+#define INCLUDE__INTEL_PT_PKT_DECODER_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define INTEL_PT_PKT_DESC_MAX 256
+
+#define INTEL_PT_NEED_MORE_BYTES -1
+#define INTEL_PT_BAD_PACKET -2
+
+#define INTEL_PT_PSB_STR "\002\202\002\202\002\202\002\202" \
+			 "\002\202\002\202\002\202\002\202"
+#define INTEL_PT_PSB_LEN 16
+
+#define INTEL_PT_PKT_MAX_SZ 16
+
+enum intel_pt_pkt_type {
+	INTEL_PT_BAD,
+	INTEL_PT_PAD,
+	INTEL_PT_TNT,
+	INTEL_PT_TIP_PGD,
+	INTEL_PT_TIP_PGE,
+	INTEL_PT_TSC,
+	INTEL_PT_TMA,
+	INTEL_PT_MODE_EXEC,
+	INTEL_PT_MODE_TSX,
+	INTEL_PT_MTC,
+	INTEL_PT_TIP,
+	INTEL_PT_FUP,
+	INTEL_PT_CYC,
+	INTEL_PT_VMCS,
+	INTEL_PT_PSB,
+	INTEL_PT_PSBEND,
+	INTEL_PT_CBR,
+	INTEL_PT_TRACESTOP,
+	INTEL_PT_PIP,
+	INTEL_PT_OVF,
+	INTEL_PT_MNT,
+};
+
+struct intel_pt_pkt {
+	enum intel_pt_pkt_type type;
+	int count;
+	uint64_t payload;
+};
+
+const char *intel_pt_pkt_name(enum intel_pt_pkt_type);
+
+int intel_pt_get_packet(const unsigned char *buf, size_t len,
+			struct intel_pt_pkt *packet);
+
+int intel_pt_pkt_desc(const struct intel_pt_pkt *packet, char *buf, size_t len);
+
+#endif
diff --git a/kernel/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt b/kernel/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
new file mode 100644
index 000000000..d388de72e
--- /dev/null
+++ b/kernel/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
@@ -0,0 +1,984 @@
+# x86 Opcode Maps
+#
+# This is (mostly) based on following documentations.
+# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2C
+#   (#326018-047US, June 2013)
+#
+#<Opcode maps>
+# Table: table-name
+# Referrer: escaped-name
+# AVXcode: avx-code
+# opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
+# (or)
+# opcode: escape # escaped-name
+# EndTable
+#
+#<group maps>
+# GrpTable: GrpXXX
+# reg: mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
+# EndTable
+#
+# AVX Superscripts
+#  (v): this opcode requires VEX prefix.
+#  (v1): this opcode only supports 128bit VEX.
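+#
+# Reading example (editorial annotation, not from the manual): in the
+# one-byte table below, "05: ADD rAX,Iz" means opcode byte 0x05 is ADD
+# with operands rAX and a 16/32-bit immediate (Iz), while
+# "80: Grp1 Eb,Ib (1A)" means opcode 0x80 takes its mnemonic from group
+# table Grp1, selected by the ModR/M reg field; (1A) marks the reg bits
+# as an opcode extension.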
+# +# Last Prefix Superscripts +# - (66): the last prefix is 0x66 +# - (F3): the last prefix is 0xF3 +# - (F2): the last prefix is 0xF2 +# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case) +# - (66&F2): Both 0x66 and 0xF2 prefixes are specified. + +Table: one byte opcode +Referrer: +AVXcode: +# 0x00 - 0x0f +00: ADD Eb,Gb +01: ADD Ev,Gv +02: ADD Gb,Eb +03: ADD Gv,Ev +04: ADD AL,Ib +05: ADD rAX,Iz +06: PUSH ES (i64) +07: POP ES (i64) +08: OR Eb,Gb +09: OR Ev,Gv +0a: OR Gb,Eb +0b: OR Gv,Ev +0c: OR AL,Ib +0d: OR rAX,Iz +0e: PUSH CS (i64) +0f: escape # 2-byte escape +# 0x10 - 0x1f +10: ADC Eb,Gb +11: ADC Ev,Gv +12: ADC Gb,Eb +13: ADC Gv,Ev +14: ADC AL,Ib +15: ADC rAX,Iz +16: PUSH SS (i64) +17: POP SS (i64) +18: SBB Eb,Gb +19: SBB Ev,Gv +1a: SBB Gb,Eb +1b: SBB Gv,Ev +1c: SBB AL,Ib +1d: SBB rAX,Iz +1e: PUSH DS (i64) +1f: POP DS (i64) +# 0x20 - 0x2f +20: AND Eb,Gb +21: AND Ev,Gv +22: AND Gb,Eb +23: AND Gv,Ev +24: AND AL,Ib +25: AND rAx,Iz +26: SEG=ES (Prefix) +27: DAA (i64) +28: SUB Eb,Gb +29: SUB Ev,Gv +2a: SUB Gb,Eb +2b: SUB Gv,Ev +2c: SUB AL,Ib +2d: SUB rAX,Iz +2e: SEG=CS (Prefix) +2f: DAS (i64) +# 0x30 - 0x3f +30: XOR Eb,Gb +31: XOR Ev,Gv +32: XOR Gb,Eb +33: XOR Gv,Ev +34: XOR AL,Ib +35: XOR rAX,Iz +36: SEG=SS (Prefix) +37: AAA (i64) +38: CMP Eb,Gb +39: CMP Ev,Gv +3a: CMP Gb,Eb +3b: CMP Gv,Ev +3c: CMP AL,Ib +3d: CMP rAX,Iz +3e: SEG=DS (Prefix) +3f: AAS (i64) +# 0x40 - 0x4f +40: INC eAX (i64) | REX (o64) +41: INC eCX (i64) | REX.B (o64) +42: INC eDX (i64) | REX.X (o64) +43: INC eBX (i64) | REX.XB (o64) +44: INC eSP (i64) | REX.R (o64) +45: INC eBP (i64) | REX.RB (o64) +46: INC eSI (i64) | REX.RX (o64) +47: INC eDI (i64) | REX.RXB (o64) +48: DEC eAX (i64) | REX.W (o64) +49: DEC eCX (i64) | REX.WB (o64) +4a: DEC eDX (i64) | REX.WX (o64) +4b: DEC eBX (i64) | REX.WXB (o64) +4c: DEC eSP (i64) | REX.WR (o64) +4d: DEC eBP (i64) | REX.WRB (o64) +4e: DEC eSI (i64) | REX.WRX (o64) +4f: DEC eDI (i64) | REX.WRXB (o64) +# 0x50 - 0x5f +50: PUSH rAX/r8 (d64) +51: PUSH rCX/r9 (d64) +52: PUSH rDX/r10 (d64) +53: PUSH rBX/r11 (d64) +54: PUSH rSP/r12 (d64) +55: PUSH rBP/r13 (d64) +56: PUSH rSI/r14 (d64) +57: PUSH rDI/r15 (d64) +58: POP rAX/r8 (d64) +59: POP rCX/r9 (d64) +5a: POP rDX/r10 (d64) +5b: POP rBX/r11 (d64) +5c: POP rSP/r12 (d64) +5d: POP rBP/r13 (d64) +5e: POP rSI/r14 (d64) +5f: POP rDI/r15 (d64) +# 0x60 - 0x6f +60: PUSHA/PUSHAD (i64) +61: POPA/POPAD (i64) +62: BOUND Gv,Ma (i64) +63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64) +64: SEG=FS (Prefix) +65: SEG=GS (Prefix) +66: Operand-Size (Prefix) +67: Address-Size (Prefix) +68: PUSH Iz (d64) +69: IMUL Gv,Ev,Iz +6a: PUSH Ib (d64) +6b: IMUL Gv,Ev,Ib +6c: INS/INSB Yb,DX +6d: INS/INSW/INSD Yz,DX +6e: OUTS/OUTSB DX,Xb +6f: OUTS/OUTSW/OUTSD DX,Xz +# 0x70 - 0x7f +70: JO Jb +71: JNO Jb +72: JB/JNAE/JC Jb +73: JNB/JAE/JNC Jb +74: JZ/JE Jb +75: JNZ/JNE Jb +76: JBE/JNA Jb +77: JNBE/JA Jb +78: JS Jb +79: JNS Jb +7a: JP/JPE Jb +7b: JNP/JPO Jb +7c: JL/JNGE Jb +7d: JNL/JGE Jb +7e: JLE/JNG Jb +7f: JNLE/JG Jb +# 0x80 - 0x8f +80: Grp1 Eb,Ib (1A) +81: Grp1 Ev,Iz (1A) +82: Grp1 Eb,Ib (1A),(i64) +83: Grp1 Ev,Ib (1A) +84: TEST Eb,Gb +85: TEST Ev,Gv +86: XCHG Eb,Gb +87: XCHG Ev,Gv +88: MOV Eb,Gb +89: MOV Ev,Gv +8a: MOV Gb,Eb +8b: MOV Gv,Ev +8c: MOV Ev,Sw +8d: LEA Gv,M +8e: MOV Sw,Ew +8f: Grp1A (1A) | POP Ev (d64) +# 0x90 - 0x9f +90: NOP | PAUSE (F3) | XCHG r8,rAX +91: XCHG rCX/r9,rAX +92: XCHG rDX/r10,rAX +93: XCHG rBX/r11,rAX +94: XCHG rSP/r12,rAX +95: XCHG rBP/r13,rAX +96: XCHG rSI/r14,rAX +97: XCHG rDI/r15,rAX +98: CBW/CWDE/CDQE +99: CWD/CDQ/CQO +9a: CALLF Ap 
(i64) +9b: FWAIT/WAIT +9c: PUSHF/D/Q Fv (d64) +9d: POPF/D/Q Fv (d64) +9e: SAHF +9f: LAHF +# 0xa0 - 0xaf +a0: MOV AL,Ob +a1: MOV rAX,Ov +a2: MOV Ob,AL +a3: MOV Ov,rAX +a4: MOVS/B Yb,Xb +a5: MOVS/W/D/Q Yv,Xv +a6: CMPS/B Xb,Yb +a7: CMPS/W/D Xv,Yv +a8: TEST AL,Ib +a9: TEST rAX,Iz +aa: STOS/B Yb,AL +ab: STOS/W/D/Q Yv,rAX +ac: LODS/B AL,Xb +ad: LODS/W/D/Q rAX,Xv +ae: SCAS/B AL,Yb +# Note: The May 2011 Intel manual shows Xv for the second parameter of the +# next instruction but Yv is correct +af: SCAS/W/D/Q rAX,Yv +# 0xb0 - 0xbf +b0: MOV AL/R8L,Ib +b1: MOV CL/R9L,Ib +b2: MOV DL/R10L,Ib +b3: MOV BL/R11L,Ib +b4: MOV AH/R12L,Ib +b5: MOV CH/R13L,Ib +b6: MOV DH/R14L,Ib +b7: MOV BH/R15L,Ib +b8: MOV rAX/r8,Iv +b9: MOV rCX/r9,Iv +ba: MOV rDX/r10,Iv +bb: MOV rBX/r11,Iv +bc: MOV rSP/r12,Iv +bd: MOV rBP/r13,Iv +be: MOV rSI/r14,Iv +bf: MOV rDI/r15,Iv +# 0xc0 - 0xcf +c0: Grp2 Eb,Ib (1A) +c1: Grp2 Ev,Ib (1A) +c2: RETN Iw (f64) +c3: RETN +c4: LES Gz,Mp (i64) | VEX+2byte (Prefix) +c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix) +c6: Grp11A Eb,Ib (1A) +c7: Grp11B Ev,Iz (1A) +c8: ENTER Iw,Ib +c9: LEAVE (d64) +ca: RETF Iw +cb: RETF +cc: INT3 +cd: INT Ib +ce: INTO (i64) +cf: IRET/D/Q +# 0xd0 - 0xdf +d0: Grp2 Eb,1 (1A) +d1: Grp2 Ev,1 (1A) +d2: Grp2 Eb,CL (1A) +d3: Grp2 Ev,CL (1A) +d4: AAM Ib (i64) +d5: AAD Ib (i64) +d6: +d7: XLAT/XLATB +d8: ESC +d9: ESC +da: ESC +db: ESC +dc: ESC +dd: ESC +de: ESC +df: ESC +# 0xe0 - 0xef +# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix +# in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation +# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD. +e0: LOOPNE/LOOPNZ Jb (f64) +e1: LOOPE/LOOPZ Jb (f64) +e2: LOOP Jb (f64) +e3: JrCXZ Jb (f64) +e4: IN AL,Ib +e5: IN eAX,Ib +e6: OUT Ib,AL +e7: OUT Ib,eAX +# With 0x66 prefix in 64-bit mode, for AMD CPUs immediate offset +# in "near" jumps and calls is 16-bit. For CALL, +# push of return address is 16-bit wide, RSP is decremented by 2 +# but is not truncated to 16 bits, unlike RIP. +e8: CALL Jz (f64) +e9: JMP-near Jz (f64) +ea: JMP-far Ap (i64) +eb: JMP-short Jb (f64) +ec: IN AL,DX +ed: IN eAX,DX +ee: OUT DX,AL +ef: OUT DX,eAX +# 0xf0 - 0xff +f0: LOCK (Prefix) +f1: +f2: REPNE (Prefix) | XACQUIRE (Prefix) +f3: REP/REPE (Prefix) | XRELEASE (Prefix) +f4: HLT +f5: CMC +f6: Grp3_1 Eb (1A) +f7: Grp3_2 Ev (1A) +f8: CLC +f9: STC +fa: CLI +fb: STI +fc: CLD +fd: STD +fe: Grp4 (1A) +ff: Grp5 (1A) +EndTable + +Table: 2-byte opcode (0x0f) +Referrer: 2-byte escape +AVXcode: 1 +# 0x0f 0x00-0x0f +00: Grp6 (1A) +01: Grp7 (1A) +02: LAR Gv,Ew +03: LSL Gv,Ew +04: +05: SYSCALL (o64) +06: CLTS +07: SYSRET (o64) +08: INVD +09: WBINVD +0a: +0b: UD2 (1B) +0c: +# AMD's prefetch group. Intel supports prefetchw(/1) only. +0d: GrpP +0e: FEMMS +# 3DNow! uses the last imm byte as opcode extension. +0f: 3DNow! Pq,Qq,Ib +# 0x0f 0x10-0x1f +# NOTE: According to Intel SDM opcode map, vmovups and vmovupd has no operands +# but it actually has operands. And also, vmovss and vmovsd only accept 128bit. +# MOVSS/MOVSD has too many forms(3) on SDM. This map just shows a typical form. 
+# Many AVX instructions lack v1 superscript, according to Intel AVX-Prgramming +# Reference A.1 +10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1) +11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1) +12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2) +13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1) +14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66) +15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66) +16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3) +17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1) +18: Grp16 (1A) +19: +# Intel SDM opcode map does not list MPX instructions. For now using Gv for +# bnd registers and Ev for everything else is OK because the instruction +# decoder does not use the information except as an indication that there is +# a ModR/M byte. +1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev +1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv +1c: +1d: +1e: +1f: NOP Ev +# 0x0f 0x20-0x2f +20: MOV Rd,Cd +21: MOV Rd,Dd +22: MOV Cd,Rd +23: MOV Dd,Rd +24: +25: +26: +27: +28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66) +29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66) +2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1) +2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66) +2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1) +2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1) +2e: vucomiss Vss,Wss (v1) | vucomisd Vsd,Wsd (66),(v1) +2f: vcomiss Vss,Wss (v1) | vcomisd Vsd,Wsd (66),(v1) +# 0x0f 0x30-0x3f +30: WRMSR +31: RDTSC +32: RDMSR +33: RDPMC +34: SYSENTER +35: SYSEXIT +36: +37: GETSEC +38: escape # 3-byte escape 1 +39: +3a: escape # 3-byte escape 2 +3b: +3c: +3d: +3e: +3f: +# 0x0f 0x40-0x4f +40: CMOVO Gv,Ev +41: CMOVNO Gv,Ev +42: CMOVB/C/NAE Gv,Ev +43: CMOVAE/NB/NC Gv,Ev +44: CMOVE/Z Gv,Ev +45: CMOVNE/NZ Gv,Ev +46: CMOVBE/NA Gv,Ev +47: CMOVA/NBE Gv,Ev +48: CMOVS Gv,Ev +49: CMOVNS Gv,Ev +4a: CMOVP/PE Gv,Ev +4b: CMOVNP/PO Gv,Ev +4c: CMOVL/NGE Gv,Ev +4d: CMOVNL/GE Gv,Ev +4e: CMOVLE/NG Gv,Ev +4f: CMOVNLE/G Gv,Ev +# 0x0f 0x50-0x5f +50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66) +51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1) +52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1) +53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1) +54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66) +55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66) +56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66) +57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66) +58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1) +59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1) +5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1) +5b: vcvtdq2ps Vps,Wdq | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3) +5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1) +5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1) +5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | 
vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1) +5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1) +# 0x0f 0x60-0x6f +60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1) +61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1) +62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1) +63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1) +64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1) +65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1) +66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1) +67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1) +68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1) +69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1) +6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1) +6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1) +6c: vpunpcklqdq Vx,Hx,Wx (66),(v1) +6d: vpunpckhqdq Vx,Hx,Wx (66),(v1) +6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1) +6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqu Vx,Wx (F3) +# 0x0f 0x70-0x7f +70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1) +71: Grp12 (1A) +72: Grp13 (1A) +73: Grp14 (1A) +74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1) +75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1) +76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1) +# Note: Remove (v), because vzeroall and vzeroupper becomes emms without VEX. +77: emms | vzeroupper | vzeroall +78: VMREAD Ey,Gy +79: VMWRITE Gy,Ey +7a: +7b: +7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2) +7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2) +7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1) +7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqu Wx,Vx (F3) +# 0x0f 0x80-0x8f +# Note: "forced64" is Intel CPU behavior (see comment about CALL insn). 
+80: JO Jz (f64) +81: JNO Jz (f64) +82: JB/JC/JNAE Jz (f64) +83: JAE/JNB/JNC Jz (f64) +84: JE/JZ Jz (f64) +85: JNE/JNZ Jz (f64) +86: JBE/JNA Jz (f64) +87: JA/JNBE Jz (f64) +88: JS Jz (f64) +89: JNS Jz (f64) +8a: JP/JPE Jz (f64) +8b: JNP/JPO Jz (f64) +8c: JL/JNGE Jz (f64) +8d: JNL/JGE Jz (f64) +8e: JLE/JNG Jz (f64) +8f: JNLE/JG Jz (f64) +# 0x0f 0x90-0x9f +90: SETO Eb +91: SETNO Eb +92: SETB/C/NAE Eb +93: SETAE/NB/NC Eb +94: SETE/Z Eb +95: SETNE/NZ Eb +96: SETBE/NA Eb +97: SETA/NBE Eb +98: SETS Eb +99: SETNS Eb +9a: SETP/PE Eb +9b: SETNP/PO Eb +9c: SETL/NGE Eb +9d: SETNL/GE Eb +9e: SETLE/NG Eb +9f: SETNLE/G Eb +# 0x0f 0xa0-0xaf +a0: PUSH FS (d64) +a1: POP FS (d64) +a2: CPUID +a3: BT Ev,Gv +a4: SHLD Ev,Gv,Ib +a5: SHLD Ev,Gv,CL +a6: GrpPDLK +a7: GrpRNG +a8: PUSH GS (d64) +a9: POP GS (d64) +aa: RSM +ab: BTS Ev,Gv +ac: SHRD Ev,Gv,Ib +ad: SHRD Ev,Gv,CL +ae: Grp15 (1A),(1C) +af: IMUL Gv,Ev +# 0x0f 0xb0-0xbf +b0: CMPXCHG Eb,Gb +b1: CMPXCHG Ev,Gv +b2: LSS Gv,Mp +b3: BTR Ev,Gv +b4: LFS Gv,Mp +b5: LGS Gv,Mp +b6: MOVZX Gv,Eb +b7: MOVZX Gv,Ew +b8: JMPE (!F3) | POPCNT Gv,Ev (F3) +b9: Grp10 (1A) +ba: Grp8 Ev,Ib (1A) +bb: BTC Ev,Gv +bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3) +bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3) +be: MOVSX Gv,Eb +bf: MOVSX Gv,Ew +# 0x0f 0xc0-0xcf +c0: XADD Eb,Gb +c1: XADD Ev,Gv +c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1) +c3: movnti My,Gy +c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1) +c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1) +c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66) +c7: Grp9 (1A) +c8: BSWAP RAX/EAX/R8/R8D +c9: BSWAP RCX/ECX/R9/R9D +ca: BSWAP RDX/EDX/R10/R10D +cb: BSWAP RBX/EBX/R11/R11D +cc: BSWAP RSP/ESP/R12/R12D +cd: BSWAP RBP/EBP/R13/R13D +ce: BSWAP RSI/ESI/R14/R14D +cf: BSWAP RDI/EDI/R15/R15D +# 0x0f 0xd0-0xdf +d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2) +d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1) +d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1) +d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1) +d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1) +d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1) +d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2) +d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1) +d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1) +d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1) +da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1) +db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1) +dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1) +dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1) +de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1) +df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1) +# 0x0f 0xe0-0xef +e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1) +e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1) +e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1) +e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1) +e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1) +e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1) +e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtpd2dq Vx,Wpd (F2) +e7: movntq Mq,Pq | vmovntdq Mx,Vx (66) +e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1) +e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1) +ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1) +eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1) +ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1) +ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1) +ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1) +ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1) +# 0x0f 0xf0-0xff +f0: vlddqu Vx,Mx (F2) +f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1) +f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1) 
+f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1) +f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1) +f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1) +f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1) +f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1) +f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1) +f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1) +fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1) +fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1) +fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) +fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) +fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) +ff: +EndTable + +Table: 3-byte opcode 1 (0x0f 0x38) +Referrer: 3-byte escape 1 +AVXcode: 2 +# 0x0f 0x38 0x00-0x0f +00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1) +01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1) +02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1) +03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1) +04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1) +05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1) +06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1) +07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1) +08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1) +09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1) +0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1) +0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1) +0c: vpermilps Vx,Hx,Wx (66),(v) +0d: vpermilpd Vx,Hx,Wx (66),(v) +0e: vtestps Vx,Wx (66),(v) +0f: vtestpd Vx,Wx (66),(v) +# 0x0f 0x38 0x10-0x1f +10: pblendvb Vdq,Wdq (66) +11: +12: +13: vcvtph2ps Vx,Wx,Ib (66),(v) +14: blendvps Vdq,Wdq (66) +15: blendvpd Vdq,Wdq (66) +16: vpermps Vqq,Hqq,Wqq (66),(v) +17: vptest Vx,Wx (66) +18: vbroadcastss Vx,Wd (66),(v) +19: vbroadcastsd Vqq,Wq (66),(v) +1a: vbroadcastf128 Vqq,Mdq (66),(v) +1b: +1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1) +1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1) +1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1) +1f: +# 0x0f 0x38 0x20-0x2f +20: vpmovsxbw Vx,Ux/Mq (66),(v1) +21: vpmovsxbd Vx,Ux/Md (66),(v1) +22: vpmovsxbq Vx,Ux/Mw (66),(v1) +23: vpmovsxwd Vx,Ux/Mq (66),(v1) +24: vpmovsxwq Vx,Ux/Md (66),(v1) +25: vpmovsxdq Vx,Ux/Mq (66),(v1) +26: +27: +28: vpmuldq Vx,Hx,Wx (66),(v1) +29: vpcmpeqq Vx,Hx,Wx (66),(v1) +2a: vmovntdqa Vx,Mx (66),(v1) +2b: vpackusdw Vx,Hx,Wx (66),(v1) +2c: vmaskmovps Vx,Hx,Mx (66),(v) +2d: vmaskmovpd Vx,Hx,Mx (66),(v) +2e: vmaskmovps Mx,Hx,Vx (66),(v) +2f: vmaskmovpd Mx,Hx,Vx (66),(v) +# 0x0f 0x38 0x30-0x3f +30: vpmovzxbw Vx,Ux/Mq (66),(v1) +31: vpmovzxbd Vx,Ux/Md (66),(v1) +32: vpmovzxbq Vx,Ux/Mw (66),(v1) +33: vpmovzxwd Vx,Ux/Mq (66),(v1) +34: vpmovzxwq Vx,Ux/Md (66),(v1) +35: vpmovzxdq Vx,Ux/Mq (66),(v1) +36: vpermd Vqq,Hqq,Wqq (66),(v) +37: vpcmpgtq Vx,Hx,Wx (66),(v1) +38: vpminsb Vx,Hx,Wx (66),(v1) +39: vpminsd Vx,Hx,Wx (66),(v1) +3a: vpminuw Vx,Hx,Wx (66),(v1) +3b: vpminud Vx,Hx,Wx (66),(v1) +3c: vpmaxsb Vx,Hx,Wx (66),(v1) +3d: vpmaxsd Vx,Hx,Wx (66),(v1) +3e: vpmaxuw Vx,Hx,Wx (66),(v1) +3f: vpmaxud Vx,Hx,Wx (66),(v1) +# 0x0f 0x38 0x40-0x8f +40: vpmulld Vx,Hx,Wx (66),(v1) +41: vphminposuw Vdq,Wdq (66),(v1) +42: +43: +44: +45: vpsrlvd/q Vx,Hx,Wx (66),(v) +46: vpsravd Vx,Hx,Wx (66),(v) +47: vpsllvd/q Vx,Hx,Wx (66),(v) +# Skip 0x48-0x57 +58: vpbroadcastd Vx,Wx (66),(v) +59: vpbroadcastq Vx,Wx (66),(v) +5a: vbroadcasti128 Vqq,Mdq (66),(v) +# Skip 0x5b-0x77 +78: vpbroadcastb Vx,Wx (66),(v) +79: vpbroadcastw Vx,Wx (66),(v) +# Skip 0x7a-0x7f +80: INVEPT Gy,Mdq (66) +81: INVPID Gy,Mdq (66) +82: INVPCID Gy,Mdq (66) +8c: vpmaskmovd/q Vx,Hx,Mx (66),(v) +8e: vpmaskmovd/q Mx,Vx,Hx (66),(v) +# 0x0f 0x38 0x90-0xbf (FMA) +90: vgatherdd/q Vx,Hx,Wx (66),(v) +91: vgatherqd/q Vx,Hx,Wx (66),(v) +92: vgatherdps/d 
Vx,Hx,Wx (66),(v) +93: vgatherqps/d Vx,Hx,Wx (66),(v) +94: +95: +96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v) +97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v) +98: vfmadd132ps/d Vx,Hx,Wx (66),(v) +99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1) +9a: vfmsub132ps/d Vx,Hx,Wx (66),(v) +9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1) +9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v) +9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1) +9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v) +9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1) +a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v) +a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v) +a8: vfmadd213ps/d Vx,Hx,Wx (66),(v) +a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1) +aa: vfmsub213ps/d Vx,Hx,Wx (66),(v) +ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1) +ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v) +ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1) +ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v) +af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1) +b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v) +b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v) +b8: vfmadd231ps/d Vx,Hx,Wx (66),(v) +b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1) +ba: vfmsub231ps/d Vx,Hx,Wx (66),(v) +bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1) +bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v) +bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1) +be: vfnmsub231ps/d Vx,Hx,Wx (66),(v) +bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1) +# 0x0f 0x38 0xc0-0xff +c8: sha1nexte Vdq,Wdq +c9: sha1msg1 Vdq,Wdq +ca: sha1msg2 Vdq,Wdq +cb: sha256rnds2 Vdq,Wdq +cc: sha256msg1 Vdq,Wdq +cd: sha256msg2 Vdq,Wdq +db: VAESIMC Vdq,Wdq (66),(v1) +dc: VAESENC Vdq,Hdq,Wdq (66),(v1) +dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1) +de: VAESDEC Vdq,Hdq,Wdq (66),(v1) +df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1) +f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2) +f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2) +f2: ANDN Gy,By,Ey (v) +f3: Grp17 (1A) +f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v) +f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v) +f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v) +EndTable + +Table: 3-byte opcode 2 (0x0f 0x3a) +Referrer: 3-byte escape 2 +AVXcode: 3 +# 0x0f 0x3a 0x00-0xff +00: vpermq Vqq,Wqq,Ib (66),(v) +01: vpermpd Vqq,Wqq,Ib (66),(v) +02: vpblendd Vx,Hx,Wx,Ib (66),(v) +03: +04: vpermilps Vx,Wx,Ib (66),(v) +05: vpermilpd Vx,Wx,Ib (66),(v) +06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v) +07: +08: vroundps Vx,Wx,Ib (66) +09: vroundpd Vx,Wx,Ib (66) +0a: vroundss Vss,Wss,Ib (66),(v1) +0b: vroundsd Vsd,Wsd,Ib (66),(v1) +0c: vblendps Vx,Hx,Wx,Ib (66) +0d: vblendpd Vx,Hx,Wx,Ib (66) +0e: vpblendw Vx,Hx,Wx,Ib (66),(v1) +0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1) +14: vpextrb Rd/Mb,Vdq,Ib (66),(v1) +15: vpextrw Rd/Mw,Vdq,Ib (66),(v1) +16: vpextrd/q Ey,Vdq,Ib (66),(v1) +17: vextractps Ed,Vdq,Ib (66),(v1) +18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v) +19: vextractf128 Wdq,Vqq,Ib (66),(v) +1d: vcvtps2ph Wx,Vx,Ib (66),(v) +20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1) +21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1) +22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1) +38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v) +39: vextracti128 Wdq,Vqq,Ib (66),(v) +40: vdpps Vx,Hx,Wx,Ib (66) +41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1) +42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) +44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1) +46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v) +4a: vblendvps Vx,Hx,Wx,Lx (66),(v) +4b: vblendvpd Vx,Hx,Wx,Lx (66),(v) +4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1) +60: vpcmpestrm Vdq,Wdq,Ib (66),(v1) +61: vpcmpestri Vdq,Wdq,Ib (66),(v1) +62: vpcmpistrm Vdq,Wdq,Ib (66),(v1) +63: vpcmpistri Vdq,Wdq,Ib (66),(v1) +cc: sha1rnds4 
Vdq,Wdq,Ib +df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1) +f0: RORX Gy,Ey,Ib (F2),(v) +EndTable + +GrpTable: Grp1 +0: ADD +1: OR +2: ADC +3: SBB +4: AND +5: SUB +6: XOR +7: CMP +EndTable + +GrpTable: Grp1A +0: POP +EndTable + +GrpTable: Grp2 +0: ROL +1: ROR +2: RCL +3: RCR +4: SHL/SAL +5: SHR +6: +7: SAR +EndTable + +GrpTable: Grp3_1 +0: TEST Eb,Ib +1: +2: NOT Eb +3: NEG Eb +4: MUL AL,Eb +5: IMUL AL,Eb +6: DIV AL,Eb +7: IDIV AL,Eb +EndTable + +GrpTable: Grp3_2 +0: TEST Ev,Iz +1: +2: NOT Ev +3: NEG Ev +4: MUL rAX,Ev +5: IMUL rAX,Ev +6: DIV rAX,Ev +7: IDIV rAX,Ev +EndTable + +GrpTable: Grp4 +0: INC Eb +1: DEC Eb +EndTable + +GrpTable: Grp5 +0: INC Ev +1: DEC Ev +# Note: "forced64" is Intel CPU behavior (see comment about CALL insn). +2: CALLN Ev (f64) +3: CALLF Ep +4: JMPN Ev (f64) +5: JMPF Mp +6: PUSH Ev (d64) +7: +EndTable + +GrpTable: Grp6 +0: SLDT Rv/Mw +1: STR Rv/Mw +2: LLDT Ew +3: LTR Ew +4: VERR Ew +5: VERW Ew +EndTable + +GrpTable: Grp7 +0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) +1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) +2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) +3: LIDT Ms +4: SMSW Mw/Rv +5: rdpkru (110),(11B) | wrpkru (111),(11B) +6: LMSW Ew +7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B) +EndTable + +GrpTable: Grp8 +4: BT +5: BTS +6: BTR +7: BTC +EndTable + +GrpTable: Grp9 +1: CMPXCHG8B/16B Mq/Mdq +3: xrstors +4: xsavec +5: xsaves +6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B) +7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B) +EndTable + +GrpTable: Grp10 +EndTable + +# Grp11A and Grp11B are expressed as Grp11 in Intel SDM +GrpTable: Grp11A +0: MOV Eb,Ib +7: XABORT Ib (000),(11B) +EndTable + +GrpTable: Grp11B +0: MOV Eb,Iz +7: XBEGIN Jz (000),(11B) +EndTable + +GrpTable: Grp12 +2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1) +4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1) +6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1) +EndTable + +GrpTable: Grp13 +2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1) +4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1) +6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1) +EndTable + +GrpTable: Grp14 +2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1) +3: vpsrldq Hx,Ux,Ib (66),(11B),(v1) +6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1) +7: vpslldq Hx,Ux,Ib (66),(11B),(v1) +EndTable + +GrpTable: Grp15 +0: fxsave | RDFSBASE Ry (F3),(11B) +1: fxstor | RDGSBASE Ry (F3),(11B) +2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B) +3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B) +4: XSAVE +5: XRSTOR | lfence (11B) +6: XSAVEOPT | clwb (66) | mfence (11B) +7: clflush | clflushopt (66) | sfence (11B) | pcommit (66),(11B) +EndTable + +GrpTable: Grp16 +0: prefetch NTA +1: prefetch T0 +2: prefetch T1 +3: prefetch T2 +EndTable + +GrpTable: Grp17 +1: BLSR By,Ey (v) +2: BLSMSK By,Ey (v) +3: BLSI By,Ey (v) +EndTable + +# AMD's Prefetch Group +GrpTable: GrpP +0: PREFETCH +1: PREFETCHW +EndTable + +GrpTable: GrpPDLK +0: MONTMUL +1: XSHA1 +2: XSHA2 +EndTable + +GrpTable: GrpRNG +0: xstore-rng +1: xcrypt-ecb +2: xcrypt-cbc +4: xcrypt-cfb +5: xcrypt-ofb +EndTable diff --git a/kernel/tools/perf/util/intel-pt.c b/kernel/tools/perf/util/intel-pt.c new file mode 100644 index 000000000..97f963a3d --- /dev/null +++ b/kernel/tools/perf/util/intel-pt.c @@ -0,0 +1,2164 @@ +/* + * intel_pt.c: Intel Processor Trace support + * Copyright (c) 2013-2015, 
Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "../perf.h"
+#include "session.h"
+#include "machine.h"
+#include "sort.h"
+#include "tool.h"
+#include "event.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "map.h"
+#include "color.h"
+#include "util.h"
+#include "thread.h"
+#include "thread-stack.h"
+#include "symbol.h"
+#include "callchain.h"
+#include "dso.h"
+#include "debug.h"
+#include "auxtrace.h"
+#include "tsc.h"
+#include "intel-pt.h"
+
+#include "intel-pt-decoder/intel-pt-log.h"
+#include "intel-pt-decoder/intel-pt-decoder.h"
+#include "intel-pt-decoder/intel-pt-insn-decoder.h"
+#include "intel-pt-decoder/intel-pt-pkt-decoder.h"
+
+#define MAX_TIMESTAMP (~0ULL)
+
+struct intel_pt {
+	struct auxtrace auxtrace;
+	struct auxtrace_queues queues;
+	struct auxtrace_heap heap;
+	u32 auxtrace_type;
+	struct perf_session *session;
+	struct machine *machine;
+	struct perf_evsel *switch_evsel;
+	struct thread *unknown_thread;
+	bool timeless_decoding;
+	bool sampling_mode;
+	bool snapshot_mode;
+	bool per_cpu_mmaps;
+	bool have_tsc;
+	bool data_queued;
+	bool est_tsc;
+	bool sync_switch;
+	bool mispred_all;
+	int have_sched_switch;
+	u32 pmu_type;
+	u64 kernel_start;
+	u64 switch_ip;
+	u64 ptss_ip;
+
+	struct perf_tsc_conversion tc;
+	bool cap_user_time_zero;
+
+	struct itrace_synth_opts synth_opts;
+
+	bool sample_instructions;
+	u64 instructions_sample_type;
+	u64 instructions_sample_period;
+	u64 instructions_id;
+
+	bool sample_branches;
+	u32 branches_filter;
+	u64 branches_sample_type;
+	u64 branches_id;
+
+	bool sample_transactions;
+	u64 transactions_sample_type;
+	u64 transactions_id;
+
+	bool synth_needs_swap;
+
+	u64 tsc_bit;
+	u64 mtc_bit;
+	u64 mtc_freq_bits;
+	u32 tsc_ctc_ratio_n;
+	u32 tsc_ctc_ratio_d;
+	u64 cyc_bit;
+	u64 noretcomp_bit;
+	unsigned max_non_turbo_ratio;
+};
+
+enum switch_state {
+	INTEL_PT_SS_NOT_TRACING,
+	INTEL_PT_SS_UNKNOWN,
+	INTEL_PT_SS_TRACING,
+	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
+	INTEL_PT_SS_EXPECTING_SWITCH_IP,
+};
+
+struct intel_pt_queue {
+	struct intel_pt *pt;
+	unsigned int queue_nr;
+	struct auxtrace_buffer *buffer;
+	void *decoder;
+	const struct intel_pt_state *state;
+	struct ip_callchain *chain;
+	struct branch_stack *last_branch;
+	struct branch_stack *last_branch_rb;
+	size_t last_branch_pos;
+	union perf_event *event_buf;
+	bool on_heap;
+	bool stop;
+	bool step_through_buffers;
+	bool use_buffer_pid_tid;
+	pid_t pid, tid;
+	int cpu;
+	int switch_state;
+	pid_t next_tid;
+	struct thread *thread;
+	bool exclude_kernel;
+	bool have_sample;
+	u64 time;
+	u64 timestamp;
+	u32 flags;
+	u16 insn_len;
+	u64 last_insn_cnt;
+};
+
+static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
+			  unsigned char *buf, size_t len)
+{
+	struct intel_pt_pkt packet;
+	size_t pos = 0;
+	int ret, pkt_len, i;
+	char desc[INTEL_PT_PKT_DESC_MAX];
+	const char *color = PERF_COLOR_BLUE;
+
+	color_fprintf(stdout, color,
+		      ". ... 
Intel Processor Trace data: size %zu bytes\n", + len); + + while (len) { + ret = intel_pt_get_packet(buf, len, &packet); + if (ret > 0) + pkt_len = ret; + else + pkt_len = 1; + printf("."); + color_fprintf(stdout, color, " %08x: ", pos); + for (i = 0; i < pkt_len; i++) + color_fprintf(stdout, color, " %02x", buf[i]); + for (; i < 16; i++) + color_fprintf(stdout, color, " "); + if (ret > 0) { + ret = intel_pt_pkt_desc(&packet, desc, + INTEL_PT_PKT_DESC_MAX); + if (ret > 0) + color_fprintf(stdout, color, " %s\n", desc); + } else { + color_fprintf(stdout, color, " Bad packet!\n"); + } + pos += pkt_len; + buf += pkt_len; + len -= pkt_len; + } +} + +static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf, + size_t len) +{ + printf(".\n"); + intel_pt_dump(pt, buf, len); +} + +static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a, + struct auxtrace_buffer *b) +{ + void *start; + + start = intel_pt_find_overlap(a->data, a->size, b->data, b->size, + pt->have_tsc); + if (!start) + return -EINVAL; + b->use_size = b->data + b->size - start; + b->use_data = start; + return 0; +} + +static void intel_pt_use_buffer_pid_tid(struct intel_pt_queue *ptq, + struct auxtrace_queue *queue, + struct auxtrace_buffer *buffer) +{ + if (queue->cpu == -1 && buffer->cpu != -1) + ptq->cpu = buffer->cpu; + + ptq->pid = buffer->pid; + ptq->tid = buffer->tid; + + intel_pt_log("queue %u cpu %d pid %d tid %d\n", + ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid); + + thread__zput(ptq->thread); + + if (ptq->tid != -1) { + if (ptq->pid != -1) + ptq->thread = machine__findnew_thread(ptq->pt->machine, + ptq->pid, + ptq->tid); + else + ptq->thread = machine__find_thread(ptq->pt->machine, -1, + ptq->tid); + } +} + +/* This function assumes data is processed sequentially only */ +static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data) +{ + struct intel_pt_queue *ptq = data; + struct auxtrace_buffer *buffer = ptq->buffer, *old_buffer = buffer; + struct auxtrace_queue *queue; + + if (ptq->stop) { + b->len = 0; + return 0; + } + + queue = &ptq->pt->queues.queue_array[ptq->queue_nr]; + + buffer = auxtrace_buffer__next(queue, buffer); + if (!buffer) { + if (old_buffer) + auxtrace_buffer__drop_data(old_buffer); + b->len = 0; + return 0; + } + + ptq->buffer = buffer; + + if (!buffer->data) { + int fd = perf_data_file__fd(ptq->pt->session->file); + + buffer->data = auxtrace_buffer__get_data(buffer, fd); + if (!buffer->data) + return -ENOMEM; + } + + if (ptq->pt->snapshot_mode && !buffer->consecutive && old_buffer && + intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer)) + return -ENOMEM; + + if (old_buffer) + auxtrace_buffer__drop_data(old_buffer); + + if (buffer->use_data) { + b->len = buffer->use_size; + b->buf = buffer->use_data; + } else { + b->len = buffer->size; + b->buf = buffer->data; + } + b->ref_timestamp = buffer->reference; + + if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode && + !buffer->consecutive)) { + b->consecutive = false; + b->trace_nr = buffer->buffer_nr + 1; + } else { + b->consecutive = true; + } + + if (ptq->use_buffer_pid_tid && (ptq->pid != buffer->pid || + ptq->tid != buffer->tid)) + intel_pt_use_buffer_pid_tid(ptq, queue, buffer); + + if (ptq->step_through_buffers) + ptq->stop = true; + + if (!b->len) + return intel_pt_get_trace(b, data); + + return 0; +} + +struct intel_pt_cache_entry { + struct auxtrace_cache_entry entry; + u64 insn_cnt; + u64 byte_cnt; + enum intel_pt_insn_op op; + enum intel_pt_insn_branch branch; + int length; + 
int32_t rel; +}; + +static int intel_pt_config_div(const char *var, const char *value, void *data) +{ + int *d = data; + long val; + + if (!strcmp(var, "intel-pt.cache-divisor")) { + val = strtol(value, NULL, 0); + if (val > 0 && val <= INT_MAX) + *d = val; + } + + return 0; +} + +static int intel_pt_cache_divisor(void) +{ + static int d; + + if (d) + return d; + + perf_config(intel_pt_config_div, &d); + + if (!d) + d = 64; + + return d; +} + +static unsigned int intel_pt_cache_size(struct dso *dso, + struct machine *machine) +{ + off_t size; + + size = dso__data_size(dso, machine); + size /= intel_pt_cache_divisor(); + if (size < 1000) + return 10; + if (size > (1 << 21)) + return 21; + return 32 - __builtin_clz(size); +} + +static struct auxtrace_cache *intel_pt_cache(struct dso *dso, + struct machine *machine) +{ + struct auxtrace_cache *c; + unsigned int bits; + + if (dso->auxtrace_cache) + return dso->auxtrace_cache; + + bits = intel_pt_cache_size(dso, machine); + + /* Ignoring cache creation failure */ + c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200); + + dso->auxtrace_cache = c; + + return c; +} + +static int intel_pt_cache_add(struct dso *dso, struct machine *machine, + u64 offset, u64 insn_cnt, u64 byte_cnt, + struct intel_pt_insn *intel_pt_insn) +{ + struct auxtrace_cache *c = intel_pt_cache(dso, machine); + struct intel_pt_cache_entry *e; + int err; + + if (!c) + return -ENOMEM; + + e = auxtrace_cache__alloc_entry(c); + if (!e) + return -ENOMEM; + + e->insn_cnt = insn_cnt; + e->byte_cnt = byte_cnt; + e->op = intel_pt_insn->op; + e->branch = intel_pt_insn->branch; + e->length = intel_pt_insn->length; + e->rel = intel_pt_insn->rel; + + err = auxtrace_cache__add(c, offset, &e->entry); + if (err) + auxtrace_cache__free_entry(c, e); + + return err; +} + +static struct intel_pt_cache_entry * +intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset) +{ + struct auxtrace_cache *c = intel_pt_cache(dso, machine); + + if (!c) + return NULL; + + return auxtrace_cache__lookup(dso->auxtrace_cache, offset); +} + +static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, + uint64_t *insn_cnt_ptr, uint64_t *ip, + uint64_t to_ip, uint64_t max_insn_cnt, + void *data) +{ + struct intel_pt_queue *ptq = data; + struct machine *machine = ptq->pt->machine; + struct thread *thread; + struct addr_location al; + unsigned char buf[1024]; + size_t bufsz; + ssize_t len; + int x86_64; + u8 cpumode; + u64 offset, start_offset, start_ip; + u64 insn_cnt = 0; + bool one_map = true; + + if (to_ip && *ip == to_ip) + goto out_no_cache; + + bufsz = intel_pt_insn_max_size(); + + if (*ip >= ptq->pt->kernel_start) + cpumode = PERF_RECORD_MISC_KERNEL; + else + cpumode = PERF_RECORD_MISC_USER; + + thread = ptq->thread; + if (!thread) { + if (cpumode != PERF_RECORD_MISC_KERNEL) + return -EINVAL; + thread = ptq->pt->unknown_thread; + } + + while (1) { + thread__find_addr_map(thread, cpumode, MAP__FUNCTION, *ip, &al); + if (!al.map || !al.map->dso) + return -EINVAL; + + if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR && + dso__data_status_seen(al.map->dso, + DSO_DATA_STATUS_SEEN_ITRACE)) + return -ENOENT; + + offset = al.map->map_ip(al.map, *ip); + + if (!to_ip && one_map) { + struct intel_pt_cache_entry *e; + + e = intel_pt_cache_lookup(al.map->dso, machine, offset); + if (e && + (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) { + *insn_cnt_ptr = e->insn_cnt; + *ip += e->byte_cnt; + intel_pt_insn->op = e->op; + intel_pt_insn->branch = e->branch; + 
intel_pt_insn->length = e->length; + intel_pt_insn->rel = e->rel; + intel_pt_log_insn_no_data(intel_pt_insn, *ip); + return 0; + } + } + + start_offset = offset; + start_ip = *ip; + + /* Load maps to ensure dso->is_64_bit has been updated */ + map__load(al.map, machine->symbol_filter); + + x86_64 = al.map->dso->is_64_bit; + + while (1) { + len = dso__data_read_offset(al.map->dso, machine, + offset, buf, bufsz); + if (len <= 0) + return -EINVAL; + + if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn)) + return -EINVAL; + + intel_pt_log_insn(intel_pt_insn, *ip); + + insn_cnt += 1; + + if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH) + goto out; + + if (max_insn_cnt && insn_cnt >= max_insn_cnt) + goto out_no_cache; + + *ip += intel_pt_insn->length; + + if (to_ip && *ip == to_ip) + goto out_no_cache; + + if (*ip >= al.map->end) + break; + + offset += intel_pt_insn->length; + } + one_map = false; + } +out: + *insn_cnt_ptr = insn_cnt; + + if (!one_map) + goto out_no_cache; + + /* + * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate + * entries. + */ + if (to_ip) { + struct intel_pt_cache_entry *e; + + e = intel_pt_cache_lookup(al.map->dso, machine, start_offset); + if (e) + return 0; + } + + /* Ignore cache errors */ + intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt, + *ip - start_ip, intel_pt_insn); + + return 0; + +out_no_cache: + *insn_cnt_ptr = insn_cnt; + return 0; +} + +static bool intel_pt_get_config(struct intel_pt *pt, + struct perf_event_attr *attr, u64 *config) +{ + if (attr->type == pt->pmu_type) { + if (config) + *config = attr->config; + return true; + } + + return false; +} + +static bool intel_pt_exclude_kernel(struct intel_pt *pt) +{ + struct perf_evsel *evsel; + + evlist__for_each(pt->session->evlist, evsel) { + if (intel_pt_get_config(pt, &evsel->attr, NULL) && + !evsel->attr.exclude_kernel) + return false; + } + return true; +} + +static bool intel_pt_return_compression(struct intel_pt *pt) +{ + struct perf_evsel *evsel; + u64 config; + + if (!pt->noretcomp_bit) + return true; + + evlist__for_each(pt->session->evlist, evsel) { + if (intel_pt_get_config(pt, &evsel->attr, &config) && + (config & pt->noretcomp_bit)) + return false; + } + return true; +} + +static unsigned int intel_pt_mtc_period(struct intel_pt *pt) +{ + struct perf_evsel *evsel; + unsigned int shift; + u64 config; + + if (!pt->mtc_freq_bits) + return 0; + + for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++) + config >>= 1; + + evlist__for_each(pt->session->evlist, evsel) { + if (intel_pt_get_config(pt, &evsel->attr, &config)) + return (config & pt->mtc_freq_bits) >> shift; + } + return 0; +} + +static bool intel_pt_timeless_decoding(struct intel_pt *pt) +{ + struct perf_evsel *evsel; + bool timeless_decoding = true; + u64 config; + + if (!pt->tsc_bit || !pt->cap_user_time_zero) + return true; + + evlist__for_each(pt->session->evlist, evsel) { + if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME)) + return true; + if (intel_pt_get_config(pt, &evsel->attr, &config)) { + if (config & pt->tsc_bit) + timeless_decoding = false; + else + return true; + } + } + return timeless_decoding; +} + +static bool intel_pt_tracing_kernel(struct intel_pt *pt) +{ + struct perf_evsel *evsel; + + evlist__for_each(pt->session->evlist, evsel) { + if (intel_pt_get_config(pt, &evsel->attr, NULL) && + !evsel->attr.exclude_kernel) + return true; + } + return false; +} + +static bool intel_pt_have_tsc(struct intel_pt *pt) +{ + struct perf_evsel *evsel; + bool have_tsc = false; + u64 
config; + + if (!pt->tsc_bit) + return false; + + evlist__for_each(pt->session->evlist, evsel) { + if (intel_pt_get_config(pt, &evsel->attr, &config)) { + if (config & pt->tsc_bit) + have_tsc = true; + else + return false; + } + } + return have_tsc; +} + +static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns) +{ + u64 quot, rem; + + quot = ns / pt->tc.time_mult; + rem = ns % pt->tc.time_mult; + return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) / + pt->tc.time_mult; +} + +static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt, + unsigned int queue_nr) +{ + struct intel_pt_params params = { .get_trace = 0, }; + struct intel_pt_queue *ptq; + + ptq = zalloc(sizeof(struct intel_pt_queue)); + if (!ptq) + return NULL; + + if (pt->synth_opts.callchain) { + size_t sz = sizeof(struct ip_callchain); + + sz += pt->synth_opts.callchain_sz * sizeof(u64); + ptq->chain = zalloc(sz); + if (!ptq->chain) + goto out_free; + } + + if (pt->synth_opts.last_branch) { + size_t sz = sizeof(struct branch_stack); + + sz += pt->synth_opts.last_branch_sz * + sizeof(struct branch_entry); + ptq->last_branch = zalloc(sz); + if (!ptq->last_branch) + goto out_free; + ptq->last_branch_rb = zalloc(sz); + if (!ptq->last_branch_rb) + goto out_free; + } + + ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE); + if (!ptq->event_buf) + goto out_free; + + ptq->pt = pt; + ptq->queue_nr = queue_nr; + ptq->exclude_kernel = intel_pt_exclude_kernel(pt); + ptq->pid = -1; + ptq->tid = -1; + ptq->cpu = -1; + ptq->next_tid = -1; + + params.get_trace = intel_pt_get_trace; + params.walk_insn = intel_pt_walk_next_insn; + params.data = ptq; + params.return_compression = intel_pt_return_compression(pt); + params.max_non_turbo_ratio = pt->max_non_turbo_ratio; + params.mtc_period = intel_pt_mtc_period(pt); + params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n; + params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d; + + if (pt->synth_opts.instructions) { + if (pt->synth_opts.period) { + switch (pt->synth_opts.period_type) { + case PERF_ITRACE_PERIOD_INSTRUCTIONS: + params.period_type = + INTEL_PT_PERIOD_INSTRUCTIONS; + params.period = pt->synth_opts.period; + break; + case PERF_ITRACE_PERIOD_TICKS: + params.period_type = INTEL_PT_PERIOD_TICKS; + params.period = pt->synth_opts.period; + break; + case PERF_ITRACE_PERIOD_NANOSECS: + params.period_type = INTEL_PT_PERIOD_TICKS; + params.period = intel_pt_ns_to_ticks(pt, + pt->synth_opts.period); + break; + default: + break; + } + } + + if (!params.period) { + params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS; + params.period = 1; + } + } + + ptq->decoder = intel_pt_decoder_new(&params); + if (!ptq->decoder) + goto out_free; + + return ptq; + +out_free: + zfree(&ptq->event_buf); + zfree(&ptq->last_branch); + zfree(&ptq->last_branch_rb); + zfree(&ptq->chain); + free(ptq); + return NULL; +} + +static void intel_pt_free_queue(void *priv) +{ + struct intel_pt_queue *ptq = priv; + + if (!ptq) + return; + thread__zput(ptq->thread); + intel_pt_decoder_free(ptq->decoder); + zfree(&ptq->event_buf); + zfree(&ptq->last_branch); + zfree(&ptq->last_branch_rb); + zfree(&ptq->chain); + free(ptq); +} + +static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt, + struct auxtrace_queue *queue) +{ + struct intel_pt_queue *ptq = queue->priv; + + if (queue->tid == -1 || pt->have_sched_switch) { + ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu); + thread__zput(ptq->thread); + } + + if (!ptq->thread && ptq->tid != -1) + ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid); +
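+ /* With the thread resolved, take pid (and, for per-thread mmaps, cpu) from it */ + if 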
(ptq->thread) { + ptq->pid = ptq->thread->pid_; + if (queue->cpu == -1) + ptq->cpu = ptq->thread->cpu; + } +} + +static void intel_pt_sample_flags(struct intel_pt_queue *ptq) +{ + if (ptq->state->flags & INTEL_PT_ABORT_TX) { + ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT; + } else if (ptq->state->flags & INTEL_PT_ASYNC) { + if (ptq->state->to_ip) + ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | + PERF_IP_FLAG_ASYNC | + PERF_IP_FLAG_INTERRUPT; + else + ptq->flags = PERF_IP_FLAG_BRANCH | + PERF_IP_FLAG_TRACE_END; + ptq->insn_len = 0; + } else { + if (ptq->state->from_ip) + ptq->flags = intel_pt_insn_type(ptq->state->insn_op); + else + ptq->flags = PERF_IP_FLAG_BRANCH | + PERF_IP_FLAG_TRACE_BEGIN; + if (ptq->state->flags & INTEL_PT_IN_TX) + ptq->flags |= PERF_IP_FLAG_IN_TX; + ptq->insn_len = ptq->state->insn_len; + } +} + +static int intel_pt_setup_queue(struct intel_pt *pt, + struct auxtrace_queue *queue, + unsigned int queue_nr) +{ + struct intel_pt_queue *ptq = queue->priv; + + if (list_empty(&queue->head)) + return 0; + + if (!ptq) { + ptq = intel_pt_alloc_queue(pt, queue_nr); + if (!ptq) + return -ENOMEM; + queue->priv = ptq; + + if (queue->cpu != -1) + ptq->cpu = queue->cpu; + ptq->tid = queue->tid; + + if (pt->sampling_mode) { + if (pt->timeless_decoding) + ptq->step_through_buffers = true; + if (pt->timeless_decoding || !pt->have_sched_switch) + ptq->use_buffer_pid_tid = true; + } + } + + if (!ptq->on_heap && + (!pt->sync_switch || + ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) { + const struct intel_pt_state *state; + int ret; + + if (pt->timeless_decoding) + return 0; + + intel_pt_log("queue %u getting timestamp\n", queue_nr); + intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n", + queue_nr, ptq->cpu, ptq->pid, ptq->tid); + while (1) { + state = intel_pt_decode(ptq->decoder); + if (state->err) { + if (state->err == INTEL_PT_ERR_NODATA) { + intel_pt_log("queue %u has no timestamp\n", + queue_nr); + return 0; + } + continue; + } + if (state->timestamp) + break; + } + + ptq->timestamp = state->timestamp; + intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n", + queue_nr, ptq->timestamp); + ptq->state = state; + ptq->have_sample = true; + intel_pt_sample_flags(ptq); + ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp); + if (ret) + return ret; + ptq->on_heap = true; + } + + return 0; +} + +static int intel_pt_setup_queues(struct intel_pt *pt) +{ + unsigned int i; + int ret; + + for (i = 0; i < pt->queues.nr_queues; i++) { + ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i); + if (ret) + return ret; + } + return 0; +} + +static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq) +{ + struct branch_stack *bs_src = ptq->last_branch_rb; + struct branch_stack *bs_dst = ptq->last_branch; + size_t nr = 0; + + bs_dst->nr = bs_src->nr; + + if (!bs_src->nr) + return; + + nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos; + memcpy(&bs_dst->entries[0], + &bs_src->entries[ptq->last_branch_pos], + sizeof(struct branch_entry) * nr); + + if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) { + memcpy(&bs_dst->entries[nr], + &bs_src->entries[0], + sizeof(struct branch_entry) * ptq->last_branch_pos); + } +} + +static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq) +{ + ptq->last_branch_pos = 0; + ptq->last_branch_rb->nr = 0; +} + +static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq) +{ + const struct intel_pt_state *state = ptq->state; + struct branch_stack *bs = 
ptq->last_branch_rb; + struct branch_entry *be; + + if (!ptq->last_branch_pos) + ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz; + + ptq->last_branch_pos -= 1; + + be = &bs->entries[ptq->last_branch_pos]; + be->from = state->from_ip; + be->to = state->to_ip; + be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX); + be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX); + /* No support for mispredict */ + be->flags.mispred = ptq->pt->mispred_all; + + if (bs->nr < ptq->pt->synth_opts.last_branch_sz) + bs->nr += 1; +} + +static int intel_pt_inject_event(union perf_event *event, + struct perf_sample *sample, u64 type, + bool swapped) +{ + event->header.size = perf_event__sample_event_size(sample, type, 0); + return perf_event__synthesize_sample(event, type, 0, sample, swapped); +} + +static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq) +{ + int ret; + struct intel_pt *pt = ptq->pt; + union perf_event *event = ptq->event_buf; + struct perf_sample sample = { .ip = 0, }; + struct dummy_branch_stack { + u64 nr; + struct branch_entry entries; + } dummy_bs; + + if (pt->branches_filter && !(pt->branches_filter & ptq->flags)) + return 0; + + event->sample.header.type = PERF_RECORD_SAMPLE; + event->sample.header.misc = PERF_RECORD_MISC_USER; + event->sample.header.size = sizeof(struct perf_event_header); + + if (!pt->timeless_decoding) + sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); + + sample.ip = ptq->state->from_ip; + sample.pid = ptq->pid; + sample.tid = ptq->tid; + sample.addr = ptq->state->to_ip; + sample.id = ptq->pt->branches_id; + sample.stream_id = ptq->pt->branches_id; + sample.period = 1; + sample.cpu = ptq->cpu; + sample.flags = ptq->flags; + sample.insn_len = ptq->insn_len; + + /* + * perf report cannot handle events without a branch stack when using + * SORT_MODE__BRANCH so make a dummy one. 
+ */ + if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) { + dummy_bs = (struct dummy_branch_stack){ + .nr = 1, + .entries = { + .from = sample.ip, + .to = sample.addr, + }, + }; + sample.branch_stack = (struct branch_stack *)&dummy_bs; + } + + if (pt->synth_opts.inject) { + ret = intel_pt_inject_event(event, &sample, + pt->branches_sample_type, + pt->synth_needs_swap); + if (ret) + return ret; + } + + ret = perf_session__deliver_synth_event(pt->session, event, &sample); + if (ret) + pr_err("Intel Processor Trace: failed to deliver branch event, error %d\n", + ret); + + return ret; +} + +static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq) +{ + int ret; + struct intel_pt *pt = ptq->pt; + union perf_event *event = ptq->event_buf; + struct perf_sample sample = { .ip = 0, }; + + event->sample.header.type = PERF_RECORD_SAMPLE; + event->sample.header.misc = PERF_RECORD_MISC_USER; + event->sample.header.size = sizeof(struct perf_event_header); + + if (!pt->timeless_decoding) + sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); + + sample.ip = ptq->state->from_ip; + sample.pid = ptq->pid; + sample.tid = ptq->tid; + sample.addr = ptq->state->to_ip; + sample.id = ptq->pt->instructions_id; + sample.stream_id = ptq->pt->instructions_id; + sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt; + sample.cpu = ptq->cpu; + sample.flags = ptq->flags; + sample.insn_len = ptq->insn_len; + + ptq->last_insn_cnt = ptq->state->tot_insn_cnt; + + if (pt->synth_opts.callchain) { + thread_stack__sample(ptq->thread, ptq->chain, + pt->synth_opts.callchain_sz, sample.ip); + sample.callchain = ptq->chain; + } + + if (pt->synth_opts.last_branch) { + intel_pt_copy_last_branch_rb(ptq); + sample.branch_stack = ptq->last_branch; + } + + if (pt->synth_opts.inject) { + ret = intel_pt_inject_event(event, &sample, + pt->instructions_sample_type, + pt->synth_needs_swap); + if (ret) + return ret; + } + + ret = perf_session__deliver_synth_event(pt->session, event, &sample); + if (ret) + pr_err("Intel Processor Trace: failed to deliver instruction event, error %d\n", + ret); + + if (pt->synth_opts.last_branch) + intel_pt_reset_last_branch_rb(ptq); + + return ret; +} + +static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq) +{ + int ret; + struct intel_pt *pt = ptq->pt; + union perf_event *event = ptq->event_buf; + struct perf_sample sample = { .ip = 0, }; + + event->sample.header.type = PERF_RECORD_SAMPLE; + event->sample.header.misc = PERF_RECORD_MISC_USER; + event->sample.header.size = sizeof(struct perf_event_header); + + if (!pt->timeless_decoding) + sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc); + + sample.ip = ptq->state->from_ip; + sample.pid = ptq->pid; + sample.tid = ptq->tid; + sample.addr = ptq->state->to_ip; + sample.id = ptq->pt->transactions_id; + sample.stream_id = ptq->pt->transactions_id; + sample.period = 1; + sample.cpu = ptq->cpu; + sample.flags = ptq->flags; + sample.insn_len = ptq->insn_len; + + if (pt->synth_opts.callchain) { + thread_stack__sample(ptq->thread, ptq->chain, + pt->synth_opts.callchain_sz, sample.ip); + sample.callchain = ptq->chain; + } + + if (pt->synth_opts.last_branch) { + intel_pt_copy_last_branch_rb(ptq); + sample.branch_stack = ptq->last_branch; + } + + if (pt->synth_opts.inject) { + ret = intel_pt_inject_event(event, &sample, + pt->transactions_sample_type, + pt->synth_needs_swap); + if (ret) + return ret; + } + + ret = perf_session__deliver_synth_event(pt->session, event, &sample); + if (ret) + pr_err("Intel 
Processor Trace: failed to deliver transaction event, error %d\n", + ret); + + if (pt->synth_opts.last_branch) + intel_pt_reset_last_branch_rb(ptq); + + return ret; +} + +static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu, + pid_t pid, pid_t tid, u64 ip) +{ + union perf_event event; + char msg[MAX_AUXTRACE_ERROR_MSG]; + int err; + + intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG); + + auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, + code, cpu, pid, tid, ip, msg); + + err = perf_session__deliver_synth_event(pt->session, &event, NULL); + if (err) + pr_err("Intel Processor Trace: failed to deliver error event, error %d\n", + err); + + return err; +} + +static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq) +{ + struct auxtrace_queue *queue; + pid_t tid = ptq->next_tid; + int err; + + if (tid == -1) + return 0; + + intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid); + + err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid); + + queue = &pt->queues.queue_array[ptq->queue_nr]; + intel_pt_set_pid_tid_cpu(pt, queue); + + ptq->next_tid = -1; + + return err; +} + +static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip) +{ + struct intel_pt *pt = ptq->pt; + + return ip == pt->switch_ip && + (ptq->flags & PERF_IP_FLAG_BRANCH) && + !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC | + PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT)); +} + +static int intel_pt_sample(struct intel_pt_queue *ptq) +{ + const struct intel_pt_state *state = ptq->state; + struct intel_pt *pt = ptq->pt; + int err; + + if (!ptq->have_sample) + return 0; + + ptq->have_sample = false; + + if (pt->sample_instructions && + (state->type & INTEL_PT_INSTRUCTION)) { + err = intel_pt_synth_instruction_sample(ptq); + if (err) + return err; + } + + if (pt->sample_transactions && + (state->type & INTEL_PT_TRANSACTION)) { + err = intel_pt_synth_transaction_sample(ptq); + if (err) + return err; + } + + if (!(state->type & INTEL_PT_BRANCH)) + return 0; + + if (pt->synth_opts.callchain) + thread_stack__event(ptq->thread, ptq->flags, state->from_ip, + state->to_ip, ptq->insn_len, + state->trace_nr); + else + thread_stack__set_trace_nr(ptq->thread, state->trace_nr); + + if (pt->sample_branches) { + err = intel_pt_synth_branch_sample(ptq); + if (err) + return err; + } + + if (pt->synth_opts.last_branch) + intel_pt_update_last_branch_rb(ptq); + + if (!pt->sync_switch) + return 0; + + if (intel_pt_is_switch_ip(ptq, state->to_ip)) { + switch (ptq->switch_state) { + case INTEL_PT_SS_UNKNOWN: + case INTEL_PT_SS_EXPECTING_SWITCH_IP: + err = intel_pt_next_tid(pt, ptq); + if (err) + return err; + ptq->switch_state = INTEL_PT_SS_TRACING; + break; + default: + ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT; + return 1; + } + } else if (!state->to_ip) { + ptq->switch_state = INTEL_PT_SS_NOT_TRACING; + } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) { + ptq->switch_state = INTEL_PT_SS_UNKNOWN; + } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN && + state->to_ip == pt->ptss_ip && + (ptq->flags & PERF_IP_FLAG_CALL)) { + ptq->switch_state = INTEL_PT_SS_TRACING; + } + + return 0; +} + +static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip) +{ + struct machine *machine = pt->machine; + struct map *map; + struct symbol *sym, *start; + u64 ip, switch_ip = 0; + const char *ptss; + + if (ptss_ip) + *ptss_ip = 0; + + map = machine__kernel_map(machine); + if (!map) + return 0; +
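+ /* + * Locate the kernel's __switch_to symbol: a branch to this address is + * how the decoder recognises a context switch when sync_switch is on. + */ + if (map__load(map, machine->symbol_filter)) 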
+ return 0; + + start = dso__first_symbol(map->dso, MAP__FUNCTION); + + for (sym = start; sym; sym = dso__next_symbol(sym)) { + if (sym->binding == STB_GLOBAL && + !strcmp(sym->name, "__switch_to")) { + ip = map->unmap_ip(map, sym->start); + if (ip >= map->start && ip < map->end) { + switch_ip = ip; + break; + } + } + } + + if (!switch_ip || !ptss_ip) + return 0; + + if (pt->have_sched_switch == 1) + ptss = "perf_trace_sched_switch"; + else + ptss = "__perf_event_task_sched_out"; + + for (sym = start; sym; sym = dso__next_symbol(sym)) { + if (!strcmp(sym->name, ptss)) { + ip = map->unmap_ip(map, sym->start); + if (ip >= map->start && ip < map->end) { + *ptss_ip = ip; + break; + } + } + } + + return switch_ip; +} + +static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp) +{ + const struct intel_pt_state *state = ptq->state; + struct intel_pt *pt = ptq->pt; + int err; + + if (!pt->kernel_start) { + pt->kernel_start = machine__kernel_start(pt->machine); + if (pt->per_cpu_mmaps && + (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) && + !pt->timeless_decoding && intel_pt_tracing_kernel(pt) && + !pt->sampling_mode) { + pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip); + if (pt->switch_ip) { + intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n", + pt->switch_ip, pt->ptss_ip); + pt->sync_switch = true; + } + } + } + + intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n", + ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid); + while (1) { + err = intel_pt_sample(ptq); + if (err) + return err; + + state = intel_pt_decode(ptq->decoder); + if (state->err) { + if (state->err == INTEL_PT_ERR_NODATA) + return 1; + if (pt->sync_switch && + state->from_ip >= pt->kernel_start) { + pt->sync_switch = false; + intel_pt_next_tid(pt, ptq); + } + if (pt->synth_opts.errors) { + err = intel_pt_synth_error(pt, state->err, + ptq->cpu, ptq->pid, + ptq->tid, + state->from_ip); + if (err) + return err; + } + continue; + } + + ptq->state = state; + ptq->have_sample = true; + intel_pt_sample_flags(ptq); + + /* Use estimated TSC upon return to user space */ + if (pt->est_tsc && + (state->from_ip >= pt->kernel_start || !state->from_ip) && + state->to_ip && state->to_ip < pt->kernel_start) { + intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n", + state->timestamp, state->est_timestamp); + ptq->timestamp = state->est_timestamp; + /* Use estimated TSC in unknown switch state */ + } else if (pt->sync_switch && + ptq->switch_state == INTEL_PT_SS_UNKNOWN && + intel_pt_is_switch_ip(ptq, state->to_ip) && + ptq->next_tid == -1) { + intel_pt_log("TSC %"PRIx64" est. 
TSC %"PRIx64"\n", + state->timestamp, state->est_timestamp); + ptq->timestamp = state->est_timestamp; + } else if (state->timestamp > ptq->timestamp) { + ptq->timestamp = state->timestamp; + } + + if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) { + *timestamp = ptq->timestamp; + return 0; + } + } + return 0; +} + +static inline int intel_pt_update_queues(struct intel_pt *pt) +{ + if (pt->queues.new_data) { + pt->queues.new_data = false; + return intel_pt_setup_queues(pt); + } + return 0; +} + +static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp) +{ + unsigned int queue_nr; + u64 ts; + int ret; + + while (1) { + struct auxtrace_queue *queue; + struct intel_pt_queue *ptq; + + if (!pt->heap.heap_cnt) + return 0; + + if (pt->heap.heap_array[0].ordinal >= timestamp) + return 0; + + queue_nr = pt->heap.heap_array[0].queue_nr; + queue = &pt->queues.queue_array[queue_nr]; + ptq = queue->priv; + + intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n", + queue_nr, pt->heap.heap_array[0].ordinal, + timestamp); + + auxtrace_heap__pop(&pt->heap); + + if (pt->heap.heap_cnt) { + ts = pt->heap.heap_array[0].ordinal + 1; + if (ts > timestamp) + ts = timestamp; + } else { + ts = timestamp; + } + + intel_pt_set_pid_tid_cpu(pt, queue); + + ret = intel_pt_run_decoder(ptq, &ts); + + if (ret < 0) { + auxtrace_heap__add(&pt->heap, queue_nr, ts); + return ret; + } + + if (!ret) { + ret = auxtrace_heap__add(&pt->heap, queue_nr, ts); + if (ret < 0) + return ret; + } else { + ptq->on_heap = false; + } + } + + return 0; +} + +static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid, + u64 time_) +{ + struct auxtrace_queues *queues = &pt->queues; + unsigned int i; + u64 ts = 0; + + for (i = 0; i < queues->nr_queues; i++) { + struct auxtrace_queue *queue = &pt->queues.queue_array[i]; + struct intel_pt_queue *ptq = queue->priv; + + if (ptq && (tid == -1 || ptq->tid == tid)) { + ptq->time = time_; + intel_pt_set_pid_tid_cpu(pt, queue); + intel_pt_run_decoder(ptq, &ts); + } + } + return 0; +} + +static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample) +{ + return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu, + sample->pid, sample->tid, 0); +} + +static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu) +{ + unsigned i, j; + + if (cpu < 0 || !pt->queues.nr_queues) + return NULL; + + if ((unsigned)cpu >= pt->queues.nr_queues) + i = pt->queues.nr_queues - 1; + else + i = cpu; + + if (pt->queues.queue_array[i].cpu == cpu) + return pt->queues.queue_array[i].priv; + + for (j = 0; i > 0; j++) { + if (pt->queues.queue_array[--i].cpu == cpu) + return pt->queues.queue_array[i].priv; + } + + for (; j < pt->queues.nr_queues; j++) { + if (pt->queues.queue_array[j].cpu == cpu) + return pt->queues.queue_array[j].priv; + } + + return NULL; +} + +static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid, + u64 timestamp) +{ + struct intel_pt_queue *ptq; + int err; + + if (!pt->sync_switch) + return 1; + + ptq = intel_pt_cpu_to_ptq(pt, cpu); + if (!ptq) + return 1; + + switch (ptq->switch_state) { + case INTEL_PT_SS_NOT_TRACING: + ptq->next_tid = -1; + break; + case INTEL_PT_SS_UNKNOWN: + case INTEL_PT_SS_TRACING: + ptq->next_tid = tid; + ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP; + return 0; + case INTEL_PT_SS_EXPECTING_SWITCH_EVENT: + if (!ptq->on_heap) { + ptq->timestamp = perf_time_to_tsc(timestamp, + &pt->tc); + err = auxtrace_heap__add(&pt->heap, ptq->queue_nr, + ptq->timestamp); + if (err) + return 
err; + ptq->on_heap = true; + } + ptq->switch_state = INTEL_PT_SS_TRACING; + break; + case INTEL_PT_SS_EXPECTING_SWITCH_IP: + ptq->next_tid = tid; + intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu); + break; + default: + break; + } + + return 1; +} + +static int intel_pt_process_switch(struct intel_pt *pt, + struct perf_sample *sample) +{ + struct perf_evsel *evsel; + pid_t tid; + int cpu, ret; + + evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id); + if (evsel != pt->switch_evsel) + return 0; + + tid = perf_evsel__intval(evsel, sample, "next_pid"); + cpu = sample->cpu; + + intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n", + cpu, tid, sample->time, perf_time_to_tsc(sample->time, + &pt->tc)); + + ret = intel_pt_sync_switch(pt, cpu, tid, sample->time); + if (ret <= 0) + return ret; + + return machine__set_current_tid(pt->machine, cpu, -1, tid); +} + +static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event, + struct perf_sample *sample) +{ + bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; + pid_t pid, tid; + int cpu, ret; + + cpu = sample->cpu; + + if (pt->have_sched_switch == 3) { + if (!out) + return 0; + if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) { + pr_err("Expecting CPU-wide context switch event\n"); + return -EINVAL; + } + pid = event->context_switch.next_prev_pid; + tid = event->context_switch.next_prev_tid; + } else { + if (out) + return 0; + pid = sample->pid; + tid = sample->tid; + } + + if (tid == -1) { + pr_err("context_switch event has no tid\n"); + return -EINVAL; + } + + intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n", + cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time, + &pt->tc)); + + ret = intel_pt_sync_switch(pt, cpu, tid, sample->time); + if (ret <= 0) + return ret; + + return machine__set_current_tid(pt->machine, cpu, pid, tid); +} + +static int intel_pt_process_itrace_start(struct intel_pt *pt, + union perf_event *event, + struct perf_sample *sample) +{ + if (!pt->per_cpu_mmaps) + return 0; + + intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n", + sample->cpu, event->itrace_start.pid, + event->itrace_start.tid, sample->time, + perf_time_to_tsc(sample->time, &pt->tc)); + + return machine__set_current_tid(pt->machine, sample->cpu, + event->itrace_start.pid, + event->itrace_start.tid); +} + +static int intel_pt_process_event(struct perf_session *session, + union perf_event *event, + struct perf_sample *sample, + struct perf_tool *tool) +{ + struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, + auxtrace); + u64 timestamp; + int err = 0; + + if (dump_trace) + return 0; + + if (!tool->ordered_events) { + pr_err("Intel Processor Trace requires ordered events\n"); + return -EINVAL; + } + + if (sample->time && sample->time != (u64)-1) + timestamp = perf_time_to_tsc(sample->time, &pt->tc); + else + timestamp = 0; + + if (timestamp || pt->timeless_decoding) { + err = intel_pt_update_queues(pt); + if (err) + return err; + } + + if (pt->timeless_decoding) { + if (event->header.type == PERF_RECORD_EXIT) { + err = intel_pt_process_timeless_queues(pt, + event->fork.tid, + sample->time); + } + } else if (timestamp) { + err = intel_pt_process_queues(pt, timestamp); + } + if (err) + return err; + + if (event->header.type == PERF_RECORD_AUX && + (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) && + pt->synth_opts.errors) { + err = intel_pt_lost(pt, sample); + if (err) + return err; + } + + if 
(pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE) + err = intel_pt_process_switch(pt, sample); + else if (event->header.type == PERF_RECORD_ITRACE_START) + err = intel_pt_process_itrace_start(pt, event, sample); + else if (event->header.type == PERF_RECORD_SWITCH || + event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) + err = intel_pt_context_switch(pt, event, sample); + + intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n", + perf_event__name(event->header.type), event->header.type, + sample->cpu, sample->time, timestamp); + + return err; +} + +static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool) +{ + struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, + auxtrace); + int ret; + + if (dump_trace) + return 0; + + if (!tool->ordered_events) + return -EINVAL; + + ret = intel_pt_update_queues(pt); + if (ret < 0) + return ret; + + if (pt->timeless_decoding) + return intel_pt_process_timeless_queues(pt, -1, + MAX_TIMESTAMP - 1); + + return intel_pt_process_queues(pt, MAX_TIMESTAMP); +} + +static void intel_pt_free_events(struct perf_session *session) +{ + struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, + auxtrace); + struct auxtrace_queues *queues = &pt->queues; + unsigned int i; + + for (i = 0; i < queues->nr_queues; i++) { + intel_pt_free_queue(queues->queue_array[i].priv); + queues->queue_array[i].priv = NULL; + } + intel_pt_log_disable(); + auxtrace_queues__free(queues); +} + +static void intel_pt_free(struct perf_session *session) +{ + struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, + auxtrace); + + auxtrace_heap__free(&pt->heap); + intel_pt_free_events(session); + session->auxtrace = NULL; + thread__delete(pt->unknown_thread); + free(pt); +} + +static int intel_pt_process_auxtrace_event(struct perf_session *session, + union perf_event *event, + struct perf_tool *tool __maybe_unused) +{ + struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, + auxtrace); + + if (pt->sampling_mode) + return 0; + + if (!pt->data_queued) { + struct auxtrace_buffer *buffer; + off_t data_offset; + int fd = perf_data_file__fd(session->file); + int err; + + if (perf_data_file__is_pipe(session->file)) { + data_offset = 0; + } else { + data_offset = lseek(fd, 0, SEEK_CUR); + if (data_offset == -1) + return -errno; + } + + err = auxtrace_queues__add_event(&pt->queues, session, event, + data_offset, &buffer); + if (err) + return err; + + /* Dump here now we have copied a piped trace out of the pipe */ + if (dump_trace) { + if (auxtrace_buffer__get_data(buffer, fd)) { + intel_pt_dump_event(pt, buffer->data, + buffer->size); + auxtrace_buffer__put_data(buffer); + } + } + } + + return 0; +} + +struct intel_pt_synth { + struct perf_tool dummy_tool; + struct perf_session *session; +}; + +static int intel_pt_event_synth(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) +{ + struct intel_pt_synth *intel_pt_synth = + container_of(tool, struct intel_pt_synth, dummy_tool); + + return perf_session__deliver_synth_event(intel_pt_synth->session, event, + NULL); +} + +static int intel_pt_synth_event(struct perf_session *session, + struct perf_event_attr *attr, u64 id) +{ + struct intel_pt_synth intel_pt_synth; + + memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth)); + intel_pt_synth.session = session; + + return perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1, + &id, 
intel_pt_event_synth); +} + +static int intel_pt_synth_events(struct intel_pt *pt, + struct perf_session *session) +{ + struct perf_evlist *evlist = session->evlist; + struct perf_evsel *evsel; + struct perf_event_attr attr; + bool found = false; + u64 id; + int err; + + evlist__for_each(evlist, evsel) { + if (evsel->attr.type == pt->pmu_type && evsel->ids) { + found = true; + break; + } + } + + if (!found) { + pr_debug("There are no selected events with Intel Processor Trace data\n"); + return 0; + } + + memset(&attr, 0, sizeof(struct perf_event_attr)); + attr.size = sizeof(struct perf_event_attr); + attr.type = PERF_TYPE_HARDWARE; + attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK; + attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID | + PERF_SAMPLE_PERIOD; + if (pt->timeless_decoding) + attr.sample_type &= ~(u64)PERF_SAMPLE_TIME; + else + attr.sample_type |= PERF_SAMPLE_TIME; + if (!pt->per_cpu_mmaps) + attr.sample_type &= ~(u64)PERF_SAMPLE_CPU; + attr.exclude_user = evsel->attr.exclude_user; + attr.exclude_kernel = evsel->attr.exclude_kernel; + attr.exclude_hv = evsel->attr.exclude_hv; + attr.exclude_host = evsel->attr.exclude_host; + attr.exclude_guest = evsel->attr.exclude_guest; + attr.sample_id_all = evsel->attr.sample_id_all; + attr.read_format = evsel->attr.read_format; + + id = evsel->id[0] + 1000000000; + if (!id) + id = 1; + + if (pt->synth_opts.instructions) { + attr.config = PERF_COUNT_HW_INSTRUCTIONS; + if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS) + attr.sample_period = + intel_pt_ns_to_ticks(pt, pt->synth_opts.period); + else + attr.sample_period = pt->synth_opts.period; + pt->instructions_sample_period = attr.sample_period; + if (pt->synth_opts.callchain) + attr.sample_type |= PERF_SAMPLE_CALLCHAIN; + if (pt->synth_opts.last_branch) + attr.sample_type |= PERF_SAMPLE_BRANCH_STACK; + pr_debug("Synthesizing 'instructions' event with id %" PRIu64 " sample type %#" PRIx64 "\n", + id, (u64)attr.sample_type); + err = intel_pt_synth_event(session, &attr, id); + if (err) { + pr_err("%s: failed to synthesize 'instructions' event type\n", + __func__); + return err; + } + pt->sample_instructions = true; + pt->instructions_sample_type = attr.sample_type; + pt->instructions_id = id; + id += 1; + } + + if (pt->synth_opts.transactions) { + attr.config = PERF_COUNT_HW_INSTRUCTIONS; + attr.sample_period = 1; + if (pt->synth_opts.callchain) + attr.sample_type |= PERF_SAMPLE_CALLCHAIN; + if (pt->synth_opts.last_branch) + attr.sample_type |= PERF_SAMPLE_BRANCH_STACK; + pr_debug("Synthesizing 'transactions' event with id %" PRIu64 " sample type %#" PRIx64 "\n", + id, (u64)attr.sample_type); + err = intel_pt_synth_event(session, &attr, id); + if (err) { + pr_err("%s: failed to synthesize 'transactions' event type\n", + __func__); + return err; + } + pt->sample_transactions = true; + pt->transactions_id = id; + id += 1; + evlist__for_each(evlist, evsel) { + if (evsel->id && evsel->id[0] == pt->transactions_id) { + if (evsel->name) + zfree(&evsel->name); + evsel->name = strdup("transactions"); + break; + } + } + } + + if (pt->synth_opts.branches) { + attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS; + attr.sample_period = 1; + attr.sample_type |= PERF_SAMPLE_ADDR; + attr.sample_type &= ~(u64)PERF_SAMPLE_CALLCHAIN; + attr.sample_type &= ~(u64)PERF_SAMPLE_BRANCH_STACK; + pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n", + id, (u64)attr.sample_type); + err = intel_pt_synth_event(session, &attr, id); + if (err) { + pr_err("%s: 
failed to synthesize 'branches' event type\n", + __func__); + return err; + } + pt->sample_branches = true; + pt->branches_sample_type = attr.sample_type; + pt->branches_id = id; + } + + pt->synth_needs_swap = evsel->needs_swap; + + return 0; +} + +static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + evlist__for_each_reverse(evlist, evsel) { + const char *name = perf_evsel__name(evsel); + + if (!strcmp(name, "sched:sched_switch")) + return evsel; + } + + return NULL; +} + +static bool intel_pt_find_switch(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + if (evsel->attr.context_switch) + return true; + } + + return false; +} + +static int intel_pt_perf_config(const char *var, const char *value, void *data) +{ + struct intel_pt *pt = data; + + if (!strcmp(var, "intel-pt.mispred-all")) + pt->mispred_all = perf_config_bool(var, value); + + return 0; +} + +static const char * const intel_pt_info_fmts[] = { + [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n", + [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n", + [INTEL_PT_TIME_MULT] = " Time Multiplier %"PRIu64"\n", + [INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n", + [INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n", + [INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n", + [INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n", + [INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n", + [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n", + [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n", + [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n", + [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n", + [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n", + [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n", +}; + +static void intel_pt_print_info(u64 *arr, int start, int finish) +{ + int i; + + if (!dump_trace) + return; + + for (i = start; i <= finish; i++) + fprintf(stdout, intel_pt_info_fmts[i], arr[i]); +} + +int intel_pt_process_auxtrace_info(union perf_event *event, + struct perf_session *session) +{ + struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info; + size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS; + struct intel_pt *pt; + int err; + + if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) + + min_sz) + return -EINVAL; + + pt = zalloc(sizeof(struct intel_pt)); + if (!pt) + return -ENOMEM; + + perf_config(intel_pt_perf_config, pt); + + err = auxtrace_queues__init(&pt->queues); + if (err) + goto err_free; + + intel_pt_log_set_name(INTEL_PT_PMU_NAME); + + pt->session = session; + pt->machine = &session->machines.host; /* No kvm support */ + pt->auxtrace_type = auxtrace_info->type; + pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE]; + pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT]; + pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT]; + pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO]; + pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO]; + pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT]; + pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT]; + pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH]; + pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE]; + pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS]; + intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE, + INTEL_PT_PER_CPU_MMAPS); +
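+ /* + * The MTC and CYC parameters were appended to the auxtrace info + * later, so read them only when the header is large enough to + * contain them. + */ + if (auxtrace_info->header.size >= sizeof(struct 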
auxtrace_info_event) + + (sizeof(u64) * INTEL_PT_CYC_BIT)) { + pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT]; + pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS]; + pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N]; + pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D]; + pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT]; + intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT, + INTEL_PT_CYC_BIT); + } + + pt->timeless_decoding = intel_pt_timeless_decoding(pt); + pt->have_tsc = intel_pt_have_tsc(pt); + pt->sampling_mode = false; + pt->est_tsc = !pt->timeless_decoding; + + pt->unknown_thread = thread__new(999999999, 999999999); + if (!pt->unknown_thread) { + err = -ENOMEM; + goto err_free_queues; + } + err = thread__set_comm(pt->unknown_thread, "unknown", 0); + if (err) + goto err_delete_thread; + if (thread__init_map_groups(pt->unknown_thread, pt->machine)) { + err = -ENOMEM; + goto err_delete_thread; + } + + pt->auxtrace.process_event = intel_pt_process_event; + pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event; + pt->auxtrace.flush_events = intel_pt_flush; + pt->auxtrace.free_events = intel_pt_free_events; + pt->auxtrace.free = intel_pt_free; + session->auxtrace = &pt->auxtrace; + + if (dump_trace) + return 0; + + if (pt->have_sched_switch == 1) { + pt->switch_evsel = intel_pt_find_sched_switch(session->evlist); + if (!pt->switch_evsel) { + pr_err("%s: missing sched_switch event\n", __func__); + goto err_delete_thread; + } + } else if (pt->have_sched_switch == 2 && + !intel_pt_find_switch(session->evlist)) { + pr_err("%s: missing context_switch attribute flag\n", __func__); + goto err_delete_thread; + } + + if (session->itrace_synth_opts && session->itrace_synth_opts->set) { + pt->synth_opts = *session->itrace_synth_opts; + } else { + itrace_synth_opts__set_default(&pt->synth_opts); + if (use_browser != -1) { + pt->synth_opts.branches = false; + pt->synth_opts.callchain = true; + } + } + + if (pt->synth_opts.log) + intel_pt_log_enable(); + + /* Maximum non-turbo ratio is TSC freq / 100 MHz */ + if (pt->tc.time_mult) { + u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000); + + pt->max_non_turbo_ratio = (tsc_freq + 50000000) / 100000000; + intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq); + intel_pt_log("Maximum non-turbo ratio %u\n", + pt->max_non_turbo_ratio); + } + + if (pt->synth_opts.calls) + pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC | + PERF_IP_FLAG_TRACE_END; + if (pt->synth_opts.returns) + pt->branches_filter |= PERF_IP_FLAG_RETURN | + PERF_IP_FLAG_TRACE_BEGIN; + + if (pt->synth_opts.callchain && !symbol_conf.use_callchain) { + symbol_conf.use_callchain = true; + if (callchain_register_param(&callchain_param) < 0) { + symbol_conf.use_callchain = false; + pt->synth_opts.callchain = false; + } + } + + err = intel_pt_synth_events(pt, session); + if (err) + goto err_delete_thread; + + err = auxtrace_queues__process_index(&pt->queues, session); + if (err) + goto err_delete_thread; + + if (pt->queues.populated) + pt->data_queued = true; + + if (pt->timeless_decoding) + pr_debug2("Intel PT decoding without timestamps\n"); + + return 0; + +err_delete_thread: + thread__delete(pt->unknown_thread); +err_free_queues: + intel_pt_log_disable(); + auxtrace_queues__free(&pt->queues); + session->auxtrace = NULL; +err_free: + free(pt); + return err; +} diff --git a/kernel/tools/perf/util/intel-pt.h b/kernel/tools/perf/util/intel-pt.h new file mode 100644 index 000000000..0065949df --- /dev/null +++ 
b/kernel/tools/perf/util/intel-pt.h @@ -0,0 +1,56 @@ +/* + * intel_pt.h: Intel Processor Trace support + * Copyright (c) 2013-2015, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef INCLUDE__PERF_INTEL_PT_H__ +#define INCLUDE__PERF_INTEL_PT_H__ + +#define INTEL_PT_PMU_NAME "intel_pt" + +enum { + INTEL_PT_PMU_TYPE, + INTEL_PT_TIME_SHIFT, + INTEL_PT_TIME_MULT, + INTEL_PT_TIME_ZERO, + INTEL_PT_CAP_USER_TIME_ZERO, + INTEL_PT_TSC_BIT, + INTEL_PT_NORETCOMP_BIT, + INTEL_PT_HAVE_SCHED_SWITCH, + INTEL_PT_SNAPSHOT_MODE, + INTEL_PT_PER_CPU_MMAPS, + INTEL_PT_MTC_BIT, + INTEL_PT_MTC_FREQ_BITS, + INTEL_PT_TSC_CTC_N, + INTEL_PT_TSC_CTC_D, + INTEL_PT_CYC_BIT, + INTEL_PT_AUXTRACE_PRIV_MAX, +}; + +#define INTEL_PT_AUXTRACE_PRIV_SIZE (INTEL_PT_AUXTRACE_PRIV_MAX * sizeof(u64)) + +struct auxtrace_record; +struct perf_tool; +union perf_event; +struct perf_session; +struct perf_event_attr; +struct perf_pmu; + +struct auxtrace_record *intel_pt_recording_init(int *err); + +int intel_pt_process_auxtrace_info(union perf_event *event, + struct perf_session *session); + +struct perf_event_attr *intel_pt_pmu_default_config(struct perf_pmu *pmu); + +#endif diff --git a/kernel/tools/perf/util/llvm-utils.c b/kernel/tools/perf/util/llvm-utils.c new file mode 100644 index 000000000..00724d496 --- /dev/null +++ b/kernel/tools/perf/util/llvm-utils.c @@ -0,0 +1,430 @@ +/* + * Copyright (C) 2015, Wang Nan + * Copyright (C) 2015, Huawei Inc. 
+ */ + +#include <stdio.h> +#include "util.h" +#include "debug.h" +#include "llvm-utils.h" +#include "cache.h" + +#define CLANG_BPF_CMD_DEFAULT_TEMPLATE \ + "$CLANG_EXEC -D__KERNEL__ -D__NR_CPUS__=$NR_CPUS "\ + "-DLINUX_VERSION_CODE=$LINUX_VERSION_CODE " \ + "$CLANG_OPTIONS $KERNEL_INC_OPTIONS " \ + "-Wno-unused-value -Wno-pointer-sign " \ + "-working-directory $WORKING_DIR " \ + "-c \"$CLANG_SOURCE\" -target bpf -O2 -o -" + +struct llvm_param llvm_param = { + .clang_path = "clang", + .clang_bpf_cmd_template = CLANG_BPF_CMD_DEFAULT_TEMPLATE, + .clang_opt = NULL, + .kbuild_dir = NULL, + .kbuild_opts = NULL, + .user_set_param = false, +}; + +int perf_llvm_config(const char *var, const char *value) +{ + if (prefixcmp(var, "llvm.")) + return 0; + var += sizeof("llvm.") - 1; + + if (!strcmp(var, "clang-path")) + llvm_param.clang_path = strdup(value); + else if (!strcmp(var, "clang-bpf-cmd-template")) + llvm_param.clang_bpf_cmd_template = strdup(value); + else if (!strcmp(var, "clang-opt")) + llvm_param.clang_opt = strdup(value); + else if (!strcmp(var, "kbuild-dir")) + llvm_param.kbuild_dir = strdup(value); + else if (!strcmp(var, "kbuild-opts")) + llvm_param.kbuild_opts = strdup(value); + else + return -1; + llvm_param.user_set_param = true; + return 0; +} + +static int +search_program(const char *def, const char *name, + char *output) +{ + char *env, *path, *tmp = NULL; + char buf[PATH_MAX]; + int ret; + + output[0] = '\0'; + if (def && def[0] != '\0') { + if (def[0] == '/') { + if (access(def, F_OK) == 0) { + strlcpy(output, def, PATH_MAX); + return 0; + } + } else if (def[0] != '\0') + name = def; + } + + env = getenv("PATH"); + if (!env) + return -1; + env = strdup(env); + if (!env) + return -1; + + ret = -ENOENT; + path = strtok_r(env, ":", &tmp); + while (path) { + scnprintf(buf, sizeof(buf), "%s/%s", path, name); + if (access(buf, F_OK) == 0) { + strlcpy(output, buf, PATH_MAX); + ret = 0; + break; + } + path = strtok_r(NULL, ":", &tmp); + } + + free(env); + return ret; +} + +#define READ_SIZE 4096 +static int +read_from_pipe(const char *cmd, void **p_buf, size_t *p_read_sz) +{ + int err = 0; + void *buf = NULL; + FILE *file = NULL; + size_t read_sz = 0, buf_sz = 0; + + file = popen(cmd, "r"); + if (!file) { + pr_err("ERROR: unable to popen cmd: %s\n", + strerror(errno)); + return -EINVAL; + } + + while (!feof(file) && !ferror(file)) { + /* + * Make buf_sz always have one byte of extra space so we + * can put '\0' there. + */ + if (buf_sz - read_sz < READ_SIZE + 1) { + void *new_buf; + + buf_sz = read_sz + READ_SIZE + 1; + new_buf = realloc(buf, buf_sz); + + if (!new_buf) { + pr_err("ERROR: failed to realloc memory\n"); + err = -ENOMEM; + goto errout; + } + + buf = new_buf; + } + read_sz += fread(buf + read_sz, 1, READ_SIZE, file); + } + + if (buf_sz - read_sz < 1) { + pr_err("ERROR: internal error\n"); + err = -EINVAL; + goto errout; + } + + if (ferror(file)) { + pr_err("ERROR: error occurred when reading from pipe: %s\n", + strerror(errno)); + err = -EIO; + goto errout; + } + + err = WEXITSTATUS(pclose(file)); + file = NULL; + if (err) { + err = -EINVAL; + goto errout; + } + + /* + * If buf is a string, give it a terminating '\0' to make our life + * easier. If buf is not a string, that '\0' is outside the space + * indicated by read_sz, so the caller won't even notice it. 
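+ * + * Illustrative use (an editing example, not part of the original + * change; any command whose output fits in memory works): + * + * void *out; + * size_t sz; + * + * if (!read_from_pipe("clang --version", &out, &sz)) { + * printf("%s", (char *)out); + * free(out); + * } 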
+ */ + ((char *)buf)[read_sz] = '\0'; + + if (!p_buf) + free(buf); + else + *p_buf = buf; + + if (p_read_sz) + *p_read_sz = read_sz; + return 0; + +errout: + if (file) + pclose(file); + free(buf); + if (p_buf) + *p_buf = NULL; + if (p_read_sz) + *p_read_sz = 0; + return err; +} + +static inline void +force_set_env(const char *var, const char *value) +{ + if (value) { + setenv(var, value, 1); + pr_debug("set env: %s=%s\n", var, value); + } else { + unsetenv(var); + pr_debug("unset env: %s\n", var); + } +} + +static void +version_notice(void) +{ + pr_err( +" \tLLVM 3.7 or newer is required; it can be found at http://llvm.org\n" +" \tYou may want to try git trunk:\n" +" \t\tgit clone http://llvm.org/git/llvm.git\n" +" \t\t and\n" +" \t\tgit clone http://llvm.org/git/clang.git\n\n" +" \tOr fetch the latest clang/llvm 3.7 from pre-built llvm packages for\n" +" \tdebian/ubuntu:\n" +" \t\thttp://llvm.org/apt\n\n" +" \tIf you are using an old version of clang, change 'clang-bpf-cmd-template'\n" +" \toption in [llvm] section of ~/.perfconfig to:\n\n" +" \t \"$CLANG_EXEC $CLANG_OPTIONS $KERNEL_INC_OPTIONS \\\n" +" \t -working-directory $WORKING_DIR -c $CLANG_SOURCE \\\n" +" \t -emit-llvm -o - | /path/to/llc -march=bpf -filetype=obj -o -\"\n" +" \t(Replace /path/to/llc with path to your llc)\n\n" +); +} + +static int detect_kbuild_dir(char **kbuild_dir) +{ + const char *test_dir = llvm_param.kbuild_dir; + const char *prefix_dir = ""; + const char *suffix_dir = ""; + + char *autoconf_path; + + int err; + + if (!test_dir) { + /* _UTSNAME_LENGTH is 65 */ + char release[128]; + + err = fetch_kernel_version(NULL, release, + sizeof(release)); + if (err) + return -EINVAL; + + test_dir = release; + prefix_dir = "/lib/modules/"; + suffix_dir = "/build"; + } + + err = asprintf(&autoconf_path, "%s%s%s/include/generated/autoconf.h", + prefix_dir, test_dir, suffix_dir); + if (err < 0) + return -ENOMEM; + + if (access(autoconf_path, R_OK) == 0) { + free(autoconf_path); + + err = asprintf(kbuild_dir, "%s%s%s", prefix_dir, test_dir, + suffix_dir); + if (err < 0) + return -ENOMEM; + return 0; + } + free(autoconf_path); + return -ENOENT; +} + +static const char *kinc_fetch_script = +"#!/usr/bin/env sh\n" +"if ! test -d \"$KBUILD_DIR\"\n" +"then\n" +" exit -1\n" +"fi\n" +"if ! 
test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n" +"then\n" +" exit -1\n" +"fi\n" +"TMPDIR=`mktemp -d`\n" +"if test -z \"$TMPDIR\"\n" +"then\n" +" exit -1\n" +"fi\n" +"cat << EOF > $TMPDIR/Makefile\n" +"obj-y := dummy.o\n" +"\\$(obj)/%.o: \\$(src)/%.c\n" +"\t@echo -n \"\\$(NOSTDINC_FLAGS) \\$(LINUXINCLUDE) \\$(EXTRA_CFLAGS)\"\n" +"EOF\n" +"touch $TMPDIR/dummy.c\n" +"make -s -C $KBUILD_DIR M=$TMPDIR $KBUILD_OPTS dummy.o 2>/dev/null\n" +"RET=$?\n" +"rm -rf $TMPDIR\n" +"exit $RET\n"; + +static inline void +get_kbuild_opts(char **kbuild_dir, char **kbuild_include_opts) +{ + int err; + + if (!kbuild_dir || !kbuild_include_opts) + return; + + *kbuild_dir = NULL; + *kbuild_include_opts = NULL; + + if (llvm_param.kbuild_dir && !llvm_param.kbuild_dir[0]) { + pr_debug("[llvm.kbuild-dir] is set to \"\" deliberately.\n"); + pr_debug("Skip kbuild options detection.\n"); + return; + } + + err = detect_kbuild_dir(kbuild_dir); + if (err) { + pr_warning( +"WARNING:\tunable to get correct kernel building directory.\n" +"Hint:\tSet correct kbuild directory using 'kbuild-dir' option in [llvm]\n" +" \tsection of ~/.perfconfig or set it to \"\" to suppress kbuild\n" +" \tdetection.\n\n"); + return; + } + + pr_debug("Kernel build dir is set to %s\n", *kbuild_dir); + force_set_env("KBUILD_DIR", *kbuild_dir); + force_set_env("KBUILD_OPTS", llvm_param.kbuild_opts); + err = read_from_pipe(kinc_fetch_script, + (void **)kbuild_include_opts, + NULL); + if (err) { + pr_warning( +"WARNING:\tunable to get kernel include directories from '%s'\n" +"Hint:\tTry setting clang include options using 'clang-bpf-cmd-template'\n" +" \toption in [llvm] section of ~/.perfconfig and set 'kbuild-dir'\n" +" \toption in [llvm] to \"\" to suppress this detection.\n\n", + *kbuild_dir); + + free(*kbuild_dir); + *kbuild_dir = NULL; + return; + } + + pr_debug("include option is set to %s\n", *kbuild_include_opts); +} + +int llvm__compile_bpf(const char *path, void **p_obj_buf, + size_t *p_obj_buf_sz) +{ + size_t obj_buf_sz; + void *obj_buf = NULL; + int err, nr_cpus_avail; + unsigned int kernel_version; + char linux_version_code_str[64]; + const char *clang_opt = llvm_param.clang_opt; + char clang_path[PATH_MAX], nr_cpus_avail_str[64]; + char *kbuild_dir = NULL, *kbuild_include_opts = NULL; + const char *template = llvm_param.clang_bpf_cmd_template; + + if (!template) + template = CLANG_BPF_CMD_DEFAULT_TEMPLATE; + + err = search_program(llvm_param.clang_path, + "clang", clang_path); + if (err) { + pr_err( +"ERROR:\tunable to find clang.\n" +"Hint:\tTry to install latest clang/llvm to support BPF. Check your $PATH\n" +" \tand 'clang-path' option in [llvm] section of ~/.perfconfig.\n"); + version_notice(); + return -ENOENT; + } + + /* + * This is optional work. Even if it fails we can continue, so + * there is no need to check the error return. 
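+ * + * For example (values are illustrative only), the detection can be + * pinned from ~/.perfconfig instead: + * + * [llvm] + * kbuild-dir = /lib/modules/4.4.6/build + * kbuild-opts = ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- 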
+ */ + get_kbuild_opts(&kbuild_dir, &kbuild_include_opts); + + nr_cpus_avail = sysconf(_SC_NPROCESSORS_CONF); + if (nr_cpus_avail <= 0) { + pr_err( +"WARNING:\tunable to get available CPUs in this system: %s\n" +" \tUse 128 instead.\n", strerror(errno)); + nr_cpus_avail = 128; + } + snprintf(nr_cpus_avail_str, sizeof(nr_cpus_avail_str), "%d", + nr_cpus_avail); + + if (fetch_kernel_version(&kernel_version, NULL, 0)) + kernel_version = 0; + + snprintf(linux_version_code_str, sizeof(linux_version_code_str), + "0x%x", kernel_version); + + force_set_env("NR_CPUS", nr_cpus_avail_str); + force_set_env("LINUX_VERSION_CODE", linux_version_code_str); + force_set_env("CLANG_EXEC", clang_path); + force_set_env("CLANG_OPTIONS", clang_opt); + force_set_env("KERNEL_INC_OPTIONS", kbuild_include_opts); + force_set_env("WORKING_DIR", kbuild_dir ? : "."); + + /* + * Since we may reset clang's working dir, path of source file + * should be transferred into absolute path, except we want + * stdin to be source file (testing). + */ + force_set_env("CLANG_SOURCE", + (path[0] == '-') ? path : + make_nonrelative_path(path)); + + pr_debug("llvm compiling command template: %s\n", template); + err = read_from_pipe(template, &obj_buf, &obj_buf_sz); + if (err) { + pr_err("ERROR:\tunable to compile %s\n", path); + pr_err("Hint:\tCheck error message shown above.\n"); + pr_err("Hint:\tYou can also pre-compile it into .o using:\n"); + pr_err(" \t\tclang -target bpf -O2 -c %s\n", path); + pr_err(" \twith proper -I and -D options.\n"); + goto errout; + } + + free(kbuild_dir); + free(kbuild_include_opts); + if (!p_obj_buf) + free(obj_buf); + else + *p_obj_buf = obj_buf; + + if (p_obj_buf_sz) + *p_obj_buf_sz = obj_buf_sz; + return 0; +errout: + free(kbuild_dir); + free(kbuild_include_opts); + free(obj_buf); + if (p_obj_buf) + *p_obj_buf = NULL; + if (p_obj_buf_sz) + *p_obj_buf_sz = 0; + return err; +} + +int llvm__search_clang(void) +{ + char clang_path[PATH_MAX]; + + return search_program(llvm_param.clang_path, "clang", clang_path); +} diff --git a/kernel/tools/perf/util/llvm-utils.h b/kernel/tools/perf/util/llvm-utils.h new file mode 100644 index 000000000..5b3cf1c22 --- /dev/null +++ b/kernel/tools/perf/util/llvm-utils.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2015, Wang Nan + * Copyright (C) 2015, Huawei Inc. + */ +#ifndef __LLVM_UTILS_H +#define __LLVM_UTILS_H + +#include "debug.h" + +struct llvm_param { + /* Path of clang executable */ + const char *clang_path; + /* + * Template of clang bpf compiling. 5 env variables + * can be used: + * $CLANG_EXEC: Path to clang. + * $CLANG_OPTIONS: Extra options to clang. + * $KERNEL_INC_OPTIONS: Kernel include directories. + * $WORKING_DIR: Kernel source directory. + * $CLANG_SOURCE: Source file to be compiled. + */ + const char *clang_bpf_cmd_template; + /* Will be filled in $CLANG_OPTIONS */ + const char *clang_opt; + /* Where to find kbuild system */ + const char *kbuild_dir; + /* + * Arguments passed to make, like 'ARCH=arm' if doing cross + * compiling. Should not be used for dynamic compiling. + */ + const char *kbuild_opts; + /* + * Default is false. If one of the above fields is set by user + * explicitly then user_set_llvm is set to true. This is used + * for perf test. If user doesn't set anything in .perfconfig + * and clang is not found, don't trigger llvm test. 
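/*
 * Editorial sketch of the calling convention for llvm__compile_bpf()
 * above (caller hypothetical; assumes "llvm-utils.h" and <stdlib.h>).
 * On success the caller owns the in-memory BPF ELF image and must
 * free() it.
 */
static int compile_one(const char *path)
{
	void *obj_buf;
	size_t obj_buf_sz;
	int err = llvm__compile_bpf(path, &obj_buf, &obj_buf_sz);

	if (err)
		return err;	/* diagnostics were already printed */

	/* hand obj_buf/obj_buf_sz to a BPF object loader here */
	free(obj_buf);
	return 0;
}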
+ */ + bool user_set_param; +}; + +extern struct llvm_param llvm_param; +extern int perf_llvm_config(const char *var, const char *value); + +extern int llvm__compile_bpf(const char *path, void **p_obj_buf, + size_t *p_obj_buf_sz); + +/* This function is for test__llvm() use only */ +extern int llvm__search_clang(void); +#endif diff --git a/kernel/tools/perf/util/machine.c b/kernel/tools/perf/util/machine.c index 527e032e2..8b303ff20 100644 --- a/kernel/tools/perf/util/machine.c +++ b/kernel/tools/perf/util/machine.c @@ -14,24 +14,28 @@ #include "unwind.h" #include "linux/hash.h" +static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock); + static void dsos__init(struct dsos *dsos) { INIT_LIST_HEAD(&dsos->head); dsos->root = RB_ROOT; + pthread_rwlock_init(&dsos->lock, NULL); } int machine__init(struct machine *machine, const char *root_dir, pid_t pid) { map_groups__init(&machine->kmaps, machine); RB_CLEAR_NODE(&machine->rb_node); - dsos__init(&machine->user_dsos); - dsos__init(&machine->kernel_dsos); + dsos__init(&machine->dsos); machine->threads = RB_ROOT; + pthread_rwlock_init(&machine->threads_lock, NULL); INIT_LIST_HEAD(&machine->dead_threads); machine->last_match = NULL; machine->vdso_info = NULL; + machine->env = NULL; machine->pid = pid; @@ -54,6 +58,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid) snprintf(comm, sizeof(comm), "[guest/%d]", pid); thread__set_comm(thread, comm, 0); + thread__put(thread); } machine->current_tid = NULL; @@ -78,37 +83,51 @@ out_delete: return NULL; } -static void dsos__delete(struct dsos *dsos) +static void dsos__purge(struct dsos *dsos) { struct dso *pos, *n; + pthread_rwlock_wrlock(&dsos->lock); + list_for_each_entry_safe(pos, n, &dsos->head, node) { RB_CLEAR_NODE(&pos->rb_node); - list_del(&pos->node); - dso__delete(pos); + pos->root = NULL; + list_del_init(&pos->node); + dso__put(pos); } + + pthread_rwlock_unlock(&dsos->lock); +} + +static void dsos__exit(struct dsos *dsos) +{ + dsos__purge(dsos); + pthread_rwlock_destroy(&dsos->lock); } void machine__delete_threads(struct machine *machine) { - struct rb_node *nd = rb_first(&machine->threads); + struct rb_node *nd; + pthread_rwlock_wrlock(&machine->threads_lock); + nd = rb_first(&machine->threads); while (nd) { struct thread *t = rb_entry(nd, struct thread, rb_node); nd = rb_next(nd); - machine__remove_thread(machine, t); + __machine__remove_thread(machine, t, false); } + pthread_rwlock_unlock(&machine->threads_lock); } void machine__exit(struct machine *machine) { map_groups__exit(&machine->kmaps); - dsos__delete(&machine->user_dsos); - dsos__delete(&machine->kernel_dsos); - vdso__exit(machine); + dsos__exit(&machine->dsos); + machine__exit_vdso(machine); zfree(&machine->root_dir); zfree(&machine->current_tid); + pthread_rwlock_destroy(&machine->threads_lock); } void machine__delete(struct machine *machine) @@ -233,7 +252,7 @@ struct machine *machines__findnew(struct machines *machines, pid_t pid) static struct strlist *seen; if (!seen) - seen = strlist__new(true, NULL); + seen = strlist__new(NULL, NULL); if (!strlist__has_entry(seen, path)) { pr_err("Can't access file %s\n", path); @@ -303,7 +322,7 @@ static void machine__update_thread_pid(struct machine *machine, if (th->pid_ == th->tid) return; - leader = machine__findnew_thread(machine, th->pid_, th->pid_); + leader = __machine__findnew_thread(machine, th->pid_, th->pid_); if (!leader) goto out_err; @@ -325,7 +344,7 @@ static void machine__update_thread_pid(struct machine 
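/*
 * Editorial note: every container that grows a pthread_rwlock_t in
 * this patch follows the same lifecycle, shown here for dsos as an
 * annotated fragment: initialize with the object, write-lock around
 * mutation, destroy with the object.
 */
pthread_rwlock_init(&dsos->lock, NULL);		/* dsos__init()  */

pthread_rwlock_wrlock(&dsos->lock);		/* dsos__purge() */
/* ... unlink every entry and dso__put() it ... */
pthread_rwlock_unlock(&dsos->lock);

pthread_rwlock_destroy(&dsos->lock);		/* dsos__exit()  */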
*machine, if (!map_groups__empty(th->mg)) pr_err("Discarding thread maps for %d:%d\n", th->pid_, th->tid); - map_groups__delete(th->mg); + map_groups__put(th->mg); } th->mg = map_groups__get(leader->mg); @@ -336,9 +355,9 @@ out_err: pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid); } -static struct thread *__machine__findnew_thread(struct machine *machine, - pid_t pid, pid_t tid, - bool create) +static struct thread *____machine__findnew_thread(struct machine *machine, + pid_t pid, pid_t tid, + bool create) { struct rb_node **p = &machine->threads.rb_node; struct rb_node *parent = NULL; @@ -356,7 +375,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine, return th; } - thread__zput(machine->last_match); + machine->last_match = NULL; } while (*p != NULL) { @@ -364,7 +383,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine, th = rb_entry(parent, struct thread, rb_node); if (th->tid == tid) { - machine->last_match = thread__get(th); + machine->last_match = th; machine__update_thread_pid(machine, th, pid); return th; } @@ -392,7 +411,8 @@ static struct thread *__machine__findnew_thread(struct machine *machine, * leader and that would screwed the rb tree. */ if (thread__init_map_groups(th, machine)) { - rb_erase(&th->rb_node, &machine->threads); + rb_erase_init(&th->rb_node, &machine->threads); + RB_CLEAR_NODE(&th->rb_node); thread__delete(th); return NULL; } @@ -400,22 +420,36 @@ static struct thread *__machine__findnew_thread(struct machine *machine, * It is now in the rbtree, get a ref */ thread__get(th); - machine->last_match = thread__get(th); + machine->last_match = th; } return th; } +struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) +{ + return ____machine__findnew_thread(machine, pid, tid, true); +} + struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) { - return __machine__findnew_thread(machine, pid, tid, true); + struct thread *th; + + pthread_rwlock_wrlock(&machine->threads_lock); + th = thread__get(__machine__findnew_thread(machine, pid, tid)); + pthread_rwlock_unlock(&machine->threads_lock); + return th; } struct thread *machine__find_thread(struct machine *machine, pid_t pid, pid_t tid) { - return __machine__findnew_thread(machine, pid, tid, false); + struct thread *th; + pthread_rwlock_rdlock(&machine->threads_lock); + th = thread__get(____machine__findnew_thread(machine, pid, tid, false)); + pthread_rwlock_unlock(&machine->threads_lock); + return th; } struct comm *machine__thread_exec_comm(struct machine *machine, @@ -434,6 +468,7 @@ int machine__process_comm_event(struct machine *machine, union perf_event *event event->comm.pid, event->comm.tid); bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC; + int err = 0; if (exec) machine->comm_exec = true; @@ -444,10 +479,12 @@ int machine__process_comm_event(struct machine *machine, union perf_event *event if (thread == NULL || __thread__set_comm(thread, event->comm.comm, sample->time, exec)) { dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); - return -1; + err = -1; } - return 0; + thread__put(thread); + + return err; } int machine__process_lost_event(struct machine *machine __maybe_unused, @@ -458,17 +495,27 @@ int machine__process_lost_event(struct machine *machine __maybe_unused, return 0; } -static struct dso* -machine__module_dso(struct machine *machine, struct kmod_path *m, - const char *filename) +int machine__process_lost_samples_event(struct machine 
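/*
 * Editorial note: with the hunks above, machine__find_thread() and
 * machine__findnew_thread() take machine->threads_lock internally and
 * return a counted reference grabbed *before* unlocking, so the
 * caller-side discipline becomes (fragment, error paths trimmed):
 */
struct thread *th = machine__find_thread(machine, pid, tid);

if (th != NULL) {
	/* th stays valid even if it is concurrently removed
	 * from machine->threads */
	thread__put(th);	/* drop the lookup's reference */
}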
*machine __maybe_unused, + union perf_event *event, struct perf_sample *sample) +{ + dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n", + sample->id, event->lost_samples.lost); + return 0; +} + +static struct dso *machine__findnew_module_dso(struct machine *machine, + struct kmod_path *m, + const char *filename) { struct dso *dso; - dso = dsos__find(&machine->kernel_dsos, m->name, true); + pthread_rwlock_wrlock(&machine->dsos.lock); + + dso = __dsos__find(&machine->dsos, m->name, true); if (!dso) { - dso = dsos__addnew(&machine->kernel_dsos, m->name); + dso = __dsos__addnew(&machine->dsos, m->name); if (dso == NULL) - return NULL; + goto out_unlock; if (machine__is_host(machine)) dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; @@ -483,11 +530,38 @@ machine__module_dso(struct machine *machine, struct kmod_path *m, dso__set_long_name(dso, strdup(filename), true); } + dso__get(dso); +out_unlock: + pthread_rwlock_unlock(&machine->dsos.lock); return dso; } -struct map *machine__new_module(struct machine *machine, u64 start, - const char *filename) +int machine__process_aux_event(struct machine *machine __maybe_unused, + union perf_event *event) +{ + if (dump_trace) + perf_event__fprintf_aux(event, stdout); + return 0; +} + +int machine__process_itrace_start_event(struct machine *machine __maybe_unused, + union perf_event *event) +{ + if (dump_trace) + perf_event__fprintf_itrace_start(event, stdout); + return 0; +} + +int machine__process_switch_event(struct machine *machine __maybe_unused, + union perf_event *event) +{ + if (dump_trace) + perf_event__fprintf_switch(event, stdout); + return 0; +} + +struct map *machine__findnew_module_map(struct machine *machine, u64 start, + const char *filename) { struct map *map = NULL; struct dso *dso; @@ -501,7 +575,7 @@ struct map *machine__new_module(struct machine *machine, u64 start, if (map) goto out; - dso = machine__module_dso(machine, &m, filename); + dso = machine__findnew_module_dso(machine, &m, filename); if (dso == NULL) goto out; @@ -519,13 +593,11 @@ out: size_t machines__fprintf_dsos(struct machines *machines, FILE *fp) { struct rb_node *nd; - size_t ret = __dsos__fprintf(&machines->host.kernel_dsos.head, fp) + - __dsos__fprintf(&machines->host.user_dsos.head, fp); + size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp); for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); - ret += __dsos__fprintf(&pos->kernel_dsos.head, fp); - ret += __dsos__fprintf(&pos->user_dsos.head, fp); + ret += __dsos__fprintf(&pos->dsos.head, fp); } return ret; @@ -534,8 +606,7 @@ size_t machines__fprintf_dsos(struct machines *machines, FILE *fp) size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp, bool (skip)(struct dso *dso, int parm), int parm) { - return __dsos__fprintf_buildid(&m->kernel_dsos.head, fp, skip, parm) + - __dsos__fprintf_buildid(&m->user_dsos.head, fp, skip, parm); + return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm); } size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, @@ -555,7 +626,7 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) { int i; size_t printed = 0; - struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso; + struct dso *kdso = machine__kernel_map(machine)->dso; if (kdso->has_build_id) { char filename[PATH_MAX]; @@ -575,12 +646,16 @@ size_t machine__fprintf(struct machine *machine, FILE *fp) size_t ret = 0; struct rb_node *nd; + 
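/*
 * Editorial note: machine__findnew_module_dso() above is an instance
 * of a recurring shape in this patch: find-or-add under the write
 * lock, take a reference while still locked, return the pinned
 * object. Generic rendering (all names hypothetical):
 */
static struct widget *widgets__findnew(struct widgets *ws, const char *name)
{
	struct widget *w;

	pthread_rwlock_wrlock(&ws->lock);
	w = __widgets__find(ws, name);		/* lock already held */
	if (w == NULL)
		w = __widgets__add(ws, name);
	if (w != NULL)
		widget__get(w);		/* pin before dropping the lock */
	pthread_rwlock_unlock(&ws->lock);
	return w;
}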
pthread_rwlock_rdlock(&machine->threads_lock); + for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { struct thread *pos = rb_entry(nd, struct thread, rb_node); ret += thread__fprintf(pos, fp); } + pthread_rwlock_unlock(&machine->threads_lock); + return ret; } @@ -594,9 +669,8 @@ static struct dso *machine__get_kernel(struct machine *machine) if (!vmlinux_name) vmlinux_name = "[kernel.kallsyms]"; - kernel = dso__kernel_findnew(machine, vmlinux_name, - "[kernel]", - DSO_TYPE_KERNEL); + kernel = machine__findnew_kernel(machine, vmlinux_name, + "[kernel]", DSO_TYPE_KERNEL); } else { char bf[PATH_MAX]; @@ -606,9 +680,9 @@ static struct dso *machine__get_kernel(struct machine *machine) vmlinux_name = machine__mmap_name(machine, bf, sizeof(bf)); - kernel = dso__kernel_findnew(machine, vmlinux_name, - "[guest.kernel]", - DSO_TYPE_GUEST_KERNEL); + kernel = machine__findnew_kernel(machine, vmlinux_name, + "[guest.kernel]", + DSO_TYPE_GUEST_KERNEL); } if (kernel != NULL && (!kernel->has_build_id)) @@ -668,6 +742,7 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) for (type = 0; type < MAP__NR_TYPES; ++type) { struct kmap *kmap; + struct map *map; machine->vmlinux_maps[type] = map__new2(start, kernel, type); if (machine->vmlinux_maps[type] == NULL) @@ -676,13 +751,13 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) machine->vmlinux_maps[type]->map_ip = machine->vmlinux_maps[type]->unmap_ip = identity__map_ip; - kmap = map__kmap(machine->vmlinux_maps[type]); + map = __machine__kernel_map(machine, type); + kmap = map__kmap(map); if (!kmap) return -1; kmap->kmaps = &machine->kmaps; - map_groups__insert(&machine->kmaps, - machine->vmlinux_maps[type]); + map_groups__insert(&machine->kmaps, map); } return 0; @@ -694,13 +769,13 @@ void machine__destroy_kernel_maps(struct machine *machine) for (type = 0; type < MAP__NR_TYPES; ++type) { struct kmap *kmap; + struct map *map = __machine__kernel_map(machine, type); - if (machine->vmlinux_maps[type] == NULL) + if (map == NULL) continue; - kmap = map__kmap(machine->vmlinux_maps[type]); - map_groups__remove(&machine->kmaps, - machine->vmlinux_maps[type]); + kmap = map__kmap(map); + map_groups__remove(&machine->kmaps, map); if (kmap && kmap->ref_reloc_sym) { /* * ref_reloc_sym is shared among all maps, so free just @@ -713,7 +788,6 @@ void machine__destroy_kernel_maps(struct machine *machine) kmap->ref_reloc_sym = NULL; } - map__delete(machine->vmlinux_maps[type]); machine->vmlinux_maps[type] = NULL; } } @@ -795,7 +869,7 @@ int machines__create_kernel_maps(struct machines *machines, pid_t pid) int machine__load_kallsyms(struct machine *machine, const char *filename, enum map_type type, symbol_filter_t filter) { - struct map *map = machine->vmlinux_maps[type]; + struct map *map = machine__kernel_map(machine); int ret = dso__load_kallsyms(map->dso, filename, map, filter); if (ret > 0) { @@ -814,7 +888,7 @@ int machine__load_kallsyms(struct machine *machine, const char *filename, int machine__load_vmlinux_path(struct machine *machine, enum map_type type, symbol_filter_t filter) { - struct map *map = machine->vmlinux_maps[type]; + struct map *map = machine__kernel_map(machine); int ret = dso__load_vmlinux_path(map->dso, map, filter); if (ret > 0) @@ -970,7 +1044,7 @@ static int machine__create_module(void *arg, const char *name, u64 start) struct machine *machine = arg; struct map *map; - map = machine__new_module(machine, start, name); + map = machine__findnew_module_map(machine, start, name); 
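/*
 * Editorial note: the hunks above swap open-coded
 * machine->vmlinux_maps[MAP__FUNCTION] accesses for the accessor pair
 * that the machine.h part of this patch introduces:
 */
struct map *fmap = machine__kernel_map(machine);	/* MAP__FUNCTION */
struct map *vmap = __machine__kernel_map(machine, MAP__VARIABLE);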
if (map == NULL) return -1; @@ -1062,7 +1136,7 @@ static bool machine__uses_kcore(struct machine *machine) { struct dso *dso; - list_for_each_entry(dso, &machine->kernel_dsos.head, node) { + list_for_each_entry(dso, &machine->dsos.head, node) { if (dso__is_kcore(dso)) return true; } @@ -1093,8 +1167,8 @@ static int machine__process_kernel_mmap_event(struct machine *machine, strlen(kmmap_prefix) - 1) == 0; if (event->mmap.filename[0] == '/' || (!is_kernel_mmap && event->mmap.filename[0] == '[')) { - map = machine__new_module(machine, event->mmap.start, - event->mmap.filename); + map = machine__findnew_module_map(machine, event->mmap.start, + event->mmap.filename); if (map == NULL) goto out_problem; @@ -1109,23 +1183,48 @@ static int machine__process_kernel_mmap_event(struct machine *machine, struct dso *kernel = NULL; struct dso *dso; - list_for_each_entry(dso, &machine->kernel_dsos.head, node) { - if (is_kernel_module(dso->long_name)) + pthread_rwlock_rdlock(&machine->dsos.lock); + + list_for_each_entry(dso, &machine->dsos.head, node) { + + /* + * The cpumode passed to is_kernel_module is not the + * cpumode of *this* event. If we insist on passing + * correct cpumode to is_kernel_module, we should + * record the cpumode when we adding this dso to the + * linked list. + * + * However we don't really need passing correct + * cpumode. We know the correct cpumode must be kernel + * mode (if not, we should not link it onto kernel_dsos + * list). + * + * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN. + * is_kernel_module() treats it as a kernel cpumode. + */ + + if (!dso->kernel || + is_kernel_module(dso->long_name, + PERF_RECORD_MISC_CPUMODE_UNKNOWN)) continue; + kernel = dso; break; } + pthread_rwlock_unlock(&machine->dsos.lock); + if (kernel == NULL) - kernel = __dsos__findnew(&machine->kernel_dsos, - kmmap_prefix); + kernel = machine__findnew_dso(machine, kmmap_prefix); if (kernel == NULL) goto out_problem; kernel->kernel = kernel_type; - if (__machine__create_kernel_maps(machine, kernel) < 0) + if (__machine__create_kernel_maps(machine, kernel) < 0) { + dso__put(kernel); goto out_problem; + } if (strstr(kernel->long_name, "vmlinux")) dso__set_short_name(kernel, "[kernel.vmlinux]", false); @@ -1147,8 +1246,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine, /* * preload dso of guest kernel and modules */ - dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION], - NULL); + dso__load(kernel, machine__kernel_map(machine), NULL); } } return 0; @@ -1197,11 +1295,15 @@ int machine__process_mmap2_event(struct machine *machine, event->mmap2.filename, type, thread); if (map == NULL) - goto out_problem; + goto out_problem_map; thread__insert_map(thread, map); + thread__put(thread); + map__put(map); return 0; +out_problem_map: + thread__put(thread); out_problem: dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n"); return 0; @@ -1244,31 +1346,46 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event type, thread); if (map == NULL) - goto out_problem; + goto out_problem_map; thread__insert_map(thread, map); + thread__put(thread); + map__put(map); return 0; +out_problem_map: + thread__put(thread); out_problem: dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); return 0; } -void machine__remove_thread(struct machine *machine, struct thread *th) +static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock) { if (machine->last_match == th) - thread__zput(machine->last_match); + 
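/*
 * Editorial note: the PERF_RECORD_MMAP/MMAP2 handlers above settle
 * the ownership rules: both lookups return +1 references, the map is
 * kept alive by thread__insert_map() taking its own reference, and
 * the handler drops what it took on success and failure alike.
 * Fragment (map__new() arguments elided):
 */
thread = machine__findnew_thread(machine, pid, tid);	/* +1 on thread */
map = map__new(machine, /* ...mmap fields... */);	/* +1 on map    */
thread__insert_map(thread, map);	/* map now pinned by the thread */
thread__put(thread);
map__put(map);		/* the thread's reference keeps it alive */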
machine->last_match = NULL; - rb_erase(&th->rb_node, &machine->threads); + BUG_ON(atomic_read(&th->refcnt) == 0); + if (lock) + pthread_rwlock_wrlock(&machine->threads_lock); + rb_erase_init(&th->rb_node, &machine->threads); + RB_CLEAR_NODE(&th->rb_node); /* * Move it first to the dead_threads list, then drop the reference, * if this is the last reference, then the thread__delete destructor * will be called and we will remove it from the dead_threads list. */ list_add_tail(&th->node, &machine->dead_threads); + if (lock) + pthread_rwlock_unlock(&machine->threads_lock); thread__put(th); } +void machine__remove_thread(struct machine *machine, struct thread *th) +{ + return __machine__remove_thread(machine, th, true); +} + int machine__process_fork_event(struct machine *machine, union perf_event *event, struct perf_sample *sample) { @@ -1278,23 +1395,44 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event struct thread *parent = machine__findnew_thread(machine, event->fork.ppid, event->fork.ptid); + int err = 0; + + if (dump_trace) + perf_event__fprintf_task(event, stdout); + + /* + * There may be an existing thread that is not actually the parent, + * either because we are processing events out of order, or because the + * (fork) event that would have removed the thread was lost. Assume the + * latter case and continue on as best we can. + */ + if (parent->pid_ != (pid_t)event->fork.ppid) { + dump_printf("removing erroneous parent thread %d/%d\n", + parent->pid_, parent->tid); + machine__remove_thread(machine, parent); + thread__put(parent); + parent = machine__findnew_thread(machine, event->fork.ppid, + event->fork.ptid); + } /* if a thread currently exists for the thread id remove it */ - if (thread != NULL) + if (thread != NULL) { machine__remove_thread(machine, thread); + thread__put(thread); + } thread = machine__findnew_thread(machine, event->fork.pid, event->fork.tid); - if (dump_trace) - perf_event__fprintf_task(event, stdout); if (thread == NULL || parent == NULL || thread__fork(thread, parent, sample->time) < 0) { dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); - return -1; + err = -1; } + thread__put(thread); + thread__put(parent); - return 0; + return err; } int machine__process_exit_event(struct machine *machine, union perf_event *event, @@ -1307,8 +1445,10 @@ int machine__process_exit_event(struct machine *machine, union perf_event *event if (dump_trace) perf_event__fprintf_task(event, stdout); - if (thread != NULL) + if (thread != NULL) { thread__exited(thread); + thread__put(thread); + } return 0; } @@ -1331,6 +1471,15 @@ int machine__process_event(struct machine *machine, union perf_event *event, ret = machine__process_exit_event(machine, event, sample); break; case PERF_RECORD_LOST: ret = machine__process_lost_event(machine, event, sample); break; + case PERF_RECORD_AUX: + ret = machine__process_aux_event(machine, event); break; + case PERF_RECORD_ITRACE_START: + ret = machine__process_itrace_start_event(machine, event); break; + case PERF_RECORD_LOST_SAMPLES: + ret = machine__process_lost_samples_event(machine, event, sample); break; + case PERF_RECORD_SWITCH: + case PERF_RECORD_SWITCH_CPU_WIDE: + ret = machine__process_switch_event(machine, event); break; default: ret = -1; break; @@ -1683,7 +1832,7 @@ static int thread__resolve_callchain_sample(struct thread *thread, } check_calls: - if (chain->nr > PERF_MAX_STACK_DEPTH) { + if (chain->nr > PERF_MAX_STACK_DEPTH && (int)chain->nr > max_stack) { pr_warning("corrupted 
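/*
 * Editorial note: __machine__remove_thread() carries a 'lock' flag so
 * one body serves two call sites: machine__delete_threads(), which
 * already holds threads_lock for its whole sweep, and the public
 * machine__remove_thread(). The idiom in general form (names
 * hypothetical):
 */
#include <pthread.h>
#include <stdbool.h>

struct table { pthread_rwlock_t lock; /* ... */ };

static void __table_remove(struct table *t, void *entry, bool lock)
{
	if (lock)
		pthread_rwlock_wrlock(&t->lock);
	/* ... unlink entry, drop its reference ... */
	if (lock)
		pthread_rwlock_unlock(&t->lock);
}

void table_remove(struct table *t, void *entry)
{
	__table_remove(t, entry, true);	/* standalone call takes the lock */
}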
callchain. skipping...\n"); return 0; } @@ -1769,14 +1918,36 @@ int machine__for_each_thread(struct machine *machine, return rc; } +int machines__for_each_thread(struct machines *machines, + int (*fn)(struct thread *thread, void *p), + void *priv) +{ + struct rb_node *nd; + int rc = 0; + + rc = machine__for_each_thread(&machines->host, fn, priv); + if (rc != 0) + return rc; + + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { + struct machine *machine = rb_entry(nd, struct machine, rb_node); + + rc = machine__for_each_thread(machine, fn, priv); + if (rc != 0) + return rc; + } + return rc; +} + int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, struct target *target, struct thread_map *threads, - perf_event__handler_t process, bool data_mmap) + perf_event__handler_t process, bool data_mmap, + unsigned int proc_map_timeout) { if (target__has_task(target)) - return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap); + return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout); else if (target__has_cpu(target)) - return perf_event__synthesize_threads(tool, process, machine, data_mmap); + return perf_event__synthesize_threads(tool, process, machine, data_mmap, proc_map_timeout); /* command specified */ return 0; } @@ -1820,13 +1991,14 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, return -ENOMEM; thread->cpu = cpu; + thread__put(thread); return 0; } int machine__get_kernel_start(struct machine *machine) { - struct map *map = machine__kernel_map(machine, MAP__FUNCTION); + struct map *map = machine__kernel_map(machine); int err = 0; /* @@ -1845,3 +2017,22 @@ int machine__get_kernel_start(struct machine *machine) } return err; } + +struct dso *machine__findnew_dso(struct machine *machine, const char *filename) +{ + return dsos__findnew(&machine->dsos, filename); +} + +char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp) +{ + struct machine *machine = vmachine; + struct map *map; + struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map, NULL); + + if (sym == NULL) + return NULL; + + *modp = __map__is_kmodule(map) ? 
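/*
 * Editorial note: a minimal user of machines__for_each_thread() added
 * above. The walk covers the host machine first, then every guest,
 * and stops early as soon as the callback returns non-zero
 * (callback and counter are hypothetical):
 */
static int count_thread(struct thread *thread __maybe_unused, void *priv)
{
	(*(int *)priv)++;
	return 0;		/* 0 == keep walking */
}

/* ... at the call site: */
int nr_threads = 0;

machines__for_each_thread(machines, count_thread, &nr_threads);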
(char *)map->dso->short_name : NULL; + *addrp = map->unmap_ip(map, sym->start); + return sym->name; +} diff --git a/kernel/tools/perf/util/machine.h b/kernel/tools/perf/util/machine.h index 6d64cedb9..2c2b443df 100644 --- a/kernel/tools/perf/util/machine.h +++ b/kernel/tools/perf/util/machine.h @@ -30,11 +30,12 @@ struct machine { bool comm_exec; char *root_dir; struct rb_root threads; + pthread_rwlock_t threads_lock; struct list_head dead_threads; struct thread *last_match; struct vdso_info *vdso_info; - struct dsos user_dsos; - struct dsos kernel_dsos; + struct perf_env *env; + struct dsos dsos; struct map_groups kmaps; struct map *vmlinux_maps[MAP__NR_TYPES]; u64 kernel_start; @@ -47,11 +48,17 @@ struct machine { }; static inline -struct map *machine__kernel_map(struct machine *machine, enum map_type type) +struct map *__machine__kernel_map(struct machine *machine, enum map_type type) { return machine->vmlinux_maps[type]; } +static inline +struct map *machine__kernel_map(struct machine *machine) +{ + return __machine__kernel_map(machine, MAP__FUNCTION); +} + int machine__get_kernel_start(struct machine *machine); static inline u64 machine__kernel_start(struct machine *machine) @@ -81,6 +88,14 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event struct perf_sample *sample); int machine__process_lost_event(struct machine *machine, union perf_event *event, struct perf_sample *sample); +int machine__process_lost_samples_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample); +int machine__process_aux_event(struct machine *machine, + union perf_event *event); +int machine__process_itrace_start_event(struct machine *machine, + union perf_event *event); +int machine__process_switch_event(struct machine *machine __maybe_unused, + union perf_event *event); int machine__process_mmap_event(struct machine *machine, union perf_event *event, struct perf_sample *sample); int machine__process_mmap2_event(struct machine *machine, union perf_event *event, @@ -147,8 +162,10 @@ static inline bool machine__is_host(struct machine *machine) return machine ? 
machine->pid == HOST_KERNEL_ID : false; } -struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, - pid_t tid); +struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); +struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); + +struct dso *machine__findnew_dso(struct machine *machine, const char *filename); size_t machine__fprintf(struct machine *machine, FILE *fp); @@ -181,8 +198,8 @@ struct symbol *machine__find_kernel_function_by_name(struct machine *machine, filter); } -struct map *machine__new_module(struct machine *machine, u64 start, - const char *filename); +struct map *machine__findnew_module_map(struct machine *machine, u64 start, + const char *filename); int machine__load_kallsyms(struct machine *machine, const char *filename, enum map_type type, symbol_filter_t filter); @@ -208,20 +225,30 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); int machine__for_each_thread(struct machine *machine, int (*fn)(struct thread *thread, void *p), void *priv); +int machines__for_each_thread(struct machines *machines, + int (*fn)(struct thread *thread, void *p), + void *priv); int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, struct target *target, struct thread_map *threads, - perf_event__handler_t process, bool data_mmap); + perf_event__handler_t process, bool data_mmap, + unsigned int proc_map_timeout); static inline int machine__synthesize_threads(struct machine *machine, struct target *target, - struct thread_map *threads, bool data_mmap) + struct thread_map *threads, bool data_mmap, + unsigned int proc_map_timeout) { return __machine__synthesize_threads(machine, NULL, target, threads, - perf_event__process, data_mmap); + perf_event__process, data_mmap, + proc_map_timeout); } pid_t machine__get_current_tid(struct machine *machine, int cpu); int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, pid_t tid); +/* + * For use with libtraceevent's pevent_set_function_resolver() + */ +char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp); #endif /* __PERF_MACHINE_H */ diff --git a/kernel/tools/perf/util/map.c b/kernel/tools/perf/util/map.c index a14f08f41..afc6b56cf 100644 --- a/kernel/tools/perf/util/map.c +++ b/kernel/tools/perf/util/map.c @@ -16,6 +16,8 @@ #include "machine.h" #include +static void __maps__insert(struct maps *maps, struct map *map); + const char *map_type__name[MAP__NR_TYPES] = { [MAP__FUNCTION] = "Functions", [MAP__VARIABLE] = "Variables", @@ -130,13 +132,13 @@ void map__init(struct map *map, enum map_type type, map->end = end; map->pgoff = pgoff; map->reloc = 0; - map->dso = dso; + map->dso = dso__get(dso); map->map_ip = map__map_ip; map->unmap_ip = map__unmap_ip; RB_CLEAR_NODE(&map->rb_node); map->groups = NULL; - map->referenced = false; map->erange_warned = false; + atomic_set(&map->refcnt, 1); } struct map *map__new(struct machine *machine, u64 start, u64 len, @@ -175,9 +177,9 @@ struct map *map__new(struct machine *machine, u64 start, u64 len, if (vdso) { pgoff = 0; - dso = vdso__dso_findnew(machine, thread); + dso = machine__findnew_vdso(machine, thread); } else - dso = __dsos__findnew(&machine->user_dsos, filename); + dso = machine__findnew_dso(machine, filename); if (dso == NULL) goto out_delete; @@ -195,6 +197,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len, if (type != MAP__FUNCTION) dso__set_loaded(dso, map->type); } + dso__put(dso); } return 
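/*
 * Editorial note: per the header comment above, the resolver plugs
 * into libtraceevent so kernel addresses in trace output resolve to
 * symbol names; the hookup looks roughly like this ('pevent' handle
 * and call site hypothetical):
 */
pevent_set_function_resolver(pevent,
			     machine__resolve_kernel_addr,
			     &session->machines.host);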
map; out_delete: @@ -221,11 +224,38 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type) return map; } +/* + * Use this and __map__is_kmodule() for map instances that are in + * machine->kmaps, and thus have map->groups->machine all properly set, to + * disambiguate between the kernel and modules. + * + * When the need arises, introduce map__is_{kernel,kmodule)() that + * checks (map->groups != NULL && map->groups->machine != NULL && + * map->dso->kernel) before calling __map__is_{kernel,kmodule}()) + */ +bool __map__is_kernel(const struct map *map) +{ + return __machine__kernel_map(map->groups->machine, map->type) == map; +} + +static void map__exit(struct map *map) +{ + BUG_ON(!RB_EMPTY_NODE(&map->rb_node)); + dso__zput(map->dso); +} + void map__delete(struct map *map) { + map__exit(map); free(map); } +void map__put(struct map *map) +{ + if (map && atomic_dec_and_test(&map->refcnt)) + map__delete(map); +} + void map__fixup_start(struct map *map) { struct rb_root *symbols = &map->dso->symbols[map->type]; @@ -292,6 +322,11 @@ int map__load(struct map *map, symbol_filter_t filter) return 0; } +int __weak arch__compare_symbol_names(const char *namea, const char *nameb) +{ + return strcmp(namea, nameb); +} + struct symbol *map__find_symbol(struct map *map, u64 addr, symbol_filter_t filter) { @@ -313,9 +348,18 @@ struct symbol *map__find_symbol_by_name(struct map *map, const char *name, return dso__find_symbol_by_name(map->dso, map->type, name); } -struct map *map__clone(struct map *map) +struct map *map__clone(struct map *from) { - return memdup(map, sizeof(*map)); + struct map *map = memdup(from, sizeof(*map)); + + if (map != NULL) { + atomic_set(&map->refcnt, 1); + RB_CLEAR_NODE(&map->rb_node); + dso__get(map->dso); + map->groups = NULL; + } + + return map; } int map__overlap(struct map *l, struct map *r) @@ -413,48 +457,49 @@ u64 map__objdump_2mem(struct map *map, u64 ip) return ip + map->reloc; } +static void maps__init(struct maps *maps) +{ + maps->entries = RB_ROOT; + pthread_rwlock_init(&maps->lock, NULL); +} + void map_groups__init(struct map_groups *mg, struct machine *machine) { int i; for (i = 0; i < MAP__NR_TYPES; ++i) { - mg->maps[i] = RB_ROOT; - INIT_LIST_HEAD(&mg->removed_maps[i]); + maps__init(&mg->maps[i]); } mg->machine = machine; - mg->refcnt = 1; + atomic_set(&mg->refcnt, 1); } -static void maps__delete(struct rb_root *maps) +static void __maps__purge(struct maps *maps) { - struct rb_node *next = rb_first(maps); + struct rb_root *root = &maps->entries; + struct rb_node *next = rb_first(root); while (next) { struct map *pos = rb_entry(next, struct map, rb_node); next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, maps); - map__delete(pos); + rb_erase_init(&pos->rb_node, root); + map__put(pos); } } -static void maps__delete_removed(struct list_head *maps) +static void maps__exit(struct maps *maps) { - struct map *pos, *n; - - list_for_each_entry_safe(pos, n, maps, node) { - list_del(&pos->node); - map__delete(pos); - } + pthread_rwlock_wrlock(&maps->lock); + __maps__purge(maps); + pthread_rwlock_unlock(&maps->lock); } void map_groups__exit(struct map_groups *mg) { int i; - for (i = 0; i < MAP__NR_TYPES; ++i) { - maps__delete(&mg->maps[i]); - maps__delete_removed(&mg->removed_maps[i]); - } + for (i = 0; i < MAP__NR_TYPES; ++i) + maps__exit(&mg->maps[i]); } bool map_groups__empty(struct map_groups *mg) @@ -464,8 +509,6 @@ bool map_groups__empty(struct map_groups *mg) for (i = 0; i < MAP__NR_TYPES; ++i) { if (maps__first(&mg->maps[i])) return false; - 
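/*
 * Editorial note: map life cycle after this patch, in miniature.
 * map__new()/map__new2() return a map with refcnt == 1; containers
 * take their own reference on insertion (see __maps__insert() below
 * calling map__get()), so the creator can drop its reference as soon
 * as the map is linked in:
 */
struct map *map = map__new2(start, dso, MAP__FUNCTION);	/* refcnt 1 */

maps__insert(maps, map);	/* the rbtree takes its own reference */
map__put(map);			/* ours is gone; the tree keeps it alive */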
if (!list_empty(&mg->removed_maps[i])) - return false; } return true; @@ -489,32 +532,10 @@ void map_groups__delete(struct map_groups *mg) void map_groups__put(struct map_groups *mg) { - if (--mg->refcnt == 0) + if (mg && atomic_dec_and_test(&mg->refcnt)) map_groups__delete(mg); } -void map_groups__flush(struct map_groups *mg) -{ - int type; - - for (type = 0; type < MAP__NR_TYPES; type++) { - struct rb_root *root = &mg->maps[type]; - struct rb_node *next = rb_first(root); - - while (next) { - struct map *pos = rb_entry(next, struct map, rb_node); - next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, root); - /* - * We may have references to this map, for - * instance in some hist_entry instances, so - * just move them to a separate list. - */ - list_add_tail(&pos->node, &mg->removed_maps[pos->type]); - } - } -} - struct symbol *map_groups__find_symbol(struct map_groups *mg, enum map_type type, u64 addr, struct map **mapp, @@ -532,26 +553,41 @@ struct symbol *map_groups__find_symbol(struct map_groups *mg, return NULL; } -struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, - enum map_type type, - const char *name, - struct map **mapp, - symbol_filter_t filter) +struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, + struct map **mapp, symbol_filter_t filter) { + struct symbol *sym; struct rb_node *nd; - for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { + pthread_rwlock_rdlock(&maps->lock); + + for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) { struct map *pos = rb_entry(nd, struct map, rb_node); - struct symbol *sym = map__find_symbol_by_name(pos, name, filter); + + sym = map__find_symbol_by_name(pos, name, filter); if (sym == NULL) continue; if (mapp != NULL) *mapp = pos; - return sym; + goto out; } - return NULL; + sym = NULL; +out: + pthread_rwlock_unlock(&maps->lock); + return sym; +} + +struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, + enum map_type type, + const char *name, + struct map **mapp, + symbol_filter_t filter) +{ + struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp, filter); + + return sym; } int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter) @@ -571,73 +607,60 @@ int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter) return ams->sym ? 
0 : -1; } -size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type, - FILE *fp) +static size_t maps__fprintf(struct maps *maps, FILE *fp) { - size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); + size_t printed = 0; struct rb_node *nd; - for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { + pthread_rwlock_rdlock(&maps->lock); + + for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) { struct map *pos = rb_entry(nd, struct map, rb_node); printed += fprintf(fp, "Map:"); printed += map__fprintf(pos, fp); if (verbose > 2) { - printed += dso__fprintf(pos->dso, type, fp); + printed += dso__fprintf(pos->dso, pos->type, fp); printed += fprintf(fp, "--\n"); } } - return printed; -} + pthread_rwlock_unlock(&maps->lock); -static size_t map_groups__fprintf_maps(struct map_groups *mg, FILE *fp) -{ - size_t printed = 0, i; - for (i = 0; i < MAP__NR_TYPES; ++i) - printed += __map_groups__fprintf_maps(mg, i, fp); return printed; } -static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg, - enum map_type type, FILE *fp) +size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type, + FILE *fp) { - struct map *pos; - size_t printed = 0; - - list_for_each_entry(pos, &mg->removed_maps[type], node) { - printed += fprintf(fp, "Map:"); - printed += map__fprintf(pos, fp); - if (verbose > 1) { - printed += dso__fprintf(pos->dso, type, fp); - printed += fprintf(fp, "--\n"); - } - } - return printed; + size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); + return printed += maps__fprintf(&mg->maps[type], fp); } -static size_t map_groups__fprintf_removed_maps(struct map_groups *mg, - FILE *fp) +size_t map_groups__fprintf(struct map_groups *mg, FILE *fp) { size_t printed = 0, i; for (i = 0; i < MAP__NR_TYPES; ++i) - printed += __map_groups__fprintf_removed_maps(mg, i, fp); + printed += __map_groups__fprintf_maps(mg, i, fp); return printed; } -size_t map_groups__fprintf(struct map_groups *mg, FILE *fp) +static void __map_groups__insert(struct map_groups *mg, struct map *map) { - size_t printed = map_groups__fprintf_maps(mg, fp); - printed += fprintf(fp, "Removed maps:\n"); - return printed + map_groups__fprintf_removed_maps(mg, fp); + __maps__insert(&mg->maps[map->type], map); + map->groups = mg; } -int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, - FILE *fp) +static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) { - struct rb_root *root = &mg->maps[map->type]; - struct rb_node *next = rb_first(root); + struct rb_root *root; + struct rb_node *next; int err = 0; + pthread_rwlock_wrlock(&maps->lock); + + root = &maps->entries; + next = rb_first(root); + while (next) { struct map *pos = rb_entry(next, struct map, rb_node); next = rb_next(&pos->rb_node); @@ -651,7 +674,7 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, map__fprintf(pos, fp); } - rb_erase(&pos->rb_node, root); + rb_erase_init(&pos->rb_node, root); /* * Now check if we need to create new maps for areas not * overlapped by the new map: @@ -661,11 +684,11 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, if (before == NULL) { err = -ENOMEM; - goto move_map; + goto put_map; } before->end = map->start; - map_groups__insert(mg, before); + __map_groups__insert(pos->groups, before); if (verbose >= 2) map__fprintf(before, fp); } @@ -675,28 +698,31 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, if (after == NULL) { err = -ENOMEM; - goto move_map; + 
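/*
 * Editorial note: maps__fixup_overlappings() above resolves an
 * address-range collision by carving the old map around the new one:
 *
 *	old:	[ - - - - - - - - - - - - )
 *	new:	          [ new )
 *	result:	[ before )[ new )[ after  )
 *
 * 'before' and 'after' are map__clone()s of the old map with bounds
 * trimmed to map->start and map->end; the old map itself is unlinked
 * and released with map__put().
 */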
goto put_map; } after->start = map->end; - map_groups__insert(mg, after); + __map_groups__insert(pos->groups, after); if (verbose >= 2) map__fprintf(after, fp); } -move_map: - /* - * If we have references, just move them to a separate list. - */ - if (pos->referenced) - list_add_tail(&pos->node, &mg->removed_maps[map->type]); - else - map__delete(pos); +put_map: + map__put(pos); if (err) - return err; + goto out; } - return 0; + err = 0; +out: + pthread_rwlock_unlock(&maps->lock); + return err; +} + +int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, + FILE *fp) +{ + return maps__fixup_overlappings(&mg->maps[map->type], map, fp); } /* @@ -705,20 +731,28 @@ move_map: int map_groups__clone(struct map_groups *mg, struct map_groups *parent, enum map_type type) { - struct rb_node *nd; - for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) { - struct map *map = rb_entry(nd, struct map, rb_node); + int err = -ENOMEM; + struct map *map; + struct maps *maps = &parent->maps[type]; + + pthread_rwlock_rdlock(&maps->lock); + + for (map = maps__first(maps); map; map = map__next(map)) { struct map *new = map__clone(map); if (new == NULL) - return -ENOMEM; + goto out_unlock; map_groups__insert(mg, new); } - return 0; + + err = 0; +out_unlock: + pthread_rwlock_unlock(&maps->lock); + return err; } -void maps__insert(struct rb_root *maps, struct map *map) +static void __maps__insert(struct maps *maps, struct map *map) { - struct rb_node **p = &maps->rb_node; + struct rb_node **p = &maps->entries.rb_node; struct rb_node *parent = NULL; const u64 ip = map->start; struct map *m; @@ -733,20 +767,38 @@ void maps__insert(struct rb_root *maps, struct map *map) } rb_link_node(&map->rb_node, parent, p); - rb_insert_color(&map->rb_node, maps); + rb_insert_color(&map->rb_node, &maps->entries); + map__get(map); } -void maps__remove(struct rb_root *maps, struct map *map) +void maps__insert(struct maps *maps, struct map *map) { - rb_erase(&map->rb_node, maps); + pthread_rwlock_wrlock(&maps->lock); + __maps__insert(maps, map); + pthread_rwlock_unlock(&maps->lock); } -struct map *maps__find(struct rb_root *maps, u64 ip) +static void __maps__remove(struct maps *maps, struct map *map) { - struct rb_node **p = &maps->rb_node; - struct rb_node *parent = NULL; + rb_erase_init(&map->rb_node, &maps->entries); + map__put(map); +} + +void maps__remove(struct maps *maps, struct map *map) +{ + pthread_rwlock_wrlock(&maps->lock); + __maps__remove(maps, map); + pthread_rwlock_unlock(&maps->lock); +} + +struct map *maps__find(struct maps *maps, u64 ip) +{ + struct rb_node **p, *parent = NULL; struct map *m; + pthread_rwlock_rdlock(&maps->lock); + + p = &maps->entries.rb_node; while (*p != NULL) { parent = *p; m = rb_entry(parent, struct map, rb_node); @@ -755,22 +807,25 @@ struct map *maps__find(struct rb_root *maps, u64 ip) else if (ip >= m->end) p = &(*p)->rb_right; else - return m; + goto out; } - return NULL; + m = NULL; +out: + pthread_rwlock_unlock(&maps->lock); + return m; } -struct map *maps__first(struct rb_root *maps) +struct map *maps__first(struct maps *maps) { - struct rb_node *first = rb_first(maps); + struct rb_node *first = rb_first(&maps->entries); if (first) return rb_entry(first, struct map, rb_node); return NULL; } -struct map *maps__next(struct map *map) +struct map *map__next(struct map *map) { struct rb_node *next = rb_next(&map->rb_node); diff --git a/kernel/tools/perf/util/map.h b/kernel/tools/perf/util/map.h index ec19c59ca..7309d64ce 100644 --- a/kernel/tools/perf/util/map.h 
+++ b/kernel/tools/perf/util/map.h @@ -1,9 +1,11 @@ #ifndef __PERF_MAP_H #define __PERF_MAP_H +#include #include #include #include +#include #include #include #include @@ -32,7 +34,6 @@ struct map { u64 start; u64 end; u8 /* enum map_type */ type; - bool referenced; bool erange_warned; u32 priv; u32 prot; @@ -50,6 +51,7 @@ struct map { struct dso *dso; struct map_groups *groups; + atomic_t refcnt; }; struct kmap { @@ -57,11 +59,15 @@ struct kmap { struct map_groups *kmaps; }; +struct maps { + struct rb_root entries; + pthread_rwlock_t lock; +}; + struct map_groups { - struct rb_root maps[MAP__NR_TYPES]; - struct list_head removed_maps[MAP__NR_TYPES]; + struct maps maps[MAP__NR_TYPES]; struct machine *machine; - int refcnt; + atomic_t refcnt; }; struct map_groups *map_groups__new(struct machine *machine); @@ -70,7 +76,8 @@ bool map_groups__empty(struct map_groups *mg); static inline struct map_groups *map_groups__get(struct map_groups *mg) { - ++mg->refcnt; + if (mg) + atomic_inc(&mg->refcnt); return mg; } @@ -124,7 +131,7 @@ struct thread; */ #define __map__for_each_symbol_by_name(map, sym_name, pos, filter) \ for (pos = map__find_symbol_by_name(map, sym_name, filter); \ - pos && strcmp(pos->name, sym_name) == 0; \ + pos && arch__compare_symbol_names(pos->name, sym_name) == 0; \ pos = symbol__next_by_name(pos)) #define map__for_each_symbol_by_name(map, sym_name, pos) \ @@ -132,6 +139,7 @@ struct thread; typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); +int arch__compare_symbol_names(const char *namea, const char *nameb); void map__init(struct map *map, enum map_type type, u64 start, u64 end, u64 pgoff, struct dso *dso); struct map *map__new(struct machine *machine, u64 start, u64 len, @@ -141,6 +149,24 @@ struct map *map__new(struct machine *machine, u64 start, u64 len, struct map *map__new2(u64 start, struct dso *dso, enum map_type type); void map__delete(struct map *map); struct map *map__clone(struct map *map); + +static inline struct map *map__get(struct map *map) +{ + if (map) + atomic_inc(&map->refcnt); + return map; +} + +void map__put(struct map *map); + +static inline void __map__zput(struct map **map) +{ + map__put(*map); + *map = NULL; +} + +#define map__zput(map) __map__zput(&map) + int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *map, FILE *fp); size_t map__fprintf_dsoname(struct map *map, FILE *fp); @@ -159,11 +185,13 @@ void map__reloc_vmlinux(struct map *map); size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type, FILE *fp); -void maps__insert(struct rb_root *maps, struct map *map); -void maps__remove(struct rb_root *maps, struct map *map); -struct map *maps__find(struct rb_root *maps, u64 addr); -struct map *maps__first(struct rb_root *maps); -struct map *maps__next(struct map *map); +void maps__insert(struct maps *maps, struct map *map); +void maps__remove(struct maps *maps, struct map *map); +struct map *maps__find(struct maps *maps, u64 addr); +struct map *maps__first(struct maps *maps); +struct map *map__next(struct map *map); +struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, + struct map **mapp, symbol_filter_t filter); void map_groups__init(struct map_groups *mg, struct machine *machine); void map_groups__exit(struct map_groups *mg); int map_groups__clone(struct map_groups *mg, @@ -198,7 +226,7 @@ static inline struct map *map_groups__first(struct map_groups *mg, static inline struct map *map_groups__next(struct map *map) { - return maps__next(map); + return 
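/*
 * Editorial note: map__zput(), defined above, is meant for struct
 * members and locals that cache a counted map reference; it releases
 * the reference and NULLs the pointer in one go so a stale pointer
 * can never be double-put (holder struct hypothetical):
 */
struct sample_ctx {
	struct map *map;	/* holds a reference via map__get() */
};

static void sample_ctx__exit(struct sample_ctx *ctx)
{
	map__zput(ctx->map);	/* put + set to NULL */
}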
map__next(map); } struct symbol *map_groups__find_symbol(struct map_groups *mg, @@ -230,6 +258,11 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, struct map *map_groups__find_by_name(struct map_groups *mg, enum map_type type, const char *name); -void map_groups__flush(struct map_groups *mg); +bool __map__is_kernel(const struct map *map); + +static inline bool __map__is_kmodule(const struct map *map) +{ + return !__map__is_kernel(map); +} #endif /* __PERF_MAP_H */ diff --git a/kernel/tools/perf/util/ordered-events.c b/kernel/tools/perf/util/ordered-events.c index 52be201b9..b1b9e2385 100644 --- a/kernel/tools/perf/util/ordered-events.c +++ b/kernel/tools/perf/util/ordered-events.c @@ -220,6 +220,9 @@ static int __ordered_events__flush(struct ordered_events *oe) else if (last_ts <= limit) oe->last = list_entry(head->prev, struct ordered_event, list); + if (show_progress) + ui_progress__finish(); + return 0; } diff --git a/kernel/tools/perf/util/pager.c b/kernel/tools/perf/util/pager.c index 31ee02d4e..53ef006a9 100644 --- a/kernel/tools/perf/util/pager.c +++ b/kernel/tools/perf/util/pager.c @@ -50,11 +50,6 @@ void setup_pager(void) if (!isatty(1)) return; - if (!pager) { - if (!pager_program) - perf_config(perf_default_config, NULL); - pager = pager_program; - } if (!pager) pager = getenv("PAGER"); if (!(pager || access("/usr/bin/pager", X_OK))) diff --git a/kernel/tools/perf/util/parse-branch-options.c b/kernel/tools/perf/util/parse-branch-options.c new file mode 100644 index 000000000..355eecf6b --- /dev/null +++ b/kernel/tools/perf/util/parse-branch-options.c @@ -0,0 +1,95 @@ +#include "perf.h" +#include "util/util.h" +#include "util/debug.h" +#include "util/parse-options.h" +#include "util/parse-branch-options.h" + +#define BRANCH_OPT(n, m) \ + { .name = n, .mode = (m) } + +#define BRANCH_END { .name = NULL } + +struct branch_mode { + const char *name; + int mode; +}; + +static const struct branch_mode branch_modes[] = { + BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER), + BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL), + BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV), + BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY), + BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL), + BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN), + BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL), + BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX), + BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX), + BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX), + BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND), + BRANCH_OPT("ind_jmp", PERF_SAMPLE_BRANCH_IND_JUMP), + BRANCH_OPT("call", PERF_SAMPLE_BRANCH_CALL), + BRANCH_END +}; + +int +parse_branch_stack(const struct option *opt, const char *str, int unset) +{ +#define ONLY_PLM \ + (PERF_SAMPLE_BRANCH_USER |\ + PERF_SAMPLE_BRANCH_KERNEL |\ + PERF_SAMPLE_BRANCH_HV) + + uint64_t *mode = (uint64_t *)opt->value; + const struct branch_mode *br; + char *s, *os = NULL, *p; + int ret = -1; + + if (unset) + return 0; + + /* + * cannot set it twice, -b + --branch-filter for instance + */ + if (*mode) + return -1; + + /* str may be NULL in case no arg is passed to -b */ + if (str) { + /* because str is read-only */ + s = os = strdup(str); + if (!s) + return -1; + + for (;;) { + p = strchr(s, ','); + if (p) + *p = '\0'; + + for (br = branch_modes; br->name; br++) { + if (!strcasecmp(s, br->name)) + break; + } + if (!br->name) { + ui__warning("unknown branch filter %s," + " check man page\n", s); + goto error; + } + + *mode |= br->mode; + + if (!p) + break; + + s = p + 1; + } + 
} + ret = 0; + + /* default to any branch */ + if ((*mode & ~ONLY_PLM) == 0) { + *mode = PERF_SAMPLE_BRANCH_ANY; + } +error: + free(os); + return ret; +} diff --git a/kernel/tools/perf/util/parse-branch-options.h b/kernel/tools/perf/util/parse-branch-options.h new file mode 100644 index 000000000..b9d9470c2 --- /dev/null +++ b/kernel/tools/perf/util/parse-branch-options.h @@ -0,0 +1,5 @@ +#ifndef _PERF_PARSE_BRANCH_OPTIONS_H +#define _PERF_PARSE_BRANCH_OPTIONS_H 1 +struct option; +int parse_branch_stack(const struct option *opt, const char *str, int unset); +#endif /* _PERF_PARSE_BRANCH_OPTIONS_H */ diff --git a/kernel/tools/perf/util/parse-events.c b/kernel/tools/perf/util/parse-events.c index be0655388..b48e87693 100644 --- a/kernel/tools/perf/util/parse-events.c +++ b/kernel/tools/perf/util/parse-events.c @@ -1,4 +1,5 @@ #include +#include #include "util.h" #include "../perf.h" #include "evlist.h" @@ -10,13 +11,16 @@ #include "symbol.h" #include "cache.h" #include "header.h" +#include "bpf-loader.h" #include "debug.h" -#include +#include #include "parse-events-bison.h" #define YY_EXTRA_TYPE int #include "parse-events-flex.h" #include "pmu.h" #include "thread_map.h" +#include "cpumap.h" +#include "asm/bug.h" #define MAX_NAME_LEN 100 @@ -24,6 +28,8 @@ extern int parse_events_debug; #endif int parse_events_parse(void *data, void *scanner); +static int get_config_terms(struct list_head *head_config, + struct list_head *head_terms __maybe_unused); static struct perf_pmu_event_symbol *perf_pmu_events_list; /* @@ -118,6 +124,10 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { .symbol = "dummy", .alias = "", }, + [PERF_COUNT_SW_BPF_OUTPUT] = { + .symbol = "bpf-output", + .alias = "", + }, }; #define __PERF_EVENT_FIELD(config, name) \ @@ -274,7 +284,8 @@ const char *event_type(int type) static struct perf_evsel * __add_event(struct list_head *list, int *idx, struct perf_event_attr *attr, - char *name, struct cpu_map *cpus) + char *name, struct cpu_map *cpus, + struct list_head *config_terms) { struct perf_evsel *evsel; @@ -284,17 +295,24 @@ __add_event(struct list_head *list, int *idx, if (!evsel) return NULL; - evsel->cpus = cpus; + evsel->cpus = cpu_map__get(cpus); + evsel->own_cpus = cpu_map__get(cpus); + if (name) evsel->name = strdup(name); + + if (config_terms) + list_splice(config_terms, &evsel->config_terms); + list_add_tail(&evsel->node, list); return evsel; } static int add_event(struct list_head *list, int *idx, - struct perf_event_attr *attr, char *name) + struct perf_event_attr *attr, char *name, + struct list_head *config_terms) { - return __add_event(list, idx, attr, name, NULL) ? 0 : -ENOMEM; + return __add_event(list, idx, attr, name, NULL, config_terms) ? 0 : -ENOMEM; } static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size) @@ -373,35 +391,78 @@ int parse_events_add_cache(struct list_head *list, int *idx, memset(&attr, 0, sizeof(attr)); attr.config = cache_type | (cache_op << 8) | (cache_result << 16); attr.type = PERF_TYPE_HW_CACHE; - return add_event(list, idx, &attr, name); + return add_event(list, idx, &attr, name, NULL); +} + +static void tracepoint_error(struct parse_events_error *e, int err, + char *sys, char *name) +{ + char help[BUFSIZ]; + + if (!e) + return; + + /* + * We get error directly from syscall errno ( > 0), + * or from encoded pointer's error ( < 0). 
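/*
 * Editorial note: parse_branch_stack() is an option-parsing callback;
 * a tool's option table wires it up roughly like this (the exact
 * table entry shown is illustrative, modeled on perf record's
 * -j/--branch-filter):
 */
OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
	     "branch filter mask",
	     "branch stack filter modes",
	     parse_branch_stack),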
+ */ + err = abs(err); + + switch (err) { + case EACCES: + e->str = strdup("can't access trace events"); + break; + case ENOENT: + e->str = strdup("unknown tracepoint"); + break; + default: + e->str = strdup("failed to add tracepoint"); + break; + } + + tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name); + e->help = strdup(help); } static int add_tracepoint(struct list_head *list, int *idx, - char *sys_name, char *evt_name) + char *sys_name, char *evt_name, + struct parse_events_error *err, + struct list_head *head_config) { struct perf_evsel *evsel; evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++); - if (!evsel) - return -ENOMEM; + if (IS_ERR(evsel)) { + tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name); + return PTR_ERR(evsel); + } - list_add_tail(&evsel->node, list); + if (head_config) { + LIST_HEAD(config_terms); + if (get_config_terms(head_config, &config_terms)) + return -ENOMEM; + list_splice(&config_terms, &evsel->config_terms); + } + + list_add_tail(&evsel->node, list); return 0; } static int add_tracepoint_multi_event(struct list_head *list, int *idx, - char *sys_name, char *evt_name) + char *sys_name, char *evt_name, + struct parse_events_error *err, + struct list_head *head_config) { char evt_path[MAXPATHLEN]; struct dirent *evt_ent; DIR *evt_dir; - int ret = 0; + int ret = 0, found = 0; snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name); evt_dir = opendir(evt_path); if (!evt_dir) { - perror("Can't open event dir"); + tracepoint_error(err, errno, sys_name, evt_name); return -1; } @@ -415,7 +476,15 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx, if (!strglobmatch(evt_ent->d_name, evt_name)) continue; - ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name); + found++; + + ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name, + err, head_config); + } + + if (!found) { + tracepoint_error(err, ENOENT, sys_name, evt_name); + ret = -1; } closedir(evt_dir); @@ -423,15 +492,21 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx, } static int add_tracepoint_event(struct list_head *list, int *idx, - char *sys_name, char *evt_name) + char *sys_name, char *evt_name, + struct parse_events_error *err, + struct list_head *head_config) { return strpbrk(evt_name, "*?") ? 
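/*
 * Editorial sketch of how the parse_events_error plumbing above
 * surfaces messages such as "unknown tracepoint" to a caller
 * (hypothetical caller; assumes an initialized evlist):
 */
struct parse_events_error err = { .idx = 0, .str = NULL, .help = NULL };

if (parse_events(evlist, "sched:sched_switch", &err)) {
	fprintf(stderr, "%s\n", err.str ? err.str : "event parse failed");
	free(err.str);
	free(err.help);
}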
- add_tracepoint_multi_event(list, idx, sys_name, evt_name) : - add_tracepoint(list, idx, sys_name, evt_name); + add_tracepoint_multi_event(list, idx, sys_name, evt_name, + err, head_config) : + add_tracepoint(list, idx, sys_name, evt_name, + err, head_config); } static int add_tracepoint_multi_sys(struct list_head *list, int *idx, - char *sys_name, char *evt_name) + char *sys_name, char *evt_name, + struct parse_events_error *err, + struct list_head *head_config) { struct dirent *events_ent; DIR *events_dir; @@ -439,7 +514,7 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx, events_dir = opendir(tracing_events_path); if (!events_dir) { - perror("Can't open event dir"); + tracepoint_error(err, errno, sys_name, evt_name); return -1; } @@ -455,20 +530,136 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx, continue; ret = add_tracepoint_event(list, idx, events_ent->d_name, - evt_name); + evt_name, err, head_config); } closedir(events_dir); return ret; } -int parse_events_add_tracepoint(struct list_head *list, int *idx, - char *sys, char *event) +struct __add_bpf_event_param { + struct parse_events_evlist *data; + struct list_head *list; +}; + +static int add_bpf_event(struct probe_trace_event *tev, int fd, + void *_param) { - if (strpbrk(sys, "*?")) - return add_tracepoint_multi_sys(list, idx, sys, event); - else - return add_tracepoint_event(list, idx, sys, event); + LIST_HEAD(new_evsels); + struct __add_bpf_event_param *param = _param; + struct parse_events_evlist *evlist = param->data; + struct list_head *list = param->list; + struct perf_evsel *pos; + int err; + + pr_debug("add bpf event %s:%s and attach bpf program %d\n", + tev->group, tev->event, fd); + + err = parse_events_add_tracepoint(&new_evsels, &evlist->idx, tev->group, + tev->event, evlist->error, NULL); + if (err) { + struct perf_evsel *evsel, *tmp; + + pr_debug("Failed to add BPF event %s:%s\n", + tev->group, tev->event); + list_for_each_entry_safe(evsel, tmp, &new_evsels, node) { + list_del(&evsel->node); + perf_evsel__delete(evsel); + } + return err; + } + pr_debug("adding %s:%s\n", tev->group, tev->event); + + list_for_each_entry(pos, &new_evsels, node) { + pr_debug("adding %s:%s to %p\n", + tev->group, tev->event, pos); + pos->bpf_fd = fd; + } + list_splice(&new_evsels, list); + return 0; +} + +int parse_events_load_bpf_obj(struct parse_events_evlist *data, + struct list_head *list, + struct bpf_object *obj) +{ + int err; + char errbuf[BUFSIZ]; + struct __add_bpf_event_param param = {data, list}; + static bool registered_unprobe_atexit = false; + + if (IS_ERR(obj) || !obj) { + snprintf(errbuf, sizeof(errbuf), + "Internal error: load bpf obj with NULL"); + err = -EINVAL; + goto errout; + } + + /* + * Register atexit handler before calling bpf__probe() so that + * bpf__probe() doesn't need to unprobe the probe points it has + * already created on failure.
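+ * The registered_unprobe_atexit flag below ensures the handler is + * installed at most once per process.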
+ */ + if (!registered_unprobe_atexit) { + atexit(bpf__clear); + registered_unprobe_atexit = true; + } + + err = bpf__probe(obj); + if (err) { + bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf)); + goto errout; + } + + err = bpf__load(obj); + if (err) { + bpf__strerror_load(obj, err, errbuf, sizeof(errbuf)); + goto errout; + } + + err = bpf__foreach_tev(obj, add_bpf_event, &param); + if (err) { + snprintf(errbuf, sizeof(errbuf), + "Attach events in BPF object failed"); + goto errout; + } + + return 0; +errout: + data->error->help = strdup("(add -v to see detail)"); + data->error->str = strdup(errbuf); + return err; +} + +int parse_events_load_bpf(struct parse_events_evlist *data, + struct list_head *list, + char *bpf_file_name, + bool source) +{ + struct bpf_object *obj; + + obj = bpf__prepare_load(bpf_file_name, source); + if (IS_ERR(obj)) { + char errbuf[BUFSIZ]; + int err; + + err = PTR_ERR(obj); + + if (err == -ENOTSUP) + snprintf(errbuf, sizeof(errbuf), + "BPF support is not compiled"); + else + bpf__strerror_prepare_load(bpf_file_name, + source, + -err, errbuf, + sizeof(errbuf)); + + data->error->help = strdup("(add -v to see detail)"); + data->error->str = strdup(errbuf); + return err; + } + + return parse_events_load_bpf_obj(data, list, obj); } static int @@ -535,16 +726,38 @@ int parse_events_add_breakpoint(struct list_head *list, int *idx, attr.type = PERF_TYPE_BREAKPOINT; attr.sample_period = 1; - return add_event(list, idx, &attr, NULL); + return add_event(list, idx, &attr, NULL, NULL); } -static int config_term(struct perf_event_attr *attr, - struct parse_events_term *term) +static int check_type_val(struct parse_events_term *term, + struct parse_events_error *err, + int type) { -#define CHECK_TYPE_VAL(type) \ -do { \ - if (PARSE_EVENTS__TERM_TYPE_ ## type != term->type_val) \ - return -EINVAL; \ + if (type == term->type_val) + return 0; + + if (err) { + err->idx = term->err_val; + if (type == PARSE_EVENTS__TERM_TYPE_NUM) + err->str = strdup("expected numeric value"); + else + err->str = strdup("expected string value"); + } + return -EINVAL; +} + +typedef int config_term_func_t(struct perf_event_attr *attr, + struct parse_events_term *term, + struct parse_events_error *err); + +static int config_term_common(struct perf_event_attr *attr, + struct parse_events_term *term, + struct parse_events_error *err) +{ +#define CHECK_TYPE_VAL(type) \ +do { \ + if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \ + return -EINVAL; \ } while (0) switch (term->type_term) { @@ -562,7 +775,9 @@ do { \ break; case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: CHECK_TYPE_VAL(NUM); - attr->sample_period = term->val.num; + break; + case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: + CHECK_TYPE_VAL(NUM); break; case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: /* @@ -570,10 +785,33 @@ do { \ * attr->branch_sample_type = term->val.num; */ break; + case PARSE_EVENTS__TERM_TYPE_TIME: + CHECK_TYPE_VAL(NUM); + if (term->val.num > 1) { + err->str = strdup("expected 0 or 1"); + err->idx = term->err_val; + return -EINVAL; + } + break; + case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: + CHECK_TYPE_VAL(STR); + break; + case PARSE_EVENTS__TERM_TYPE_STACKSIZE: + CHECK_TYPE_VAL(NUM); + break; + case PARSE_EVENTS__TERM_TYPE_INHERIT: + CHECK_TYPE_VAL(NUM); + break; + case PARSE_EVENTS__TERM_TYPE_NOINHERIT: + CHECK_TYPE_VAL(NUM); + break; case PARSE_EVENTS__TERM_TYPE_NAME: CHECK_TYPE_VAL(STR); break; default: + err->str = strdup("unknown term"); + err->idx = term->err_term; + err->help =
parse_events_formats_error_string(NULL); return -EINVAL; } @@ -581,33 +819,149 @@ do { \ #undef CHECK_TYPE_VAL } +static int config_term_pmu(struct perf_event_attr *attr, + struct parse_events_term *term, + struct parse_events_error *err) +{ + if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER) + /* + * Always succeed for sysfs terms, as we don't know + * at this point what type they need to have. + */ + return 0; + else + return config_term_common(attr, term, err); +} + +static int config_term_tracepoint(struct perf_event_attr *attr, + struct parse_events_term *term, + struct parse_events_error *err) +{ + switch (term->type_term) { + case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: + case PARSE_EVENTS__TERM_TYPE_STACKSIZE: + case PARSE_EVENTS__TERM_TYPE_INHERIT: + case PARSE_EVENTS__TERM_TYPE_NOINHERIT: + return config_term_common(attr, term, err); + default: + if (err) { + err->idx = term->err_term; + err->str = strdup("unknown term"); + err->help = strdup("valid terms: call-graph,stack-size\n"); + } + return -EINVAL; + } + + return 0; +} + static int config_attr(struct perf_event_attr *attr, - struct list_head *head, int fail) + struct list_head *head, + struct parse_events_error *err, + config_term_func_t config_term) { struct parse_events_term *term; list_for_each_entry(term, head, list) - if (config_term(attr, term) && fail) + if (config_term(attr, term, err)) return -EINVAL; return 0; } -int parse_events_add_numeric(struct list_head *list, int *idx, +static int get_config_terms(struct list_head *head_config, + struct list_head *head_terms) +{ +#define ADD_CONFIG_TERM(__type, __name, __val) \ +do { \ + struct perf_evsel_config_term *__t; \ + \ + __t = zalloc(sizeof(*__t)); \ + if (!__t) \ + return -ENOMEM; \ + \ + INIT_LIST_HEAD(&__t->list); \ + __t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \ + __t->val.__name = __val; \ + list_add_tail(&__t->list, head_terms); \ +} while (0) + + struct parse_events_term *term; + + list_for_each_entry(term, head_config, list) { + switch (term->type_term) { + case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: + ADD_CONFIG_TERM(PERIOD, period, term->val.num); + break; + case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ: + ADD_CONFIG_TERM(FREQ, freq, term->val.num); + break; + case PARSE_EVENTS__TERM_TYPE_TIME: + ADD_CONFIG_TERM(TIME, time, term->val.num); + break; + case PARSE_EVENTS__TERM_TYPE_CALLGRAPH: + ADD_CONFIG_TERM(CALLGRAPH, callgraph, term->val.str); + break; + case PARSE_EVENTS__TERM_TYPE_STACKSIZE: + ADD_CONFIG_TERM(STACK_USER, stack_user, term->val.num); + break; + case PARSE_EVENTS__TERM_TYPE_INHERIT: + ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 1 : 0); + break; + case PARSE_EVENTS__TERM_TYPE_NOINHERIT: + ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ?
0 : 1); + break; + default: + break; + } + } +#undef ADD_CONFIG_TERM + return 0; +} + +int parse_events_add_tracepoint(struct list_head *list, int *idx, + char *sys, char *event, + struct parse_events_error *err, + struct list_head *head_config) +{ + if (head_config) { + struct perf_event_attr attr; + + if (config_attr(&attr, head_config, err, + config_term_tracepoint)) + return -EINVAL; + } + + if (strpbrk(sys, "*?")) + return add_tracepoint_multi_sys(list, idx, sys, event, + err, head_config); + else + return add_tracepoint_event(list, idx, sys, event, + err, head_config); +} + +int parse_events_add_numeric(struct parse_events_evlist *data, + struct list_head *list, u32 type, u64 config, struct list_head *head_config) { struct perf_event_attr attr; + LIST_HEAD(config_terms); memset(&attr, 0, sizeof(attr)); attr.type = type; attr.config = config; - if (head_config && - config_attr(&attr, head_config, 1)) - return -EINVAL; + if (head_config) { + if (config_attr(&attr, head_config, data->error, + config_term_common)) + return -EINVAL; + + if (get_config_terms(head_config, &config_terms)) + return -ENOMEM; + } - return add_event(list, idx, &attr, NULL); + return add_event(list, &data->idx, &attr, NULL, &config_terms); } static int parse_events__is_name_term(struct parse_events_term *term) @@ -626,13 +980,15 @@ static char *pmu_event_name(struct list_head *head_terms) return NULL; } -int parse_events_add_pmu(struct list_head *list, int *idx, - char *name, struct list_head *head_config) +int parse_events_add_pmu(struct parse_events_evlist *data, + struct list_head *list, char *name, + struct list_head *head_config) { struct perf_event_attr attr; struct perf_pmu_info info; struct perf_pmu *pmu; struct perf_evsel *evsel; + LIST_HEAD(config_terms); pmu = perf_pmu__find(name); if (!pmu) @@ -647,7 +1003,7 @@ int parse_events_add_pmu(struct list_head *list, int *idx, if (!head_config) { attr.type = pmu->type; - evsel = __add_event(list, idx, &attr, NULL, pmu->cpus); + evsel = __add_event(list, &data->idx, &attr, NULL, pmu->cpus, NULL); return evsel ? 0 : -ENOMEM; } @@ -658,13 +1014,18 @@ int parse_events_add_pmu(struct list_head *list, int *idx, * Configure hardcoded terms first, no need to check * return value when called with fail == 0 ;) */ - config_attr(&attr, head_config, 0); + if (config_attr(&attr, head_config, data->error, config_term_pmu)) + return -EINVAL; - if (perf_pmu__config(pmu, &attr, head_config)) + if (get_config_terms(head_config, &config_terms)) + return -ENOMEM; + + if (perf_pmu__config(pmu, &attr, head_config, data->error)) return -EINVAL; - evsel = __add_event(list, idx, &attr, pmu_event_name(head_config), - pmu->cpus); + evsel = __add_event(list, &data->idx, &attr, + pmu_event_name(head_config), pmu->cpus, + &config_terms); if (evsel) { evsel->unit = info.unit; evsel->scale = info.scale; @@ -685,6 +1046,11 @@ void parse_events__set_leader(char *name, struct list_head *list) { struct perf_evsel *leader; + if (list_empty(list)) { + WARN_ONCE(true, "WARNING: failed to set leader: empty list"); + return; + } + __perf_evlist__set_leader(list); leader = list_entry(list->next, struct perf_evsel, node); leader->group_name = name ? strdup(name) : NULL; @@ -711,6 +1077,7 @@ struct event_modifier { int eG; int eI; int precise; + int precise_max; int exclude_GH; int sample_read; int pinned; @@ -726,6 +1093,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str, int eG = evsel ? evsel->attr.exclude_guest : 0; int eI = evsel ?
evsel->attr.exclude_idle : 0; int precise = evsel ? evsel->attr.precise_ip : 0; + int precise_max = 0; int sample_read = 0; int pinned = evsel ? evsel->attr.pinned : 0; @@ -762,6 +1130,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str, /* use of precise requires exclude_guest */ if (!exclude_GH) eG = 1; + } else if (*str == 'P') { + precise_max = 1; } else if (*str == 'S') { sample_read = 1; } else if (*str == 'D') { @@ -792,6 +1162,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str, mod->eG = eG; mod->eI = eI; mod->precise = precise; + mod->precise_max = precise_max; mod->exclude_GH = exclude_GH; mod->sample_read = sample_read; mod->pinned = pinned; @@ -808,7 +1179,7 @@ static int check_modifier(char *str) char *p = str; /* The sizeof includes 0 byte as well. */ - if (strlen(str) > (sizeof("ukhGHpppSDI") - 1)) + if (strlen(str) > (sizeof("ukhGHpppPSDI") - 1)) return -1; while (*p) { @@ -847,6 +1218,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add) evsel->attr.exclude_idle = mod.eI; evsel->exclude_GH = mod.exclude_GH; evsel->sample_read = mod.sample_read; + evsel->precise_max = mod.precise_max; if (perf_evsel__is_group_leader(evsel)) evsel->attr.pinned = mod.pinned; @@ -1019,20 +1391,31 @@ int parse_events_terms(struct list_head *terms, const char *str) return ret; } -int parse_events(struct perf_evlist *evlist, const char *str) +int parse_events(struct perf_evlist *evlist, const char *str, + struct parse_events_error *err) { struct parse_events_evlist data = { - .list = LIST_HEAD_INIT(data.list), - .idx = evlist->nr_entries, + .list = LIST_HEAD_INIT(data.list), + .idx = evlist->nr_entries, + .error = err, }; int ret; ret = parse_events__scanner(str, &data, PE_START_EVENTS); perf_pmu__parse_cleanup(); if (!ret) { - int entries = data.idx - evlist->nr_entries; - perf_evlist__splice_list_tail(evlist, &data.list, entries); + struct perf_evsel *last; + + if (list_empty(&data.list)) { + WARN_ONCE(true, "WARNING: event parser found nothing"); + return -1; + } + + perf_evlist__splice_list_tail(evlist, &data.list); evlist->nr_groups += data.nr_groups; + last = perf_evlist__last(evlist); + last->cmdline_group_boundary = true; + return 0; } @@ -1044,43 +1427,183 @@ int parse_events(struct perf_evlist *evlist, const char *str) return ret; } +#define MAX_WIDTH 1000 +static int get_term_width(void) +{ + struct winsize ws; + + get_term_dimensions(&ws); + return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col; +} + +static void parse_events_print_error(struct parse_events_error *err, + const char *event) +{ + const char *str = "invalid or unsupported event: "; + char _buf[MAX_WIDTH]; + char *buf = (char *) event; + int idx = 0; + + if (err->str) { + /* -2 for extra '' in the final fprintf */ + int width = get_term_width() - 2; + int len_event = strlen(event); + int len_str, max_len, cut = 0; + + /* + * Maximum error index indent, we will cut + * the event string if it's bigger. + */ + int max_err_idx = 13; + + /* + * Let's be specific with the message when + * we have the precise error. + */ + str = "event syntax error: "; + len_str = strlen(str); + max_len = width - len_str; + + buf = _buf; + + /* We're cutting from the beginning. */ + if (err->idx > max_err_idx) + cut = err->idx - max_err_idx; + + strncpy(buf, event + cut, max_len); + + /* Mark cut parts with '..' on both sides.
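+ * (Two leading dots when the front was cut; two trailing dots when the + * remainder still exceeds max_len.)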
*/ + if (cut) + buf[0] = buf[1] = '.'; + + if ((len_event - cut) > max_len) { + buf[max_len - 1] = buf[max_len - 2] = '.'; + buf[max_len] = 0; + } + + idx = len_str + err->idx - cut; + } + + fprintf(stderr, "%s'%s'\n", str, buf); + if (idx) { + fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err->str); + if (err->help) + fprintf(stderr, "\n%s\n", err->help); + free(err->str); + free(err->help); + } + + fprintf(stderr, "Run 'perf list' for a list of valid events\n"); +} + +#undef MAX_WIDTH + int parse_events_option(const struct option *opt, const char *str, int unset __maybe_unused) { struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; - int ret = parse_events(evlist, str); + struct parse_events_error err = { .idx = 0, }; + int ret = parse_events(evlist, str, &err); + + if (ret) + parse_events_print_error(&err, str); - if (ret) { - fprintf(stderr, "invalid or unsupported event: '%s'\n", str); - fprintf(stderr, "Run 'perf list' for a list of valid events\n"); - } return ret; } -int parse_filter(const struct option *opt, const char *str, - int unset __maybe_unused) +static int +foreach_evsel_in_last_glob(struct perf_evlist *evlist, + int (*func)(struct perf_evsel *evsel, + const void *arg), + const void *arg) { - struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; struct perf_evsel *last = NULL; + int err; + /* + * Don't return when list_empty, give func a chance to report + * an error when it finds last == NULL. + * + * So no need to WARN here, let *func do this. + */ if (evlist->nr_entries > 0) last = perf_evlist__last(evlist); - if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) { + do { + err = (*func)(last, arg); + if (err) + return -1; + if (!last) + return 0; + + if (last->node.prev == &evlist->entries) + return 0; + last = list_entry(last->node.prev, struct perf_evsel, node); + } while (!last->cmdline_group_boundary); + + return 0; +} + +static int set_filter(struct perf_evsel *evsel, const void *arg) +{ + const char *str = arg; + + if (evsel == NULL || evsel->attr.type != PERF_TYPE_TRACEPOINT) { fprintf(stderr, "--filter option should follow a -e tracepoint option\n"); return -1; } - last->filter = strdup(str); - if (last->filter == NULL) { - fprintf(stderr, "not enough memory to hold filter string\n"); + if (perf_evsel__append_filter(evsel, "&&", str) < 0) { + fprintf(stderr, + "not enough memory to hold filter string\n"); + return -1; + } + + return 0; +} + +int parse_filter(const struct option *opt, const char *str, + int unset __maybe_unused) +{ + struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; + + return foreach_evsel_in_last_glob(evlist, set_filter, + (const void *)str); +} + +static int add_exclude_perf_filter(struct perf_evsel *evsel, + const void *arg __maybe_unused) +{ + char new_filter[64]; + + if (evsel == NULL || evsel->attr.type != PERF_TYPE_TRACEPOINT) { + fprintf(stderr, + "--exclude-perf option should follow a -e tracepoint option\n"); + return -1; + } + + snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid()); + + if (perf_evsel__append_filter(evsel, "&&", new_filter) < 0) { + fprintf(stderr, + "not enough memory to hold filter string\n"); + return -1; } return 0; } +int exclude_perf(const struct option *opt, + const char *arg __maybe_unused, + int unset __maybe_unused) +{ + struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; + + return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter, + NULL); +} + static const char * const event_type_descriptors[] = { "Hardware event", "Software
event", @@ -1171,7 +1694,7 @@ restart: printf(" %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[PERF_TYPE_TRACEPOINT]); } - if (evt_num) + if (evt_num && pager_in_use()) printf("\n"); out_free: @@ -1327,7 +1850,7 @@ restart: printf(" %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[PERF_TYPE_HW_CACHE]); } - if (evt_num) + if (evt_num && pager_in_use()) printf("\n"); out_free: @@ -1363,7 +1886,7 @@ restart: for (i = 0; i < max; i++, syms++) { - if (event_glob != NULL && + if (event_glob != NULL && syms->symbol != NULL && !(strglobmatch(syms->symbol, event_glob) || (syms->alias && strglobmatch(syms->alias, event_glob)))) continue; @@ -1400,7 +1923,7 @@ restart: } printf(" %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[type]); } - if (evt_num) + if (evt_num && pager_in_use()) printf("\n"); out_free: @@ -1441,13 +1964,14 @@ void print_events(const char *event_glob, bool name_only) printf(" %-50s [%s]\n", "cpu/t1=v1[,t2=v2,t3 ...]/modifier", event_type_descriptors[PERF_TYPE_RAW]); - printf(" (see 'man perf-list' on how to encode it)\n"); - printf("\n"); + if (pager_in_use()) + printf(" (see 'man perf-list' on how to encode it)\n\n"); printf(" %-50s [%s]\n", "mem:[/len][:access]", event_type_descriptors[PERF_TYPE_BREAKPOINT]); - printf("\n"); + if (pager_in_use()) + printf("\n"); } print_tracepoint_events(NULL, NULL, name_only); @@ -1460,7 +1984,7 @@ int parse_events__is_hardcoded_term(struct parse_events_term *term) static int new_term(struct parse_events_term **_term, int type_val, int type_term, char *config, - char *str, u64 num) + char *str, u64 num, int err_term, int err_val) { struct parse_events_term *term; @@ -1472,6 +1996,8 @@ static int new_term(struct parse_events_term **_term, int type_val, term->type_val = type_val; term->type_term = type_term; term->config = config; + term->err_term = err_term; + term->err_val = err_val; switch (type_val) { case PARSE_EVENTS__TERM_TYPE_NUM: @@ -1490,17 +2016,29 @@ static int new_term(struct parse_events_term **_term, int type_val, } int parse_events_term__num(struct parse_events_term **term, - int type_term, char *config, u64 num) + int type_term, char *config, u64 num, + void *loc_term_, void *loc_val_) { + YYLTYPE *loc_term = loc_term_; + YYLTYPE *loc_val = loc_val_; + return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term, - config, NULL, num); + config, NULL, num, + loc_term ? loc_term->first_column : 0, + loc_val ? loc_val->first_column : 0); } int parse_events_term__str(struct parse_events_term **term, - int type_term, char *config, char *str) + int type_term, char *config, char *str, + void *loc_term_, void *loc_val_) { + YYLTYPE *loc_term = loc_term_; + YYLTYPE *loc_val = loc_val_; + return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term, - config, str, 0); + config, str, 0, + loc_term ? loc_term->first_column : 0, + loc_val ? 
loc_val->first_column : 0); } int parse_events_term__sym_hw(struct parse_events_term **term, @@ -1514,18 +2052,20 @@ int parse_events_term__sym_hw(struct parse_events_term **term, if (config) return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, PARSE_EVENTS__TERM_TYPE_USER, config, - (char *) sym->symbol, 0); + (char *) sym->symbol, 0, 0, 0); else return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, PARSE_EVENTS__TERM_TYPE_USER, - (char *) "event", (char *) sym->symbol, 0); + (char *) "event", (char *) sym->symbol, + 0, 0, 0); } int parse_events_term__clone(struct parse_events_term **new, struct parse_events_term *term) { return new_term(new, term->type_val, term->type_term, term->config, - term->val.str, term->val.num); + term->val.str, term->val.num, + term->err_term, term->err_val); } void parse_events__free_terms(struct list_head *terms) @@ -1535,3 +2075,41 @@ void parse_events__free_terms(struct list_head *terms) list_for_each_entry_safe(term, h, terms, list) free(term); } + +void parse_events_evlist_error(struct parse_events_evlist *data, + int idx, const char *str) +{ + struct parse_events_error *err = data->error; + + if (!err) + return; + err->idx = idx; + err->str = strdup(str); + WARN_ONCE(!err->str, "WARNING: failed to allocate error string"); +} + +/* + * Return a string containing the valid config terms of an event. + * @additional_terms: For terms such as PMU sysfs terms. + */ +char *parse_events_formats_error_string(char *additional_terms) +{ + char *str; + static const char *static_terms = "config,config1,config2,name," + "period,freq,branch_type,time," + "call-graph,stack-size\n"; + + /* valid terms */ + if (additional_terms) { + if (asprintf(&str, "valid terms: %s,%s", + additional_terms, static_terms) < 0) + goto fail; + } else { + if (asprintf(&str, "valid terms: %s", static_terms) < 0) + goto fail; + } + return str; + +fail: + return NULL; +} diff --git a/kernel/tools/perf/util/parse-events.h b/kernel/tools/perf/util/parse-events.h index 52a2dda4f..f1a6db107 100644 --- a/kernel/tools/perf/util/parse-events.h +++ b/kernel/tools/perf/util/parse-events.h @@ -12,6 +12,7 @@ struct list_head; struct perf_evsel; struct perf_evlist; +struct parse_events_error; struct option; @@ -29,9 +30,11 @@ const char *event_type(int type); extern int parse_events_option(const struct option *opt, const char *str, int unset); -extern int parse_events(struct perf_evlist *evlist, const char *str); +extern int parse_events(struct perf_evlist *evlist, const char *str, + struct parse_events_error *error); extern int parse_events_terms(struct list_head *terms, const char *str); extern int parse_filter(const struct option *opt, const char *str, int unset); +extern int exclude_perf(const struct option *opt, const char *arg, int unset); #define EVENTS_HELP_MAX (128*1024) @@ -59,7 +62,13 @@ enum { PARSE_EVENTS__TERM_TYPE_CONFIG2, PARSE_EVENTS__TERM_TYPE_NAME, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD, + PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE, + PARSE_EVENTS__TERM_TYPE_TIME, + PARSE_EVENTS__TERM_TYPE_CALLGRAPH, + PARSE_EVENTS__TERM_TYPE_STACKSIZE, + PARSE_EVENTS__TERM_TYPE_NOINHERIT, + PARSE_EVENTS__TERM_TYPE_INHERIT }; struct parse_events_term { @@ -72,12 +81,23 @@ struct parse_events_term { int type_term; struct list_head list; bool used; + + /* error string indexes within the parsed string */ + int err_term; + int err_val; +}; + +struct parse_events_error { + int idx; /* index in the parsed string */ + char *str; /* string to display at the index */ + char *help; /* optional help
string */ }; struct parse_events_evlist { - struct list_head list; - int idx; - int nr_groups; + struct list_head list; + int idx; + int nr_groups; + struct parse_events_error *error; }; struct parse_events_terms { @@ -85,10 +105,12 @@ struct parse_events_terms { }; int parse_events__is_hardcoded_term(struct parse_events_term *term); -int parse_events_term__num(struct parse_events_term **_term, - int type_term, char *config, u64 num); -int parse_events_term__str(struct parse_events_term **_term, - int type_term, char *config, char *str); +int parse_events_term__num(struct parse_events_term **term, + int type_term, char *config, u64 num, + void *loc_term, void *loc_val); +int parse_events_term__str(struct parse_events_term **term, + int type_term, char *config, char *str, + void *loc_term, void *loc_val); int parse_events_term__sym_hw(struct parse_events_term **term, char *config, unsigned idx); int parse_events_term__clone(struct parse_events_term **new, @@ -98,22 +120,36 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add); int parse_events__modifier_group(struct list_head *list, char *event_mod); int parse_events_name(struct list_head *list, char *name); int parse_events_add_tracepoint(struct list_head *list, int *idx, - char *sys, char *event); -int parse_events_add_numeric(struct list_head *list, int *idx, + char *sys, char *event, + struct parse_events_error *error, + struct list_head *head_config); +int parse_events_load_bpf(struct parse_events_evlist *data, + struct list_head *list, + char *bpf_file_name, + bool source); +/* Provide this function for perf test */ +struct bpf_object; +int parse_events_load_bpf_obj(struct parse_events_evlist *data, + struct list_head *list, + struct bpf_object *obj); +int parse_events_add_numeric(struct parse_events_evlist *data, + struct list_head *list, u32 type, u64 config, struct list_head *head_config); int parse_events_add_cache(struct list_head *list, int *idx, char *type, char *op_result1, char *op_result2); int parse_events_add_breakpoint(struct list_head *list, int *idx, void *ptr, char *type, u64 len); -int parse_events_add_pmu(struct list_head *list, int *idx, - char *pmu , struct list_head *head_config); +int parse_events_add_pmu(struct parse_events_evlist *data, + struct list_head *list, char *name, + struct list_head *head_config); enum perf_pmu_event_symbol_type perf_pmu__parse_check(const char *name); void parse_events__set_leader(char *name, struct list_head *list); void parse_events_update_lists(struct list_head *list_event, struct list_head *list_all); -void parse_events_error(void *data, void *scanner, char const *msg); +void parse_events_evlist_error(struct parse_events_evlist *data, + int idx, const char *str); void print_events(const char *event_glob, bool name_only); @@ -132,5 +168,6 @@ int print_hwcache_events(const char *event_glob, bool name_only); extern int is_valid_tracepoint(const char *event_string); int valid_event_mount(const char *eventfs); +char *parse_events_formats_error_string(char *additional_terms); #endif /* __PERF_PARSE_EVENTS_H */ diff --git a/kernel/tools/perf/util/parse-events.l b/kernel/tools/perf/util/parse-events.l index 8895cf313..58c5831ff 100644 --- a/kernel/tools/perf/util/parse-events.l +++ b/kernel/tools/perf/util/parse-events.l @@ -3,6 +3,8 @@ %option bison-bridge %option prefix="parse_events_" %option stack +%option bison-locations +%option yylineno %{ #include @@ -51,6 +53,18 @@ static int str(yyscan_t scanner, int token) return token; } +#define REWIND(__alloc) \ 
+do { \ + YYSTYPE *__yylval = parse_events_get_lval(yyscanner); \ + char *text = parse_events_get_text(yyscanner); \ + \ + if (__alloc) \ + __yylval->str = strdup(text); \ + \ + yycolumn -= strlen(text); \ + yyless(0); \ +} while (0) + static int pmu_str_check(yyscan_t scanner) { YYSTYPE *yylval = parse_events_get_lval(scanner); @@ -85,6 +99,13 @@ static int term(yyscan_t scanner, int type) return PE_TERM; } +#define YY_USER_ACTION \ +do { \ + yylloc->last_column = yylloc->first_column; \ + yylloc->first_column = yycolumn; \ + yycolumn += yyleng; \ +} while (0); + %} %x mem @@ -94,14 +115,16 @@ static int term(yyscan_t scanner, int type) group [^,{}/]*[{][^}]*[}][^,{}/]* event_pmu [^,{}/]+[/][^/]*[/][^,{}/]* event [^,{}/]+ +bpf_object .*\.(o|bpf) +bpf_source .*\.c num_dec [0-9]+ num_hex 0x[a-fA-F0-9]+ num_raw_hex [a-fA-F0-9]+ -name [a-zA-Z_*?][a-zA-Z0-9_*?]* -name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]* +name [a-zA-Z_*?][a-zA-Z0-9_*?.]* +name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.]* /* If you add a modifier you need to update check_modifier() */ -modifier_event [ukhpGHSDI]+ +modifier_event [ukhpPGHSDI]+ modifier_bp [rwx]{1,3} %% @@ -119,6 +142,12 @@ modifier_bp [rwx]{1,3} if (start_token) { parse_events_set_extra(NULL, yyscanner); + /* + * The flex parser does not init the locations variable + * via the scan_string interface, so we need to do the + * init here. + */ + yycolumn = 0; return start_token; } } @@ -127,30 +156,43 @@ modifier_bp [rwx]{1,3} { {group} { - BEGIN(INITIAL); yyless(0); + BEGIN(INITIAL); + REWIND(0); } {event_pmu} | +{bpf_object} | +{bpf_source} | {event} { - str(yyscanner, PE_EVENT_NAME); - BEGIN(INITIAL); yyless(0); + BEGIN(INITIAL); + REWIND(1); return PE_EVENT_NAME; } -. | <> { - BEGIN(INITIAL); yyless(0); + BEGIN(INITIAL); + REWIND(0); } } { + /* + * Please update parse_events_formats_error_string any time a + * new static term is added.
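+ * (It holds the static_terms string shown with 'unknown term' errors.)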
+ */ config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); } config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); } config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); } name { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); } period { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); } +freq { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ); } branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); } +time { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_TIME); } +call-graph { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CALLGRAPH); } +stack-size { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_STACKSIZE); } +inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_INHERIT); } +no-inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOINHERIT); } , { return ','; } "/" { BEGIN(INITIAL); return '/'; } {name_minus} { return str(yyscanner, PE_NAME); } @@ -228,6 +270,8 @@ r{num_raw_hex} { return raw(yyscanner); } {num_hex} { return value(yyscanner, 16); } {modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); } +{bpf_object} { return str(yyscanner, PE_BPF_OBJECT); } +{bpf_source} { return str(yyscanner, PE_BPF_SOURCE); } {name} { return pmu_str_check(yyscanner); } "/" { BEGIN(config); return '/'; } - { return '-'; } diff --git a/kernel/tools/perf/util/parse-events.y b/kernel/tools/perf/util/parse-events.y index 72def077d..ad379968d 100644 --- a/kernel/tools/perf/util/parse-events.y +++ b/kernel/tools/perf/util/parse-events.y @@ -2,6 +2,7 @@ %parse-param {void *_data} %parse-param {void *scanner} %lex-param {void* scanner} +%locations %{ @@ -14,8 +15,6 @@ #include "parse-events.h" #include "parse-events-bison.h" -extern int parse_events_lex (YYSTYPE* lvalp, void* scanner); - #define ABORT_ON(val) \ do { \ if (val) \ @@ -43,6 +42,7 @@ static inc_group_count(struct list_head *list, %token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM %token PE_EVENT_NAME %token PE_NAME +%token PE_BPF_OBJECT PE_BPF_SOURCE %token PE_MODIFIER_EVENT PE_MODIFIER_BP %token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT %token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP @@ -54,6 +54,8 @@ static inc_group_count(struct list_head *list, %type PE_RAW %type PE_TERM %type PE_NAME +%type PE_BPF_OBJECT +%type PE_BPF_SOURCE %type PE_NAME_CACHE_TYPE %type PE_NAME_CACHE_OP_RESULT %type PE_MODIFIER_EVENT @@ -68,8 +70,10 @@ static inc_group_count(struct list_head *list, %type event_legacy_cache %type event_legacy_mem %type event_legacy_tracepoint +%type tracepoint_name %type event_legacy_numeric %type event_legacy_raw +%type event_bpf_file %type event_def %type event_mod %type event_name @@ -85,6 +89,10 @@ static inc_group_count(struct list_head *list, u64 num; struct list_head *head; struct parse_events_term *term; + struct tracepoint_name { + char *sys; + char *event; + } tracepoint_name; } %% @@ -199,7 +207,8 @@ event_def: event_pmu | event_legacy_mem | event_legacy_tracepoint sep_dc | event_legacy_numeric sep_dc | - event_legacy_raw sep_dc + event_legacy_raw sep_dc | + event_bpf_file event_pmu: PE_NAME '/' event_config '/' @@ -208,7 +217,7 @@ PE_NAME '/' event_config '/' struct list_head *list; ALLOC_LIST(list); - ABORT_ON(parse_events_add_pmu(list, &data->idx, $1, $3)); + ABORT_ON(parse_events_add_pmu(data, list, $1, $3)); parse_events__free_terms($3); $$ = list; } @@ -219,7 +228,7 @@ PE_NAME '/' '/' struct list_head *list; ALLOC_LIST(list); - ABORT_ON(parse_events_add_pmu(list, &data->idx, $1, NULL)); + 
ABORT_ON(parse_events_add_pmu(data, list, $1, NULL)); $$ = list; } | @@ -232,11 +241,11 @@ PE_KERNEL_PMU_EVENT sep_dc ALLOC_LIST(head); ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, - $1, 1)); + $1, 1, &@1, NULL)); list_add_tail(&term->list, head); ALLOC_LIST(list); - ABORT_ON(parse_events_add_pmu(list, &data->idx, "cpu", head)); + ABORT_ON(parse_events_add_pmu(data, list, "cpu", head)); parse_events__free_terms(head); $$ = list; } @@ -252,11 +261,11 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc ALLOC_LIST(head); ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, - &pmu_name, 1)); + pmu_name, 1, &@1, NULL)); list_add_tail(&term->list, head); ALLOC_LIST(list); - ABORT_ON(parse_events_add_pmu(list, &data->idx, "cpu", head)); + ABORT_ON(parse_events_add_pmu(data, list, "cpu", head)); parse_events__free_terms(head); $$ = list; } @@ -275,8 +284,7 @@ value_sym '/' event_config '/' int config = $1 & 255; ALLOC_LIST(list); - ABORT_ON(parse_events_add_numeric(list, &data->idx, - type, config, $3)); + ABORT_ON(parse_events_add_numeric(data, list, type, config, $3)); parse_events__free_terms($3); $$ = list; } @@ -289,8 +297,7 @@ value_sym sep_slash_dc int config = $1 & 255; ALLOC_LIST(list); - ABORT_ON(parse_events_add_numeric(list, &data->idx, - type, config, NULL)); + ABORT_ON(parse_events_add_numeric(data, list, type, config, NULL)); $$ = list; } @@ -371,28 +378,60 @@ PE_PREFIX_MEM PE_VALUE sep_dc } event_legacy_tracepoint: -PE_NAME '-' PE_NAME ':' PE_NAME +tracepoint_name { struct parse_events_evlist *data = _data; + struct parse_events_error *error = data->error; struct list_head *list; - char sys_name[128]; - snprintf(&sys_name, 128, "%s-%s", $1, $3); ALLOC_LIST(list); - ABORT_ON(parse_events_add_tracepoint(list, &data->idx, &sys_name, $5)); + if (error) + error->idx = @1.first_column; + + if (parse_events_add_tracepoint(list, &data->idx, $1.sys, $1.event, + error, NULL)) + return -1; + $$ = list; } | -PE_NAME ':' PE_NAME +tracepoint_name '/' event_config '/' { struct parse_events_evlist *data = _data; + struct parse_events_error *error = data->error; struct list_head *list; ALLOC_LIST(list); - ABORT_ON(parse_events_add_tracepoint(list, &data->idx, $1, $3)); + if (error) + error->idx = @1.first_column; + + if (parse_events_add_tracepoint(list, &data->idx, $1.sys, $1.event, + error, $3)) + return -1; + $$ = list; } +tracepoint_name: +PE_NAME '-' PE_NAME ':' PE_NAME +{ + char sys_name[128]; + struct tracepoint_name tracepoint; + + snprintf(sys_name, 128, "%s-%s", $1, $3); + tracepoint.sys = strdup(sys_name); + tracepoint.event = $5; + + $$ = tracepoint; +} +| +PE_NAME ':' PE_NAME +{ + struct tracepoint_name tracepoint = {$1, $3}; + + $$ = tracepoint; +} + event_legacy_numeric: PE_VALUE ':' PE_VALUE { @@ -400,7 +439,7 @@ PE_VALUE ':' PE_VALUE struct list_head *list; ALLOC_LIST(list); - ABORT_ON(parse_events_add_numeric(list, &data->idx, (u32)$1, $3, NULL)); + ABORT_ON(parse_events_add_numeric(data, list, (u32)$1, $3, NULL)); $$ = list; } @@ -411,8 +450,29 @@ PE_RAW struct list_head *list; ALLOC_LIST(list); - ABORT_ON(parse_events_add_numeric(list, &data->idx, - PERF_TYPE_RAW, $1, NULL)); + ABORT_ON(parse_events_add_numeric(data, list, PERF_TYPE_RAW, $1, NULL)); + $$ = list; +} + +event_bpf_file: +PE_BPF_OBJECT +{ + struct parse_events_evlist *data = _data; + struct parse_events_error *error = data->error; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_load_bpf(data, list, $1, false)); + $$ = list; +} +| +PE_BPF_SOURCE +{ + struct
parse_events_evlist *data = _data; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_load_bpf(data, list, $1, true)); $$ = list; } @@ -450,7 +510,7 @@ PE_NAME '=' PE_NAME struct parse_events_term *term; ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, - $1, $3)); + $1, $3, &@1, &@3)); $$ = term; } | @@ -459,7 +519,7 @@ PE_NAME '=' PE_VALUE struct parse_events_term *term; ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, - $1, $3)); + $1, $3, &@1, &@3)); $$ = term; } | @@ -477,7 +537,7 @@ PE_NAME struct parse_events_term *term; ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, - $1, 1)); + $1, 1, &@1, NULL)); $$ = term; } | @@ -494,7 +554,7 @@ PE_TERM '=' PE_NAME { struct parse_events_term *term; - ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3)); + ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3, &@1, &@3)); $$ = term; } | @@ -502,7 +562,7 @@ PE_TERM '=' PE_VALUE { struct parse_events_term *term; - ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3)); + ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3, &@1, &@3)); $$ = term; } | @@ -510,7 +570,7 @@ PE_TERM { struct parse_events_term *term; - ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1)); + ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1, &@1, NULL)); $$ = term; } @@ -520,7 +580,9 @@ sep_slash_dc: '/' | ':' | %% -void parse_events_error(void *data __maybe_unused, void *scanner __maybe_unused, +void parse_events_error(YYLTYPE *loc, void *data, + void *scanner __maybe_unused, char const *msg __maybe_unused) { + parse_events_evlist_error(data, loc->last_column, "parser error"); } diff --git a/kernel/tools/perf/util/parse-options.c b/kernel/tools/perf/util/parse-options.c index 01626be2a..9fca09296 100644 --- a/kernel/tools/perf/util/parse-options.c +++ b/kernel/tools/perf/util/parse-options.c @@ -2,10 +2,13 @@ #include "parse-options.h" #include "cache.h" #include "header.h" +#include #define OPT_SHORT 1 #define OPT_UNSET 2 +static struct strbuf error_buf = STRBUF_INIT; + static int opterror(const struct option *opt, const char *reason, int flags) { if (flags & OPT_SHORT) @@ -372,7 +375,8 @@ void parse_options_start(struct parse_opt_ctx_t *ctx, } static int usage_with_options_internal(const char * const *, - const struct option *, int); + const struct option *, int, + struct parse_opt_ctx_t *); int parse_options_step(struct parse_opt_ctx_t *ctx, const struct option *options, @@ -396,8 +400,9 @@ int parse_options_step(struct parse_opt_ctx_t *ctx, if (arg[1] != '-') { ctx->opt = ++arg; - if (internal_help && *ctx->opt == 'h') - return usage_with_options_internal(usagestr, options, 0); + if (internal_help && *ctx->opt == 'h') { + return usage_with_options_internal(usagestr, options, 0, ctx); + } switch (parse_short_opt(ctx, options)) { case -1: return parse_options_usage(usagestr, options, arg, 1); @@ -412,7 +417,7 @@ int parse_options_step(struct parse_opt_ctx_t *ctx, check_typos(arg, options); while (ctx->opt) { if (internal_help && *ctx->opt == 'h') - return usage_with_options_internal(usagestr, options, 0); + return usage_with_options_internal(usagestr, options, 0, ctx); arg = ctx->opt; switch (parse_short_opt(ctx, options)) { case -1: @@ -445,9 +450,9 @@ int parse_options_step(struct parse_opt_ctx_t *ctx, arg += 2; if (internal_help && !strcmp(arg, "help-all")) - return usage_with_options_internal(usagestr, options, 1); + return usage_with_options_internal(usagestr, options, 1, ctx); if (internal_help && 
!strcmp(arg, "help")) - return usage_with_options_internal(usagestr, options, 0); + return usage_with_options_internal(usagestr, options, 0, ctx); if (!strcmp(arg, "list-opts")) return PARSE_OPT_LIST_OPTS; if (!strcmp(arg, "list-cmds")) @@ -496,7 +501,7 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o { struct parse_opt_ctx_t ctx; - perf_header__set_cmdline(argc, argv); + perf_env__set_cmdline(&perf_env, argc, argv); /* build usage string if it's not provided */ if (subcommands && !usagestr[0]) { @@ -537,9 +542,11 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o exit(130); default: /* PARSE_OPT_UNKNOWN */ if (ctx.argv[0][1] == '-') { - error("unknown option `%s'", ctx.argv[0] + 2); + strbuf_addf(&error_buf, "unknown option `%s'", + ctx.argv[0] + 2); } else { - error("unknown switch `%c'", *ctx.opt); + strbuf_addf(&error_buf, "unknown switch `%c'", + *ctx.opt); } usage_with_options(usagestr, options); } @@ -642,13 +649,93 @@ static void print_option_help(const struct option *opts, int full) fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help); } +static int option__cmp(const void *va, const void *vb) +{ + const struct option *a = va, *b = vb; + int sa = tolower(a->short_name), sb = tolower(b->short_name), ret; + + if (sa == 0) + sa = 'z' + 1; + if (sb == 0) + sb = 'z' + 1; + + ret = sa - sb; + + if (ret == 0) { + const char *la = a->long_name ?: "", + *lb = b->long_name ?: ""; + ret = strcmp(la, lb); + } + + return ret; +} + +static struct option *options__order(const struct option *opts) +{ + int nr_opts = 0; + const struct option *o = opts; + struct option *ordered; + + for (o = opts; o->type != OPTION_END; o++) + ++nr_opts; + + ordered = memdup(opts, sizeof(*o) * (nr_opts + 1)); + if (ordered == NULL) + goto out; + + qsort(ordered, nr_opts, sizeof(*o), option__cmp); +out: + return ordered; +} + +static bool option__in_argv(const struct option *opt, const struct parse_opt_ctx_t *ctx) +{ + int i; + + for (i = 1; i < ctx->argc; ++i) { + const char *arg = ctx->argv[i]; + + if (arg[0] != '-') { + if (arg[1] == '\0') { + if (arg[0] == opt->short_name) + return true; + continue; + } + + if (opt->long_name && strcmp(opt->long_name, arg) == 0) + return true; + + if (opt->help && strcasestr(opt->help, arg) != NULL) + return true; + + continue; + } + + if (arg[1] == opt->short_name || + (arg[1] == '-' && opt->long_name && strcmp(opt->long_name, arg + 2) == 0)) + return true; + } + + return false; +} + int usage_with_options_internal(const char * const *usagestr, - const struct option *opts, int full) + const struct option *opts, int full, + struct parse_opt_ctx_t *ctx) { + struct option *ordered; + if (!usagestr) return PARSE_OPT_HELP; - fprintf(stderr, "\n usage: %s\n", *usagestr++); + setup_pager(); + + if (strbuf_avail(&error_buf)) { + fprintf(stderr, " Error: %s\n", error_buf.buf); + strbuf_release(&error_buf); + } + + fprintf(stderr, "\n Usage: %s\n", *usagestr++); while (*usagestr && **usagestr) fprintf(stderr, " or: %s\n", *usagestr++); while (*usagestr) { @@ -661,11 +748,20 @@ int usage_with_options_internal(const char * const *usagestr, if (opts->type != OPTION_GROUP) fputc('\n', stderr); - for ( ; opts->type != OPTION_END; opts++) + ordered = options__order(opts); + if (ordered) + opts = ordered; + + for ( ; opts->type != OPTION_END; opts++) { + if (ctx && ctx->argc > 1 && !option__in_argv(opts, ctx)) + continue; print_option_help(opts, full); + } fputc('\n', stderr); + free(ordered); + return PARSE_OPT_HELP; } 
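The ordering behind the new help output above: options__order() memdup()s the option table and qsort()s it with option__cmp(), which compares case-insensitive short names (entries without one sort past 'z') and falls back to the long name on ties. A minimal self-contained sketch of that comparator, using a reduced stand-in struct (opt, opt_cmp and the sample table are illustrative names, not part of perf):

        #include <ctype.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        /* Reduced stand-in for struct option, for illustration only. */
        struct opt {
                int short_name;
                const char *long_name;
        };

        /* Same ordering idea as option__cmp(): case-insensitive short
         * name first, missing short names last, long name breaks ties. */
        static int opt_cmp(const void *va, const void *vb)
        {
                const struct opt *a = va, *b = vb;
                int sa = tolower(a->short_name), sb = tolower(b->short_name);

                if (sa == 0)
                        sa = 'z' + 1;
                if (sb == 0)
                        sb = 'z' + 1;
                if (sa != sb)
                        return sa - sb;
                return strcmp(a->long_name ? a->long_name : "",
                              b->long_name ? b->long_name : "");
        }

        int main(void)
        {
                struct opt opts[] = {
                        { 0, "verbose" }, { 'o', "output" }, { 'e', "event" },
                };
                size_t i;

                qsort(opts, sizeof(opts) / sizeof(opts[0]),
                      sizeof(opts[0]), opt_cmp);
                for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
                        printf("-%c, --%s\n",
                               opts[i].short_name ? opts[i].short_name : ' ',
                               opts[i].long_name);
                return 0;
        }

This prints -e/--event, then -o/--output, then the short-name-less --verbose last, the same order usage_with_options_internal() walks when printing help.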
@@ -673,7 +769,22 @@ void usage_with_options(const char * const *usagestr, const struct option *opts) { exit_browser(false); - usage_with_options_internal(usagestr, opts, 0); + usage_with_options_internal(usagestr, opts, 0, NULL); + exit(129); +} + +void usage_with_options_msg(const char * const *usagestr, + const struct option *opts, const char *fmt, ...) +{ + va_list ap; + + exit_browser(false); + + va_start(ap, fmt); + strbuf_addv(&error_buf, fmt, ap); + va_end(ap); + + usage_with_options_internal(usagestr, opts, 0, NULL); exit(129); } @@ -684,7 +795,7 @@ int parse_options_usage(const char * const *usagestr, if (!usagestr) goto opt; - fprintf(stderr, "\n usage: %s\n", *usagestr++); + fprintf(stderr, "\n Usage: %s\n", *usagestr++); while (*usagestr && **usagestr) fprintf(stderr, " or: %s\n", *usagestr++); while (*usagestr) { @@ -698,24 +809,23 @@ int parse_options_usage(const char * const *usagestr, opt: for ( ; opts->type != OPTION_END; opts++) { if (short_opt) { - if (opts->short_name == *optstr) + if (opts->short_name == *optstr) { + print_option_help(opts, 0); break; + } continue; } if (opts->long_name == NULL) continue; - if (!prefixcmp(optstr, opts->long_name)) - break; - if (!prefixcmp(optstr, "no-") && - !prefixcmp(optstr + 3, opts->long_name)) - break; + if (!prefixcmp(opts->long_name, optstr)) + print_option_help(opts, 0); + if (!prefixcmp("no-", optstr) && + !prefixcmp(opts->long_name, optstr + 3)) + print_option_help(opts, 0); } - if (opts->type != OPTION_END) - print_option_help(opts, 0); - return PARSE_OPT_HELP; } diff --git a/kernel/tools/perf/util/parse-options.h b/kernel/tools/perf/util/parse-options.h index 59561fd86..a8e407bc2 100644 --- a/kernel/tools/perf/util/parse-options.h +++ b/kernel/tools/perf/util/parse-options.h @@ -111,6 +111,7 @@ struct option { #define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) } #define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h), .defval = (b) } #define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h) } +#define OPT_BOOLEAN_FLAG(s, l, v, h, f) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h), .flags = (f) } #define OPT_BOOLEAN_SET(s, l, v, os, h) \ { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), \ .value = check_vtype(v, bool *), .help = (h), \ @@ -123,6 +124,10 @@ struct option { #define OPT_LONG(s, l, v, h) { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, long *), .help = (h) } #define OPT_U64(s, l, v, h) { .type = OPTION_U64, .short_name = (s), .long_name = (l), .value = check_vtype(v, u64 *), .help = (h) } #define OPT_STRING(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h) } +#define OPT_STRING_OPTARG(s, l, v, a, h, d) \ + { .type = OPTION_STRING, .short_name = (s), .long_name = (l), \ + .value = check_vtype(v, const char **), (a), .help = (h), \ + .flags = PARSE_OPT_OPTARG, .defval = (intptr_t)(d) } #define OPT_STRING_NOEMPTY(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h), .flags = PARSE_OPT_NOEMPTY} #define OPT_DATE(s, l, v, h) \ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb } @@ -156,6 
+161,10 @@ extern int parse_options_subcommand(int argc, const char **argv, extern NORETURN void usage_with_options(const char * const *usagestr, const struct option *options); +extern NORETURN __attribute__((format(printf,3,4))) +void usage_with_options_msg(const char * const *usagestr, + const struct option *options, + const char *fmt, ...); /*----- incremantal advanced APIs -----*/ diff --git a/kernel/tools/perf/util/parse-regs-options.c b/kernel/tools/perf/util/parse-regs-options.c new file mode 100644 index 000000000..4f2c1c255 --- /dev/null +++ b/kernel/tools/perf/util/parse-regs-options.c @@ -0,0 +1,71 @@ +#include "perf.h" +#include "util/util.h" +#include "util/debug.h" +#include "util/parse-options.h" +#include "util/parse-regs-options.h" + +int +parse_regs(const struct option *opt, const char *str, int unset) +{ + uint64_t *mode = (uint64_t *)opt->value; + const struct sample_reg *r; + char *s, *os = NULL, *p; + int ret = -1; + + if (unset) + return 0; + + /* + * cannot set it twice + */ + if (*mode) + return -1; + + /* str may be NULL in case no arg is passed to -I */ + if (str) { + /* because str is read-only */ + s = os = strdup(str); + if (!s) + return -1; + + for (;;) { + p = strchr(s, ','); + if (p) + *p = '\0'; + + if (!strcmp(s, "?")) { + fprintf(stderr, "available registers: "); + for (r = sample_reg_masks; r->name; r++) { + fprintf(stderr, "%s ", r->name); + } + fputc('\n', stderr); + /* just printing available regs */ + return -1; + } + for (r = sample_reg_masks; r->name; r++) { + if (!strcasecmp(s, r->name)) + break; + } + if (!r->name) { + ui__warning("unknown register %s," + " check man page\n", s); + goto error; + } + + *mode |= r->mask; + + if (!p) + break; + + s = p + 1; + } + } + ret = 0; + + /* default to all possible regs */ + if (*mode == 0) + *mode = PERF_REGS_MASK; +error: + free(os); + return ret; +} diff --git a/kernel/tools/perf/util/parse-regs-options.h b/kernel/tools/perf/util/parse-regs-options.h new file mode 100644 index 000000000..7d762b188 --- /dev/null +++ b/kernel/tools/perf/util/parse-regs-options.h @@ -0,0 +1,5 @@ +#ifndef _PERF_PARSE_REGS_OPTIONS_H +#define _PERF_PARSE_REGS_OPTIONS_H 1 +struct option; +int parse_regs(const struct option *opt, const char *str, int unset); +#endif /* _PERF_PARSE_REGS_OPTIONS_H */ diff --git a/kernel/tools/perf/util/perf_regs.c b/kernel/tools/perf/util/perf_regs.c index 43168fb0d..6b8eb13e1 100644 --- a/kernel/tools/perf/util/perf_regs.c +++ b/kernel/tools/perf/util/perf_regs.c @@ -2,6 +2,11 @@ #include "perf_regs.h" #include "event.h" +const struct sample_reg __weak sample_reg_masks[] = { + SMPL_REG_END +}; + +#ifdef HAVE_PERF_REGS_SUPPORT int perf_reg_value(u64 *valp, struct regs_dump *regs, int id) { int i, idx = 0; @@ -25,3 +30,4 @@ out: *valp = regs->cache_regs[id]; return 0; } +#endif diff --git a/kernel/tools/perf/util/perf_regs.h b/kernel/tools/perf/util/perf_regs.h index 980dbf76b..679d6e493 100644 --- a/kernel/tools/perf/util/perf_regs.h +++ b/kernel/tools/perf/util/perf_regs.h @@ -2,9 +2,19 @@ #define __PERF_REGS_H #include +#include struct regs_dump; +struct sample_reg { + const char *name; + uint64_t mask; +}; +#define SMPL_REG(n, b) { .name = #n, .mask = 1ULL << (b) } +#define SMPL_REG_END { .name = NULL } + +extern const struct sample_reg sample_reg_masks[]; + #ifdef HAVE_PERF_REGS_SUPPORT #include diff --git a/kernel/tools/perf/util/pmu.c b/kernel/tools/perf/util/pmu.c index 48411674d..e4b173dec 100644 --- a/kernel/tools/perf/util/pmu.c +++ b/kernel/tools/perf/util/pmu.c @@ -1,4 +1,5 @@ #include 
+#include #include #include #include @@ -112,7 +113,11 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char * if (sret < 0) goto error; - scale[sret] = '\0'; + if (scale[sret - 1] == '\n') + scale[sret - 1] = '\0'; + else + scale[sret] = '\0'; + /* * save current locale */ @@ -154,7 +159,10 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n close(fd); - alias->unit[sret] = '\0'; + if (alias->unit[sret - 1] == '\n') + alias->unit[sret - 1] = '\0'; + else + alias->unit[sret] = '\0'; return 0; error: @@ -198,17 +206,12 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias, return 0; } -static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FILE *file) +static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name, + char *desc __maybe_unused, char *val) { struct perf_pmu_alias *alias; - char buf[256]; int ret; - ret = fread(buf, 1, sizeof(buf), file); - if (ret == 0) - return -EINVAL; - buf[ret] = 0; - alias = malloc(sizeof(*alias)); if (!alias) return -ENOMEM; @@ -218,26 +221,43 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI alias->unit[0] = '\0'; alias->per_pkg = false; - ret = parse_events_terms(&alias->terms, buf); + ret = parse_events_terms(&alias->terms, val); if (ret) { + pr_err("Cannot parse alias %s: %d\n", val, ret); free(alias); return ret; } alias->name = strdup(name); - /* - * load unit name and scale if available - */ - perf_pmu__parse_unit(alias, dir, name); - perf_pmu__parse_scale(alias, dir, name); - perf_pmu__parse_per_pkg(alias, dir, name); - perf_pmu__parse_snapshot(alias, dir, name); + if (dir) { + /* + * load unit name and scale if available + */ + perf_pmu__parse_unit(alias, dir, name); + perf_pmu__parse_scale(alias, dir, name); + perf_pmu__parse_per_pkg(alias, dir, name); + perf_pmu__parse_snapshot(alias, dir, name); + } list_add_tail(&alias->list, list); return 0; } +static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FILE *file) +{ + char buf[256]; + int ret; + + ret = fread(buf, 1, sizeof(buf), file); + if (ret == 0) + return -EINVAL; + + buf[ret] = 0; + + return __perf_pmu__new_alias(list, dir, name, NULL, buf); +} + static inline bool pmu_alias_info_file(char *name) { size_t len; @@ -429,7 +449,7 @@ static struct cpu_map *pmu_cpumask(const char *name) return cpus; } -struct perf_event_attr *__attribute__((weak)) +struct perf_event_attr * __weak perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused) { return NULL; @@ -518,7 +538,7 @@ struct perf_pmu *perf_pmu__find(const char *name) } static struct perf_pmu_format * -pmu_find_format(struct list_head *formats, char *name) +pmu_find_format(struct list_head *formats, const char *name) { struct perf_pmu_format *format; @@ -529,6 +549,21 @@ pmu_find_format(struct list_head *formats, char *name) return NULL; } +__u64 perf_pmu__format_bits(struct list_head *formats, const char *name) +{ + struct perf_pmu_format *format = pmu_find_format(formats, name); + __u64 bits = 0; + int fbit; + + if (!format) + return 0; + + for_each_set_bit(fbit, format->bits, PERF_PMU_FORMAT_BITS) + bits |= 1ULL << fbit; + + return bits; +} + /* * Sets value based on the format definition (format parameter) * and unformated value (value parameter). 
@@ -550,6 +585,18 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v, } } +static __u64 pmu_format_max_value(const unsigned long *format) +{ + int w; + + w = bitmap_weight(format, PERF_PMU_FORMAT_BITS); + if (!w) + return 0; + if (w < 64) + return (1ULL << w) - 1; + return -1; +} + /* * Term is a string term, and might be a param-term. Try to look up it's value * in the remaining terms. @@ -579,6 +626,28 @@ static int pmu_resolve_param_term(struct parse_events_term *term, return -1; } +static char *pmu_formats_string(struct list_head *formats) +{ + struct perf_pmu_format *format; + char *str; + struct strbuf buf; + unsigned i = 0; + + if (!formats) + return NULL; + + strbuf_init(&buf, 0); + /* sysfs exported terms */ + list_for_each_entry(format, formats, list) + strbuf_addf(&buf, i++ ? ",%s" : "%s", + format->name); + + str = strbuf_detach(&buf, NULL); + strbuf_release(&buf); + + return str; +} + /* * Setup one of config[12] attr members based on the * user input data - term parameter. @@ -587,11 +656,11 @@ static int pmu_config_term(struct list_head *formats, struct perf_event_attr *attr, struct parse_events_term *term, struct list_head *head_terms, - bool zero) + bool zero, struct parse_events_error *err) { struct perf_pmu_format *format; __u64 *vp; - __u64 val; + __u64 val, max_val; /* * If this is a parameter we've already used for parameterized-eval, @@ -611,6 +680,14 @@ static int pmu_config_term(struct list_head *formats, if (!format) { if (verbose) printf("Invalid event/parameter '%s'\n", term->config); + if (err) { + char *pmu_term = pmu_formats_string(formats); + + err->idx = term->err_term; + err->str = strdup("unknown term"); + err->help = parse_events_formats_error_string(pmu_term); + free(pmu_term); + } return -EINVAL; } @@ -636,9 +713,14 @@ static int pmu_config_term(struct list_head *formats, val = term->val.num; else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) { if (strcmp(term->val.str, "?")) { - if (verbose) + if (verbose) { pr_info("Invalid sysfs entry %s=%s\n", term->config, term->val.str); + } + if (err) { + err->idx = term->err_val; + err->str = strdup("expected numeric value"); + } return -EINVAL; } @@ -647,6 +729,22 @@ static int pmu_config_term(struct list_head *formats, } else return -EINVAL; + max_val = pmu_format_max_value(format->bits); + if (val > max_val) { + if (err) { + err->idx = term->err_val; + if (asprintf(&err->str, + "value too big for format, maximum is %llu", + (unsigned long long)max_val) < 0) + err->str = strdup("value too big for format"); + return -EINVAL; + } + /* + * Assume we don't care if !err, in which case the value will be + * silently truncated. 
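+ * (pmu_format_value() only writes the bits the format covers.)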
+ */ + } + pmu_format_value(format->bits, val, vp, zero); return 0; } @@ -654,12 +752,13 @@ static int pmu_config_term(struct list_head *formats, int perf_pmu__config_terms(struct list_head *formats, struct perf_event_attr *attr, struct list_head *head_terms, - bool zero) + bool zero, struct parse_events_error *err) { struct parse_events_term *term; list_for_each_entry(term, head_terms, list) { - if (pmu_config_term(formats, attr, term, head_terms, zero)) + if (pmu_config_term(formats, attr, term, head_terms, + zero, err)) return -EINVAL; } @@ -672,12 +771,14 @@ int perf_pmu__config_terms(struct list_head *formats, * 2) pmu format definitions - specified by pmu parameter */ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, - struct list_head *head_terms) + struct list_head *head_terms, + struct parse_events_error *err) { bool zero = !!pmu->default_config; attr->type = pmu->type; - return perf_pmu__config_terms(&pmu->format, attr, head_terms, zero); + return perf_pmu__config_terms(&pmu->format, attr, head_terms, + zero, err); } static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu, @@ -907,7 +1008,8 @@ void print_pmu_events(const char *event_glob, bool name_only) goto out_enomem; j++; } - if (pmu->selectable) { + if (pmu->selectable && + (event_glob == NULL || strglobmatch(pmu->name, event_glob))) { char *s; if (asprintf(&s, "%s//", pmu->name) < 0) goto out_enomem; @@ -925,7 +1027,7 @@ void print_pmu_events(const char *event_glob, bool name_only) printf(" %-50s [Kernel PMU event]\n", aliases[j]); printed++; } - if (printed) + if (printed && pager_in_use()) printf("\n"); out_free: for (j = 0; j < len; j++) diff --git a/kernel/tools/perf/util/pmu.h b/kernel/tools/perf/util/pmu.h index 6b1249fbd..5d7e84466 100644 --- a/kernel/tools/perf/util/pmu.h +++ b/kernel/tools/perf/util/pmu.h @@ -4,6 +4,7 @@ #include #include #include +#include "parse-events.h" enum { PERF_PMU_FORMAT_VALUE_CONFIG, @@ -47,11 +48,13 @@ struct perf_pmu_alias { struct perf_pmu *perf_pmu__find(const char *name); int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, - struct list_head *head_terms); + struct list_head *head_terms, + struct parse_events_error *error); int perf_pmu__config_terms(struct list_head *formats, struct perf_event_attr *attr, struct list_head *head_terms, - bool zero); + bool zero, struct parse_events_error *error); +__u64 perf_pmu__format_bits(struct list_head *formats, const char *name); int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms, struct perf_pmu_info *info); struct list_head *perf_pmu__alias(struct perf_pmu *pmu, diff --git a/kernel/tools/perf/util/probe-event.c b/kernel/tools/perf/util/probe-event.c index d05b77cf3..03875f915 100644 --- a/kernel/tools/perf/util/probe-event.c +++ b/kernel/tools/perf/util/probe-event.c @@ -40,25 +40,22 @@ #include "color.h" #include "symbol.h" #include "thread.h" -#include -#include +#include #include "trace-event.h" /* For __maybe_unused */ #include "probe-event.h" #include "probe-finder.h" +#include "probe-file.h" #include "session.h" #define MAX_CMDLEN 256 #define PERFPROBE_GROUP "probe" bool probe_event_dry_run; /* Dry run flag */ +struct probe_conf probe_conf; #define semantic_error(msg ...) pr_err("Semantic error :" msg) -/* If there is no space to write, returns -E2BIG. */ -static int e_snprintf(char *str, size_t size, const char *format, ...) - __attribute__((format(printf, 3, 4))); - -static int e_snprintf(char *str, size_t size, const char *format, ...) 
+int e_snprintf(char *str, size_t size, const char *format, ...) { int ret; va_list ap; @@ -71,11 +68,10 @@ static int e_snprintf(char *str, size_t size, const char *format, ...) } static char *synthesize_perf_probe_point(struct perf_probe_point *pp); -static void clear_probe_trace_event(struct probe_trace_event *tev); static struct machine *host_machine; /* Initialize symbol maps and path of vmlinux/modules */ -static int init_symbol_maps(bool user_only) +int init_probe_symbol_maps(bool user_only) { int ret; @@ -105,7 +101,7 @@ out: return ret; } -static void exit_symbol_maps(void) +void exit_probe_symbol_maps(void) { if (host_machine) { machine__delete(host_machine); @@ -130,17 +126,19 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) { /* kmap->ref_reloc_sym should be set if host_machine is initialized */ struct kmap *kmap; + struct map *map = machine__kernel_map(host_machine); - if (map__load(host_machine->vmlinux_maps[MAP__FUNCTION], NULL) < 0) + if (map__load(map, NULL) < 0) return NULL; - kmap = map__kmap(host_machine->vmlinux_maps[MAP__FUNCTION]); + kmap = map__kmap(map); if (!kmap) return NULL; return kmap->ref_reloc_sym; } -static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc) +static int kernel_get_symbol_address_by_name(const char *name, u64 *addr, + bool reloc, bool reladdr) { struct ref_reloc_sym *reloc_sym; struct symbol *sym; @@ -149,30 +147,32 @@ static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc) /* ref_reloc_sym is just a label. Need a special fix*/ reloc_sym = kernel_get_ref_reloc_sym(); if (reloc_sym && strcmp(name, reloc_sym->name) == 0) - return (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr; + *addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr; else { sym = __find_kernel_function_by_name(name, &map); - if (sym) - return map->unmap_ip(map, sym->start) - - ((reloc) ? 0 : map->reloc); + if (!sym) + return -ENOENT; + *addr = map->unmap_ip(map, sym->start) - + ((reloc) ? 0 : map->reloc) - + ((reladdr) ? 
map->start : 0); } return 0; } static struct map *kernel_get_module_map(const char *module) { - struct rb_node *nd; struct map_groups *grp = &host_machine->kmaps; + struct maps *maps = &grp->maps[MAP__FUNCTION]; + struct map *pos; /* A file path -- this is an offline module */ if (module && strchr(module, '/')) - return machine__new_module(host_machine, 0, module); + return machine__findnew_module_map(host_machine, 0, module); if (!module) module = "kernel"; - for (nd = rb_first(&grp->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) { - struct map *pos = rb_entry(nd, struct map, rb_node); + for (pos = maps__first(maps); pos; pos = map__next(pos)) { if (strncmp(pos->dso->short_name + 1, module, pos->dso->short_name_len - 2) == 0) { return pos; @@ -194,52 +194,11 @@ static void put_target_map(struct map *map, bool user) { if (map && user) { /* Only the user map needs to be released */ - dso__delete(map->dso); - map__delete(map); + map__put(map); } } -static struct dso *kernel_get_module_dso(const char *module) -{ - struct dso *dso; - struct map *map; - const char *vmlinux_name; - - if (module) { - list_for_each_entry(dso, &host_machine->kernel_dsos.head, - node) { - if (strncmp(dso->short_name + 1, module, - dso->short_name_len - 2) == 0) - goto found; - } - pr_debug("Failed to find module %s.\n", module); - return NULL; - } - - map = host_machine->vmlinux_maps[MAP__FUNCTION]; - dso = map->dso; - - vmlinux_name = symbol_conf.vmlinux_name; - if (vmlinux_name) { - if (dso__load_vmlinux(dso, map, vmlinux_name, false, NULL) <= 0) - return NULL; - } else { - if (dso__load_vmlinux_path(dso, map, NULL) <= 0) { - pr_debug("Failed to load kernel map.\n"); - return NULL; - } - } -found: - return dso; -} - -const char *kernel_get_module_path(const char *module) -{ - struct dso *dso = kernel_get_module_dso(module); - return (dso) ? 
dso->long_name : NULL; -} - static int convert_exec_to_group(const char *exec, char **result) { char *ptr1, *ptr2, *exec_copy; @@ -286,7 +245,62 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs) clear_probe_trace_event(tevs + i); } +static bool kprobe_blacklist__listed(unsigned long address); +static bool kprobe_warn_out_range(const char *symbol, unsigned long address) +{ + u64 etext_addr = 0; + int ret; + + /* Get the address of _etext for checking non-probable text symbol */ + ret = kernel_get_symbol_address_by_name("_etext", &etext_addr, + false, false); + + if (ret == 0 && etext_addr < address) + pr_warning("%s is out of .text, skip it.\n", symbol); + else if (kprobe_blacklist__listed(address)) + pr_warning("%s is blacklisted function, skip it.\n", symbol); + else + return false; + + return true; +} + #ifdef HAVE_DWARF_SUPPORT + +static int kernel_get_module_dso(const char *module, struct dso **pdso) +{ + struct dso *dso; + struct map *map; + const char *vmlinux_name; + int ret = 0; + + if (module) { + char module_name[128]; + + snprintf(module_name, sizeof(module_name), "[%s]", module); + map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name); + if (map) { + dso = map->dso; + goto found; + } + pr_debug("Failed to find module %s.\n", module); + return -ENOENT; + } + + map = machine__kernel_map(host_machine); + dso = map->dso; + + vmlinux_name = symbol_conf.vmlinux_name; + dso->load_errno = 0; + if (vmlinux_name) + ret = dso__load_vmlinux(dso, map, vmlinux_name, false, NULL); + else + ret = dso__load_vmlinux_path(dso, map, NULL); +found: + *pdso = dso; + return ret; +} + /* * Some binaries like glibc have special symbols which are on the symbol * table, but not in the debuginfo. If we can find the address of the @@ -344,15 +358,14 @@ out: static int get_alternative_probe_event(struct debuginfo *dinfo, struct perf_probe_event *pev, - struct perf_probe_point *tmp, - const char *target) + struct perf_probe_point *tmp) { int ret; memcpy(tmp, &pev->point, sizeof(*tmp)); memset(&pev->point, 0, sizeof(pev->point)); ret = find_alternative_probe_point(dinfo, tmp, &pev->point, - target, pev->uprobes); + pev->target, pev->uprobes); if (ret < 0) memcpy(&pev->point, tmp, sizeof(*tmp)); @@ -390,16 +403,25 @@ static int get_alternative_line_range(struct debuginfo *dinfo, static struct debuginfo *open_debuginfo(const char *module, bool silent) { const char *path = module; - struct debuginfo *ret; + char reason[STRERR_BUFSIZE]; + struct debuginfo *ret = NULL; + struct dso *dso = NULL; + int err; if (!module || !strchr(module, '/')) { - path = kernel_get_module_path(module); - if (!path) { + err = kernel_get_module_dso(module, &dso); + if (err < 0) { + if (!dso || dso->load_errno == 0) { + if (!strerror_r(-err, reason, STRERR_BUFSIZE)) + strcpy(reason, "(unknown)"); + } else + dso__strerror_load(dso, reason, STRERR_BUFSIZE); if (!silent) - pr_err("Failed to find path of %s module.\n", - module ?: "kernel"); + pr_err("Failed to find the path for %s: %s\n", + module ?: "kernel", reason); return NULL; } + path = dso->long_name; } ret = debuginfo__new(path); if (!ret && !silent) { @@ -413,6 +435,44 @@ static struct debuginfo *open_debuginfo(const char *module, bool silent) return ret; } +/* For caching the last debuginfo */ +static struct debuginfo *debuginfo_cache; +static char *debuginfo_cache_path; + +static struct debuginfo *debuginfo_cache__open(const char *module, bool silent) +{ + const char *path = module; + + /* If the module is NULL, it 
should be the kernel. */ + if (!module) + path = "kernel"; + + if (debuginfo_cache_path && !strcmp(debuginfo_cache_path, path)) + goto out; + + /* Copy module path */ + free(debuginfo_cache_path); + debuginfo_cache_path = strdup(path); + if (!debuginfo_cache_path) { + debuginfo__delete(debuginfo_cache); + debuginfo_cache = NULL; + goto out; + } + + debuginfo_cache = open_debuginfo(module, silent); + if (!debuginfo_cache) + zfree(&debuginfo_cache_path); +out: + return debuginfo_cache; +} + +static void debuginfo_cache__exit(void) +{ + debuginfo__delete(debuginfo_cache); + debuginfo_cache = NULL; + zfree(&debuginfo_cache_path); +} + static int get_text_start_address(const char *exec, unsigned long *address) { @@ -464,9 +524,11 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp, if (ret < 0) goto error; addr += stext; - } else { - addr = kernel_get_symbol_address_by_name(tp->symbol, false); - if (addr == 0) + } else if (tp->symbol) { + /* If the module is given, this returns relative address */ + ret = kernel_get_symbol_address_by_name(tp->symbol, &addr, + false, !!tp->module); + if (ret != 0) goto error; addr += tp->offset; } @@ -474,12 +536,11 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp, pr_debug("try to find information at %" PRIx64 " in %s\n", addr, tp->module ? : "kernel"); - dinfo = open_debuginfo(tp->module, verbose == 0); - if (dinfo) { + dinfo = debuginfo_cache__open(tp->module, verbose == 0); + if (dinfo) ret = debuginfo__find_probe_point(dinfo, (unsigned long)addr, pp); - debuginfo__delete(dinfo); - } else + else ret = -ENOENT; if (ret > 0) { @@ -558,7 +619,7 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs, { struct ref_reloc_sym *reloc_sym; char *tmp; - int i; + int i, skipped = 0; if (uprobe) return add_exec_to_probe_trace_events(tevs, ntevs, module); @@ -574,31 +635,40 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs, } for (i = 0; i < ntevs; i++) { - if (tevs[i].point.address && !tevs[i].point.retprobe) { + if (!tevs[i].point.address || tevs[i].point.retprobe) + continue; + /* If we found a wrong one, mark it by NULL symbol */ + if (kprobe_warn_out_range(tevs[i].point.symbol, + tevs[i].point.address)) { + tmp = NULL; + skipped++; + } else { tmp = strdup(reloc_sym->name); if (!tmp) return -ENOMEM; - free(tevs[i].point.symbol); - tevs[i].point.symbol = tmp; - tevs[i].point.offset = tevs[i].point.address - - reloc_sym->unrelocated_addr; } + /* If we have no realname, use symbol for it */ + if (!tevs[i].point.realname) + tevs[i].point.realname = tevs[i].point.symbol; + else + free(tevs[i].point.symbol); + tevs[i].point.symbol = tmp; + tevs[i].point.offset = tevs[i].point.address - + reloc_sym->unrelocated_addr; } - return 0; + return skipped; } /* Try to find perf_probe_event with debuginfo */ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, - struct probe_trace_event **tevs, - int max_tevs, const char *target) + struct probe_trace_event **tevs) { bool need_dwarf = perf_probe_event_need_dwarf(pev); struct perf_probe_point tmp; struct debuginfo *dinfo; int ntevs, ret = 0; - dinfo = open_debuginfo(target, !need_dwarf); - + dinfo = open_debuginfo(pev->target, !need_dwarf); if (!dinfo) { if (need_dwarf) return -ENOENT; @@ -608,13 +678,12 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, pr_debug("Try to find probe point from debuginfo.\n"); /* Searching trace events corresponding to a probe event */ - ntevs = 
debuginfo__find_trace_events(dinfo, pev, tevs, max_tevs); + ntevs = debuginfo__find_trace_events(dinfo, pev, tevs); if (ntevs == 0) { /* Not found, retry with an alternative */ - ret = get_alternative_probe_event(dinfo, pev, &tmp, target); + ret = get_alternative_probe_event(dinfo, pev, &tmp); if (!ret) { - ntevs = debuginfo__find_trace_events(dinfo, pev, - tevs, max_tevs); + ntevs = debuginfo__find_trace_events(dinfo, pev, tevs); /* * Write back to the original probe_event for * setting appropriate (user given) event name @@ -629,12 +698,15 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, if (ntevs > 0) { /* Succeeded to find trace events */ pr_debug("Found %d probe_trace_events.\n", ntevs); ret = post_process_probe_trace_events(*tevs, ntevs, - target, pev->uprobes); - if (ret < 0) { + pev->target, pev->uprobes); + if (ret < 0 || ret == ntevs) { clear_probe_trace_events(*tevs, ntevs); zfree(tevs); } - return ret < 0 ? ret : ntevs; + if (ret != ntevs) + return ret < 0 ? ret : ntevs; + ntevs = 0; + /* Fall through */ } if (ntevs == 0) { /* No error but failed to find probe point. */ @@ -644,9 +716,10 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, } /* Error path : ntevs < 0 */ pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs); - if (ntevs == -EBADF) { - pr_warning("Warning: No dwarf info found in the vmlinux - " - "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n"); + if (ntevs < 0) { + if (ntevs == -EBADF) + pr_warning("Warning: No dwarf info found in the vmlinux - " + "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n"); if (!need_dwarf) { pr_debug("Trying to use symbols.\n"); return 0; @@ -798,19 +871,18 @@ int show_line_range(struct line_range *lr, const char *module, bool user) { int ret; - ret = init_symbol_maps(user); + ret = init_probe_symbol_maps(user); if (ret < 0) return ret; ret = __show_line_range(lr, module, user); - exit_symbol_maps(); + exit_probe_symbol_maps(); return ret; } static int show_available_vars_at(struct debuginfo *dinfo, struct perf_probe_event *pev, - int max_vls, struct strfilter *_filter, - bool externs, const char *target) + struct strfilter *_filter) { char *buf; int ret, i, nvars; @@ -824,13 +896,12 @@ static int show_available_vars_at(struct debuginfo *dinfo, return -EINVAL; pr_debug("Searching variables at %s\n", buf); - ret = debuginfo__find_available_vars_at(dinfo, pev, &vls, - max_vls, externs); + ret = debuginfo__find_available_vars_at(dinfo, pev, &vls); if (!ret) { /* Not found, retry with an alternative */ - ret = get_alternative_probe_event(dinfo, pev, &tmp, target); + ret = get_alternative_probe_event(dinfo, pev, &tmp); if (!ret) { ret = debuginfo__find_available_vars_at(dinfo, pev, - &vls, max_vls, externs); + &vls); /* Release the old probe_point */ clear_perf_probe_point(&tmp); } @@ -877,17 +948,16 @@ end: /* Show available variables on given probe point */ int show_available_vars(struct perf_probe_event *pevs, int npevs, - int max_vls, const char *module, - struct strfilter *_filter, bool externs) + struct strfilter *_filter) { int i, ret = 0; struct debuginfo *dinfo; - ret = init_symbol_maps(pevs->uprobes); + ret = init_probe_symbol_maps(pevs->uprobes); if (ret < 0) return ret; - dinfo = open_debuginfo(module, false); + dinfo = open_debuginfo(pevs->target, false); if (!dinfo) { ret = -ENOENT; goto out; @@ -896,17 +966,20 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs, setup_pager(); for (i = 0; i < npevs && ret >= 0; i++) - ret = 
show_available_vars_at(dinfo, &pevs[i], max_vls, _filter, - externs, module); + ret = show_available_vars_at(dinfo, &pevs[i], _filter); debuginfo__delete(dinfo); out: - exit_symbol_maps(); + exit_probe_symbol_maps(); return ret; } #else /* !HAVE_DWARF_SUPPORT */ +static void debuginfo_cache__exit(void) +{ +} + static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp __maybe_unused, struct perf_probe_point *pp __maybe_unused, @@ -916,9 +989,7 @@ find_perf_probe_point_from_dwarf(struct probe_trace_point *tp __maybe_unused, } static int try_to_find_probe_trace_events(struct perf_probe_event *pev, - struct probe_trace_event **tevs __maybe_unused, - int max_tevs __maybe_unused, - const char *target __maybe_unused) + struct probe_trace_event **tevs __maybe_unused) { if (perf_probe_event_need_dwarf(pev)) { pr_warning("Debuginfo-analysis is not supported.\n"); @@ -937,10 +1008,8 @@ int show_line_range(struct line_range *lr __maybe_unused, } int show_available_vars(struct perf_probe_event *pevs __maybe_unused, - int npevs __maybe_unused, int max_vls __maybe_unused, - const char *module __maybe_unused, - struct strfilter *filter __maybe_unused, - bool externs __maybe_unused) + int npevs __maybe_unused, + struct strfilter *filter __maybe_unused) { pr_warning("Debuginfo-analysis is not supported.\n"); return -ENOSYS; @@ -980,6 +1049,18 @@ static int parse_line_num(char **ptr, int *val, const char *what) return 0; } +/* Check the name is good for event, group or function */ +static bool is_c_func_name(const char *name) +{ + if (!isalpha(*name) && *name != '_') + return false; + while (*++name != '\0') { + if (!isalpha(*name) && !isdigit(*name) && *name != '_') + return false; + } + return true; +} + /* * Stuff 'lr' according to the line range described by 'arg'. * The line range syntax is described by: @@ -1048,10 +1129,15 @@ int parse_line_range_desc(const char *arg, struct line_range *lr) goto err; } lr->function = name; - } else if (strchr(name, '.')) + } else if (strchr(name, '/') || strchr(name, '.')) lr->file = name; - else + else if (is_c_func_name(name))/* We reuse it for checking funcname */ lr->function = name; + else { /* Invalid name */ + semantic_error("'%s' is not a valid function name.\n", name); + err = -EINVAL; + goto err; + } return 0; err: @@ -1059,24 +1145,13 @@ err: return err; } -/* Check the name is good for event/group */ -static bool check_event_name(const char *name) -{ - if (!isalpha(*name) && *name != '_') - return false; - while (*++name != '\0') { - if (!isalpha(*name) && !isdigit(*name) && *name != '_') - return false; - } - return true; -} - /* Parse probepoint definition. */ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev) { struct perf_probe_point *pp = &pev->point; char *ptr, *tmp; char c, nc = 0; + bool file_spec = false; /* * * perf probe [EVENT=]SRC[:LN|;PTN] @@ -1095,7 +1170,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev) semantic_error("Group name is not supported yet.\n"); return -ENOTSUP; } - if (!check_event_name(arg)) { + if (!is_c_func_name(arg)) { semantic_error("%s is bad for event name -it must " "follow C symbol-naming rule.\n", arg); return -EINVAL; @@ -1107,22 +1182,60 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev) arg = tmp; } + /* + * Check arg is function or file name and copy it. 
+	 *
+	 * We consider arg to be a file spec if and only if it satisfies
+	 * all of the below criteria:
+	 * - it does not include any of "+@%",
+	 * - it includes one of ":;", and
+	 * - it has a period '.' in the name.
+	 *
+	 * Otherwise, we consider arg to be a function specification.
+	 */
+	if (!strpbrk(arg, "+@%") && (ptr = strpbrk(arg, ";:")) != NULL) {
+		/* This is a file spec if it includes a '.' before ; or : */
+		if (memchr(arg, '.', ptr - arg))
+			file_spec = true;
+	}
+
 	ptr = strpbrk(arg, ";:+@%");
 	if (ptr) {
 		nc = *ptr;
 		*ptr++ = '\0';
 	}
 
-	tmp = strdup(arg);
-	if (tmp == NULL)
-		return -ENOMEM;
+	if (arg[0] == '\0')
+		tmp = NULL;
+	else {
+		tmp = strdup(arg);
+		if (tmp == NULL)
+			return -ENOMEM;
+	}
 
-	/* Check arg is function or file and copy it */
-	if (strchr(tmp, '.'))	/* File */
+	if (file_spec)
 		pp->file = tmp;
-	else	/* Function */
+	else {
 		pp->function = tmp;
+		/*
+		 * Keep pp->function even if this is an absolute address,
+		 * so it can mark whether abs_address is valid.
+		 * This makes 'perf probe lib.bin 0x0' possible.
+		 *
+		 * Note that checking the length of tmp is not needed
+		 * because when we access tmp[1] we know tmp[0] is '0',
+		 * so tmp[1] should always be valid (but could be '\0').
+		 */
+		if (tmp && !strncmp(tmp, "0x", 2)) {
+			pp->abs_address = strtoul(pp->function, &tmp, 0);
+			if (*tmp != '\0') {
+				semantic_error("Invalid absolute address.\n");
+				return -EINVAL;
+			}
+		}
+	}
+
 	/* Parse other options */
 	while (ptr) {
 		arg = ptr;
@@ -1384,8 +1497,7 @@ bool perf_probe_event_need_dwarf(struct perf_probe_event *pev)
 }
 
 /* Parse probe_events event into struct probe_point */
-static int parse_probe_trace_command(const char *cmd,
-				     struct probe_trace_event *tev)
+int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev)
 {
 	struct probe_trace_point *tp = &tev->point;
 	char pr;
@@ -1440,9 +1552,31 @@ static int parse_probe_trace_command(const char *cmd,
 	} else
 		p = argv[1];
 	fmt1_str = strtok_r(p, "+", &fmt);
-	if (fmt1_str[0] == '0')	/* only the address started with 0x */
-		tp->address = strtoul(fmt1_str, NULL, 0);
-	else {
+	/* only the address started with 0x */
+	if (fmt1_str[0] == '0') {
+		/*
+		 * Fix a special case:
+		 * if address == 0, the kernel reports something like:
+		 * p:probe_libc/abs_0 /lib/libc-2.18.so:0x	(null) arg1=%ax
+		 * Newer kernels may fix that, but we want to
+		 * support old kernels too.
+		 */
+		if (strcmp(fmt1_str, "0x") == 0) {
+			if (!argv[2] || strcmp(argv[2], "(null)")) {
+				ret = -EINVAL;
+				goto out;
+			}
+			tp->address = 0;
+
+			free(argv[2]);
+			for (i = 2; argv[i + 1] != NULL; i++)
+				argv[i] = argv[i + 1];
+
+			argv[i] = NULL;
+			argc -= 1;
+		} else
+			tp->address = strtoul(fmt1_str, NULL, 0);
+	} else {
 		/* Only the symbol-based probe has offset */
 		tp->symbol = strdup(fmt1_str);
 		if (tp->symbol == NULL) {
@@ -1699,14 +1833,29 @@ char *synthesize_probe_trace_command(struct probe_trace_event *tev)
 	if (len <= 0)
 		goto error;
 
-	/* Uprobes must have tp->address and tp->module */
-	if (tev->uprobes && (!tp->address || !tp->module))
+	/* Uprobes must have tp->module */
+	if (tev->uprobes && !tp->module)
 		goto error;
+	/*
+	 * If tp->address == 0, then this point must be an
+	 * absolute address uprobe.
+	 * try_to_find_absolute_address() should have set
+	 * tp->symbol to "0x0".
+	 */
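For orientation, the definitions this function synthesizes for the kprobe_events/uprobe_events files look like the following (composed examples following the formats just below, not output captured from this patch):

	p:probe/vfs_read _text+790920
	p:probe_libc/malloc /usr/lib64/libc-2.18.so:0x83e80
	p:probe_libc/abs_0 /usr/lib64/libc-2.18.so:0x0

The last line is the absolute-address uprobe case, for which try_to_find_absolute_address() sets tp->symbol to "0x0".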
+	if (tev->uprobes && !tp->address) {
+		if (!tp->symbol || strcmp(tp->symbol, "0x0"))
+			goto error;
+	}
 
 	/* Use the tp->address for uprobes */
 	if (tev->uprobes)
 		ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s:0x%lx",
 				 tp->module, tp->address);
+	else if (!strncmp(tp->symbol, "0x", 2))
+		/* Absolute address. See try_to_find_absolute_address() */
+		ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s%s0x%lx",
+				 tp->module ?: "", tp->module ? ":" : "",
+				 tp->address);
 	else
 		ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s%s%s+%lu",
 				 tp->module ?: "", tp->module ? ":" : "",
@@ -1736,22 +1885,26 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
 {
 	struct symbol *sym = NULL;
 	struct map *map;
-	u64 addr;
+	u64 addr = tp->address;
 	int ret = -ENOENT;
 
 	if (!is_kprobe) {
 		map = dso__new_map(tp->module);
 		if (!map)
 			goto out;
-		addr = tp->address;
 		sym = map__find_symbol(map, addr, NULL);
 	} else {
-		addr = kernel_get_symbol_address_by_name(tp->symbol, true);
+		if (tp->symbol && !addr) {
+			if (kernel_get_symbol_address_by_name(tp->symbol,
+							&addr, true, false) < 0)
+				goto out;
+		}
 		if (addr) {
 			addr += tp->offset;
 			sym = __find_kernel_function(addr, &map);
 		}
 	}
+
 	if (!sym)
 		goto out;
 
@@ -1762,16 +1915,15 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
 
 out:
 	if (map && !is_kprobe) {
-		dso__delete(map->dso);
-		map__delete(map);
+		map__put(map);
 	}
 	return ret;
 }
 
 static int convert_to_perf_probe_point(struct probe_trace_point *tp,
-					struct perf_probe_point *pp,
-					bool is_kprobe)
+				       struct perf_probe_point *pp,
+				       bool is_kprobe)
 {
 	char buf[128];
 	int ret;
@@ -1788,7 +1940,7 @@ static int convert_to_perf_probe_point(struct probe_trace_point *tp,
 	if (tp->symbol) {
 		pp->function = strdup(tp->symbol);
 		pp->offset = tp->offset;
-	} else if (!tp->module && !is_kprobe) {
+	} else {
 		ret = e_snprintf(buf, 128, "0x%" PRIx64, (u64)tp->address);
 		if (ret < 0)
 			return ret;
@@ -1869,7 +2021,7 @@ void clear_perf_probe_event(struct perf_probe_event *pev)
 	memset(pev, 0, sizeof(*pev));
 }
 
-static void clear_probe_trace_event(struct probe_trace_event *tev)
+void clear_probe_trace_event(struct probe_trace_event *tev)
 {
 	struct probe_trace_arg_ref *ref, *next;
 	int i;
@@ -1877,6 +2029,7 @@ static void clear_probe_trace_event(struct probe_trace_event *tev)
 	free(tev->event);
 	free(tev->group);
 	free(tev->point.symbol);
+	free(tev->point.realname);
 	free(tev->point.module);
 	for (i = 0; i < tev->nargs; i++) {
 		free(tev->args[i].name);
@@ -1893,119 +2046,6 @@ static void clear_probe_trace_event(struct probe_trace_event *tev)
 	memset(tev, 0, sizeof(*tev));
 }
 
-static void print_open_warning(int err, bool is_kprobe)
-{
-	char sbuf[STRERR_BUFSIZE];
-
-	if (err == -ENOENT) {
-		const char *config;
-
-		if (!is_kprobe)
-			config = "CONFIG_UPROBE_EVENTS";
-		else
-			config = "CONFIG_KPROBE_EVENTS";
-
-		pr_warning("%cprobe_events file does not exist"
-			   " - please rebuild kernel with %s.\n",
-			   is_kprobe ? 'k' : 'u', config);
-	} else if (err == -ENOTSUP)
-		pr_warning("Tracefs or debugfs is not mounted.\n");
-	else
-		pr_warning("Failed to open %cprobe_events: %s\n",
-			   is_kprobe ? 'k' : 'u',
-			   strerror_r(-err, sbuf, sizeof(sbuf)));
-}
-
-static void print_both_open_warning(int kerr, int uerr)
-{
-	/* Both kprobes and uprobes are disabled, warn it.
*/ - if (kerr == -ENOTSUP && uerr == -ENOTSUP) - pr_warning("Tracefs or debugfs is not mounted.\n"); - else if (kerr == -ENOENT && uerr == -ENOENT) - pr_warning("Please rebuild kernel with CONFIG_KPROBE_EVENTS " - "or/and CONFIG_UPROBE_EVENTS.\n"); - else { - char sbuf[STRERR_BUFSIZE]; - pr_warning("Failed to open kprobe events: %s.\n", - strerror_r(-kerr, sbuf, sizeof(sbuf))); - pr_warning("Failed to open uprobe events: %s.\n", - strerror_r(-uerr, sbuf, sizeof(sbuf))); - } -} - -static int open_probe_events(const char *trace_file, bool readwrite) -{ - char buf[PATH_MAX]; - const char *__debugfs; - const char *tracing_dir = ""; - int ret; - - __debugfs = tracefs_find_mountpoint(); - if (__debugfs == NULL) { - tracing_dir = "tracing/"; - - __debugfs = debugfs_find_mountpoint(); - if (__debugfs == NULL) - return -ENOTSUP; - } - - ret = e_snprintf(buf, PATH_MAX, "%s/%s%s", - __debugfs, tracing_dir, trace_file); - if (ret >= 0) { - pr_debug("Opening %s write=%d\n", buf, readwrite); - if (readwrite && !probe_event_dry_run) - ret = open(buf, O_RDWR, O_APPEND); - else - ret = open(buf, O_RDONLY, 0); - - if (ret < 0) - ret = -errno; - } - return ret; -} - -static int open_kprobe_events(bool readwrite) -{ - return open_probe_events("kprobe_events", readwrite); -} - -static int open_uprobe_events(bool readwrite) -{ - return open_probe_events("uprobe_events", readwrite); -} - -/* Get raw string list of current kprobe_events or uprobe_events */ -static struct strlist *get_probe_trace_command_rawlist(int fd) -{ - int ret, idx; - FILE *fp; - char buf[MAX_CMDLEN]; - char *p; - struct strlist *sl; - - sl = strlist__new(true, NULL); - - fp = fdopen(dup(fd), "r"); - while (!feof(fp)) { - p = fgets(buf, MAX_CMDLEN, fp); - if (!p) - break; - - idx = strlen(p) - 1; - if (p[idx] == '\n') - p[idx] = '\0'; - ret = strlist__add(sl, buf); - if (ret < 0) { - pr_debug("strlist__add failed (%d)\n", ret); - strlist__delete(sl); - return NULL; - } - } - fclose(fp); - - return sl; -} - struct kprobe_blacklist_node { struct list_head list; unsigned long start; @@ -2029,7 +2069,7 @@ static void kprobe_blacklist__delete(struct list_head *blacklist) static int kprobe_blacklist__load(struct list_head *blacklist) { struct kprobe_blacklist_node *node; - const char *__debugfs = debugfs_find_mountpoint(); + const char *__debugfs = debugfs__mountpoint(); char buf[PATH_MAX], *p; FILE *fp; int ret; @@ -2095,9 +2135,31 @@ kprobe_blacklist__find_by_address(struct list_head *blacklist, return NULL; } -/* Show an event */ -static int show_perf_probe_event(struct perf_probe_event *pev, - const char *module) +static LIST_HEAD(kprobe_blacklist); + +static void kprobe_blacklist__init(void) +{ + if (!list_empty(&kprobe_blacklist)) + return; + + if (kprobe_blacklist__load(&kprobe_blacklist) < 0) + pr_debug("No kprobe blacklist support, ignored\n"); +} + +static void kprobe_blacklist__release(void) +{ + kprobe_blacklist__delete(&kprobe_blacklist); +} + +static bool kprobe_blacklist__listed(unsigned long address) +{ + return !!kprobe_blacklist__find_by_address(&kprobe_blacklist, address); +} + +static int perf_probe_event__sprintf(const char *group, const char *event, + struct perf_probe_event *pev, + const char *module, + struct strbuf *result) { int i, ret; char buf[128]; @@ -2108,30 +2170,67 @@ static int show_perf_probe_event(struct perf_probe_event *pev, if (!place) return -EINVAL; - ret = e_snprintf(buf, 128, "%s:%s", pev->group, pev->event); + ret = e_snprintf(buf, 128, "%s:%s", group, event); if (ret < 0) - return ret; + goto out; - 
pr_info(" %-20s (on %s", buf, place); + strbuf_addf(result, " %-20s (on %s", buf, place); if (module) - pr_info(" in %s", module); + strbuf_addf(result, " in %s", module); if (pev->nargs > 0) { - pr_info(" with"); + strbuf_addstr(result, " with"); for (i = 0; i < pev->nargs; i++) { ret = synthesize_perf_probe_arg(&pev->args[i], buf, 128); if (ret < 0) - break; - pr_info(" %s", buf); + goto out; + strbuf_addf(result, " %s", buf); } } - pr_info(")\n"); + strbuf_addch(result, ')'); +out: free(place); return ret; } -static int __show_perf_probe_events(int fd, bool is_kprobe) +/* Show an event */ +int show_perf_probe_event(const char *group, const char *event, + struct perf_probe_event *pev, + const char *module, bool use_stdout) +{ + struct strbuf buf = STRBUF_INIT; + int ret; + + ret = perf_probe_event__sprintf(group, event, pev, module, &buf); + if (ret >= 0) { + if (use_stdout) + printf("%s\n", buf.buf); + else + pr_info("%s\n", buf.buf); + } + strbuf_release(&buf); + + return ret; +} + +static bool filter_probe_trace_event(struct probe_trace_event *tev, + struct strfilter *filter) +{ + char tmp[128]; + + /* At first, check the event name itself */ + if (strfilter__compare(filter, tev->event)) + return true; + + /* Next, check the combination of name and group */ + if (e_snprintf(tmp, 128, "%s:%s", tev->group, tev->event) < 0) + return false; + return strfilter__compare(filter, tmp); +} + +static int __show_perf_probe_events(int fd, bool is_kprobe, + struct strfilter *filter) { int ret = 0; struct probe_trace_event tev; @@ -2142,123 +2241,61 @@ static int __show_perf_probe_events(int fd, bool is_kprobe) memset(&tev, 0, sizeof(tev)); memset(&pev, 0, sizeof(pev)); - rawlist = get_probe_trace_command_rawlist(fd); + rawlist = probe_file__get_rawlist(fd); if (!rawlist) return -ENOMEM; strlist__for_each(ent, rawlist) { ret = parse_probe_trace_command(ent->s, &tev); if (ret >= 0) { + if (!filter_probe_trace_event(&tev, filter)) + goto next; ret = convert_to_perf_probe_event(&tev, &pev, is_kprobe); - if (ret >= 0) - ret = show_perf_probe_event(&pev, - tev.point.module); + if (ret < 0) + goto next; + ret = show_perf_probe_event(pev.group, pev.event, + &pev, tev.point.module, + true); } +next: clear_perf_probe_event(&pev); clear_probe_trace_event(&tev); if (ret < 0) break; } strlist__delete(rawlist); + /* Cleanup cached debuginfo if needed */ + debuginfo_cache__exit(); return ret; } /* List up current perf-probe events */ -int show_perf_probe_events(void) +int show_perf_probe_events(struct strfilter *filter) { int kp_fd, up_fd, ret; setup_pager(); - ret = init_symbol_maps(false); + ret = init_probe_symbol_maps(false); if (ret < 0) return ret; - kp_fd = open_kprobe_events(false); - if (kp_fd >= 0) { - ret = __show_perf_probe_events(kp_fd, true); - close(kp_fd); - if (ret < 0) - goto out; - } - - up_fd = open_uprobe_events(false); - if (kp_fd < 0 && up_fd < 0) { - print_both_open_warning(kp_fd, up_fd); - ret = kp_fd; - goto out; - } + ret = probe_file__open_both(&kp_fd, &up_fd, 0); + if (ret < 0) + return ret; - if (up_fd >= 0) { - ret = __show_perf_probe_events(up_fd, false); + if (kp_fd >= 0) + ret = __show_perf_probe_events(kp_fd, true, filter); + if (up_fd >= 0 && ret >= 0) + ret = __show_perf_probe_events(up_fd, false, filter); + if (kp_fd > 0) + close(kp_fd); + if (up_fd > 0) close(up_fd); - } -out: - exit_symbol_maps(); - return ret; -} - -/* Get current perf-probe event names */ -static struct strlist *get_probe_trace_event_names(int fd, bool include_group) -{ - char buf[128]; - struct strlist 
*sl, *rawlist; - struct str_node *ent; - struct probe_trace_event tev; - int ret = 0; + exit_probe_symbol_maps(); - memset(&tev, 0, sizeof(tev)); - rawlist = get_probe_trace_command_rawlist(fd); - if (!rawlist) - return NULL; - sl = strlist__new(true, NULL); - strlist__for_each(ent, rawlist) { - ret = parse_probe_trace_command(ent->s, &tev); - if (ret < 0) - break; - if (include_group) { - ret = e_snprintf(buf, 128, "%s:%s", tev.group, - tev.event); - if (ret >= 0) - ret = strlist__add(sl, buf); - } else - ret = strlist__add(sl, tev.event); - clear_probe_trace_event(&tev); - if (ret < 0) - break; - } - strlist__delete(rawlist); - - if (ret < 0) { - strlist__delete(sl); - return NULL; - } - return sl; -} - -static int write_probe_trace_event(int fd, struct probe_trace_event *tev) -{ - int ret = 0; - char *buf = synthesize_probe_trace_command(tev); - char sbuf[STRERR_BUFSIZE]; - - if (!buf) { - pr_debug("Failed to synthesize probe trace event.\n"); - return -EINVAL; - } - - pr_debug("Writing event: %s\n", buf); - if (!probe_event_dry_run) { - ret = write(fd, buf, strlen(buf)); - if (ret <= 0) { - ret = -errno; - pr_warning("Failed to write event: %s\n", - strerror_r(errno, sbuf, sizeof(sbuf))); - } - } - free(buf); return ret; } @@ -2266,28 +2303,41 @@ static int get_new_event_name(char *buf, size_t len, const char *base, struct strlist *namelist, bool allow_suffix) { int i, ret; + char *p, *nbase; + + if (*base == '.') + base++; + nbase = strdup(base); + if (!nbase) + return -ENOMEM; - /* Try no suffix */ - ret = e_snprintf(buf, len, "%s", base); + /* Cut off the dot suffixes (e.g. .const, .isra)*/ + p = strchr(nbase, '.'); + if (p && p != nbase) + *p = '\0'; + + /* Try no suffix number */ + ret = e_snprintf(buf, len, "%s", nbase); if (ret < 0) { pr_debug("snprintf() failed: %d\n", ret); - return ret; + goto out; } if (!strlist__has_entry(namelist, buf)) - return 0; + goto out; if (!allow_suffix) { pr_warning("Error: event \"%s\" already exists. 
" - "(Use -f to force duplicates.)\n", base); - return -EEXIST; + "(Use -f to force duplicates.)\n", buf); + ret = -EEXIST; + goto out; } /* Try to add suffix */ for (i = 1; i < MAX_EVENT_INDEX; i++) { - ret = e_snprintf(buf, len, "%s_%d", base, i); + ret = e_snprintf(buf, len, "%s_%d", nbase, i); if (ret < 0) { pr_debug("snprintf() failed: %d\n", ret); - return ret; + goto out; } if (!strlist__has_entry(namelist, buf)) break; @@ -2297,6 +2347,8 @@ static int get_new_event_name(char *buf, size_t len, const char *base, ret = -ERANGE; } +out: + free(nbase); return ret; } @@ -2321,93 +2373,84 @@ out: free(buf); } +/* Set new name from original perf_probe_event and namelist */ +static int probe_trace_event__set_name(struct probe_trace_event *tev, + struct perf_probe_event *pev, + struct strlist *namelist, + bool allow_suffix) +{ + const char *event, *group; + char buf[64]; + int ret; + + if (pev->event) + event = pev->event; + else + if (pev->point.function && + (strncmp(pev->point.function, "0x", 2) != 0) && + !strisglob(pev->point.function)) + event = pev->point.function; + else + event = tev->point.realname; + if (pev->group) + group = pev->group; + else + group = PERFPROBE_GROUP; + + /* Get an unused new event name */ + ret = get_new_event_name(buf, 64, event, + namelist, allow_suffix); + if (ret < 0) + return ret; + + event = buf; + + tev->event = strdup(event); + tev->group = strdup(group); + if (tev->event == NULL || tev->group == NULL) + return -ENOMEM; + + /* Add added event name to namelist */ + strlist__add(namelist, event); + return 0; +} + static int __add_probe_trace_events(struct perf_probe_event *pev, struct probe_trace_event *tevs, int ntevs, bool allow_suffix) { int i, fd, ret; struct probe_trace_event *tev = NULL; - char buf[64]; - const char *event, *group; struct strlist *namelist; - LIST_HEAD(blacklist); - struct kprobe_blacklist_node *node; - - if (pev->uprobes) - fd = open_uprobe_events(true); - else - fd = open_kprobe_events(true); - if (fd < 0) { - print_open_warning(fd, !pev->uprobes); + fd = probe_file__open(PF_FL_RW | (pev->uprobes ? PF_FL_UPROBE : 0)); + if (fd < 0) return fd; - } /* Get current event names */ - namelist = get_probe_trace_event_names(fd, false); + namelist = probe_file__get_namelist(fd); if (!namelist) { pr_debug("Failed to get current event list.\n"); - return -EIO; - } - /* Get kprobe blacklist if exists */ - if (!pev->uprobes) { - ret = kprobe_blacklist__load(&blacklist); - if (ret < 0) - pr_debug("No kprobe blacklist support, ignored\n"); + ret = -ENOMEM; + goto close_out; } ret = 0; - pr_info("Added new event%s\n", (ntevs > 1) ? 
"s:" : ":"); for (i = 0; i < ntevs; i++) { tev = &tevs[i]; - /* Ensure that the address is NOT blacklisted */ - node = kprobe_blacklist__find_by_address(&blacklist, - tev->point.address); - if (node) { - pr_warning("Warning: Skipped probing on blacklisted function: %s\n", node->symbol); + /* Skip if the symbol is out of .text or blacklisted */ + if (!tev->point.symbol) continue; - } - if (pev->event) - event = pev->event; - else - if (pev->point.function) - event = pev->point.function; - else - event = tev->point.symbol; - if (pev->group) - group = pev->group; - else - group = PERFPROBE_GROUP; - - /* Get an unused new event name */ - ret = get_new_event_name(buf, 64, event, - namelist, allow_suffix); + /* Set new name for tev (and update namelist) */ + ret = probe_trace_event__set_name(tev, pev, namelist, + allow_suffix); if (ret < 0) break; - event = buf; - tev->event = strdup(event); - tev->group = strdup(group); - if (tev->event == NULL || tev->group == NULL) { - ret = -ENOMEM; - break; - } - ret = write_probe_trace_event(fd, tev); + ret = probe_file__add_event(fd, tev); if (ret < 0) break; - /* Add added event name to namelist */ - strlist__add(namelist, event); - - /* Trick here - save current event/group */ - event = pev->event; - group = pev->group; - pev->event = tev->event; - pev->group = tev->group; - show_perf_probe_event(pev, tev->point.module); - /* Trick here - restore current event/group */ - pev->event = (char *)event; - pev->group = (char *)group; /* * Probes after the first probe which comes from same @@ -2420,27 +2463,28 @@ static int __add_probe_trace_events(struct perf_probe_event *pev, if (ret == -EINVAL && pev->uprobes) warn_uprobe_event_compat(tev); - /* Note that it is possible to skip all events because of blacklist */ - if (ret >= 0 && tev->event) { - /* Show how to use the event. */ - pr_info("\nYou can now use it in all perf tools, such as:\n\n"); - pr_info("\tperf record -e %s:%s -aR sleep 1\n\n", tev->group, - tev->event); - } - - kprobe_blacklist__delete(&blacklist); strlist__delete(namelist); +close_out: close(fd); return ret; } -static int find_probe_functions(struct map *map, char *name) +static int find_probe_functions(struct map *map, char *name, + struct symbol **syms) { int found = 0; struct symbol *sym; + struct rb_node *tmp; + + if (map__load(map, NULL) < 0) + return 0; - map__for_each_symbol_by_name(map, name, sym) { - found++; + map__for_each_symbol(map, sym, tmp) { + if (strglobmatch(sym->name, name)) { + found++; + if (syms && found < probe_conf.max_probes) + syms[found - 1] = sym; + } } return found; @@ -2449,47 +2493,58 @@ static int find_probe_functions(struct map *map, char *name) #define strdup_or_goto(str, label) \ ({ char *__p = strdup(str); if (!__p) goto label; __p; }) +void __weak arch__fix_tev_from_maps(struct perf_probe_event *pev __maybe_unused, + struct probe_trace_event *tev __maybe_unused, + struct map *map __maybe_unused) { } + /* * Find probe function addresses from map. 
* Return an error or the number of found probe_trace_event */ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, - struct probe_trace_event **tevs, - int max_tevs, const char *target) + struct probe_trace_event **tevs) { struct map *map = NULL; struct ref_reloc_sym *reloc_sym = NULL; struct symbol *sym; + struct symbol **syms = NULL; struct probe_trace_event *tev; struct perf_probe_point *pp = &pev->point; struct probe_trace_point *tp; int num_matched_functions; - int ret, i; + int ret, i, j, skipped = 0; - map = get_target_map(target, pev->uprobes); + map = get_target_map(pev->target, pev->uprobes); if (!map) { ret = -EINVAL; goto out; } + syms = malloc(sizeof(struct symbol *) * probe_conf.max_probes); + if (!syms) { + ret = -ENOMEM; + goto out; + } + /* * Load matched symbols: Since the different local symbols may have * same name but different addresses, this lists all the symbols. */ - num_matched_functions = find_probe_functions(map, pp->function); + num_matched_functions = find_probe_functions(map, pp->function, syms); if (num_matched_functions == 0) { pr_err("Failed to find symbol %s in %s\n", pp->function, - target ? : "kernel"); + pev->target ? : "kernel"); ret = -ENOENT; goto out; - } else if (num_matched_functions > max_tevs) { + } else if (num_matched_functions > probe_conf.max_probes) { pr_err("Too many functions matched in %s\n", - target ? : "kernel"); + pev->target ? : "kernel"); ret = -E2BIG; goto out; } - if (!pev->uprobes && !pp->retprobe) { + /* Note that the symbols in the kmodule are not relocated */ + if (!pev->uprobes && !pp->retprobe && !pev->target) { reloc_sym = kernel_get_ref_reloc_sym(); if (!reloc_sym) { pr_warning("Relocated base symbol is not found!\n"); @@ -2507,7 +2562,9 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, ret = 0; - map__for_each_symbol_by_name(map, pp->function, sym) { + for (j = 0; j < num_matched_functions; j++) { + sym = syms[j]; + tev = (*tevs) + ret; tp = &tev->point; if (ret == num_matched_functions) { @@ -2524,16 +2581,25 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, } /* Add one probe point */ tp->address = map->unmap_ip(map, sym->start) + pp->offset; - if (reloc_sym) { + + /* Check the kprobe (not in module) is within .text */ + if (!pev->uprobes && !pev->target && + kprobe_warn_out_range(sym->name, tp->address)) { + tp->symbol = NULL; /* Skip it */ + skipped++; + } else if (reloc_sym) { tp->symbol = strdup_or_goto(reloc_sym->name, nomem_out); tp->offset = tp->address - reloc_sym->addr; } else { tp->symbol = strdup_or_goto(sym->name, nomem_out); tp->offset = pp->offset; } + tp->realname = strdup_or_goto(sym->name, nomem_out); + tp->retprobe = pp->retprobe; - if (target) - tev->point.module = strdup_or_goto(target, nomem_out); + if (pev->target) + tev->point.module = strdup_or_goto(pev->target, + nomem_out); tev->uprobes = pev->uprobes; tev->nargs = pev->nargs; if (tev->nargs) { @@ -2555,10 +2621,16 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, strdup_or_goto(pev->args[i].type, nomem_out); } + arch__fix_tev_from_maps(pev, tev, map); + } + if (ret == skipped) { + ret = -ENOENT; + goto err_out; } out: put_target_map(map, pev->uprobes); + free(syms); return ret; nomem_out: @@ -2569,221 +2641,231 @@ err_out: goto out; } +static int try_to_find_absolute_address(struct perf_probe_event *pev, + struct probe_trace_event **tevs) +{ + struct perf_probe_point *pp = &pev->point; + struct probe_trace_event *tev; + struct 
probe_trace_point *tp;
+	int i, err;
+
+	if (!(pev->point.function && !strncmp(pev->point.function, "0x", 2)))
+		return -EINVAL;
+	if (perf_probe_event_need_dwarf(pev))
+		return -EINVAL;
+
+	/*
+	 * This is 'perf probe /lib/libc.so 0xabcd'. Try to probe at
+	 * absolute address.
+	 *
+	 * Only one tev can be generated by this.
+	 */
+	*tevs = zalloc(sizeof(*tev));
+	if (!*tevs)
+		return -ENOMEM;
+
+	tev = *tevs;
+	tp = &tev->point;
+
+	/*
+	 * Don't use tp->offset, use address directly, because
+	 * in synthesize_probe_trace_command() address cannot be
+	 * zero.
+	 */
+	tp->address = pev->point.abs_address;
+	tp->retprobe = pp->retprobe;
+	tev->uprobes = pev->uprobes;
+
+	err = -ENOMEM;
+	/*
+	 * Give it a '0x' leading symbol name.
+	 * In __add_probe_trace_events, a NULL symbol is interpreted as
+	 * invalid.
+	 */
+	if (asprintf(&tp->symbol, "0x%lx", tp->address) < 0)
+		goto errout;
+
+	/* For kprobe, check range */
+	if ((!tev->uprobes) &&
+	    (kprobe_warn_out_range(tev->point.symbol,
+				   tev->point.address))) {
+		err = -EACCES;
+		goto errout;
+	}
+
+	if (asprintf(&tp->realname, "abs_%lx", tp->address) < 0)
+		goto errout;
+
+	if (pev->target) {
+		tp->module = strdup(pev->target);
+		if (!tp->module)
+			goto errout;
+	}
+
+	if (pev->group) {
+		tev->group = strdup(pev->group);
+		if (!tev->group)
+			goto errout;
+	}
+
+	if (pev->event) {
+		tev->event = strdup(pev->event);
+		if (!tev->event)
+			goto errout;
+	}
+
+	tev->nargs = pev->nargs;
+	tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
+	if (!tev->args) {
+		err = -ENOMEM;
+		goto errout;
+	}
+	for (i = 0; i < tev->nargs; i++)
+		copy_to_probe_trace_arg(&tev->args[i], &pev->args[i]);
+
+	return 1;
+
+errout:
+	if (*tevs) {
+		clear_probe_trace_events(*tevs, 1);
+		*tevs = NULL;
+	}
+	return err;
+}
+
+bool __weak arch__prefers_symtab(void) { return false; }
+
 static int convert_to_probe_trace_events(struct perf_probe_event *pev,
-					 struct probe_trace_event **tevs,
-					 int max_tevs, const char *target)
+					 struct probe_trace_event **tevs)
 {
 	int ret;
 
 	if (pev->uprobes && !pev->group) {
 		/* Replace group name if not given */
-		ret = convert_exec_to_group(target, &pev->group);
+		ret = convert_exec_to_group(pev->target, &pev->group);
 		if (ret != 0) {
 			pr_warning("Failed to make a group name.\n");
 			return ret;
 		}
 	}
 
+	ret = try_to_find_absolute_address(pev, tevs);
+	if (ret > 0)
+		return ret;
+
+	if (arch__prefers_symtab() && !perf_probe_event_need_dwarf(pev)) {
+		ret = find_probe_trace_events_from_map(pev, tevs);
+		if (ret > 0)
+			return ret; /* Found in symbol table */
+	}
+
 	/* Convert perf_probe_event with debuginfo */
-	ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, target);
+	ret = try_to_find_probe_trace_events(pev, tevs);
 	if (ret != 0)
 		return ret;	/* Found in debuginfo or got an error */
 
-	return find_probe_trace_events_from_map(pev, tevs, max_tevs, target);
+	return find_probe_trace_events_from_map(pev, tevs);
 }
 
-struct __event_package {
-	struct perf_probe_event	*pev;
-	struct probe_trace_event	*tevs;
-	int				ntevs;
-};
-
-int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
-			  int max_tevs, bool force_add)
+int convert_perf_probe_events(struct perf_probe_event *pevs, int npevs)
 {
-	int i, j, ret;
-	struct __event_package *pkgs;
-
-	ret = 0;
-	pkgs = zalloc(sizeof(struct __event_package) * npevs);
-
-	if (pkgs == NULL)
-		return -ENOMEM;
-
-	ret = init_symbol_maps(pevs->uprobes);
-	if (ret < 0) {
-		free(pkgs);
-		return ret;
-	}
+	int i, ret;
 
 	/* Loop 1: convert all events */
 	for (i = 0; i < npevs; i++) {
-		pkgs[i].pev = &pevs[i];
+		/*
Init kprobe blacklist if needed */ + if (!pevs[i].uprobes) + kprobe_blacklist__init(); /* Convert with or without debuginfo */ - ret = convert_to_probe_trace_events(pkgs[i].pev, - &pkgs[i].tevs, - max_tevs, - pkgs[i].pev->target); + ret = convert_to_probe_trace_events(&pevs[i], &pevs[i].tevs); if (ret < 0) - goto end; - pkgs[i].ntevs = ret; + return ret; + pevs[i].ntevs = ret; } + /* This just release blacklist only if allocated */ + kprobe_blacklist__release(); + + return 0; +} + +int apply_perf_probe_events(struct perf_probe_event *pevs, int npevs) +{ + int i, ret = 0; /* Loop 2: add all events */ for (i = 0; i < npevs; i++) { - ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs, - pkgs[i].ntevs, force_add); + ret = __add_probe_trace_events(&pevs[i], pevs[i].tevs, + pevs[i].ntevs, + probe_conf.force_add); if (ret < 0) break; } -end: + return ret; +} + +void cleanup_perf_probe_events(struct perf_probe_event *pevs, int npevs) +{ + int i, j; + /* Loop 3: cleanup and free trace events */ for (i = 0; i < npevs; i++) { - for (j = 0; j < pkgs[i].ntevs; j++) - clear_probe_trace_event(&pkgs[i].tevs[j]); - zfree(&pkgs[i].tevs); + for (j = 0; j < pevs[i].ntevs; j++) + clear_probe_trace_event(&pevs[i].tevs[j]); + zfree(&pevs[i].tevs); + pevs[i].ntevs = 0; + clear_perf_probe_event(&pevs[i]); } - free(pkgs); - exit_symbol_maps(); - - return ret; } -static int __del_trace_probe_event(int fd, struct str_node *ent) +int add_perf_probe_events(struct perf_probe_event *pevs, int npevs) { - char *p; - char buf[128]; int ret; - /* Convert from perf-probe event to trace-probe event */ - ret = e_snprintf(buf, 128, "-:%s", ent->s); + ret = init_probe_symbol_maps(pevs->uprobes); if (ret < 0) - goto error; + return ret; - p = strchr(buf + 2, ':'); - if (!p) { - pr_debug("Internal error: %s should have ':' but not.\n", - ent->s); - ret = -ENOTSUP; - goto error; - } - *p = '/'; + ret = convert_perf_probe_events(pevs, npevs); + if (ret == 0) + ret = apply_perf_probe_events(pevs, npevs); - pr_debug("Writing event: %s\n", buf); - ret = write(fd, buf, strlen(buf)); - if (ret < 0) { - ret = -errno; - goto error; - } + cleanup_perf_probe_events(pevs, npevs); - pr_info("Removed event: %s\n", ent->s); - return 0; -error: - pr_warning("Failed to delete event: %s\n", - strerror_r(-ret, buf, sizeof(buf))); + exit_probe_symbol_maps(); return ret; } -static int del_trace_probe_event(int fd, const char *buf, - struct strlist *namelist) +int del_perf_probe_events(struct strfilter *filter) { - struct str_node *ent, *n; - int ret = -1; - - if (strpbrk(buf, "*?")) { /* Glob-exp */ - strlist__for_each_safe(ent, n, namelist) - if (strglobmatch(ent->s, buf)) { - ret = __del_trace_probe_event(fd, ent); - if (ret < 0) - break; - strlist__remove(namelist, ent); - } - } else { - ent = strlist__find(namelist, buf); - if (ent) { - ret = __del_trace_probe_event(fd, ent); - if (ret >= 0) - strlist__remove(namelist, ent); - } - } + int ret, ret2, ufd = -1, kfd = -1; + char *str = strfilter__string(filter); - return ret; -} - -int del_perf_probe_events(struct strlist *dellist) -{ - int ret = -1, ufd = -1, kfd = -1; - char buf[128]; - const char *group, *event; - char *p, *str; - struct str_node *ent; - struct strlist *namelist = NULL, *unamelist = NULL; + if (!str) + return -EINVAL; /* Get current event names */ - kfd = open_kprobe_events(true); - if (kfd >= 0) - namelist = get_probe_trace_event_names(kfd, true); - - ufd = open_uprobe_events(true); - if (ufd >= 0) - unamelist = get_probe_trace_event_names(ufd, true); + ret = 
probe_file__open_both(&kfd, &ufd, PF_FL_RW); + if (ret < 0) + goto out; - if (kfd < 0 && ufd < 0) { - print_both_open_warning(kfd, ufd); + ret = probe_file__del_events(kfd, filter); + if (ret < 0 && ret != -ENOENT) goto error; - } - if (namelist == NULL && unamelist == NULL) + ret2 = probe_file__del_events(ufd, filter); + if (ret2 < 0 && ret2 != -ENOENT) { + ret = ret2; goto error; - - strlist__for_each(ent, dellist) { - str = strdup(ent->s); - if (str == NULL) { - ret = -ENOMEM; - goto error; - } - pr_debug("Parsing: %s\n", str); - p = strchr(str, ':'); - if (p) { - group = str; - *p = '\0'; - event = p + 1; - } else { - group = "*"; - event = str; - } - - ret = e_snprintf(buf, 128, "%s:%s", group, event); - if (ret < 0) { - pr_err("Failed to copy event."); - free(str); - goto error; - } - - pr_debug("Group: %s, Event: %s\n", group, event); - - if (namelist) - ret = del_trace_probe_event(kfd, buf, namelist); - - if (unamelist && ret != 0) - ret = del_trace_probe_event(ufd, buf, unamelist); - - if (ret != 0) - pr_info("Info: Event \"%s\" does not exist.\n", buf); - - free(str); } + ret = 0; error: - if (kfd >= 0) { - strlist__delete(namelist); + if (kfd >= 0) close(kfd); - } - - if (ufd >= 0) { - strlist__delete(unamelist); + if (ufd >= 0) close(ufd); - } +out: + free(str); return ret; } @@ -2809,7 +2891,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter, struct map *map; int ret; - ret = init_symbol_maps(user); + ret = init_probe_symbol_maps(user); if (ret < 0) return ret; @@ -2837,11 +2919,29 @@ int show_available_funcs(const char *target, struct strfilter *_filter, dso__fprintf_symbols_by_name(map->dso, map->type, stdout); end: if (user) { - dso__delete(map->dso); - map__delete(map); + map__put(map); } - exit_symbol_maps(); + exit_probe_symbol_maps(); return ret; } +int copy_to_probe_trace_arg(struct probe_trace_arg *tvar, + struct perf_probe_arg *pvar) +{ + tvar->value = strdup(pvar->var); + if (tvar->value == NULL) + return -ENOMEM; + if (pvar->type) { + tvar->type = strdup(pvar->type); + if (tvar->type == NULL) + return -ENOMEM; + } + if (pvar->name) { + tvar->name = strdup(pvar->name); + if (tvar->name == NULL) + return -ENOMEM; + } else + tvar->name = NULL; + return 0; +} diff --git a/kernel/tools/perf/util/probe-event.h b/kernel/tools/perf/util/probe-event.h index d6b783447..ba926c30f 100644 --- a/kernel/tools/perf/util/probe-event.h +++ b/kernel/tools/perf/util/probe-event.h @@ -6,10 +6,20 @@ #include "strlist.h" #include "strfilter.h" +/* Probe related configurations */ +struct probe_conf { + bool show_ext_vars; + bool show_location_range; + bool force_add; + bool no_inlines; + int max_probes; +}; +extern struct probe_conf probe_conf; extern bool probe_event_dry_run; /* kprobe-tracer and uprobe-tracer tracing point */ struct probe_trace_point { + char *realname; /* function real name (if needed) */ char *symbol; /* Base symbol */ char *module; /* Module name */ unsigned long offset; /* Offset from symbol */ @@ -49,6 +59,7 @@ struct perf_probe_point { bool retprobe; /* Return probe flag */ char *lazy_line; /* Lazy matching pattern */ unsigned long offset; /* Offset from function entry */ + unsigned long abs_address; /* Absolute address of the point */ }; /* Perf probe probing argument field chain */ @@ -76,6 +87,8 @@ struct perf_probe_event { bool uprobes; /* Uprobe event flag */ char *target; /* Target binary */ struct perf_probe_arg *args; /* Arguments */ + struct probe_trace_event *tevs; + int ntevs; }; /* Line range */ @@ -96,9 +109,15 @@ struct 
variable_list { struct strlist *vars; /* Available variables */ }; +struct map; +int init_probe_symbol_maps(bool user_only); +void exit_probe_symbol_maps(void); + /* Command string to events */ extern int parse_perf_probe_command(const char *cmd, struct perf_probe_event *pev); +extern int parse_probe_trace_command(const char *cmd, + struct probe_trace_event *tev); /* Events to command string */ extern char *synthesize_perf_probe_command(struct perf_probe_event *pev); @@ -111,6 +130,7 @@ extern bool perf_probe_event_need_dwarf(struct perf_probe_event *pev); /* Release event contents */ extern void clear_perf_probe_event(struct perf_probe_event *pev); +extern void clear_probe_trace_event(struct probe_trace_event *tev); /* Command string to line-range */ extern int parse_line_range_desc(const char *cmd, struct line_range *lr); @@ -121,22 +141,34 @@ extern void line_range__clear(struct line_range *lr); /* Initialize line range */ extern int line_range__init(struct line_range *lr); -/* Internal use: Return kernel/module path */ -extern const char *kernel_get_module_path(const char *module); +extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs); +extern int convert_perf_probe_events(struct perf_probe_event *pevs, int npevs); +extern int apply_perf_probe_events(struct perf_probe_event *pevs, int npevs); +extern void cleanup_perf_probe_events(struct perf_probe_event *pevs, int npevs); +extern int del_perf_probe_events(struct strfilter *filter); -extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, - int max_probe_points, bool force_add); -extern int del_perf_probe_events(struct strlist *dellist); -extern int show_perf_probe_events(void); +extern int show_perf_probe_event(const char *group, const char *event, + struct perf_probe_event *pev, + const char *module, bool use_stdout); +extern int show_perf_probe_events(struct strfilter *filter); extern int show_line_range(struct line_range *lr, const char *module, bool user); extern int show_available_vars(struct perf_probe_event *pevs, int npevs, - int max_probe_points, const char *module, - struct strfilter *filter, bool externs); + struct strfilter *filter); extern int show_available_funcs(const char *module, struct strfilter *filter, bool user); +bool arch__prefers_symtab(void); +void arch__fix_tev_from_maps(struct perf_probe_event *pev, + struct probe_trace_event *tev, struct map *map); + +/* If there is no space to write, returns -E2BIG. */ +int e_snprintf(char *str, size_t size, const char *format, ...) + __attribute__((format(printf, 3, 4))); /* Maximum index number of event-name postfix */ #define MAX_EVENT_INDEX 1024 +int copy_to_probe_trace_arg(struct probe_trace_arg *tvar, + struct perf_probe_arg *pvar); + #endif /*_PROBE_EVENT_H */ diff --git a/kernel/tools/perf/util/probe-file.c b/kernel/tools/perf/util/probe-file.c new file mode 100644 index 000000000..e3b3b92e4 --- /dev/null +++ b/kernel/tools/perf/util/probe-file.c @@ -0,0 +1,327 @@ +/* + * probe-file.c : operate ftrace k/uprobe events files + * + * Written by Masami Hiramatsu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+ * GNU General Public License for more details.
+ *
+ */
+#include "util.h"
+#include "event.h"
+#include "strlist.h"
+#include "debug.h"
+#include "cache.h"
+#include "color.h"
+#include "symbol.h"
+#include "thread.h"
+#include <api/fs/tracing_path.h>
+#include "probe-event.h"
+#include "probe-file.h"
+#include "session.h"
+
+#define MAX_CMDLEN 256
+
+static void print_open_warning(int err, bool uprobe)
+{
+	char sbuf[STRERR_BUFSIZE];
+
+	if (err == -ENOENT) {
+		const char *config;
+
+		if (uprobe)
+			config = "CONFIG_UPROBE_EVENTS";
+		else
+			config = "CONFIG_KPROBE_EVENTS";
+
+		pr_warning("%cprobe_events file does not exist"
+			   " - please rebuild kernel with %s.\n",
+			   uprobe ? 'u' : 'k', config);
+	} else if (err == -ENOTSUP)
+		pr_warning("Tracefs or debugfs is not mounted.\n");
+	else
+		pr_warning("Failed to open %cprobe_events: %s\n",
+			   uprobe ? 'u' : 'k',
+			   strerror_r(-err, sbuf, sizeof(sbuf)));
+}
+
+static void print_both_open_warning(int kerr, int uerr)
+{
+	/* Both kprobes and uprobes are disabled, warn it. */
+	if (kerr == -ENOTSUP && uerr == -ENOTSUP)
+		pr_warning("Tracefs or debugfs is not mounted.\n");
+	else if (kerr == -ENOENT && uerr == -ENOENT)
+		pr_warning("Please rebuild kernel with CONFIG_KPROBE_EVENTS "
+			   "or/and CONFIG_UPROBE_EVENTS.\n");
+	else {
+		char sbuf[STRERR_BUFSIZE];
+		pr_warning("Failed to open kprobe events: %s.\n",
+			   strerror_r(-kerr, sbuf, sizeof(sbuf)));
+		pr_warning("Failed to open uprobe events: %s.\n",
+			   strerror_r(-uerr, sbuf, sizeof(sbuf)));
+	}
+}
+
+static int open_probe_events(const char *trace_file, bool readwrite)
+{
+	char buf[PATH_MAX];
+	const char *tracing_dir = "";
+	int ret;
+
+	ret = e_snprintf(buf, PATH_MAX, "%s/%s%s",
+			 tracing_path, tracing_dir, trace_file);
+	if (ret >= 0) {
+		pr_debug("Opening %s write=%d\n", buf, readwrite);
+		if (readwrite && !probe_event_dry_run)
+			ret = open(buf, O_RDWR | O_APPEND, 0);
+		else
+			ret = open(buf, O_RDONLY, 0);
+
+		if (ret < 0)
+			ret = -errno;
+	}
+	return ret;
+}
+
+static int open_kprobe_events(bool readwrite)
+{
+	return open_probe_events("kprobe_events", readwrite);
+}
+
+static int open_uprobe_events(bool readwrite)
+{
+	return open_probe_events("uprobe_events", readwrite);
+}
+
+int probe_file__open(int flag)
+{
+	int fd;
+
+	if (flag & PF_FL_UPROBE)
+		fd = open_uprobe_events(flag & PF_FL_RW);
+	else
+		fd = open_kprobe_events(flag & PF_FL_RW);
+	if (fd < 0)
+		print_open_warning(fd, flag & PF_FL_UPROBE);
+
+	return fd;
+}
+
+int probe_file__open_both(int *kfd, int *ufd, int flag)
+{
+	if (!kfd || !ufd)
+		return -EINVAL;
+
+	*kfd = open_kprobe_events(flag & PF_FL_RW);
+	*ufd = open_uprobe_events(flag & PF_FL_RW);
+	if (*kfd < 0 && *ufd < 0) {
+		print_both_open_warning(*kfd, *ufd);
+		return *kfd;
+	}
+
+	return 0;
+}
+
+/* Get raw string list of current kprobe_events or uprobe_events */
+struct strlist *probe_file__get_rawlist(int fd)
+{
+	int ret, idx;
+	FILE *fp;
+	char buf[MAX_CMDLEN];
+	char *p;
+	struct strlist *sl;
+
+	if (fd < 0)
+		return NULL;
+
+	sl = strlist__new(NULL, NULL);
+
+	fp = fdopen(dup(fd), "r");
+	while (!feof(fp)) {
+		p = fgets(buf, MAX_CMDLEN, fp);
+		if (!p)
+			break;
+
+		idx = strlen(p) - 1;
+		if (p[idx] == '\n')
+			p[idx] = '\0';
+		ret = strlist__add(sl, buf);
+		if (ret < 0) {
+			pr_debug("strlist__add failed (%d)\n", ret);
+			strlist__delete(sl);
+			return NULL;
+		}
+	}
+	fclose(fp);
+
+	return sl;
+}
+
+static struct strlist *__probe_file__get_namelist(int fd, bool include_group)
+{
+	char buf[128];
+	struct strlist *sl, *rawlist;
+	struct str_node *ent;
+	struct probe_trace_event tev;
+	int
ret = 0; + + memset(&tev, 0, sizeof(tev)); + rawlist = probe_file__get_rawlist(fd); + if (!rawlist) + return NULL; + sl = strlist__new(NULL, NULL); + strlist__for_each(ent, rawlist) { + ret = parse_probe_trace_command(ent->s, &tev); + if (ret < 0) + break; + if (include_group) { + ret = e_snprintf(buf, 128, "%s:%s", tev.group, + tev.event); + if (ret >= 0) + ret = strlist__add(sl, buf); + } else + ret = strlist__add(sl, tev.event); + clear_probe_trace_event(&tev); + if (ret < 0) + break; + } + strlist__delete(rawlist); + + if (ret < 0) { + strlist__delete(sl); + return NULL; + } + return sl; +} + +/* Get current perf-probe event names */ +struct strlist *probe_file__get_namelist(int fd) +{ + return __probe_file__get_namelist(fd, false); +} + +int probe_file__add_event(int fd, struct probe_trace_event *tev) +{ + int ret = 0; + char *buf = synthesize_probe_trace_command(tev); + char sbuf[STRERR_BUFSIZE]; + + if (!buf) { + pr_debug("Failed to synthesize probe trace event.\n"); + return -EINVAL; + } + + pr_debug("Writing event: %s\n", buf); + if (!probe_event_dry_run) { + ret = write(fd, buf, strlen(buf)); + if (ret <= 0) { + ret = -errno; + pr_warning("Failed to write event: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); + } + } + free(buf); + + return ret; +} + +static int __del_trace_probe_event(int fd, struct str_node *ent) +{ + char *p; + char buf[128]; + int ret; + + /* Convert from perf-probe event to trace-probe event */ + ret = e_snprintf(buf, 128, "-:%s", ent->s); + if (ret < 0) + goto error; + + p = strchr(buf + 2, ':'); + if (!p) { + pr_debug("Internal error: %s should have ':' but not.\n", + ent->s); + ret = -ENOTSUP; + goto error; + } + *p = '/'; + + pr_debug("Writing event: %s\n", buf); + ret = write(fd, buf, strlen(buf)); + if (ret < 0) { + ret = -errno; + goto error; + } + + return 0; +error: + pr_warning("Failed to delete event: %s\n", + strerror_r(-ret, buf, sizeof(buf))); + return ret; +} + +int probe_file__get_events(int fd, struct strfilter *filter, + struct strlist *plist) +{ + struct strlist *namelist; + struct str_node *ent; + const char *p; + int ret = -ENOENT; + + if (!plist) + return -EINVAL; + + namelist = __probe_file__get_namelist(fd, true); + if (!namelist) + return -ENOENT; + + strlist__for_each(ent, namelist) { + p = strchr(ent->s, ':'); + if ((p && strfilter__compare(filter, p + 1)) || + strfilter__compare(filter, ent->s)) { + strlist__add(plist, ent->s); + ret = 0; + } + } + strlist__delete(namelist); + + return ret; +} + +int probe_file__del_strlist(int fd, struct strlist *namelist) +{ + int ret = 0; + struct str_node *ent; + + strlist__for_each(ent, namelist) { + ret = __del_trace_probe_event(fd, ent); + if (ret < 0) + break; + } + return ret; +} + +int probe_file__del_events(int fd, struct strfilter *filter) +{ + struct strlist *namelist; + int ret; + + namelist = strlist__new(NULL, NULL); + if (!namelist) + return -ENOMEM; + + ret = probe_file__get_events(fd, filter, namelist); + if (ret < 0) + return ret; + + ret = probe_file__del_strlist(fd, namelist); + strlist__delete(namelist); + + return ret; +} diff --git a/kernel/tools/perf/util/probe-file.h b/kernel/tools/perf/util/probe-file.h new file mode 100644 index 000000000..18ac9cf51 --- /dev/null +++ b/kernel/tools/perf/util/probe-file.h @@ -0,0 +1,22 @@ +#ifndef __PROBE_FILE_H +#define __PROBE_FILE_H + +#include "strlist.h" +#include "strfilter.h" +#include "probe-event.h" + +#define PF_FL_UPROBE 1 +#define PF_FL_RW 2 + +int probe_file__open(int flag); +int probe_file__open_both(int *kfd, int *ufd, 
int flag); +struct strlist *probe_file__get_namelist(int fd); +struct strlist *probe_file__get_rawlist(int fd); +int probe_file__add_event(int fd, struct probe_trace_event *tev); +int probe_file__del_events(int fd, struct strfilter *filter); +int probe_file__get_events(int fd, struct strfilter *filter, + struct strlist *plist); +int probe_file__del_strlist(int fd, struct strlist *namelist); + + +#endif diff --git a/kernel/tools/perf/util/probe-finder.c b/kernel/tools/perf/util/probe-finder.c index 2a76e14db..05012bb17 100644 --- a/kernel/tools/perf/util/probe-finder.c +++ b/kernel/tools/perf/util/probe-finder.c @@ -70,6 +70,7 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *dbg, if (!dbg->dwfl) goto error; + dwfl_report_begin(dbg->dwfl); dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd); if (!dbg->mod) goto error; @@ -78,6 +79,8 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *dbg, if (!dbg->dbg) goto error; + dwfl_report_end(dbg->dwfl, NULL, NULL); + return 0; error: if (dbg->dwfl) @@ -130,7 +133,7 @@ struct debuginfo *debuginfo__new(const char *path) continue; dinfo = __debuginfo__new(buf); } - dso__delete(dso); + dso__put(dso); out: /* if failed to open all distro debuginfo, open given binary */ @@ -177,7 +180,7 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr, Dwarf_Word offs = 0; bool ref = false; const char *regs; - int ret; + int ret, ret2 = 0; if (dwarf_attr(vr_die, DW_AT_external, &attr) != NULL) goto static_var; @@ -187,9 +190,19 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr, return -EINVAL; /* Broken DIE ? */ if (dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0) { ret = dwarf_entrypc(sp_die, &tmp); - if (ret || addr != tmp || - dwarf_tag(vr_die) != DW_TAG_formal_parameter || - dwarf_highpc(sp_die, &tmp)) + if (ret) + return -ENOENT; + + if (probe_conf.show_location_range && + (dwarf_tag(vr_die) == DW_TAG_variable)) { + ret2 = -ERANGE; + } else if (addr != tmp || + dwarf_tag(vr_die) != DW_TAG_formal_parameter) { + return -ENOENT; + } + + ret = dwarf_highpc(sp_die, &tmp); + if (ret) return -ENOENT; /* * This is fuzzed by fentry mcount. 
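[Note: the dwfl_report_begin()/dwfl_report_end() pair added to debuginfo__init_offline_dwarf() above follows the elfutils reporting protocol: module reports are supposed to be bracketed by the two calls. A minimal sketch of the expected call order, in isolation:

#include <elfutils/libdwfl.h>

static Dwfl_Module *report_offline(Dwfl *dwfl, int fd)
{
	Dwfl_Module *mod;

	dwfl_report_begin(dwfl);		/* enter reporting state */
	mod = dwfl_report_offline(dwfl, "", "", fd);
	dwfl_report_end(dwfl, NULL, NULL);	/* finish the module list */

	return mod;				/* NULL on failure */
}
]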
We try to find the @@ -210,7 +223,7 @@ found: if (op->atom == DW_OP_addr) { static_var: if (!tvar) - return 0; + return ret2; /* Static variables on memory (not stack), make @varname */ ret = strlen(dwarf_diename(vr_die)); tvar->value = zalloc(ret + 2); @@ -220,7 +233,7 @@ static_var: tvar->ref = alloc_trace_arg_ref((long)offs); if (tvar->ref == NULL) return -ENOMEM; - return 0; + return ret2; } /* If this is based on frame buffer, set the offset */ @@ -250,14 +263,14 @@ static_var: } if (!tvar) - return 0; + return ret2; regs = get_arch_regstr(regn); if (!regs) { /* This should be a bug in DWARF or this tool */ pr_warning("Mapping for the register number %u " "missing on this architecture.\n", regn); - return -ERANGE; + return -ENOTSUP; } tvar->value = strdup(regs); @@ -269,7 +282,7 @@ static_var: if (tvar->ref == NULL) return -ENOMEM; } - return 0; + return ret2; } #define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long)) @@ -517,10 +530,12 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf) ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops, &pf->sp_die, pf->tvar); - if (ret == -ENOENT || ret == -EINVAL) - pr_err("Failed to find the location of %s at this address.\n" - " Perhaps, it has been optimized out.\n", pf->pvar->var); - else if (ret == -ENOTSUP) + if (ret == -ENOENT || ret == -EINVAL) { + pr_err("Failed to find the location of the '%s' variable at this address.\n" + " Perhaps it has been optimized out.\n" + " Use -V with the --range option to show '%s' location range.\n", + pf->pvar->var, pf->pvar->var); + } else if (ret == -ENOTSUP) pr_err("Sorry, we don't support this variable location yet.\n"); else if (ret == 0 && pf->pvar->field) { ret = convert_variable_fields(vr_die, pf->pvar->var, @@ -541,24 +556,9 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf) char buf[32], *ptr; int ret = 0; - if (!is_c_varname(pf->pvar->var)) { - /* Copy raw parameters */ - pf->tvar->value = strdup(pf->pvar->var); - if (pf->tvar->value == NULL) - return -ENOMEM; - if (pf->pvar->type) { - pf->tvar->type = strdup(pf->pvar->type); - if (pf->tvar->type == NULL) - return -ENOMEM; - } - if (pf->pvar->name) { - pf->tvar->name = strdup(pf->pvar->name); - if (pf->tvar->name == NULL) - return -ENOMEM; - } else - pf->tvar->name = NULL; - return 0; - } + /* Copy raw parameters */ + if (!is_c_varname(pf->pvar->var)) + return copy_to_probe_trace_arg(pf->tvar, pf->pvar); if (pf->pvar->name) pf->tvar->name = strdup(pf->pvar->name); @@ -594,6 +594,7 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf) /* Convert subprogram DIE to trace point */ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod, Dwarf_Addr paddr, bool retprobe, + const char *function, struct probe_trace_point *tp) { Dwarf_Addr eaddr, highaddr; @@ -637,8 +638,10 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod, /* Return probe must be on the head of a subprogram */ if (retprobe) { if (eaddr != paddr) { - pr_warning("Return probe must be on the head of" - " a real function.\n"); + pr_warning("Failed to find \"%s%%return\",\n" + " because %s is an inlined function and" + " has no return point.\n", function, + function); return -EINVAL; } tp->retprobe = true; @@ -662,9 +665,15 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf) /* If not a real subprogram, find a real one */ if (!die_is_func_def(sc_die)) { if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) { - pr_warning("Failed to find probe point 
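[Note: find_variable() now delegates non-C variable names to the copy_to_probe_trace_arg() helper introduced earlier in this patch, which is a plain strdup() copy with no DWARF lookup. A usage sketch with made-up values; copy_raw_arg() is illustrative only:

static int copy_raw_arg(struct probe_trace_arg *tvar)
{
	struct perf_probe_arg pvar = {
		.var  = (char *)"%ax",		/* copied verbatim */
		.type = (char *)"u32",		/* optional */
		.name = (char *)"myreg",	/* optional; NULL keeps tvar->name NULL */
	};

	return copy_to_probe_trace_arg(tvar, &pvar);	/* 0 or -ENOMEM */
}
]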
in any " - "functions.\n"); - return -ENOENT; + if (die_find_tailfunc(&pf->cu_die, pf->addr, &pf->sp_die)) { + pr_warning("Ignoring tail call from %s\n", + dwarf_diename(&pf->sp_die)); + return 0; + } else { + pr_warning("Failed to find probe point in any " + "functions.\n"); + return -ENOENT; + } } } else memcpy(&pf->sp_die, sc_die, sizeof(Dwarf_Die)); @@ -719,7 +728,7 @@ static int find_best_scope_cb(Dwarf_Die *fn_die, void *data) } /* If the function name is given, that's what user expects */ if (fsp->function) { - if (die_compare_name(fn_die, fsp->function)) { + if (die_match_name(fn_die, fsp->function)) { memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die)); fsp->found = true; return 1; @@ -922,13 +931,14 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) /* Check tag and diename */ if (!die_is_func_def(sp_die) || - !die_compare_name(sp_die, pp->function)) + !die_match_name(sp_die, pp->function)) return DWARF_CB_OK; /* Check declared file */ if (pp->file && strtailcmp(pp->file, dwarf_decl_file(sp_die))) return DWARF_CB_OK; + pr_debug("Matched function: %s\n", dwarf_diename(sp_die)); pf->fname = dwarf_decl_file(sp_die); if (pp->line) { /* Function relative line */ dwarf_decl_line(sp_die, &pf->lno); @@ -945,10 +955,20 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) /* TODO: Check the address in this function */ param->retval = call_probe_finder(sp_die, pf); } - } else + } else if (!probe_conf.no_inlines) { /* Inlined function: search instances */ param->retval = die_walk_instances(sp_die, probe_point_inline_cb, (void *)pf); + /* This could be a non-existed inline definition */ + if (param->retval == -ENOENT && strisglob(pp->function)) + param->retval = 0; + } + + /* We need to find other candidates */ + if (strisglob(pp->function) && param->retval >= 0) { + param->retval = 0; /* We have to clear the result */ + return DWARF_CB_OK; + } return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. 
*/ } @@ -977,7 +997,7 @@ static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data) if (dwarf_tag(param->sp_die) != DW_TAG_subprogram) return DWARF_CB_OK; - if (die_compare_name(param->sp_die, param->function)) { + if (die_match_name(param->sp_die, param->function)) { if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die)) return DWARF_CB_OK; @@ -1030,7 +1050,7 @@ static int debuginfo__find_probes(struct debuginfo *dbg, return -ENOMEM; /* Fastpath: lookup by function name from .debug_pubnames section */ - if (pp->function) { + if (pp->function && !strisglob(pp->function)) { struct pubname_callback_param pubname_param = { .function = pp->function, .file = pp->file, @@ -1089,6 +1109,7 @@ found: struct local_vars_finder { struct probe_finder *pf; struct perf_probe_arg *args; + bool vars; int max_args; int nargs; int ret; @@ -1103,7 +1124,7 @@ static int copy_variables_cb(Dwarf_Die *die_mem, void *data) tag = dwarf_tag(die_mem); if (tag == DW_TAG_formal_parameter || - tag == DW_TAG_variable) { + (tag == DW_TAG_variable && vf->vars)) { if (convert_variable_location(die_mem, vf->pf->addr, vf->pf->fb_ops, &pf->sp_die, NULL) == 0) { @@ -1129,26 +1150,28 @@ static int expand_probe_args(Dwarf_Die *sc_die, struct probe_finder *pf, Dwarf_Die die_mem; int i; int n = 0; - struct local_vars_finder vf = {.pf = pf, .args = args, + struct local_vars_finder vf = {.pf = pf, .args = args, .vars = false, .max_args = MAX_PROBE_ARGS, .ret = 0}; for (i = 0; i < pf->pev->nargs; i++) { /* var never be NULL */ - if (strcmp(pf->pev->args[i].var, "$vars") == 0) { - pr_debug("Expanding $vars into:"); - vf.nargs = n; - /* Special local variables */ - die_find_child(sc_die, copy_variables_cb, (void *)&vf, - &die_mem); - pr_debug(" (%d)\n", vf.nargs - n); - if (vf.ret < 0) - return vf.ret; - n = vf.nargs; - } else { + if (strcmp(pf->pev->args[i].var, PROBE_ARG_VARS) == 0) + vf.vars = true; + else if (strcmp(pf->pev->args[i].var, PROBE_ARG_PARAMS) != 0) { /* Copy normal argument */ args[n] = pf->pev->args[i]; n++; + continue; } + pr_debug("Expanding %s into:", pf->pev->args[i].var); + vf.nargs = n; + /* Special local variables */ + die_find_child(sc_die, copy_variables_cb, (void *)&vf, + &die_mem); + pr_debug(" (%d)\n", vf.nargs - n); + if (vf.ret < 0) + return vf.ret; + n = vf.nargs; } return n; } @@ -1158,8 +1181,9 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) { struct trace_event_finder *tf = container_of(pf, struct trace_event_finder, pf); + struct perf_probe_point *pp = &pf->pev->point; struct probe_trace_event *tev; - struct perf_probe_arg *args; + struct perf_probe_arg *args = NULL; int ret, i; /* Check number of tevs */ @@ -1172,17 +1196,25 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) /* Trace point should be converted from subprogram DIE */ ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr, - pf->pev->point.retprobe, &tev->point); + pp->retprobe, pp->function, &tev->point); if (ret < 0) - return ret; + goto end; + + tev->point.realname = strdup(dwarf_diename(sc_die)); + if (!tev->point.realname) { + ret = -ENOMEM; + goto end; + } pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, tev->point.offset); /* Expand special probe argument if exist */ args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS); - if (args == NULL) - return -ENOMEM; + if (args == NULL) { + ret = -ENOMEM; + goto end; + } ret = expand_probe_args(sc_die, pf, args); if (ret < 0) @@ -1206,6 +1238,10 @@ static int add_probe_trace_event(Dwarf_Die 
*sc_die, struct probe_finder *pf) } end: + if (ret) { + clear_probe_trace_event(tev); + tf->ntevs--; + } free(args); return ret; } @@ -1213,15 +1249,15 @@ end: /* Find probe_trace_events specified by perf_probe_event from debuginfo */ int debuginfo__find_trace_events(struct debuginfo *dbg, struct perf_probe_event *pev, - struct probe_trace_event **tevs, int max_tevs) + struct probe_trace_event **tevs) { struct trace_event_finder tf = { .pf = {.pev = pev, .callback = add_probe_trace_event}, - .mod = dbg->mod, .max_tevs = max_tevs}; - int ret; + .max_tevs = probe_conf.max_probes, .mod = dbg->mod}; + int ret, i; /* Allocate result tevs array */ - *tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs); + *tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs); if (*tevs == NULL) return -ENOMEM; @@ -1230,6 +1266,8 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, ret = debuginfo__find_probes(dbg, &tf.pf); if (ret < 0) { + for (i = 0; i < tf.ntevs; i++) + clear_probe_trace_event(&tf.tevs[i]); zfree(tevs); return ret; } @@ -1237,14 +1275,11 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, return (ret < 0) ? ret : tf.ntevs; } -#define MAX_VAR_LEN 64 - /* Collect available variables in this scope */ static int collect_variables_cb(Dwarf_Die *die_mem, void *data) { struct available_var_finder *af = data; struct variable_list *vl; - char buf[MAX_VAR_LEN]; int tag, ret; vl = &af->vls[af->nvls - 1]; @@ -1255,11 +1290,38 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data) ret = convert_variable_location(die_mem, af->pf.addr, af->pf.fb_ops, &af->pf.sp_die, NULL); - if (ret == 0) { - ret = die_get_varname(die_mem, buf, MAX_VAR_LEN); - pr_debug2("Add new var: %s\n", buf); - if (ret > 0) - strlist__add(vl->vars, buf); + if (ret == 0 || ret == -ERANGE) { + int ret2; + bool externs = !af->child; + struct strbuf buf; + + strbuf_init(&buf, 64); + + if (probe_conf.show_location_range) { + if (!externs) { + if (ret) + strbuf_addf(&buf, "[INV]\t"); + else + strbuf_addf(&buf, "[VAL]\t"); + } else + strbuf_addf(&buf, "[EXT]\t"); + } + + ret2 = die_get_varname(die_mem, &buf); + + if (!ret2 && probe_conf.show_location_range && + !externs) { + strbuf_addf(&buf, "\t"); + ret2 = die_get_var_range(&af->pf.sp_die, + die_mem, &buf); + } + + pr_debug("Add new var: %s\n", buf.buf); + if (ret2 == 0) { + strlist__add(vl->vars, + strbuf_detach(&buf, NULL)); + } + strbuf_release(&buf); } } @@ -1274,6 +1336,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf) { struct available_var_finder *af = container_of(pf, struct available_var_finder, pf); + struct perf_probe_point *pp = &pf->pev->point; struct variable_list *vl; Dwarf_Die die_mem; int ret; @@ -1287,7 +1350,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf) /* Trace point should be converted from subprogram DIE */ ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr, - pf->pev->point.retprobe, &vl->point); + pp->retprobe, pp->function, &vl->point); if (ret < 0) return ret; @@ -1295,16 +1358,16 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf) vl->point.offset); /* Find local variables */ - vl->vars = strlist__new(true, NULL); + vl->vars = strlist__new(NULL, NULL); if (vl->vars == NULL) return -ENOMEM; af->child = true; die_find_child(sc_die, collect_variables_cb, (void *)af, &die_mem); /* Find external variables */ - if (!af->externs) + if (!probe_conf.show_ext_vars) goto out; - /* Don't need to search child DIE for externs. 
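[Note: the switch from a fixed MAX_VAR_LEN buffer to a strbuf in collect_variables_cb() above removes the 64-byte truncation on variable descriptions. The pattern in isolation, assuming the strbuf API from tools/perf/util/strbuf.h; add_var_line() is illustrative, not from the patch:

#include "strbuf.h"
#include "strlist.h"

static void add_var_line(struct strlist *vars, const char *tag,
			 const char *name)
{
	struct strbuf buf;

	strbuf_init(&buf, 64);			/* 64 is only a size hint */
	strbuf_addf(&buf, "[%s]\t%s", tag, name);
	strlist__add(vars, buf.buf);		/* copied under the default config */
	strbuf_release(&buf);
}
]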
*/ + /* Don't need to search child DIE for external vars. */ af->child = false; die_find_child(&pf->cu_die, collect_variables_cb, (void *)af, &die_mem); @@ -1324,17 +1387,16 @@ out: */ int debuginfo__find_available_vars_at(struct debuginfo *dbg, struct perf_probe_event *pev, - struct variable_list **vls, - int max_vls, bool externs) + struct variable_list **vls) { struct available_var_finder af = { .pf = {.pev = pev, .callback = add_available_vars}, .mod = dbg->mod, - .max_vls = max_vls, .externs = externs}; + .max_vls = probe_conf.max_probes}; int ret; /* Allocate result vls array */ - *vls = zalloc(sizeof(struct variable_list) * max_vls); + *vls = zalloc(sizeof(struct variable_list) * af.max_vls); if (*vls == NULL) return -ENOMEM; @@ -1355,6 +1417,41 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg, return (ret < 0) ? ret : af.nvls; } +/* For the kernel module, we need a special code to get a DIE */ +static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs) +{ + int n, i; + Elf32_Word shndx; + Elf_Scn *scn; + Elf *elf; + GElf_Shdr mem, *shdr; + const char *p; + + elf = dwfl_module_getelf(dbg->mod, &dbg->bias); + if (!elf) + return -EINVAL; + + /* Get the number of relocations */ + n = dwfl_module_relocations(dbg->mod); + if (n < 0) + return -ENOENT; + /* Search the relocation related .text section */ + for (i = 0; i < n; i++) { + p = dwfl_module_relocation_info(dbg->mod, i, &shndx); + if (strcmp(p, ".text") == 0) { + /* OK, get the section header */ + scn = elf_getscn(elf, shndx); + if (!scn) + return -ENOENT; + shdr = gelf_getshdr(scn, &mem); + if (!shdr) + return -ENOENT; + *offs = shdr->sh_addr; + } + } + return 0; +} + /* Reverse search */ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, struct perf_probe_point *ppt) @@ -1363,9 +1460,16 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, Dwarf_Addr _addr = 0, baseaddr = 0; const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp; int baseline = 0, lineno = 0, ret = 0; + bool reloc = false; +retry: /* Find cu die */ if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { + if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) { + addr += baseaddr; + reloc = true; + goto retry; + } pr_warning("Failed to find debug information for address %lx\n", addr); ret = -EINVAL; @@ -1535,7 +1639,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data) return DWARF_CB_OK; if (die_is_func_def(sp_die) && - die_compare_name(sp_die, lr->function)) { + die_match_name(sp_die, lr->function)) { lf->fname = dwarf_decl_file(sp_die); dwarf_decl_line(sp_die, &lr->offset); pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset); diff --git a/kernel/tools/perf/util/probe-finder.h b/kernel/tools/perf/util/probe-finder.h index ebf8c8c81..bed82716e 100644 --- a/kernel/tools/perf/util/probe-finder.h +++ b/kernel/tools/perf/util/probe-finder.h @@ -10,6 +10,9 @@ #define MAX_PROBES 128 #define MAX_PROBE_ARGS 128 +#define PROBE_ARG_VARS "$vars" +#define PROBE_ARG_PARAMS "$params" + static inline int is_c_varname(const char *name) { /* TODO */ @@ -37,8 +40,7 @@ extern void debuginfo__delete(struct debuginfo *dbg); /* Find probe_trace_events specified by perf_probe_event from debuginfo */ extern int debuginfo__find_trace_events(struct debuginfo *dbg, struct perf_probe_event *pev, - struct probe_trace_event **tevs, - int max_tevs); + struct probe_trace_event **tevs); /* Find a perf_probe_point from debuginfo */ extern int debuginfo__find_probe_point(struct 
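[Note: the retry added to debuginfo__find_probe_point() exists because kernel-module DWARF addresses are .text-relative, so a first CU lookup on the raw address can miss. A condensed sketch of the lookup order; find_cu() is a hypothetical wrapper around the in-file debuginfo__get_text_offset() helper:

static Dwarf_Die *find_cu(struct debuginfo *dbg, Dwarf_Addr addr,
			  Dwarf_Die *die_mem)
{
	Dwarf_Addr text_offs = 0;

	if (dwarf_addrdie(dbg->dbg, addr, die_mem))
		return die_mem;			/* vmlinux / user binary */

	/* kernel module: rebase by the .text sh_addr and retry once */
	if (debuginfo__get_text_offset(dbg, &text_offs) == 0 &&
	    dwarf_addrdie(dbg->dbg, addr + text_offs, die_mem))
		return die_mem;

	return NULL;
}
]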
debuginfo *dbg, @@ -52,8 +54,7 @@ extern int debuginfo__find_line_range(struct debuginfo *dbg, /* Find available variables */ extern int debuginfo__find_available_vars_at(struct debuginfo *dbg, struct perf_probe_event *pev, - struct variable_list **vls, - int max_points, bool externs); + struct variable_list **vls); /* Find a src file from a DWARF tag path */ int get_real_path(const char *raw_path, const char *comp_dir, @@ -96,7 +97,6 @@ struct available_var_finder { struct variable_list *vls; /* Found variable lists */ int nvls; /* Number of variable lists */ int max_vls; /* Max no. of variable lists */ - bool externs; /* Find external vars too */ bool child; /* Search child scopes */ }; diff --git a/kernel/tools/perf/util/pstack.c b/kernel/tools/perf/util/pstack.c index a126e6cc6..b234a6e3d 100644 --- a/kernel/tools/perf/util/pstack.c +++ b/kernel/tools/perf/util/pstack.c @@ -74,3 +74,10 @@ void *pstack__pop(struct pstack *pstack) pstack->entries[pstack->top] = NULL; return ret; } + +void *pstack__peek(struct pstack *pstack) +{ + if (pstack->top == 0) + return NULL; + return pstack->entries[pstack->top - 1]; +} diff --git a/kernel/tools/perf/util/pstack.h b/kernel/tools/perf/util/pstack.h index c3cb6584d..ded7f2e36 100644 --- a/kernel/tools/perf/util/pstack.h +++ b/kernel/tools/perf/util/pstack.h @@ -10,5 +10,6 @@ bool pstack__empty(const struct pstack *pstack); void pstack__remove(struct pstack *pstack, void *key); void pstack__push(struct pstack *pstack, void *key); void *pstack__pop(struct pstack *pstack); +void *pstack__peek(struct pstack *pstack); #endif /* _PERF_PSTACK_ */ diff --git a/kernel/tools/perf/util/python-ext-sources b/kernel/tools/perf/util/python-ext-sources index 4d28624a1..51be28b1b 100644 --- a/kernel/tools/perf/util/python-ext-sources +++ b/kernel/tools/perf/util/python-ext-sources @@ -10,12 +10,14 @@ util/ctype.c util/evlist.c util/evsel.c util/cpumap.c -../../lib/hweight.c +../lib/hweight.c util/thread_map.c util/util.c util/xyarray.c util/cgroup.c util/rblist.c +util/counts.c util/strlist.c util/trace-event.c -../../lib/rbtree.c +../lib/rbtree.c +util/string.c diff --git a/kernel/tools/perf/util/python.c b/kernel/tools/perf/util/python.c index d906d0ad5..98f127abf 100644 --- a/kernel/tools/perf/util/python.c +++ b/kernel/tools/perf/util/python.c @@ -67,6 +67,7 @@ static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object."); static PyMemberDef pyrf_mmap_event__members[] = { sample_members member_def(perf_event_header, type, T_UINT, "event type"), + member_def(perf_event_header, misc, T_UINT, "event misc"), member_def(mmap_event, pid, T_UINT, "event pid"), member_def(mmap_event, tid, T_UINT, "event tid"), member_def(mmap_event, start, T_ULONGLONG, "start of the map"), @@ -297,6 +298,43 @@ static PyTypeObject pyrf_sample_event__type = { .tp_repr = (reprfunc)pyrf_sample_event__repr, }; +static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object."); + +static PyMemberDef pyrf_context_switch_event__members[] = { + sample_members + member_def(perf_event_header, type, T_UINT, "event type"), + member_def(context_switch_event, next_prev_pid, T_UINT, "next/prev pid"), + member_def(context_switch_event, next_prev_tid, T_UINT, "next/prev tid"), + { .name = NULL, }, +}; + +static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent) +{ + PyObject *ret; + char *s; + + if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }", + pevent->event.context_switch.next_prev_pid, + 
pevent->event.context_switch.next_prev_tid, + !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) { + ret = PyErr_NoMemory(); + } else { + ret = PyString_FromString(s); + free(s); + } + return ret; +} + +static PyTypeObject pyrf_context_switch_event__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.context_switch_event", + .tp_basicsize = sizeof(struct pyrf_event), + .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_doc = pyrf_context_switch_event__doc, + .tp_members = pyrf_context_switch_event__members, + .tp_repr = (reprfunc)pyrf_context_switch_event__repr, +}; + static int pyrf_event__setup_types(void) { int err; @@ -306,6 +344,7 @@ static int pyrf_event__setup_types(void) pyrf_lost_event__type.tp_new = pyrf_read_event__type.tp_new = pyrf_sample_event__type.tp_new = + pyrf_context_switch_event__type.tp_new = pyrf_throttle_event__type.tp_new = PyType_GenericNew; err = PyType_Ready(&pyrf_mmap_event__type); if (err < 0) @@ -328,6 +367,9 @@ static int pyrf_event__setup_types(void) err = PyType_Ready(&pyrf_sample_event__type); if (err < 0) goto out; + err = PyType_Ready(&pyrf_context_switch_event__type); + if (err < 0) + goto out; out: return err; } @@ -342,6 +384,8 @@ static PyTypeObject *pyrf_event__type[] = { [PERF_RECORD_FORK] = &pyrf_task_event__type, [PERF_RECORD_READ] = &pyrf_read_event__type, [PERF_RECORD_SAMPLE] = &pyrf_sample_event__type, + [PERF_RECORD_SWITCH] = &pyrf_context_switch_event__type, + [PERF_RECORD_SWITCH_CPU_WIDE] = &pyrf_context_switch_event__type, }; static PyObject *pyrf_event__new(union perf_event *event) @@ -349,8 +393,10 @@ static PyObject *pyrf_event__new(union perf_event *event) struct pyrf_event *pevent; PyTypeObject *ptype; - if (event->header.type < PERF_RECORD_MMAP || - event->header.type > PERF_RECORD_SAMPLE) + if ((event->header.type < PERF_RECORD_MMAP || + event->header.type > PERF_RECORD_SAMPLE) && + !(event->header.type == PERF_RECORD_SWITCH || + event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)) return NULL; ptype = pyrf_event__type[event->header.type]; @@ -384,7 +430,7 @@ static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus, static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus) { - cpu_map__delete(pcpus->cpus); + cpu_map__put(pcpus->cpus); pcpus->ob_type->tp_free((PyObject*)pcpus); } @@ -453,7 +499,7 @@ static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads, static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads) { - thread_map__delete(pthreads->threads); + thread_map__put(pthreads->threads); pthreads->ob_type->tp_free((PyObject*)pthreads); } @@ -528,6 +574,7 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel, "exclude_hv", "exclude_idle", "mmap", + "context_switch", "comm", "freq", "inherit_stat", @@ -553,6 +600,7 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel, exclude_hv = 0, exclude_idle = 0, mmap = 0, + context_switch = 0, comm = 0, freq = 1, inherit_stat = 0, @@ -565,13 +613,13 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel, int idx = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, - "|iKiKKiiiiiiiiiiiiiiiiiiiiiKK", kwlist, + "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist, &attr.type, &attr.config, &attr.sample_freq, &sample_period, &attr.sample_type, &attr.read_format, &disabled, &inherit, &pinned, &exclusive, &exclude_user, &exclude_kernel, &exclude_hv, &exclude_idle, - &mmap, &comm, &freq, &inherit_stat, + &mmap, &context_switch, &comm, &freq, &inherit_stat, &enable_on_exec, &task, &watermark, &precise_ip, &mmap_data, &sample_id_all, 
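[Note: the new "context_switch" keyword added to the Python binding above ultimately sets the context_switch bit of struct perf_event_attr (available since kernel 4.3). What that amounts to at the perf_event_open() level, sketched in C with an assumed software dummy carrier event:

#include <string.h>
#include <linux/perf_event.h>

static void setup_switch_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_SOFTWARE;
	attr->config = PERF_COUNT_SW_DUMMY;	/* carrier for the records */
	attr->context_switch = 1;	/* emit PERF_RECORD_SWITCH[_CPU_WIDE] */
	attr->sample_id_all = 1;	/* time/tid on non-sample records */
}
]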
&attr.wakeup_events, &attr.bp_type, @@ -595,6 +643,7 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel, attr.exclude_hv = exclude_hv; attr.exclude_idle = exclude_idle; attr.mmap = mmap; + attr.context_switch = context_switch; attr.comm = comm; attr.freq = freq; attr.inherit_stat = inherit_stat; @@ -941,76 +990,86 @@ static int pyrf_evlist__setup_types(void) return PyType_Ready(&pyrf_evlist__type); } +#define PERF_CONST(name) { #name, PERF_##name } + static struct { const char *name; int value; } perf__constants[] = { - { "TYPE_HARDWARE", PERF_TYPE_HARDWARE }, - { "TYPE_SOFTWARE", PERF_TYPE_SOFTWARE }, - { "TYPE_TRACEPOINT", PERF_TYPE_TRACEPOINT }, - { "TYPE_HW_CACHE", PERF_TYPE_HW_CACHE }, - { "TYPE_RAW", PERF_TYPE_RAW }, - { "TYPE_BREAKPOINT", PERF_TYPE_BREAKPOINT }, - - { "COUNT_HW_CPU_CYCLES", PERF_COUNT_HW_CPU_CYCLES }, - { "COUNT_HW_INSTRUCTIONS", PERF_COUNT_HW_INSTRUCTIONS }, - { "COUNT_HW_CACHE_REFERENCES", PERF_COUNT_HW_CACHE_REFERENCES }, - { "COUNT_HW_CACHE_MISSES", PERF_COUNT_HW_CACHE_MISSES }, - { "COUNT_HW_BRANCH_INSTRUCTIONS", PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, - { "COUNT_HW_BRANCH_MISSES", PERF_COUNT_HW_BRANCH_MISSES }, - { "COUNT_HW_BUS_CYCLES", PERF_COUNT_HW_BUS_CYCLES }, - { "COUNT_HW_CACHE_L1D", PERF_COUNT_HW_CACHE_L1D }, - { "COUNT_HW_CACHE_L1I", PERF_COUNT_HW_CACHE_L1I }, - { "COUNT_HW_CACHE_LL", PERF_COUNT_HW_CACHE_LL }, - { "COUNT_HW_CACHE_DTLB", PERF_COUNT_HW_CACHE_DTLB }, - { "COUNT_HW_CACHE_ITLB", PERF_COUNT_HW_CACHE_ITLB }, - { "COUNT_HW_CACHE_BPU", PERF_COUNT_HW_CACHE_BPU }, - { "COUNT_HW_CACHE_OP_READ", PERF_COUNT_HW_CACHE_OP_READ }, - { "COUNT_HW_CACHE_OP_WRITE", PERF_COUNT_HW_CACHE_OP_WRITE }, - { "COUNT_HW_CACHE_OP_PREFETCH", PERF_COUNT_HW_CACHE_OP_PREFETCH }, - { "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS }, - { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS }, - - { "COUNT_HW_STALLED_CYCLES_FRONTEND", PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, - { "COUNT_HW_STALLED_CYCLES_BACKEND", PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, - - { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK }, - { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK }, - { "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS }, - { "COUNT_SW_CONTEXT_SWITCHES", PERF_COUNT_SW_CONTEXT_SWITCHES }, - { "COUNT_SW_CPU_MIGRATIONS", PERF_COUNT_SW_CPU_MIGRATIONS }, - { "COUNT_SW_PAGE_FAULTS_MIN", PERF_COUNT_SW_PAGE_FAULTS_MIN }, - { "COUNT_SW_PAGE_FAULTS_MAJ", PERF_COUNT_SW_PAGE_FAULTS_MAJ }, - { "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS }, - { "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS }, - { "COUNT_SW_DUMMY", PERF_COUNT_SW_DUMMY }, - - { "SAMPLE_IP", PERF_SAMPLE_IP }, - { "SAMPLE_TID", PERF_SAMPLE_TID }, - { "SAMPLE_TIME", PERF_SAMPLE_TIME }, - { "SAMPLE_ADDR", PERF_SAMPLE_ADDR }, - { "SAMPLE_READ", PERF_SAMPLE_READ }, - { "SAMPLE_CALLCHAIN", PERF_SAMPLE_CALLCHAIN }, - { "SAMPLE_ID", PERF_SAMPLE_ID }, - { "SAMPLE_CPU", PERF_SAMPLE_CPU }, - { "SAMPLE_PERIOD", PERF_SAMPLE_PERIOD }, - { "SAMPLE_STREAM_ID", PERF_SAMPLE_STREAM_ID }, - { "SAMPLE_RAW", PERF_SAMPLE_RAW }, - - { "FORMAT_TOTAL_TIME_ENABLED", PERF_FORMAT_TOTAL_TIME_ENABLED }, - { "FORMAT_TOTAL_TIME_RUNNING", PERF_FORMAT_TOTAL_TIME_RUNNING }, - { "FORMAT_ID", PERF_FORMAT_ID }, - { "FORMAT_GROUP", PERF_FORMAT_GROUP }, - - { "RECORD_MMAP", PERF_RECORD_MMAP }, - { "RECORD_LOST", PERF_RECORD_LOST }, - { "RECORD_COMM", PERF_RECORD_COMM }, - { "RECORD_EXIT", PERF_RECORD_EXIT }, - { "RECORD_THROTTLE", PERF_RECORD_THROTTLE }, - { "RECORD_UNTHROTTLE", PERF_RECORD_UNTHROTTLE }, - { 
"RECORD_FORK", PERF_RECORD_FORK }, - { "RECORD_READ", PERF_RECORD_READ }, - { "RECORD_SAMPLE", PERF_RECORD_SAMPLE }, + PERF_CONST(TYPE_HARDWARE), + PERF_CONST(TYPE_SOFTWARE), + PERF_CONST(TYPE_TRACEPOINT), + PERF_CONST(TYPE_HW_CACHE), + PERF_CONST(TYPE_RAW), + PERF_CONST(TYPE_BREAKPOINT), + + PERF_CONST(COUNT_HW_CPU_CYCLES), + PERF_CONST(COUNT_HW_INSTRUCTIONS), + PERF_CONST(COUNT_HW_CACHE_REFERENCES), + PERF_CONST(COUNT_HW_CACHE_MISSES), + PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS), + PERF_CONST(COUNT_HW_BRANCH_MISSES), + PERF_CONST(COUNT_HW_BUS_CYCLES), + PERF_CONST(COUNT_HW_CACHE_L1D), + PERF_CONST(COUNT_HW_CACHE_L1I), + PERF_CONST(COUNT_HW_CACHE_LL), + PERF_CONST(COUNT_HW_CACHE_DTLB), + PERF_CONST(COUNT_HW_CACHE_ITLB), + PERF_CONST(COUNT_HW_CACHE_BPU), + PERF_CONST(COUNT_HW_CACHE_OP_READ), + PERF_CONST(COUNT_HW_CACHE_OP_WRITE), + PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH), + PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS), + PERF_CONST(COUNT_HW_CACHE_RESULT_MISS), + + PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND), + PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND), + + PERF_CONST(COUNT_SW_CPU_CLOCK), + PERF_CONST(COUNT_SW_TASK_CLOCK), + PERF_CONST(COUNT_SW_PAGE_FAULTS), + PERF_CONST(COUNT_SW_CONTEXT_SWITCHES), + PERF_CONST(COUNT_SW_CPU_MIGRATIONS), + PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN), + PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ), + PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS), + PERF_CONST(COUNT_SW_EMULATION_FAULTS), + PERF_CONST(COUNT_SW_DUMMY), + + PERF_CONST(SAMPLE_IP), + PERF_CONST(SAMPLE_TID), + PERF_CONST(SAMPLE_TIME), + PERF_CONST(SAMPLE_ADDR), + PERF_CONST(SAMPLE_READ), + PERF_CONST(SAMPLE_CALLCHAIN), + PERF_CONST(SAMPLE_ID), + PERF_CONST(SAMPLE_CPU), + PERF_CONST(SAMPLE_PERIOD), + PERF_CONST(SAMPLE_STREAM_ID), + PERF_CONST(SAMPLE_RAW), + + PERF_CONST(FORMAT_TOTAL_TIME_ENABLED), + PERF_CONST(FORMAT_TOTAL_TIME_RUNNING), + PERF_CONST(FORMAT_ID), + PERF_CONST(FORMAT_GROUP), + + PERF_CONST(RECORD_MMAP), + PERF_CONST(RECORD_LOST), + PERF_CONST(RECORD_COMM), + PERF_CONST(RECORD_EXIT), + PERF_CONST(RECORD_THROTTLE), + PERF_CONST(RECORD_UNTHROTTLE), + PERF_CONST(RECORD_FORK), + PERF_CONST(RECORD_READ), + PERF_CONST(RECORD_SAMPLE), + PERF_CONST(RECORD_MMAP2), + PERF_CONST(RECORD_AUX), + PERF_CONST(RECORD_ITRACE_START), + PERF_CONST(RECORD_LOST_SAMPLES), + PERF_CONST(RECORD_SWITCH), + PERF_CONST(RECORD_SWITCH_CPU_WIDE), + + PERF_CONST(RECORD_MISC_SWITCH_OUT), { .name = NULL, }, }; diff --git a/kernel/tools/perf/util/record.c b/kernel/tools/perf/util/record.c index 8acd0df88..0467367dc 100644 --- a/kernel/tools/perf/util/record.c +++ b/kernel/tools/perf/util/record.c @@ -20,7 +20,7 @@ static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str) if (!evlist) return -ENOMEM; - if (parse_events(evlist, str)) + if (parse_events(evlist, str, NULL)) goto out_delete; evsel = perf_evlist__first(evlist); @@ -64,7 +64,7 @@ static bool perf_probe_api(setup_probe_fn_t fn) if (!cpus) return false; cpu = cpus->map[0]; - cpu_map__delete(cpus); + cpu_map__put(cpus); do { ret = perf_do_probe_api(fn, cpu, try[i++]); @@ -85,6 +85,11 @@ static void perf_probe_comm_exec(struct perf_evsel *evsel) evsel->attr.comm_exec = 1; } +static void perf_probe_context_switch(struct perf_evsel *evsel) +{ + evsel->attr.context_switch = 1; +} + bool perf_can_sample_identifier(void) { return perf_probe_api(perf_probe_sample_identifier); @@ -95,6 +100,35 @@ static bool perf_can_comm_exec(void) return perf_probe_api(perf_probe_comm_exec); } +bool perf_can_record_switch_events(void) +{ + return perf_probe_api(perf_probe_context_switch); +} + 
+bool perf_can_record_cpu_wide(void) +{ + struct perf_event_attr attr = { + .type = PERF_TYPE_SOFTWARE, + .config = PERF_COUNT_SW_CPU_CLOCK, + .exclude_kernel = 1, + }; + struct cpu_map *cpus; + int cpu, fd; + + cpus = cpu_map__new(NULL); + if (!cpus) + return false; + cpu = cpus->map[0]; + cpu_map__put(cpus); + + fd = sys_perf_event_open(&attr, -1, cpu, -1, 0); + if (fd < 0) + return false; + close(fd); + + return true; +} + void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts) { struct perf_evsel *evsel; @@ -119,7 +153,16 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts) evsel->attr.comm_exec = 1; } - if (evlist->nr_entries > 1) { + if (opts->full_auxtrace) { + /* + * Need to be able to synthesize and parse selected events with + * arbitrary sample types, which requires always being able to + * match the id. + */ + use_sample_identifier = perf_can_sample_identifier(); + evlist__for_each(evlist, evsel) + perf_evsel__set_sample_id(evsel, use_sample_identifier); + } else if (evlist->nr_entries > 1) { struct perf_evsel *first = perf_evlist__first(evlist); evlist__for_each(evlist, evsel) { @@ -207,7 +250,7 @@ bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str) if (!temp_evlist) return false; - err = parse_events(temp_evlist, str); + err = parse_events(temp_evlist, str, NULL); if (err) goto out_delete; @@ -217,7 +260,7 @@ bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str) struct cpu_map *cpus = cpu_map__new(NULL); cpu = cpus ? cpus->map[0] : 0; - cpu_map__delete(cpus); + cpu_map__put(cpus); } else { cpu = evlist->cpus->map[0]; } diff --git a/kernel/tools/perf/util/scripting-engines/trace-event-perl.c b/kernel/tools/perf/util/scripting-engines/trace-event-perl.c index 430b5d278..544509c15 100644 --- a/kernel/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/kernel/tools/perf/util/scripting-engines/trace-event-perl.c @@ -55,10 +55,10 @@ void xs_init(pTHX) INTERP my_perl; -#define FTRACE_MAX_EVENT \ +#define TRACE_EVENT_TYPE_MAX \ ((1 << (sizeof(unsigned short) * 8)) - 1) -static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT); +static DECLARE_BITMAP(events_defined, TRACE_EVENT_TYPE_MAX); extern struct scripting_context *scripting_context; @@ -221,6 +221,7 @@ static void define_event_symbols(struct event_format *event, break; case PRINT_BSTRING: case PRINT_DYNAMIC_ARRAY: + case PRINT_DYNAMIC_ARRAY_LEN: case PRINT_STRING: case PRINT_BITMASK: break; diff --git a/kernel/tools/perf/util/scripting-engines/trace-event-python.c b/kernel/tools/perf/util/scripting-engines/trace-event-python.c index 5544b8cdd..a8e825fca 100644 --- a/kernel/tools/perf/util/scripting-engines/trace-event-python.c +++ b/kernel/tools/perf/util/scripting-engines/trace-event-python.c @@ -44,10 +44,10 @@ PyMODINIT_FUNC initperf_trace_context(void); -#define FTRACE_MAX_EVENT \ +#define TRACE_EVENT_TYPE_MAX \ ((1 << (sizeof(unsigned short) * 8)) - 1) -static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT); +static DECLARE_BITMAP(events_defined, TRACE_EVENT_TYPE_MAX); #define MAX_FIELDS 64 #define N_COMMON_FIELDS 7 @@ -251,6 +251,7 @@ static void define_event_symbols(struct event_format *event, /* gcc warns for these? */ case PRINT_BSTRING: case PRINT_DYNAMIC_ARRAY: + case PRINT_DYNAMIC_ARRAY_LEN: case PRINT_FUNC: case PRINT_BITMASK: /* we should warn... 
*/ @@ -318,7 +319,7 @@ static PyObject *python_process_callchain(struct perf_sample *sample, if (thread__resolve_callchain(al->thread, evsel, sample, NULL, NULL, - PERF_MAX_STACK_DEPTH) != 0) { + scripting_max_stack) != 0) { pr_err("Failed to resolve callchain. Skipping\n"); goto exit; } diff --git a/kernel/tools/perf/util/session.c b/kernel/tools/perf/util/session.c index 0c7401257..468de95bc 100644 --- a/kernel/tools/perf/util/session.c +++ b/kernel/tools/perf/util/session.c @@ -15,19 +15,21 @@ #include "cpumap.h" #include "perf_regs.h" #include "asm/bug.h" +#include "auxtrace.h" +#include "thread-stack.h" -static int machines__deliver_event(struct machines *machines, - struct perf_evlist *evlist, - union perf_event *event, - struct perf_sample *sample, - struct perf_tool *tool, u64 file_offset); +static int perf_session__deliver_event(struct perf_session *session, + union perf_event *event, + struct perf_sample *sample, + struct perf_tool *tool, + u64 file_offset); static int perf_session__open(struct perf_session *session) { struct perf_data_file *file = session->file; if (perf_session__read_header(session) < 0) { - pr_err("incompatible file format (rerun with -v to learn more)"); + pr_err("incompatible file format (rerun with -v to learn more)\n"); return -1; } @@ -35,17 +37,17 @@ static int perf_session__open(struct perf_session *session) return 0; if (!perf_evlist__valid_sample_type(session->evlist)) { - pr_err("non matching sample_type"); + pr_err("non matching sample_type\n"); return -1; } if (!perf_evlist__valid_sample_id_all(session->evlist)) { - pr_err("non matching sample_id_all"); + pr_err("non matching sample_id_all\n"); return -1; } if (!perf_evlist__valid_read_format(session->evlist)) { - pr_err("non matching read_format"); + pr_err("non matching read_format\n"); return -1; } @@ -105,8 +107,8 @@ static int ordered_events__deliver_event(struct ordered_events *oe, return ret; } - return machines__deliver_event(&session->machines, session->evlist, event->event, - &sample, session->tool, event->file_offset); + return perf_session__deliver_event(session, event->event, &sample, + session->tool, event->file_offset); } struct perf_session *perf_session__new(struct perf_data_file *file, @@ -119,6 +121,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file, session->repipe = repipe; session->tool = tool; + INIT_LIST_HEAD(&session->auxtrace_index); machines__init(&session->machines); ordered_events__init(&session->ordered_events, ordered_events__deliver_event); @@ -135,6 +138,8 @@ struct perf_session *perf_session__new(struct perf_data_file *file, perf_session__set_id_hdr_size(session); perf_session__set_comm_exec(session); } + } else { + session->machines.host.env = &perf_env; } if (!file || perf_data_file__is_write(file)) { @@ -167,27 +172,13 @@ static void perf_session__delete_threads(struct perf_session *session) machine__delete_threads(&session->machines.host); } -static void perf_session_env__delete(struct perf_session_env *env) -{ - zfree(&env->hostname); - zfree(&env->os_release); - zfree(&env->version); - zfree(&env->arch); - zfree(&env->cpu_desc); - zfree(&env->cpuid); - - zfree(&env->cmdline); - zfree(&env->sibling_cores); - zfree(&env->sibling_threads); - zfree(&env->numa_nodes); - zfree(&env->pmu_mappings); -} - void perf_session__delete(struct perf_session *session) { + auxtrace__free(session); + auxtrace_index__free(&session->auxtrace_index); perf_session__destroy_kernel_maps(session); perf_session__delete_threads(session); - 
perf_session_env__delete(&session->header.env); + perf_env__exit(&session->header.env); machines__exit(&session->machines); if (session->file) perf_data_file__close(session->file); @@ -262,6 +253,49 @@ static int process_id_index_stub(struct perf_tool *tool __maybe_unused, return 0; } +static int process_event_auxtrace_info_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct perf_session *session __maybe_unused) +{ + dump_printf(": unhandled!\n"); + return 0; +} + +static int skipn(int fd, off_t n) +{ + char buf[4096]; + ssize_t ret; + + while (n > 0) { + ret = read(fd, buf, min(n, (off_t)sizeof(buf))); + if (ret <= 0) + return ret; + n -= ret; + } + + return 0; +} + +static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_session *session + __maybe_unused) +{ + dump_printf(": unhandled!\n"); + if (perf_data_file__is_pipe(session->file)) + skipn(perf_data_file__fd(session->file), event->auxtrace.size); + return event->auxtrace.size; +} + +static +int process_event_auxtrace_error_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct perf_session *session __maybe_unused) +{ + dump_printf(": unhandled!\n"); + return 0; +} + void perf_tool__fill_defaults(struct perf_tool *tool) { if (tool->sample == NULL) @@ -278,6 +312,14 @@ void perf_tool__fill_defaults(struct perf_tool *tool) tool->exit = process_event_stub; if (tool->lost == NULL) tool->lost = perf_event__process_lost; + if (tool->lost_samples == NULL) + tool->lost_samples = perf_event__process_lost_samples; + if (tool->aux == NULL) + tool->aux = perf_event__process_aux; + if (tool->itrace_start == NULL) + tool->itrace_start = perf_event__process_itrace_start; + if (tool->context_switch == NULL) + tool->context_switch = perf_event__process_switch; if (tool->read == NULL) tool->read = process_event_sample_stub; if (tool->throttle == NULL) @@ -298,6 +340,12 @@ void perf_tool__fill_defaults(struct perf_tool *tool) } if (tool->id_index == NULL) tool->id_index = process_id_index_stub; + if (tool->auxtrace_info == NULL) + tool->auxtrace_info = process_event_auxtrace_info_stub; + if (tool->auxtrace == NULL) + tool->auxtrace = process_event_auxtrace_stub; + if (tool->auxtrace_error == NULL) + tool->auxtrace_error = process_event_auxtrace_error_stub; } static void swap_sample_id_all(union perf_event *event, void *data) @@ -390,6 +438,39 @@ static void perf_event__read_swap(union perf_event *event, bool sample_id_all) swap_sample_id_all(event, &event->read + 1); } +static void perf_event__aux_swap(union perf_event *event, bool sample_id_all) +{ + event->aux.aux_offset = bswap_64(event->aux.aux_offset); + event->aux.aux_size = bswap_64(event->aux.aux_size); + event->aux.flags = bswap_64(event->aux.flags); + + if (sample_id_all) + swap_sample_id_all(event, &event->aux + 1); +} + +static void perf_event__itrace_start_swap(union perf_event *event, + bool sample_id_all) +{ + event->itrace_start.pid = bswap_32(event->itrace_start.pid); + event->itrace_start.tid = bswap_32(event->itrace_start.tid); + + if (sample_id_all) + swap_sample_id_all(event, &event->itrace_start + 1); +} + +static void perf_event__switch_swap(union perf_event *event, bool sample_id_all) +{ + if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) { + event->context_switch.next_prev_pid = + bswap_32(event->context_switch.next_prev_pid); + event->context_switch.next_prev_tid = + bswap_32(event->context_switch.next_prev_tid); + } + + if 
(sample_id_all) + swap_sample_id_all(event, &event->context_switch + 1); +} + static void perf_event__throttle_swap(union perf_event *event, bool sample_id_all) { @@ -438,19 +519,42 @@ void perf_event__attr_swap(struct perf_event_attr *attr) { attr->type = bswap_32(attr->type); attr->size = bswap_32(attr->size); - attr->config = bswap_64(attr->config); - attr->sample_period = bswap_64(attr->sample_period); - attr->sample_type = bswap_64(attr->sample_type); - attr->read_format = bswap_64(attr->read_format); - attr->wakeup_events = bswap_32(attr->wakeup_events); - attr->bp_type = bswap_32(attr->bp_type); - attr->bp_addr = bswap_64(attr->bp_addr); - attr->bp_len = bswap_64(attr->bp_len); - attr->branch_sample_type = bswap_64(attr->branch_sample_type); - attr->sample_regs_user = bswap_64(attr->sample_regs_user); - attr->sample_stack_user = bswap_32(attr->sample_stack_user); - swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); +#define bswap_safe(f, n) \ + (attr->size > (offsetof(struct perf_event_attr, f) + \ + sizeof(attr->f) * (n))) +#define bswap_field(f, sz) \ +do { \ + if (bswap_safe(f, 0)) \ + attr->f = bswap_##sz(attr->f); \ +} while(0) +#define bswap_field_32(f) bswap_field(f, 32) +#define bswap_field_64(f) bswap_field(f, 64) + + bswap_field_64(config); + bswap_field_64(sample_period); + bswap_field_64(sample_type); + bswap_field_64(read_format); + bswap_field_32(wakeup_events); + bswap_field_32(bp_type); + bswap_field_64(bp_addr); + bswap_field_64(bp_len); + bswap_field_64(branch_sample_type); + bswap_field_64(sample_regs_user); + bswap_field_32(sample_stack_user); + bswap_field_32(aux_watermark); + + /* + * After read_format are bitfields. Check read_format because + * we are unable to use offsetof on bitfield. + */ + if (bswap_safe(read_format, 1)) + swap_bitfield((u8 *) (&attr->read_format + 1), + sizeof(u64)); +#undef bswap_field_64 +#undef bswap_field_32 +#undef bswap_field +#undef bswap_safe } static void perf_event__hdr_attr_swap(union perf_event *event, @@ -478,6 +582,40 @@ static void perf_event__tracing_data_swap(union perf_event *event, event->tracing_data.size = bswap_32(event->tracing_data.size); } +static void perf_event__auxtrace_info_swap(union perf_event *event, + bool sample_id_all __maybe_unused) +{ + size_t size; + + event->auxtrace_info.type = bswap_32(event->auxtrace_info.type); + + size = event->header.size; + size -= (void *)&event->auxtrace_info.priv - (void *)event; + mem_bswap_64(event->auxtrace_info.priv, size); +} + +static void perf_event__auxtrace_swap(union perf_event *event, + bool sample_id_all __maybe_unused) +{ + event->auxtrace.size = bswap_64(event->auxtrace.size); + event->auxtrace.offset = bswap_64(event->auxtrace.offset); + event->auxtrace.reference = bswap_64(event->auxtrace.reference); + event->auxtrace.idx = bswap_32(event->auxtrace.idx); + event->auxtrace.tid = bswap_32(event->auxtrace.tid); + event->auxtrace.cpu = bswap_32(event->auxtrace.cpu); +} + +static void perf_event__auxtrace_error_swap(union perf_event *event, + bool sample_id_all __maybe_unused) +{ + event->auxtrace_error.type = bswap_32(event->auxtrace_error.type); + event->auxtrace_error.code = bswap_32(event->auxtrace_error.code); + event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu); + event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid); + event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid); + event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip); +} + typedef void (*perf_event__swap_op)(union perf_event *event, bool 
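[Note: the bswap_field()/bswap_safe() macros above exist because perf.data files written by older tools carry a shorter struct perf_event_attr, so only fields that the recorded attr->size actually covers may be byte-swapped. A conservative variant of that guard in plain form:

#include <stdbool.h>
#include <stddef.h>
#include <linux/perf_event.h>

static bool attr_has_field(const struct perf_event_attr *attr,
			   size_t field_off, size_t field_sz)
{
	/* e.g. aux_watermark is absent from attrs written by pre-v4.1
	 * tools: attr_has_field(attr, offsetof(struct perf_event_attr,
	 * aux_watermark), sizeof(attr->aux_watermark)) */
	return attr->size > field_off + field_sz;
}
]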
sample_id_all); @@ -492,11 +630,19 @@ static perf_event__swap_op perf_event__swap_ops[] = { [PERF_RECORD_THROTTLE] = perf_event__throttle_swap, [PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap, [PERF_RECORD_SAMPLE] = perf_event__all64_swap, + [PERF_RECORD_AUX] = perf_event__aux_swap, + [PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap, + [PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap, + [PERF_RECORD_SWITCH] = perf_event__switch_swap, + [PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap, [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, [PERF_RECORD_HEADER_BUILD_ID] = NULL, [PERF_RECORD_ID_INDEX] = perf_event__all64_swap, + [PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap, + [PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap, + [PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap, [PERF_RECORD_HEADER_MAX] = NULL, }; @@ -543,6 +689,8 @@ static int process_finished_round(struct perf_tool *tool __maybe_unused, union perf_event *event __maybe_unused, struct ordered_events *oe) { + if (dump_trace) + fprintf(stdout, "\n"); return ordered_events__flush(oe, OE_FLUSH__ROUND); } @@ -621,10 +769,18 @@ static void branch_stack__printf(struct perf_sample *sample) printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr); - for (i = 0; i < sample->branch_stack->nr; i++) - printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n", - i, sample->branch_stack->entries[i].from, - sample->branch_stack->entries[i].to); + for (i = 0; i < sample->branch_stack->nr; i++) { + struct branch_entry *e = &sample->branch_stack->entries[i]; + + printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n", + i, e->from, e->to, + e->flags.cycles, + e->flags.mispred ? "M" : " ", + e->flags.predicted ? "P" : " ", + e->flags.abort ? "A" : " ", + e->flags.in_tx ? 
"T" : " ", + (unsigned)e->flags.reserved); + } } static void regs_dump__printf(u64 mask, u64 *regs) @@ -816,7 +972,7 @@ static struct machine *machines__find_for_cpumode(struct machines *machines, machine = machines__find(machines, pid); if (!machine) - machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID); + machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID); return machine; } @@ -908,11 +1064,11 @@ static int machines__deliver_event(struct machines *machines, switch (event->header.type) { case PERF_RECORD_SAMPLE: - dump_sample(evsel, event, sample); if (evsel == NULL) { ++evlist->stats.nr_unknown_id; return 0; } + dump_sample(evsel, event, sample); if (machine == NULL) { ++evlist->stats.nr_unprocessable_samples; return 0; @@ -921,6 +1077,8 @@ static int machines__deliver_event(struct machines *machines, case PERF_RECORD_MMAP: return tool->mmap(tool, event, sample, machine); case PERF_RECORD_MMAP2: + if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT) + ++evlist->stats.nr_proc_map_timeout; return tool->mmap2(tool, event, sample, machine); case PERF_RECORD_COMM: return tool->comm(tool, event, sample, machine); @@ -932,18 +1090,50 @@ static int machines__deliver_event(struct machines *machines, if (tool->lost == perf_event__process_lost) evlist->stats.total_lost += event->lost.lost; return tool->lost(tool, event, sample, machine); + case PERF_RECORD_LOST_SAMPLES: + if (tool->lost_samples == perf_event__process_lost_samples) + evlist->stats.total_lost_samples += event->lost_samples.lost; + return tool->lost_samples(tool, event, sample, machine); case PERF_RECORD_READ: return tool->read(tool, event, sample, evsel, machine); case PERF_RECORD_THROTTLE: return tool->throttle(tool, event, sample, machine); case PERF_RECORD_UNTHROTTLE: return tool->unthrottle(tool, event, sample, machine); + case PERF_RECORD_AUX: + if (tool->aux == perf_event__process_aux && + (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)) + evlist->stats.total_aux_lost += 1; + return tool->aux(tool, event, sample, machine); + case PERF_RECORD_ITRACE_START: + return tool->itrace_start(tool, event, sample, machine); + case PERF_RECORD_SWITCH: + case PERF_RECORD_SWITCH_CPU_WIDE: + return tool->context_switch(tool, event, sample, machine); default: ++evlist->stats.nr_unknown_events; return -1; } } +static int perf_session__deliver_event(struct perf_session *session, + union perf_event *event, + struct perf_sample *sample, + struct perf_tool *tool, + u64 file_offset) +{ + int ret; + + ret = auxtrace__process_event(session, event, sample, tool); + if (ret < 0) + return ret; + if (ret > 0) + return 0; + + return machines__deliver_event(&session->machines, session->evlist, + event, sample, tool, file_offset); +} + static s64 perf_session__process_user_event(struct perf_session *session, union perf_event *event, u64 file_offset) @@ -980,6 +1170,15 @@ static s64 perf_session__process_user_event(struct perf_session *session, return tool->finished_round(tool, event, oe); case PERF_RECORD_ID_INDEX: return tool->id_index(tool, event, session); + case PERF_RECORD_AUXTRACE_INFO: + return tool->auxtrace_info(tool, event, session); + case PERF_RECORD_AUXTRACE: + /* setup for reading amidst mmap */ + lseek(fd, file_offset + event->header.size, SEEK_SET); + return tool->auxtrace(tool, event, session); + case PERF_RECORD_AUXTRACE_ERROR: + perf_session__auxtrace_error_inc(session, event); + return tool->auxtrace_error(tool, event, session); default: return -EINVAL; } @@ -1034,7 +1233,7 @@ int 
perf_session__peek_event(struct perf_session *session, off_t file_offset, return -1; if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 || - readn(fd, &buf, hdr_sz) != (ssize_t)hdr_sz) + readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz) return -1; event = (union perf_event *)buf; @@ -1042,12 +1241,12 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset, if (session->header.needs_swap) perf_event_header__bswap(&event->header); - if (event->header.size < hdr_sz) + if (event->header.size < hdr_sz || event->header.size > buf_sz) return -1; rest = event->header.size - hdr_sz; - if (readn(fd, &buf, rest) != (ssize_t)rest) + if (readn(fd, buf, rest) != (ssize_t)rest) return -1; if (session->header.needs_swap) @@ -1096,8 +1295,8 @@ static s64 perf_session__process_event(struct perf_session *session, return ret; } - return machines__deliver_event(&session->machines, evlist, event, - &sample, tool, file_offset); + return perf_session__deliver_event(session, event, &sample, tool, + file_offset); } void perf_event_header__bswap(struct perf_event_header *hdr) @@ -1112,7 +1311,7 @@ struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) return machine__findnew_thread(&session->machines.host, -1, pid); } -static struct thread *perf_session__register_idle_thread(struct perf_session *session) +struct thread *perf_session__register_idle_thread(struct perf_session *session) { struct thread *thread; @@ -1138,6 +1337,25 @@ static void perf_session__warn_about_errors(const struct perf_session *session) stats->nr_events[PERF_RECORD_LOST]); } + if (session->tool->lost_samples == perf_event__process_lost_samples) { + double drop_rate; + + drop_rate = (double)stats->total_lost_samples / + (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples); + if (drop_rate > 0.05) { + ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% samples!\n\n", + stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples, + drop_rate * 100.0); + } + } + + if (session->tool->aux == perf_event__process_aux && + stats->total_aux_lost != 0) { + ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n", + stats->total_aux_lost, + stats->nr_events[PERF_RECORD_AUX]); + } + if (stats->nr_unknown_events != 0) { ui__warning("Found %u unknown events!\n\n" "Is this an older tool processing a perf.data " @@ -1168,6 +1386,32 @@ static void perf_session__warn_about_errors(const struct perf_session *session) if (oe->nr_unordered_events != 0) ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events); + + events_stats__auxtrace_error_warn(stats); + + if (stats->nr_proc_map_timeout != 0) { + ui__warning("%d map information files for pre-existing threads were\n" + "not processed, if there are samples for addresses they\n" + "will not be resolved, you may find out which are these\n" + "threads by running with -v and redirecting the output\n" + "to a file.\n" + "The time limit to process proc map is too short?\n" + "Increase it by --proc-map-timeout\n", + stats->nr_proc_map_timeout); + } +} + +static int perf_session__flush_thread_stack(struct thread *thread, + void *p __maybe_unused) +{ + return thread_stack__flush(thread); +} + +static int perf_session__flush_thread_stacks(struct perf_session *session) +{ + return machines__for_each_thread(&session->machines, + perf_session__flush_thread_stack, + NULL); } volatile int session_done; @@ -1256,10 +1500,17 @@ more: done: /* do the final flush for ordered samples */ err = ordered_events__flush(oe, OE_FLUSH__FINAL); + if 
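The perf_session__peek_event() fixes above drop a stray address-of on an already-pointer buffer and reject any declared record size outside [hdr_sz, buf_sz]. A sketch of the same two-stage, bounds-checked read, assuming a simple 16-bit size header and a little-endian host:

    #include <stdint.h>
    #include <string.h>

    struct hdr { uint16_t size; };

    /* The declared record size must cover at least the header and
     * fit the caller's buffer, otherwise a corrupt file could make
     * the second read overrun. */
    static int peek_record(const void *file, size_t file_sz,
                           void *buf, size_t buf_sz)
    {
        struct hdr h;

        if (file_sz < sizeof(h) || buf_sz < sizeof(h))
            return -1;
        memcpy(&h, file, sizeof(h));
        if (h.size < sizeof(h) || h.size > buf_sz || h.size > file_sz)
            return -1;          /* truncated or oversized record */
        memcpy(buf, file, h.size);
        return 0;
    }

    int main(void)
    {
        unsigned char file[8] = { 4, 0 }; /* size = 4 (little endian) */
        unsigned char buf[8];

        return peek_record(file, sizeof(file), buf, sizeof(buf));
    }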
(err) + goto out_err; + err = auxtrace__flush_events(session, tool); + if (err) + goto out_err; + err = perf_session__flush_thread_stacks(session); out_err: free(buf); perf_session__warn_about_errors(session); ordered_events__free(&session->ordered_events); + auxtrace__free_events(session); return err; } @@ -1324,7 +1575,10 @@ static int __perf_session__process_events(struct perf_session *session, file_offset = page_offset; head = data_offset - page_offset; - if (data_size && (data_offset + data_size < file_size)) + if (data_size == 0) + goto out; + + if (data_offset + data_size < file_size) file_size = data_offset + data_size; ui_progress__init(&prog, file_size, "Processing events..."); @@ -1402,10 +1656,17 @@ more: out: /* do the final flush for ordered samples */ err = ordered_events__flush(oe, OE_FLUSH__FINAL); + if (err) + goto out_err; + err = auxtrace__flush_events(session, tool); + if (err) + goto out_err; + err = perf_session__flush_thread_stacks(session); out_err: ui_progress__finish(); perf_session__warn_about_errors(session); ordered_events__free(&session->ordered_events); + auxtrace__free_events(session); session->one_mmap = false; return err; } @@ -1488,7 +1749,13 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) { - size_t ret = fprintf(fp, "Aggregated stats:\n"); + size_t ret; + const char *msg = ""; + + if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) + msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)"; + + ret = fprintf(fp, "\nAggregated stats:%s\n", msg); ret += events_stats__fprintf(&session->evlist->stats, fp); return ret; @@ -1533,7 +1800,7 @@ void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample, if (thread__resolve_callchain(al->thread, evsel, sample, NULL, NULL, - PERF_MAX_STACK_DEPTH) != 0) { + stack_depth) != 0) { if (verbose) error("Failed to resolve callchain. 
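Both event loops now finish with the same staged flush (ordered events, then AUX decoders, then per-thread call stacks) before the unconditional cleanup under out_err. The control shape, with hypothetical stubs standing in for the three stages:

    #include <stdio.h>

    /* Stand-ins for ordered_events__flush(), auxtrace__flush_events()
     * and perf_session__flush_thread_stacks(). */
    static int flush_ordered(void)        { return 0; }
    static int flush_auxtrace(void)       { return 0; }
    static int flush_thread_stacks(void)  { return 0; }
    static void free_session_events(void) { puts("freed"); }

    /* Later stages run only if earlier ones succeeded; the cleanup
     * below the label always runs. */
    static int finish_session(void)
    {
        int err;

        err = flush_ordered();
        if (err)
            goto out_err;
        err = flush_auxtrace();
        if (err)
            goto out_err;
        err = flush_thread_stacks();
    out_err:
        free_session_events();
        return err;
    }

    int main(void) { return finish_session(); }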
Skipping\n"); return; @@ -1655,7 +1922,7 @@ int perf_session__cpu_bitmap(struct perf_session *session, err = 0; out_delete_map: - cpu_map__delete(map); + cpu_map__put(map); return err; } diff --git a/kernel/tools/perf/util/session.h b/kernel/tools/perf/util/session.h index d5fa7b791..3e900c0ef 100644 --- a/kernel/tools/perf/util/session.h +++ b/kernel/tools/perf/util/session.h @@ -15,10 +15,16 @@ struct ip_callchain; struct thread; +struct auxtrace; +struct itrace_synth_opts; + struct perf_session { struct perf_header header; struct machines machines; struct perf_evlist *evlist; + struct auxtrace *auxtrace; + struct itrace_synth_opts *itrace_synth_opts; + struct list_head auxtrace_index; struct trace_event tevent; bool repipe; bool one_mmap; @@ -83,6 +89,8 @@ struct machine *perf_session__findnew_machine(struct perf_session *session, pid_ } struct thread *perf_session__findnew(struct perf_session *session, pid_t pid); +struct thread *perf_session__register_idle_thread(struct perf_session *session); + size_t perf_session__fprintf(struct perf_session *session, FILE *fp); size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp); diff --git a/kernel/tools/perf/util/sort.c b/kernel/tools/perf/util/sort.c index 4593f36ec..2d8ccd4d9 100644 --- a/kernel/tools/perf/util/sort.c +++ b/kernel/tools/perf/util/sort.c @@ -9,7 +9,7 @@ regex_t parent_regex; const char default_parent_pattern[] = "^sys_|^do_page_fault"; const char *parent_pattern = default_parent_pattern; const char default_sort_order[] = "comm,dso,symbol"; -const char default_branch_sort_order[] = "comm,dso_from,symbol_from,dso_to,symbol_to"; +const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles"; const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked"; const char default_top_sort_order[] = "dso,symbol"; const char default_diff_sort_order[] = "dso,symbol"; @@ -21,6 +21,7 @@ int sort__need_collapse = 0; int sort__has_parent = 0; int sort__has_sym = 0; int sort__has_dso = 0; +int sort__has_socket = 0; enum sort_mode sort__mode = SORT_MODE__NORMAL; @@ -89,14 +90,14 @@ static int64_t sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) { /* Compare the addr that should be unique among comm */ - return comm__str(right->comm) - comm__str(left->comm); + return strcmp(comm__str(right->comm), comm__str(left->comm)); } static int64_t sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) { /* Compare the addr that should be unique among comm */ - return comm__str(right->comm) - comm__str(left->comm); + return strcmp(comm__str(right->comm), comm__str(left->comm)); } static int64_t @@ -182,18 +183,16 @@ static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip) static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) { - u64 ip_l, ip_r; - if (!sym_l || !sym_r) return cmp_null(sym_l, sym_r); if (sym_l == sym_r) return 0; - ip_l = sym_l->start; - ip_r = sym_r->start; + if (sym_l->start != sym_r->start) + return (int64_t)(sym_r->start - sym_l->start); - return (int64_t)(ip_r - ip_l); + return (int64_t)(sym_r->end - sym_l->end); } static int64_t @@ -321,6 +320,59 @@ struct sort_entry sort_srcline = { .se_width_idx = HISTC_SRCLINE, }; +/* --sort srcfile */ + +static char no_srcfile[1]; + +static char *get_srcfile(struct hist_entry *e) +{ + char *sf, *p; + struct map *map = e->ms.map; + + sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip), + e->ms.sym, false, true); + if (!strcmp(sf, SRCLINE_UNKNOWN)) + return 
no_srcfile; + p = strchr(sf, ':'); + if (p && *sf) { + *p = 0; + return sf; + } + free(sf); + return no_srcfile; +} + +static int64_t +sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right) +{ + if (!left->srcfile) { + if (!left->ms.map) + left->srcfile = no_srcfile; + else + left->srcfile = get_srcfile(left); + } + if (!right->srcfile) { + if (!right->ms.map) + right->srcfile = no_srcfile; + else + right->srcfile = get_srcfile(right); + } + return strcmp(right->srcfile, left->srcfile); +} + +static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcfile); +} + +struct sort_entry sort_srcfile = { + .se_header = "Source File", + .se_cmp = sort__srcfile_cmp, + .se_snprintf = hist_entry__srcfile_snprintf, + .se_width_idx = HISTC_SRCFILE, +}; + /* --sort parent */ static int64_t @@ -370,6 +422,27 @@ struct sort_entry sort_cpu = { .se_width_idx = HISTC_CPU, }; +/* --sort socket */ + +static int64_t +sort__socket_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return right->socket - left->socket; +} + +static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket); +} + +struct sort_entry sort_socket = { + .se_header = "Socket", + .se_cmp = sort__socket_cmp, + .se_snprintf = hist_entry__socket_snprintf, + .se_width_idx = HISTC_SOCKET, +}; + /* sort keys for branch stacks */ static int64_t @@ -528,6 +601,29 @@ static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, return repsep_snprintf(bf, size, "%-*.*s", width, width, out); } +static int64_t +sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return left->branch_info->flags.cycles - + right->branch_info->flags.cycles; +} + +static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + if (he->branch_info->flags.cycles == 0) + return repsep_snprintf(bf, size, "%-*s", width, "-"); + return repsep_snprintf(bf, size, "%-*hd", width, + he->branch_info->flags.cycles); +} + +struct sort_entry sort_cycles = { + .se_header = "Basic Block Cycles", + .se_cmp = sort__cycles_cmp, + .se_snprintf = hist_entry__cycles_snprintf, + .se_width_idx = HISTC_CYCLES, +}; + /* --sort daddr_sym */ static int64_t sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) @@ -558,6 +654,35 @@ static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, width); } +static int64_t +sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) +{ + uint64_t l = 0, r = 0; + + if (left->mem_info) + l = left->mem_info->iaddr.addr; + if (right->mem_info) + r = right->mem_info->iaddr.addr; + + return (int64_t)(r - l); +} + +static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + uint64_t addr = 0; + struct map *map = NULL; + struct symbol *sym = NULL; + + if (he->mem_info) { + addr = he->mem_info->iaddr.addr; + map = he->mem_info->iaddr.map; + sym = he->mem_info->iaddr.sym; + } + return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, + width); +} + static int64_t sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) { @@ -981,6 +1106,13 @@ struct sort_entry sort_mem_daddr_sym = { .se_width_idx = HISTC_MEM_DADDR_SYMBOL, }; +struct sort_entry sort_mem_iaddr_sym = { + .se_header = "Code Symbol", + .se_cmp = sort__iaddr_cmp, + 
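get_srcfile() above reuses the srcline machinery, which yields "file:line", and truncates at the colon so the new srcfile sort key aggregates by file. A standalone version of just that trimming step (the real code additionally returns a no_srcfile sentinel for unknown lines):

    #include <stdio.h>
    #include <string.h>
    #include <stdlib.h>

    /* Keep only the file component of a "file:line" string. */
    static char *source_file_of(const char *srcline)
    {
        char *sf = strdup(srcline);
        char *p = sf ? strchr(sf, ':') : NULL;

        if (p && p != sf)
            *p = '\0';
        return sf;
    }

    int main(void)
    {
        char *f = source_file_of("hist.c:1024");

        printf("%s\n", f);  /* prints: hist.c */
        free(f);
        return 0;
    }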
.se_snprintf = hist_entry__iaddr_snprintf, + .se_width_idx = HISTC_MEM_IADDR_SYMBOL, +}; + struct sort_entry sort_mem_daddr_dso = { .se_header = "Data Object", .se_cmp = sort__dso_daddr_cmp, @@ -1174,7 +1306,9 @@ static struct sort_dimension common_sort_dimensions[] = { DIM(SORT_SYM, "symbol", sort_sym), DIM(SORT_PARENT, "parent", sort_parent), DIM(SORT_CPU, "cpu", sort_cpu), + DIM(SORT_SOCKET, "socket", sort_socket), DIM(SORT_SRCLINE, "srcline", sort_srcline), + DIM(SORT_SRCFILE, "srcfile", sort_srcfile), DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), DIM(SORT_TRANSACTION, "transaction", sort_transaction), @@ -1192,6 +1326,7 @@ static struct sort_dimension bstack_sort_dimensions[] = { DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), DIM(SORT_IN_TX, "in_tx", sort_in_tx), DIM(SORT_ABORT, "abort", sort_abort), + DIM(SORT_CYCLES, "cycles", sort_cycles), }; #undef DIM @@ -1200,6 +1335,7 @@ static struct sort_dimension bstack_sort_dimensions[] = { static struct sort_dimension memory_sort_dimensions[] = { DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), + DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym), DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), @@ -1441,6 +1577,12 @@ static int __hpp_dimension__add_output(struct hpp_dimension *hd) return 0; } +int hpp_dimension__add_output(unsigned col) +{ + BUG_ON(col >= PERF_HPP__MAX_INDEX); + return __hpp_dimension__add_output(&hpp_sort_dimensions[col]); +} + int sort_dimension__add(const char *tok) { unsigned int i; @@ -1474,6 +1616,8 @@ int sort_dimension__add(const char *tok) } else if (sd->entry == &sort_dso) { sort__has_dso = 1; + } else if (sd->entry == &sort_socket) { + sort__has_socket = 1; } return __sort_dimension__add(sd); @@ -1779,8 +1923,6 @@ static int __setup_output_field(void) if (field_order == NULL) return 0; - reset_dimensions(); - strp = str = strdup(field_order); if (str == NULL) { error("Not enough memory to setup output fields"); diff --git a/kernel/tools/perf/util/sort.h b/kernel/tools/perf/util/sort.h index 846036a92..31228851e 100644 --- a/kernel/tools/perf/util/sort.h +++ b/kernel/tools/perf/util/sort.h @@ -34,6 +34,7 @@ extern int have_ignore_callees; extern int sort__need_collapse; extern int sort__has_parent; extern int sort__has_sym; +extern int sort__has_socket; extern enum sort_mode sort__mode; extern struct sort_entry sort_comm; extern struct sort_entry sort_dso; @@ -58,15 +59,16 @@ struct he_stat { struct hist_entry_diff { bool computed; + union { + /* PERF_HPP__DELTA */ + double period_ratio_delta; - /* PERF_HPP__DELTA */ - double period_ratio_delta; - - /* PERF_HPP__RATIO */ - double period_ratio; + /* PERF_HPP__RATIO */ + double period_ratio; - /* HISTC_WEIGHTED_DIFF */ - s64 wdiff; + /* HISTC_WEIGHTED_DIFF */ + s64 wdiff; + }; }; /** @@ -89,24 +91,33 @@ struct hist_entry { struct comm *comm; u64 ip; u64 transaction; + s32 socket; s32 cpu; u8 cpumode; - struct hist_entry_diff diff; - /* We are added by hists__add_dummy_entry. */ bool dummy; - /* XXX These two should move to some tree widget lib */ - u16 row_offset; - u16 nr_rows; - - bool init_have_children; char level; u8 filtered; + union { + /* + * Since perf diff only supports the stdio output, TUI + * fields are only accessed from perf report (or perf + * top). So make it an union to reduce memory usage. 
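The sort.h rework in this patch overlays 'perf diff' statistics and TUI browser state in anonymous unions, since the two sets of fields are never live in the same run. A compilable miniature of the layout trick (C11 anonymous members), with illustrative field names:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* The diff-only and TUI-only members share storage, so the
     * struct costs max() of the two sizes instead of their sum. */
    struct demo_entry {
        uint64_t ip;
        union {
            struct {                /* used only by 'perf diff' */
                double period_ratio_delta;
            };
            struct {                /* used only by the TUI */
                uint16_t row_offset;
                uint16_t nr_rows;
                bool has_children;
            };
        };
    };

    int main(void)
    {
        printf("sizeof(struct demo_entry) = %zu\n",
               sizeof(struct demo_entry));
        return 0;
    }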
+ */ + struct hist_entry_diff diff; + struct /* for TUI */ { + u16 row_offset; + u16 nr_rows; + bool init_have_children; + bool unfolded; + bool has_children; + }; + }; char *srcline; + char *srcfile; struct symbol *parent; - unsigned long position; struct rb_root sorted_chain; struct branch_info *branch_info; struct hists *hists; @@ -163,7 +174,9 @@ enum sort_type { SORT_SYM, SORT_PARENT, SORT_CPU, + SORT_SOCKET, SORT_SRCLINE, + SORT_SRCFILE, SORT_LOCAL_WEIGHT, SORT_GLOBAL_WEIGHT, SORT_TRANSACTION, @@ -177,6 +190,7 @@ enum sort_type { SORT_MISPREDICT, SORT_ABORT, SORT_IN_TX, + SORT_CYCLES, /* memory mode specific sort keys */ __SORT_MEMORY_MODE, @@ -187,6 +201,7 @@ enum sort_type { SORT_MEM_LVL, SORT_MEM_SNOOP, SORT_MEM_DCACHELINE, + SORT_MEM_IADDR_SYMBOL, }; /* @@ -219,4 +234,6 @@ void perf_hpp__set_elide(int idx, bool elide); int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset); bool is_strict_order(const char *order); + +int hpp_dimension__add_output(unsigned col); #endif /* __PERF_SORT_H */ diff --git a/kernel/tools/perf/util/srcline.c b/kernel/tools/perf/util/srcline.c index c93fb0c5b..b4db3f48e 100644 --- a/kernel/tools/perf/util/srcline.c +++ b/kernel/tools/perf/util/srcline.c @@ -10,6 +10,8 @@ #include "symbol.h" +bool srcline_full_filename; + #ifdef HAVE_LIBBFD_SUPPORT /* @@ -147,8 +149,11 @@ static void addr2line_cleanup(struct a2l_data *a2l) free(a2l); } +#define MAX_INLINE_NEST 1024 + static int addr2line(const char *dso_name, u64 addr, - char **file, unsigned int *line, struct dso *dso) + char **file, unsigned int *line, struct dso *dso, + bool unwind_inlines) { int ret = 0; struct a2l_data *a2l = dso->a2l; @@ -168,6 +173,15 @@ static int addr2line(const char *dso_name, u64 addr, bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l); + if (a2l->found && unwind_inlines) { + int cnt = 0; + + while (bfd_find_inliner_info(a2l->abfd, &a2l->filename, + &a2l->funcname, &a2l->line) && + cnt++ < MAX_INLINE_NEST) + ; + } + if (a2l->found && a2l->filename) { *file = strdup(a2l->filename); *line = a2l->line; @@ -195,7 +209,8 @@ void dso__free_a2l(struct dso *dso) static int addr2line(const char *dso_name, u64 addr, char **file, unsigned int *line_nr, - struct dso *dso __maybe_unused) + struct dso *dso __maybe_unused, + bool unwind_inlines __maybe_unused) { FILE *fp; char cmd[PATH_MAX]; @@ -252,8 +267,8 @@ void dso__free_a2l(struct dso *dso __maybe_unused) */ #define A2L_FAIL_LIMIT 123 -char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym, - bool show_sym) +char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym, + bool show_sym, bool unwind_inlines) { char *file = NULL; unsigned line = 0; @@ -274,10 +289,12 @@ char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym, if (!strncmp(dso_name, "/tmp/perf-", 10)) goto out; - if (!addr2line(dso_name, addr, &file, &line, dso)) + if (!addr2line(dso_name, addr, &file, &line, dso, unwind_inlines)) goto out; - if (asprintf(&srcline, "%s:%u", basename(file), line) < 0) { + if (asprintf(&srcline, "%s:%u", + srcline_full_filename ? 
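The libbfd path above walks inlining chains with bfd_find_inliner_info(), keeping the outermost frame and capping the walk at MAX_INLINE_NEST so malformed debug info cannot spin forever. The same loop shape against a fake three-deep chain standing in for libbfd:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_NEST 1024

    /* Fake inline chain: reports one caller per call, like
     * bfd_find_inliner_info() does. */
    static bool next_inliner(unsigned *line)
    {
        static const unsigned chain[] = { 10, 20, 30 };
        static unsigned i;

        if (i >= 3)
            return false;
        *line = chain[i++];
        return true;
    }

    int main(void)
    {
        unsigned line = 0;
        int cnt = 0;

        /* Iterate to the outermost frame, but bound the depth so a
         * corrupt chain cannot loop forever. */
        while (next_inliner(&line) && cnt++ < MAX_NEST)
            ;
        printf("outermost line = %u\n", line);  /* prints 30 */
        return 0;
    }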
file : basename(file), + line) < 0) { free(file); goto out; } @@ -306,3 +323,9 @@ void free_srcline(char *srcline) if (srcline && strcmp(srcline, SRCLINE_UNKNOWN) != 0) free(srcline); } + +char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym, + bool show_sym) +{ + return __get_srcline(dso, addr, sym, show_sym, false); +} diff --git a/kernel/tools/perf/util/stat-shadow.c b/kernel/tools/perf/util/stat-shadow.c new file mode 100644 index 000000000..6ac031468 --- /dev/null +++ b/kernel/tools/perf/util/stat-shadow.c @@ -0,0 +1,437 @@ +#include +#include "evsel.h" +#include "stat.h" +#include "color.h" + +enum { + CTX_BIT_USER = 1 << 0, + CTX_BIT_KERNEL = 1 << 1, + CTX_BIT_HV = 1 << 2, + CTX_BIT_HOST = 1 << 3, + CTX_BIT_IDLE = 1 << 4, + CTX_BIT_MAX = 1 << 5, +}; + +#define NUM_CTX CTX_BIT_MAX + +static struct stats runtime_nsecs_stats[MAX_NR_CPUS]; +static struct stats runtime_cycles_stats[NUM_CTX][MAX_NR_CPUS]; +static struct stats runtime_stalled_cycles_front_stats[NUM_CTX][MAX_NR_CPUS]; +static struct stats runtime_stalled_cycles_back_stats[NUM_CTX][MAX_NR_CPUS]; +static struct stats runtime_branches_stats[NUM_CTX][MAX_NR_CPUS]; +static struct stats runtime_cacherefs_stats[NUM_CTX][MAX_NR_CPUS]; +static struct stats runtime_l1_dcache_stats[NUM_CTX][MAX_NR_CPUS]; +static struct stats runtime_l1_icache_stats[NUM_CTX][MAX_NR_CPUS]; +static struct stats runtime_ll_cache_stats[NUM_CTX][MAX_NR_CPUS]; +static struct stats runtime_itlb_cache_stats[NUM_CTX][MAX_NR_CPUS]; +static struct stats runtime_dtlb_cache_stats[NUM_CTX][MAX_NR_CPUS]; +static struct stats runtime_cycles_in_tx_stats[NUM_CTX][MAX_NR_CPUS]; +static struct stats runtime_transaction_stats[NUM_CTX][MAX_NR_CPUS]; +static struct stats runtime_elision_stats[NUM_CTX][MAX_NR_CPUS]; + +struct stats walltime_nsecs_stats; + +static int evsel_context(struct perf_evsel *evsel) +{ + int ctx = 0; + + if (evsel->attr.exclude_kernel) + ctx |= CTX_BIT_KERNEL; + if (evsel->attr.exclude_user) + ctx |= CTX_BIT_USER; + if (evsel->attr.exclude_hv) + ctx |= CTX_BIT_HV; + if (evsel->attr.exclude_host) + ctx |= CTX_BIT_HOST; + if (evsel->attr.exclude_idle) + ctx |= CTX_BIT_IDLE; + + return ctx; +} + +void perf_stat__reset_shadow_stats(void) +{ + memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats)); + memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats)); + memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats)); + memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats)); + memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats)); + memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats)); + memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats)); + memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats)); + memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats)); + memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats)); + memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats)); + memset(runtime_cycles_in_tx_stats, 0, + sizeof(runtime_cycles_in_tx_stats)); + memset(runtime_transaction_stats, 0, + sizeof(runtime_transaction_stats)); + memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats)); + memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats)); +} + +/* + * Update various tracking values we maintain to print + * more semantic information such as miss/hit ratios, + * instruction rates, etc: + */ +void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count, + 
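evsel_context() above folds the exclude_* attribute bits into an array index, so ratios are only ever computed between counters measured over the same privilege domains. A trimmed three-bit version of the same idea:

    #include <stdio.h>
    #include <stdbool.h>

    enum {
        CTX_USER   = 1 << 0,
        CTX_KERNEL = 1 << 1,
        CTX_HV     = 1 << 2,
        CTX_MAX    = 1 << 3,  /* trimmed to three bits for the demo */
    };

    /* Every distinct set of exclusion bits gets its own slot, so
     * e.g. user-only cycles are never divided by all-mode cycles. */
    static int demo_context(bool excl_user, bool excl_kernel, bool excl_hv)
    {
        int ctx = 0;

        if (excl_kernel) ctx |= CTX_KERNEL;
        if (excl_user)   ctx |= CTX_USER;
        if (excl_hv)     ctx |= CTX_HV;
        return ctx;   /* index into stats[CTX_MAX][...] */
    }

    int main(void)
    {
        /* user-only counter: kernel and hv excluded -> slot 6 */
        printf("%d\n", demo_context(false, true, true));
        return 0;
    }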
int cpu) +{ + int ctx = evsel_context(counter); + + if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) + update_stats(&runtime_nsecs_stats[cpu], count[0]); + else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) + update_stats(&runtime_cycles_stats[ctx][cpu], count[0]); + else if (perf_stat_evsel__is(counter, CYCLES_IN_TX)) + update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]); + else if (perf_stat_evsel__is(counter, TRANSACTION_START)) + update_stats(&runtime_transaction_stats[ctx][cpu], count[0]); + else if (perf_stat_evsel__is(counter, ELISION_START)) + update_stats(&runtime_elision_stats[ctx][cpu], count[0]); + else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) + update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count[0]); + else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND)) + update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count[0]); + else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) + update_stats(&runtime_branches_stats[ctx][cpu], count[0]); + else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) + update_stats(&runtime_cacherefs_stats[ctx][cpu], count[0]); + else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D)) + update_stats(&runtime_l1_dcache_stats[ctx][cpu], count[0]); + else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I)) + update_stats(&runtime_l1_icache_stats[ctx][cpu], count[0]); + else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL)) + update_stats(&runtime_ll_cache_stats[ctx][cpu], count[0]); + else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB)) + update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count[0]); + else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB)) + update_stats(&runtime_itlb_cache_stats[ctx][cpu], count[0]); +} + +/* used for get_ratio_color() */ +enum grc_type { + GRC_STALLED_CYCLES_FE, + GRC_STALLED_CYCLES_BE, + GRC_CACHE_MISSES, + GRC_MAX_NR +}; + +static const char *get_ratio_color(enum grc_type type, double ratio) +{ + static const double grc_table[GRC_MAX_NR][3] = { + [GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 }, + [GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 }, + [GRC_CACHE_MISSES] = { 20.0, 10.0, 5.0 }, + }; + const char *color = PERF_COLOR_NORMAL; + + if (ratio > grc_table[type][0]) + color = PERF_COLOR_RED; + else if (ratio > grc_table[type][1]) + color = PERF_COLOR_MAGENTA; + else if (ratio > grc_table[type][2]) + color = PERF_COLOR_YELLOW; + + return color; +} + +static void print_stalled_cycles_frontend(FILE *out, int cpu, + struct perf_evsel *evsel + __maybe_unused, double avg) +{ + double total, ratio = 0.0; + const char *color; + int ctx = evsel_context(evsel); + + total = avg_stats(&runtime_cycles_stats[ctx][cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio); + + fprintf(out, " # "); + color_fprintf(out, color, "%6.2f%%", ratio); + fprintf(out, " frontend cycles idle "); +} + +static void print_stalled_cycles_backend(FILE *out, int cpu, + struct perf_evsel *evsel + __maybe_unused, double avg) +{ + double total, ratio = 0.0; + const char *color; + int ctx = evsel_context(evsel); + + total = avg_stats(&runtime_cycles_stats[ctx][cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio); + + fprintf(out, " # "); + color_fprintf(out, color, "%6.2f%%", ratio); + fprintf(out, " backend cycles idle "); +} + +static void print_branch_misses(FILE *out, int cpu, + struct 
perf_evsel *evsel __maybe_unused, + double avg) +{ + double total, ratio = 0.0; + const char *color; + int ctx = evsel_context(evsel); + + total = avg_stats(&runtime_branches_stats[ctx][cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = get_ratio_color(GRC_CACHE_MISSES, ratio); + + fprintf(out, " # "); + color_fprintf(out, color, "%6.2f%%", ratio); + fprintf(out, " of all branches "); +} + +static void print_l1_dcache_misses(FILE *out, int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) +{ + double total, ratio = 0.0; + const char *color; + int ctx = evsel_context(evsel); + + total = avg_stats(&runtime_l1_dcache_stats[ctx][cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = get_ratio_color(GRC_CACHE_MISSES, ratio); + + fprintf(out, " # "); + color_fprintf(out, color, "%6.2f%%", ratio); + fprintf(out, " of all L1-dcache hits "); +} + +static void print_l1_icache_misses(FILE *out, int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) +{ + double total, ratio = 0.0; + const char *color; + int ctx = evsel_context(evsel); + + total = avg_stats(&runtime_l1_icache_stats[ctx][cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = get_ratio_color(GRC_CACHE_MISSES, ratio); + + fprintf(out, " # "); + color_fprintf(out, color, "%6.2f%%", ratio); + fprintf(out, " of all L1-icache hits "); +} + +static void print_dtlb_cache_misses(FILE *out, int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) +{ + double total, ratio = 0.0; + const char *color; + int ctx = evsel_context(evsel); + + total = avg_stats(&runtime_dtlb_cache_stats[ctx][cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = get_ratio_color(GRC_CACHE_MISSES, ratio); + + fprintf(out, " # "); + color_fprintf(out, color, "%6.2f%%", ratio); + fprintf(out, " of all dTLB cache hits "); +} + +static void print_itlb_cache_misses(FILE *out, int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) +{ + double total, ratio = 0.0; + const char *color; + int ctx = evsel_context(evsel); + + total = avg_stats(&runtime_itlb_cache_stats[ctx][cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = get_ratio_color(GRC_CACHE_MISSES, ratio); + + fprintf(out, " # "); + color_fprintf(out, color, "%6.2f%%", ratio); + fprintf(out, " of all iTLB cache hits "); +} + +static void print_ll_cache_misses(FILE *out, int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) +{ + double total, ratio = 0.0; + const char *color; + int ctx = evsel_context(evsel); + + total = avg_stats(&runtime_ll_cache_stats[ctx][cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = get_ratio_color(GRC_CACHE_MISSES, ratio); + + fprintf(out, " # "); + color_fprintf(out, color, "%6.2f%%", ratio); + fprintf(out, " of all LL-cache hits "); +} + +void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel, + double avg, int cpu, enum aggr_mode aggr) +{ + double total, ratio = 0.0, total2; + int ctx = evsel_context(evsel); + + if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) { + total = avg_stats(&runtime_cycles_stats[ctx][cpu]); + if (total) { + ratio = avg / total; + fprintf(out, " # %5.2f insns per cycle ", ratio); + } else { + fprintf(out, " "); + } + total = avg_stats(&runtime_stalled_cycles_front_stats[ctx][cpu]); + total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[ctx][cpu])); + + if (total && avg) { + ratio = total / avg; + fprintf(out, "\n"); + if (aggr == AGGR_NONE) + fprintf(out, " "); + fprintf(out, " # %5.2f stalled cycles per insn", 
ratio); + } + + } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && + runtime_branches_stats[ctx][cpu].n != 0) { + print_branch_misses(out, cpu, evsel, avg); + } else if ( + evsel->attr.type == PERF_TYPE_HW_CACHE && + evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D | + ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | + ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && + runtime_l1_dcache_stats[ctx][cpu].n != 0) { + print_l1_dcache_misses(out, cpu, evsel, avg); + } else if ( + evsel->attr.type == PERF_TYPE_HW_CACHE && + evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I | + ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | + ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && + runtime_l1_icache_stats[ctx][cpu].n != 0) { + print_l1_icache_misses(out, cpu, evsel, avg); + } else if ( + evsel->attr.type == PERF_TYPE_HW_CACHE && + evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB | + ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | + ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && + runtime_dtlb_cache_stats[ctx][cpu].n != 0) { + print_dtlb_cache_misses(out, cpu, evsel, avg); + } else if ( + evsel->attr.type == PERF_TYPE_HW_CACHE && + evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB | + ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | + ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && + runtime_itlb_cache_stats[ctx][cpu].n != 0) { + print_itlb_cache_misses(out, cpu, evsel, avg); + } else if ( + evsel->attr.type == PERF_TYPE_HW_CACHE && + evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL | + ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | + ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && + runtime_ll_cache_stats[ctx][cpu].n != 0) { + print_ll_cache_misses(out, cpu, evsel, avg); + } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) && + runtime_cacherefs_stats[ctx][cpu].n != 0) { + total = avg_stats(&runtime_cacherefs_stats[ctx][cpu]); + + if (total) + ratio = avg * 100 / total; + + fprintf(out, " # %8.3f %% of all cache refs ", ratio); + + } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) { + print_stalled_cycles_frontend(out, cpu, evsel, avg); + } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) { + print_stalled_cycles_backend(out, cpu, evsel, avg); + } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) { + total = avg_stats(&runtime_nsecs_stats[cpu]); + + if (total) { + ratio = avg / total; + fprintf(out, " # %8.3f GHz ", ratio); + } else { + fprintf(out, " "); + } + } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) { + total = avg_stats(&runtime_cycles_stats[ctx][cpu]); + if (total) + fprintf(out, + " # %5.2f%% transactional cycles ", + 100.0 * (avg / total)); + } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) { + total = avg_stats(&runtime_cycles_stats[ctx][cpu]); + total2 = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]); + if (total2 < avg) + total2 = avg; + if (total) + fprintf(out, + " # %5.2f%% aborted cycles ", + 100.0 * ((total2-avg) / total)); + } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) && + runtime_cycles_in_tx_stats[ctx][cpu].n != 0) { + total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]); + + if (avg) + ratio = total / avg; + + fprintf(out, " # %8.0f cycles / transaction ", ratio); + } else if (perf_stat_evsel__is(evsel, ELISION_START) && + runtime_cycles_in_tx_stats[ctx][cpu].n != 0) { + total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]); + + if (avg) + ratio = total / avg; + + fprintf(out, " # %8.0f cycles / elision ", ratio); + } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) { + if ((ratio = 
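The long else-if chain above matches generic cache events by their packed config value: cache id in bits 0-7, operation in bits 8-15, result in bits 16-23, which is the standard perf_event_attr encoding for PERF_TYPE_HW_CACHE. A helper that builds such a config (the DEMO_* values mirror the usual UAPI constants):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_CACHE_L1I   1ULL   /* PERF_COUNT_HW_CACHE_L1I */
    #define DEMO_OP_READ     0ULL   /* PERF_COUNT_HW_CACHE_OP_READ */
    #define DEMO_RESULT_MISS 1ULL   /* PERF_COUNT_HW_CACHE_RESULT_MISS */

    /* Pack the three cache-event enums into one config word. */
    static uint64_t hw_cache_config(uint64_t cache, uint64_t op,
                                    uint64_t result)
    {
        return cache | (op << 8) | (result << 16);
    }

    int main(void)
    {
        printf("L1I read misses config = %#llx\n",
               (unsigned long long)hw_cache_config(DEMO_CACHE_L1I,
                                                   DEMO_OP_READ,
                                                   DEMO_RESULT_MISS));
        return 0;   /* prints 0x10001 */
    }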
avg_stats(&walltime_nsecs_stats)) != 0) + fprintf(out, " # %8.3f CPUs utilized ", avg / ratio); + else + fprintf(out, " "); + } else if (runtime_nsecs_stats[cpu].n != 0) { + char unit = 'M'; + + total = avg_stats(&runtime_nsecs_stats[cpu]); + + if (total) + ratio = 1000.0 * avg / total; + if (ratio < 0.001) { + ratio *= 1000; + unit = 'K'; + } + + fprintf(out, " # %8.3f %c/sec ", ratio, unit); + } else { + fprintf(out, " "); + } +} diff --git a/kernel/tools/perf/util/stat.c b/kernel/tools/perf/util/stat.c index 6506b3dfb..4a3a72cb5 100644 --- a/kernel/tools/perf/util/stat.c +++ b/kernel/tools/perf/util/stat.c @@ -1,6 +1,8 @@ #include - #include "stat.h" +#include "evlist.h" +#include "evsel.h" +#include "thread_map.h" void update_stats(struct stats *stats, u64 val) { @@ -61,3 +63,280 @@ double rel_stddev_stats(double stddev, double avg) return pct; } + +bool __perf_evsel_stat__is(struct perf_evsel *evsel, + enum perf_stat_evsel_id id) +{ + struct perf_stat_evsel *ps = evsel->priv; + + return ps->id == id; +} + +#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name +static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = { + ID(NONE, x), + ID(CYCLES_IN_TX, cpu/cycles-t/), + ID(TRANSACTION_START, cpu/tx-start/), + ID(ELISION_START, cpu/el-start/), + ID(CYCLES_IN_TX_CP, cpu/cycles-ct/), +}; +#undef ID + +void perf_stat_evsel_id_init(struct perf_evsel *evsel) +{ + struct perf_stat_evsel *ps = evsel->priv; + int i; + + /* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */ + + for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) { + if (!strcmp(perf_evsel__name(evsel), id_str[i])) { + ps->id = i; + break; + } + } +} + +void perf_evsel__reset_stat_priv(struct perf_evsel *evsel) +{ + int i; + struct perf_stat_evsel *ps = evsel->priv; + + for (i = 0; i < 3; i++) + init_stats(&ps->res_stats[i]); + + perf_stat_evsel_id_init(evsel); +} + +int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel) +{ + evsel->priv = zalloc(sizeof(struct perf_stat_evsel)); + if (evsel->priv == NULL) + return -ENOMEM; + perf_evsel__reset_stat_priv(evsel); + return 0; +} + +void perf_evsel__free_stat_priv(struct perf_evsel *evsel) +{ + zfree(&evsel->priv); +} + +int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel, + int ncpus, int nthreads) +{ + struct perf_counts *counts; + + counts = perf_counts__new(ncpus, nthreads); + if (counts) + evsel->prev_raw_counts = counts; + + return counts ? 
0 : -ENOMEM; +} + +void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel) +{ + perf_counts__delete(evsel->prev_raw_counts); + evsel->prev_raw_counts = NULL; +} + +int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw) +{ + int ncpus = perf_evsel__nr_cpus(evsel); + int nthreads = thread_map__nr(evsel->threads); + + if (perf_evsel__alloc_stat_priv(evsel) < 0 || + perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 || + (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0)) + return -ENOMEM; + + return 0; +} + +int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + if (perf_evsel__alloc_stats(evsel, alloc_raw)) + goto out_free; + } + + return 0; + +out_free: + perf_evlist__free_stats(evlist); + return -1; +} + +void perf_evlist__free_stats(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + perf_evsel__free_stat_priv(evsel); + perf_evsel__free_counts(evsel); + perf_evsel__free_prev_raw_counts(evsel); + } +} + +void perf_evlist__reset_stats(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + perf_evsel__reset_stat_priv(evsel); + perf_evsel__reset_counts(evsel); + } +} + +static void zero_per_pkg(struct perf_evsel *counter) +{ + if (counter->per_pkg_mask) + memset(counter->per_pkg_mask, 0, MAX_NR_CPUS); +} + +static int check_per_pkg(struct perf_evsel *counter, + struct perf_counts_values *vals, int cpu, bool *skip) +{ + unsigned long *mask = counter->per_pkg_mask; + struct cpu_map *cpus = perf_evsel__cpus(counter); + int s; + + *skip = false; + + if (!counter->per_pkg) + return 0; + + if (cpu_map__empty(cpus)) + return 0; + + if (!mask) { + mask = zalloc(MAX_NR_CPUS); + if (!mask) + return -ENOMEM; + + counter->per_pkg_mask = mask; + } + + /* + * we do not consider an event that has not run as a good + * instance to mark a package as used (skip=1). Otherwise + * we may run into a situation where the first CPU in a package + * is not running anything, yet the second is, and this function + * would mark the package as used after the first CPU and would + * not read the values from the second CPU. 
+ */ + if (!(vals->run && vals->ena)) + return 0; + + s = cpu_map__get_socket(cpus, cpu, NULL); + if (s < 0) + return -1; + + *skip = test_and_set_bit(s, mask) == 1; + return 0; +} + +static int +process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel, + int cpu, int thread, + struct perf_counts_values *count) +{ + struct perf_counts_values *aggr = &evsel->counts->aggr; + static struct perf_counts_values zero; + bool skip = false; + + if (check_per_pkg(evsel, count, cpu, &skip)) { + pr_err("failed to read per-pkg counter\n"); + return -1; + } + + if (skip) + count = &zero; + + switch (config->aggr_mode) { + case AGGR_THREAD: + case AGGR_CORE: + case AGGR_SOCKET: + case AGGR_NONE: + if (!evsel->snapshot) + perf_evsel__compute_deltas(evsel, cpu, thread, count); + perf_counts_values__scale(count, config->scale, NULL); + if (config->aggr_mode == AGGR_NONE) + perf_stat__update_shadow_stats(evsel, count->values, cpu); + break; + case AGGR_GLOBAL: + aggr->val += count->val; + if (config->scale) { + aggr->ena += count->ena; + aggr->run += count->run; + } + case AGGR_UNSET: + default: + break; + } + + return 0; +} + +static int process_counter_maps(struct perf_stat_config *config, + struct perf_evsel *counter) +{ + int nthreads = thread_map__nr(counter->threads); + int ncpus = perf_evsel__nr_cpus(counter); + int cpu, thread; + + if (counter->system_wide) + nthreads = 1; + + for (thread = 0; thread < nthreads; thread++) { + for (cpu = 0; cpu < ncpus; cpu++) { + if (process_counter_values(config, counter, cpu, thread, + perf_counts(counter->counts, cpu, thread))) + return -1; + } + } + + return 0; +} + +int perf_stat_process_counter(struct perf_stat_config *config, + struct perf_evsel *counter) +{ + struct perf_counts_values *aggr = &counter->counts->aggr; + struct perf_stat_evsel *ps = counter->priv; + u64 *count = counter->counts->aggr.values; + int i, ret; + + aggr->val = aggr->ena = aggr->run = 0; + + if (counter->per_pkg) + zero_per_pkg(counter); + + ret = process_counter_maps(config, counter); + if (ret) + return ret; + + if (config->aggr_mode != AGGR_GLOBAL) + return 0; + + if (!counter->snapshot) + perf_evsel__compute_deltas(counter, -1, -1, aggr); + perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled); + + for (i = 0; i < 3; i++) + update_stats(&ps->res_stats[i], count[i]); + + if (verbose) { + fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", + perf_evsel__name(counter), count[0], count[1], count[2]); + } + + /* + * Save the full runtime - to allow normalization during printout: + */ + perf_stat__update_shadow_stats(counter, count, 0); + + return 0; +} diff --git a/kernel/tools/perf/util/stat.h b/kernel/tools/perf/util/stat.h index 5667fc3e3..da1d11c4f 100644 --- a/kernel/tools/perf/util/stat.h +++ b/kernel/tools/perf/util/stat.h @@ -2,6 +2,8 @@ #define __PERF_STATS_H #include +#include +#include "xyarray.h" struct stats { @@ -9,6 +11,36 @@ struct stats u64 max, min; }; +enum perf_stat_evsel_id { + PERF_STAT_EVSEL_ID__NONE = 0, + PERF_STAT_EVSEL_ID__CYCLES_IN_TX, + PERF_STAT_EVSEL_ID__TRANSACTION_START, + PERF_STAT_EVSEL_ID__ELISION_START, + PERF_STAT_EVSEL_ID__CYCLES_IN_TX_CP, + PERF_STAT_EVSEL_ID__MAX, +}; + +struct perf_stat_evsel { + struct stats res_stats[3]; + enum perf_stat_evsel_id id; +}; + +enum aggr_mode { + AGGR_NONE, + AGGR_GLOBAL, + AGGR_SOCKET, + AGGR_CORE, + AGGR_THREAD, + AGGR_UNSET, +}; + +struct perf_stat_config { + enum aggr_mode aggr_mode; + bool scale; + FILE *output; + unsigned int interval; +}; + void 
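check_per_pkg() above counts a per-package (uncore) counter once per socket: the first running instance seen marks the package, and later instances on the same socket are skipped. The kernel uses an atomic test_and_set_bit(); this is a single-threaded sketch of the same dedup:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_SOCKETS 64

    static bool socket_seen[MAX_SOCKETS];

    /* Non-atomic stand-in for the test_and_set_bit() based check. */
    static bool should_skip(int socket, bool counter_ran)
    {
        if (!counter_ran)
            return false;  /* never mark a socket with a dead instance */
        if (socket_seen[socket])
            return true;   /* this package is already accounted for */
        socket_seen[socket] = true;
        return false;
    }

    int main(void)
    {
        int first  = should_skip(0, true);
        int second = should_skip(0, true);

        printf("%d %d\n", first, second);  /* prints: 0 1 */
        return 0;
    }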
update_stats(struct stats *stats, u64 val); double avg_stats(struct stats *stats); double stddev_stats(struct stats *stats); @@ -22,4 +54,40 @@ static inline void init_stats(struct stats *stats) stats->min = (u64) -1; stats->max = 0; } + +struct perf_evsel; +struct perf_evlist; + +bool __perf_evsel_stat__is(struct perf_evsel *evsel, + enum perf_stat_evsel_id id); + +#define perf_stat_evsel__is(evsel, id) \ + __perf_evsel_stat__is(evsel, PERF_STAT_EVSEL_ID__ ## id) + +void perf_stat_evsel_id_init(struct perf_evsel *evsel); + +extern struct stats walltime_nsecs_stats; + +void perf_stat__reset_shadow_stats(void); +void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count, + int cpu); +void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel, + double avg, int cpu, enum aggr_mode aggr); + +void perf_evsel__reset_stat_priv(struct perf_evsel *evsel); +int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel); +void perf_evsel__free_stat_priv(struct perf_evsel *evsel); + +int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel, + int ncpus, int nthreads); +void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel); + +int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw); + +int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw); +void perf_evlist__free_stats(struct perf_evlist *evlist); +void perf_evlist__reset_stats(struct perf_evlist *evlist); + +int perf_stat_process_counter(struct perf_stat_config *config, + struct perf_evsel *counter); #endif diff --git a/kernel/tools/perf/util/strbuf.c b/kernel/tools/perf/util/strbuf.c index 4abe23550..25671fa16 100644 --- a/kernel/tools/perf/util/strbuf.c +++ b/kernel/tools/perf/util/strbuf.c @@ -82,23 +82,22 @@ void strbuf_add(struct strbuf *sb, const void *data, size_t len) strbuf_setlen(sb, sb->len + len); } -void strbuf_addf(struct strbuf *sb, const char *fmt, ...) +void strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap) { int len; - va_list ap; + va_list ap_saved; if (!strbuf_avail(sb)) strbuf_grow(sb, 64); - va_start(ap, fmt); + + va_copy(ap_saved, ap); len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); - va_end(ap); if (len < 0) die("your vsnprintf is broken"); if (len > strbuf_avail(sb)) { strbuf_grow(sb, len); - va_start(ap, fmt); - len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); - va_end(ap); + len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved); + va_end(ap_saved); if (len > strbuf_avail(sb)) { die("this should not happen, your vsnprintf is broken"); } @@ -106,6 +105,15 @@ void strbuf_addf(struct strbuf *sb, const char *fmt, ...) strbuf_setlen(sb, sb->len + len); } +void strbuf_addf(struct strbuf *sb, const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + strbuf_addv(sb, fmt, ap); + va_end(ap); +} + ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint) { size_t oldlen = sb->len; diff --git a/kernel/tools/perf/util/strbuf.h b/kernel/tools/perf/util/strbuf.h index 436ac319f..529f2f035 100644 --- a/kernel/tools/perf/util/strbuf.h +++ b/kernel/tools/perf/util/strbuf.h @@ -39,6 +39,7 @@ */ #include +#include extern char strbuf_slopbuf[]; struct strbuf { @@ -85,6 +86,7 @@ static inline void strbuf_addstr(struct strbuf *sb, const char *s) { __attribute__((format(printf,2,3))) extern void strbuf_addf(struct strbuf *sb, const char *fmt, ...); +extern void strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap); /* XXX: if read fails, any partial read is undone */ extern ssize_t strbuf_read(struct strbuf *, int fd, ssize_t hint); diff --git a/kernel/tools/perf/util/strfilter.c b/kernel/tools/perf/util/strfilter.c index 79a757a2a..bcae659b6 100644 --- a/kernel/tools/perf/util/strfilter.c +++ b/kernel/tools/perf/util/strfilter.c @@ -170,6 +170,46 @@ struct strfilter *strfilter__new(const char *rules, const char **err) return filter; } +static int strfilter__append(struct strfilter *filter, bool _or, + const char *rules, const char **err) +{ + struct strfilter_node *right, *root; + const char *ep = NULL; + + if (!filter || !rules) + return -EINVAL; + + right = strfilter_node__new(rules, &ep); + if (!right || *ep != '\0') { + if (err) + *err = ep; + goto error; + } + root = strfilter_node__alloc(_or ? OP_or : OP_and, filter->root, right); + if (!root) { + ep = NULL; + goto error; + } + + filter->root = root; + return 0; + +error: + strfilter_node__delete(right); + return ep ? -EINVAL : -ENOMEM; +} + +int strfilter__or(struct strfilter *filter, const char *rules, const char **err) +{ + return strfilter__append(filter, true, rules, err); +} + +int strfilter__and(struct strfilter *filter, const char *rules, + const char **err) +{ + return strfilter__append(filter, false, rules, err); +} + static bool strfilter_node__compare(struct strfilter_node *node, const char *str) { @@ -197,3 +237,70 @@ bool strfilter__compare(struct strfilter *filter, const char *str) return false; return strfilter_node__compare(filter->root, str); } + +static int strfilter_node__sprint(struct strfilter_node *node, char *buf); + +/* sprint node in parenthesis if needed */ +static int strfilter_node__sprint_pt(struct strfilter_node *node, char *buf) +{ + int len; + int pt = node->r ? 
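The strbuf_addv() rework above exists because a va_list is consumed as it is walked: retrying vsnprintf() after growing the buffer requires a va_copy() taken before the first pass. A minimal demonstration of the pattern:

    #include <stdarg.h>
    #include <stdio.h>

    /* Format into a deliberately tiny probe buffer first; on
     * truncation, replay the saved va_list into the real buffer. */
    static void vformat(char *out, size_t out_sz, const char *fmt,
                        va_list ap)
    {
        char probe[4];
        va_list saved;

        va_copy(saved, ap);   /* must happen before 'ap' is consumed */
        if (vsnprintf(probe, sizeof(probe), fmt, ap) >= (int)sizeof(probe))
            vsnprintf(out, out_sz, fmt, saved);  /* safe replay */
        else
            snprintf(out, out_sz, "%s", probe);
        va_end(saved);
    }

    static void format(char *out, size_t out_sz, const char *fmt, ...)
    {
        va_list ap;

        va_start(ap, fmt);
        vformat(out, out_sz, fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        char buf[64];

        format(buf, sizeof(buf), "%s %d", "events:", 12345);
        puts(buf);   /* prints: events: 12345 */
        return 0;
    }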
2 : 0; /* don't need to check node->l */ + + if (buf && pt) + *buf++ = '('; + len = strfilter_node__sprint(node, buf); + if (len < 0) + return len; + if (buf && pt) + *(buf + len) = ')'; + return len + pt; +} + +static int strfilter_node__sprint(struct strfilter_node *node, char *buf) +{ + int len = 0, rlen; + + if (!node || !node->p) + return -EINVAL; + + switch (*node->p) { + case '|': + case '&': + len = strfilter_node__sprint_pt(node->l, buf); + if (len < 0) + return len; + case '!': + if (buf) { + *(buf + len++) = *node->p; + buf += len; + } else + len++; + rlen = strfilter_node__sprint_pt(node->r, buf); + if (rlen < 0) + return rlen; + len += rlen; + break; + default: + len = strlen(node->p); + if (buf) + strcpy(buf, node->p); + } + + return len; +} + +char *strfilter__string(struct strfilter *filter) +{ + int len; + char *ret = NULL; + + len = strfilter_node__sprint(filter->root, NULL); + if (len < 0) + return NULL; + + ret = malloc(len + 1); + if (ret) + strfilter_node__sprint(filter->root, ret); + + return ret; +} diff --git a/kernel/tools/perf/util/strfilter.h b/kernel/tools/perf/util/strfilter.h index fe611f3c9..cff5eda88 100644 --- a/kernel/tools/perf/util/strfilter.h +++ b/kernel/tools/perf/util/strfilter.h @@ -28,6 +28,32 @@ struct strfilter { */ struct strfilter *strfilter__new(const char *rules, const char **err); +/** + * strfilter__or - Append an additional rule by logical-or + * @filter: Original string filter + * @rules: Filter rule to be appended at left of the root of + * @filter by using logical-or. + * @err: Pointer which points an error detected on @rules + * + * Parse @rules and join it to the @filter by using logical-or. + * Return 0 if success, or return the error code. + */ +int strfilter__or(struct strfilter *filter, + const char *rules, const char **err); + +/** + * strfilter__add - Append an additional rule by logical-and + * @filter: Original string filter + * @rules: Filter rule to be appended at left of the root of + * @filter by using logical-and. + * @err: Pointer which points an error detected on @rules + * + * Parse @rules and join it to the @filter by using logical-and. + * Return 0 if success, or return the error code. + */ +int strfilter__and(struct strfilter *filter, + const char *rules, const char **err); + /** * strfilter__compare - compare given string and a string filter * @filter: String filter @@ -45,4 +71,13 @@ bool strfilter__compare(struct strfilter *filter, const char *str); */ void strfilter__delete(struct strfilter *filter); +/** + * strfilter__string - Reconstruct a rule string from filter + * @filter: String filter to reconstruct + * + * Reconstruct a rule string from @filter. This will be good for + * debug messages. Note that returning string must be freed afterward. + */ +char *strfilter__string(struct strfilter *filter); + #endif diff --git a/kernel/tools/perf/util/string.c b/kernel/tools/perf/util/string.c index 6afd6106c..fc8781de6 100644 --- a/kernel/tools/perf/util/string.c +++ b/kernel/tools/perf/util/string.c @@ -357,3 +357,42 @@ void *memdup(const void *src, size_t len) return p; } + +char *asprintf_expr_inout_ints(const char *var, bool in, size_t nints, int *ints) +{ + /* + * FIXME: replace this with an expression using log10() when we + * find a suitable implementation, maybe the one in the dvb drivers... 
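Illustrative use of the new strfilter append/reconstruct API above (this links against perf's strfilter.c and is not a standalone program): build a rule incrementally, then print it back for debugging, as strfilter__string()'s kernel-doc suggests.

    #include <stdlib.h>
    #include "strfilter.h"

    static void build_filter_demo(void)
    {
        const char *err = NULL;
        struct strfilter *f = strfilter__new("vfs_*", &err);
        char *s;

        if (!f)
            return;
        strfilter__or(f, "sys_*", &err);       /* widen the match */
        strfilter__and(f, "!sys_exit", &err);  /* then exclude one */
        s = strfilter__string(f);  /* combined rule; caller frees */
        free(s);
        strfilter__delete(f);
    }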
+ * + * "%s == %d || " = log10(MAXINT) * 2 + 8 chars for the operators + */ + size_t size = nints * 28 + 1; /* \0 */ + size_t i, printed = 0; + char *expr = malloc(size); + + if (expr) { + const char *or_and = "||", *eq_neq = "=="; + char *e = expr; + + if (!in) { + or_and = "&&"; + eq_neq = "!="; + } + + for (i = 0; i < nints; ++i) { + if (printed == size) + goto out_err_overflow; + + if (i > 0) + printed += snprintf(e + printed, size - printed, " %s ", or_and); + printed += scnprintf(e + printed, size - printed, + "%s %s %d", var, eq_neq, ints[i]); + } + } + + return expr; + +out_err_overflow: + free(expr); + return NULL; +} diff --git a/kernel/tools/perf/util/strlist.c b/kernel/tools/perf/util/strlist.c index 71f9d102b..bdf98f6f2 100644 --- a/kernel/tools/perf/util/strlist.c +++ b/kernel/tools/perf/util/strlist.c @@ -72,7 +72,7 @@ int strlist__load(struct strlist *slist, const char *filename) FILE *fp = fopen(filename, "r"); if (fp == NULL) - return errno; + return -errno; while (fgets(entry, sizeof(entry), fp) != NULL) { const size_t len = strlen(entry); @@ -108,43 +108,70 @@ struct str_node *strlist__find(struct strlist *slist, const char *entry) return snode; } -static int strlist__parse_list_entry(struct strlist *slist, const char *s) +static int strlist__parse_list_entry(struct strlist *slist, const char *s, + const char *subst_dir) { + int err; + char *subst = NULL; + if (strncmp(s, "file://", 7) == 0) return strlist__load(slist, s + 7); - return strlist__add(slist, s); + if (subst_dir) { + err = -ENOMEM; + if (asprintf(&subst, "%s/%s", subst_dir, s) < 0) + goto out; + + if (access(subst, F_OK) == 0) { + err = strlist__load(slist, subst); + goto out; + } + } + + err = strlist__add(slist, s); +out: + free(subst); + return err; } -int strlist__parse_list(struct strlist *slist, const char *s) +static int strlist__parse_list(struct strlist *slist, const char *s, const char *subst_dir) { char *sep; int err; while ((sep = strchr(s, ',')) != NULL) { *sep = '\0'; - err = strlist__parse_list_entry(slist, s); + err = strlist__parse_list_entry(slist, s, subst_dir); *sep = ','; if (err != 0) return err; s = sep + 1; } - return *s ? strlist__parse_list_entry(slist, s) : 0; + return *s ? 
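asprintf_expr_inout_ints() above generates expressions like "id == 1 || id == 3" (or the negated "!= ... && ..." form) for event filtering. A hand-rolled equivalent of the inclusive case, to show the intended output shape:

    #include <stdio.h>

    int main(void)
    {
        const int ints[] = { 1, 3 };
        char expr[64];
        int printed = 0;
        size_t i;

        /* Join "var == value" terms with " || ". */
        for (i = 0; i < 2; i++)
            printed += snprintf(expr + printed, sizeof(expr) - printed,
                                "%s%s == %d", i ? " || " : "",
                                "id", ints[i]);
        printf("%s\n", expr);  /* prints: id == 1 || id == 3 */
        return 0;
    }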
strlist__parse_list_entry(slist, s, subst_dir) : 0; } -struct strlist *strlist__new(bool dupstr, const char *list) +struct strlist *strlist__new(const char *list, const struct strlist_config *config) { struct strlist *slist = malloc(sizeof(*slist)); if (slist != NULL) { + bool dupstr = true; + const char *dirname = NULL; + + if (config) { + dupstr = !config->dont_dupstr; + dirname = config->dirname; + } + rblist__init(&slist->rblist); slist->rblist.node_cmp = strlist__node_cmp; slist->rblist.node_new = strlist__node_new; slist->rblist.node_delete = strlist__node_delete; slist->dupstr = dupstr; - if (list && strlist__parse_list(slist, list) != 0) + + if (list && strlist__parse_list(slist, list, dirname) != 0) goto out_error; } diff --git a/kernel/tools/perf/util/strlist.h b/kernel/tools/perf/util/strlist.h index 5c7f87069..297565aa7 100644 --- a/kernel/tools/perf/util/strlist.h +++ b/kernel/tools/perf/util/strlist.h @@ -16,7 +16,12 @@ struct strlist { bool dupstr; }; -struct strlist *strlist__new(bool dupstr, const char *slist); +struct strlist_config { + bool dont_dupstr; + const char *dirname; +}; + +struct strlist *strlist__new(const char *slist, const struct strlist_config *config); void strlist__delete(struct strlist *slist); void strlist__remove(struct strlist *slist, struct str_node *sn); @@ -74,6 +79,4 @@ static inline struct str_node *strlist__next(struct str_node *sn) #define strlist__for_each_safe(pos, n, slist) \ for (pos = strlist__first(slist), n = strlist__next(pos); pos;\ pos = n, n = strlist__next(n)) - -int strlist__parse_list(struct strlist *slist, const char *s); #endif /* __PERF_STRLIST_H */ diff --git a/kernel/tools/perf/util/svghelper.c b/kernel/tools/perf/util/svghelper.c index 283d3e73e..eec6c1149 100644 --- a/kernel/tools/perf/util/svghelper.c +++ b/kernel/tools/perf/util/svghelper.c @@ -748,7 +748,7 @@ static int str_to_bitmap(char *s, cpumask_t *b) set_bit(c, cpumask_bits(b)); } - cpu_map__delete(m); + cpu_map__put(m); return ret; } diff --git a/kernel/tools/perf/util/symbol-elf.c b/kernel/tools/perf/util/symbol-elf.c index a7ab6063e..475d88d0a 100644 --- a/kernel/tools/perf/util/symbol-elf.c +++ b/kernel/tools/perf/util/symbol-elf.c @@ -630,6 +630,11 @@ void symsrc__destroy(struct symsrc *ss) close(ss->fd); } +bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr) +{ + return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL; +} + int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, enum dso_binary_type type) { @@ -678,6 +683,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, } if (!dso__build_id_equal(dso, build_id)) { + pr_debug("%s: build id mismatch for %s.\n", __func__, name); dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID; goto out_elf_end; } @@ -711,8 +717,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, ".gnu.prelink_undo", NULL) != NULL); } else { - ss->adjust_symbols = ehdr.e_type == ET_EXEC || - ehdr.e_type == ET_REL; + ss->adjust_symbols = elf__needs_adjust_symbols(ehdr); } ss->name = strdup(name); @@ -771,6 +776,8 @@ static bool want_demangle(bool is_kernel_sym) return is_kernel_sym ? 
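With the strlist__new() signature change above, callers pass an optional config instead of a bool: NULL keeps the old defaults, dont_dupstr avoids string duplication, and dirname names a directory that list entries are first tried against as files. Illustrative, non-standalone usage; the /tmp/filters path is hypothetical:

    #include "strlist.h"

    static struct strlist *make_probe_list(void)
    {
        struct strlist_config conf = {
            .dont_dupstr = false,
            .dirname = "/tmp/filters",  /* hypothetical lookup dir */
        };

        /* Entries naming readable files under dirname are loaded
         * line by line; anything else is added verbatim. */
        return strlist__new("a.list,b.list", &conf);
    }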
symbol_conf.demangle_kernel : symbol_conf.demangle; } +void __weak arch__elf_sym_adjust(GElf_Sym *sym __maybe_unused) { } + int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, struct symsrc *runtime_ss, symbol_filter_t filter, int kmodule) @@ -868,6 +875,17 @@ int dso__load_sym(struct dso *dso, struct map *map, } } + /* + * Handle any relocation of vdso necessary because older kernels + * attempted to prelink vdso to its virtual address. + */ + if (dso__is_vdso(dso)) { + GElf_Shdr tshdr; + + if (elf_section_by_name(elf, &ehdr, &tshdr, ".text", NULL)) + map->reloc = map->start - tshdr.sh_addr + tshdr.sh_offset; + } + dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap); /* * Initial kernel and module mappings do not map to the dso. For @@ -935,6 +953,8 @@ int dso__load_sym(struct dso *dso, struct map *map, (sym.st_value & 1)) --sym.st_value; + arch__elf_sym_adjust(&sym); + if (dso->kernel || kmodule) { char dso_name[PATH_MAX]; @@ -963,8 +983,10 @@ int dso__load_sym(struct dso *dso, struct map *map, map->unmap_ip = map__unmap_ip; /* Ensure maps are correctly ordered */ if (kmaps) { + map__get(map); map_groups__remove(kmaps, map); map_groups__insert(kmaps, map); + map__put(map); } } @@ -1005,7 +1027,7 @@ int dso__load_sym(struct dso *dso, struct map *map, curr_map = map__new2(start, curr_dso, map->type); if (curr_map == NULL) { - dso__delete(curr_dso); + dso__put(curr_dso); goto out_elf_end; } if (adjust_kernel_syms) { @@ -1020,11 +1042,7 @@ int dso__load_sym(struct dso *dso, struct map *map, } curr_dso->symtab_type = dso->symtab_type; map_groups__insert(kmaps, curr_map); - /* - * The new DSO should go to the kernel DSOS - */ - dsos__add(&map->groups->machine->kernel_dsos, - curr_dso); + dsos__add(&map->groups->machine->dsos, curr_dso); dso__set_loaded(curr_dso, map->type); } else curr_dso = curr_map->dso; @@ -1253,8 +1271,6 @@ out_close: static int kcore__init(struct kcore *kcore, char *filename, int elfclass, bool temp) { - GElf_Ehdr *ehdr; - kcore->elfclass = elfclass; if (temp) @@ -1271,9 +1287,7 @@ static int kcore__init(struct kcore *kcore, char *filename, int elfclass, if (!gelf_newehdr(kcore->elf, elfclass)) goto out_end; - ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr); - if (!ehdr) - goto out_end; + memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr)); return 0; @@ -1330,23 +1344,18 @@ static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count) static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset, u64 addr, u64 len) { - GElf_Phdr gphdr; - GElf_Phdr *phdr; - - phdr = gelf_getphdr(kcore->elf, idx, &gphdr); - if (!phdr) - return -1; - - phdr->p_type = PT_LOAD; - phdr->p_flags = PF_R | PF_W | PF_X; - phdr->p_offset = offset; - phdr->p_vaddr = addr; - phdr->p_paddr = 0; - phdr->p_filesz = len; - phdr->p_memsz = len; - phdr->p_align = page_size; - - if (!gelf_update_phdr(kcore->elf, idx, phdr)) + GElf_Phdr phdr = { + .p_type = PT_LOAD, + .p_flags = PF_R | PF_W | PF_X, + .p_offset = offset, + .p_vaddr = addr, + .p_paddr = 0, + .p_filesz = len, + .p_memsz = len, + .p_align = page_size, + }; + + if (!gelf_update_phdr(kcore->elf, idx, &phdr)) return -1; return 0; diff --git a/kernel/tools/perf/util/symbol-minimal.c b/kernel/tools/perf/util/symbol-minimal.c index fd8477cac..48906333a 100644 --- a/kernel/tools/perf/util/symbol-minimal.c +++ b/kernel/tools/perf/util/symbol-minimal.c @@ -337,7 +337,7 @@ int dso__load_sym(struct dso *dso, struct map *map __maybe_unused, symbol_filter_t filter __maybe_unused, int kmodule 
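The map__get()/map__put() bracket added above pins the map across the remove+insert reordering, since removal may drop the container's last reference. The general shape of that fix with a toy refcount:

    #include <stdlib.h>

    struct obj { int refcnt; };

    static struct obj *obj_get(struct obj *o) { o->refcnt++; return o; }

    static void obj_put(struct obj *o)
    {
        if (o && --o->refcnt == 0)
            free(o);
    }

    /* Hypothetical container hooks: remove drops the container's
     * reference, insert takes a new one. */
    static void container_remove(struct obj *o) { obj_put(o); }
    static void container_insert(struct obj *o) { obj_get(o); }

    /* Pin the object so remove() cannot free it mid-move. */
    static void reinsert(struct obj *o)
    {
        obj_get(o);
        container_remove(o);
        container_insert(o);
        obj_put(o);
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        container_insert(o);  /* refcnt 1 */
        reinsert(o);          /* still 1 afterwards */
        container_remove(o);  /* drops to 0, freed */
        return 0;
    }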
__maybe_unused) { - unsigned char *build_id[BUILD_ID_SIZE]; + unsigned char build_id[BUILD_ID_SIZE]; int ret; ret = fd__is_64_bit(ss->fd); diff --git a/kernel/tools/perf/util/symbol.c b/kernel/tools/perf/util/symbol.c index 99378a5c5..cd08027a6 100644 --- a/kernel/tools/perf/util/symbol.c +++ b/kernel/tools/perf/util/symbol.c @@ -85,8 +85,17 @@ static int prefix_underscores_count(const char *str) return tail - str; } -#define SYMBOL_A 0 -#define SYMBOL_B 1 +int __weak arch__choose_best_symbol(struct symbol *syma, + struct symbol *symb __maybe_unused) +{ + /* Avoid "SyS" kernel syscall aliases */ + if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3)) + return SYMBOL_B; + if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10)) + return SYMBOL_B; + + return SYMBOL_A; +} static int choose_best_symbol(struct symbol *syma, struct symbol *symb) { @@ -134,13 +143,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb) else if (na < nb) return SYMBOL_B; - /* Avoid "SyS" kernel syscall aliases */ - if (na >= 3 && !strncmp(syma->name, "SyS", 3)) - return SYMBOL_B; - if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10)) - return SYMBOL_B; - - return SYMBOL_A; + return arch__choose_best_symbol(syma, symb); } void symbols__fixup_duplicate(struct rb_root *symbols) @@ -199,18 +202,18 @@ void symbols__fixup_end(struct rb_root *symbols) void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) { - struct map *prev, *curr; - struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]); + struct maps *maps = &mg->maps[type]; + struct map *next, *curr; - if (prevnd == NULL) - return; + pthread_rwlock_wrlock(&maps->lock); - curr = rb_entry(prevnd, struct map, rb_node); + curr = maps__first(maps); + if (curr == NULL) + goto out_unlock; - for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { - prev = curr; - curr = rb_entry(nd, struct map, rb_node); - prev->end = curr->start; + for (next = map__next(curr); next; next = map__next(curr)) { + curr->end = next->start; + curr = next; } /* @@ -218,6 +221,9 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) * last map final address. 
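The arch__choose_best_symbol() default above is marked __weak so that an architecture (PowerPC, for instance, where symbol naming differs) can supply its own strong definition and have the linker pick it silently. A minimal, self-contained sketch of that weak-override linkage pattern, with illustrative names rather than perf's:

#include <stdio.h>

/* Generic default. The weak attribute lets any other object file in the
 * link provide a strong arch__score_symbol() that replaces this one. */
__attribute__((weak)) int arch__score_symbol(const char *name)
{
	(void)name;
	return 0;	/* no arch-specific preference */
}

int main(void)
{
	/* With no strong override linked in, the weak default runs. */
	printf("score: %d\n", arch__score_symbol("SyS_read"));
	return 0;
}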
*/ curr->end = ~0ULL; + +out_unlock: + pthread_rwlock_unlock(&maps->lock); } struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) @@ -397,7 +403,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols, const char *name) { struct rb_node *n; - struct symbol_name_rb_node *s; + struct symbol_name_rb_node *s = NULL; if (symbols == NULL) return NULL; @@ -408,7 +414,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols, int cmp; s = rb_entry(n, struct symbol_name_rb_node, rb_node); - cmp = strcmp(name, s->sym.name); + cmp = arch__compare_symbol_names(name, s->sym.name); if (cmp < 0) n = n->rb_left; @@ -426,7 +432,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols, struct symbol_name_rb_node *tmp; tmp = rb_entry(n, struct symbol_name_rb_node, rb_node); - if (strcmp(tmp->sym.name, s->sym.name)) + if (arch__compare_symbol_names(tmp->sym.name, s->sym.name)) break; s = tmp; @@ -435,10 +441,25 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols, return &s->sym; } +void dso__reset_find_symbol_cache(struct dso *dso) +{ + enum map_type type; + + for (type = MAP__FUNCTION; type <= MAP__VARIABLE; ++type) { + dso->last_find_result[type].addr = 0; + dso->last_find_result[type].symbol = NULL; + } +} + struct symbol *dso__find_symbol(struct dso *dso, enum map_type type, u64 addr) { - return symbols__find(&dso->symbols[type], addr); + if (dso->last_find_result[type].addr != addr) { + dso->last_find_result[type].addr = addr; + dso->last_find_result[type].symbol = symbols__find(&dso->symbols[type], addr); + } + + return dso->last_find_result[type].symbol; } struct symbol *dso__first_symbol(struct dso *dso, enum map_type type) @@ -603,7 +624,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name, * symbols, setting length to 0, and rely on * symbols__fixup_end() to fix it up. 
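dso__find_symbol() above gains a one-entry cache: consecutive samples very often resolve the same address, so remembering just the last (addr, symbol) pair, including negative results, skips the tree walk on repeats, and dso__reset_find_symbol_cache() invalidates it when the symbol table changes. A self-contained sketch of the same memoization over a plain array (simplified types, not the perf structs):

#include <stdint.h>
#include <stddef.h>

struct sym { uint64_t start, end; const char *name; };

/* Remember the last query and its answer, even when the answer is NULL. */
struct find_cache { uint64_t addr; const struct sym *sym; int valid; };

static const struct sym *slow_find(const struct sym *tab, size_t n, uint64_t addr)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (addr >= tab[i].start && addr < tab[i].end)
			return &tab[i];
	return NULL;
}

const struct sym *cached_find(struct find_cache *c, const struct sym *tab,
			      size_t n, uint64_t addr)
{
	if (!c->valid || c->addr != addr) {	/* miss: do the real lookup */
		c->addr = addr;
		c->sym = slow_find(tab, n, addr);
		c->valid = 1;
	}
	return c->sym;				/* hit: reuse the last result */
}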
*/ - sym = symbol__new(start, 0, kallsyms2elf_type(type), name); + sym = symbol__new(start, 0, kallsyms2elf_binding(type), name); if (sym == NULL) return -ENOMEM; /* @@ -633,19 +654,24 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map, struct map_groups *kmaps = map__kmaps(map); struct map *curr_map; struct symbol *pos; - int count = 0, moved = 0; + int count = 0; + struct rb_root old_root = dso->symbols[map->type]; struct rb_root *root = &dso->symbols[map->type]; struct rb_node *next = rb_first(root); if (!kmaps) return -1; + *root = RB_ROOT; + while (next) { char *module; pos = rb_entry(next, struct symbol, rb_node); next = rb_next(&pos->rb_node); + rb_erase_init(&pos->rb_node, &old_root); + module = strchr(pos->name, '\t'); if (module) *module = '\0'; @@ -653,28 +679,21 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map, curr_map = map_groups__find(kmaps, map->type, pos->start); if (!curr_map || (filter && filter(curr_map, pos))) { - rb_erase(&pos->rb_node, root); symbol__delete(pos); - } else { - pos->start -= curr_map->start - curr_map->pgoff; - if (pos->end) - pos->end -= curr_map->start - curr_map->pgoff; - if (curr_map != map) { - rb_erase(&pos->rb_node, root); - symbols__insert( - &curr_map->dso->symbols[curr_map->type], - pos); - ++moved; - } else { - ++count; - } + continue; } + + pos->start -= curr_map->start - curr_map->pgoff; + if (pos->end) + pos->end -= curr_map->start - curr_map->pgoff; + symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); + ++count; } /* Symbols have been adjusted */ dso->adjust_symbols = 1; - return count + moved; + return count; } /* @@ -780,7 +799,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta, curr_map = map__new2(pos->start, ndso, map->type); if (curr_map == NULL) { - dso__delete(ndso); + dso__put(ndso); return -1; } @@ -1126,8 +1145,11 @@ static int dso__load_kcore(struct dso *dso, struct map *map, INIT_LIST_HEAD(&md.maps); fd = open(kcore_filename, O_RDONLY); - if (fd < 0) + if (fd < 0) { + pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n", + kcore_filename); return -EINVAL; + } /* Read new maps into temporary lists */ err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md, @@ -1167,20 +1189,23 @@ static int dso__load_kcore(struct dso *dso, struct map *map, /* Add new maps */ while (!list_empty(&md.maps)) { new_map = list_entry(md.maps.next, struct map, node); - list_del(&new_map->node); + list_del_init(&new_map->node); if (new_map == replacement_map) { map->start = new_map->start; map->end = new_map->end; map->pgoff = new_map->pgoff; map->map_ip = new_map->map_ip; map->unmap_ip = new_map->unmap_ip; - map__delete(new_map); /* Ensure maps are correctly ordered */ + map__get(map); map_groups__remove(kmaps, map); map_groups__insert(kmaps, map); + map__put(map); } else { map_groups__insert(kmaps, new_map); } + + map__put(new_map); } /* @@ -1205,8 +1230,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map, out_err: while (!list_empty(&md.maps)) { map = list_entry(md.maps.next, struct map, node); - list_del(&map->node); - map__delete(map); + list_del_init(&map->node); + map__put(map); } close(fd); return -EINVAL; @@ -1355,7 +1380,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP: /* * kernel modules know their symtab type - it's set when - * creating a module dso in machine__new_module(). 
+ * creating a module dso in machine__findnew_module_map(). */ return kmod && dso->symtab_type == type; @@ -1379,13 +1404,24 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) struct symsrc ss_[2]; struct symsrc *syms_ss = NULL, *runtime_ss = NULL; bool kmod; + unsigned char build_id[BUILD_ID_SIZE]; - dso__set_loaded(dso, map->type); + pthread_mutex_lock(&dso->lock); + + /* check again under the dso->lock */ + if (dso__loaded(dso, map->type)) { + ret = 1; + goto out; + } - if (dso->kernel == DSO_TYPE_KERNEL) - return dso__load_kernel_sym(dso, map, filter); - else if (dso->kernel == DSO_TYPE_GUEST_KERNEL) - return dso__load_guest_kernel_sym(dso, map, filter); + if (dso->kernel) { + if (dso->kernel == DSO_TYPE_KERNEL) + ret = dso__load_kernel_sym(dso, map, filter); + else if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + ret = dso__load_guest_kernel_sym(dso, map, filter); + + goto out; + } if (map->groups && map->groups->machine) machine = map->groups->machine; @@ -1398,18 +1434,18 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) struct stat st; if (lstat(dso->name, &st) < 0) - return -1; + goto out; - if (st.st_uid && (st.st_uid != geteuid())) { + if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) { pr_warning("File %s not owned by current user or root, " - "ignoring it.\n", dso->name); - return -1; + "ignoring it (use -f to override).\n", dso->name); + goto out; } ret = dso__load_perf_map(dso, map, filter); dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT : DSO_BINARY_TYPE__NOT_FOUND; - return ret; + goto out; } if (machine) @@ -1417,13 +1453,21 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) name = malloc(PATH_MAX); if (!name) - return -1; + goto out; kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE || dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; + + /* + * Read the build id if possible. This is required for + * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work + */ + if (filename__read_build_id(dso->name, build_id, BUILD_ID_SIZE) > 0) + dso__set_build_id(dso, build_id); + /* * Iterate over candidate debug images. 
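dso__load() above now does its work under dso->lock and re-tests dso__loaded() first, so when several threads race to load symbols for the same DSO, one performs the load and the rest return early with the cached state. A generic sketch of that check-under-lock idiom (hypothetical struct, not perf's dso; the unlocked fast-path check perf layers on top is omitted for simplicity):

#include <pthread.h>
#include <stdbool.h>

struct lazy {
	pthread_mutex_t lock;	/* serializes the expensive load */
	bool loaded;
	int value;
};

static void expensive_load(struct lazy *l)
{
	l->value = 42;		/* stand-in for reading a symbol table */
}

int lazy_get(struct lazy *l)
{
	pthread_mutex_lock(&l->lock);
	if (!l->loaded) {	/* re-check now that we hold the lock */
		expensive_load(l);
		l->loaded = true;
	}
	pthread_mutex_unlock(&l->lock);
	return l->value;
}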
* Keep track of "interesting" ones (those which have a symtab, dynsym, @@ -1498,23 +1542,32 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) out_free: free(name); if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) - return 0; + ret = 0; +out: + dso__set_loaded(dso, map->type); + pthread_mutex_unlock(&dso->lock); + return ret; } struct map *map_groups__find_by_name(struct map_groups *mg, enum map_type type, const char *name) { - struct rb_node *nd; + struct maps *maps = &mg->maps[type]; + struct map *map; - for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { - struct map *map = rb_entry(nd, struct map, rb_node); + pthread_rwlock_rdlock(&maps->lock); + for (map = maps__first(maps); map; map = map__next(map)) { if (map->dso && strcmp(map->dso->short_name, name) == 0) - return map; + goto out_unlock; } - return NULL; + map = NULL; + +out_unlock: + pthread_rwlock_unlock(&maps->lock); + return map; } int dso__load_vmlinux(struct dso *dso, struct map *map, @@ -1561,6 +1614,15 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map, int i, err = 0; char *filename = NULL; + pr_debug("Looking at the vmlinux_path (%d entries long)\n", + vmlinux_path__nr_entries + 1); + + for (i = 0; i < vmlinux_path__nr_entries; ++i) { + err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter); + if (err > 0) + goto out; + } + if (!symbol_conf.ignore_vmlinux_buildid) filename = dso__build_id_filename(dso, NULL, 0); if (filename != NULL) { @@ -1569,15 +1631,6 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map, goto out; free(filename); } - - pr_debug("Looking at the vmlinux_path (%d entries long)\n", - vmlinux_path__nr_entries + 1); - - for (i = 0; i < vmlinux_path__nr_entries; ++i) { - err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter); - if (err > 0) - break; - } out: return err; } @@ -1802,11 +1855,12 @@ static void vmlinux_path__exit(void) { while (--vmlinux_path__nr_entries >= 0) zfree(&vmlinux_path[vmlinux_path__nr_entries]); + vmlinux_path__nr_entries = 0; zfree(&vmlinux_path); } -static int vmlinux_path__init(struct perf_session_env *env) +static int vmlinux_path__init(struct perf_env *env) { struct utsname uts; char bf[PATH_MAX]; @@ -1874,11 +1928,13 @@ int setup_list(struct strlist **list, const char *list_str, if (list_str == NULL) return 0; - *list = strlist__new(true, list_str); + *list = strlist__new(list_str, NULL); if (!*list) { pr_err("problems parsing %s list\n", list_name); return -1; } + + symbol_conf.has_filter = true; return 0; } @@ -1893,8 +1949,6 @@ int setup_intlist(struct intlist **list, const char *list_str, pr_err("problems parsing %s list\n", list_name); return -1; } - - symbol_conf.has_filter = true; return 0; } @@ -1917,7 +1971,7 @@ static bool symbol__read_kptr_restrict(void) return value; } -int symbol__init(struct perf_session_env *env) +int symbol__init(struct perf_env *env) { const char *symfs; diff --git a/kernel/tools/perf/util/symbol.h b/kernel/tools/perf/util/symbol.h index be0217989..dcd786e36 100644 --- a/kernel/tools/perf/util/symbol.h +++ b/kernel/tools/perf/util/symbol.h @@ -84,6 +84,7 @@ struct symbol_conf { unsigned short priv_size; unsigned short nr_events; bool try_vmlinux_path, + force, ignore_vmlinux, ignore_vmlinux_buildid, show_kernel_path, @@ -106,7 +107,8 @@ struct symbol_conf { filter_relative, show_hist_headers, branch_callstack, - has_filter; + has_filter, + show_ref_callgraph; const char *vmlinux_name, *kallsyms_name, *source_prefix, @@ -159,8 +161,6 @@ struct ref_reloc_sym { 
struct map_symbol { struct map *map; struct symbol *sym; - bool unfolded; - bool has_children; }; struct addr_map_symbol { @@ -192,6 +192,7 @@ struct addr_location { u8 filtered; u8 cpumode; s32 cpu; + s32 socket; }; struct symsrc { @@ -253,8 +254,8 @@ int modules__parse(const char *filename, void *arg, int filename__read_debuglink(const char *filename, char *debuglink, size_t size); -struct perf_session_env; -int symbol__init(struct perf_session_env *env); +struct perf_env; +int symbol__init(struct perf_env *env); void symbol__exit(void); void symbol__elf_init(void); struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name); @@ -304,4 +305,14 @@ int setup_list(struct strlist **list, const char *list_str, int setup_intlist(struct intlist **list, const char *list_str, const char *list_name); +#ifdef HAVE_LIBELF_SUPPORT +bool elf__needs_adjust_symbols(GElf_Ehdr ehdr); +void arch__elf_sym_adjust(GElf_Sym *sym); +#endif + +#define SYMBOL_A 0 +#define SYMBOL_B 1 + +int arch__choose_best_symbol(struct symbol *syma, struct symbol *symb); + #endif /* __PERF_SYMBOL */ diff --git a/kernel/tools/perf/util/thread-stack.c b/kernel/tools/perf/util/thread-stack.c index 9ed59a452..679688e70 100644 --- a/kernel/tools/perf/util/thread-stack.c +++ b/kernel/tools/perf/util/thread-stack.c @@ -219,7 +219,7 @@ static int thread_stack__call_return(struct thread *thread, return crp->process(&cr, crp->data); } -static int thread_stack__flush(struct thread *thread, struct thread_stack *ts) +static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts) { struct call_return_processor *crp = ts->crp; int err; @@ -242,6 +242,14 @@ static int thread_stack__flush(struct thread *thread, struct thread_stack *ts) return 0; } +int thread_stack__flush(struct thread *thread) +{ + if (thread->ts) + return __thread_stack__flush(thread, thread->ts); + + return 0; +} + int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip, u64 to_ip, u16 insn_len, u64 trace_nr) { @@ -264,7 +272,7 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip, */ if (trace_nr != thread->ts->trace_nr) { if (thread->ts->trace_nr) - thread_stack__flush(thread, thread->ts); + __thread_stack__flush(thread, thread->ts); thread->ts->trace_nr = trace_nr; } @@ -297,7 +305,7 @@ void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr) if (trace_nr != thread->ts->trace_nr) { if (thread->ts->trace_nr) - thread_stack__flush(thread, thread->ts); + __thread_stack__flush(thread, thread->ts); thread->ts->trace_nr = trace_nr; } } @@ -305,7 +313,7 @@ void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr) void thread_stack__free(struct thread *thread) { if (thread->ts) { - thread_stack__flush(thread, thread->ts); + __thread_stack__flush(thread, thread->ts); zfree(&thread->ts->stack); zfree(&thread->ts); } @@ -689,7 +697,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm, /* Flush stack on exec */ if (ts->comm != comm && thread->pid_ == thread->tid) { - err = thread_stack__flush(thread, ts); + err = __thread_stack__flush(thread, ts); if (err) return err; ts->comm = comm; diff --git a/kernel/tools/perf/util/thread-stack.h b/kernel/tools/perf/util/thread-stack.h index b843bbef8..e1528f137 100644 --- a/kernel/tools/perf/util/thread-stack.h +++ b/kernel/tools/perf/util/thread-stack.h @@ -96,6 +96,7 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip, void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr); void 
thread_stack__sample(struct thread *thread, struct ip_callchain *chain, size_t sz, u64 ip); +int thread_stack__flush(struct thread *thread); void thread_stack__free(struct thread *thread); struct call_return_processor * diff --git a/kernel/tools/perf/util/thread.c b/kernel/tools/perf/util/thread.c index 1c8fbc958..0a9ae8014 100644 --- a/kernel/tools/perf/util/thread.c +++ b/kernel/tools/perf/util/thread.c @@ -18,7 +18,7 @@ int thread__init_map_groups(struct thread *thread, struct machine *machine) if (pid == thread->tid || pid == -1) { thread->mg = map_groups__new(machine); } else { - leader = machine__findnew_thread(machine, pid, pid); + leader = __machine__findnew_thread(machine, pid, pid); if (leader) thread->mg = map_groups__get(leader->mg); } @@ -53,7 +53,8 @@ struct thread *thread__new(pid_t pid, pid_t tid) goto err_thread; list_add(&comm->list, &thread->comm_list); - + atomic_set(&thread->refcnt, 0); + RB_CLEAR_NODE(&thread->rb_node); } return thread; @@ -67,6 +68,8 @@ void thread__delete(struct thread *thread) { struct comm *comm, *tmp; + BUG_ON(!RB_EMPTY_NODE(&thread->rb_node)); + thread_stack__free(thread); if (thread->mg) { @@ -84,13 +87,14 @@ void thread__delete(struct thread *thread) struct thread *thread__get(struct thread *thread) { - ++thread->refcnt; + if (thread) + atomic_inc(&thread->refcnt); return thread; } void thread__put(struct thread *thread) { - if (thread && --thread->refcnt == 0) { + if (thread && atomic_dec_and_test(&thread->refcnt)) { list_del_init(&thread->node); thread__delete(thread); } @@ -187,6 +191,12 @@ static int thread__clone_map_groups(struct thread *thread, if (thread->pid_ == parent->pid_) return 0; + if (thread->mg == parent->mg) { + pr_debug("broken map groups on thread %d/%d parent %d/%d\n", + thread->pid_, thread->tid, parent->pid_, parent->tid); + return 0; + } + /* But this one is new process, copy maps. */ for (i = 0; i < MAP__NR_TYPES; ++i) if (map_groups__clone(thread->mg, parent->mg, i) < 0) diff --git a/kernel/tools/perf/util/thread.h b/kernel/tools/perf/util/thread.h index 9b8a54dc3..a0ac0317a 100644 --- a/kernel/tools/perf/util/thread.h +++ b/kernel/tools/perf/util/thread.h @@ -1,6 +1,7 @@ #ifndef __PERF_THREAD_H #define __PERF_THREAD_H +#include #include #include #include @@ -21,12 +22,12 @@ struct thread { pid_t tid; pid_t ppid; int cpu; - int refcnt; + atomic_t refcnt; char shortname[3]; bool comm_set; + int comm_len; bool dead; /* if set thread has exited */ struct list_head comm_list; - int comm_len; u64 db_id; void *priv; diff --git a/kernel/tools/perf/util/thread_map.c b/kernel/tools/perf/util/thread_map.c index f93b97347..6ec3c5ca4 100644 --- a/kernel/tools/perf/util/thread_map.c +++ b/kernel/tools/perf/util/thread_map.c @@ -8,8 +8,11 @@ #include #include "strlist.h" #include +#include +#include "asm/bug.h" #include "thread_map.h" #include "util.h" +#include "debug.h" /* Skip "." and ".." directories */ static int filter(const struct dirent *dir) @@ -20,6 +23,30 @@ static int filter(const struct dirent *dir) return 1; } +static void thread_map__reset(struct thread_map *map, int start, int nr) +{ + size_t size = (nr - start) * sizeof(map->map[0]); + + memset(&map->map[start], 0, size); +} + +static struct thread_map *thread_map__realloc(struct thread_map *map, int nr) +{ + size_t size = sizeof(*map) + sizeof(map->map[0]) * nr; + int start = map ? map->nr : 0; + + map = realloc(map, size); + /* + * We only realloc to add more items, let's reset new items. 
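thread__get()/thread__put() above move the reference count into the kernel-style atomic_t so that a final put on one thread cannot race a concurrent get on another. A standalone equivalent using C11 <stdatomic.h> in place of atomic_t (generic object, not perf's struct thread):

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
	/* payload would live here */
};

struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		atomic_init(&o->refcnt, 1);	/* creator holds the first reference */
	return o;
}

struct obj *obj_get(struct obj *o)
{
	if (o)
		atomic_fetch_add(&o->refcnt, 1);
	return o;
}

void obj_put(struct obj *o)
{
	/* Mirrors atomic_dec_and_test(): free only on the last reference. */
	if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}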
+ */ + if (map) + thread_map__reset(map, start, nr); + + return map; +} + +#define thread_map__alloc(__nr) thread_map__realloc(NULL, __nr) + struct thread_map *thread_map__new_by_pid(pid_t pid) { struct thread_map *threads; @@ -33,11 +60,12 @@ struct thread_map *thread_map__new_by_pid(pid_t pid) if (items <= 0) return NULL; - threads = malloc(sizeof(*threads) + sizeof(pid_t) * items); + threads = thread_map__alloc(items); if (threads != NULL) { for (i = 0; i < items; i++) - threads->map[i] = atoi(namelist[i]->d_name); + thread_map__set_pid(threads, i, atoi(namelist[i]->d_name)); threads->nr = items; + atomic_set(&threads->refcnt, 1); } for (i=0; imap[0] = tid; - threads->nr = 1; + thread_map__set_pid(threads, 0, tid); + threads->nr = 1; + atomic_set(&threads->refcnt, 1); } return threads; @@ -65,8 +94,8 @@ struct thread_map *thread_map__new_by_uid(uid_t uid) int max_threads = 32, items, i; char path[256]; struct dirent dirent, *next, **namelist = NULL; - struct thread_map *threads = malloc(sizeof(*threads) + - max_threads * sizeof(pid_t)); + struct thread_map *threads = thread_map__alloc(max_threads); + if (threads == NULL) goto out; @@ -75,6 +104,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid) goto out_free_threads; threads->nr = 0; + atomic_set(&threads->refcnt, 1); while (!readdir_r(proc, &dirent, &next) && next) { char *end; @@ -106,16 +136,17 @@ struct thread_map *thread_map__new_by_uid(uid_t uid) if (grow) { struct thread_map *tmp; - tmp = realloc(threads, (sizeof(*threads) + - max_threads * sizeof(pid_t))); + tmp = thread_map__realloc(threads, max_threads); if (tmp == NULL) goto out_free_namelist; threads = tmp; } - for (i = 0; i < items; i++) - threads->map[threads->nr + i] = atoi(namelist[i]->d_name); + for (i = 0; i < items; i++) { + thread_map__set_pid(threads, threads->nr + i, + atoi(namelist[i]->d_name)); + } for (i = 0; i < items; i++) zfree(&namelist[i]); @@ -164,7 +195,8 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str) pid_t pid, prev_pid = INT_MAX; char *end_ptr; struct str_node *pos; - struct strlist *slist = strlist__new(false, pid_str); + struct strlist_config slist_config = { .dont_dupstr = true, }; + struct strlist *slist = strlist__new(pid_str, &slist_config); if (!slist) return NULL; @@ -185,15 +217,14 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str) goto out_free_threads; total_tasks += items; - nt = realloc(threads, (sizeof(*threads) + - sizeof(pid_t) * total_tasks)); + nt = thread_map__realloc(threads, total_tasks); if (nt == NULL) goto out_free_namelist; threads = nt; for (i = 0; i < items; i++) { - threads->map[j++] = atoi(namelist[i]->d_name); + thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name)); zfree(&namelist[i]); } threads->nr = total_tasks; @@ -202,6 +233,8 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str) out: strlist__delete(slist); + if (threads) + atomic_set(&threads->refcnt, 1); return threads; out_free_namelist: @@ -216,11 +249,12 @@ out_free_threads: struct thread_map *thread_map__new_dummy(void) { - struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t)); + struct thread_map *threads = thread_map__alloc(1); if (threads != NULL) { - threads->map[0] = -1; - threads->nr = 1; + thread_map__set_pid(threads, 0, -1); + threads->nr = 1; + atomic_set(&threads->refcnt, 1); } return threads; } @@ -232,13 +266,14 @@ static struct thread_map *thread_map__new_by_tid_str(const char *tid_str) pid_t tid, prev_tid = INT_MAX; char *end_ptr; struct 
str_node *pos; + struct strlist_config slist_config = { .dont_dupstr = true, }; struct strlist *slist; /* perf-stat expects threads to be generated even if tid not given */ if (!tid_str) return thread_map__new_dummy(); - slist = strlist__new(false, tid_str); + slist = strlist__new(tid_str, &slist_config); if (!slist) return NULL; @@ -253,16 +288,18 @@ static struct thread_map *thread_map__new_by_tid_str(const char *tid_str) continue; ntasks++; - nt = realloc(threads, sizeof(*threads) + sizeof(pid_t) * ntasks); + nt = thread_map__realloc(threads, ntasks); if (nt == NULL) goto out_free_threads; threads = nt; - threads->map[ntasks - 1] = tid; - threads->nr = ntasks; + thread_map__set_pid(threads, ntasks - 1, tid); + threads->nr = ntasks; } out: + if (threads) + atomic_set(&threads->refcnt, 1); return threads; out_free_threads: @@ -282,9 +319,30 @@ struct thread_map *thread_map__new_str(const char *pid, const char *tid, return thread_map__new_by_tid_str(tid); } -void thread_map__delete(struct thread_map *threads) +static void thread_map__delete(struct thread_map *threads) { - free(threads); + if (threads) { + int i; + + WARN_ONCE(atomic_read(&threads->refcnt) != 0, + "thread map refcnt unbalanced\n"); + for (i = 0; i < threads->nr; i++) + free(thread_map__comm(threads, i)); + free(threads); + } +} + +struct thread_map *thread_map__get(struct thread_map *map) +{ + if (map) + atomic_inc(&map->refcnt); + return map; +} + +void thread_map__put(struct thread_map *map) +{ + if (map && atomic_dec_and_test(&map->refcnt)) + thread_map__delete(map); } size_t thread_map__fprintf(struct thread_map *threads, FILE *fp) @@ -293,7 +351,60 @@ size_t thread_map__fprintf(struct thread_map *threads, FILE *fp) size_t printed = fprintf(fp, "%d thread%s: ", threads->nr, threads->nr > 1 ? "s" : ""); for (i = 0; i < threads->nr; ++i) - printed += fprintf(fp, "%s%d", i ? ", " : "", threads->map[i]); + printed += fprintf(fp, "%s%d", i ? ", " : "", thread_map__pid(threads, i)); return printed + fprintf(fp, "\n"); } + +static int get_comm(char **comm, pid_t pid) +{ + char *path; + size_t size; + int err; + + if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1) + return -ENOMEM; + + err = filename__read_str(path, comm, &size); + if (!err) { + /* + * We're reading 16 bytes, while filename__read_str + * allocates data per BUFSIZ bytes, so we can safely + * mark the end of the string. + */ + (*comm)[size] = 0; + rtrim(*comm); + } + + free(path); + return err; +} + +static void comm_init(struct thread_map *map, int i) +{ + pid_t pid = thread_map__pid(map, i); + char *comm = NULL; + + /* dummy pid comm initialization */ + if (pid == -1) { + map->map[i].comm = strdup("dummy"); + return; + } + + /* + * The comm name is like extra bonus ;-), + * so just warn if we fail for any reason. 
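get_comm() above fetches the thread name from /proc/<pid>/comm and right-trims it before stashing it in the map. A minimal standalone reader for the same proc file (assumes procfs is mounted at /proc; error handling reduced to the essentials):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Read a task's comm name into buf; returns 0 on success, -1 on error. */
static int read_comm(pid_t pid, char *buf, size_t len)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/comm", (int)pid);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, (int)len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';	/* trim the trailing newline */
	return 0;
}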
+ */ + if (get_comm(&comm, pid)) + pr_warning("Couldn't resolve comm name for pid %d\n", pid); + + map->map[i].comm = comm; +} + +void thread_map__read_comms(struct thread_map *threads) +{ + int i; + + for (i = 0; i < threads->nr; ++i) + comm_init(threads, i); +} diff --git a/kernel/tools/perf/util/thread_map.h b/kernel/tools/perf/util/thread_map.h index 95313f43c..af679d8a5 100644 --- a/kernel/tools/perf/util/thread_map.h +++ b/kernel/tools/perf/util/thread_map.h @@ -3,10 +3,17 @@ #include #include +#include + +struct thread_map_data { + pid_t pid; + char *comm; +}; struct thread_map { + atomic_t refcnt; int nr; - pid_t map[]; + struct thread_map_data map[]; }; struct thread_map *thread_map__new_dummy(void); @@ -15,11 +22,12 @@ struct thread_map *thread_map__new_by_tid(pid_t tid); struct thread_map *thread_map__new_by_uid(uid_t uid); struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid); +struct thread_map *thread_map__get(struct thread_map *map); +void thread_map__put(struct thread_map *map); + struct thread_map *thread_map__new_str(const char *pid, const char *tid, uid_t uid); -void thread_map__delete(struct thread_map *threads); - size_t thread_map__fprintf(struct thread_map *threads, FILE *fp); static inline int thread_map__nr(struct thread_map *threads) @@ -27,4 +35,21 @@ static inline int thread_map__nr(struct thread_map *threads) return threads ? threads->nr : 1; } +static inline pid_t thread_map__pid(struct thread_map *map, int thread) +{ + return map->map[thread].pid; +} + +static inline void +thread_map__set_pid(struct thread_map *map, int thread, pid_t pid) +{ + map->map[thread].pid = pid; +} + +static inline char *thread_map__comm(struct thread_map *map, int thread) +{ + return map->map[thread].comm; +} + +void thread_map__read_comms(struct thread_map *threads); #endif /* __PERF_THREAD_MAP_H */ diff --git a/kernel/tools/perf/util/tool.h b/kernel/tools/perf/util/tool.h index 51d9e56c0..cab8cc248 100644 --- a/kernel/tools/perf/util/tool.h +++ b/kernel/tools/perf/util/tool.h @@ -3,6 +3,8 @@ #include +#include + struct perf_session; union perf_event; struct perf_evlist; @@ -29,6 +31,9 @@ typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event, typedef int (*event_oe)(struct perf_tool *tool, union perf_event *event, struct ordered_events *oe); +typedef s64 (*event_op3)(struct perf_tool *tool, union perf_event *event, + struct perf_session *session); + struct perf_tool { event_sample sample, read; @@ -38,13 +43,20 @@ struct perf_tool { fork, exit, lost, + lost_samples, + aux, + itrace_start, + context_switch, throttle, unthrottle; event_attr_op attr; event_op2 tracing_data; event_oe finished_round; event_op2 build_id, - id_index; + id_index, + auxtrace_info, + auxtrace_error; + event_op3 auxtrace; bool ordered_events; bool ordering_requires_timestamps; }; diff --git a/kernel/tools/perf/util/trace-event-info.c b/kernel/tools/perf/util/trace-event-info.c index eb7271601..d995743cb 100644 --- a/kernel/tools/perf/util/trace-event-info.c +++ b/kernel/tools/perf/util/trace-event-info.c @@ -38,7 +38,7 @@ #include "../perf.h" #include "trace-event.h" -#include +#include #include "evsel.h" #include "debug.h" @@ -341,20 +341,14 @@ out: static int record_proc_kallsyms(void) { - unsigned int size; - const char *path = "/proc/kallsyms"; - struct stat st; - int ret, err = 0; - - ret = stat(path, &st); - if (ret < 0) { - /* not found */ - size = 0; - if (write(output_fd, &size, 4) != 4) - err = -EIO; - return err; - } - return record_file(path, 4); + unsigned long 
long size = 0; + /* + * Just to keep older perf.data file parsers happy, record a zero + * sized kallsyms file, i.e. do the same thing that was done when + * /proc/kallsyms (or something specified via --kallsyms, in a + * different path) couldn't be read. + */ + return write(output_fd, &size, 4) != 4 ? -EIO : 0; } static int record_ftrace_printk(void) diff --git a/kernel/tools/perf/util/trace-event-parse.c b/kernel/tools/perf/util/trace-event-parse.c index 25d6c737b..8ff7d620d 100644 --- a/kernel/tools/perf/util/trace-event-parse.c +++ b/kernel/tools/perf/util/trace-event-parse.c @@ -135,36 +135,6 @@ void event_format__print(struct event_format *event, return event_format__fprintf(event, cpu, data, size, stdout); } -void parse_proc_kallsyms(struct pevent *pevent, - char *file, unsigned int size __maybe_unused) -{ - unsigned long long addr; - char *func; - char *line; - char *next = NULL; - char *addr_str; - char *mod; - char *fmt = NULL; - - line = strtok_r(file, "\n", &next); - while (line) { - mod = NULL; - addr_str = strtok_r(line, " ", &fmt); - addr = strtoull(addr_str, NULL, 16); - /* skip character */ - strtok_r(NULL, " ", &fmt); - func = strtok_r(NULL, "\t", &fmt); - mod = strtok_r(NULL, "]", &fmt); - /* truncate the extra '[' */ - if (mod) - mod = mod + 1; - - pevent_register_function(pevent, func, addr, mod); - - line = strtok_r(NULL, "\n", &next); - } -} - void parse_ftrace_printk(struct pevent *pevent, char *file, unsigned int size __maybe_unused) { @@ -173,7 +143,7 @@ void parse_ftrace_printk(struct pevent *pevent, char *line; char *next = NULL; char *addr_str; - char *fmt; + char *fmt = NULL; line = strtok_r(file, "\n", &next); while (line) { diff --git a/kernel/tools/perf/util/trace-event-read.c b/kernel/tools/perf/util/trace-event-read.c index 54d9e9b54..b67a0ccf5 100644 --- a/kernel/tools/perf/util/trace-event-read.c +++ b/kernel/tools/perf/util/trace-event-read.c @@ -162,25 +162,23 @@ out: static int read_proc_kallsyms(struct pevent *pevent) { unsigned int size; - char *buf; size = read4(pevent); if (!size) return 0; - - buf = malloc(size + 1); - if (buf == NULL) - return -1; - - if (do_read(buf, size) < 0) { - free(buf); - return -1; - } - buf[size] = '\0'; - - parse_proc_kallsyms(pevent, buf, size); - - free(buf); + /* + * Just skip it, now that we configure libtraceevent to use the + * tools/perf/ symbol resolver. + * + * We need to skip it so that we can continue parsing old perf.data + * files, that contains this /proc/kallsyms payload. + * + * Newer perf.data files will have just the 4-bytes zeros "kallsyms + * payload", so that older tools can continue reading it and interpret + * it as "no kallsyms payload is present". + */ + lseek(input_fd, size, SEEK_CUR); + trace_data_size += size; return 0; } diff --git a/kernel/tools/perf/util/trace-event.c b/kernel/tools/perf/util/trace-event.c index 6322d3716..802bb868d 100644 --- a/kernel/tools/perf/util/trace-event.c +++ b/kernel/tools/perf/util/trace-event.c @@ -7,8 +7,11 @@ #include #include #include +#include #include +#include #include "trace-event.h" +#include "machine.h" #include "util.h" /* @@ -19,6 +22,7 @@ * there. */ static struct trace_event tevent; +static bool tevent_initialized; int trace_event__init(struct trace_event *t) { @@ -32,12 +36,40 @@ int trace_event__init(struct trace_event *t) return pevent ? 
0 : -1; } +static int trace_event__init2(void) +{ + int be = traceevent_host_bigendian(); + struct pevent *pevent; + + if (trace_event__init(&tevent)) + return -1; + + pevent = tevent.pevent; + pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT); + pevent_set_file_bigendian(pevent, be); + pevent_set_host_bigendian(pevent, be); + tevent_initialized = true; + return 0; +} + +int trace_event__register_resolver(struct machine *machine, + pevent_func_resolver_t *func) +{ + if (!tevent_initialized && trace_event__init2()) + return -1; + + return pevent_set_function_resolver(tevent.pevent, func, machine); +} + void trace_event__cleanup(struct trace_event *t) { traceevent_unload_plugins(t->plugin_list, t->pevent); pevent_free(t->pevent); } +/* + * Returns pointer with encoded error via interface. + */ static struct event_format* tp_format(const char *sys, const char *name) { @@ -46,12 +78,14 @@ tp_format(const char *sys, const char *name) char path[PATH_MAX]; size_t size; char *data; + int err; scnprintf(path, PATH_MAX, "%s/%s/%s/format", tracing_events_path, sys, name); - if (filename__read_str(path, &data, &size)) - return NULL; + err = filename__read_str(path, &data, &size); + if (err) + return ERR_PTR(err); pevent_parse_format(pevent, &event, data, size, sys); @@ -59,24 +93,14 @@ tp_format(const char *sys, const char *name) return event; } +/* + * Returns pointer with encoded error via interface. + */ struct event_format* trace_event__tp_format(const char *sys, const char *name) { - static bool initialized; - - if (!initialized) { - int be = traceevent_host_bigendian(); - struct pevent *pevent; - - if (trace_event__init(&tevent)) - return NULL; - - pevent = tevent.pevent; - pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT); - pevent_set_file_bigendian(pevent, be); - pevent_set_host_bigendian(pevent, be); - initialized = true; - } + if (!tevent_initialized && trace_event__init2()) + return ERR_PTR(-ENOMEM); return tp_format(sys, name); } diff --git a/kernel/tools/perf/util/trace-event.h b/kernel/tools/perf/util/trace-event.h index d5168f0be..b85ee55cc 100644 --- a/kernel/tools/perf/util/trace-event.h +++ b/kernel/tools/perf/util/trace-event.h @@ -18,6 +18,8 @@ struct trace_event { int trace_event__init(struct trace_event *t); void trace_event__cleanup(struct trace_event *t); +int trace_event__register_resolver(struct machine *machine, + pevent_func_resolver_t *func); struct event_format* trace_event__tp_format(const char *sys, const char *name); @@ -76,6 +78,8 @@ struct scripting_ops { int (*generate_script) (struct pevent *pevent, const char *outfile); }; +extern unsigned int scripting_max_stack; + int script_spec_register(const char *spec, struct scripting_ops *ops); void setup_perl_scripting(void); diff --git a/kernel/tools/perf/util/unwind-libunwind.c b/kernel/tools/perf/util/unwind-libunwind.c index 7b09a443a..c83832b55 100644 --- a/kernel/tools/perf/util/unwind-libunwind.c +++ b/kernel/tools/perf/util/unwind-libunwind.c @@ -269,13 +269,14 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine, u64 offset = dso->data.eh_frame_hdr_offset; if (offset == 0) { - fd = dso__data_fd(dso, machine); + fd = dso__data_get_fd(dso, machine); if (fd < 0) return -EINVAL; /* Check the .eh_frame section for unwinding info */ offset = elf_section_offset(fd, ".eh_frame_hdr"); dso->data.eh_frame_hdr_offset = offset; + dso__data_put_fd(dso); } if (offset) @@ -294,13 +295,14 @@ static int read_unwind_spec_debug_frame(struct dso *dso, u64 ofs = dso->data.debug_frame_offset; if (ofs == 0) { - fd = 
dso__data_fd(dso, machine); + fd = dso__data_get_fd(dso, machine); if (fd < 0) return -EINVAL; /* Check the .debug_frame section for unwinding info */ ofs = elf_section_offset(fd, ".debug_frame"); dso->data.debug_frame_offset = ofs; + dso__data_put_fd(dso); } *offset = ofs; @@ -328,6 +330,7 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, struct map *map; unw_dyn_info_t di; u64 table_data, segbase, fde_count; + int ret = -EINVAL; map = find_map(ip, ui); if (!map || !map->dso) @@ -346,26 +349,33 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, di.u.rti.table_data = map->start + table_data; di.u.rti.table_len = fde_count * sizeof(struct table_entry) / sizeof(unw_word_t); - return dwarf_search_unwind_table(as, ip, &di, pi, - need_unwind_info, arg); + ret = dwarf_search_unwind_table(as, ip, &di, pi, + need_unwind_info, arg); } #ifndef NO_LIBUNWIND_DEBUG_FRAME /* Check the .debug_frame section for unwinding info */ - if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) { - int fd = dso__data_fd(map->dso, ui->machine); + if (ret < 0 && + !read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) { + int fd = dso__data_get_fd(map->dso, ui->machine); int is_exec = elf_is_exec(fd, map->dso->name); unw_word_t base = is_exec ? 0 : map->start; + const char *symfile; + + if (fd >= 0) + dso__data_put_fd(map->dso); + + symfile = map->dso->symsrc_filename ?: map->dso->name; memset(&di, 0, sizeof(di)); - if (dwarf_find_debug_frame(0, &di, ip, base, map->dso->name, + if (dwarf_find_debug_frame(0, &di, ip, base, symfile, map->start, map->end)) return dwarf_search_unwind_table(as, ip, &di, pi, need_unwind_info, arg); } #endif - return -EINVAL; + return ret; } static int access_fpreg(unw_addr_space_t __maybe_unused as, @@ -456,7 +466,7 @@ static int access_mem(unw_addr_space_t __maybe_unused as, if (ret) { pr_debug("unwind: access_mem %p not inside range" " 0x%" PRIx64 "-0x%" PRIx64 "\n", - (void *) addr, start, end); + (void *) (uintptr_t) addr, start, end); *valp = 0; return ret; } @@ -466,7 +476,7 @@ static int access_mem(unw_addr_space_t __maybe_unused as, offset = addr - start; *valp = *(unw_word_t *)&stack->data[offset]; pr_debug("unwind: access_mem addr %p val %lx, offset %d\n", - (void *) addr, (unsigned long)*valp, offset); + (void *) (uintptr_t) addr, (unsigned long)*valp, offset); return 0; } diff --git a/kernel/tools/perf/util/usage.c b/kernel/tools/perf/util/usage.c index 4007aca8e..6adfa18cd 100644 --- a/kernel/tools/perf/util/usage.c +++ b/kernel/tools/perf/util/usage.c @@ -50,6 +50,11 @@ void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN) die_routine = routine; } +void set_warning_routine(void (*routine)(const char *err, va_list params)) +{ + warn_routine = routine; +} + void usage(const char *err) { usage_routine(err); diff --git a/kernel/tools/perf/util/util.c b/kernel/tools/perf/util/util.c index 4ee6d0d4c..47b1e36c7 100644 --- a/kernel/tools/perf/util/util.c +++ b/kernel/tools/perf/util/util.c @@ -3,6 +3,7 @@ #include "debug.h" #include #include +#include #ifdef HAVE_BACKTRACE_SUPPORT #include #endif @@ -17,7 +18,7 @@ #include "callchain.h" struct callchain_param callchain_param = { - .mode = CHAIN_GRAPH_REL, + .mode = CHAIN_GRAPH_ABS, .min_percent = 0.5, .order = ORDER_CALLEE, .key = CCKEY_FUNCTION @@ -34,8 +35,6 @@ bool test_attr__enabled; bool perf_host = true; bool perf_guest = false; -char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events"; - void 
event_attr_init(struct perf_event_attr *attr) { if (!perf_host) @@ -72,20 +71,60 @@ int mkdir_p(char *path, mode_t mode) return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0; } -static int slow_copyfile(const char *from, const char *to, mode_t mode) +int rm_rf(char *path) +{ + DIR *dir; + int ret = 0; + struct dirent *d; + char namebuf[PATH_MAX]; + + dir = opendir(path); + if (dir == NULL) + return 0; + + while ((d = readdir(dir)) != NULL && !ret) { + struct stat statbuf; + + if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, "..")) + continue; + + scnprintf(namebuf, sizeof(namebuf), "%s/%s", + path, d->d_name); + + ret = stat(namebuf, &statbuf); + if (ret < 0) { + pr_debug("stat failed: %s\n", namebuf); + break; + } + + if (S_ISREG(statbuf.st_mode)) + ret = unlink(namebuf); + else if (S_ISDIR(statbuf.st_mode)) + ret = rm_rf(namebuf); + else { + pr_debug("unknown file: %s\n", namebuf); + ret = -1; + } + } + closedir(dir); + + if (ret < 0) + return ret; + + return rmdir(path); +} + +static int slow_copyfile(const char *from, const char *to) { int err = -1; char *line = NULL; size_t n; FILE *from_fp = fopen(from, "r"), *to_fp; - mode_t old_umask; if (from_fp == NULL) goto out; - old_umask = umask(mode ^ 0777); to_fp = fopen(to, "w"); - umask(old_umask); if (to_fp == NULL) goto out_fclose_from; @@ -102,42 +141,81 @@ out: return err; } +int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size) +{ + void *ptr; + loff_t pgoff; + + pgoff = off_in & ~(page_size - 1); + off_in -= pgoff; + + ptr = mmap(NULL, off_in + size, PROT_READ, MAP_PRIVATE, ifd, pgoff); + if (ptr == MAP_FAILED) + return -1; + + while (size) { + ssize_t ret = pwrite(ofd, ptr + off_in, size, off_out); + if (ret < 0 && errno == EINTR) + continue; + if (ret <= 0) + break; + + size -= ret; + off_in += ret; + off_out += ret; + } + munmap(ptr, off_in + size); + + return size ? -1 : 0; +} + int copyfile_mode(const char *from, const char *to, mode_t mode) { int fromfd, tofd; struct stat st; - void *addr; int err = -1; + char *tmp = NULL, *ptr = NULL; if (stat(from, &st)) goto out; - if (st.st_size == 0) /* /proc? do it slowly... */ - return slow_copyfile(from, to, mode); - - fromfd = open(from, O_RDONLY); - if (fromfd < 0) + /* extra 'x' at the end is to reserve space for '.' */ + if (asprintf(&tmp, "%s.XXXXXXx", to) < 0) { + tmp = NULL; goto out; + } + ptr = strrchr(tmp, '/'); + if (!ptr) + goto out; + ptr = memmove(ptr + 1, ptr, strlen(ptr) - 1); + *ptr = '.'; - tofd = creat(to, mode); + tofd = mkstemp(tmp); if (tofd < 0) - goto out_close_from; + goto out; + + if (fchmod(tofd, mode)) + goto out_close_to; - addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fromfd, 0); - if (addr == MAP_FAILED) + if (st.st_size == 0) { /* /proc? do it slowly... 
*/ + err = slow_copyfile(from, tmp); + goto out_close_to; + } + + fromfd = open(from, O_RDONLY); + if (fromfd < 0) goto out_close_to; - if (write(tofd, addr, st.st_size) == st.st_size) - err = 0; + err = copyfile_offset(fromfd, 0, tofd, 0, st.st_size); - munmap(addr, st.st_size); + close(fromfd); out_close_to: close(tofd); - if (err) - unlink(to); -out_close_from: - close(fromfd); + if (!err) + err = link(tmp, to); + unlink(tmp); out: + free(tmp); return err; } @@ -310,123 +388,6 @@ void set_term_quiet_input(struct termios *old) tcsetattr(0, TCSANOW, &tc); } -static void set_tracing_events_path(const char *tracing, const char *mountpoint) -{ - snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s%s", - mountpoint, tracing, "events"); -} - -static const char *__perf_tracefs_mount(const char *mountpoint) -{ - const char *mnt; - - mnt = tracefs_mount(mountpoint); - if (!mnt) - return NULL; - - set_tracing_events_path("", mnt); - - return mnt; -} - -static const char *__perf_debugfs_mount(const char *mountpoint) -{ - const char *mnt; - - mnt = debugfs_mount(mountpoint); - if (!mnt) - return NULL; - - set_tracing_events_path("tracing/", mnt); - - return mnt; -} - -const char *perf_debugfs_mount(const char *mountpoint) -{ - const char *mnt; - - mnt = __perf_tracefs_mount(mountpoint); - if (mnt) - return mnt; - - mnt = __perf_debugfs_mount(mountpoint); - - return mnt; -} - -void perf_debugfs_set_path(const char *mntpt) -{ - snprintf(debugfs_mountpoint, strlen(debugfs_mountpoint), "%s", mntpt); - set_tracing_events_path("tracing/", mntpt); -} - -static const char *find_tracefs(void) -{ - const char *path = __perf_tracefs_mount(NULL); - - return path; -} - -static const char *find_debugfs(void) -{ - const char *path = __perf_debugfs_mount(NULL); - - if (!path) - fprintf(stderr, "Your kernel does not support the debugfs filesystem"); - - return path; -} - -/* - * Finds the path to the debugfs/tracing - * Allocates the string and stores it. 
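The rewritten copyfile_mode() above never writes the destination in place: it copies into a mkstemp() temporary created next to the target and only link()s the finished file to its final name, so a crash or short copy cannot leave a truncated destination visible. A distilled sketch of that install-by-link idiom (fixed-size path buffer, no mode propagation):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/* Atomically install `data` as file `to`: write a temp file in the same
 * directory, link() it into place, then drop the temporary name. */
static int install_file(const char *to, const char *data)
{
	char tmp[4096];
	size_t len = strlen(data);
	int fd, err = -1;

	snprintf(tmp, sizeof(tmp), "%s.XXXXXX", to);
	fd = mkstemp(tmp);
	if (fd < 0)
		return -1;

	if (write(fd, data, len) == (ssize_t)len)
		err = link(tmp, to);	/* fails if `to` already exists */

	close(fd);
	unlink(tmp);			/* temp name no longer needed */
	return err;
}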
- */ -const char *find_tracing_dir(void) -{ - const char *tracing_dir = ""; - static char *tracing; - static int tracing_found; - const char *debugfs; - - if (tracing_found) - return tracing; - - debugfs = find_tracefs(); - if (!debugfs) { - tracing_dir = "/tracing"; - debugfs = find_debugfs(); - if (!debugfs) - return NULL; - } - - if (asprintf(&tracing, "%s%s", debugfs, tracing_dir) < 0) - return NULL; - - tracing_found = 1; - return tracing; -} - -char *get_tracing_file(const char *name) -{ - const char *tracing; - char *file; - - tracing = find_tracing_dir(); - if (!tracing) - return NULL; - - if (asprintf(&file, "%s/%s", tracing, name) < 0) - return NULL; - - return file; -} - -void put_tracing_file(char *file) -{ - free(file); -} - int parse_nsec_time(const char *str, u64 *ptime) { u64 time_sec, time_nsec; @@ -487,6 +448,96 @@ unsigned long parse_tag_value(const char *str, struct parse_tag *tags) return (unsigned long) -1; } +int get_stack_size(const char *str, unsigned long *_size) +{ + char *endptr; + unsigned long size; + unsigned long max_size = round_down(USHRT_MAX, sizeof(u64)); + + size = strtoul(str, &endptr, 0); + + do { + if (*endptr) + break; + + size = round_up(size, sizeof(u64)); + if (!size || size > max_size) + break; + + *_size = size; + return 0; + + } while (0); + + pr_err("callchain: Incorrect stack dump size (max %ld): %s\n", + max_size, str); + return -1; +} + +int parse_callchain_record(const char *arg, struct callchain_param *param) +{ + char *tok, *name, *saveptr = NULL; + char *buf; + int ret = -1; + + /* We need buffer that we know we can write to. */ + buf = malloc(strlen(arg) + 1); + if (!buf) + return -ENOMEM; + + strcpy(buf, arg); + + tok = strtok_r((char *)buf, ",", &saveptr); + name = tok ? : (char *)buf; + + do { + /* Framepointer style */ + if (!strncmp(name, "fp", sizeof("fp"))) { + if (!strtok_r(NULL, ",", &saveptr)) { + param->record_mode = CALLCHAIN_FP; + ret = 0; + } else + pr_err("callchain: No more arguments " + "needed for --call-graph fp\n"); + break; + +#ifdef HAVE_DWARF_UNWIND_SUPPORT + /* Dwarf style */ + } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) { + const unsigned long default_stack_dump_size = 8192; + + ret = 0; + param->record_mode = CALLCHAIN_DWARF; + param->dump_size = default_stack_dump_size; + + tok = strtok_r(NULL, ",", &saveptr); + if (tok) { + unsigned long size = 0; + + ret = get_stack_size(tok, &size); + param->dump_size = size; + } +#endif /* HAVE_DWARF_UNWIND_SUPPORT */ + } else if (!strncmp(name, "lbr", sizeof("lbr"))) { + if (!strtok_r(NULL, ",", &saveptr)) { + param->record_mode = CALLCHAIN_LBR; + ret = 0; + } else + pr_err("callchain: No more arguments " + "needed for --call-graph lbr\n"); + break; + } else { + pr_err("callchain: Unknown --call-graph option " + "value: %s\n", arg); + break; + } + + } while (0); + + free(buf); + return ret; +} + int filename__read_str(const char *filename, char **buf, size_t *sizep) { size_t size = 0, alloc_size = 0; @@ -589,7 +640,7 @@ bool find_process(const char *name) dir = opendir(procfs__mountpoint()); if (!dir) - return -1; + return false; /* Walk through the directory. */ while (ret && (d = readdir(dir)) != NULL) { @@ -615,3 +666,32 @@ bool find_process(const char *name) closedir(dir); return ret ? 
false : true; } + +int +fetch_kernel_version(unsigned int *puint, char *str, + size_t str_size) +{ + struct utsname utsname; + int version, patchlevel, sublevel, err; + + if (uname(&utsname)) + return -1; + + if (str && str_size) { + strncpy(str, utsname.release, str_size); + str[str_size - 1] = '\0'; + } + + err = sscanf(utsname.release, "%d.%d.%d", + &version, &patchlevel, &sublevel); + + if (err != 3) { + pr_debug("Unable to get kernel version from uname '%s'\n", + utsname.release); + return -1; + } + + if (puint) + *puint = (version << 16) + (patchlevel << 8) + sublevel; + return 0; +} diff --git a/kernel/tools/perf/util/util.h b/kernel/tools/perf/util/util.h index 1ff23e04a..dcc659017 100644 --- a/kernel/tools/perf/util/util.h +++ b/kernel/tools/perf/util/util.h @@ -74,8 +74,7 @@ #include #include #include -#include -#include +#include #include #include #include @@ -83,12 +82,6 @@ extern const char *graph_line; extern const char *graph_dotted_line; extern char buildid_dir[]; -extern char tracing_events_path[]; -extern void perf_debugfs_set_path(const char *mountpoint); -const char *perf_debugfs_mount(const char *mountpoint); -const char *find_tracing_dir(void); -char *get_tracing_file(const char *name); -void put_tracing_file(char *file); /* On most systems would have given us this, but * not on some systems (e.g. GNU/Hurd). @@ -152,6 +145,7 @@ extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2))) extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN); +extern void set_warning_routine(void (*routine)(const char *err, va_list params)); extern int prefixcmp(const char *str, const char *prefix); extern void set_buildid_dir(const char *dir); @@ -249,14 +243,20 @@ static inline int sane_case(int x, int high) } int mkdir_p(char *path, mode_t mode); +int rm_rf(char *path); int copyfile(const char *from, const char *to); int copyfile_mode(const char *from, const char *to, mode_t mode); +int copyfile_offset(int fromfd, loff_t from_ofs, int tofd, loff_t to_ofs, u64 size); s64 perf_atoll(const char *str); char **argv_split(const char *str, int *argcp); void argv_free(char **argv); bool strglobmatch(const char *str, const char *pat); bool strlazymatch(const char *str, const char *pat); +static inline bool strisglob(const char *str) +{ + return strpbrk(str, "*?[") != NULL; +} int strtailcmp(const char *s1, const char *s2); char *strxfrchar(char *s, char from, char to); unsigned long convert_unit(unsigned long value, char *unit); @@ -312,8 +312,11 @@ static inline int path__join3(char *bf, size_t size, struct dso; struct symbol; +extern bool srcline_full_filename; char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym, bool show_sym); +char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym, + bool show_sym, bool unwind_inlines); void free_srcline(char *srcline); int filename__read_str(const char *filename, char **buf, size_t *sizep); @@ -333,4 +336,26 @@ int gzip_decompress_to_file(const char *input, int output_fd); int lzma_decompress_to_file(const char *input, int output_fd); #endif +char *asprintf_expr_inout_ints(const char *var, bool in, size_t nints, int *ints); + +static inline char *asprintf_expr_in_ints(const char *var, size_t nints, int *ints) +{ + return asprintf_expr_inout_ints(var, true, nints, ints); +} + +static inline char *asprintf_expr_not_in_ints(const char *var, size_t nints, int *ints) +{ + return asprintf_expr_inout_ints(var, false, nints, ints); +} + +int get_stack_size(const char *str, unsigned
long *_size); + +int fetch_kernel_version(unsigned int *puint, + char *str, size_t str_sz); +#define KVER_VERSION(x) (((x) >> 16) & 0xff) +#define KVER_PATCHLEVEL(x) (((x) >> 8) & 0xff) +#define KVER_SUBLEVEL(x) ((x) & 0xff) +#define KVER_FMT "%d.%d.%d" +#define KVER_PARAM(x) KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x) + #endif /* GIT_COMPAT_UTIL_H */ diff --git a/kernel/tools/perf/util/vdso.c b/kernel/tools/perf/util/vdso.c index 5c7dd7969..44d440da1 100644 --- a/kernel/tools/perf/util/vdso.c +++ b/kernel/tools/perf/util/vdso.c @@ -101,7 +101,7 @@ static char *get_file(struct vdso_file *vdso_file) return vdso; } -void vdso__exit(struct machine *machine) +void machine__exit_vdso(struct machine *machine) { struct vdso_info *vdso_info = machine->vdso_info; @@ -120,14 +120,14 @@ void vdso__exit(struct machine *machine) zfree(&machine->vdso_info); } -static struct dso *vdso__new(struct machine *machine, const char *short_name, - const char *long_name) +static struct dso *__machine__addnew_vdso(struct machine *machine, const char *short_name, + const char *long_name) { struct dso *dso; dso = dso__new(short_name); if (dso != NULL) { - dsos__add(&machine->user_dsos, dso); + __dsos__add(&machine->dsos, dso); dso__set_long_name(dso, long_name, false); } @@ -230,27 +230,29 @@ static const char *vdso__get_compat_file(struct vdso_file *vdso_file) return vdso_file->temp_file_name; } -static struct dso *vdso__findnew_compat(struct machine *machine, - struct vdso_file *vdso_file) +static struct dso *__machine__findnew_compat(struct machine *machine, + struct vdso_file *vdso_file) { const char *file_name; struct dso *dso; - dso = dsos__find(&machine->user_dsos, vdso_file->dso_name, true); + dso = __dsos__find(&machine->dsos, vdso_file->dso_name, true); if (dso) - return dso; + goto out; file_name = vdso__get_compat_file(vdso_file); if (!file_name) - return NULL; + goto out; - return vdso__new(machine, vdso_file->dso_name, file_name); + dso = __machine__addnew_vdso(machine, vdso_file->dso_name, file_name); +out: + return dso; } -static int vdso__dso_findnew_compat(struct machine *machine, - struct thread *thread, - struct vdso_info *vdso_info, - struct dso **dso) +static int __machine__findnew_vdso_compat(struct machine *machine, + struct thread *thread, + struct vdso_info *vdso_info, + struct dso **dso) { enum dso_type dso_type; @@ -267,10 +269,10 @@ static int vdso__dso_findnew_compat(struct machine *machine, switch (dso_type) { case DSO__TYPE_32BIT: - *dso = vdso__findnew_compat(machine, &vdso_info->vdso32); + *dso = __machine__findnew_compat(machine, &vdso_info->vdso32); return 1; case DSO__TYPE_X32BIT: - *dso = vdso__findnew_compat(machine, &vdso_info->vdsox32); + *dso = __machine__findnew_compat(machine, &vdso_info->vdsox32); return 1; case DSO__TYPE_UNKNOWN: case DSO__TYPE_64BIT: @@ -281,35 +283,37 @@ static int vdso__dso_findnew_compat(struct machine *machine, #endif -struct dso *vdso__dso_findnew(struct machine *machine, - struct thread *thread __maybe_unused) +struct dso *machine__findnew_vdso(struct machine *machine, + struct thread *thread __maybe_unused) { struct vdso_info *vdso_info; - struct dso *dso; + struct dso *dso = NULL; + pthread_rwlock_wrlock(&machine->dsos.lock); if (!machine->vdso_info) machine->vdso_info = vdso_info__new(); vdso_info = machine->vdso_info; if (!vdso_info) - return NULL; + goto out_unlock; #if BITS_PER_LONG == 64 - if (vdso__dso_findnew_compat(machine, thread, vdso_info, &dso)) - return dso; + if (__machine__findnew_vdso_compat(machine, thread, vdso_info, 
&dso)) + goto out_unlock; #endif - dso = dsos__find(&machine->user_dsos, DSO__NAME_VDSO, true); + dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO, true); if (!dso) { char *file; file = get_file(&vdso_info->vdso); - if (!file) - return NULL; - - dso = vdso__new(machine, DSO__NAME_VDSO, file); + if (file) + dso = __machine__addnew_vdso(machine, DSO__NAME_VDSO, file); } +out_unlock: + dso__get(dso); + pthread_rwlock_unlock(&machine->dsos.lock); return dso; } diff --git a/kernel/tools/perf/util/vdso.h b/kernel/tools/perf/util/vdso.h index d97da1616..cdc4fabfc 100644 --- a/kernel/tools/perf/util/vdso.h +++ b/kernel/tools/perf/util/vdso.h @@ -23,7 +23,7 @@ bool dso__is_vdso(struct dso *dso); struct machine; struct thread; -struct dso *vdso__dso_findnew(struct machine *machine, struct thread *thread); -void vdso__exit(struct machine *machine); +struct dso *machine__findnew_vdso(struct machine *machine, struct thread *thread); +void machine__exit_vdso(struct machine *machine); #endif /* __PERF_VDSO__ */ diff --git a/kernel/tools/perf/util/xyarray.c b/kernel/tools/perf/util/xyarray.c index 22afbf6c5..c10ba41ef 100644 --- a/kernel/tools/perf/util/xyarray.c +++ b/kernel/tools/perf/util/xyarray.c @@ -9,11 +9,19 @@ struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size) if (xy != NULL) { xy->entry_size = entry_size; xy->row_size = row_size; + xy->entries = xlen * ylen; } return xy; } +void xyarray__reset(struct xyarray *xy) +{ + size_t n = xy->entries * xy->entry_size; + + memset(xy->contents, 0, n); +} + void xyarray__delete(struct xyarray *xy) { free(xy); diff --git a/kernel/tools/perf/util/xyarray.h b/kernel/tools/perf/util/xyarray.h index c488a0727..7f30af371 100644 --- a/kernel/tools/perf/util/xyarray.h +++ b/kernel/tools/perf/util/xyarray.h @@ -6,11 +6,13 @@ struct xyarray { size_t row_size; size_t entry_size; + size_t entries; char contents[]; }; struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size); void xyarray__delete(struct xyarray *xy); +void xyarray__reset(struct xyarray *xy); static inline void *xyarray__entry(struct xyarray *xy, int x, int y) { -- cgit 1.2.3-korg
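xyarray__reset() above zeroes the whole payload with a single memset, which is why xyarray__new() now records the total entry count alongside the row and entry sizes. A self-contained sketch of this flexible-array-member 2D layout (calloc stands in for perf's zalloc; same shape, not the perf source):

#include <stdlib.h>
#include <string.h>

struct xy {
	size_t row_size;	/* bytes per row: ylen * entry_size */
	size_t entry_size;
	size_t entries;		/* total entries, sizes the reset memset */
	char contents[];	/* xlen * ylen * entry_size bytes follow */
};

static struct xy *xy__new(int xlen, int ylen, size_t entry_size)
{
	size_t row_size = (size_t)ylen * entry_size;
	struct xy *a = calloc(1, sizeof(*a) + (size_t)xlen * row_size);

	if (a) {
		a->row_size = row_size;
		a->entry_size = entry_size;
		a->entries = (size_t)xlen * (size_t)ylen;
	}
	return a;
}

static void *xy__entry(struct xy *a, int x, int y)
{
	return &a->contents[x * a->row_size + y * a->entry_size];
}

static void xy__reset(struct xy *a)
{
	memset(a->contents, 0, a->entries * a->entry_size);
}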