From 9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 Mon Sep 17 00:00:00 2001
From: Yunhong Jiang
Date: Tue, 4 Aug 2015 12:17:53 -0700
Subject: Add the rt linux 4.1.3-rt3 as base

Import the rt linux 4.1.3-rt3 as the OPNFV kvm base. It is taken from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git on
the linux-4.1.y-rt branch, and the base is:

  commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
  Author: Sebastian Andrzej Siewior
  Date:   Sat Jul 25 12:13:34 2015 +0200

      Prepare v4.1.3-rt3

      Signed-off-by: Sebastian Andrzej Siewior

Importing the sources this way loses all of the upstream git history,
which is not ideal; in the future, changes like this should be carried
through a separate OPNFV project repository instead.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang
---
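Notes (after the fold, ignored by git-am): a rough sketch of how this
import can be reproduced. The clone URL, branch, and base commit are the
ones named above; the OPNFV checkout path ../opnfv-kvm is hypothetical:

  git clone git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
  cd linux-rt-devel
  git checkout 0917f823c59692d751951bf5ea699a2d1e2f26a2  # "Prepare v4.1.3-rt3" on linux-4.1.y-rt
  # copy the sources into the OPNFV tree; this is what drops the git history
  rsync -a --exclude=.git ./ ../opnfv-kvm/kernel/

A history-preserving alternative for a future re-import could be
git-subtree, for example:

  git subtree add --prefix=kernel \
      git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git \
      linux-4.1.y-rt
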
 kernel/tools/perf/util/Build | 145 +
 kernel/tools/perf/util/PERF-VERSION-GEN | 50 +
 kernel/tools/perf/util/abspath.c | 37 +
 kernel/tools/perf/util/alias.c | 76 +
 kernel/tools/perf/util/annotate.c | 1479 ++++++++++
 kernel/tools/perf/util/annotate.h | 172 ++
 kernel/tools/perf/util/bitmap.c | 31 +
 kernel/tools/perf/util/build-id.c | 538 ++++
 kernel/tools/perf/util/build-id.h | 34 +
 kernel/tools/perf/util/cache.h | 80 +
 kernel/tools/perf/util/callchain.c | 881 ++++++
 kernel/tools/perf/util/callchain.h | 204 ++
 kernel/tools/perf/util/cgroup.c | 177 ++
 kernel/tools/perf/util/cgroup.h | 17 +
 kernel/tools/perf/util/cloexec.c | 92 +
 kernel/tools/perf/util/cloexec.h | 12 +
 kernel/tools/perf/util/color.c | 227 ++
 kernel/tools/perf/util/color.h | 46 +
 kernel/tools/perf/util/comm.c | 125 +
 kernel/tools/perf/util/comm.h | 27 +
 kernel/tools/perf/util/config.c | 564 ++++
 kernel/tools/perf/util/cpumap.c | 479 ++++
 kernel/tools/perf/util/cpumap.h | 84 +
 kernel/tools/perf/util/ctype.c | 39 +
 kernel/tools/perf/util/data-convert-bt.c | 857 ++++++
 kernel/tools/perf/util/data-convert-bt.h | 8 +
 kernel/tools/perf/util/data.c | 138 +
 kernel/tools/perf/util/data.h | 50 +
 kernel/tools/perf/util/db-export.c | 428 +++
 kernel/tools/perf/util/db-export.h | 106 +
 kernel/tools/perf/util/debug.c | 189 ++
 kernel/tools/perf/util/debug.h | 56 +
 kernel/tools/perf/util/dso.c | 1172 ++++++++
 kernel/tools/perf/util/dso.h | 325 +++
 kernel/tools/perf/util/dwarf-aux.c | 905 +++++++
 kernel/tools/perf/util/dwarf-aux.h | 121 +
 kernel/tools/perf/util/environment.c | 9 +
 kernel/tools/perf/util/event.c | 978 +++++++
 kernel/tools/perf/util/event.h | 395 +++
 kernel/tools/perf/util/evlist.c | 1570 +++++++++++
 kernel/tools/perf/util/evlist.h | 284 ++
 kernel/tools/perf/util/evsel.c | 2190 +++++++++++++++
 kernel/tools/perf/util/evsel.h | 369 +++
 kernel/tools/perf/util/exec_cmd.c | 148 +
 kernel/tools/perf/util/exec_cmd.h | 12 +
 kernel/tools/perf/util/find-vdso-map.c | 30 +
 kernel/tools/perf/util/generate-cmdlist.sh | 39 +
 kernel/tools/perf/util/header.c | 2781 +++++++++++++++++++
 kernel/tools/perf/util/header.h | 159 ++
 kernel/tools/perf/util/help.c | 339 +++
 kernel/tools/perf/util/help.h | 29 +
 kernel/tools/perf/util/hist.c | 1482 ++++++++++
 kernel/tools/perf/util/hist.h | 358 +++
 .../tools/perf/util/include/asm/alternative-asm.h | 9 +
 kernel/tools/perf/util/include/asm/asm-offsets.h | 1 +
 kernel/tools/perf/util/include/asm/byteorder.h | 2 +
 kernel/tools/perf/util/include/asm/cpufeature.h | 9 +
 kernel/tools/perf/util/include/asm/dwarf2.h | 13 +
 kernel/tools/perf/util/include/asm/swab.h | 1 +
 kernel/tools/perf/util/include/asm/system.h | 1 +
 kernel/tools/perf/util/include/asm/uaccess.h | 14 +
 kernel/tools/perf/util/include/asm/unistd_32.h | 1 +
 kernel/tools/perf/util/include/asm/unistd_64.h | 1 +
 kernel/tools/perf/util/include/dwarf-regs.h | 8 +
 kernel/tools/perf/util/include/linux/bitmap.h | 66 +
 kernel/tools/perf/util/include/linux/const.h | 1 +
 kernel/tools/perf/util/include/linux/ctype.h | 1 +
 kernel/tools/perf/util/include/linux/kernel.h | 107 +
 kernel/tools/perf/util/include/linux/linkage.h | 13 +
 kernel/tools/perf/util/include/linux/list.h | 29 +
 kernel/tools/perf/util/include/linux/poison.h | 1 +
 kernel/tools/perf/util/include/linux/rbtree.h | 2 +
 .../perf/util/include/linux/rbtree_augmented.h | 2 +
 kernel/tools/perf/util/include/linux/string.h | 3 +
 kernel/tools/perf/util/intlist.c | 146 +
 kernel/tools/perf/util/intlist.h | 77 +
 kernel/tools/perf/util/kvm-stat.h | 140 +
 kernel/tools/perf/util/levenshtein.c | 84 +
 kernel/tools/perf/util/levenshtein.h | 8 +
 kernel/tools/perf/util/lzma.c | 95 +
 kernel/tools/perf/util/machine.c | 1847 +++++++++++++
 kernel/tools/perf/util/machine.h | 227 ++
 kernel/tools/perf/util/map.c | 800 ++++++
 kernel/tools/perf/util/map.h | 235 ++
 kernel/tools/perf/util/ordered-events.c | 307 +++
 kernel/tools/perf/util/ordered-events.h | 64 +
 kernel/tools/perf/util/pager.c | 100 +
 kernel/tools/perf/util/parse-events.c | 1537 +++++++++++
 kernel/tools/perf/util/parse-events.h | 136 +
 kernel/tools/perf/util/parse-events.l | 247 ++
 kernel/tools/perf/util/parse-events.y | 526 ++++
 kernel/tools/perf/util/parse-options.c | 757 ++++++
 kernel/tools/perf/util/parse-options.h | 220 ++
 kernel/tools/perf/util/path.c | 161 ++
 kernel/tools/perf/util/perf_regs.c | 27 +
 kernel/tools/perf/util/perf_regs.h | 29 +
 kernel/tools/perf/util/pmu.c | 992 +++++++
 kernel/tools/perf/util/pmu.h | 79 +
 kernel/tools/perf/util/pmu.l | 43 +
 kernel/tools/perf/util/pmu.y | 92 +
 kernel/tools/perf/util/probe-event.c | 2847 ++++++++++++++++++++
 kernel/tools/perf/util/probe-event.h | 142 +
 kernel/tools/perf/util/probe-finder.c | 1695 ++++++++++++
 kernel/tools/perf/util/probe-finder.h | 116 +
 kernel/tools/perf/util/pstack.c | 76 +
 kernel/tools/perf/util/pstack.h | 14 +
 kernel/tools/perf/util/python-ext-sources | 21 +
 kernel/tools/perf/util/python.c | 1074 ++++++++
 kernel/tools/perf/util/quote.c | 54 +
 kernel/tools/perf/util/quote.h | 29 +
 kernel/tools/perf/util/rblist.c | 128 +
 kernel/tools/perf/util/rblist.h | 48 +
 kernel/tools/perf/util/record.c | 243 ++
 kernel/tools/perf/util/run-command.c | 219 ++
 kernel/tools/perf/util/run-command.h | 58 +
 kernel/tools/perf/util/scripting-engines/Build | 6 +
 .../perf/util/scripting-engines/trace-event-perl.c | 632 +++++
 .../util/scripting-engines/trace-event-python.c | 1209 +++++++++
 kernel/tools/perf/util/session.c | 1818 +++++++++++++
 kernel/tools/perf/util/session.h | 133 +
 kernel/tools/perf/util/setup.py | 48 +
 kernel/tools/perf/util/sigchain.c | 52 +
 kernel/tools/perf/util/sigchain.h | 10 +
 kernel/tools/perf/util/sort.c | 1861 +++++++++++++
 kernel/tools/perf/util/sort.h | 222 ++
 kernel/tools/perf/util/srcline.c | 308 +++
 kernel/tools/perf/util/stat.c | 63 +
 kernel/tools/perf/util/stat.h | 25 +
 kernel/tools/perf/util/strbuf.c | 134 +
 kernel/tools/perf/util/strbuf.h | 92 +
 kernel/tools/perf/util/strfilter.c | 199 ++
 kernel/tools/perf/util/strfilter.h | 48 +
 kernel/tools/perf/util/string.c | 359 +++
 kernel/tools/perf/util/strlist.c | 173 ++
 kernel/tools/perf/util/strlist.h | 79 +
 kernel/tools/perf/util/svghelper.c | 808 ++++++
 kernel/tools/perf/util/svghelper.h | 37 +
 kernel/tools/perf/util/symbol-elf.c | 1774 ++++++++++++
 kernel/tools/perf/util/symbol-minimal.c | 377 +++
 kernel/tools/perf/util/symbol.c | 2001 ++++++++++++++
 kernel/tools/perf/util/symbol.h | 306 +++
 kernel/tools/perf/util/target.c | 155 ++
 kernel/tools/perf/util/target.h | 79 +
 kernel/tools/perf/util/thread-stack.c | 747 +++++
 kernel/tools/perf/util/thread-stack.h | 111 +
 kernel/tools/perf/util/thread.c | 232 ++
 kernel/tools/perf/util/thread.h | 117 +
 kernel/tools/perf/util/thread_map.c | 299 ++
 kernel/tools/perf/util/thread_map.h | 30 +
 kernel/tools/perf/util/tool.h | 52 +
 kernel/tools/perf/util/top.c | 117 +
 kernel/tools/perf/util/top.h | 47 +
 kernel/tools/perf/util/trace-event-info.c | 595 ++++
 kernel/tools/perf/util/trace-event-parse.c | 269 ++
 kernel/tools/perf/util/trace-event-read.c | 444 +++
 kernel/tools/perf/util/trace-event-scripting.c | 177 ++
 kernel/tools/perf/util/trace-event.c | 82 +
 kernel/tools/perf/util/trace-event.h | 93 +
 kernel/tools/perf/util/tsc.c | 30 +
 kernel/tools/perf/util/tsc.h | 12 +
 kernel/tools/perf/util/unwind-libdw.c | 211 ++
 kernel/tools/perf/util/unwind-libdw.h | 21 +
 kernel/tools/perf/util/unwind-libunwind.c | 653 +++++
 kernel/tools/perf/util/unwind.h | 55 +
 kernel/tools/perf/util/usage.c | 84 +
 kernel/tools/perf/util/util.c | 617 +++++
 kernel/tools/perf/util/util.h | 336 +++
 kernel/tools/perf/util/values.c | 232 ++
 kernel/tools/perf/util/values.h | 27 +
 kernel/tools/perf/util/vdso.c | 321 +++
 kernel/tools/perf/util/vdso.h | 29 +
 kernel/tools/perf/util/wrapper.c | 41 +
 kernel/tools/perf/util/xyarray.c | 20 +
 kernel/tools/perf/util/xyarray.h | 20 +
 kernel/tools/perf/util/zlib.c | 78 +
 175 files changed, 56375 insertions(+)
 create mode 100644 kernel/tools/perf/util/Build
 create mode 100755 kernel/tools/perf/util/PERF-VERSION-GEN
 create mode 100644 kernel/tools/perf/util/abspath.c
 create mode 100644 kernel/tools/perf/util/alias.c
 create mode 100644 kernel/tools/perf/util/annotate.c
 create mode 100644 kernel/tools/perf/util/annotate.h
 create mode 100644 kernel/tools/perf/util/bitmap.c
 create mode 100644 kernel/tools/perf/util/build-id.c
 create mode 100644 kernel/tools/perf/util/build-id.h
 create mode 100644 kernel/tools/perf/util/cache.h
 create mode 100644 kernel/tools/perf/util/callchain.c
 create mode 100644 kernel/tools/perf/util/callchain.h
 create mode 100644 kernel/tools/perf/util/cgroup.c
 create mode 100644 kernel/tools/perf/util/cgroup.h
 create mode 100644 kernel/tools/perf/util/cloexec.c
 create mode 100644 kernel/tools/perf/util/cloexec.h
 create mode 100644 kernel/tools/perf/util/color.c
 create mode 100644 kernel/tools/perf/util/color.h
 create mode 100644 kernel/tools/perf/util/comm.c
 create mode 100644 kernel/tools/perf/util/comm.h
 create mode 100644 kernel/tools/perf/util/config.c
 create mode 100644 kernel/tools/perf/util/cpumap.c
 create mode 100644 kernel/tools/perf/util/cpumap.h
 create mode 100644 kernel/tools/perf/util/ctype.c
 create mode 100644 kernel/tools/perf/util/data-convert-bt.c
 create mode 100644 kernel/tools/perf/util/data-convert-bt.h
 create mode 100644 kernel/tools/perf/util/data.c
 create mode 100644 kernel/tools/perf/util/data.h
 create mode 100644 kernel/tools/perf/util/db-export.c
 create mode 100644 kernel/tools/perf/util/db-export.h
 create mode 100644 kernel/tools/perf/util/debug.c
 create mode 100644 kernel/tools/perf/util/debug.h
 create mode 100644 kernel/tools/perf/util/dso.c
 create mode 100644 kernel/tools/perf/util/dso.h
 create mode 100644 kernel/tools/perf/util/dwarf-aux.c
 create mode 100644 kernel/tools/perf/util/dwarf-aux.h
 create mode 100644 kernel/tools/perf/util/environment.c
 create mode 100644 kernel/tools/perf/util/event.c
 create mode 100644 kernel/tools/perf/util/event.h
 create mode 100644 kernel/tools/perf/util/evlist.c
 create mode 100644 kernel/tools/perf/util/evlist.h
 create mode 100644 kernel/tools/perf/util/evsel.c
 create mode 100644 kernel/tools/perf/util/evsel.h
 create mode 100644 kernel/tools/perf/util/exec_cmd.c
 create mode 100644 kernel/tools/perf/util/exec_cmd.h
 create mode 100644 kernel/tools/perf/util/find-vdso-map.c
 create mode 100755 kernel/tools/perf/util/generate-cmdlist.sh
 create mode 100644 kernel/tools/perf/util/header.c
 create mode 100644 kernel/tools/perf/util/header.h
 create mode 100644 kernel/tools/perf/util/help.c
 create mode 100644 kernel/tools/perf/util/help.h
 create mode 100644 kernel/tools/perf/util/hist.c
 create mode 100644 kernel/tools/perf/util/hist.h
 create mode 100644 kernel/tools/perf/util/include/asm/alternative-asm.h
 create mode 100644 kernel/tools/perf/util/include/asm/asm-offsets.h
 create mode 100644 kernel/tools/perf/util/include/asm/byteorder.h
 create mode 100644 kernel/tools/perf/util/include/asm/cpufeature.h
 create mode 100644 kernel/tools/perf/util/include/asm/dwarf2.h
 create mode 100644 kernel/tools/perf/util/include/asm/swab.h
 create mode 100644 kernel/tools/perf/util/include/asm/system.h
 create mode 100644 kernel/tools/perf/util/include/asm/uaccess.h
 create mode 100644 kernel/tools/perf/util/include/asm/unistd_32.h
 create mode 100644 kernel/tools/perf/util/include/asm/unistd_64.h
 create mode 100644 kernel/tools/perf/util/include/dwarf-regs.h
 create mode 100644 kernel/tools/perf/util/include/linux/bitmap.h
 create mode 100644 kernel/tools/perf/util/include/linux/const.h
 create mode 100644 kernel/tools/perf/util/include/linux/ctype.h
 create mode 100644 kernel/tools/perf/util/include/linux/kernel.h
 create mode 100644 kernel/tools/perf/util/include/linux/linkage.h
 create mode 100644 kernel/tools/perf/util/include/linux/list.h
 create mode 100644 kernel/tools/perf/util/include/linux/poison.h
 create mode 100644 kernel/tools/perf/util/include/linux/rbtree.h
 create mode 100644 kernel/tools/perf/util/include/linux/rbtree_augmented.h
 create mode 100644 kernel/tools/perf/util/include/linux/string.h
 create mode 100644 kernel/tools/perf/util/intlist.c
 create mode 100644 kernel/tools/perf/util/intlist.h
 create mode 100644 kernel/tools/perf/util/kvm-stat.h
 create mode 100644 kernel/tools/perf/util/levenshtein.c
 create mode 100644 kernel/tools/perf/util/levenshtein.h
 create mode 100644 kernel/tools/perf/util/lzma.c
 create mode 100644 kernel/tools/perf/util/machine.c
 create mode 100644 kernel/tools/perf/util/machine.h
 create mode 100644 kernel/tools/perf/util/map.c
 create mode 100644 kernel/tools/perf/util/map.h
 create mode 100644 kernel/tools/perf/util/ordered-events.c
 create mode 100644 kernel/tools/perf/util/ordered-events.h
 create mode 100644 kernel/tools/perf/util/pager.c
 create mode 100644 kernel/tools/perf/util/parse-events.c
 create mode 100644 kernel/tools/perf/util/parse-events.h
 create mode 100644 kernel/tools/perf/util/parse-events.l
 create mode 100644 kernel/tools/perf/util/parse-events.y
 create mode 100644 kernel/tools/perf/util/parse-options.c
 create mode 100644 kernel/tools/perf/util/parse-options.h
 create mode 100644 kernel/tools/perf/util/path.c
 create mode 100644 kernel/tools/perf/util/perf_regs.c
 create mode 100644 kernel/tools/perf/util/perf_regs.h
 create mode 100644 kernel/tools/perf/util/pmu.c
 create mode 100644 kernel/tools/perf/util/pmu.h
 create mode 100644 kernel/tools/perf/util/pmu.l
 create mode 100644 kernel/tools/perf/util/pmu.y
 create mode 100644 kernel/tools/perf/util/probe-event.c
 create mode 100644 kernel/tools/perf/util/probe-event.h
 create mode 100644 kernel/tools/perf/util/probe-finder.c
 create mode 100644 kernel/tools/perf/util/probe-finder.h
 create mode 100644 kernel/tools/perf/util/pstack.c
 create mode 100644 kernel/tools/perf/util/pstack.h
 create mode 100644 kernel/tools/perf/util/python-ext-sources
 create mode 100644 kernel/tools/perf/util/python.c
 create mode 100644 kernel/tools/perf/util/quote.c
 create mode 100644 kernel/tools/perf/util/quote.h
 create mode 100644 kernel/tools/perf/util/rblist.c
 create mode 100644 kernel/tools/perf/util/rblist.h
 create mode 100644 kernel/tools/perf/util/record.c
 create mode 100644 kernel/tools/perf/util/run-command.c
 create mode 100644 kernel/tools/perf/util/run-command.h
 create mode 100644 kernel/tools/perf/util/scripting-engines/Build
 create mode 100644 kernel/tools/perf/util/scripting-engines/trace-event-perl.c
 create mode 100644 kernel/tools/perf/util/scripting-engines/trace-event-python.c
 create mode 100644 kernel/tools/perf/util/session.c
 create mode 100644 kernel/tools/perf/util/session.h
 create mode 100644 kernel/tools/perf/util/setup.py
 create mode 100644 kernel/tools/perf/util/sigchain.c
 create mode 100644 kernel/tools/perf/util/sigchain.h
 create mode 100644 kernel/tools/perf/util/sort.c
 create mode 100644 kernel/tools/perf/util/sort.h
 create mode 100644 kernel/tools/perf/util/srcline.c
 create mode 100644 kernel/tools/perf/util/stat.c
 create mode 100644 kernel/tools/perf/util/stat.h
 create mode 100644 kernel/tools/perf/util/strbuf.c
 create mode 100644 kernel/tools/perf/util/strbuf.h
 create mode 100644 kernel/tools/perf/util/strfilter.c
 create mode 100644 kernel/tools/perf/util/strfilter.h
 create mode 100644 kernel/tools/perf/util/string.c
 create mode 100644 kernel/tools/perf/util/strlist.c
 create mode 100644 kernel/tools/perf/util/strlist.h
 create mode 100644 kernel/tools/perf/util/svghelper.c
 create mode 100644 kernel/tools/perf/util/svghelper.h
 create mode 100644 kernel/tools/perf/util/symbol-elf.c
 create mode 100644 kernel/tools/perf/util/symbol-minimal.c
 create mode 100644 kernel/tools/perf/util/symbol.c
 create mode 100644 kernel/tools/perf/util/symbol.h
 create mode 100644 kernel/tools/perf/util/target.c
 create mode 100644 kernel/tools/perf/util/target.h
 create mode 100644 kernel/tools/perf/util/thread-stack.c
 create mode 100644 kernel/tools/perf/util/thread-stack.h
 create mode 100644 kernel/tools/perf/util/thread.c
 create mode 100644 kernel/tools/perf/util/thread.h
 create mode 100644 kernel/tools/perf/util/thread_map.c
 create mode 100644 kernel/tools/perf/util/thread_map.h
 create mode 100644 kernel/tools/perf/util/tool.h
 create mode 100644 kernel/tools/perf/util/top.c
 create mode 100644 kernel/tools/perf/util/top.h
 create mode 100644 kernel/tools/perf/util/trace-event-info.c
 create mode 100644 kernel/tools/perf/util/trace-event-parse.c
 create mode 100644 kernel/tools/perf/util/trace-event-read.c
 create mode 100644 kernel/tools/perf/util/trace-event-scripting.c
 create mode 100644 kernel/tools/perf/util/trace-event.c
 create mode 100644 kernel/tools/perf/util/trace-event.h
 create mode 100644 kernel/tools/perf/util/tsc.c
 create mode 100644 kernel/tools/perf/util/tsc.h
 create mode 100644 kernel/tools/perf/util/unwind-libdw.c
 create mode 100644 kernel/tools/perf/util/unwind-libdw.h
 create mode 100644 kernel/tools/perf/util/unwind-libunwind.c
 create mode 100644 kernel/tools/perf/util/unwind.h
 create mode 100644 kernel/tools/perf/util/usage.c
 create mode 100644 kernel/tools/perf/util/util.c
 create mode 100644 kernel/tools/perf/util/util.h
 create mode 100644 kernel/tools/perf/util/values.c
 create mode 100644 kernel/tools/perf/util/values.h
 create mode 100644 kernel/tools/perf/util/vdso.c
 create mode 100644 kernel/tools/perf/util/vdso.h
 create mode 100644 kernel/tools/perf/util/wrapper.c
 create mode 100644 kernel/tools/perf/util/xyarray.c
 create mode 100644 kernel/tools/perf/util/xyarray.h
 create mode 100644 kernel/tools/perf/util/zlib.c
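As a quick sanity check, the diffstat above can be regenerated from the
patch itself before applying it. A minimal sketch, assuming this mail is
saved as 0001-add-rt-linux-base.patch (a hypothetical file name):

  git apply --stat 0001-add-rt-linux-base.patch  # reprint the diffstat without applying
  git am 0001-add-rt-linux-base.patch            # apply together with the commit message
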
diff --git a/kernel/tools/perf/util/Build b/kernel/tools/perf/util/Build
new file mode 100644
index 000000000..797490a40
--- /dev/null
+++ b/kernel/tools/perf/util/Build
@@ -0,0 +1,145 @@
+libperf-y += abspath.o
+libperf-y += alias.o
+libperf-y += annotate.o
+libperf-y += build-id.o
+libperf-y += config.o
+libperf-y += ctype.o
+libperf-y += db-export.o
+libperf-y += environment.o
+libperf-y += event.o
+libperf-y += evlist.o
+libperf-y += evsel.o
+libperf-y += exec_cmd.o
+libperf-y += find_next_bit.o
+libperf-y += help.o
+libperf-y += kallsyms.o
+libperf-y += levenshtein.o
+libperf-y += parse-options.o
+libperf-y += parse-events.o
+libperf-y += path.o
+libperf-y += rbtree.o
+libperf-y += bitmap.o
+libperf-y += hweight.o
+libperf-y += run-command.o
+libperf-y += quote.o
+libperf-y += strbuf.o
+libperf-y += string.o
+libperf-y += strlist.o
+libperf-y += strfilter.o
+libperf-y += top.o
+libperf-y += usage.o
+libperf-y += wrapper.o
+libperf-y += sigchain.o
+libperf-y += dso.o
+libperf-y += symbol.o
+libperf-y += color.o
+libperf-y += pager.o
+libperf-y += header.o
+libperf-y += callchain.o
+libperf-y += values.o
+libperf-y += debug.o
+libperf-y += machine.o
+libperf-y += map.o
+libperf-y += pstack.o
+libperf-y += session.o
+libperf-y += ordered-events.o
+libperf-y += comm.o
+libperf-y += thread.o
+libperf-y += thread_map.o
+libperf-y += trace-event-parse.o
+libperf-y += parse-events-flex.o
+libperf-y += parse-events-bison.o
+libperf-y += pmu.o
+libperf-y += pmu-flex.o
+libperf-y += pmu-bison.o
+libperf-y += trace-event-read.o
+libperf-y += trace-event-info.o
+libperf-y += trace-event-scripting.o
+libperf-y += trace-event.o
+libperf-y += svghelper.o
+libperf-y += sort.o
+libperf-y += hist.o
+libperf-y += util.o
+libperf-y += xyarray.o
+libperf-y += cpumap.o
+libperf-y += cgroup.o
+libperf-y += target.o
+libperf-y += rblist.o
+libperf-y += intlist.o
+libperf-y += vdso.o
+libperf-y += stat.o
+libperf-y += record.o
+libperf-y += srcline.o
+libperf-y += data.o
+libperf-$(CONFIG_X86) += tsc.o
+libperf-y += cloexec.o
+libperf-y += thread-stack.o
+
+libperf-$(CONFIG_LIBELF) += symbol-elf.o
+libperf-$(CONFIG_LIBELF) += probe-event.o
+
+ifndef CONFIG_LIBELF
+libperf-y += symbol-minimal.o
+endif
+
+libperf-$(CONFIG_DWARF) += probe-finder.o
+libperf-$(CONFIG_DWARF) += dwarf-aux.o
+
+libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+
+libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
+
+libperf-y += scripting-engines/
+
+libperf-$(CONFIG_PERF_REGS) += perf_regs.o
+libperf-$(CONFIG_ZLIB) += zlib.o
+libperf-$(CONFIG_LZMA) += lzma.o
+
+CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
+CFLAGS_exec_cmd.o += -DPERF_EXEC_PATH="BUILD_STR($(perfexecdir_SQ))" -DPREFIX="BUILD_STR($(prefix_SQ))"
+
+$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
+	$(call rule_mkdir)
+	@$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) util/parse-events.l
+
+$(OUTPUT)util/parse-events-bison.c: util/parse-events.y
+	$(call rule_mkdir)
+	@$(call echo-cmd,bison)$(BISON) -v
util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $@ -p parse_events_ + +$(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c + $(call rule_mkdir) + @$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/pmu-flex.h util/pmu.l + +$(OUTPUT)util/pmu-bison.c: util/pmu.y + $(call rule_mkdir) + @$(call echo-cmd,bison)$(BISON) -v util/pmu.y -d -o $@ -p perf_pmu_ + +CFLAGS_parse-events-flex.o += -w +CFLAGS_pmu-flex.o += -w +CFLAGS_parse-events-bison.o += -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w +CFLAGS_pmu-bison.o += -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w + +$(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c +$(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c + +CFLAGS_find_next_bit.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" +CFLAGS_rbtree.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" +CFLAGS_hweight.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" +CFLAGS_parse-events.o += -Wno-redundant-decls + +$(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c FORCE + $(call rule_mkdir) + $(call if_changed_dep,cc_o_c) + +$(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c FORCE + $(call rule_mkdir) + $(call if_changed_dep,cc_o_c) + +$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c FORCE + $(call rule_mkdir) + $(call if_changed_dep,cc_o_c) + +$(OUTPUT)util/hweight.o: ../../lib/hweight.c FORCE + $(call rule_mkdir) + $(call if_changed_dep,cc_o_c) diff --git a/kernel/tools/perf/util/PERF-VERSION-GEN b/kernel/tools/perf/util/PERF-VERSION-GEN new file mode 100755 index 000000000..39f175075 --- /dev/null +++ b/kernel/tools/perf/util/PERF-VERSION-GEN @@ -0,0 +1,50 @@ +#!/bin/sh + +if [ $# -eq 1 ] ; then + OUTPUT=$1 +fi + +GVF=${OUTPUT}PERF-VERSION-FILE + +LF=' +' + +# +# First check if there is a .git to get the version from git describe +# otherwise try to get the version from the kernel Makefile +# +CID= +TAG= +if test -d ../../.git -o -f ../../.git +then + TAG=$(git describe --abbrev=0 --match "v[0-9].[0-9]*" 2>/dev/null ) + CID=$(git log -1 --abbrev=4 --pretty=format:"%h" 2>/dev/null) && CID="-g$CID" +elif test -f ../../PERF-VERSION-FILE +then + TAG=$(cut -d' ' -f3 ../../PERF-VERSION-FILE | sed -e 's/\"//g') +fi +if test -z "$TAG" +then + TAG=$(MAKEFLAGS= make -sC ../.. 
kernelversion) +fi +VN="$TAG$CID" +if test -n "$CID" +then + # format version string, strip trailing zero of sublevel: + VN=$(echo "$VN" | sed -e 's/-/./g;s/\([0-9]*[.][0-9]*\)[.]0/\1/') +fi + +VN=$(expr "$VN" : v*'\(.*\)') + +if test -r $GVF +then + VC=$(sed -e 's/^#define PERF_VERSION "\(.*\)"/\1/' <$GVF) +else + VC=unset +fi +test "$VN" = "$VC" || { + echo >&2 " PERF_VERSION = $VN" + echo "#define PERF_VERSION \"$VN\"" >$GVF +} + + diff --git a/kernel/tools/perf/util/abspath.c b/kernel/tools/perf/util/abspath.c new file mode 100644 index 000000000..0e76affe9 --- /dev/null +++ b/kernel/tools/perf/util/abspath.c @@ -0,0 +1,37 @@ +#include "cache.h" + +static const char *get_pwd_cwd(void) +{ + static char cwd[PATH_MAX + 1]; + char *pwd; + struct stat cwd_stat, pwd_stat; + if (getcwd(cwd, PATH_MAX) == NULL) + return NULL; + pwd = getenv("PWD"); + if (pwd && strcmp(pwd, cwd)) { + stat(cwd, &cwd_stat); + if (!stat(pwd, &pwd_stat) && + pwd_stat.st_dev == cwd_stat.st_dev && + pwd_stat.st_ino == cwd_stat.st_ino) { + strlcpy(cwd, pwd, PATH_MAX); + } + } + return cwd; +} + +const char *make_nonrelative_path(const char *path) +{ + static char buf[PATH_MAX + 1]; + + if (is_absolute_path(path)) { + if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX) + die("Too long path: %.*s", 60, path); + } else { + const char *cwd = get_pwd_cwd(); + if (!cwd) + die("Cannot determine the current working directory"); + if (snprintf(buf, PATH_MAX, "%s/%s", cwd, path) >= PATH_MAX) + die("Too long path: %.*s", 60, path); + } + return buf; +} diff --git a/kernel/tools/perf/util/alias.c b/kernel/tools/perf/util/alias.c new file mode 100644 index 000000000..c0b43ee40 --- /dev/null +++ b/kernel/tools/perf/util/alias.c @@ -0,0 +1,76 @@ +#include "cache.h" + +static const char *alias_key; +static char *alias_val; + +static int alias_lookup_cb(const char *k, const char *v, + void *cb __maybe_unused) +{ + if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) { + if (!v) + return config_error_nonbool(k); + alias_val = strdup(v); + return 0; + } + return 0; +} + +char *alias_lookup(const char *alias) +{ + alias_key = alias; + alias_val = NULL; + perf_config(alias_lookup_cb, NULL); + return alias_val; +} + +int split_cmdline(char *cmdline, const char ***argv) +{ + int src, dst, count = 0, size = 16; + char quoted = 0; + + *argv = malloc(sizeof(char*) * size); + + /* split alias_string */ + (*argv)[count++] = cmdline; + for (src = dst = 0; cmdline[src];) { + char c = cmdline[src]; + if (!quoted && isspace(c)) { + cmdline[dst++] = 0; + while (cmdline[++src] + && isspace(cmdline[src])) + ; /* skip */ + if (count >= size) { + size += 16; + *argv = realloc(*argv, sizeof(char*) * size); + } + (*argv)[count++] = cmdline + dst; + } else if (!quoted && (c == '\'' || c == '"')) { + quoted = c; + src++; + } else if (c == quoted) { + quoted = 0; + src++; + } else { + if (c == '\\' && quoted != '\'') { + src++; + c = cmdline[src]; + if (!c) { + zfree(argv); + return error("cmdline ends with \\"); + } + } + cmdline[dst++] = c; + src++; + } + } + + cmdline[dst] = 0; + + if (quoted) { + zfree(argv); + return error("unclosed quote"); + } + + return count; +} + diff --git a/kernel/tools/perf/util/annotate.c b/kernel/tools/perf/util/annotate.c new file mode 100644 index 000000000..7f5bdfc9b --- /dev/null +++ b/kernel/tools/perf/util/annotate.c @@ -0,0 +1,1479 @@ +/* + * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo + * + * Parts came from builtin-annotate.c, see those files for further + * copyright notes. + * + * Released under the GPL v2. 
(and only v2, not any later version) + */ + +#include "util.h" +#include "ui/ui.h" +#include "sort.h" +#include "build-id.h" +#include "color.h" +#include "cache.h" +#include "symbol.h" +#include "debug.h" +#include "annotate.h" +#include "evsel.h" +#include +#include +#include + +const char *disassembler_style; +const char *objdump_path; +static regex_t file_lineno; + +static struct ins *ins__find(const char *name); +static int disasm_line__parse(char *line, char **namep, char **rawp); + +static void ins__delete(struct ins_operands *ops) +{ + if (ops == NULL) + return; + zfree(&ops->source.raw); + zfree(&ops->source.name); + zfree(&ops->target.raw); + zfree(&ops->target.name); +} + +static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops) +{ + return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw); +} + +int ins__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops) +{ + if (ins->ops->scnprintf) + return ins->ops->scnprintf(ins, bf, size, ops); + + return ins__raw_scnprintf(ins, bf, size, ops); +} + +static int call__parse(struct ins_operands *ops) +{ + char *endptr, *tok, *name; + + ops->target.addr = strtoull(ops->raw, &endptr, 16); + + name = strchr(endptr, '<'); + if (name == NULL) + goto indirect_call; + + name++; + + tok = strchr(name, '>'); + if (tok == NULL) + return -1; + + *tok = '\0'; + ops->target.name = strdup(name); + *tok = '>'; + + return ops->target.name == NULL ? -1 : 0; + +indirect_call: + tok = strchr(endptr, '('); + if (tok != NULL) { + ops->target.addr = 0; + return 0; + } + + tok = strchr(endptr, '*'); + if (tok == NULL) + return -1; + + ops->target.addr = strtoull(tok + 1, NULL, 16); + return 0; +} + +static int call__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops) +{ + if (ops->target.name) + return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name); + + if (ops->target.addr == 0) + return ins__raw_scnprintf(ins, bf, size, ops); + + return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr); +} + +static struct ins_ops call_ops = { + .parse = call__parse, + .scnprintf = call__scnprintf, +}; + +bool ins__is_call(const struct ins *ins) +{ + return ins->ops == &call_ops; +} + +static int jump__parse(struct ins_operands *ops) +{ + const char *s = strchr(ops->raw, '+'); + + ops->target.addr = strtoull(ops->raw, NULL, 16); + + if (s++ != NULL) + ops->target.offset = strtoull(s, NULL, 16); + else + ops->target.offset = UINT64_MAX; + + return 0; +} + +static int jump__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops) +{ + return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset); +} + +static struct ins_ops jump_ops = { + .parse = jump__parse, + .scnprintf = jump__scnprintf, +}; + +bool ins__is_jump(const struct ins *ins) +{ + return ins->ops == &jump_ops; +} + +static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep) +{ + char *endptr, *name, *t; + + if (strstr(raw, "(%rip)") == NULL) + return 0; + + *addrp = strtoull(comment, &endptr, 16); + name = strchr(endptr, '<'); + if (name == NULL) + return -1; + + name++; + + t = strchr(name, '>'); + if (t == NULL) + return 0; + + *t = '\0'; + *namep = strdup(name); + *t = '>'; + + return 0; +} + +static int lock__parse(struct ins_operands *ops) +{ + char *name; + + ops->locked.ops = zalloc(sizeof(*ops->locked.ops)); + if (ops->locked.ops == NULL) + return 0; + + if (disasm_line__parse(ops->raw, &name, &ops->locked.ops->raw) < 0) + 
goto out_free_ops; + + ops->locked.ins = ins__find(name); + free(name); + + if (ops->locked.ins == NULL) + goto out_free_ops; + + if (!ops->locked.ins->ops) + return 0; + + if (ops->locked.ins->ops->parse && + ops->locked.ins->ops->parse(ops->locked.ops) < 0) + goto out_free_ops; + + return 0; + +out_free_ops: + zfree(&ops->locked.ops); + return 0; +} + +static int lock__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops) +{ + int printed; + + if (ops->locked.ins == NULL) + return ins__raw_scnprintf(ins, bf, size, ops); + + printed = scnprintf(bf, size, "%-6.6s ", ins->name); + return printed + ins__scnprintf(ops->locked.ins, bf + printed, + size - printed, ops->locked.ops); +} + +static void lock__delete(struct ins_operands *ops) +{ + struct ins *ins = ops->locked.ins; + + if (ins && ins->ops->free) + ins->ops->free(ops->locked.ops); + else + ins__delete(ops->locked.ops); + + zfree(&ops->locked.ops); + zfree(&ops->target.raw); + zfree(&ops->target.name); +} + +static struct ins_ops lock_ops = { + .free = lock__delete, + .parse = lock__parse, + .scnprintf = lock__scnprintf, +}; + +static int mov__parse(struct ins_operands *ops) +{ + char *s = strchr(ops->raw, ','), *target, *comment, prev; + + if (s == NULL) + return -1; + + *s = '\0'; + ops->source.raw = strdup(ops->raw); + *s = ','; + + if (ops->source.raw == NULL) + return -1; + + target = ++s; + comment = strchr(s, '#'); + + if (comment != NULL) + s = comment - 1; + else + s = strchr(s, '\0') - 1; + + while (s > target && isspace(s[0])) + --s; + s++; + prev = *s; + *s = '\0'; + + ops->target.raw = strdup(target); + *s = prev; + + if (ops->target.raw == NULL) + goto out_free_source; + + if (comment == NULL) + return 0; + + while (comment[0] != '\0' && isspace(comment[0])) + ++comment; + + comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name); + comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name); + + return 0; + +out_free_source: + zfree(&ops->source.raw); + return -1; +} + +static int mov__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops) +{ + return scnprintf(bf, size, "%-6.6s %s,%s", ins->name, + ops->source.name ?: ops->source.raw, + ops->target.name ?: ops->target.raw); +} + +static struct ins_ops mov_ops = { + .parse = mov__parse, + .scnprintf = mov__scnprintf, +}; + +static int dec__parse(struct ins_operands *ops) +{ + char *target, *comment, *s, prev; + + target = s = ops->raw; + + while (s[0] != '\0' && !isspace(s[0])) + ++s; + prev = *s; + *s = '\0'; + + ops->target.raw = strdup(target); + *s = prev; + + if (ops->target.raw == NULL) + return -1; + + comment = strchr(s, '#'); + if (comment == NULL) + return 0; + + while (comment[0] != '\0' && isspace(comment[0])) + ++comment; + + comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name); + + return 0; +} + +static int dec__scnprintf(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops) +{ + return scnprintf(bf, size, "%-6.6s %s", ins->name, + ops->target.name ?: ops->target.raw); +} + +static struct ins_ops dec_ops = { + .parse = dec__parse, + .scnprintf = dec__scnprintf, +}; + +static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size, + struct ins_operands *ops __maybe_unused) +{ + return scnprintf(bf, size, "%-6.6s", "nop"); +} + +static struct ins_ops nop_ops = { + .scnprintf = nop__scnprintf, +}; + +/* + * Must be sorted by name! 
+ */ +static struct ins instructions[] = { + { .name = "add", .ops = &mov_ops, }, + { .name = "addl", .ops = &mov_ops, }, + { .name = "addq", .ops = &mov_ops, }, + { .name = "addw", .ops = &mov_ops, }, + { .name = "and", .ops = &mov_ops, }, + { .name = "bts", .ops = &mov_ops, }, + { .name = "call", .ops = &call_ops, }, + { .name = "callq", .ops = &call_ops, }, + { .name = "cmp", .ops = &mov_ops, }, + { .name = "cmpb", .ops = &mov_ops, }, + { .name = "cmpl", .ops = &mov_ops, }, + { .name = "cmpq", .ops = &mov_ops, }, + { .name = "cmpw", .ops = &mov_ops, }, + { .name = "cmpxch", .ops = &mov_ops, }, + { .name = "dec", .ops = &dec_ops, }, + { .name = "decl", .ops = &dec_ops, }, + { .name = "imul", .ops = &mov_ops, }, + { .name = "inc", .ops = &dec_ops, }, + { .name = "incl", .ops = &dec_ops, }, + { .name = "ja", .ops = &jump_ops, }, + { .name = "jae", .ops = &jump_ops, }, + { .name = "jb", .ops = &jump_ops, }, + { .name = "jbe", .ops = &jump_ops, }, + { .name = "jc", .ops = &jump_ops, }, + { .name = "jcxz", .ops = &jump_ops, }, + { .name = "je", .ops = &jump_ops, }, + { .name = "jecxz", .ops = &jump_ops, }, + { .name = "jg", .ops = &jump_ops, }, + { .name = "jge", .ops = &jump_ops, }, + { .name = "jl", .ops = &jump_ops, }, + { .name = "jle", .ops = &jump_ops, }, + { .name = "jmp", .ops = &jump_ops, }, + { .name = "jmpq", .ops = &jump_ops, }, + { .name = "jna", .ops = &jump_ops, }, + { .name = "jnae", .ops = &jump_ops, }, + { .name = "jnb", .ops = &jump_ops, }, + { .name = "jnbe", .ops = &jump_ops, }, + { .name = "jnc", .ops = &jump_ops, }, + { .name = "jne", .ops = &jump_ops, }, + { .name = "jng", .ops = &jump_ops, }, + { .name = "jnge", .ops = &jump_ops, }, + { .name = "jnl", .ops = &jump_ops, }, + { .name = "jnle", .ops = &jump_ops, }, + { .name = "jno", .ops = &jump_ops, }, + { .name = "jnp", .ops = &jump_ops, }, + { .name = "jns", .ops = &jump_ops, }, + { .name = "jnz", .ops = &jump_ops, }, + { .name = "jo", .ops = &jump_ops, }, + { .name = "jp", .ops = &jump_ops, }, + { .name = "jpe", .ops = &jump_ops, }, + { .name = "jpo", .ops = &jump_ops, }, + { .name = "jrcxz", .ops = &jump_ops, }, + { .name = "js", .ops = &jump_ops, }, + { .name = "jz", .ops = &jump_ops, }, + { .name = "lea", .ops = &mov_ops, }, + { .name = "lock", .ops = &lock_ops, }, + { .name = "mov", .ops = &mov_ops, }, + { .name = "movb", .ops = &mov_ops, }, + { .name = "movdqa",.ops = &mov_ops, }, + { .name = "movl", .ops = &mov_ops, }, + { .name = "movq", .ops = &mov_ops, }, + { .name = "movslq", .ops = &mov_ops, }, + { .name = "movzbl", .ops = &mov_ops, }, + { .name = "movzwl", .ops = &mov_ops, }, + { .name = "nop", .ops = &nop_ops, }, + { .name = "nopl", .ops = &nop_ops, }, + { .name = "nopw", .ops = &nop_ops, }, + { .name = "or", .ops = &mov_ops, }, + { .name = "orl", .ops = &mov_ops, }, + { .name = "test", .ops = &mov_ops, }, + { .name = "testb", .ops = &mov_ops, }, + { .name = "testl", .ops = &mov_ops, }, + { .name = "xadd", .ops = &mov_ops, }, + { .name = "xbeginl", .ops = &jump_ops, }, + { .name = "xbeginq", .ops = &jump_ops, }, +}; + +static int ins__cmp(const void *name, const void *insp) +{ + const struct ins *ins = insp; + + return strcmp(name, ins->name); +} + +static struct ins *ins__find(const char *name) +{ + const int nmemb = ARRAY_SIZE(instructions); + + return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp); +} + +int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym) +{ + struct annotation *notes = symbol__annotation(sym); + pthread_mutex_init(¬es->lock, NULL); + 
return 0; +} + +int symbol__alloc_hist(struct symbol *sym) +{ + struct annotation *notes = symbol__annotation(sym); + const size_t size = symbol__size(sym); + size_t sizeof_sym_hist; + + /* Check for overflow when calculating sizeof_sym_hist */ + if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(u64)) + return -1; + + sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64)); + + /* Check for overflow in zalloc argument */ + if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src)) + / symbol_conf.nr_events) + return -1; + + notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist); + if (notes->src == NULL) + return -1; + notes->src->sizeof_sym_hist = sizeof_sym_hist; + notes->src->nr_histograms = symbol_conf.nr_events; + INIT_LIST_HEAD(¬es->src->source); + return 0; +} + +void symbol__annotate_zero_histograms(struct symbol *sym) +{ + struct annotation *notes = symbol__annotation(sym); + + pthread_mutex_lock(¬es->lock); + if (notes->src != NULL) + memset(notes->src->histograms, 0, + notes->src->nr_histograms * notes->src->sizeof_sym_hist); + pthread_mutex_unlock(¬es->lock); +} + +static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map, + struct annotation *notes, int evidx, u64 addr) +{ + unsigned offset; + struct sym_hist *h; + + pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr)); + + if (addr < sym->start || addr >= sym->end) + return -ERANGE; + + offset = addr - sym->start; + h = annotation__histogram(notes, evidx); + h->sum++; + h->addr[offset]++; + + pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64 + ", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name, + addr, addr - sym->start, evidx, h->addr[offset]); + return 0; +} + +static int symbol__inc_addr_samples(struct symbol *sym, struct map *map, + int evidx, u64 addr) +{ + struct annotation *notes; + + if (sym == NULL) + return 0; + + notes = symbol__annotation(sym); + if (notes->src == NULL) { + if (symbol__alloc_hist(sym) < 0) + return -ENOMEM; + } + + return __symbol__inc_addr_samples(sym, map, notes, evidx, addr); +} + +int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx) +{ + return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr); +} + +int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip) +{ + return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip); +} + +static void disasm_line__init_ins(struct disasm_line *dl) +{ + dl->ins = ins__find(dl->name); + + if (dl->ins == NULL) + return; + + if (!dl->ins->ops) + return; + + if (dl->ins->ops->parse && dl->ins->ops->parse(&dl->ops) < 0) + dl->ins = NULL; +} + +static int disasm_line__parse(char *line, char **namep, char **rawp) +{ + char *name = line, tmp; + + while (isspace(name[0])) + ++name; + + if (name[0] == '\0') + return -1; + + *rawp = name + 1; + + while ((*rawp)[0] != '\0' && !isspace((*rawp)[0])) + ++*rawp; + + tmp = (*rawp)[0]; + (*rawp)[0] = '\0'; + *namep = strdup(name); + + if (*namep == NULL) + goto out_free_name; + + (*rawp)[0] = tmp; + + if ((*rawp)[0] != '\0') { + (*rawp)++; + while (isspace((*rawp)[0])) + ++(*rawp); + } + + return 0; + +out_free_name: + zfree(namep); + return -1; +} + +static struct disasm_line *disasm_line__new(s64 offset, char *line, + size_t privsize, int line_nr) +{ + struct disasm_line *dl = zalloc(sizeof(*dl) + privsize); + + if (dl != NULL) { + dl->offset = offset; + dl->line = strdup(line); + dl->line_nr = line_nr; + if (dl->line == NULL) + goto out_delete; + + if (offset != -1) 
{ + if (disasm_line__parse(dl->line, &dl->name, &dl->ops.raw) < 0) + goto out_free_line; + + disasm_line__init_ins(dl); + } + } + + return dl; + +out_free_line: + zfree(&dl->line); +out_delete: + free(dl); + return NULL; +} + +void disasm_line__free(struct disasm_line *dl) +{ + zfree(&dl->line); + zfree(&dl->name); + if (dl->ins && dl->ins->ops->free) + dl->ins->ops->free(&dl->ops); + else + ins__delete(&dl->ops); + free(dl); +} + +int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw) +{ + if (raw || !dl->ins) + return scnprintf(bf, size, "%-6.6s %s", dl->name, dl->ops.raw); + + return ins__scnprintf(dl->ins, bf, size, &dl->ops); +} + +static void disasm__add(struct list_head *head, struct disasm_line *line) +{ + list_add_tail(&line->node, head); +} + +struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos) +{ + list_for_each_entry_continue(pos, head, node) + if (pos->offset >= 0) + return pos; + + return NULL; +} + +double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset, + s64 end, const char **path) +{ + struct source_line *src_line = notes->src->lines; + double percent = 0.0; + + if (src_line) { + size_t sizeof_src_line = sizeof(*src_line) + + sizeof(src_line->p) * (src_line->nr_pcnt - 1); + + while (offset < end) { + src_line = (void *)notes->src->lines + + (sizeof_src_line * offset); + + if (*path == NULL) + *path = src_line->path; + + percent += src_line->p[evidx].percent; + offset++; + } + } else { + struct sym_hist *h = annotation__histogram(notes, evidx); + unsigned int hits = 0; + + while (offset < end) + hits += h->addr[offset++]; + + if (h->sum) + percent = 100.0 * hits / h->sum; + } + + return percent; +} + +static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start, + struct perf_evsel *evsel, u64 len, int min_pcnt, int printed, + int max_lines, struct disasm_line *queue) +{ + static const char *prev_line; + static const char *prev_color; + + if (dl->offset != -1) { + const char *path = NULL; + double percent, max_percent = 0.0; + double *ppercents = &percent; + int i, nr_percent = 1; + const char *color; + struct annotation *notes = symbol__annotation(sym); + s64 offset = dl->offset; + const u64 addr = start + offset; + struct disasm_line *next; + + next = disasm__get_next_ip_line(¬es->src->source, dl); + + if (perf_evsel__is_group_event(evsel)) { + nr_percent = evsel->nr_members; + ppercents = calloc(nr_percent, sizeof(double)); + if (ppercents == NULL) + return -1; + } + + for (i = 0; i < nr_percent; i++) { + percent = disasm__calc_percent(notes, + notes->src->lines ? i : evsel->idx + i, + offset, + next ? next->offset : (s64) len, + &path); + + ppercents[i] = percent; + if (percent > max_percent) + max_percent = percent; + } + + if (max_percent < min_pcnt) + return -1; + + if (max_lines && printed >= max_lines) + return 1; + + if (queue != NULL) { + list_for_each_entry_from(queue, ¬es->src->source, node) { + if (queue == dl) + break; + disasm_line__print(queue, sym, start, evsel, len, + 0, 0, 1, NULL); + } + } + + color = get_percent_color(max_percent); + + /* + * Also color the filename and line if needed, with + * the same color than the percentage. 
Don't print it + * twice for close colored addr with the same filename:line + */ + if (path) { + if (!prev_line || strcmp(prev_line, path) + || color != prev_color) { + color_fprintf(stdout, color, " %s", path); + prev_line = path; + prev_color = color; + } + } + + for (i = 0; i < nr_percent; i++) { + percent = ppercents[i]; + color = get_percent_color(percent); + color_fprintf(stdout, color, " %7.2f", percent); + } + + printf(" : "); + color_fprintf(stdout, PERF_COLOR_MAGENTA, " %" PRIx64 ":", addr); + color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", dl->line); + + if (ppercents != &percent) + free(ppercents); + + } else if (max_lines && printed >= max_lines) + return 1; + else { + int width = 8; + + if (queue) + return -1; + + if (perf_evsel__is_group_event(evsel)) + width *= evsel->nr_members; + + if (!*dl->line) + printf(" %*s:\n", width, " "); + else + printf(" %*s: %s\n", width, " ", dl->line); + } + + return 0; +} + +/* + * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw) + * which looks like following + * + * 0000000000415500 <_init>: + * 415500: sub $0x8,%rsp + * 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8> + * 41550b: test %rax,%rax + * 41550e: je 415515 <_init+0x15> + * 415510: callq 416e70 <__gmon_start__@plt> + * 415515: add $0x8,%rsp + * 415519: retq + * + * it will be parsed and saved into struct disasm_line as + * + * + * The offset will be a relative offset from the start of the symbol and -1 + * means that it's not a disassembly line so should be treated differently. + * The ops.raw part will be parsed further according to type of the instruction. + */ +static int symbol__parse_objdump_line(struct symbol *sym, struct map *map, + FILE *file, size_t privsize, + int *line_nr) +{ + struct annotation *notes = symbol__annotation(sym); + struct disasm_line *dl; + char *line = NULL, *parsed_line, *tmp, *tmp2, *c; + size_t line_len; + s64 line_ip, offset = -1; + regmatch_t match[2]; + + if (getline(&line, &line_len, file) < 0) + return -1; + + if (!line) + return -1; + + while (line_len != 0 && isspace(line[line_len - 1])) + line[--line_len] = '\0'; + + c = strchr(line, '\n'); + if (c) + *c = 0; + + line_ip = -1; + parsed_line = line; + + /* /filename:linenr ? Save line number and ignore. 
*/ + if (regexec(&file_lineno, line, 2, match, 0) == 0) { + *line_nr = atoi(line + match[1].rm_so); + return 0; + } + + /* + * Strip leading spaces: + */ + tmp = line; + while (*tmp) { + if (*tmp != ' ') + break; + tmp++; + } + + if (*tmp) { + /* + * Parse hexa addresses followed by ':' + */ + line_ip = strtoull(tmp, &tmp2, 16); + if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0') + line_ip = -1; + } + + if (line_ip != -1) { + u64 start = map__rip_2objdump(map, sym->start), + end = map__rip_2objdump(map, sym->end); + + offset = line_ip - start; + if ((u64)line_ip < start || (u64)line_ip >= end) + offset = -1; + else + parsed_line = tmp2 + 1; + } + + dl = disasm_line__new(offset, parsed_line, privsize, *line_nr); + free(line); + (*line_nr)++; + + if (dl == NULL) + return -1; + + if (dl->ops.target.offset == UINT64_MAX) + dl->ops.target.offset = dl->ops.target.addr - + map__rip_2objdump(map, sym->start); + + /* kcore has no symbols, so add the call target name */ + if (dl->ins && ins__is_call(dl->ins) && !dl->ops.target.name) { + struct addr_map_symbol target = { + .map = map, + .addr = dl->ops.target.addr, + }; + + if (!map_groups__find_ams(&target, NULL) && + target.sym->start == target.al_addr) + dl->ops.target.name = strdup(target.sym->name); + } + + disasm__add(¬es->src->source, dl); + + return 0; +} + +static __attribute__((constructor)) void symbol__init_regexpr(void) +{ + regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED); +} + +static void delete_last_nop(struct symbol *sym) +{ + struct annotation *notes = symbol__annotation(sym); + struct list_head *list = ¬es->src->source; + struct disasm_line *dl; + + while (!list_empty(list)) { + dl = list_entry(list->prev, struct disasm_line, node); + + if (dl->ins && dl->ins->ops) { + if (dl->ins->ops != &nop_ops) + return; + } else { + if (!strstr(dl->line, " nop ") && + !strstr(dl->line, " nopl ") && + !strstr(dl->line, " nopw ")) + return; + } + + list_del(&dl->node); + disasm_line__free(dl); + } +} + +int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize) +{ + struct dso *dso = map->dso; + char *filename = dso__build_id_filename(dso, NULL, 0); + bool free_filename = true; + char command[PATH_MAX * 2]; + FILE *file; + int err = 0; + char symfs_filename[PATH_MAX]; + struct kcore_extract kce; + bool delete_extract = false; + int lineno = 0; + + if (filename) + symbol__join_symfs(symfs_filename, filename); + + if (filename == NULL) { + if (dso->has_build_id) { + pr_err("Can't annotate %s: not enough memory\n", + sym->name); + return -ENOMEM; + } + goto fallback; + } else if (dso__is_kcore(dso)) { + goto fallback; + } else if (readlink(symfs_filename, command, sizeof(command)) < 0 || + strstr(command, "[kernel.kallsyms]") || + access(symfs_filename, R_OK)) { + free(filename); +fallback: + /* + * If we don't have build-ids or the build-id file isn't in the + * cache, or is just a kallsyms file, well, lets hope that this + * DSO is the same as when 'perf record' ran. 
+ */ + filename = (char *)dso->long_name; + symbol__join_symfs(symfs_filename, filename); + free_filename = false; + } + + if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && + !dso__is_kcore(dso)) { + char bf[BUILD_ID_SIZE * 2 + 16] = " with build id "; + char *build_id_msg = NULL; + + if (dso->annotate_warned) + goto out_free_filename; + + if (dso->has_build_id) { + build_id__sprintf(dso->build_id, + sizeof(dso->build_id), bf + 15); + build_id_msg = bf; + } + err = -ENOENT; + dso->annotate_warned = 1; + pr_err("Can't annotate %s:\n\n" + "No vmlinux file%s\nwas found in the path.\n\n" + "Please use:\n\n" + " perf buildid-cache -vu vmlinux\n\n" + "or:\n\n" + " --vmlinux vmlinux\n", + sym->name, build_id_msg ?: ""); + goto out_free_filename; + } + + pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__, + filename, sym->name, map->unmap_ip(map, sym->start), + map->unmap_ip(map, sym->end)); + + pr_debug("annotating [%p] %30s : [%p] %30s\n", + dso, dso->long_name, sym, sym->name); + + if (dso__is_kcore(dso)) { + kce.kcore_filename = symfs_filename; + kce.addr = map__rip_2objdump(map, sym->start); + kce.offs = sym->start; + kce.len = sym->end - sym->start; + if (!kcore_extract__create(&kce)) { + delete_extract = true; + strlcpy(symfs_filename, kce.extract_filename, + sizeof(symfs_filename)); + if (free_filename) { + free(filename); + free_filename = false; + } + filename = symfs_filename; + } + } else if (dso__needs_decompress(dso)) { + char tmp[PATH_MAX]; + struct kmod_path m; + int fd; + bool ret; + + if (kmod_path__parse_ext(&m, symfs_filename)) + goto out_free_filename; + + snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX"); + + fd = mkstemp(tmp); + if (fd < 0) { + free(m.ext); + goto out_free_filename; + } + + ret = decompress_to_file(m.ext, symfs_filename, fd); + + free(m.ext); + close(fd); + + if (!ret) + goto out_free_filename; + + strcpy(symfs_filename, tmp); + } + + snprintf(command, sizeof(command), + "%s %s%s --start-address=0x%016" PRIx64 + " --stop-address=0x%016" PRIx64 + " -l -d %s %s -C %s 2>/dev/null|grep -v %s|expand", + objdump_path ? objdump_path : "objdump", + disassembler_style ? "-M " : "", + disassembler_style ? disassembler_style : "", + map__rip_2objdump(map, sym->start), + map__rip_2objdump(map, sym->end), + symbol_conf.annotate_asm_raw ? "" : "--no-show-raw", + symbol_conf.annotate_src ? "-S" : "", + symfs_filename, filename); + + pr_debug("Executing: %s\n", command); + + file = popen(command, "r"); + if (!file) + goto out_remove_tmp; + + while (!feof(file)) + if (symbol__parse_objdump_line(sym, map, file, privsize, + &lineno) < 0) + break; + + /* + * kallsyms does not have symbol sizes so there may a nop at the end. + * Remove it. 
+ */ + if (dso__is_kcore(dso)) + delete_last_nop(sym); + + pclose(file); + +out_remove_tmp: + if (dso__needs_decompress(dso)) + unlink(symfs_filename); +out_free_filename: + if (delete_extract) + kcore_extract__delete(&kce); + if (free_filename) + free(filename); + return err; +} + +static void insert_source_line(struct rb_root *root, struct source_line *src_line) +{ + struct source_line *iter; + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + int i, ret; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct source_line, node); + + ret = strcmp(iter->path, src_line->path); + if (ret == 0) { + for (i = 0; i < src_line->nr_pcnt; i++) + iter->p[i].percent_sum += src_line->p[i].percent; + return; + } + + if (ret < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + for (i = 0; i < src_line->nr_pcnt; i++) + src_line->p[i].percent_sum = src_line->p[i].percent; + + rb_link_node(&src_line->node, parent, p); + rb_insert_color(&src_line->node, root); +} + +static int cmp_source_line(struct source_line *a, struct source_line *b) +{ + int i; + + for (i = 0; i < a->nr_pcnt; i++) { + if (a->p[i].percent_sum == b->p[i].percent_sum) + continue; + return a->p[i].percent_sum > b->p[i].percent_sum; + } + + return 0; +} + +static void __resort_source_line(struct rb_root *root, struct source_line *src_line) +{ + struct source_line *iter; + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct source_line, node); + + if (cmp_source_line(src_line, iter)) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&src_line->node, parent, p); + rb_insert_color(&src_line->node, root); +} + +static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root) +{ + struct source_line *src_line; + struct rb_node *node; + + node = rb_first(src_root); + while (node) { + struct rb_node *next; + + src_line = rb_entry(node, struct source_line, node); + next = rb_next(node); + rb_erase(node, src_root); + + __resort_source_line(dest_root, src_line); + node = next; + } +} + +static void symbol__free_source_line(struct symbol *sym, int len) +{ + struct annotation *notes = symbol__annotation(sym); + struct source_line *src_line = notes->src->lines; + size_t sizeof_src_line; + int i; + + sizeof_src_line = sizeof(*src_line) + + (sizeof(src_line->p) * (src_line->nr_pcnt - 1)); + + for (i = 0; i < len; i++) { + free_srcline(src_line->path); + src_line = (void *)src_line + sizeof_src_line; + } + + zfree(¬es->src->lines); +} + +/* Get the filename:line for the colored entries */ +static int symbol__get_source_line(struct symbol *sym, struct map *map, + struct perf_evsel *evsel, + struct rb_root *root, int len) +{ + u64 start; + int i, k; + int evidx = evsel->idx; + struct source_line *src_line; + struct annotation *notes = symbol__annotation(sym); + struct sym_hist *h = annotation__histogram(notes, evidx); + struct rb_root tmp_root = RB_ROOT; + int nr_pcnt = 1; + u64 h_sum = h->sum; + size_t sizeof_src_line = sizeof(struct source_line); + + if (perf_evsel__is_group_event(evsel)) { + for (i = 1; i < evsel->nr_members; i++) { + h = annotation__histogram(notes, evidx + i); + h_sum += h->sum; + } + nr_pcnt = evsel->nr_members; + sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->p); + } + + if (!h_sum) + return 0; + + src_line = notes->src->lines = calloc(len, sizeof_src_line); + if (!notes->src->lines) + return -1; + + start = map__rip_2objdump(map, sym->start); + + 
for (i = 0; i < len; i++) { + u64 offset; + double percent_max = 0.0; + + src_line->nr_pcnt = nr_pcnt; + + for (k = 0; k < nr_pcnt; k++) { + h = annotation__histogram(notes, evidx + k); + src_line->p[k].percent = 100.0 * h->addr[i] / h->sum; + + if (src_line->p[k].percent > percent_max) + percent_max = src_line->p[k].percent; + } + + if (percent_max <= 0.5) + goto next; + + offset = start + i; + src_line->path = get_srcline(map->dso, offset, NULL, false); + insert_source_line(&tmp_root, src_line); + + next: + src_line = (void *)src_line + sizeof_src_line; + } + + resort_source_line(root, &tmp_root); + return 0; +} + +static void print_summary(struct rb_root *root, const char *filename) +{ + struct source_line *src_line; + struct rb_node *node; + + printf("\nSorted summary for file %s\n", filename); + printf("----------------------------------------------\n\n"); + + if (RB_EMPTY_ROOT(root)) { + printf(" Nothing higher than %1.1f%%\n", MIN_GREEN); + return; + } + + node = rb_first(root); + while (node) { + double percent, percent_max = 0.0; + const char *color; + char *path; + int i; + + src_line = rb_entry(node, struct source_line, node); + for (i = 0; i < src_line->nr_pcnt; i++) { + percent = src_line->p[i].percent_sum; + color = get_percent_color(percent); + color_fprintf(stdout, color, " %7.2f", percent); + + if (percent > percent_max) + percent_max = percent; + } + + path = src_line->path; + color = get_percent_color(percent_max); + color_fprintf(stdout, color, " %s\n", path); + + node = rb_next(node); + } +} + +static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel) +{ + struct annotation *notes = symbol__annotation(sym); + struct sym_hist *h = annotation__histogram(notes, evsel->idx); + u64 len = symbol__size(sym), offset; + + for (offset = 0; offset < len; ++offset) + if (h->addr[offset] != 0) + printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2, + sym->start + offset, h->addr[offset]); + printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum); +} + +int symbol__annotate_printf(struct symbol *sym, struct map *map, + struct perf_evsel *evsel, bool full_paths, + int min_pcnt, int max_lines, int context) +{ + struct dso *dso = map->dso; + char *filename; + const char *d_filename; + const char *evsel_name = perf_evsel__name(evsel); + struct annotation *notes = symbol__annotation(sym); + struct disasm_line *pos, *queue = NULL; + u64 start = map__rip_2objdump(map, sym->start); + int printed = 2, queue_len = 0; + int more = 0; + u64 len; + int width = 8; + int namelen, evsel_name_len, graph_dotted_len; + + filename = strdup(dso->long_name); + if (!filename) + return -ENOMEM; + + if (full_paths) + d_filename = filename; + else + d_filename = basename(filename); + + len = symbol__size(sym); + namelen = strlen(d_filename); + evsel_name_len = strlen(evsel_name); + + if (perf_evsel__is_group_event(evsel)) + width *= evsel->nr_members; + + printf(" %-*.*s| Source code & Disassembly of %s for %s\n", + width, width, "Percent", d_filename, evsel_name); + + graph_dotted_len = width + namelen + evsel_name_len; + printf("-%-*.*s-----------------------------------------\n", + graph_dotted_len, graph_dotted_len, graph_dotted_line); + + if (verbose) + symbol__annotate_hits(sym, evsel); + + list_for_each_entry(pos, ¬es->src->source, node) { + if (context && queue == NULL) { + queue = pos; + queue_len = 0; + } + + switch (disasm_line__print(pos, sym, start, evsel, len, + min_pcnt, printed, max_lines, + queue)) { + case 0: + ++printed; + if (context) { + printed += 
queue_len; + queue = NULL; + queue_len = 0; + } + break; + case 1: + /* filtered by max_lines */ + ++more; + break; + case -1: + default: + /* + * Filtered by min_pcnt or non IP lines when + * context != 0 + */ + if (!context) + break; + if (queue_len == context) + queue = list_entry(queue->node.next, typeof(*queue), node); + else + ++queue_len; + break; + } + } + + free(filename); + + return more; +} + +void symbol__annotate_zero_histogram(struct symbol *sym, int evidx) +{ + struct annotation *notes = symbol__annotation(sym); + struct sym_hist *h = annotation__histogram(notes, evidx); + + memset(h, 0, notes->src->sizeof_sym_hist); +} + +void symbol__annotate_decay_histogram(struct symbol *sym, int evidx) +{ + struct annotation *notes = symbol__annotation(sym); + struct sym_hist *h = annotation__histogram(notes, evidx); + int len = symbol__size(sym), offset; + + h->sum = 0; + for (offset = 0; offset < len; ++offset) { + h->addr[offset] = h->addr[offset] * 7 / 8; + h->sum += h->addr[offset]; + } +} + +void disasm__purge(struct list_head *head) +{ + struct disasm_line *pos, *n; + + list_for_each_entry_safe(pos, n, head, node) { + list_del(&pos->node); + disasm_line__free(pos); + } +} + +static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp) +{ + size_t printed; + + if (dl->offset == -1) + return fprintf(fp, "%s\n", dl->line); + + printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->name); + + if (dl->ops.raw[0] != '\0') { + printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ", + dl->ops.raw); + } + + return printed + fprintf(fp, "\n"); +} + +size_t disasm__fprintf(struct list_head *head, FILE *fp) +{ + struct disasm_line *pos; + size_t printed = 0; + + list_for_each_entry(pos, head, node) + printed += disasm_line__fprintf(pos, fp); + + return printed; +} + +int symbol__tty_annotate(struct symbol *sym, struct map *map, + struct perf_evsel *evsel, bool print_lines, + bool full_paths, int min_pcnt, int max_lines) +{ + struct dso *dso = map->dso; + struct rb_root source_line = RB_ROOT; + u64 len; + + if (symbol__annotate(sym, map, 0) < 0) + return -1; + + len = symbol__size(sym); + + if (print_lines) { + symbol__get_source_line(sym, map, evsel, &source_line, len); + print_summary(&source_line, dso->long_name); + } + + symbol__annotate_printf(sym, map, evsel, full_paths, + min_pcnt, max_lines, 0); + if (print_lines) + symbol__free_source_line(sym, len); + + disasm__purge(&symbol__annotation(sym)->src->source); + + return 0; +} + +int hist_entry__annotate(struct hist_entry *he, size_t privsize) +{ + return symbol__annotate(he->ms.sym, he->ms.map, privsize); +} + +bool ui__has_annotation(void) +{ + return use_browser == 1 && sort__has_sym; +} diff --git a/kernel/tools/perf/util/annotate.h b/kernel/tools/perf/util/annotate.h new file mode 100644 index 000000000..cadbdc90a --- /dev/null +++ b/kernel/tools/perf/util/annotate.h @@ -0,0 +1,172 @@ +#ifndef __PERF_ANNOTATE_H +#define __PERF_ANNOTATE_H + +#include +#include +#include +#include "symbol.h" +#include "hist.h" +#include "sort.h" +#include +#include +#include + +struct ins; + +struct ins_operands { + char *raw; + struct { + char *raw; + char *name; + u64 addr; + u64 offset; + } target; + union { + struct { + char *raw; + char *name; + u64 addr; + } source; + struct { + struct ins *ins; + struct ins_operands *ops; + } locked; + }; +}; + +struct ins_ops { + void (*free)(struct ins_operands *ops); + int (*parse)(struct ins_operands *ops); + int (*scnprintf)(struct ins *ins, char *bf, size_t size, + struct ins_operands *ops); 
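The 7/8 factor in symbol__annotate_decay_histogram() above ages the histogram geometrically, so with periodic decay old samples fade out instead of accumulating forever (this is what keeps 'perf top' annotation current). A minimal standalone sketch of that decay step; the hypothetical decay_histogram() stands in for the real function, which works on struct sym_hist:

#include <stdio.h>
#include <stdint.h>

static uint64_t decay_histogram(uint64_t *addr, int len)
{
	uint64_t sum = 0;
	int i;

	for (i = 0; i < len; i++) {
		addr[i] = addr[i] * 7 / 8;	/* keep 87.5% of each bucket */
		sum += addr[i];			/* recompute h->sum on the fly */
	}
	return sum;
}

int main(void)
{
	uint64_t h[4] = { 800, 80, 8, 1 };
	uint64_t sum = decay_histogram(h, 4);

	/* prints 700 70 7 0 (sum 777): the smallest counts vanish first */
	printf("%llu %llu %llu %llu (sum %llu)\n",
	       (unsigned long long)h[0], (unsigned long long)h[1],
	       (unsigned long long)h[2], (unsigned long long)h[3],
	       (unsigned long long)sum);
	return 0;
}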
+}; + +struct ins { + const char *name; + struct ins_ops *ops; +}; + +bool ins__is_jump(const struct ins *ins); +bool ins__is_call(const struct ins *ins); +int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops); + +struct annotation; + +struct disasm_line { + struct list_head node; + s64 offset; + char *line; + char *name; + struct ins *ins; + int line_nr; + struct ins_operands ops; +}; + +static inline bool disasm_line__has_offset(const struct disasm_line *dl) +{ + return dl->ops.target.offset != UINT64_MAX; +} + +void disasm_line__free(struct disasm_line *dl); +struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos); +int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw); +size_t disasm__fprintf(struct list_head *head, FILE *fp); +double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset, + s64 end, const char **path); + +struct sym_hist { + u64 sum; + u64 addr[0]; +}; + +struct source_line_percent { + double percent; + double percent_sum; +}; + +struct source_line { + struct rb_node node; + char *path; + int nr_pcnt; + struct source_line_percent p[1]; +}; + +/** struct annotated_source - symbols with hits have this attached as in sannotation + * + * @histogram: Array of addr hit histograms per event being monitored + * @lines: If 'print_lines' is specified, per source code line percentages + * @source: source parsed from a disassembler like objdump -dS + * + * lines is allocated, percentages calculated and all sorted by percentage + * when the annotation is about to be presented, so the percentages are for + * one of the entries in the histogram array, i.e. for the event/counter being + * presented. It is deallocated right after symbol__{tui,tty,etc}_annotate + * returns. 
+ */ +struct annotated_source { + struct list_head source; + struct source_line *lines; + int nr_histograms; + int sizeof_sym_hist; + struct sym_hist histograms[0]; +}; + +struct annotation { + pthread_mutex_t lock; + struct annotated_source *src; +}; + +static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx) +{ + return (((void *)¬es->src->histograms) + + (notes->src->sizeof_sym_hist * idx)); +} + +static inline struct annotation *symbol__annotation(struct symbol *sym) +{ + return (void *)sym - symbol_conf.priv_size; +} + +int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx); + +int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 addr); + +int symbol__alloc_hist(struct symbol *sym); +void symbol__annotate_zero_histograms(struct symbol *sym); + +int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize); + +int hist_entry__annotate(struct hist_entry *he, size_t privsize); + +int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym); +int symbol__annotate_printf(struct symbol *sym, struct map *map, + struct perf_evsel *evsel, bool full_paths, + int min_pcnt, int max_lines, int context); +void symbol__annotate_zero_histogram(struct symbol *sym, int evidx); +void symbol__annotate_decay_histogram(struct symbol *sym, int evidx); +void disasm__purge(struct list_head *head); + +bool ui__has_annotation(void); + +int symbol__tty_annotate(struct symbol *sym, struct map *map, + struct perf_evsel *evsel, bool print_lines, + bool full_paths, int min_pcnt, int max_lines); + +#ifdef HAVE_SLANG_SUPPORT +int symbol__tui_annotate(struct symbol *sym, struct map *map, + struct perf_evsel *evsel, + struct hist_browser_timer *hbt); +#else +static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused, + struct map *map __maybe_unused, + struct perf_evsel *evsel __maybe_unused, + struct hist_browser_timer *hbt + __maybe_unused) +{ + return 0; +} +#endif + +extern const char *disassembler_style; + +#endif /* __PERF_ANNOTATE_H */ diff --git a/kernel/tools/perf/util/bitmap.c b/kernel/tools/perf/util/bitmap.c new file mode 100644 index 000000000..0a1adc111 --- /dev/null +++ b/kernel/tools/perf/util/bitmap.c @@ -0,0 +1,31 @@ +/* + * From lib/bitmap.c + * Helper functions for bitmap.h. + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ +#include + +int __bitmap_weight(const unsigned long *bitmap, int bits) +{ + int k, w = 0, lim = bits/BITS_PER_LONG; + + for (k = 0; k < lim; k++) + w += hweight_long(bitmap[k]); + + if (bits % BITS_PER_LONG) + w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); + + return w; +} + +void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits) +{ + int k; + int nr = BITS_TO_LONGS(bits); + + for (k = 0; k < nr; k++) + dst[k] = bitmap1[k] | bitmap2[k]; +} diff --git a/kernel/tools/perf/util/build-id.c b/kernel/tools/perf/util/build-id.c new file mode 100644 index 000000000..61867dff5 --- /dev/null +++ b/kernel/tools/perf/util/build-id.c @@ -0,0 +1,538 @@ +/* + * build-id.c + * + * build-id support + * + * Copyright (C) 2009, 2010 Red Hat Inc. 
+ * Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo + */ +#include "util.h" +#include +#include "build-id.h" +#include "event.h" +#include "symbol.h" +#include +#include "debug.h" +#include "session.h" +#include "tool.h" +#include "header.h" +#include "vdso.h" + + +static bool no_buildid_cache; + +int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel __maybe_unused, + struct machine *machine) +{ + struct addr_location al; + u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + struct thread *thread = machine__findnew_thread(machine, sample->pid, + sample->tid); + + if (thread == NULL) { + pr_err("problem processing %d event, skipping it.\n", + event->header.type); + return -1; + } + + thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, &al); + + if (al.map != NULL) + al.map->dso->hit = 1; + + return 0; +} + +static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample + __maybe_unused, + struct machine *machine) +{ + struct thread *thread = machine__findnew_thread(machine, + event->fork.pid, + event->fork.tid); + + dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid, + event->fork.ppid, event->fork.ptid); + + if (thread) + machine__remove_thread(machine, thread); + + return 0; +} + +struct perf_tool build_id__mark_dso_hit_ops = { + .sample = build_id__mark_dso_hit, + .mmap = perf_event__process_mmap, + .mmap2 = perf_event__process_mmap2, + .fork = perf_event__process_fork, + .exit = perf_event__exit_del_thread, + .attr = perf_event__process_attr, + .build_id = perf_event__process_build_id, +}; + +int build_id__sprintf(const u8 *build_id, int len, char *bf) +{ + char *bid = bf; + const u8 *raw = build_id; + int i; + + for (i = 0; i < len; ++i) { + sprintf(bid, "%02x", *raw); + ++raw; + bid += 2; + } + + return raw - build_id; +} + +/* asnprintf consolidates asprintf and snprintf */ +static int asnprintf(char **strp, size_t size, const char *fmt, ...) 
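build_id__sprintf() above renders a raw build-id as lowercase hex, two digits per byte; callers size the output buffer as BUILD_ID_SIZE * 2 + 1. A self-contained sketch of that formatting (sbuild_id() here is a made-up name):

#include <stdio.h>

static void sbuild_id(const unsigned char *id, int len, char *bf)
{
	int i;

	for (i = 0; i < len; i++)	/* each byte becomes two hex chars */
		sprintf(bf + i * 2, "%02x", id[i]);
}

int main(void)
{
	const unsigned char id[20] = { 0xde, 0xad, 0xbe, 0xef };	/* rest zero */
	char bf[20 * 2 + 1];		/* BUILD_ID_SIZE * 2 + 1 */

	sbuild_id(id, 20, bf);
	printf("%s\n", bf);	/* deadbeef followed by 32 zeros */
	return 0;
}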
+{ + va_list ap; + int ret; + + if (!strp) + return -EINVAL; + + va_start(ap, fmt); + if (*strp) + ret = vsnprintf(*strp, size, fmt, ap); + else + ret = vasprintf(strp, fmt, ap); + va_end(ap); + + return ret; +} + +static char *build_id__filename(const char *sbuild_id, char *bf, size_t size) +{ + char *tmp = bf; + int ret = asnprintf(&bf, size, "%s/.build-id/%.2s/%s", buildid_dir, + sbuild_id, sbuild_id + 2); + if (ret < 0 || (tmp && size < (unsigned int)ret)) + return NULL; + return bf; +} + +char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size) +{ + char build_id_hex[BUILD_ID_SIZE * 2 + 1]; + + if (!dso->has_build_id) + return NULL; + + build_id__sprintf(dso->build_id, sizeof(dso->build_id), build_id_hex); + return build_id__filename(build_id_hex, bf, size); +} + +#define dsos__for_each_with_build_id(pos, head) \ + list_for_each_entry(pos, head, node) \ + if (!pos->has_build_id) \ + continue; \ + else + +static int write_buildid(const char *name, size_t name_len, u8 *build_id, + pid_t pid, u16 misc, int fd) +{ + int err; + struct build_id_event b; + size_t len; + + len = name_len + 1; + len = PERF_ALIGN(len, NAME_ALIGN); + + memset(&b, 0, sizeof(b)); + memcpy(&b.build_id, build_id, BUILD_ID_SIZE); + b.pid = pid; + b.header.misc = misc; + b.header.size = sizeof(b) + len; + + err = writen(fd, &b, sizeof(b)); + if (err < 0) + return err; + + return write_padded(fd, name, name_len + 1, len); +} + +static int __dsos__write_buildid_table(struct list_head *head, + struct machine *machine, + pid_t pid, u16 misc, int fd) +{ + char nm[PATH_MAX]; + struct dso *pos; + + dsos__for_each_with_build_id(pos, head) { + int err; + const char *name; + size_t name_len; + + if (!pos->hit) + continue; + + if (dso__is_vdso(pos)) { + name = pos->short_name; + name_len = pos->short_name_len + 1; + } else if (dso__is_kcore(pos)) { + machine__mmap_name(machine, nm, sizeof(nm)); + name = nm; + name_len = strlen(nm) + 1; + } else { + name = pos->long_name; + name_len = pos->long_name_len + 1; + } + + err = write_buildid(name, name_len, pos->build_id, + pid, misc, fd); + if (err) + return err; + } + + return 0; +} + +static int machine__write_buildid_table(struct machine *machine, int fd) +{ + int err; + u16 kmisc = PERF_RECORD_MISC_KERNEL, + umisc = PERF_RECORD_MISC_USER; + + if (!machine__is_host(machine)) { + kmisc = PERF_RECORD_MISC_GUEST_KERNEL; + umisc = PERF_RECORD_MISC_GUEST_USER; + } + + err = __dsos__write_buildid_table(&machine->kernel_dsos.head, machine, + machine->pid, kmisc, fd); + if (err == 0) + err = __dsos__write_buildid_table(&machine->user_dsos.head, + machine, machine->pid, umisc, + fd); + return err; +} + +int perf_session__write_buildid_table(struct perf_session *session, int fd) +{ + struct rb_node *nd; + int err = machine__write_buildid_table(&session->machines.host, fd); + + if (err) + return err; + + for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + err = machine__write_buildid_table(pos, fd); + if (err) + break; + } + return err; +} + +static int __dsos__hit_all(struct list_head *head) +{ + struct dso *pos; + + list_for_each_entry(pos, head, node) + pos->hit = true; + + return 0; +} + +static int machine__hit_all_dsos(struct machine *machine) +{ + int err; + + err = __dsos__hit_all(&machine->kernel_dsos.head); + if (err) + return err; + + return __dsos__hit_all(&machine->user_dsos.head); +} + +int dsos__hit_all(struct perf_session *session) +{ + struct rb_node *nd; + int err; + + err = 
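The asnprintf() helper completed above has a dual behavior worth spelling out: with a non-NULL *strp it formats into the caller's fixed buffer (vsnprintf), and with a NULL *strp it allocates a buffer of the right size (vasprintf). A runnable re-sketch under that reading (my_asnprintf() is a stand-in name, not the patch's symbol):

#define _GNU_SOURCE		/* for vasprintf() */
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>

static int my_asnprintf(char **strp, size_t size, const char *fmt, ...)
{
	va_list ap;
	int ret;

	if (!strp)
		return -1;
	va_start(ap, fmt);
	if (*strp)
		ret = vsnprintf(*strp, size, fmt, ap);	/* fixed buffer, may truncate */
	else
		ret = vasprintf(strp, fmt, ap);		/* allocates to fit */
	va_end(ap);
	return ret;
}

int main(void)
{
	char buf[8], *fixed = buf, *heap = NULL;

	my_asnprintf(&fixed, sizeof(buf), "%s", "truncated-here");
	my_asnprintf(&heap, 0, "%s", "sized to fit");
	printf("%s / %s\n", buf, heap);		/* truncat / sized to fit */
	free(heap);
	return 0;
}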
machine__hit_all_dsos(&session->machines.host); + if (err) + return err; + + for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + + err = machine__hit_all_dsos(pos); + if (err) + return err; + } + + return 0; +} + +void disable_buildid_cache(void) +{ + no_buildid_cache = true; +} + +static char *build_id_cache__dirname_from_path(const char *name, + bool is_kallsyms, bool is_vdso) +{ + char *realname = (char *)name, *filename; + bool slash = is_kallsyms || is_vdso; + + if (!slash) { + realname = realpath(name, NULL); + if (!realname) + return NULL; + } + + if (asprintf(&filename, "%s%s%s", buildid_dir, slash ? "/" : "", + is_vdso ? DSO__NAME_VDSO : realname) < 0) + filename = NULL; + + if (!slash) + free(realname); + + return filename; +} + +int build_id_cache__list_build_ids(const char *pathname, + struct strlist **result) +{ + struct strlist *list; + char *dir_name; + DIR *dir; + struct dirent *d; + int ret = 0; + + list = strlist__new(true, NULL); + dir_name = build_id_cache__dirname_from_path(pathname, false, false); + if (!list || !dir_name) { + ret = -ENOMEM; + goto out; + } + + /* List up all dirents */ + dir = opendir(dir_name); + if (!dir) { + ret = -errno; + goto out; + } + + while ((d = readdir(dir)) != NULL) { + if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, "..")) + continue; + strlist__add(list, d->d_name); + } + closedir(dir); + +out: + free(dir_name); + if (ret) + strlist__delete(list); + else + *result = list; + + return ret; +} + +int build_id_cache__add_s(const char *sbuild_id, const char *name, + bool is_kallsyms, bool is_vdso) +{ + const size_t size = PATH_MAX; + char *realname = NULL, *filename = NULL, *dir_name = NULL, + *linkname = zalloc(size), *targetname, *tmp; + int err = -1; + + if (!is_kallsyms) { + realname = realpath(name, NULL); + if (!realname) + goto out_free; + } + + dir_name = build_id_cache__dirname_from_path(name, is_kallsyms, is_vdso); + if (!dir_name) + goto out_free; + + if (mkdir_p(dir_name, 0755)) + goto out_free; + + if (asprintf(&filename, "%s/%s", dir_name, sbuild_id) < 0) { + filename = NULL; + goto out_free; + } + + if (access(filename, F_OK)) { + if (is_kallsyms) { + if (copyfile("/proc/kallsyms", filename)) + goto out_free; + } else if (link(realname, filename) && errno != EEXIST && + copyfile(name, filename)) + goto out_free; + } + + if (!build_id__filename(sbuild_id, linkname, size)) + goto out_free; + tmp = strrchr(linkname, '/'); + *tmp = '\0'; + + if (access(linkname, X_OK) && mkdir_p(linkname, 0755)) + goto out_free; + + *tmp = '/'; + targetname = filename + strlen(buildid_dir) - 5; + memcpy(targetname, "../..", 5); + + if (symlink(targetname, linkname) == 0) + err = 0; +out_free: + if (!is_kallsyms) + free(realname); + free(filename); + free(dir_name); + free(linkname); + return err; +} + +static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, + const char *name, bool is_kallsyms, + bool is_vdso) +{ + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + + build_id__sprintf(build_id, build_id_size, sbuild_id); + + return build_id_cache__add_s(sbuild_id, name, is_kallsyms, is_vdso); +} + +bool build_id_cache__cached(const char *sbuild_id) +{ + bool ret = false; + char *filename = build_id__filename(sbuild_id, NULL, 0); + + if (filename && !access(filename, F_OK)) + ret = true; + free(filename); + + return ret; +} + +int build_id_cache__remove_s(const char *sbuild_id) +{ + const size_t size = PATH_MAX; + char *filename = zalloc(size), + 
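For orientation, the cache layout that build_id__filename() and build_id_cache__add_s() above maintain fans entries out under a two-hex-digit directory, so a build-id maps to <buildid_dir>/.build-id/<first 2 chars>/<remaining chars>. A sketch of just the path construction (the buildid_dir value is an assumed example; at runtime it is resolved from the user's home):

#include <stdio.h>

int main(void)
{
	const char *buildid_dir = "/home/user/.debug";	/* assumed value */
	const char *sbuild_id =
		"deadbeef0123456789abcdef0123456789abcdef";
	char path[256];

	/* same format string as build_id__filename() above */
	snprintf(path, sizeof(path), "%s/.build-id/%.2s/%s",
		 buildid_dir, sbuild_id, sbuild_id + 2);
	printf("%s\n", path);
	/* /home/user/.debug/.build-id/de/adbeef0123456789abcdef0123456789abcdef */
	return 0;
}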
*linkname = zalloc(size), *tmp; + int err = -1; + + if (filename == NULL || linkname == NULL) + goto out_free; + + if (!build_id__filename(sbuild_id, linkname, size)) + goto out_free; + + if (access(linkname, F_OK)) + goto out_free; + + if (readlink(linkname, filename, size - 1) < 0) + goto out_free; + + if (unlink(linkname)) + goto out_free; + + /* + * Since the link is relative, we must make it absolute: + */ + tmp = strrchr(linkname, '/') + 1; + snprintf(tmp, size - (tmp - linkname), "%s", filename); + + if (unlink(linkname)) + goto out_free; + + err = 0; +out_free: + free(filename); + free(linkname); + return err; +} + +static int dso__cache_build_id(struct dso *dso, struct machine *machine) +{ + bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; + bool is_vdso = dso__is_vdso(dso); + const char *name = dso->long_name; + char nm[PATH_MAX]; + + if (dso__is_kcore(dso)) { + is_kallsyms = true; + machine__mmap_name(machine, nm, sizeof(nm)); + name = nm; + } + return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name, + is_kallsyms, is_vdso); +} + +static int __dsos__cache_build_ids(struct list_head *head, + struct machine *machine) +{ + struct dso *pos; + int err = 0; + + dsos__for_each_with_build_id(pos, head) + if (dso__cache_build_id(pos, machine)) + err = -1; + + return err; +} + +static int machine__cache_build_ids(struct machine *machine) +{ + int ret = __dsos__cache_build_ids(&machine->kernel_dsos.head, machine); + ret |= __dsos__cache_build_ids(&machine->user_dsos.head, machine); + return ret; +} + +int perf_session__cache_build_ids(struct perf_session *session) +{ + struct rb_node *nd; + int ret; + + if (no_buildid_cache) + return 0; + + if (mkdir(buildid_dir, 0755) != 0 && errno != EEXIST) + return -1; + + ret = machine__cache_build_ids(&session->machines.host); + + for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + ret |= machine__cache_build_ids(pos); + } + return ret ? 
-1 : 0; +} + +static bool machine__read_build_ids(struct machine *machine, bool with_hits) +{ + bool ret; + + ret = __dsos__read_build_ids(&machine->kernel_dsos.head, with_hits); + ret |= __dsos__read_build_ids(&machine->user_dsos.head, with_hits); + return ret; +} + +bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) +{ + struct rb_node *nd; + bool ret = machine__read_build_ids(&session->machines.host, with_hits); + + for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + ret |= machine__read_build_ids(pos, with_hits); + } + + return ret; +} diff --git a/kernel/tools/perf/util/build-id.h b/kernel/tools/perf/util/build-id.h new file mode 100644 index 000000000..85011222c --- /dev/null +++ b/kernel/tools/perf/util/build-id.h @@ -0,0 +1,34 @@ +#ifndef PERF_BUILD_ID_H_ +#define PERF_BUILD_ID_H_ 1 + +#define BUILD_ID_SIZE 20 + +#include "tool.h" +#include "strlist.h" +#include + +extern struct perf_tool build_id__mark_dso_hit_ops; +struct dso; + +int build_id__sprintf(const u8 *build_id, int len, char *bf); +char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size); + +int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event, + struct perf_sample *sample, struct perf_evsel *evsel, + struct machine *machine); + +int dsos__hit_all(struct perf_session *session); + +bool perf_session__read_build_ids(struct perf_session *session, bool with_hits); +int perf_session__write_buildid_table(struct perf_session *session, int fd); +int perf_session__cache_build_ids(struct perf_session *session); + +int build_id_cache__list_build_ids(const char *pathname, + struct strlist **result); +bool build_id_cache__cached(const char *sbuild_id); +int build_id_cache__add_s(const char *sbuild_id, + const char *name, bool is_kallsyms, bool is_vdso); +int build_id_cache__remove_s(const char *sbuild_id); +void disable_buildid_cache(void); + +#endif diff --git a/kernel/tools/perf/util/cache.h b/kernel/tools/perf/util/cache.h new file mode 100644 index 000000000..fbcca21d6 --- /dev/null +++ b/kernel/tools/perf/util/cache.h @@ -0,0 +1,80 @@ +#ifndef __PERF_CACHE_H +#define __PERF_CACHE_H + +#include +#include "util.h" +#include "strbuf.h" +#include "../perf.h" +#include "../ui/ui.h" + +#define CMD_EXEC_PATH "--exec-path" +#define CMD_PERF_DIR "--perf-dir=" +#define CMD_WORK_TREE "--work-tree=" +#define CMD_DEBUGFS_DIR "--debugfs-dir=" + +#define PERF_DIR_ENVIRONMENT "PERF_DIR" +#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE" +#define EXEC_PATH_ENVIRONMENT "PERF_EXEC_PATH" +#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf" +#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR" +#define PERF_TRACEFS_ENVIRONMENT "PERF_TRACEFS_DIR" + +typedef int (*config_fn_t)(const char *, const char *, void *); +extern int perf_default_config(const char *, const char *, void *); +extern int perf_config(config_fn_t fn, void *); +extern int perf_config_int(const char *, const char *); +extern u64 perf_config_u64(const char *, const char *); +extern int perf_config_bool(const char *, const char *); +extern int config_error_nonbool(const char *); +extern const char *perf_config_dirname(const char *, const char *); + +/* pager.c */ +extern void setup_pager(void); +extern const char *pager_program; +extern int pager_in_use(void); +extern int pager_use_color; + +char *alias_lookup(const char *alias); +int split_cmdline(char *cmdline, const char ***argv); + +#define alloc_nr(x) (((x)+16)*3/2) + +/* + * Realloc 
the buffer pointed at by variable 'x' so that it can hold + * at least 'nr' entries; the number of entries currently allocated + * is 'alloc', using the standard growing factor alloc_nr() macro. + * + * DO NOT USE any expression with side-effect for 'x' or 'alloc'. + */ +#define ALLOC_GROW(x, nr, alloc) \ + do { \ + if ((nr) > alloc) { \ + if (alloc_nr(alloc) < (nr)) \ + alloc = (nr); \ + else \ + alloc = alloc_nr(alloc); \ + x = xrealloc((x), alloc * sizeof(*(x))); \ + } \ + } while(0) + + +static inline int is_absolute_path(const char *path) +{ + return path[0] == '/'; +} + +const char *make_nonrelative_path(const char *path); +char *strip_path_suffix(const char *path, const char *suffix); + +extern char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2))); +extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2))); + +extern char *perf_pathdup(const char *fmt, ...) + __attribute__((format (printf, 1, 2))); + +#ifndef __UCLIBC__ +/* Matches the libc/libbsd function attribute so we declare this unconditionally: */ +extern size_t strlcpy(char *dest, const char *src, size_t size); +#endif + +#endif /* __PERF_CACHE_H */ diff --git a/kernel/tools/perf/util/callchain.c b/kernel/tools/perf/util/callchain.c new file mode 100644 index 000000000..9f643ee77 --- /dev/null +++ b/kernel/tools/perf/util/callchain.c @@ -0,0 +1,881 @@ +/* + * Copyright (C) 2009-2011, Frederic Weisbecker + * + * Handle the callchains from the stream in an ad-hoc radix tree and then + * sort them in an rbtree. + * + * Using a radix for code path provides a fast retrieval and factorizes + * memory use. Also that lets us use the paths in a hierarchical graph view. + * + */ + +#include +#include +#include +#include +#include + +#include "asm/bug.h" + +#include "hist.h" +#include "util.h" +#include "sort.h" +#include "machine.h" +#include "callchain.h" + +__thread struct callchain_cursor callchain_cursor; + +#ifdef HAVE_DWARF_UNWIND_SUPPORT +static int get_stack_size(const char *str, unsigned long *_size) +{ + char *endptr; + unsigned long size; + unsigned long max_size = round_down(USHRT_MAX, sizeof(u64)); + + size = strtoul(str, &endptr, 0); + + do { + if (*endptr) + break; + + size = round_up(size, sizeof(u64)); + if (!size || size > max_size) + break; + + *_size = size; + return 0; + + } while (0); + + pr_err("callchain: Incorrect stack dump size (max %ld): %s\n", + max_size, str); + return -1; +} +#endif /* HAVE_DWARF_UNWIND_SUPPORT */ + +int parse_callchain_record_opt(const char *arg) +{ + char *tok, *name, *saveptr = NULL; + char *buf; + int ret = -1; + + /* We need buffer that we know we can write to. */ + buf = malloc(strlen(arg) + 1); + if (!buf) + return -ENOMEM; + + strcpy(buf, arg); + + tok = strtok_r((char *)buf, ",", &saveptr); + name = tok ? 
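ALLOC_GROW() above implements amortized array growth: when 'nr' exceeds 'alloc' it jumps to alloc_nr(alloc), i.e. roughly 1.5x plus a constant, falling back to exactly 'nr' for big jumps. How a caller typically drives it, in a standalone sketch (xrealloc is replaced by realloc plus abort-on-failure to keep this self-contained):

#include <stdio.h>
#include <stdlib.h>

#define alloc_nr(x) (((x)+16)*3/2)

#define ALLOC_GROW(x, nr, alloc) \
	do { \
		if ((nr) > alloc) { \
			if (alloc_nr(alloc) < (nr)) \
				alloc = (nr); \
			else \
				alloc = alloc_nr(alloc); \
			x = realloc((x), alloc * sizeof(*(x))); \
			if (!(x)) \
				abort(); \
		} \
	} while (0)

int main(void)
{
	int *vec = NULL, nr = 0, alloc = 0, i;

	for (i = 0; i < 1000; i++) {
		ALLOC_GROW(vec, nr + 1, alloc);	/* reallocs at 24, 60, 114, ... */
		vec[nr++] = i;
	}
	printf("%d entries in %d slots\n", nr, alloc);	/* 1000 in 1180 */
	free(vec);
	return 0;
}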
: (char *)buf; + + do { + /* Framepointer style */ + if (!strncmp(name, "fp", sizeof("fp"))) { + if (!strtok_r(NULL, ",", &saveptr)) { + callchain_param.record_mode = CALLCHAIN_FP; + ret = 0; + } else + pr_err("callchain: No more arguments " + "needed for --call-graph fp\n"); + break; + +#ifdef HAVE_DWARF_UNWIND_SUPPORT + /* Dwarf style */ + } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) { + const unsigned long default_stack_dump_size = 8192; + + ret = 0; + callchain_param.record_mode = CALLCHAIN_DWARF; + callchain_param.dump_size = default_stack_dump_size; + + tok = strtok_r(NULL, ",", &saveptr); + if (tok) { + unsigned long size = 0; + + ret = get_stack_size(tok, &size); + callchain_param.dump_size = size; + } +#endif /* HAVE_DWARF_UNWIND_SUPPORT */ + } else if (!strncmp(name, "lbr", sizeof("lbr"))) { + if (!strtok_r(NULL, ",", &saveptr)) { + callchain_param.record_mode = CALLCHAIN_LBR; + ret = 0; + } else + pr_err("callchain: No more arguments " + "needed for --call-graph lbr\n"); + break; + } else { + pr_err("callchain: Unknown --call-graph option " + "value: %s\n", arg); + break; + } + + } while (0); + + free(buf); + return ret; +} + +static int parse_callchain_mode(const char *value) +{ + if (!strncmp(value, "graph", strlen(value))) { + callchain_param.mode = CHAIN_GRAPH_ABS; + return 0; + } + if (!strncmp(value, "flat", strlen(value))) { + callchain_param.mode = CHAIN_FLAT; + return 0; + } + if (!strncmp(value, "fractal", strlen(value))) { + callchain_param.mode = CHAIN_GRAPH_REL; + return 0; + } + return -1; +} + +static int parse_callchain_order(const char *value) +{ + if (!strncmp(value, "caller", strlen(value))) { + callchain_param.order = ORDER_CALLER; + return 0; + } + if (!strncmp(value, "callee", strlen(value))) { + callchain_param.order = ORDER_CALLEE; + return 0; + } + return -1; +} + +static int parse_callchain_sort_key(const char *value) +{ + if (!strncmp(value, "function", strlen(value))) { + callchain_param.key = CCKEY_FUNCTION; + return 0; + } + if (!strncmp(value, "address", strlen(value))) { + callchain_param.key = CCKEY_ADDRESS; + return 0; + } + if (!strncmp(value, "branch", strlen(value))) { + callchain_param.branch_callstack = 1; + return 0; + } + return -1; +} + +int +parse_callchain_report_opt(const char *arg) +{ + char *tok; + char *endptr; + bool minpcnt_set = false; + + symbol_conf.use_callchain = true; + + if (!arg) + return 0; + + while ((tok = strtok((char *)arg, ",")) != NULL) { + if (!strncmp(tok, "none", strlen(tok))) { + callchain_param.mode = CHAIN_NONE; + symbol_conf.use_callchain = false; + return 0; + } + + if (!parse_callchain_mode(tok) || + !parse_callchain_order(tok) || + !parse_callchain_sort_key(tok)) { + /* parsing ok - move on to the next */ + } else if (!minpcnt_set) { + /* try to get the min percent */ + callchain_param.min_percent = strtod(tok, &endptr); + if (tok == endptr) + return -1; + minpcnt_set = true; + } else { + /* try print limit at last */ + callchain_param.print_limit = strtoul(tok, &endptr, 0); + if (tok == endptr) + return -1; + } + + arg = NULL; + } + + if (callchain_register_param(&callchain_param) < 0) { + pr_err("Can't register callchain params\n"); + return -1; + } + return 0; +} + +int perf_callchain_config(const char *var, const char *value) +{ + char *endptr; + + if (prefixcmp(var, "call-graph.")) + return 0; + var += sizeof("call-graph.") - 1; + + if (!strcmp(var, "record-mode")) + return parse_callchain_record_opt(value); +#ifdef HAVE_DWARF_UNWIND_SUPPORT + if (!strcmp(var, "dump-size")) { + unsigned long 
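Note the matching idiom used by parse_callchain_mode() and friends above: strncmp(value, "graph", strlen(value)) limits the comparison to the user's input length, so any non-empty prefix of the keyword is accepted ("fra" selects fractal). A quick standalone check of that behavior:

#include <stdio.h>
#include <string.h>

static int matches(const char *value, const char *keyword)
{
	return !strncmp(value, keyword, strlen(value));
}

int main(void)
{
	printf("%d %d %d\n",
	       matches("fractal", "fractal"),	/* 1: exact match */
	       matches("fra", "fractal"),	/* 1: a prefix is enough */
	       matches("fractals", "fractal"));	/* 0: longer than the keyword */
	return 0;
}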
size = 0; + int ret; + + ret = get_stack_size(value, &size); + callchain_param.dump_size = size; + + return ret; + } +#endif + if (!strcmp(var, "print-type")) + return parse_callchain_mode(value); + if (!strcmp(var, "order")) + return parse_callchain_order(value); + if (!strcmp(var, "sort-key")) + return parse_callchain_sort_key(value); + if (!strcmp(var, "threshold")) { + callchain_param.min_percent = strtod(value, &endptr); + if (value == endptr) + return -1; + } + if (!strcmp(var, "print-limit")) { + callchain_param.print_limit = strtod(value, &endptr); + if (value == endptr) + return -1; + } + + return 0; +} + +static void +rb_insert_callchain(struct rb_root *root, struct callchain_node *chain, + enum chain_mode mode) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct callchain_node *rnode; + u64 chain_cumul = callchain_cumul_hits(chain); + + while (*p) { + u64 rnode_cumul; + + parent = *p; + rnode = rb_entry(parent, struct callchain_node, rb_node); + rnode_cumul = callchain_cumul_hits(rnode); + + switch (mode) { + case CHAIN_FLAT: + if (rnode->hit < chain->hit) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + break; + case CHAIN_GRAPH_ABS: /* Falldown */ + case CHAIN_GRAPH_REL: + if (rnode_cumul < chain_cumul) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + break; + case CHAIN_NONE: + default: + break; + } + } + + rb_link_node(&chain->rb_node, parent, p); + rb_insert_color(&chain->rb_node, root); +} + +static void +__sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node, + u64 min_hit) +{ + struct rb_node *n; + struct callchain_node *child; + + n = rb_first(&node->rb_root_in); + while (n) { + child = rb_entry(n, struct callchain_node, rb_node_in); + n = rb_next(n); + + __sort_chain_flat(rb_root, child, min_hit); + } + + if (node->hit && node->hit >= min_hit) + rb_insert_callchain(rb_root, node, CHAIN_FLAT); +} + +/* + * Once we get every callchains from the stream, we can now + * sort them by hit + */ +static void +sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root, + u64 min_hit, struct callchain_param *param __maybe_unused) +{ + __sort_chain_flat(rb_root, &root->node, min_hit); +} + +static void __sort_chain_graph_abs(struct callchain_node *node, + u64 min_hit) +{ + struct rb_node *n; + struct callchain_node *child; + + node->rb_root = RB_ROOT; + n = rb_first(&node->rb_root_in); + + while (n) { + child = rb_entry(n, struct callchain_node, rb_node_in); + n = rb_next(n); + + __sort_chain_graph_abs(child, min_hit); + if (callchain_cumul_hits(child) >= min_hit) + rb_insert_callchain(&node->rb_root, child, + CHAIN_GRAPH_ABS); + } +} + +static void +sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root, + u64 min_hit, struct callchain_param *param __maybe_unused) +{ + __sort_chain_graph_abs(&chain_root->node, min_hit); + rb_root->rb_node = chain_root->node.rb_root.rb_node; +} + +static void __sort_chain_graph_rel(struct callchain_node *node, + double min_percent) +{ + struct rb_node *n; + struct callchain_node *child; + u64 min_hit; + + node->rb_root = RB_ROOT; + min_hit = ceil(node->children_hit * min_percent); + + n = rb_first(&node->rb_root_in); + while (n) { + child = rb_entry(n, struct callchain_node, rb_node_in); + n = rb_next(n); + + __sort_chain_graph_rel(child, min_percent); + if (callchain_cumul_hits(child) >= min_hit) + rb_insert_callchain(&node->rb_root, child, + CHAIN_GRAPH_REL); + } +} + +static void +sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root 
*chain_root,
+		     u64 min_hit __maybe_unused, struct callchain_param *param)
+{
+	__sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
+	rb_root->rb_node = chain_root->node.rb_root.rb_node;
+}
+
+int callchain_register_param(struct callchain_param *param)
+{
+	switch (param->mode) {
+	case CHAIN_GRAPH_ABS:
+		param->sort = sort_chain_graph_abs;
+		break;
+	case CHAIN_GRAPH_REL:
+		param->sort = sort_chain_graph_rel;
+		break;
+	case CHAIN_FLAT:
+		param->sort = sort_chain_flat;
+		break;
+	case CHAIN_NONE:
+	default:
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Create a child for a parent. If inherit_children, then the new child
+ * will become the new parent of its parent's children
+ */
+static struct callchain_node *
+create_child(struct callchain_node *parent, bool inherit_children)
+{
+	struct callchain_node *new;
+
+	new = zalloc(sizeof(*new));
+	if (!new) {
+		perror("not enough memory to create child for code path tree");
+		return NULL;
+	}
+	new->parent = parent;
+	INIT_LIST_HEAD(&new->val);
+
+	if (inherit_children) {
+		struct rb_node *n;
+		struct callchain_node *child;
+
+		new->rb_root_in = parent->rb_root_in;
+		parent->rb_root_in = RB_ROOT;
+
+		n = rb_first(&new->rb_root_in);
+		while (n) {
+			child = rb_entry(n, struct callchain_node, rb_node_in);
+			child->parent = new;
+			n = rb_next(n);
+		}
+
+		/* make it the first child */
+		rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
+		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
+	}
+
+	return new;
+}
+
+
+/*
+ * Fill the node with callchain values
+ */
+static void
+fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
+{
+	struct callchain_cursor_node *cursor_node;
+
+	node->val_nr = cursor->nr - cursor->pos;
+	if (!node->val_nr)
+		pr_warning("Warning: empty node in callchain tree\n");
+
+	cursor_node = callchain_cursor_current(cursor);
+
+	while (cursor_node) {
+		struct callchain_list *call;
+
+		call = zalloc(sizeof(*call));
+		if (!call) {
+			perror("not enough memory for the code path tree");
+			return;
+		}
+		call->ip = cursor_node->ip;
+		call->ms.sym = cursor_node->sym;
+		call->ms.map = cursor_node->map;
+		list_add_tail(&call->list, &node->val);
+
+		callchain_cursor_advance(cursor);
+		cursor_node = callchain_cursor_current(cursor);
+	}
+}
+
+static struct callchain_node *
+add_child(struct callchain_node *parent,
+	  struct callchain_cursor *cursor,
+	  u64 period)
+{
+	struct callchain_node *new;
+
+	new = create_child(parent, false);
+	fill_node(new, cursor);
+
+	new->children_hit = 0;
+	new->hit = period;
+	return new;
+}
+
+static s64 match_chain(struct callchain_cursor_node *node,
+		       struct callchain_list *cnode)
+{
+	struct symbol *sym = node->sym;
+
+	if (cnode->ms.sym && sym &&
+	    callchain_param.key == CCKEY_FUNCTION)
+		return cnode->ms.sym->start - sym->start;
+	else
+		return cnode->ip - node->ip;
+}
+
+/*
+ * Split the parent in two parts (a new child is created) and
+ * give a part of its callchain to the created child.
+ * Then create another child to host the given callchain of the new branch
+ */
+static void
+split_add_child(struct callchain_node *parent,
+		struct callchain_cursor *cursor,
+		struct callchain_list *to_split,
+		u64 idx_parents, u64 idx_local, u64 period)
+{
+	struct callchain_node *new;
+	struct list_head *old_tail;
+	unsigned int idx_total = idx_parents + idx_local;
+
+	/* split */
+	new = create_child(parent, true);
+
+	/* split the callchain and move a part to the new child */
+	old_tail = parent->val.prev;
+	list_del_range(&to_split->list, old_tail);
+	new->val.next = &to_split->list;
+	new->val.prev = old_tail;
+	to_split->list.prev = &new->val;
+	old_tail->next = &new->val;
+
+	/* split the hits */
+	new->hit = parent->hit;
+	new->children_hit = parent->children_hit;
+	parent->children_hit = callchain_cumul_hits(new);
+	new->val_nr = parent->val_nr - idx_local;
+	parent->val_nr = idx_local;
+
+	/* create a new child for the new branch if any */
+	if (idx_total < cursor->nr) {
+		struct callchain_node *first;
+		struct callchain_list *cnode;
+		struct callchain_cursor_node *node;
+		struct rb_node *p, **pp;
+
+		parent->hit = 0;
+		parent->children_hit += period;
+
+		node = callchain_cursor_current(cursor);
+		new = add_child(parent, cursor, period);
+
+		/*
+		 * This is the second child since we moved the parent's
+		 * children to the new (first) child above.
+		 */
+		p = parent->rb_root_in.rb_node;
+		first = rb_entry(p, struct callchain_node, rb_node_in);
+		cnode = list_first_entry(&first->val, struct callchain_list,
+					 list);
+
+		if (match_chain(node, cnode) < 0)
+			pp = &p->rb_left;
+		else
+			pp = &p->rb_right;
+
+		rb_link_node(&new->rb_node_in, p, pp);
+		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
+	} else {
+		parent->hit = period;
+	}
+}
+
+static int
+append_chain(struct callchain_node *root,
+	     struct callchain_cursor *cursor,
+	     u64 period);
+
+static void
+append_chain_children(struct callchain_node *root,
+		      struct callchain_cursor *cursor,
+		      u64 period)
+{
+	struct callchain_node *rnode;
+	struct callchain_cursor_node *node;
+	struct rb_node **p = &root->rb_root_in.rb_node;
+	struct rb_node *parent = NULL;
+
+	node = callchain_cursor_current(cursor);
+	if (!node)
+		return;
+
+	/* lookup in the children */
+	while (*p) {
+		s64 ret;
+
+		parent = *p;
+		rnode = rb_entry(parent, struct callchain_node, rb_node_in);
+
+		/* If at least the first entry matches, relay to the children */
+		ret = append_chain(rnode, cursor, period);
+		if (ret == 0)
+			goto inc_children_hit;
+
+		if (ret < 0)
+			p = &parent->rb_left;
+		else
+			p = &parent->rb_right;
+	}
+	/* nothing in children, add to the current node */
+	rnode = add_child(root, cursor, period);
+	rb_link_node(&rnode->rb_node_in, parent, p);
+	rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);
+
+inc_children_hit:
+	root->children_hit += period;
+}
+
+static int
+append_chain(struct callchain_node *root,
+	     struct callchain_cursor *cursor,
+	     u64 period)
+{
+	struct callchain_list *cnode;
+	u64 start = cursor->pos;
+	bool found = false;
+	u64 matches;
+	int cmp = 0;
+
+	/*
+	 * Lookup in the current node
+	 * If we have a symbol, then compare the start to match
+	 * anywhere inside a function, unless function
+	 * mode is disabled.
+	 */
+	list_for_each_entry(cnode, &root->val, list) {
+		struct callchain_cursor_node *node;
+
+		node = callchain_cursor_current(cursor);
+		if (!node)
+			break;
+
+		cmp = match_chain(node, cnode);
+		if (cmp)
+			break;
+
+		found = true;
+
+		callchain_cursor_advance(cursor);
+	}
+
+	/* no match: relay to the parent */
+	if (!found) {
+		WARN_ONCE(!cmp, "Chain comparison error\n");
+		return cmp;
+	}
+
+	matches = cursor->pos - start;
+
+	/* we match only a part of the node. Split it and add the new chain */
+	if (matches < root->val_nr) {
+		split_add_child(root, cursor, cnode, start, matches, period);
+		return 0;
+	}
+
+	/* we match 100% of the path, increment the hit */
+	if (matches == root->val_nr && cursor->pos == cursor->nr) {
+		root->hit += period;
+		return 0;
+	}
+
+	/* We match the node and still have a part remaining */
+	append_chain_children(root, cursor, period);
+
+	return 0;
+}
+
+int callchain_append(struct callchain_root *root,
+		     struct callchain_cursor *cursor,
+		     u64 period)
+{
+	if (!cursor->nr)
+		return 0;
+
+	callchain_cursor_commit(cursor);
+
+	append_chain_children(&root->node, cursor, period);
+
+	if (cursor->nr > root->max_depth)
+		root->max_depth = cursor->nr;
+
+	return 0;
+}
+
+static int
+merge_chain_branch(struct callchain_cursor *cursor,
+		   struct callchain_node *dst, struct callchain_node *src)
+{
+	struct callchain_cursor_node **old_last = cursor->last;
+	struct callchain_node *child;
+	struct callchain_list *list, *next_list;
+	struct rb_node *n;
+	int old_pos = cursor->nr;
+	int err = 0;
+
+	list_for_each_entry_safe(list, next_list, &src->val, list) {
+		callchain_cursor_append(cursor, list->ip,
+					list->ms.map, list->ms.sym);
+		list_del(&list->list);
+		free(list);
+	}
+
+	if (src->hit) {
+		callchain_cursor_commit(cursor);
+		append_chain_children(dst, cursor, src->hit);
+	}
+
+	n = rb_first(&src->rb_root_in);
+	while (n) {
+		child = container_of(n, struct callchain_node, rb_node_in);
+		n = rb_next(n);
+		rb_erase(&child->rb_node_in, &src->rb_root_in);
+
+		err = merge_chain_branch(cursor, dst, child);
+		if (err)
+			break;
+
+		free(child);
+	}
+
+	cursor->nr = old_pos;
+	cursor->last = old_last;
+
+	return err;
+}
+
+int callchain_merge(struct callchain_cursor *cursor,
+		    struct callchain_root *dst, struct callchain_root *src)
+{
+	return merge_chain_branch(cursor, &dst->node, &src->node);
+}
+
+int callchain_cursor_append(struct callchain_cursor *cursor,
+			    u64 ip, struct map *map, struct symbol *sym)
+{
+	struct callchain_cursor_node *node = *cursor->last;
+
+	if (!node) {
+		node = calloc(1, sizeof(*node));
+		if (!node)
+			return -ENOMEM;
+
+		*cursor->last = node;
+	}
+
+	node->ip = ip;
+	node->map = map;
+	node->sym = sym;
+
+	cursor->nr++;
+
+	cursor->last = &node->next;
+
+	return 0;
+}
+
+int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent,
+			      struct perf_evsel *evsel, struct addr_location *al,
+			      int max_stack)
+{
+	if (sample->callchain == NULL)
+		return 0;
+
+	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
+	    sort__has_parent) {
+		return thread__resolve_callchain(al->thread, evsel, sample,
+						 parent, al, max_stack);
+	}
+	return 0;
+}
+
+int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
+{
+	if (!symbol_conf.use_callchain || sample->callchain == NULL)
+		return 0;
+	return callchain_append(he->callchain, &callchain_cursor, sample->period);
+}
+
+int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
+			bool hide_unresolved)
+{
+	al->map = node->map;
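match_chain() above is the single ordering rule for the whole radix tree: with CCKEY_FUNCTION and symbols on both sides it compares function start addresses (so any two hits inside one function collapse into one node), otherwise it falls back to raw instruction pointers. That decision isolated in a standalone sketch (the names here are invented):

#include <stdio.h>
#include <stdint.h>

struct sym { uint64_t start; };

static int64_t match(uint64_t node_ip, const struct sym *node_sym,
		     uint64_t list_ip, const struct sym *list_sym,
		     int key_is_function)
{
	if (node_sym && list_sym && key_is_function)
		return (int64_t)(list_sym->start - node_sym->start);
	return (int64_t)(list_ip - node_ip);
}

int main(void)
{
	struct sym f = { 0x1000 };

	/* two hits inside the same function: equal in function mode ... */
	printf("%lld\n", (long long)match(0x1004, &f, 0x1010, &f, 1));	/* 0 */
	/* ... but distinct when keyed by address */
	printf("%lld\n", (long long)match(0x1004, &f, 0x1010, &f, 0));	/* 12 */
	return 0;
}

+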
al->sym = node->sym; + if (node->map) + al->addr = node->map->map_ip(node->map, node->ip); + else + al->addr = node->ip; + + if (al->sym == NULL) { + if (hide_unresolved) + return 0; + if (al->map == NULL) + goto out; + } + + if (al->map->groups == &al->machine->kmaps) { + if (machine__is_host(al->machine)) { + al->cpumode = PERF_RECORD_MISC_KERNEL; + al->level = 'k'; + } else { + al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL; + al->level = 'g'; + } + } else { + if (machine__is_host(al->machine)) { + al->cpumode = PERF_RECORD_MISC_USER; + al->level = '.'; + } else if (perf_guest) { + al->cpumode = PERF_RECORD_MISC_GUEST_USER; + al->level = 'u'; + } else { + al->cpumode = PERF_RECORD_MISC_HYPERVISOR; + al->level = 'H'; + } + } + +out: + return 1; +} + +char *callchain_list__sym_name(struct callchain_list *cl, + char *bf, size_t bfsize, bool show_dso) +{ + int printed; + + if (cl->ms.sym) { + if (callchain_param.key == CCKEY_ADDRESS && + cl->ms.map && !cl->srcline) + cl->srcline = get_srcline(cl->ms.map->dso, + map__rip_2objdump(cl->ms.map, + cl->ip), + cl->ms.sym, false); + if (cl->srcline) + printed = scnprintf(bf, bfsize, "%s %s", + cl->ms.sym->name, cl->srcline); + else + printed = scnprintf(bf, bfsize, "%s", cl->ms.sym->name); + } else + printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip); + + if (show_dso) + scnprintf(bf + printed, bfsize - printed, " %s", + cl->ms.map ? + cl->ms.map->dso->short_name : + "unknown"); + + return bf; +} + +static void free_callchain_node(struct callchain_node *node) +{ + struct callchain_list *list, *tmp; + struct callchain_node *child; + struct rb_node *n; + + list_for_each_entry_safe(list, tmp, &node->val, list) { + list_del(&list->list); + free(list); + } + + n = rb_first(&node->rb_root_in); + while (n) { + child = container_of(n, struct callchain_node, rb_node_in); + n = rb_next(n); + rb_erase(&child->rb_node_in, &node->rb_root_in); + + free_callchain_node(child); + free(child); + } +} + +void free_callchain(struct callchain_root *root) +{ + if (!symbol_conf.use_callchain) + return; + + free_callchain_node(&root->node); +} diff --git a/kernel/tools/perf/util/callchain.h b/kernel/tools/perf/util/callchain.h new file mode 100644 index 000000000..6033a0a21 --- /dev/null +++ b/kernel/tools/perf/util/callchain.h @@ -0,0 +1,204 @@ +#ifndef __PERF_CALLCHAIN_H +#define __PERF_CALLCHAIN_H + +#include "../perf.h" +#include +#include +#include "event.h" +#include "symbol.h" + +enum perf_call_graph_mode { + CALLCHAIN_NONE, + CALLCHAIN_FP, + CALLCHAIN_DWARF, + CALLCHAIN_LBR, + CALLCHAIN_MAX +}; + +enum chain_mode { + CHAIN_NONE, + CHAIN_FLAT, + CHAIN_GRAPH_ABS, + CHAIN_GRAPH_REL +}; + +enum chain_order { + ORDER_CALLER, + ORDER_CALLEE +}; + +struct callchain_node { + struct callchain_node *parent; + struct list_head val; + struct rb_node rb_node_in; /* to insert nodes in an rbtree */ + struct rb_node rb_node; /* to sort nodes in an output tree */ + struct rb_root rb_root_in; /* input tree of children */ + struct rb_root rb_root; /* sorted output tree of children */ + unsigned int val_nr; + u64 hit; + u64 children_hit; +}; + +struct callchain_root { + u64 max_depth; + struct callchain_node node; +}; + +struct callchain_param; + +typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *, + u64, struct callchain_param *); + +enum chain_key { + CCKEY_FUNCTION, + CCKEY_ADDRESS +}; + +struct callchain_param { + bool enabled; + enum perf_call_graph_mode record_mode; + u32 dump_size; + enum chain_mode mode; + u32 print_limit; + double min_percent; + 
sort_chain_func_t sort;
+	enum chain_order order;
+	enum chain_key key;
+	bool branch_callstack;
+};
+
+extern struct callchain_param callchain_param;
+
+struct callchain_list {
+	u64 ip;
+	struct map_symbol ms;
+	char *srcline;
+	struct list_head list;
+};
+
+/*
+ * A callchain cursor is a singly linked list that
+ * lets one feed a callchain progressively.
+ * It keeps persistently allocated entries to minimize
+ * allocations.
+ */
+struct callchain_cursor_node {
+	u64 ip;
+	struct map *map;
+	struct symbol *sym;
+	struct callchain_cursor_node *next;
+};
+
+struct callchain_cursor {
+	u64 nr;
+	struct callchain_cursor_node *first;
+	struct callchain_cursor_node **last;
+	u64 pos;
+	struct callchain_cursor_node *curr;
+};
+
+extern __thread struct callchain_cursor callchain_cursor;
+
+static inline void callchain_init(struct callchain_root *root)
+{
+	INIT_LIST_HEAD(&root->node.val);
+
+	root->node.parent = NULL;
+	root->node.hit = 0;
+	root->node.children_hit = 0;
+	root->node.rb_root_in = RB_ROOT;
+	root->max_depth = 0;
+}
+
+static inline u64 callchain_cumul_hits(struct callchain_node *node)
+{
+	return node->hit + node->children_hit;
+}
+
+int callchain_register_param(struct callchain_param *param);
+int callchain_append(struct callchain_root *root,
+		     struct callchain_cursor *cursor,
+		     u64 period);
+
+int callchain_merge(struct callchain_cursor *cursor,
+		    struct callchain_root *dst, struct callchain_root *src);
+
+/*
+ * Initialize a cursor before adding entries inside, but keep
+ * the previously allocated entries as a cache.
+ */
+static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
+{
+	cursor->nr = 0;
+	cursor->last = &cursor->first;
+}
+
+int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
+			    struct map *map, struct symbol *sym);
+
+/* Close a cursor writing session.
Initialize for the reader */ +static inline void callchain_cursor_commit(struct callchain_cursor *cursor) +{ + cursor->curr = cursor->first; + cursor->pos = 0; +} + +/* Cursor reading iteration helpers */ +static inline struct callchain_cursor_node * +callchain_cursor_current(struct callchain_cursor *cursor) +{ + if (cursor->pos == cursor->nr) + return NULL; + + return cursor->curr; +} + +static inline void callchain_cursor_advance(struct callchain_cursor *cursor) +{ + cursor->curr = cursor->curr->next; + cursor->pos++; +} + +struct option; +struct hist_entry; + +int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset); +int record_callchain_opt(const struct option *opt, const char *arg, int unset); + +int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent, + struct perf_evsel *evsel, struct addr_location *al, + int max_stack); +int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample); +int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node, + bool hide_unresolved); + +extern const char record_callchain_help[]; +int parse_callchain_record_opt(const char *arg); +int parse_callchain_report_opt(const char *arg); +int perf_callchain_config(const char *var, const char *value); + +static inline void callchain_cursor_snapshot(struct callchain_cursor *dest, + struct callchain_cursor *src) +{ + *dest = *src; + + dest->first = src->curr; + dest->nr -= src->pos; +} + +#ifdef HAVE_SKIP_CALLCHAIN_IDX +extern int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain); +#else +static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused, + struct ip_callchain *chain __maybe_unused) +{ + return -1; +} +#endif + +char *callchain_list__sym_name(struct callchain_list *cl, + char *bf, size_t bfsize, bool show_dso); + +void free_callchain(struct callchain_root *root); + +#endif /* __PERF_CALLCHAIN_H */ diff --git a/kernel/tools/perf/util/cgroup.c b/kernel/tools/perf/util/cgroup.c new file mode 100644 index 000000000..88f7be399 --- /dev/null +++ b/kernel/tools/perf/util/cgroup.c @@ -0,0 +1,177 @@ +#include "util.h" +#include "../perf.h" +#include "parse-options.h" +#include "evsel.h" +#include "cgroup.h" +#include "evlist.h" + +int nr_cgroups; + +static int +cgroupfs_find_mountpoint(char *buf, size_t maxlen) +{ + FILE *fp; + char mountpoint[PATH_MAX + 1], tokens[PATH_MAX + 1], type[PATH_MAX + 1]; + char *token, *saved_ptr = NULL; + int found = 0; + + fp = fopen("/proc/mounts", "r"); + if (!fp) + return -1; + + /* + * in order to handle split hierarchy, we need to scan /proc/mounts + * and inspect every cgroupfs mount point to find one that has + * perf_event subsystem + */ + while (fscanf(fp, "%*s %"STR(PATH_MAX)"s %"STR(PATH_MAX)"s %" + STR(PATH_MAX)"s %*d %*d\n", + mountpoint, type, tokens) == 3) { + + if (!strcmp(type, "cgroup")) { + + token = strtok_r(tokens, ",", &saved_ptr); + + while (token != NULL) { + if (!strcmp(token, "perf_event")) { + found = 1; + break; + } + token = strtok_r(NULL, ",", &saved_ptr); + } + } + if (found) + break; + } + fclose(fp); + if (!found) + return -1; + + if (strlen(mountpoint) < maxlen) { + strcpy(buf, mountpoint); + return 0; + } + return -1; +} + +static int open_cgroup(char *name) +{ + char path[PATH_MAX + 1]; + char mnt[PATH_MAX + 1]; + int fd; + + + if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1)) + return -1; + + snprintf(path, PATH_MAX, "%s/%s", mnt, name); + + fd = open(path, O_RDONLY); + if (fd == -1) + fprintf(stderr, 
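The cursor declared in this header is written and read in two phases: reset, append frames (reusing nodes allocated on earlier rounds), commit, then iterate with current/advance until pos reaches nr. A reduced standalone model of that lifecycle, mirroring callchain_cursor_append() with invented type names:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct cnode { uint64_t ip; struct cnode *next; };
struct cursor { uint64_t nr, pos; struct cnode *first, **last, *curr; };

/* reuses a previously allocated node when one is cached at *last */
static int cursor_append(struct cursor *c, uint64_t ip)
{
	struct cnode *n = *c->last;

	if (!n) {
		n = calloc(1, sizeof(*n));
		if (!n)
			return -1;
		*c->last = n;
	}
	n->ip = ip;
	c->nr++;
	c->last = &n->next;
	return 0;
}

int main(void)
{
	struct cursor c = { .last = &c.first };

	/* write phase: reset, then feed frames innermost first */
	c.nr = 0;
	c.last = &c.first;
	cursor_append(&c, 0x400100);
	cursor_append(&c, 0x400200);

	/* read phase: commit, then walk until pos == nr */
	c.curr = c.first;
	c.pos = 0;
	while (c.pos != c.nr) {
		printf("%#llx\n", (unsigned long long)c.curr->ip);
		c.curr = c.curr->next;
		c.pos++;
	}
	return 0;	/* nodes deliberately kept: a reset would reuse them */
}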
"no access to cgroup %s\n", path); + + return fd; +} + +static int add_cgroup(struct perf_evlist *evlist, char *str) +{ + struct perf_evsel *counter; + struct cgroup_sel *cgrp = NULL; + int n; + /* + * check if cgrp is already defined, if so we reuse it + */ + evlist__for_each(evlist, counter) { + cgrp = counter->cgrp; + if (!cgrp) + continue; + if (!strcmp(cgrp->name, str)) + break; + + cgrp = NULL; + } + + if (!cgrp) { + cgrp = zalloc(sizeof(*cgrp)); + if (!cgrp) + return -1; + + cgrp->name = str; + + cgrp->fd = open_cgroup(str); + if (cgrp->fd == -1) { + free(cgrp); + return -1; + } + } + + /* + * find corresponding event + * if add cgroup N, then need to find event N + */ + n = 0; + evlist__for_each(evlist, counter) { + if (n == nr_cgroups) + goto found; + n++; + } + if (cgrp->refcnt == 0) + free(cgrp); + + return -1; +found: + cgrp->refcnt++; + counter->cgrp = cgrp; + return 0; +} + +void close_cgroup(struct cgroup_sel *cgrp) +{ + if (!cgrp) + return; + + /* XXX: not reentrant */ + if (--cgrp->refcnt == 0) { + close(cgrp->fd); + zfree(&cgrp->name); + free(cgrp); + } +} + +int parse_cgroups(const struct option *opt __maybe_unused, const char *str, + int unset __maybe_unused) +{ + struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; + const char *p, *e, *eos = str + strlen(str); + char *s; + int ret; + + if (list_empty(&evlist->entries)) { + fprintf(stderr, "must define events before cgroups\n"); + return -1; + } + + for (;;) { + p = strchr(str, ','); + e = p ? p : eos; + + /* allow empty cgroups, i.e., skip */ + if (e - str) { + /* termination added */ + s = strndup(str, e - str); + if (!s) + return -1; + ret = add_cgroup(evlist, s); + if (ret) { + free(s); + return -1; + } + } + /* nr_cgroups is increased een for empty cgroups */ + nr_cgroups++; + if (!p) + break; + str = p+1; + } + return 0; +} diff --git a/kernel/tools/perf/util/cgroup.h b/kernel/tools/perf/util/cgroup.h new file mode 100644 index 000000000..89acd6deb --- /dev/null +++ b/kernel/tools/perf/util/cgroup.h @@ -0,0 +1,17 @@ +#ifndef __CGROUP_H__ +#define __CGROUP_H__ + +struct option; + +struct cgroup_sel { + char *name; + int fd; + int refcnt; +}; + + +extern int nr_cgroups; /* number of explicit cgroups defined */ +extern void close_cgroup(struct cgroup_sel *cgrp); +extern int parse_cgroups(const struct option *opt, const char *str, int unset); + +#endif /* __CGROUP_H__ */ diff --git a/kernel/tools/perf/util/cloexec.c b/kernel/tools/perf/util/cloexec.c new file mode 100644 index 000000000..85b523885 --- /dev/null +++ b/kernel/tools/perf/util/cloexec.c @@ -0,0 +1,92 @@ +#include +#include "util.h" +#include "../perf.h" +#include "cloexec.h" +#include "asm/bug.h" +#include "debug.h" + +static unsigned long flag = PERF_FLAG_FD_CLOEXEC; + +int __weak sched_getcpu(void) +{ + errno = ENOSYS; + return -1; +} + +static int perf_flag_probe(void) +{ + /* use 'safest' configuration as used in perf_evsel__fallback() */ + struct perf_event_attr attr = { + .type = PERF_TYPE_SOFTWARE, + .config = PERF_COUNT_SW_CPU_CLOCK, + .exclude_kernel = 1, + }; + int fd; + int err; + int cpu; + pid_t pid = -1; + char sbuf[STRERR_BUFSIZE]; + + cpu = sched_getcpu(); + if (cpu < 0) + cpu = 0; + + /* + * Using -1 for the pid is a workaround to avoid gratuitous jump label + * changes. 
+ */ + while (1) { + /* check cloexec flag */ + fd = sys_perf_event_open(&attr, pid, cpu, -1, + PERF_FLAG_FD_CLOEXEC); + if (fd < 0 && pid == -1 && errno == EACCES) { + pid = 0; + continue; + } + break; + } + err = errno; + + if (fd >= 0) { + close(fd); + return 1; + } + + WARN_ONCE(err != EINVAL && err != EBUSY, + "perf_event_open(..., PERF_FLAG_FD_CLOEXEC) failed with unexpected error %d (%s)\n", + err, strerror_r(err, sbuf, sizeof(sbuf))); + + /* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */ + while (1) { + fd = sys_perf_event_open(&attr, pid, cpu, -1, 0); + if (fd < 0 && pid == -1 && errno == EACCES) { + pid = 0; + continue; + } + break; + } + err = errno; + + if (fd >= 0) + close(fd); + + if (WARN_ONCE(fd < 0 && err != EBUSY, + "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n", + err, strerror_r(err, sbuf, sizeof(sbuf)))) + return -1; + + return 0; +} + +unsigned long perf_event_open_cloexec_flag(void) +{ + static bool probed; + + if (!probed) { + if (perf_flag_probe() <= 0) + flag = 0; + probed = true; + } + + return flag; +} diff --git a/kernel/tools/perf/util/cloexec.h b/kernel/tools/perf/util/cloexec.h new file mode 100644 index 000000000..68888c29b --- /dev/null +++ b/kernel/tools/perf/util/cloexec.h @@ -0,0 +1,12 @@ +#ifndef __PERF_CLOEXEC_H +#define __PERF_CLOEXEC_H + +unsigned long perf_event_open_cloexec_flag(void); + +#ifdef __GLIBC_PREREQ +#if !__GLIBC_PREREQ(2, 6) +extern int sched_getcpu(void) __THROW; +#endif +#endif + +#endif /* __PERF_CLOEXEC_H */ diff --git a/kernel/tools/perf/util/color.c b/kernel/tools/perf/util/color.c new file mode 100644 index 000000000..55355b3d4 --- /dev/null +++ b/kernel/tools/perf/util/color.c @@ -0,0 +1,227 @@ +#include +#include "cache.h" +#include "color.h" +#include + +int perf_use_color_default = -1; + +int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty) +{ + if (value) { + if (!strcasecmp(value, "never")) + return 0; + if (!strcasecmp(value, "always")) + return 1; + if (!strcasecmp(value, "auto")) + goto auto_color; + } + + /* Missing or explicit false to turn off colorization */ + if (!perf_config_bool(var, value)) + return 0; + + /* any normal truth value defaults to 'auto' */ + auto_color: + if (stdout_is_tty < 0) + stdout_is_tty = isatty(1); + if (stdout_is_tty || (pager_in_use() && pager_use_color)) { + char *term = getenv("TERM"); + if (term && strcmp(term, "dumb")) + return 1; + } + return 0; +} + +int perf_color_default_config(const char *var, const char *value, void *cb) +{ + if (!strcmp(var, "color.ui")) { + perf_use_color_default = perf_config_colorbool(var, value, -1); + return 0; + } + + return perf_default_config(var, value, cb); +} + +static int __color_vsnprintf(char *bf, size_t size, const char *color, + const char *fmt, va_list args, const char *trail) +{ + int r = 0; + + /* + * Auto-detect: + */ + if (perf_use_color_default < 0) { + if (isatty(1) || pager_in_use()) + perf_use_color_default = 1; + else + perf_use_color_default = 0; + } + + if (perf_use_color_default && *color) + r += scnprintf(bf, size, "%s", color); + r += vscnprintf(bf + r, size - r, fmt, args); + if (perf_use_color_default && *color) + r += scnprintf(bf + r, size - r, "%s", PERF_COLOR_RESET); + if (trail) + r += scnprintf(bf + r, size - r, "%s", trail); + return r; +} + +static int __color_vfprintf(FILE *fp, const char *color, const char *fmt, + va_list args, const char *trail) +{ + int r = 0; + + /* + * Auto-detect: + */ + if (perf_use_color_default < 0) { + if (isatty(fileno(fp)) || 
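perf_event_open_cloexec_flag() above probes once whether the running kernel accepts PERF_FLAG_FD_CLOEXEC and caches the answer in a static, falling back to 0 on old kernels. The probe-once-and-memoize shape, reduced to a sketch (probe() is a stand-in for the real sys_perf_event_open() test; the flag value is the one from the perf_event uapi):

#include <stdbool.h>
#include <stdio.h>

static int probe(void)
{
	return 1;	/* pretend the kernel accepted the flag */
}

static unsigned long cloexec_flag(void)
{
	static unsigned long flag = 1UL << 3;	/* PERF_FLAG_FD_CLOEXEC */
	static bool probed;

	if (!probed) {
		if (probe() <= 0)
			flag = 0;	/* old kernel: open without the flag */
		probed = true;
	}
	return flag;
}

int main(void)
{
	printf("%#lx\n", cloexec_flag());	/* 0x8 here */
	return 0;
}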
pager_in_use()) + perf_use_color_default = 1; + else + perf_use_color_default = 0; + } + + if (perf_use_color_default && *color) + r += fprintf(fp, "%s", color); + r += vfprintf(fp, fmt, args); + if (perf_use_color_default && *color) + r += fprintf(fp, "%s", PERF_COLOR_RESET); + if (trail) + r += fprintf(fp, "%s", trail); + return r; +} + +int color_vsnprintf(char *bf, size_t size, const char *color, + const char *fmt, va_list args) +{ + return __color_vsnprintf(bf, size, color, fmt, args, NULL); +} + +int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args) +{ + return __color_vfprintf(fp, color, fmt, args, NULL); +} + +int color_snprintf(char *bf, size_t size, const char *color, + const char *fmt, ...) +{ + va_list args; + int r; + + va_start(args, fmt); + r = color_vsnprintf(bf, size, color, fmt, args); + va_end(args); + return r; +} + +int color_fprintf(FILE *fp, const char *color, const char *fmt, ...) +{ + va_list args; + int r; + + va_start(args, fmt); + r = color_vfprintf(fp, color, fmt, args); + va_end(args); + return r; +} + +int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...) +{ + va_list args; + int r; + va_start(args, fmt); + r = __color_vfprintf(fp, color, fmt, args, "\n"); + va_end(args); + return r; +} + +/* + * This function splits the buffer by newlines and colors the lines individually. + * + * Returns 0 on success. + */ +int color_fwrite_lines(FILE *fp, const char *color, + size_t count, const char *buf) +{ + if (!*color) + return fwrite(buf, count, 1, fp) != 1; + + while (count) { + char *p = memchr(buf, '\n', count); + + if (p != buf && (fputs(color, fp) < 0 || + fwrite(buf, p ? (size_t)(p - buf) : count, 1, fp) != 1 || + fputs(PERF_COLOR_RESET, fp) < 0)) + return -1; + if (!p) + return 0; + if (fputc('\n', fp) < 0) + return -1; + count -= p + 1 - buf; + buf = p + 1; + } + return 0; +} + +const char *get_percent_color(double percent) +{ + const char *color = PERF_COLOR_NORMAL; + + /* + * We color high-overhead entries in red, mid-overhead + * entries in green - and keep the low overhead places + * normal: + */ + if (fabs(percent) >= MIN_RED) + color = PERF_COLOR_RED; + else { + if (fabs(percent) > MIN_GREEN) + color = PERF_COLOR_GREEN; + } + return color; +} + +int percent_color_fprintf(FILE *fp, const char *fmt, double percent) +{ + int r; + const char *color; + + color = get_percent_color(percent); + r = color_fprintf(fp, color, fmt, percent); + + return r; +} + +int value_color_snprintf(char *bf, size_t size, const char *fmt, double value) +{ + const char *color = get_percent_color(value); + return color_snprintf(bf, size, color, fmt, value); +} + +int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...) +{ + va_list args; + double percent; + + va_start(args, fmt); + percent = va_arg(args, double); + va_end(args); + return value_color_snprintf(bf, size, fmt, percent); +} + +int percent_color_len_snprintf(char *bf, size_t size, const char *fmt, ...) 
+{ + va_list args; + int len; + double percent; + const char *color; + + va_start(args, fmt); + len = va_arg(args, int); + percent = va_arg(args, double); + va_end(args); + + color = get_percent_color(percent); + return color_snprintf(bf, size, color, fmt, len, percent); +} diff --git a/kernel/tools/perf/util/color.h b/kernel/tools/perf/util/color.h new file mode 100644 index 000000000..38146f922 --- /dev/null +++ b/kernel/tools/perf/util/color.h @@ -0,0 +1,46 @@ +#ifndef __PERF_COLOR_H +#define __PERF_COLOR_H + +/* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */ +#define COLOR_MAXLEN 24 + +#define PERF_COLOR_NORMAL "" +#define PERF_COLOR_RESET "\033[m" +#define PERF_COLOR_BOLD "\033[1m" +#define PERF_COLOR_RED "\033[31m" +#define PERF_COLOR_GREEN "\033[32m" +#define PERF_COLOR_YELLOW "\033[33m" +#define PERF_COLOR_BLUE "\033[34m" +#define PERF_COLOR_MAGENTA "\033[35m" +#define PERF_COLOR_CYAN "\033[36m" +#define PERF_COLOR_BG_RED "\033[41m" + +#define MIN_GREEN 0.5 +#define MIN_RED 5.0 + +/* + * This variable stores the value of color.ui + */ +extern int perf_use_color_default; + + +/* + * Use this instead of perf_default_config if you need the value of color.ui. + */ +int perf_color_default_config(const char *var, const char *value, void *cb); + +int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty); +int color_vsnprintf(char *bf, size_t size, const char *color, + const char *fmt, va_list args); +int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args); +int color_fprintf(FILE *fp, const char *color, const char *fmt, ...); +int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...); +int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...); +int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf); +int value_color_snprintf(char *bf, size_t size, const char *fmt, double value); +int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...); +int percent_color_len_snprintf(char *bf, size_t size, const char *fmt, ...); +int percent_color_fprintf(FILE *fp, const char *fmt, double percent); +const char *get_percent_color(double percent); + +#endif /* __PERF_COLOR_H */ diff --git a/kernel/tools/perf/util/comm.c b/kernel/tools/perf/util/comm.c new file mode 100644 index 000000000..b2bb59df6 --- /dev/null +++ b/kernel/tools/perf/util/comm.c @@ -0,0 +1,125 @@ +#include "comm.h" +#include "util.h" +#include <stdlib.h> +#include <stdio.h> + +struct comm_str { + char *str; + struct rb_node rb_node; + int ref; +}; + +/* Should perhaps be moved to struct machine */ +static struct rb_root comm_str_root; + +static void comm_str__get(struct comm_str *cs) +{ + cs->ref++; +} + +static void comm_str__put(struct comm_str *cs) +{ + if (!--cs->ref) { + rb_erase(&cs->rb_node, &comm_str_root); + zfree(&cs->str); + free(cs); + } +} + +static struct comm_str *comm_str__alloc(const char *str) +{ + struct comm_str *cs; + + cs = zalloc(sizeof(*cs)); + if (!cs) + return NULL; + + cs->str = strdup(str); + if (!cs->str) { + free(cs); + return NULL; + } + + return cs; +} + +static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct comm_str *iter, *new; + int cmp; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct comm_str, rb_node); + + cmp = strcmp(str, iter->str); + if (!cmp) + return iter; + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + new = comm_str__alloc(str);
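+ /* + * str is not in the tree yet: link the node just allocated at the + * position where the walk above ended. New nodes start with + * ref == 0; users take references via comm_str__get(). + */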
+ if (!new) + return NULL; + + rb_link_node(&new->rb_node, parent, p); + rb_insert_color(&new->rb_node, root); + + return new; +} + +struct comm *comm__new(const char *str, u64 timestamp, bool exec) +{ + struct comm *comm = zalloc(sizeof(*comm)); + + if (!comm) + return NULL; + + comm->start = timestamp; + comm->exec = exec; + + comm->comm_str = comm_str__findnew(str, &comm_str_root); + if (!comm->comm_str) { + free(comm); + return NULL; + } + + comm_str__get(comm->comm_str); + + return comm; +} + +int comm__override(struct comm *comm, const char *str, u64 timestamp, bool exec) +{ + struct comm_str *new, *old = comm->comm_str; + + new = comm_str__findnew(str, &comm_str_root); + if (!new) + return -ENOMEM; + + comm_str__get(new); + comm_str__put(old); + comm->comm_str = new; + comm->start = timestamp; + if (exec) + comm->exec = true; + + return 0; +} + +void comm__free(struct comm *comm) +{ + comm_str__put(comm->comm_str); + free(comm); +} + +const char *comm__str(const struct comm *comm) +{ + return comm->comm_str->str; +} diff --git a/kernel/tools/perf/util/comm.h b/kernel/tools/perf/util/comm.h new file mode 100644 index 000000000..71c9c3934 --- /dev/null +++ b/kernel/tools/perf/util/comm.h @@ -0,0 +1,27 @@ +#ifndef __PERF_COMM_H +#define __PERF_COMM_H + +#include "../perf.h" +#include <linux/rbtree.h> +#include <linux/list.h> + +struct comm_str; + +struct comm { + struct comm_str *comm_str; + u64 start; + struct list_head list; + bool exec; + union { /* Tool specific area */ + void *priv; + u64 db_id; + }; +}; + +void comm__free(struct comm *comm); +struct comm *comm__new(const char *str, u64 timestamp, bool exec); +const char *comm__str(const struct comm *comm); +int comm__override(struct comm *comm, const char *str, u64 timestamp, + bool exec); + +#endif /* __PERF_COMM_H */ diff --git a/kernel/tools/perf/util/config.c b/kernel/tools/perf/util/config.c new file mode 100644 index 000000000..e18f653cd --- /dev/null +++ b/kernel/tools/perf/util/config.c @@ -0,0 +1,564 @@ +/* + * config.c + * + * Helper functions for parsing config items. + * Originally copied from GIT source.
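+ * + * The accepted syntax is the gitconfig one, e.g.: + * + * [section "subsection"] + * key = value ; comment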
+ * + * Copyright (C) Linus Torvalds, 2005 + * Copyright (C) Johannes Schindelin, 2005 + * + */ +#include "util.h" +#include "cache.h" +#include "exec_cmd.h" +#include "util/hist.h" /* perf_hist_config */ + +#define MAXNAME (256) + +#define DEBUG_CACHE_DIR ".debug" + + +char buildid_dir[MAXPATHLEN]; /* root dir for buildid, binary cache */ + +static FILE *config_file; +static const char *config_file_name; +static int config_linenr; +static int config_file_eof; + +static const char *config_exclusive_filename; + +static int get_next_char(void) +{ + int c; + FILE *f; + + c = '\n'; + if ((f = config_file) != NULL) { + c = fgetc(f); + if (c == '\r') { + /* DOS like systems */ + c = fgetc(f); + if (c != '\n') { + ungetc(c, f); + c = '\r'; + } + } + if (c == '\n') + config_linenr++; + if (c == EOF) { + config_file_eof = 1; + c = '\n'; + } + } + return c; +} + +static char *parse_value(void) +{ + static char value[1024]; + int quote = 0, comment = 0, space = 0; + size_t len = 0; + + for (;;) { + int c = get_next_char(); + + if (len >= sizeof(value) - 1) + return NULL; + if (c == '\n') { + if (quote) + return NULL; + value[len] = 0; + return value; + } + if (comment) + continue; + if (isspace(c) && !quote) { + space = 1; + continue; + } + if (!quote) { + if (c == ';' || c == '#') { + comment = 1; + continue; + } + } + if (space) { + if (len) + value[len++] = ' '; + space = 0; + } + if (c == '\\') { + c = get_next_char(); + switch (c) { + case '\n': + continue; + case 't': + c = '\t'; + break; + case 'b': + c = '\b'; + break; + case 'n': + c = '\n'; + break; + /* Some characters escape as themselves */ + case '\\': case '"': + break; + /* Reject unknown escape sequences */ + default: + return NULL; + } + value[len++] = c; + continue; + } + if (c == '"') { + quote = 1-quote; + continue; + } + value[len++] = c; + } +} + +static inline int iskeychar(int c) +{ + return isalnum(c) || c == '-' || c == '_'; +} + +static int get_value(config_fn_t fn, void *data, char *name, unsigned int len) +{ + int c; + char *value; + + /* Get the full name */ + for (;;) { + c = get_next_char(); + if (config_file_eof) + break; + if (!iskeychar(c)) + break; + name[len++] = c; + if (len >= MAXNAME) + return -1; + } + name[len] = 0; + while (c == ' ' || c == '\t') + c = get_next_char(); + + value = NULL; + if (c != '\n') { + if (c != '=') + return -1; + value = parse_value(); + if (!value) + return -1; + } + return fn(name, value, data); +} + +static int get_extended_base_var(char *name, int baselen, int c) +{ + do { + if (c == '\n') + return -1; + c = get_next_char(); + } while (isspace(c)); + + /* We require the format to be '[base "extension"]' */ + if (c != '"') + return -1; + name[baselen++] = '.'; + + for (;;) { + int ch = get_next_char(); + + if (ch == '\n') + return -1; + if (ch == '"') + break; + if (ch == '\\') { + ch = get_next_char(); + if (ch == '\n') + return -1; + } + name[baselen++] = ch; + if (baselen > MAXNAME / 2) + return -1; + } + + /* Final ']' */ + if (get_next_char() != ']') + return -1; + return baselen; +} + +static int get_base_var(char *name) +{ + int baselen = 0; + + for (;;) { + int c = get_next_char(); + if (config_file_eof) + return -1; + if (c == ']') + return baselen; + if (isspace(c)) + return get_extended_base_var(name, baselen, c); + if (!iskeychar(c) && c != '.') + return -1; + if (baselen > MAXNAME / 2) + return -1; + name[baselen++] = tolower(c); + } +} + +static int perf_parse_file(config_fn_t fn, void *data) +{ + int comment = 0; + int baselen = 0; + static char var[MAXNAME]; + + /* 
U+FEFF Byte Order Mark in UTF8 */ + static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf"; + const unsigned char *bomptr = utf8_bom; + + for (;;) { + int line, c = get_next_char(); + + if (bomptr && *bomptr) { + /* We are at the file beginning; skip UTF8-encoded BOM + * if present. Sane editors won't put this in on their + * own, but e.g. Windows Notepad will do it happily. */ + if ((unsigned char) c == *bomptr) { + bomptr++; + continue; + } else { + /* Do not tolerate partial BOM. */ + if (bomptr != utf8_bom) + break; + /* No BOM at file beginning. Cool. */ + bomptr = NULL; + } + } + if (c == '\n') { + if (config_file_eof) + return 0; + comment = 0; + continue; + } + if (comment || isspace(c)) + continue; + if (c == '#' || c == ';') { + comment = 1; + continue; + } + if (c == '[') { + baselen = get_base_var(var); + if (baselen <= 0) + break; + var[baselen++] = '.'; + var[baselen] = 0; + continue; + } + if (!isalpha(c)) + break; + var[baselen] = tolower(c); + + /* + * The get_value function might or might not reach the '\n', + * so saving the current line number for error reporting. + */ + line = config_linenr; + if (get_value(fn, data, var, baselen+1) < 0) { + config_linenr = line; + break; + } + } + die("bad config file line %d in %s", config_linenr, config_file_name); +} + +static int parse_unit_factor(const char *end, unsigned long *val) +{ + if (!*end) + return 1; + else if (!strcasecmp(end, "k")) { + *val *= 1024; + return 1; + } + else if (!strcasecmp(end, "m")) { + *val *= 1024 * 1024; + return 1; + } + else if (!strcasecmp(end, "g")) { + *val *= 1024 * 1024 * 1024; + return 1; + } + return 0; +} + +static int perf_parse_llong(const char *value, long long *ret) +{ + if (value && *value) { + char *end; + long long val = strtoll(value, &end, 0); + unsigned long factor = 1; + + if (!parse_unit_factor(end, &factor)) + return 0; + *ret = val * factor; + return 1; + } + return 0; +} + +static int perf_parse_long(const char *value, long *ret) +{ + if (value && *value) { + char *end; + long val = strtol(value, &end, 0); + unsigned long factor = 1; + if (!parse_unit_factor(end, &factor)) + return 0; + *ret = val * factor; + return 1; + } + return 0; +} + +static void die_bad_config(const char *name) +{ + if (config_file_name) + die("bad config value for '%s' in %s", name, config_file_name); + die("bad config value for '%s'", name); +} + +u64 perf_config_u64(const char *name, const char *value) +{ + long long ret = 0; + + if (!perf_parse_llong(value, &ret)) + die_bad_config(name); + return (u64) ret; +} + +int perf_config_int(const char *name, const char *value) +{ + long ret = 0; + if (!perf_parse_long(value, &ret)) + die_bad_config(name); + return ret; +} + +static int perf_config_bool_or_int(const char *name, const char *value, int *is_bool) +{ + *is_bool = 1; + if (!value) + return 1; + if (!*value) + return 0; + if (!strcasecmp(value, "true") || !strcasecmp(value, "yes") || !strcasecmp(value, "on")) + return 1; + if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off")) + return 0; + *is_bool = 0; + return perf_config_int(name, value); +} + +int perf_config_bool(const char *name, const char *value) +{ + int discard; + return !!perf_config_bool_or_int(name, value, &discard); +} + +const char *perf_config_dirname(const char *name, const char *value) +{ + if (!name) + return NULL; + return value; +} + +static int perf_default_core_config(const char *var __maybe_unused, + const char *value __maybe_unused) +{ + /* Add other config variables 
here. */ + return 0; +} + +static int perf_ui_config(const char *var, const char *value) +{ + /* Add other config variables here. */ + if (!strcmp(var, "ui.show-headers")) { + symbol_conf.show_hist_headers = perf_config_bool(var, value); + return 0; + } + return 0; +} + +int perf_default_config(const char *var, const char *value, + void *dummy __maybe_unused) +{ + if (!prefixcmp(var, "core.")) + return perf_default_core_config(var, value); + + if (!prefixcmp(var, "hist.")) + return perf_hist_config(var, value); + + if (!prefixcmp(var, "ui.")) + return perf_ui_config(var, value); + + if (!prefixcmp(var, "call-graph.")) + return perf_callchain_config(var, value); + + /* Add other config variables here. */ + return 0; +} + +static int perf_config_from_file(config_fn_t fn, const char *filename, void *data) +{ + int ret; + FILE *f = fopen(filename, "r"); + + ret = -1; + if (f) { + config_file = f; + config_file_name = filename; + config_linenr = 1; + config_file_eof = 0; + ret = perf_parse_file(fn, data); + fclose(f); + config_file_name = NULL; + } + return ret; +} + +static const char *perf_etc_perfconfig(void) +{ + static const char *system_wide; + if (!system_wide) + system_wide = system_path(ETC_PERFCONFIG); + return system_wide; +} + +static int perf_env_bool(const char *k, int def) +{ + const char *v = getenv(k); + return v ? perf_config_bool(k, v) : def; +} + +static int perf_config_system(void) +{ + return !perf_env_bool("PERF_CONFIG_NOSYSTEM", 0); +} + +static int perf_config_global(void) +{ + return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0); +} + +int perf_config(config_fn_t fn, void *data) +{ + int ret = 0, found = 0; + const char *home = NULL; + + /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */ + if (config_exclusive_filename) + return perf_config_from_file(fn, config_exclusive_filename, data); + if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) { + ret += perf_config_from_file(fn, perf_etc_perfconfig(), + data); + found += 1; + } + + home = getenv("HOME"); + if (perf_config_global() && home) { + char *user_config = strdup(mkpath("%s/.perfconfig", home)); + struct stat st; + + if (user_config == NULL) { + warning("Not enough memory to process %s/.perfconfig, " + "ignoring it.", home); + goto out; + } + + if (stat(user_config, &st) < 0) + goto out_free; + + if (st.st_uid && (st.st_uid != geteuid())) { + warning("File %s not owned by current user or root, " + "ignoring it.", user_config); + goto out_free; + } + + if (!st.st_size) + goto out_free; + + ret += perf_config_from_file(fn, user_config, data); + found += 1; +out_free: + free(user_config); + } +out: + if (found == 0) + return -1; + return ret; +} + +/* + * Call this to report error for your variable that should not + * get a boolean value (i.e. "[my] var" means "true"). 
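+ * + * For example: + * + * [my] + * var + * + * is parsed with a NULL value for "my.var", which + * perf_config_bool() maps to true.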
+ */ +int config_error_nonbool(const char *var) +{ + return error("Missing value for '%s'", var); +} + +struct buildid_dir_config { + char *dir; +}; + +static int buildid_dir_command_config(const char *var, const char *value, + void *data) +{ + struct buildid_dir_config *c = data; + const char *v; + + /* same dir for all commands */ + if (!strcmp(var, "buildid.dir")) { + v = perf_config_dirname(var, value); + if (!v) + return -1; + strncpy(c->dir, v, MAXPATHLEN-1); + c->dir[MAXPATHLEN-1] = '\0'; + } + return 0; +} + +static void check_buildid_dir_config(void) +{ + struct buildid_dir_config c; + c.dir = buildid_dir; + perf_config(buildid_dir_command_config, &c); +} + +void set_buildid_dir(const char *dir) +{ + if (dir) + scnprintf(buildid_dir, MAXPATHLEN-1, "%s", dir); + + /* try config file */ + if (buildid_dir[0] == '\0') + check_buildid_dir_config(); + + /* default to $HOME/.debug */ + if (buildid_dir[0] == '\0') { + char *v = getenv("HOME"); + if (v) { + snprintf(buildid_dir, MAXPATHLEN-1, "%s/%s", + v, DEBUG_CACHE_DIR); + } else { + strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1); + } + buildid_dir[MAXPATHLEN-1] = '\0'; + } + /* for communicating with external commands */ + setenv("PERF_BUILDID_DIR", buildid_dir, 1); +} diff --git a/kernel/tools/perf/util/cpumap.c b/kernel/tools/perf/util/cpumap.c new file mode 100644 index 000000000..c4e55b710 --- /dev/null +++ b/kernel/tools/perf/util/cpumap.c @@ -0,0 +1,479 @@ +#include "util.h" +#include <api/fs/fs.h> +#include "../perf.h" +#include "cpumap.h" +#include <assert.h> +#include <stdio.h> +#include <stdlib.h> + +static struct cpu_map *cpu_map__default_new(void) +{ + struct cpu_map *cpus; + int nr_cpus; + + nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + if (nr_cpus < 0) + return NULL; + + cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int)); + if (cpus != NULL) { + int i; + for (i = 0; i < nr_cpus; ++i) + cpus->map[i] = i; + + cpus->nr = nr_cpus; + } + + return cpus; +} + +static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus) +{ + size_t payload_size = nr_cpus * sizeof(int); + struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size); + + if (cpus != NULL) { + cpus->nr = nr_cpus; + memcpy(cpus->map, tmp_cpus, payload_size); + } + + return cpus; +} + +struct cpu_map *cpu_map__read(FILE *file) +{ + struct cpu_map *cpus = NULL; + int nr_cpus = 0; + int *tmp_cpus = NULL, *tmp; + int max_entries = 0; + int n, cpu, prev; + char sep; + + sep = 0; + prev = -1; + for (;;) { + n = fscanf(file, "%u%c", &cpu, &sep); + if (n <= 0) + break; + if (prev >= 0) { + int new_max = nr_cpus + cpu - prev - 1; + + if (new_max >= max_entries) { + max_entries = new_max + MAX_NR_CPUS / 2; + tmp = realloc(tmp_cpus, max_entries * sizeof(int)); + if (tmp == NULL) + goto out_free_tmp; + tmp_cpus = tmp; + } + + while (++prev < cpu) + tmp_cpus[nr_cpus++] = prev; + } + if (nr_cpus == max_entries) { + max_entries += MAX_NR_CPUS; + tmp = realloc(tmp_cpus, max_entries * sizeof(int)); + if (tmp == NULL) + goto out_free_tmp; + tmp_cpus = tmp; + } + + tmp_cpus[nr_cpus++] = cpu; + if (n == 2 && sep == '-') + prev = cpu; + else + prev = -1; + if (n == 1 || sep == '\n') + break; + } + + if (nr_cpus > 0) + cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); + else + cpus = cpu_map__default_new(); +out_free_tmp: + free(tmp_cpus); + return cpus; +} + +static struct cpu_map *cpu_map__read_all_cpu_map(void) +{ + struct cpu_map *cpus = NULL; + FILE *onlnf; + + onlnf = fopen("/sys/devices/system/cpu/online", "r"); + if (!onlnf) + return cpu_map__default_new(); + + cpus = cpu_map__read(onlnf); + fclose(onlnf); + return
cpus; +} + +struct cpu_map *cpu_map__new(const char *cpu_list) +{ + struct cpu_map *cpus = NULL; + unsigned long start_cpu, end_cpu = 0; + char *p = NULL; + int i, nr_cpus = 0; + int *tmp_cpus = NULL, *tmp; + int max_entries = 0; + + if (!cpu_list) + return cpu_map__read_all_cpu_map(); + + if (!isdigit(*cpu_list)) + goto out; + + while (isdigit(*cpu_list)) { + p = NULL; + start_cpu = strtoul(cpu_list, &p, 0); + if (start_cpu >= INT_MAX + || (*p != '\0' && *p != ',' && *p != '-')) + goto invalid; + + if (*p == '-') { + cpu_list = ++p; + p = NULL; + end_cpu = strtoul(cpu_list, &p, 0); + + if (end_cpu >= INT_MAX || (*p != '\0' && *p != ',')) + goto invalid; + + if (end_cpu < start_cpu) + goto invalid; + } else { + end_cpu = start_cpu; + } + + for (; start_cpu <= end_cpu; start_cpu++) { + /* check for duplicates */ + for (i = 0; i < nr_cpus; i++) + if (tmp_cpus[i] == (int)start_cpu) + goto invalid; + + if (nr_cpus == max_entries) { + max_entries += MAX_NR_CPUS; + tmp = realloc(tmp_cpus, max_entries * sizeof(int)); + if (tmp == NULL) + goto invalid; + tmp_cpus = tmp; + } + tmp_cpus[nr_cpus++] = (int)start_cpu; + } + if (*p) + ++p; + + cpu_list = p; + } + + if (nr_cpus > 0) + cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); + else + cpus = cpu_map__default_new(); +invalid: + free(tmp_cpus); +out: + return cpus; +} + +size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp) +{ + int i; + size_t printed = fprintf(fp, "%d cpu%s: ", + map->nr, map->nr > 1 ? "s" : ""); + for (i = 0; i < map->nr; ++i) + printed += fprintf(fp, "%s%d", i ? ", " : "", map->map[i]); + + return printed + fprintf(fp, "\n"); +} + +struct cpu_map *cpu_map__dummy_new(void) +{ + struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int)); + + if (cpus != NULL) { + cpus->nr = 1; + cpus->map[0] = -1; + } + + return cpus; +} + +void cpu_map__delete(struct cpu_map *map) +{ + free(map); +} + +int cpu_map__get_socket(struct cpu_map *map, int idx) +{ + FILE *fp; + const char *mnt; + char path[PATH_MAX]; + int cpu, ret; + + if (idx > map->nr) + return -1; + + cpu = map->map[idx]; + + mnt = sysfs__mountpoint(); + if (!mnt) + return -1; + + snprintf(path, PATH_MAX, + "%s/devices/system/cpu/cpu%d/topology/physical_package_id", + mnt, cpu); + + fp = fopen(path, "r"); + if (!fp) + return -1; + ret = fscanf(fp, "%d", &cpu); + fclose(fp); + return ret == 1 ? 
cpu : -1; +} + +static int cmp_ids(const void *a, const void *b) +{ + return *(int *)a - *(int *)b; +} + +static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res, + int (*f)(struct cpu_map *map, int cpu)) +{ + struct cpu_map *c; + int nr = cpus->nr; + int cpu, s1, s2; + + /* allocate as much as possible */ + c = calloc(1, sizeof(*c) + nr * sizeof(int)); + if (!c) + return -1; + + for (cpu = 0; cpu < nr; cpu++) { + s1 = f(cpus, cpu); + for (s2 = 0; s2 < c->nr; s2++) { + if (s1 == c->map[s2]) + break; + } + if (s2 == c->nr) { + c->map[c->nr] = s1; + c->nr++; + } + } + /* ensure we process id in increasing order */ + qsort(c->map, c->nr, sizeof(int), cmp_ids); + + *res = c; + return 0; +} + +int cpu_map__get_core(struct cpu_map *map, int idx) +{ + FILE *fp; + const char *mnt; + char path[PATH_MAX]; + int cpu, ret, s; + + if (idx > map->nr) + return -1; + + cpu = map->map[idx]; + + mnt = sysfs__mountpoint(); + if (!mnt) + return -1; + + snprintf(path, PATH_MAX, + "%s/devices/system/cpu/cpu%d/topology/core_id", + mnt, cpu); + + fp = fopen(path, "r"); + if (!fp) + return -1; + ret = fscanf(fp, "%d", &cpu); + fclose(fp); + if (ret != 1) + return -1; + + s = cpu_map__get_socket(map, idx); + if (s == -1) + return -1; + + /* + * encode socket in upper 16 bits + * core_id is relative to socket, and + * we need a global id. So we combine + * socket+ core id + */ + return (s << 16) | (cpu & 0xffff); +} + +int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp) +{ + return cpu_map__build_map(cpus, sockp, cpu_map__get_socket); +} + +int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep) +{ + return cpu_map__build_map(cpus, corep, cpu_map__get_core); +} + +/* setup simple routines to easily access node numbers given a cpu number */ +static int get_max_num(char *path, int *max) +{ + size_t num; + char *buf; + int err = 0; + + if (filename__read_str(path, &buf, &num)) + return -1; + + buf[num] = '\0'; + + /* start on the right, to find highest node num */ + while (--num) { + if ((buf[num] == ',') || (buf[num] == '-')) { + num++; + break; + } + } + if (sscanf(&buf[num], "%d", max) < 1) { + err = -1; + goto out; + } + + /* convert from 0-based to 1-based */ + (*max)++; + +out: + free(buf); + return err; +} + +/* Determine highest possible cpu in the system for sparse allocation */ +static void set_max_cpu_num(void) +{ + const char *mnt; + char path[PATH_MAX]; + int ret = -1; + + /* set up default */ + max_cpu_num = 4096; + + mnt = sysfs__mountpoint(); + if (!mnt) + goto out; + + /* get the highest possible cpu number for a sparse allocation */ + ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt); + if (ret == PATH_MAX) { + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); + goto out; + } + + ret = get_max_num(path, &max_cpu_num); + +out: + if (ret) + pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num); +} + +/* Determine highest possible node in the system for sparse allocation */ +static void set_max_node_num(void) +{ + const char *mnt; + char path[PATH_MAX]; + int ret = -1; + + /* set up default */ + max_node_num = 8; + + mnt = sysfs__mountpoint(); + if (!mnt) + goto out; + + /* get the highest possible cpu number for a sparse allocation */ + ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt); + if (ret == PATH_MAX) { + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); + goto out; + } + + ret = get_max_num(path, &max_node_num); + +out: + if (ret) + pr_err("Failed to read max 
nodes, using default of %d\n", max_node_num); +} + +static int init_cpunode_map(void) +{ + int i; + + set_max_cpu_num(); + set_max_node_num(); + + cpunode_map = calloc(max_cpu_num, sizeof(int)); + if (!cpunode_map) { + pr_err("%s: calloc failed\n", __func__); + return -1; + } + + for (i = 0; i < max_cpu_num; i++) + cpunode_map[i] = -1; + + return 0; +} + +int cpu__setup_cpunode_map(void) +{ + struct dirent *dent1, *dent2; + DIR *dir1, *dir2; + unsigned int cpu, mem; + char buf[PATH_MAX]; + char path[PATH_MAX]; + const char *mnt; + int n; + + /* initialize globals */ + if (init_cpunode_map()) + return -1; + + mnt = sysfs__mountpoint(); + if (!mnt) + return 0; + + n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt); + if (n == PATH_MAX) { + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); + return -1; + } + + dir1 = opendir(path); + if (!dir1) + return 0; + + /* walk tree and setup map */ + while ((dent1 = readdir(dir1)) != NULL) { + if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1) + continue; + + n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name); + if (n == PATH_MAX) { + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); + continue; + } + + dir2 = opendir(buf); + if (!dir2) + continue; + while ((dent2 = readdir(dir2)) != NULL) { + if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1) + continue; + cpunode_map[cpu] = mem; + } + closedir(dir2); + } + closedir(dir1); + return 0; +} diff --git a/kernel/tools/perf/util/cpumap.h b/kernel/tools/perf/util/cpumap.h new file mode 100644 index 000000000..61a654849 --- /dev/null +++ b/kernel/tools/perf/util/cpumap.h @@ -0,0 +1,84 @@ +#ifndef __PERF_CPUMAP_H +#define __PERF_CPUMAP_H + +#include <stdio.h> +#include <stdbool.h> + +#include "perf.h" +#include "util/debug.h" + +struct cpu_map { + int nr; + int map[]; +}; + +struct cpu_map *cpu_map__new(const char *cpu_list); +struct cpu_map *cpu_map__dummy_new(void); +void cpu_map__delete(struct cpu_map *map); +struct cpu_map *cpu_map__read(FILE *file); +size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp); +int cpu_map__get_socket(struct cpu_map *map, int idx); +int cpu_map__get_core(struct cpu_map *map, int idx); +int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp); +int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep); + +static inline int cpu_map__socket(struct cpu_map *sock, int s) +{ + if (!sock || s > sock->nr || s < 0) + return 0; + return sock->map[s]; +} + +static inline int cpu_map__id_to_socket(int id) +{ + return id >> 16; +} + +static inline int cpu_map__id_to_cpu(int id) +{ + return id & 0xffff; +} + +static inline int cpu_map__nr(const struct cpu_map *map) +{ + return map ? map->nr : 1; +} + +static inline bool cpu_map__empty(const struct cpu_map *map) +{ + return map ?
map->map[0] == -1 : true; +} + +int max_cpu_num; +int max_node_num; +int *cpunode_map; + +int cpu__setup_cpunode_map(void); + +static inline int cpu__max_node(void) +{ + if (unlikely(!max_node_num)) + pr_debug("cpu_map not initialized\n"); + + return max_node_num; +} + +static inline int cpu__max_cpu(void) +{ + if (unlikely(!max_cpu_num)) + pr_debug("cpu_map not initialized\n"); + + return max_cpu_num; +} + +static inline int cpu__get_node(int cpu) +{ + if (unlikely(cpunode_map == NULL)) { + pr_debug("cpu_map not initialized\n"); + return -1; + } + + return cpunode_map[cpu]; +} + +#endif /* __PERF_CPUMAP_H */ diff --git a/kernel/tools/perf/util/ctype.c b/kernel/tools/perf/util/ctype.c new file mode 100644 index 000000000..aada3ac5e --- /dev/null +++ b/kernel/tools/perf/util/ctype.c @@ -0,0 +1,39 @@ +/* + * Sane locale-independent, ASCII ctype. + * + * No surprises, and works with signed and unsigned chars. + */ +#include "util.h" + +enum { + S = GIT_SPACE, + A = GIT_ALPHA, + D = GIT_DIGIT, + G = GIT_GLOB_SPECIAL, /* *, ?, [, \\ */ + R = GIT_REGEX_SPECIAL, /* $, (, ), +, ., ^, {, | * */ + P = GIT_PRINT_EXTRA, /* printable - alpha - digit - glob - regex */ + + PS = GIT_SPACE | GIT_PRINT_EXTRA, +}; + +unsigned char sane_ctype[256] = { +/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */ + + 0, 0, 0, 0, 0, 0, 0, 0, 0, S, S, 0, 0, S, 0, 0, /* 0.. 15 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 16.. 31 */ + PS,P, P, P, R, P, P, P, R, R, G, R, P, P, R, P, /* 32.. 47 */ + D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, G, /* 48.. 63 */ + P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 64.. 79 */ + A, A, A, A, A, A, A, A, A, A, A, G, G, P, R, P, /* 80.. 95 */ + P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 96..111 */ + A, A, A, A, A, A, A, A, A, A, A, R, R, P, P, 0, /* 112..127 */ + /* Nothing in the 128.. range */ +}; + +const char *graph_line = + "_____________________________________________________________________" + "_____________________________________________________________________"; +const char *graph_dotted_line = + "---------------------------------------------------------------------" + "---------------------------------------------------------------------" + "---------------------------------------------------------------------"; diff --git a/kernel/tools/perf/util/data-convert-bt.c b/kernel/tools/perf/util/data-convert-bt.c new file mode 100644 index 000000000..dd17c9a32 --- /dev/null +++ b/kernel/tools/perf/util/data-convert-bt.c @@ -0,0 +1,857 @@ +/* + * CTF writing support via babeltrace. + * + * Copyright (C) 2014, Jiri Olsa + * Copyright (C) 2014, Sebastian Andrzej Siewior + * + * Released under the GPL v2. (and only v2, not any later version) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "asm/bug.h" +#include "data-convert-bt.h" +#include "session.h" +#include "util.h" +#include "debug.h" +#include "tool.h" +#include "evlist.h" +#include "evsel.h" +#include "machine.h" + +#define pr_N(n, fmt, ...) \ + eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__) + +#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__) +#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__) + +#define pr_time2(t, fmt, ...) 
pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__) + +struct evsel_priv { + struct bt_ctf_event_class *event_class; +}; + +struct ctf_writer { + /* writer primitives */ + struct bt_ctf_writer *writer; + struct bt_ctf_stream *stream; + struct bt_ctf_stream_class *stream_class; + struct bt_ctf_clock *clock; + + /* data types */ + union { + struct { + struct bt_ctf_field_type *s64; + struct bt_ctf_field_type *u64; + struct bt_ctf_field_type *s32; + struct bt_ctf_field_type *u32; + struct bt_ctf_field_type *string; + struct bt_ctf_field_type *u64_hex; + }; + struct bt_ctf_field_type *array[6]; + } data; +}; + +struct convert { + struct perf_tool tool; + struct ctf_writer writer; + + u64 events_size; + u64 events_count; +}; + +static int value_set(struct bt_ctf_field_type *type, + struct bt_ctf_event *event, + const char *name, u64 val) +{ + struct bt_ctf_field *field; + bool sign = bt_ctf_field_type_integer_get_signed(type); + int ret; + + field = bt_ctf_field_create(type); + if (!field) { + pr_err("failed to create a field %s\n", name); + return -1; + } + + if (sign) { + ret = bt_ctf_field_signed_integer_set_value(field, val); + if (ret) { + pr_err("failed to set field value %s\n", name); + goto err; + } + } else { + ret = bt_ctf_field_unsigned_integer_set_value(field, val); + if (ret) { + pr_err("failed to set field value %s\n", name); + goto err; + } + } + + ret = bt_ctf_event_set_payload(event, name, field); + if (ret) { + pr_err("failed to set payload %s\n", name); + goto err; + } + + pr2(" SET [%s = %" PRIu64 "]\n", name, val); + +err: + bt_ctf_field_put(field); + return ret; +} + +#define __FUNC_VALUE_SET(_name, _val_type) \ +static __maybe_unused int value_set_##_name(struct ctf_writer *cw, \ + struct bt_ctf_event *event, \ + const char *name, \ + _val_type val) \ +{ \ + struct bt_ctf_field_type *type = cw->data._name; \ + return value_set(type, event, name, (u64) val); \ +} + +#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name) + +FUNC_VALUE_SET(s32) +FUNC_VALUE_SET(u32) +FUNC_VALUE_SET(s64) +FUNC_VALUE_SET(u64) +__FUNC_VALUE_SET(u64_hex, u64) + +static struct bt_ctf_field_type* +get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field) +{ + unsigned long flags = field->flags; + + if (flags & FIELD_IS_STRING) + return cw->data.string; + + if (!(flags & FIELD_IS_SIGNED)) { + /* unsigned long are mostly pointers */ + if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER) + return cw->data.u64_hex; + } + + if (flags & FIELD_IS_SIGNED) { + if (field->size == 8) + return cw->data.s64; + else + return cw->data.s32; + } + + if (field->size == 8) + return cw->data.u64; + else + return cw->data.u32; +} + +static int add_tracepoint_field_value(struct ctf_writer *cw, + struct bt_ctf_event_class *event_class, + struct bt_ctf_event *event, + struct perf_sample *sample, + struct format_field *fmtf) +{ + struct bt_ctf_field_type *type; + struct bt_ctf_field *array_field; + struct bt_ctf_field *field; + const char *name = fmtf->name; + void *data = sample->raw_data; + unsigned long long value_int; + unsigned long flags = fmtf->flags; + unsigned int n_items; + unsigned int i; + unsigned int offset; + unsigned int len; + int ret; + + offset = fmtf->offset; + len = fmtf->size; + if (flags & FIELD_IS_STRING) + flags &= ~FIELD_IS_ARRAY; + + if (flags & FIELD_IS_DYNAMIC) { + unsigned long long tmp_val; + + tmp_val = pevent_read_number(fmtf->event->pevent, + data + offset, len); + offset = tmp_val; + len = offset >> 16; + offset &= 0xffff; + } + + if (flags & 
FIELD_IS_ARRAY) { + + type = bt_ctf_event_class_get_field_by_name( + event_class, name); + array_field = bt_ctf_field_create(type); + bt_ctf_field_type_put(type); + if (!array_field) { + pr_err("Failed to create array type %s\n", name); + return -1; + } + + len = fmtf->size / fmtf->arraylen; + n_items = fmtf->arraylen; + } else { + n_items = 1; + array_field = NULL; + } + + type = get_tracepoint_field_type(cw, fmtf); + + for (i = 0; i < n_items; i++) { + if (!(flags & FIELD_IS_STRING)) + value_int = pevent_read_number( + fmtf->event->pevent, + data + offset + i * len, len); + + if (flags & FIELD_IS_ARRAY) + field = bt_ctf_field_array_get_field(array_field, i); + else + field = bt_ctf_field_create(type); + + if (!field) { + pr_err("failed to create a field %s\n", name); + return -1; + } + + if (flags & FIELD_IS_STRING) + ret = bt_ctf_field_string_set_value(field, + data + offset + i * len); + else if (!(flags & FIELD_IS_SIGNED)) + ret = bt_ctf_field_unsigned_integer_set_value( + field, value_int); + else + ret = bt_ctf_field_signed_integer_set_value( + field, value_int); + if (ret) { + pr_err("failed to set field value %s\n", name); + goto err_put_field; + } + if (!(flags & FIELD_IS_ARRAY)) { + ret = bt_ctf_event_set_payload(event, name, field); + if (ret) { + pr_err("failed to set payload %s\n", name); + goto err_put_field; + } + } + bt_ctf_field_put(field); + } + if (flags & FIELD_IS_ARRAY) { + ret = bt_ctf_event_set_payload(event, name, array_field); + if (ret) { + pr_err("Failed to add payload array %s\n", name); + return -1; + } + bt_ctf_field_put(array_field); + } + return 0; + +err_put_field: + bt_ctf_field_put(field); + return -1; +} + +static int add_tracepoint_fields_values(struct ctf_writer *cw, + struct bt_ctf_event_class *event_class, + struct bt_ctf_event *event, + struct format_field *fields, + struct perf_sample *sample) +{ + struct format_field *field; + int ret; + + for (field = fields; field; field = field->next) { + ret = add_tracepoint_field_value(cw, event_class, event, sample, + field); + if (ret) + return -1; + } + return 0; +} + +static int add_tracepoint_values(struct ctf_writer *cw, + struct bt_ctf_event_class *event_class, + struct bt_ctf_event *event, + struct perf_evsel *evsel, + struct perf_sample *sample) +{ + struct format_field *common_fields = evsel->tp_format->format.common_fields; + struct format_field *fields = evsel->tp_format->format.fields; + int ret; + + ret = add_tracepoint_fields_values(cw, event_class, event, + common_fields, sample); + if (!ret) + ret = add_tracepoint_fields_values(cw, event_class, event, + fields, sample); + + return ret; +} + +static int add_generic_values(struct ctf_writer *cw, + struct bt_ctf_event *event, + struct perf_evsel *evsel, + struct perf_sample *sample) +{ + u64 type = evsel->attr.sample_type; + int ret; + + /* + * missing: + * PERF_SAMPLE_TIME - not needed as we have it in + * ctf event header + * PERF_SAMPLE_READ - TODO + * PERF_SAMPLE_CALLCHAIN - TODO + * PERF_SAMPLE_RAW - tracepoint fields are handled separately + * PERF_SAMPLE_BRANCH_STACK - TODO + * PERF_SAMPLE_REGS_USER - TODO + * PERF_SAMPLE_STACK_USER - TODO + */ + + if (type & PERF_SAMPLE_IP) { + ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip); + if (ret) + return -1; + } + + if (type & PERF_SAMPLE_TID) { + ret = value_set_s32(cw, event, "perf_tid", sample->tid); + if (ret) + return -1; + + ret = value_set_s32(cw, event, "perf_pid", sample->pid); + if (ret) + return -1; + } + + if ((type & PERF_SAMPLE_ID) || + (type & PERF_SAMPLE_IDENTIFIER)) { +
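/* PERF_SAMPLE_ID and PERF_SAMPLE_IDENTIFIER are both delivered in sample->id */ +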
ret = value_set_u64(cw, event, "perf_id", sample->id); + if (ret) + return -1; + } + + if (type & PERF_SAMPLE_STREAM_ID) { + ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id); + if (ret) + return -1; + } + + if (type & PERF_SAMPLE_CPU) { + ret = value_set_u32(cw, event, "perf_cpu", sample->cpu); + if (ret) + return -1; + } + + if (type & PERF_SAMPLE_PERIOD) { + ret = value_set_u64(cw, event, "perf_period", sample->period); + if (ret) + return -1; + } + + if (type & PERF_SAMPLE_WEIGHT) { + ret = value_set_u64(cw, event, "perf_weight", sample->weight); + if (ret) + return -1; + } + + if (type & PERF_SAMPLE_DATA_SRC) { + ret = value_set_u64(cw, event, "perf_data_src", + sample->data_src); + if (ret) + return -1; + } + + if (type & PERF_SAMPLE_TRANSACTION) { + ret = value_set_u64(cw, event, "perf_transaction", + sample->transaction); + if (ret) + return -1; + } + + return 0; +} + +static int process_sample_event(struct perf_tool *tool, + union perf_event *_event __maybe_unused, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine __maybe_unused) +{ + struct convert *c = container_of(tool, struct convert, tool); + struct evsel_priv *priv = evsel->priv; + struct ctf_writer *cw = &c->writer; + struct bt_ctf_event_class *event_class; + struct bt_ctf_event *event; + int ret; + + if (WARN_ONCE(!priv, "Failed to setup all events.\n")) + return 0; + + event_class = priv->event_class; + + /* update stats */ + c->events_count++; + c->events_size += _event->header.size; + + pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count); + + event = bt_ctf_event_create(event_class); + if (!event) { + pr_err("Failed to create an CTF event\n"); + return -1; + } + + bt_ctf_clock_set_time(cw->clock, sample->time); + + ret = add_generic_values(cw, event, evsel, sample); + if (ret) + return -1; + + if (evsel->attr.type == PERF_TYPE_TRACEPOINT) { + ret = add_tracepoint_values(cw, event_class, event, + evsel, sample); + if (ret) + return -1; + } + + bt_ctf_stream_append_event(cw->stream, event); + bt_ctf_event_put(event); + return 0; +} + +static int add_tracepoint_fields_types(struct ctf_writer *cw, + struct format_field *fields, + struct bt_ctf_event_class *event_class) +{ + struct format_field *field; + int ret; + + for (field = fields; field; field = field->next) { + struct bt_ctf_field_type *type; + unsigned long flags = field->flags; + + pr2(" field '%s'\n", field->name); + + type = get_tracepoint_field_type(cw, field); + if (!type) + return -1; + + /* + * A string is an array of chars. For this we use the string + * type and don't care that it is an array. What we don't + * support is an array of strings. 
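+ * (so a tracepoint field like char comm[16] is emitted as one CTF + * string rather than an array of 16 chars)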
+ */ + if (flags & FIELD_IS_STRING) + flags &= ~FIELD_IS_ARRAY; + + if (flags & FIELD_IS_ARRAY) + type = bt_ctf_field_type_array_create(type, field->arraylen); + + ret = bt_ctf_event_class_add_field(event_class, type, + field->name); + + if (flags & FIELD_IS_ARRAY) + bt_ctf_field_type_put(type); + + if (ret) { + pr_err("Failed to add field '%s'\n", field->name); + return -1; + } + } + + return 0; +} + +static int add_tracepoint_types(struct ctf_writer *cw, + struct perf_evsel *evsel, + struct bt_ctf_event_class *class) +{ + struct format_field *common_fields = evsel->tp_format->format.common_fields; + struct format_field *fields = evsel->tp_format->format.fields; + int ret; + + ret = add_tracepoint_fields_types(cw, common_fields, class); + if (!ret) + ret = add_tracepoint_fields_types(cw, fields, class); + + return ret; +} + +static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel, + struct bt_ctf_event_class *event_class) +{ + u64 type = evsel->attr.sample_type; + + /* + * missing: + * PERF_SAMPLE_TIME - not needed as we have it in + * ctf event header + * PERF_SAMPLE_READ - TODO + * PERF_SAMPLE_CALLCHAIN - TODO + * PERF_SAMPLE_RAW - tracepoint fields are handled separately + * PERF_SAMPLE_BRANCH_STACK - TODO + * PERF_SAMPLE_REGS_USER - TODO + * PERF_SAMPLE_STACK_USER - TODO + */ + +#define ADD_FIELD(cl, t, n) \ + do { \ + pr2(" field '%s'\n", n); \ + if (bt_ctf_event_class_add_field(cl, t, n)) { \ + pr_err("Failed to add field '%s'\n", n); \ + return -1; \ + } \ + } while (0) + + if (type & PERF_SAMPLE_IP) + ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip"); + + if (type & PERF_SAMPLE_TID) { + ADD_FIELD(event_class, cw->data.s32, "perf_tid"); + ADD_FIELD(event_class, cw->data.s32, "perf_pid"); + } + + if ((type & PERF_SAMPLE_ID) || + (type & PERF_SAMPLE_IDENTIFIER)) + ADD_FIELD(event_class, cw->data.u64, "perf_id"); + + if (type & PERF_SAMPLE_STREAM_ID) + ADD_FIELD(event_class, cw->data.u64, "perf_stream_id"); + + if (type & PERF_SAMPLE_CPU) + ADD_FIELD(event_class, cw->data.u32, "perf_cpu"); + + if (type & PERF_SAMPLE_PERIOD) + ADD_FIELD(event_class, cw->data.u64, "perf_period"); + + if (type & PERF_SAMPLE_WEIGHT) + ADD_FIELD(event_class, cw->data.u64, "perf_weight"); + + if (type & PERF_SAMPLE_DATA_SRC) + ADD_FIELD(event_class, cw->data.u64, "perf_data_src"); + + if (type & PERF_SAMPLE_TRANSACTION) + ADD_FIELD(event_class, cw->data.u64, "perf_transaction"); + +#undef ADD_FIELD + return 0; +} + +static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel) +{ + struct bt_ctf_event_class *event_class; + struct evsel_priv *priv; + const char *name = perf_evsel__name(evsel); + int ret; + + pr("Adding event '%s' (type %d)\n", name, evsel->attr.type); + + event_class = bt_ctf_event_class_create(name); + if (!event_class) + return -1; + + ret = add_generic_types(cw, evsel, event_class); + if (ret) + goto err; + + if (evsel->attr.type == PERF_TYPE_TRACEPOINT) { + ret = add_tracepoint_types(cw, evsel, event_class); + if (ret) + goto err; + } + + ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class); + if (ret) { + pr("Failed to add event class into stream.\n"); + goto err; + } + + priv = malloc(sizeof(*priv)); + if (!priv) + goto err; + + priv->event_class = event_class; + evsel->priv = priv; + return 0; + +err: + bt_ctf_event_class_put(event_class); + pr_err("Failed to add event '%s'.\n", name); + return -1; +} + +static int setup_events(struct ctf_writer *cw, struct perf_session *session) +{ + struct perf_evlist *evlist = session->evlist; +
struct perf_evsel *evsel; + int ret; + + evlist__for_each(evlist, evsel) { + ret = add_event(cw, evsel); + if (ret) + return ret; + } + return 0; +} + +static int ctf_writer__setup_env(struct ctf_writer *cw, + struct perf_session *session) +{ + struct perf_header *header = &session->header; + struct bt_ctf_writer *writer = cw->writer; + +#define ADD(__n, __v) \ +do { \ + if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \ + return -1; \ +} while (0) + + ADD("host", header->env.hostname); + ADD("sysname", "Linux"); + ADD("release", header->env.os_release); + ADD("version", header->env.version); + ADD("machine", header->env.arch); + ADD("domain", "kernel"); + ADD("tracer_name", "perf"); + +#undef ADD + return 0; +} + +static int ctf_writer__setup_clock(struct ctf_writer *cw) +{ + struct bt_ctf_clock *clock = cw->clock; + + bt_ctf_clock_set_description(clock, "perf clock"); + +#define SET(__n, __v) \ +do { \ + if (bt_ctf_clock_set_##__n(clock, __v)) \ + return -1; \ +} while (0) + + SET(frequency, 1000000000); + SET(offset_s, 0); + SET(offset, 0); + SET(precision, 10); + SET(is_absolute, 0); + +#undef SET + return 0; +} + +static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex) +{ + struct bt_ctf_field_type *type; + + type = bt_ctf_field_type_integer_create(size); + if (!type) + return NULL; + + if (sign && + bt_ctf_field_type_integer_set_signed(type, 1)) + goto err; + + if (hex && + bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL)) + goto err; + + pr2("Created type: INTEGER %d-bit %ssigned %s\n", + size, sign ? "" : "un", hex ? "hex" : ""); + return type; + +err: + bt_ctf_field_type_put(type); + return NULL; +} + +static void ctf_writer__cleanup_data(struct ctf_writer *cw) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(cw->data.array); i++) + bt_ctf_field_type_put(cw->data.array[i]); +} + +static int ctf_writer__init_data(struct ctf_writer *cw) +{ +#define CREATE_INT_TYPE(type, size, sign, hex) \ +do { \ + (type) = create_int_type(size, sign, hex); \ + if (!(type)) \ + goto err; \ +} while (0) + + CREATE_INT_TYPE(cw->data.s64, 64, true, false); + CREATE_INT_TYPE(cw->data.u64, 64, false, false); + CREATE_INT_TYPE(cw->data.s32, 32, true, false); + CREATE_INT_TYPE(cw->data.u32, 32, false, false); + CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true); + + cw->data.string = bt_ctf_field_type_string_create(); + if (cw->data.string) + return 0; + +err: + ctf_writer__cleanup_data(cw); + pr_err("Failed to create data types.\n"); + return -1; +} + +static void ctf_writer__cleanup(struct ctf_writer *cw) +{ + ctf_writer__cleanup_data(cw); + + bt_ctf_clock_put(cw->clock); + bt_ctf_stream_put(cw->stream); + bt_ctf_stream_class_put(cw->stream_class); + bt_ctf_writer_put(cw->writer); + + /* and NULL all the pointers */ + memset(cw, 0, sizeof(*cw)); +} + +static int ctf_writer__init(struct ctf_writer *cw, const char *path) +{ + struct bt_ctf_writer *writer; + struct bt_ctf_stream_class *stream_class; + struct bt_ctf_stream *stream; + struct bt_ctf_clock *clock; + + /* CTF writer */ + writer = bt_ctf_writer_create(path); + if (!writer) + goto err; + + cw->writer = writer; + + /* CTF clock */ + clock = bt_ctf_clock_create("perf_clock"); + if (!clock) { + pr("Failed to create CTF clock.\n"); + goto err_cleanup; + } + + cw->clock = clock; + + if (ctf_writer__setup_clock(cw)) { + pr("Failed to setup CTF clock.\n"); + goto err_cleanup; + } + + /* CTF stream class */ + stream_class = bt_ctf_stream_class_create("perf_stream"); + if (!stream_class) {
pr("Failed to create CTF stream class.\n"); + goto err_cleanup; + } + + cw->stream_class = stream_class; + + /* CTF clock stream setup */ + if (bt_ctf_stream_class_set_clock(stream_class, clock)) { + pr("Failed to assign CTF clock to stream class.\n"); + goto err_cleanup; + } + + if (ctf_writer__init_data(cw)) + goto err_cleanup; + + /* CTF stream instance */ + stream = bt_ctf_writer_create_stream(writer, stream_class); + if (!stream) { + pr("Failed to create CTF stream.\n"); + goto err_cleanup; + } + + cw->stream = stream; + + /* CTF clock writer setup */ + if (bt_ctf_writer_add_clock(writer, clock)) { + pr("Failed to assign CTF clock to writer.\n"); + goto err_cleanup; + } + + return 0; + +err_cleanup: + ctf_writer__cleanup(cw); +err: + pr_err("Failed to setup CTF writer.\n"); + return -1; +} + +int bt_convert__perf2ctf(const char *input, const char *path, bool force) +{ + struct perf_session *session; + struct perf_data_file file = { + .path = input, + .mode = PERF_DATA_MODE_READ, + .force = force, + }; + struct convert c = { + .tool = { + .sample = process_sample_event, + .mmap = perf_event__process_mmap, + .mmap2 = perf_event__process_mmap2, + .comm = perf_event__process_comm, + .exit = perf_event__process_exit, + .fork = perf_event__process_fork, + .lost = perf_event__process_lost, + .tracing_data = perf_event__process_tracing_data, + .build_id = perf_event__process_build_id, + .ordered_events = true, + .ordering_requires_timestamps = true, + }, + }; + struct ctf_writer *cw = &c.writer; + int err = -1; + + /* CTF writer */ + if (ctf_writer__init(cw, path)) + return -1; + + /* perf.data session */ + session = perf_session__new(&file, 0, &c.tool); + if (!session) + goto free_writer; + + /* CTF writer env/clock setup */ + if (ctf_writer__setup_env(cw, session)) + goto free_session; + + /* CTF events setup */ + if (setup_events(cw, session)) + goto free_session; + + err = perf_session__process_events(session); + if (!err) + err = bt_ctf_stream_flush(cw->stream); + + fprintf(stderr, + "[ perf data convert: Converted '%s' into CTF data '%s' ]\n", + file.path, path); + + fprintf(stderr, + "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples) ]\n", + (double) c.events_size / 1024.0 / 1024.0, + c.events_count); + + /* its all good */ +free_session: + perf_session__delete(session); + +free_writer: + ctf_writer__cleanup(cw); + return err; +} diff --git a/kernel/tools/perf/util/data-convert-bt.h b/kernel/tools/perf/util/data-convert-bt.h new file mode 100644 index 000000000..4c204342a --- /dev/null +++ b/kernel/tools/perf/util/data-convert-bt.h @@ -0,0 +1,8 @@ +#ifndef __DATA_CONVERT_BT_H +#define __DATA_CONVERT_BT_H +#ifdef HAVE_LIBBABELTRACE_SUPPORT + +int bt_convert__perf2ctf(const char *input_name, const char *to_ctf, bool force); + +#endif /* HAVE_LIBBABELTRACE_SUPPORT */ +#endif /* __DATA_CONVERT_BT_H */ diff --git a/kernel/tools/perf/util/data.c b/kernel/tools/perf/util/data.c new file mode 100644 index 000000000..1921942fc --- /dev/null +++ b/kernel/tools/perf/util/data.c @@ -0,0 +1,138 @@ +#include +#include +#include +#include +#include +#include + +#include "data.h" +#include "util.h" +#include "debug.h" + +static bool check_pipe(struct perf_data_file *file) +{ + struct stat st; + bool is_pipe = false; + int fd = perf_data_file__is_read(file) ? 
+ STDIN_FILENO : STDOUT_FILENO; + + if (!file->path) { + if (!fstat(fd, &st) && S_ISFIFO(st.st_mode)) + is_pipe = true; + } else { + if (!strcmp(file->path, "-")) + is_pipe = true; + } + + if (is_pipe) + file->fd = fd; + + return file->is_pipe = is_pipe; +} + +static int check_backup(struct perf_data_file *file) +{ + struct stat st; + + if (!stat(file->path, &st) && st.st_size) { + /* TODO check errors properly */ + char oldname[PATH_MAX]; + snprintf(oldname, sizeof(oldname), "%s.old", + file->path); + unlink(oldname); + rename(file->path, oldname); + } + + return 0; +} + +static int open_file_read(struct perf_data_file *file) +{ + struct stat st; + int fd; + char sbuf[STRERR_BUFSIZE]; + + fd = open(file->path, O_RDONLY); + if (fd < 0) { + int err = errno; + + pr_err("failed to open %s: %s", file->path, + strerror_r(err, sbuf, sizeof(sbuf))); + if (err == ENOENT && !strcmp(file->path, "perf.data")) + pr_err(" (try 'perf record' first)"); + pr_err("\n"); + return -err; + } + + if (fstat(fd, &st) < 0) + goto out_close; + + if (!file->force && st.st_uid && (st.st_uid != geteuid())) { + pr_err("File %s not owned by current user or root (use -f to override)\n", + file->path); + goto out_close; + } + + if (!st.st_size) { + pr_info("zero-sized file (%s), nothing to do!\n", + file->path); + goto out_close; + } + + file->size = st.st_size; + return fd; + + out_close: + close(fd); + return -1; +} + +static int open_file_write(struct perf_data_file *file) +{ + int fd; + char sbuf[STRERR_BUFSIZE]; + + if (check_backup(file)) + return -1; + + fd = open(file->path, O_CREAT|O_RDWR|O_TRUNC, S_IRUSR|S_IWUSR); + + if (fd < 0) + pr_err("failed to open %s : %s\n", file->path, + strerror_r(errno, sbuf, sizeof(sbuf))); + + return fd; +} + +static int open_file(struct perf_data_file *file) +{ + int fd; + + fd = perf_data_file__is_read(file) ? + open_file_read(file) : open_file_write(file); + + file->fd = fd; + return fd < 0 ? 
-1 : 0; +} + +int perf_data_file__open(struct perf_data_file *file) +{ + if (check_pipe(file)) + return 0; + + if (!file->path) + file->path = "perf.data"; + + return open_file(file); +} + +void perf_data_file__close(struct perf_data_file *file) +{ + close(file->fd); +} + +ssize_t perf_data_file__write(struct perf_data_file *file, + void *buf, size_t size) +{ + return writen(file->fd, buf, size); +} diff --git a/kernel/tools/perf/util/data.h b/kernel/tools/perf/util/data.h new file mode 100644 index 000000000..2b15d0c95 --- /dev/null +++ b/kernel/tools/perf/util/data.h @@ -0,0 +1,50 @@ +#ifndef __PERF_DATA_H +#define __PERF_DATA_H + +#include <stdbool.h> + +enum perf_data_mode { + PERF_DATA_MODE_WRITE, + PERF_DATA_MODE_READ, +}; + +struct perf_data_file { + const char *path; + int fd; + bool is_pipe; + bool force; + unsigned long size; + enum perf_data_mode mode; +}; + +static inline bool perf_data_file__is_read(struct perf_data_file *file) +{ + return file->mode == PERF_DATA_MODE_READ; +} + +static inline bool perf_data_file__is_write(struct perf_data_file *file) +{ + return file->mode == PERF_DATA_MODE_WRITE; +} + +static inline int perf_data_file__is_pipe(struct perf_data_file *file) +{ + return file->is_pipe; +} + +static inline int perf_data_file__fd(struct perf_data_file *file) +{ + return file->fd; +} + +static inline unsigned long perf_data_file__size(struct perf_data_file *file) +{ + return file->size; +} + +int perf_data_file__open(struct perf_data_file *file); +void perf_data_file__close(struct perf_data_file *file); +ssize_t perf_data_file__write(struct perf_data_file *file, + void *buf, size_t size); + +#endif /* __PERF_DATA_H */ diff --git a/kernel/tools/perf/util/db-export.c b/kernel/tools/perf/util/db-export.c new file mode 100644 index 000000000..bb39a3ffc --- /dev/null +++ b/kernel/tools/perf/util/db-export.c @@ -0,0 +1,428 @@ +/* + * db-export.c: Support for exporting data suitable for import to a database + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
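+ * + * All export_* callbacks used below are optional: each db_export__*() + * helper first assigns a sequential *_db_id and only then invokes the + * corresponding callback if the tool registered one.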
diff --git a/kernel/tools/perf/util/db-export.c b/kernel/tools/perf/util/db-export.c
new file mode 100644
index 000000000..bb39a3ffc
--- /dev/null
+++ b/kernel/tools/perf/util/db-export.c
@@ -0,0 +1,428 @@
+/*
+ * db-export.c: Support for exporting data suitable for import to a database
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <errno.h>
+
+#include "evsel.h"
+#include "machine.h"
+#include "thread.h"
+#include "comm.h"
+#include "symbol.h"
+#include "event.h"
+#include "util.h"
+#include "thread-stack.h"
+#include "db-export.h"
+
+struct deferred_export {
+	struct list_head node;
+	struct comm *comm;
+};
+
+static int db_export__deferred(struct db_export *dbe)
+{
+	struct deferred_export *de;
+	int err;
+
+	while (!list_empty(&dbe->deferred)) {
+		de = list_entry(dbe->deferred.next, struct deferred_export,
+				node);
+		err = dbe->export_comm(dbe, de->comm);
+		list_del(&de->node);
+		free(de);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static void db_export__free_deferred(struct db_export *dbe)
+{
+	struct deferred_export *de;
+
+	while (!list_empty(&dbe->deferred)) {
+		de = list_entry(dbe->deferred.next, struct deferred_export,
+				node);
+		list_del(&de->node);
+		free(de);
+	}
+}
+
+static int db_export__defer_comm(struct db_export *dbe, struct comm *comm)
+{
+	struct deferred_export *de;
+
+	de = zalloc(sizeof(struct deferred_export));
+	if (!de)
+		return -ENOMEM;
+
+	de->comm = comm;
+	list_add_tail(&de->node, &dbe->deferred);
+
+	return 0;
+}
+
+int db_export__init(struct db_export *dbe)
+{
+	memset(dbe, 0, sizeof(struct db_export));
+	INIT_LIST_HEAD(&dbe->deferred);
+	return 0;
+}
+
+int db_export__flush(struct db_export *dbe)
+{
+	return db_export__deferred(dbe);
+}
+
+void db_export__exit(struct db_export *dbe)
+{
+	db_export__free_deferred(dbe);
+	call_return_processor__free(dbe->crp);
+	dbe->crp = NULL;
+}
+
+int db_export__evsel(struct db_export *dbe, struct perf_evsel *evsel)
+{
+	if (evsel->db_id)
+		return 0;
+
+	evsel->db_id = ++dbe->evsel_last_db_id;
+
+	if (dbe->export_evsel)
+		return dbe->export_evsel(dbe, evsel);
+
+	return 0;
+}
+
+int db_export__machine(struct db_export *dbe, struct machine *machine)
+{
+	if (machine->db_id)
+		return 0;
+
+	machine->db_id = ++dbe->machine_last_db_id;
+
+	if (dbe->export_machine)
+		return dbe->export_machine(dbe, machine);
+
+	return 0;
+}
+
+int db_export__thread(struct db_export *dbe, struct thread *thread,
+		      struct machine *machine, struct comm *comm)
+{
+	u64 main_thread_db_id = 0;
+	int err;
+
+	if (thread->db_id)
+		return 0;
+
+	thread->db_id = ++dbe->thread_last_db_id;
+
+	if (thread->pid_ != -1) {
+		struct thread *main_thread;
+
+		if (thread->pid_ == thread->tid) {
+			main_thread = thread;
+		} else {
+			main_thread = machine__findnew_thread(machine,
+							      thread->pid_,
+							      thread->pid_);
+			if (!main_thread)
+				return -ENOMEM;
+			err = db_export__thread(dbe, main_thread, machine,
+						comm);
+			if (err)
+				return err;
+			if (comm) {
+				err = db_export__comm_thread(dbe, comm, thread);
+				if (err)
+					return err;
+			}
+		}
+		main_thread_db_id = main_thread->db_id;
+	}
+
+	if (dbe->export_thread)
+		return dbe->export_thread(dbe, thread, main_thread_db_id,
+					  machine);
+
+	return 0;
+}
+
+int db_export__comm(struct db_export *dbe, struct comm *comm,
+		    struct thread *main_thread)
+{
+	int err;
+
+	if (comm->db_id)
+		return 0;
+
+	comm->db_id = ++dbe->comm_last_db_id;
+
+	if (dbe->export_comm) {
+		if (main_thread->comm_set)
+			err = dbe->export_comm(dbe, comm);
+		else
+			err = db_export__defer_comm(dbe, comm);
+		if (err)
+			return err;
+	}
+
+	return db_export__comm_thread(dbe, comm, main_thread);
+}
+
+int db_export__comm_thread(struct db_export *dbe, struct comm *comm,
+			   struct thread *thread)
+{
+	u64 db_id;
+
+	db_id = ++dbe->comm_thread_last_db_id;
+
+	if (dbe->export_comm_thread)
+		return dbe->export_comm_thread(dbe, db_id, comm, thread);
+
+	return 0;
+}
+
+int
db_export__dso(struct db_export *dbe, struct dso *dso, + struct machine *machine) +{ + if (dso->db_id) + return 0; + + dso->db_id = ++dbe->dso_last_db_id; + + if (dbe->export_dso) + return dbe->export_dso(dbe, dso, machine); + + return 0; +} + +int db_export__symbol(struct db_export *dbe, struct symbol *sym, + struct dso *dso) +{ + u64 *sym_db_id = symbol__priv(sym); + + if (*sym_db_id) + return 0; + + *sym_db_id = ++dbe->symbol_last_db_id; + + if (dbe->export_symbol) + return dbe->export_symbol(dbe, sym, dso); + + return 0; +} + +static struct thread *get_main_thread(struct machine *machine, struct thread *thread) +{ + if (thread->pid_ == thread->tid) + return thread; + + if (thread->pid_ == -1) + return NULL; + + return machine__find_thread(machine, thread->pid_, thread->pid_); +} + +static int db_ids_from_al(struct db_export *dbe, struct addr_location *al, + u64 *dso_db_id, u64 *sym_db_id, u64 *offset) +{ + int err; + + if (al->map) { + struct dso *dso = al->map->dso; + + err = db_export__dso(dbe, dso, al->machine); + if (err) + return err; + *dso_db_id = dso->db_id; + + if (!al->sym) { + al->sym = symbol__new(al->addr, 0, 0, "unknown"); + if (al->sym) + symbols__insert(&dso->symbols[al->map->type], + al->sym); + } + + if (al->sym) { + u64 *db_id = symbol__priv(al->sym); + + err = db_export__symbol(dbe, al->sym, dso); + if (err) + return err; + *sym_db_id = *db_id; + *offset = al->addr - al->sym->start; + } + } + + return 0; +} + +int db_export__branch_type(struct db_export *dbe, u32 branch_type, + const char *name) +{ + if (dbe->export_branch_type) + return dbe->export_branch_type(dbe, branch_type, name); + + return 0; +} + +int db_export__sample(struct db_export *dbe, union perf_event *event, + struct perf_sample *sample, struct perf_evsel *evsel, + struct addr_location *al) +{ + struct thread* thread = al->thread; + struct export_sample es = { + .event = event, + .sample = sample, + .evsel = evsel, + .al = al, + }; + struct thread *main_thread; + struct comm *comm = NULL; + int err; + + err = db_export__evsel(dbe, evsel); + if (err) + return err; + + err = db_export__machine(dbe, al->machine); + if (err) + return err; + + main_thread = get_main_thread(al->machine, thread); + if (main_thread) + comm = machine__thread_exec_comm(al->machine, main_thread); + + err = db_export__thread(dbe, thread, al->machine, comm); + if (err) + return err; + + if (comm) { + err = db_export__comm(dbe, comm, main_thread); + if (err) + return err; + es.comm_db_id = comm->db_id; + } + + es.db_id = ++dbe->sample_last_db_id; + + err = db_ids_from_al(dbe, al, &es.dso_db_id, &es.sym_db_id, &es.offset); + if (err) + return err; + + if ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) && + sample_addr_correlates_sym(&evsel->attr)) { + struct addr_location addr_al; + + perf_event__preprocess_sample_addr(event, sample, thread, &addr_al); + err = db_ids_from_al(dbe, &addr_al, &es.addr_dso_db_id, + &es.addr_sym_db_id, &es.addr_offset); + if (err) + return err; + if (dbe->crp) { + err = thread_stack__process(thread, comm, sample, al, + &addr_al, es.db_id, + dbe->crp); + if (err) + return err; + } + } + + if (dbe->export_sample) + return dbe->export_sample(dbe, &es); + + return 0; +} + +static struct { + u32 branch_type; + const char *name; +} branch_types[] = { + {0, "no branch"}, + {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL, "call"}, + {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN, "return"}, + {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL, "conditional jump"}, + {PERF_IP_FLAG_BRANCH, "unconditional jump"}, + 
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_INTERRUPT, + "software interrupt"}, + {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_INTERRUPT, + "return from interrupt"}, + {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_SYSCALLRET, + "system call"}, + {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_SYSCALLRET, + "return from system call"}, + {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_ASYNC, "asynchronous branch"}, + {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC | + PERF_IP_FLAG_INTERRUPT, "hardware interrupt"}, + {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT, "transaction abort"}, + {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_BEGIN, "trace begin"}, + {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_END, "trace end"}, + {0, NULL} +}; + +int db_export__branch_types(struct db_export *dbe) +{ + int i, err = 0; + + for (i = 0; branch_types[i].name ; i++) { + err = db_export__branch_type(dbe, branch_types[i].branch_type, + branch_types[i].name); + if (err) + break; + } + return err; +} + +int db_export__call_path(struct db_export *dbe, struct call_path *cp) +{ + int err; + + if (cp->db_id) + return 0; + + if (cp->parent) { + err = db_export__call_path(dbe, cp->parent); + if (err) + return err; + } + + cp->db_id = ++dbe->call_path_last_db_id; + + if (dbe->export_call_path) + return dbe->export_call_path(dbe, cp); + + return 0; +} + +int db_export__call_return(struct db_export *dbe, struct call_return *cr) +{ + int err; + + if (cr->db_id) + return 0; + + err = db_export__call_path(dbe, cr->cp); + if (err) + return err; + + cr->db_id = ++dbe->call_return_last_db_id; + + if (dbe->export_call_return) + return dbe->export_call_return(dbe, cr); + + return 0; +} diff --git a/kernel/tools/perf/util/db-export.h b/kernel/tools/perf/util/db-export.h new file mode 100644 index 000000000..25e22fd76 --- /dev/null +++ b/kernel/tools/perf/util/db-export.h @@ -0,0 +1,106 @@ +/* + * db-export.h: Support for exporting data suitable for import to a database + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ *
+ */
+
+#ifndef __PERF_DB_EXPORT_H
+#define __PERF_DB_EXPORT_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+struct perf_evsel;
+struct machine;
+struct thread;
+struct comm;
+struct dso;
+struct perf_sample;
+struct addr_location;
+struct call_return_processor;
+struct call_path;
+struct call_return;
+
+struct export_sample {
+	union perf_event *event;
+	struct perf_sample *sample;
+	struct perf_evsel *evsel;
+	struct addr_location *al;
+	u64 db_id;
+	u64 comm_db_id;
+	u64 dso_db_id;
+	u64 sym_db_id;
+	u64 offset; /* ip offset from symbol start */
+	u64 addr_dso_db_id;
+	u64 addr_sym_db_id;
+	u64 addr_offset; /* addr offset from symbol start */
+};
+
+struct db_export {
+	int (*export_evsel)(struct db_export *dbe, struct perf_evsel *evsel);
+	int (*export_machine)(struct db_export *dbe, struct machine *machine);
+	int (*export_thread)(struct db_export *dbe, struct thread *thread,
+			     u64 main_thread_db_id, struct machine *machine);
+	int (*export_comm)(struct db_export *dbe, struct comm *comm);
+	int (*export_comm_thread)(struct db_export *dbe, u64 db_id,
+				  struct comm *comm, struct thread *thread);
+	int (*export_dso)(struct db_export *dbe, struct dso *dso,
+			  struct machine *machine);
+	int (*export_symbol)(struct db_export *dbe, struct symbol *sym,
+			     struct dso *dso);
+	int (*export_branch_type)(struct db_export *dbe, u32 branch_type,
+				  const char *name);
+	int (*export_sample)(struct db_export *dbe, struct export_sample *es);
+	int (*export_call_path)(struct db_export *dbe, struct call_path *cp);
+	int (*export_call_return)(struct db_export *dbe,
+				  struct call_return *cr);
+	struct call_return_processor *crp;
+	u64 evsel_last_db_id;
+	u64 machine_last_db_id;
+	u64 thread_last_db_id;
+	u64 comm_last_db_id;
+	u64 comm_thread_last_db_id;
+	u64 dso_last_db_id;
+	u64 symbol_last_db_id;
+	u64 sample_last_db_id;
+	u64 call_path_last_db_id;
+	u64 call_return_last_db_id;
+	struct list_head deferred;
+};
+
+int db_export__init(struct db_export *dbe);
+int db_export__flush(struct db_export *dbe);
+void db_export__exit(struct db_export *dbe);
+int db_export__evsel(struct db_export *dbe, struct perf_evsel *evsel);
+int db_export__machine(struct db_export *dbe, struct machine *machine);
+int db_export__thread(struct db_export *dbe, struct thread *thread,
+		      struct machine *machine, struct comm *comm);
+int db_export__comm(struct db_export *dbe, struct comm *comm,
+		    struct thread *main_thread);
+int db_export__comm_thread(struct db_export *dbe, struct comm *comm,
+			   struct thread *thread);
+int db_export__dso(struct db_export *dbe, struct dso *dso,
+		   struct machine *machine);
+int db_export__symbol(struct db_export *dbe, struct symbol *sym,
+		      struct dso *dso);
+int db_export__branch_type(struct db_export *dbe, u32 branch_type,
+			   const char *name);
+int db_export__sample(struct db_export *dbe, union perf_event *event,
+		      struct perf_sample *sample, struct perf_evsel *evsel,
+		      struct addr_location *al);
+
+int db_export__branch_types(struct db_export *dbe);
+
+int db_export__call_path(struct db_export *dbe, struct call_path *cp);
+int db_export__call_return(struct db_export *dbe, struct call_return *cr);
+
+#endif
diff --git a/kernel/tools/perf/util/debug.c b/kernel/tools/perf/util/debug.c
new file mode 100644
index 000000000..2da5581ec
--- /dev/null
+++ b/kernel/tools/perf/util/debug.c
@@ -0,0 +1,189 @@
+/* For general debugging purposes */
+
+#include "../perf.h"
+
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "cache.h"
+#include "color.h"
+#include "event.h"
+#include "debug.h"
+#include "util.h"
+#include "target.h"
+
+#define NSECS_PER_SEC 1000000000ULL +#define NSECS_PER_USEC 1000ULL + +int verbose; +bool dump_trace = false, quiet = false; +int debug_ordered_events; +static int redirect_to_stderr; +int debug_data_convert; + +static int _eprintf(int level, int var, const char *fmt, va_list args) +{ + int ret = 0; + + if (var >= level) { + if (use_browser >= 1 && !redirect_to_stderr) + ui_helpline__vshow(fmt, args); + else + ret = vfprintf(stderr, fmt, args); + } + + return ret; +} + +int eprintf(int level, int var, const char *fmt, ...) +{ + va_list args; + int ret; + + va_start(args, fmt); + ret = _eprintf(level, var, fmt, args); + va_end(args); + + return ret; +} + +static int __eprintf_time(u64 t, const char *fmt, va_list args) +{ + int ret = 0; + u64 secs, usecs, nsecs = t; + + secs = nsecs / NSECS_PER_SEC; + nsecs -= secs * NSECS_PER_SEC; + usecs = nsecs / NSECS_PER_USEC; + + ret = fprintf(stderr, "[%13" PRIu64 ".%06" PRIu64 "] ", + secs, usecs); + ret += vfprintf(stderr, fmt, args); + return ret; +} + +int eprintf_time(int level, int var, u64 t, const char *fmt, ...) +{ + int ret = 0; + va_list args; + + if (var >= level) { + va_start(args, fmt); + ret = __eprintf_time(t, fmt, args); + va_end(args); + } + + return ret; +} + +/* + * Overloading libtraceevent standard info print + * function, display with -v in perf. + */ +void pr_stat(const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + _eprintf(1, verbose, fmt, args); + va_end(args); + eprintf(1, verbose, "\n"); +} + +int dump_printf(const char *fmt, ...) +{ + va_list args; + int ret = 0; + + if (dump_trace) { + va_start(args, fmt); + ret = vprintf(fmt, args); + va_end(args); + } + + return ret; +} + +void trace_event(union perf_event *event) +{ + unsigned char *raw_event = (void *)event; + const char *color = PERF_COLOR_BLUE; + int i, j; + + if (!dump_trace) + return; + + printf("."); + color_fprintf(stdout, color, "\n. ... raw event: size %d bytes\n", + event->header.size); + + for (i = 0; i < event->header.size; i++) { + if ((i & 15) == 0) { + printf("."); + color_fprintf(stdout, color, " %04x: ", i); + } + + color_fprintf(stdout, color, " %02x", raw_event[i]); + + if (((i & 15) == 15) || i == event->header.size-1) { + color_fprintf(stdout, color, " "); + for (j = 0; j < 15-(i & 15); j++) + color_fprintf(stdout, color, " "); + for (j = i & ~15; j <= i; j++) { + color_fprintf(stdout, color, "%c", + isprint(raw_event[j]) ? + raw_event[j] : '.'); + } + color_fprintf(stdout, color, "\n"); + } + } + printf(".\n"); +} + +static struct debug_variable { + const char *name; + int *ptr; +} debug_variables[] = { + { .name = "verbose", .ptr = &verbose }, + { .name = "ordered-events", .ptr = &debug_ordered_events}, + { .name = "stderr", .ptr = &redirect_to_stderr}, + { .name = "data-convert", .ptr = &debug_data_convert }, + { .name = NULL, } +}; + +int perf_debug_option(const char *str) +{ + struct debug_variable *var = &debug_variables[0]; + char *vstr, *s = strdup(str); + int v = 1; + + vstr = strchr(s, '='); + if (vstr) + *vstr++ = 0; + + while (var->name) { + if (!strcmp(s, var->name)) + break; + var++; + } + + if (!var->name) { + pr_err("Unknown debug variable name '%s'\n", s); + free(s); + return -1; + } + + if (vstr) { + v = atoi(vstr); + /* + * Allow only values in range (0, 10), + * otherwise set 0. + */ + v = (v < 0) || (v > 10) ? 
0 : v;
+	}
+
+	*var->ptr = v;
+	free(s);
+	return 0;
+}
diff --git a/kernel/tools/perf/util/debug.h b/kernel/tools/perf/util/debug.h
new file mode 100644
index 000000000..caac2fdc6
--- /dev/null
+++ b/kernel/tools/perf/util/debug.h
@@ -0,0 +1,56 @@
+/* For debugging general purposes */
+#ifndef __PERF_DEBUG_H
+#define __PERF_DEBUG_H
+
+#include <stdbool.h>
+#include <string.h>
+#include "event.h"
+#include "../ui/helpline.h"
+#include "../ui/progress.h"
+#include "../ui/util.h"
+
+extern int verbose;
+extern bool quiet, dump_trace;
+extern int debug_ordered_events;
+extern int debug_data_convert;
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+#define pr_err(fmt, ...) \
+	eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+	eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+	eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug(fmt, ...) \
+	eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debugN(n, fmt, ...) \
+	eprintf(n, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
+
+#define pr_time_N(n, var, t, fmt, ...) \
+	eprintf_time(n, var, t, fmt, ##__VA_ARGS__)
+
+#define pr_oe_time(t, fmt, ...)  pr_time_N(1, debug_ordered_events, t, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_oe_time2(t, fmt, ...) pr_time_N(2, debug_ordered_events, t, pr_fmt(fmt), ##__VA_ARGS__)
+
+#define STRERR_BUFSIZE	128	/* For the buffer size of strerror_r */
+
+int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+void trace_event(union perf_event *event);
+
+int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
+int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
+
+void pr_stat(const char *fmt, ...);
+
+int eprintf(int level, int var, const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+int eprintf_time(int level, int var, u64 t, const char *fmt, ...)
+	__attribute__((format(printf, 4, 5)));
+
+int perf_debug_option(const char *str);
+
+#endif	/* __PERF_DEBUG_H */
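
The pr_* macros above all funnel into eprintf(), gated on the current value of the named debug variable, and perf_debug_option() is the parser behind perf's --debug command-line option. As a quick illustration (hypothetical snippet, not part of this patch; debug_example is made up):

	#include "util/debug.h"

	/* Sketch: raise verbosity at runtime, then use the pr_* macros. */
	static void debug_example(void)
	{
		/* roughly what 'perf --debug verbose=2 ...' does internally */
		perf_debug_option("verbose=2");

		pr_err("level 0: always printed\n");
		pr_debug("level 1: printed because verbose >= 1\n");
		pr_debug2("level 2: printed because verbose >= 2\n");
	}
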
diff --git a/kernel/tools/perf/util/dso.c b/kernel/tools/perf/util/dso.c
new file mode 100644
index 000000000..fc0ddd579
--- /dev/null
+++ b/kernel/tools/perf/util/dso.c
@@ -0,0 +1,1172 @@
+#include <asm/bug.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include "symbol.h"
+#include "dso.h"
+#include "machine.h"
+#include "util.h"
+#include "debug.h"
+
+char dso__symtab_origin(const struct dso *dso)
+{
+	static const char origin[] = {
+		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
+		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
+		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
+		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
+		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
+		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
+		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
+		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
+		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
+		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
+		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
+		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
+		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
+		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
+		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
+		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
+	};
+
+	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
+		return '!';
+	return origin[dso->symtab_type];
+}
+
+int dso__read_binary_type_filename(const struct dso *dso,
+				   enum dso_binary_type type,
+				   char *root_dir, char *filename, size_t size)
+{
+	char build_id_hex[BUILD_ID_SIZE * 2 + 1];
+	int ret = 0;
+	size_t len;
+
+	switch (type) {
+	case DSO_BINARY_TYPE__DEBUGLINK: {
+		char *debuglink;
+
+		len = __symbol__join_symfs(filename, size, dso->long_name);
+		debuglink = filename + len;
+		while (debuglink != filename && *debuglink != '/')
+			debuglink--;
+		if (*debuglink == '/')
+			debuglink++;
+		ret = filename__read_debuglink(filename, debuglink,
+					       size - (debuglink - filename));
+		}
+		break;
+	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
+		/* skip the locally configured cache if a symfs is given */
+		if (symbol_conf.symfs[0] ||
+		    (dso__build_id_filename(dso, filename, size) == NULL))
+			ret = -1;
+		break;
+
+	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
+		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
+		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
+		break;
+
+	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
+		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
+		snprintf(filename + len, size - len, "%s", dso->long_name);
+		break;
+
+	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
+	{
+		const char *last_slash;
+		size_t dir_size;
+
+		last_slash = dso->long_name + dso->long_name_len;
+		while (last_slash != dso->long_name && *last_slash != '/')
+			last_slash--;
+
+		len = __symbol__join_symfs(filename, size, "");
+		dir_size = last_slash - dso->long_name + 2;
+		if (dir_size > (size - len)) {
+			ret = -1;
+			break;
+		}
+		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
+		len += scnprintf(filename + len, size - len, ".debug%s",
+				 last_slash);
+		break;
+	}
+
+	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
+		if (!dso->has_build_id) {
+			ret = -1;
+			break;
+		}
+
+		build_id__sprintf(dso->build_id,
+				  sizeof(dso->build_id),
+				  build_id_hex);
+		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
+		snprintf(filename + len, size - len, "%.2s/%s.debug",
+			 build_id_hex, build_id_hex + 2);
+		break;
+
+	case DSO_BINARY_TYPE__VMLINUX:
+	case DSO_BINARY_TYPE__GUEST_VMLINUX:
+	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
+		__symbol__join_symfs(filename, size, dso->long_name);
+		break;
+
+	case DSO_BINARY_TYPE__GUEST_KMODULE:
+	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
+		path__join3(filename, size, symbol_conf.symfs,
+			    root_dir, dso->long_name);
+		break;
+
+	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
+	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
+		__symbol__join_symfs(filename, size, dso->long_name);
+		break;
+
+	case DSO_BINARY_TYPE__KCORE:
+	case DSO_BINARY_TYPE__GUEST_KCORE:
+		snprintf(filename, size, "%s", dso->long_name);
+		break;
+
+	default:
+	case DSO_BINARY_TYPE__KALLSYMS:
+	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
+	case DSO_BINARY_TYPE__JAVA_JIT:
+	case DSO_BINARY_TYPE__NOT_FOUND:
+		ret = -1;
+		break;
+	}
+
+	return ret;
+}
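
Note that dso__read_binary_type_filename() only composes a candidate path; it does not check that the file exists. A hypothetical caller could look like this (illustration only, not part of this patch; locate_debuginfo is made up, and the BUILDID_DEBUGINFO case fails unless the dso has a build id):

	#include <limits.h>
	#include <stdio.h>
	#include "util/dso.h"

	/* Sketch: ask where this dso's debuginfo should live on disk. */
	static void locate_debuginfo(struct dso *dso)
	{
		char path[PATH_MAX];

		/* root_dir is only used for guest kernel modules */
		if (!dso__read_binary_type_filename(dso,
				DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
				NULL, path, sizeof(path)))
			/* e.g. /usr/lib/debug/.build-id/ab/cdef....debug */
			printf("candidate: %s\n", path);
	}
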
+
+static const struct {
+	const char *fmt;
+	int (*decompress)(const char *input, int output);
+} compressions[] = {
+#ifdef HAVE_ZLIB_SUPPORT
+	{ "gz", gzip_decompress_to_file },
+#endif
+#ifdef HAVE_LZMA_SUPPORT
+	{ "xz", lzma_decompress_to_file },
+#endif
+	{ NULL, NULL },
+};
+
+bool is_supported_compression(const char *ext)
+{
+	unsigned i;
+
+	for (i = 0; compressions[i].fmt; i++) {
+		if (!strcmp(ext, compressions[i].fmt))
+			return true;
+	}
+	return false;
+}
+
+bool is_kernel_module(const char *pathname)
+{
+	struct kmod_path m;
+
+	if (kmod_path__parse(&m, pathname))
+		return false;
+
+	return m.kmod;
+}
+
+bool decompress_to_file(const char *ext, const char *filename, int output_fd)
+{
+	unsigned i;
+
+	for (i = 0; compressions[i].fmt; i++) {
+		if (!strcmp(ext, compressions[i].fmt))
+			return !compressions[i].decompress(filename,
+							   output_fd);
+	}
+	return false;
+}
+
+bool dso__needs_decompress(struct dso *dso)
+{
+	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
+		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
+}
+
+/*
+ * Parses the kernel module specified in @path and updates
+ * the @m argument like:
+ *
+ * @comp - true if @path contains a supported compression suffix,
+ *         false otherwise
+ * @kmod - true if @path contains a '.ko' suffix in the right position,
+ *         false otherwise
+ * @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed base
+ *         name of the kernel module without suffixes, otherwise the
+ *         strdup-ed base name of @path
+ * @ext  - if (@alloc_ext && @comp) is true, it contains a strdup-ed copy of
+ *         the compression suffix
+ *
+ * Returns 0 if there's no strdup error, -ENOMEM otherwise.
+ */
+int __kmod_path__parse(struct kmod_path *m, const char *path,
+		       bool alloc_name, bool alloc_ext)
+{
+	const char *name = strrchr(path, '/');
+	const char *ext  = strrchr(path, '.');
+
+	memset(m, 0x0, sizeof(*m));
+	name = name ? name + 1 : path;
+
+	/* No extension, just return name. */
+	if (ext == NULL) {
+		if (alloc_name) {
+			m->name = strdup(name);
+			return m->name ? 0 : -ENOMEM;
+		}
+		return 0;
+	}
+
+	if (is_supported_compression(ext + 1)) {
+		m->comp = true;
+		ext -= 3;
+	}
+
+	/* Check .ko extension only if there's enough name left. */
+	if (ext > name)
+		m->kmod = !strncmp(ext, ".ko", 3);
+
+	if (alloc_name) {
+		if (m->kmod) {
+			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
+				return -ENOMEM;
+		} else {
+			if (asprintf(&m->name, "%s", name) == -1)
+				return -ENOMEM;
+		}
+
+		strxfrchar(m->name, '-', '_');
+	}
+
+	if (alloc_ext && m->comp) {
+		m->ext = strdup(ext + 4);
+		if (!m->ext) {
+			free((void *) m->name);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Global list of open DSOs and the counter.
+ */ +static LIST_HEAD(dso__data_open); +static long dso__data_open_cnt; + +static void dso__list_add(struct dso *dso) +{ + list_add_tail(&dso->data.open_entry, &dso__data_open); + dso__data_open_cnt++; +} + +static void dso__list_del(struct dso *dso) +{ + list_del(&dso->data.open_entry); + WARN_ONCE(dso__data_open_cnt <= 0, + "DSO data fd counter out of bounds."); + dso__data_open_cnt--; +} + +static void close_first_dso(void); + +static int do_open(char *name) +{ + int fd; + char sbuf[STRERR_BUFSIZE]; + + do { + fd = open(name, O_RDONLY); + if (fd >= 0) + return fd; + + pr_debug("dso open failed: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); + if (!dso__data_open_cnt || errno != EMFILE) + break; + + close_first_dso(); + } while (1); + + return -1; +} + +static int __open_dso(struct dso *dso, struct machine *machine) +{ + int fd; + char *root_dir = (char *)""; + char *name = malloc(PATH_MAX); + + if (!name) + return -ENOMEM; + + if (machine) + root_dir = machine->root_dir; + + if (dso__read_binary_type_filename(dso, dso->binary_type, + root_dir, name, PATH_MAX)) { + free(name); + return -EINVAL; + } + + fd = do_open(name); + free(name); + return fd; +} + +static void check_data_close(void); + +/** + * dso_close - Open DSO data file + * @dso: dso object + * + * Open @dso's data file descriptor and updates + * list/count of open DSO objects. + */ +static int open_dso(struct dso *dso, struct machine *machine) +{ + int fd = __open_dso(dso, machine); + + if (fd >= 0) { + dso__list_add(dso); + /* + * Check if we crossed the allowed number + * of opened DSOs and close one if needed. + */ + check_data_close(); + } + + return fd; +} + +static void close_data_fd(struct dso *dso) +{ + if (dso->data.fd >= 0) { + close(dso->data.fd); + dso->data.fd = -1; + dso->data.file_size = 0; + dso__list_del(dso); + } +} + +/** + * dso_close - Close DSO data file + * @dso: dso object + * + * Close @dso's data file descriptor and updates + * list/count of open DSO objects. + */ +static void close_dso(struct dso *dso) +{ + close_data_fd(dso); +} + +static void close_first_dso(void) +{ + struct dso *dso; + + dso = list_first_entry(&dso__data_open, struct dso, data.open_entry); + close_dso(dso); +} + +static rlim_t get_fd_limit(void) +{ + struct rlimit l; + rlim_t limit = 0; + + /* Allow half of the current open fd limit. */ + if (getrlimit(RLIMIT_NOFILE, &l) == 0) { + if (l.rlim_cur == RLIM_INFINITY) + limit = l.rlim_cur; + else + limit = l.rlim_cur / 2; + } else { + pr_err("failed to get fd limit\n"); + limit = 1; + } + + return limit; +} + +static bool may_cache_fd(void) +{ + static rlim_t limit; + + if (!limit) + limit = get_fd_limit(); + + if (limit == RLIM_INFINITY) + return true; + + return limit > (rlim_t) dso__data_open_cnt; +} + +/* + * Check and close LRU dso if we crossed allowed limit + * for opened dso file descriptors. The limit is half + * of the RLIMIT_NOFILE files opened. +*/ +static void check_data_close(void) +{ + bool cache_fd = may_cache_fd(); + + if (!cache_fd) + close_first_dso(); +} + +/** + * dso__data_close - Close DSO data file + * @dso: dso object + * + * External interface to close @dso's data file descriptor. + */ +void dso__data_close(struct dso *dso) +{ + close_dso(dso); +} + +/** + * dso__data_fd - Get dso's data file descriptor + * @dso: dso object + * @machine: machine object + * + * External interface to find dso's file, open it and + * returns file descriptor. 
+ */ +int dso__data_fd(struct dso *dso, struct machine *machine) +{ + enum dso_binary_type binary_type_data[] = { + DSO_BINARY_TYPE__BUILD_ID_CACHE, + DSO_BINARY_TYPE__SYSTEM_PATH_DSO, + DSO_BINARY_TYPE__NOT_FOUND, + }; + int i = 0; + + if (dso->data.status == DSO_DATA_STATUS_ERROR) + return -1; + + if (dso->data.fd >= 0) + goto out; + + if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) { + dso->data.fd = open_dso(dso, machine); + goto out; + } + + do { + dso->binary_type = binary_type_data[i++]; + + dso->data.fd = open_dso(dso, machine); + if (dso->data.fd >= 0) + goto out; + + } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND); +out: + if (dso->data.fd >= 0) + dso->data.status = DSO_DATA_STATUS_OK; + else + dso->data.status = DSO_DATA_STATUS_ERROR; + + return dso->data.fd; +} + +bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by) +{ + u32 flag = 1 << by; + + if (dso->data.status_seen & flag) + return true; + + dso->data.status_seen |= flag; + + return false; +} + +static void +dso_cache__free(struct rb_root *root) +{ + struct rb_node *next = rb_first(root); + + while (next) { + struct dso_cache *cache; + + cache = rb_entry(next, struct dso_cache, rb_node); + next = rb_next(&cache->rb_node); + rb_erase(&cache->rb_node, root); + free(cache); + } +} + +static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset) +{ + struct rb_node * const *p = &root->rb_node; + const struct rb_node *parent = NULL; + struct dso_cache *cache; + + while (*p != NULL) { + u64 end; + + parent = *p; + cache = rb_entry(parent, struct dso_cache, rb_node); + end = cache->offset + DSO__DATA_CACHE_SIZE; + + if (offset < cache->offset) + p = &(*p)->rb_left; + else if (offset >= end) + p = &(*p)->rb_right; + else + return cache; + } + return NULL; +} + +static void +dso_cache__insert(struct rb_root *root, struct dso_cache *new) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct dso_cache *cache; + u64 offset = new->offset; + + while (*p != NULL) { + u64 end; + + parent = *p; + cache = rb_entry(parent, struct dso_cache, rb_node); + end = cache->offset + DSO__DATA_CACHE_SIZE; + + if (offset < cache->offset) + p = &(*p)->rb_left; + else if (offset >= end) + p = &(*p)->rb_right; + } + + rb_link_node(&new->rb_node, parent, p); + rb_insert_color(&new->rb_node, root); +} + +static ssize_t +dso_cache__memcpy(struct dso_cache *cache, u64 offset, + u8 *data, u64 size) +{ + u64 cache_offset = offset - cache->offset; + u64 cache_size = min(cache->size - cache_offset, size); + + memcpy(data, cache->data + cache_offset, cache_size); + return cache_size; +} + +static ssize_t +dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size) +{ + struct dso_cache *cache; + ssize_t ret; + + do { + u64 cache_offset; + + ret = -ENOMEM; + + cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE); + if (!cache) + break; + + cache_offset = offset & DSO__DATA_CACHE_MASK; + + ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset); + if (ret <= 0) + break; + + cache->offset = cache_offset; + cache->size = ret; + dso_cache__insert(&dso->data.cache, cache); + + ret = dso_cache__memcpy(cache, offset, data, size); + + } while (0); + + if (ret <= 0) + free(cache); + + return ret; +} + +static ssize_t dso_cache_read(struct dso *dso, u64 offset, + u8 *data, ssize_t size) +{ + struct dso_cache *cache; + + cache = dso_cache__find(&dso->data.cache, offset); + if (cache) + return dso_cache__memcpy(cache, offset, data, size); + else + return 
dso_cache__read(dso, offset, data, size); +} + +/* + * Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks + * in the rb_tree. Any read to already cached data is served + * by cached data. + */ +static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size) +{ + ssize_t r = 0; + u8 *p = data; + + do { + ssize_t ret; + + ret = dso_cache_read(dso, offset, p, size); + if (ret < 0) + return ret; + + /* Reached EOF, return what we have. */ + if (!ret) + break; + + BUG_ON(ret > size); + + r += ret; + p += ret; + offset += ret; + size -= ret; + + } while (size); + + return r; +} + +static int data_file_size(struct dso *dso) +{ + struct stat st; + char sbuf[STRERR_BUFSIZE]; + + if (!dso->data.file_size) { + if (fstat(dso->data.fd, &st)) { + pr_err("dso mmap failed, fstat: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); + return -1; + } + dso->data.file_size = st.st_size; + } + + return 0; +} + +/** + * dso__data_size - Return dso data size + * @dso: dso object + * @machine: machine object + * + * Return: dso data size + */ +off_t dso__data_size(struct dso *dso, struct machine *machine) +{ + int fd; + + fd = dso__data_fd(dso, machine); + if (fd < 0) + return fd; + + if (data_file_size(dso)) + return -1; + + /* For now just estimate dso data size is close to file size */ + return dso->data.file_size; +} + +static ssize_t data_read_offset(struct dso *dso, u64 offset, + u8 *data, ssize_t size) +{ + if (data_file_size(dso)) + return -1; + + /* Check the offset sanity. */ + if (offset > dso->data.file_size) + return -1; + + if (offset + size < offset) + return -1; + + return cached_read(dso, offset, data, size); +} + +/** + * dso__data_read_offset - Read data from dso file offset + * @dso: dso object + * @machine: machine object + * @offset: file offset + * @data: buffer to store data + * @size: size of the @data buffer + * + * External interface to read data from dso file offset. Open + * dso data file and use cached_read to get the data. + */ +ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, + u64 offset, u8 *data, ssize_t size) +{ + if (dso__data_fd(dso, machine) < 0) + return -1; + + return data_read_offset(dso, offset, data, size); +} + +/** + * dso__data_read_addr - Read data from dso address + * @dso: dso object + * @machine: machine object + * @add: virtual memory address + * @data: buffer to store data + * @size: size of the @data buffer + * + * External interface to read data from dso address. + */ +ssize_t dso__data_read_addr(struct dso *dso, struct map *map, + struct machine *machine, u64 addr, + u8 *data, ssize_t size) +{ + u64 offset = map->map_ip(map, addr); + return dso__data_read_offset(dso, machine, offset, data, size); +} + +struct map *dso__new_map(const char *name) +{ + struct map *map = NULL; + struct dso *dso = dso__new(name); + + if (dso) + map = map__new2(0, dso, MAP__FUNCTION); + + return map; +} + +struct dso *dso__kernel_findnew(struct machine *machine, const char *name, + const char *short_name, int dso_type) +{ + /* + * The kernel dso could be created by build_id processing. + */ + struct dso *dso = __dsos__findnew(&machine->kernel_dsos, name); + + /* + * We need to run this in all cases, since during the build_id + * processing we had no idea this was the kernel dso. + */ + if (dso != NULL) { + dso__set_short_name(dso, short_name, false); + dso->kernel = dso_type; + } + + return dso; +} + +/* + * Find a matching entry and/or link current entry to RB tree. 
+ * Either one of the dso or name parameter must be non-NULL or the + * function will not work. + */ +static struct dso *dso__findlink_by_longname(struct rb_root *root, + struct dso *dso, const char *name) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + + if (!name) + name = dso->long_name; + /* + * Find node with the matching name + */ + while (*p) { + struct dso *this = rb_entry(*p, struct dso, rb_node); + int rc = strcmp(name, this->long_name); + + parent = *p; + if (rc == 0) { + /* + * In case the new DSO is a duplicate of an existing + * one, print an one-time warning & put the new entry + * at the end of the list of duplicates. + */ + if (!dso || (dso == this)) + return this; /* Find matching dso */ + /* + * The core kernel DSOs may have duplicated long name. + * In this case, the short name should be different. + * Comparing the short names to differentiate the DSOs. + */ + rc = strcmp(dso->short_name, this->short_name); + if (rc == 0) { + pr_err("Duplicated dso name: %s\n", name); + return NULL; + } + } + if (rc < 0) + p = &parent->rb_left; + else + p = &parent->rb_right; + } + if (dso) { + /* Add new node and rebalance tree */ + rb_link_node(&dso->rb_node, parent, p); + rb_insert_color(&dso->rb_node, root); + } + return NULL; +} + +static inline struct dso * +dso__find_by_longname(const struct rb_root *root, const char *name) +{ + return dso__findlink_by_longname((struct rb_root *)root, NULL, name); +} + +void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) +{ + if (name == NULL) + return; + + if (dso->long_name_allocated) + free((char *)dso->long_name); + + dso->long_name = name; + dso->long_name_len = strlen(name); + dso->long_name_allocated = name_allocated; +} + +void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) +{ + if (name == NULL) + return; + + if (dso->short_name_allocated) + free((char *)dso->short_name); + + dso->short_name = name; + dso->short_name_len = strlen(name); + dso->short_name_allocated = name_allocated; +} + +static void dso__set_basename(struct dso *dso) +{ + /* + * basename() may modify path buffer, so we must pass + * a copy. + */ + char *base, *lname = strdup(dso->long_name); + + if (!lname) + return; + + /* + * basename() may return a pointer to internal + * storage which is reused in subsequent calls + * so copy the result. 
+ */ + base = strdup(basename(lname)); + + free(lname); + + if (!base) + return; + + dso__set_short_name(dso, base, true); +} + +int dso__name_len(const struct dso *dso) +{ + if (!dso) + return strlen("[unknown]"); + if (verbose) + return dso->long_name_len; + + return dso->short_name_len; +} + +bool dso__loaded(const struct dso *dso, enum map_type type) +{ + return dso->loaded & (1 << type); +} + +bool dso__sorted_by_name(const struct dso *dso, enum map_type type) +{ + return dso->sorted_by_name & (1 << type); +} + +void dso__set_sorted_by_name(struct dso *dso, enum map_type type) +{ + dso->sorted_by_name |= (1 << type); +} + +struct dso *dso__new(const char *name) +{ + struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1); + + if (dso != NULL) { + int i; + strcpy(dso->name, name); + dso__set_long_name(dso, dso->name, false); + dso__set_short_name(dso, dso->name, false); + for (i = 0; i < MAP__NR_TYPES; ++i) + dso->symbols[i] = dso->symbol_names[i] = RB_ROOT; + dso->data.cache = RB_ROOT; + dso->data.fd = -1; + dso->data.status = DSO_DATA_STATUS_UNKNOWN; + dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND; + dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND; + dso->is_64_bit = (sizeof(void *) == 8); + dso->loaded = 0; + dso->rel = 0; + dso->sorted_by_name = 0; + dso->has_build_id = 0; + dso->has_srcline = 1; + dso->a2l_fails = 1; + dso->kernel = DSO_TYPE_USER; + dso->needs_swap = DSO_SWAP__UNSET; + RB_CLEAR_NODE(&dso->rb_node); + INIT_LIST_HEAD(&dso->node); + INIT_LIST_HEAD(&dso->data.open_entry); + } + + return dso; +} + +void dso__delete(struct dso *dso) +{ + int i; + + if (!RB_EMPTY_NODE(&dso->rb_node)) + pr_err("DSO %s is still in rbtree when being deleted!\n", + dso->long_name); + for (i = 0; i < MAP__NR_TYPES; ++i) + symbols__delete(&dso->symbols[i]); + + if (dso->short_name_allocated) { + zfree((char **)&dso->short_name); + dso->short_name_allocated = false; + } + + if (dso->long_name_allocated) { + zfree((char **)&dso->long_name); + dso->long_name_allocated = false; + } + + dso__data_close(dso); + dso_cache__free(&dso->data.cache); + dso__free_a2l(dso); + zfree(&dso->symsrc_filename); + free(dso); +} + +void dso__set_build_id(struct dso *dso, void *build_id) +{ + memcpy(dso->build_id, build_id, sizeof(dso->build_id)); + dso->has_build_id = 1; +} + +bool dso__build_id_equal(const struct dso *dso, u8 *build_id) +{ + return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0; +} + +void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) +{ + char path[PATH_MAX]; + + if (machine__is_default_guest(machine)) + return; + sprintf(path, "%s/sys/kernel/notes", machine->root_dir); + if (sysfs__read_build_id(path, dso->build_id, + sizeof(dso->build_id)) == 0) + dso->has_build_id = true; +} + +int dso__kernel_module_get_build_id(struct dso *dso, + const char *root_dir) +{ + char filename[PATH_MAX]; + /* + * kernel module short names are of the form "[module]" and + * we need just "module" here. 
+ */ + const char *name = dso->short_name + 1; + + snprintf(filename, sizeof(filename), + "%s/sys/module/%.*s/notes/.note.gnu.build-id", + root_dir, (int)strlen(name) - 1, name); + + if (sysfs__read_build_id(filename, dso->build_id, + sizeof(dso->build_id)) == 0) + dso->has_build_id = true; + + return 0; +} + +bool __dsos__read_build_ids(struct list_head *head, bool with_hits) +{ + bool have_build_id = false; + struct dso *pos; + + list_for_each_entry(pos, head, node) { + if (with_hits && !pos->hit) + continue; + if (pos->has_build_id) { + have_build_id = true; + continue; + } + if (filename__read_build_id(pos->long_name, pos->build_id, + sizeof(pos->build_id)) > 0) { + have_build_id = true; + pos->has_build_id = true; + } + } + + return have_build_id; +} + +void dsos__add(struct dsos *dsos, struct dso *dso) +{ + list_add_tail(&dso->node, &dsos->head); + dso__findlink_by_longname(&dsos->root, dso, NULL); +} + +struct dso *dsos__find(const struct dsos *dsos, const char *name, + bool cmp_short) +{ + struct dso *pos; + + if (cmp_short) { + list_for_each_entry(pos, &dsos->head, node) + if (strcmp(pos->short_name, name) == 0) + return pos; + return NULL; + } + return dso__find_by_longname(&dsos->root, name); +} + +struct dso *dsos__addnew(struct dsos *dsos, const char *name) +{ + struct dso *dso = dso__new(name); + + if (dso != NULL) { + dsos__add(dsos, dso); + dso__set_basename(dso); + } + return dso; +} + +struct dso *__dsos__findnew(struct dsos *dsos, const char *name) +{ + struct dso *dso = dsos__find(dsos, name, false); + + return dso ? dso : dsos__addnew(dsos, name); +} + +size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm) +{ + struct dso *pos; + size_t ret = 0; + + list_for_each_entry(pos, head, node) { + if (skip && skip(pos, parm)) + continue; + ret += dso__fprintf_buildid(pos, fp); + ret += fprintf(fp, " %s\n", pos->long_name); + } + return ret; +} + +size_t __dsos__fprintf(struct list_head *head, FILE *fp) +{ + struct dso *pos; + size_t ret = 0; + + list_for_each_entry(pos, head, node) { + int i; + for (i = 0; i < MAP__NR_TYPES; ++i) + ret += dso__fprintf(pos, i, fp); + } + + return ret; +} + +size_t dso__fprintf_buildid(struct dso *dso, FILE *fp) +{ + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + + build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); + return fprintf(fp, "%s", sbuild_id); +} + +size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp) +{ + struct rb_node *nd; + size_t ret = fprintf(fp, "dso: %s (", dso->short_name); + + if (dso->short_name != dso->long_name) + ret += fprintf(fp, "%s, ", dso->long_name); + ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], + dso__loaded(dso, type) ? "" : "NOT "); + ret += dso__fprintf_buildid(dso, fp); + ret += fprintf(fp, ")\n"); + for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { + struct symbol *pos = rb_entry(nd, struct symbol, rb_node); + ret += symbol__fprintf(pos, fp); + } + + return ret; +} + +enum dso_type dso__type(struct dso *dso, struct machine *machine) +{ + int fd; + + fd = dso__data_fd(dso, machine); + if (fd < 0) + return DSO__TYPE_UNKNOWN; + + return dso__type_fd(fd); +} + +int dso__strerror_load(struct dso *dso, char *buf, size_t buflen) +{ + int idx, errnum = dso->load_errno; + /* + * This must have a same ordering as the enum dso_load_errno. 
+ */
+	static const char *dso_load__error_str[] = {
+	"Internal tools/perf/ library error",
+	"Invalid ELF file",
+	"Can not read build id",
+	"Mismatching build id",
+	"Decompression failure",
+	};
+
+	BUG_ON(buflen == 0);
+
+	if (errnum >= 0) {
+		const char *err = strerror_r(errnum, buf, buflen);
+
+		if (err != buf)
+			scnprintf(buf, buflen, "%s", err);
+
+		return 0;
+	}
+
+	if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
+		return -1;
+
+	idx = errnum - __DSO_LOAD_ERRNO__START;
+	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
+	return 0;
+}
diff --git a/kernel/tools/perf/util/dso.h b/kernel/tools/perf/util/dso.h
new file mode 100644
index 000000000..e0901b4ed
--- /dev/null
+++ b/kernel/tools/perf/util/dso.h
@@ -0,0 +1,325 @@
+#ifndef __PERF_DSO
+#define __PERF_DSO
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <stdio.h>
+#include "map.h"
+#include "build-id.h"
+
+enum dso_binary_type {
+	DSO_BINARY_TYPE__KALLSYMS = 0,
+	DSO_BINARY_TYPE__GUEST_KALLSYMS,
+	DSO_BINARY_TYPE__VMLINUX,
+	DSO_BINARY_TYPE__GUEST_VMLINUX,
+	DSO_BINARY_TYPE__JAVA_JIT,
+	DSO_BINARY_TYPE__DEBUGLINK,
+	DSO_BINARY_TYPE__BUILD_ID_CACHE,
+	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
+	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
+	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
+	DSO_BINARY_TYPE__GUEST_KMODULE,
+	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
+	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
+	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
+	DSO_BINARY_TYPE__KCORE,
+	DSO_BINARY_TYPE__GUEST_KCORE,
+	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+	DSO_BINARY_TYPE__NOT_FOUND,
+};
+
+enum dso_kernel_type {
+	DSO_TYPE_USER = 0,
+	DSO_TYPE_KERNEL,
+	DSO_TYPE_GUEST_KERNEL
+};
+
+enum dso_swap_type {
+	DSO_SWAP__UNSET,
+	DSO_SWAP__NO,
+	DSO_SWAP__YES,
+};
+
+enum dso_data_status {
+	DSO_DATA_STATUS_ERROR	= -1,
+	DSO_DATA_STATUS_UNKNOWN	= 0,
+	DSO_DATA_STATUS_OK	= 1,
+};
+
+enum dso_data_status_seen {
+	DSO_DATA_STATUS_SEEN_ITRACE,
+};
+
+enum dso_type {
+	DSO__TYPE_UNKNOWN,
+	DSO__TYPE_64BIT,
+	DSO__TYPE_32BIT,
+	DSO__TYPE_X32BIT,
+};
+
+enum dso_load_errno {
+	DSO_LOAD_ERRNO__SUCCESS		= 0,
+
+	/*
+	 * Choose an arbitrary negative big number not to clash with standard
+	 * errno since SUS requires errno to have distinct positive values.
+	 * See 'Issue 6' in the link below.
+	 *
+	 * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
+	 */
+	__DSO_LOAD_ERRNO__START		= -10000,
+
+	DSO_LOAD_ERRNO__INTERNAL_ERROR	= __DSO_LOAD_ERRNO__START,
+
+	/* for symsrc__init() */
+	DSO_LOAD_ERRNO__INVALID_ELF,
+	DSO_LOAD_ERRNO__CANNOT_READ_BUILDID,
+	DSO_LOAD_ERRNO__MISMATCHING_BUILDID,
+
+	/* for decompress_kmodule */
+	DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE,
+
+	__DSO_LOAD_ERRNO__END,
+};
+
+#define DSO__SWAP(dso, type, val)			\
+({							\
+	type ____r = val;				\
+	BUG_ON(dso->needs_swap == DSO_SWAP__UNSET);	\
+	if (dso->needs_swap == DSO_SWAP__YES) {		\
+		switch (sizeof(____r)) {		\
+		case 2:					\
+			____r = bswap_16(val);		\
+			break;				\
+		case 4:					\
+			____r = bswap_32(val);		\
+			break;				\
+		case 8:					\
+			____r = bswap_64(val);		\
+			break;				\
+		default:				\
+			BUG_ON(1);			\
+		}					\
+	}						\
+	____r;						\
+})
+
+#define DSO__DATA_CACHE_SIZE 4096
+#define DSO__DATA_CACHE_MASK ~(DSO__DATA_CACHE_SIZE - 1)
+
+struct dso_cache {
+	struct rb_node	rb_node;
+	u64 offset;
+	u64 size;
+	char data[0];
+};
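
DSO__SWAP() above is a statement expression that byte-swaps a 2-, 4-, or 8-byte value only when the dso's endianness differs from the host's, and BUG()s if the swap mode was never decided. A hypothetical use (illustration only, not part of this patch; fixup_word is made up):

	/* Sketch: normalize a 32-bit value read from a foreign-endian dso. */
	static u32 fixup_word(struct dso *dso, u32 raw)
	{
		/* expands to bswap_32(raw) iff dso->needs_swap == DSO_SWAP__YES */
		return DSO__SWAP(dso, u32, raw);
	}
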
+
+/*
+ * DSOs are put into both a list for fast iteration and an rbtree for fast
+ * long name lookup.
+ */
+struct dsos {
+	struct list_head head;
+	struct rb_root	 root;	/* rbtree root sorted by long name */
+};
+
+struct dso {
+	struct list_head node;
+	struct rb_node	 rb_node;	/* rbtree node sorted by long name */
+	struct rb_root	 symbols[MAP__NR_TYPES];
+	struct rb_root	 symbol_names[MAP__NR_TYPES];
+	void		 *a2l;
+	char		 *symsrc_filename;
+	unsigned int	 a2l_fails;
+	enum dso_kernel_type	kernel;
+	enum dso_swap_type	needs_swap;
+	enum dso_binary_type	symtab_type;
+	enum dso_binary_type	binary_type;
+	enum dso_load_errno	load_errno;
+	u8		 adjust_symbols:1;
+	u8		 has_build_id:1;
+	u8		 has_srcline:1;
+	u8		 hit:1;
+	u8		 annotate_warned:1;
+	u8		 short_name_allocated:1;
+	u8		 long_name_allocated:1;
+	u8		 is_64_bit:1;
+	u8		 sorted_by_name;
+	u8		 loaded;
+	u8		 rel;
+	u8		 build_id[BUILD_ID_SIZE];
+	const char	 *short_name;
+	const char	 *long_name;
+	u16		 long_name_len;
+	u16		 short_name_len;
+	void		 *dwfl;			/* DWARF debug info */
+
+	/* dso data file */
+	struct {
+		struct rb_root	 cache;
+		int		 fd;
+		int		 status;
+		u32		 status_seen;
+		size_t		 file_size;
+		struct list_head open_entry;
+		u64		 debug_frame_offset;
+		u64		 eh_frame_hdr_offset;
+	} data;
+
+	union { /* Tool specific area */
+		void	 *priv;
+		u64	 db_id;
+	};
+
+	char		 name[0];
+};
+
+/* dso__for_each_symbol - iterate over the symbols of given type
+ *
+ * @dso: the 'struct dso *' whose symbols are iterated
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @n: the 'struct rb_node *' to use as a temporary storage
+ * @type: the 'enum map_type' type of symbols
+ */
+#define dso__for_each_symbol(dso, pos, n, type)	\
+	symbols__for_each_entry(&(dso)->symbols[(type)], pos, n)
+
+static inline void dso__set_loaded(struct dso *dso, enum map_type type)
+{
+	dso->loaded |= (1 << type);
+}
+
+struct dso *dso__new(const char *name);
+void dso__delete(struct dso *dso);
+
+void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated);
+void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated);
+
+int dso__name_len(const struct dso *dso);
+
+bool dso__loaded(const struct dso *dso, enum map_type type);
+
+bool dso__sorted_by_name(const struct dso *dso, enum map_type type);
+void dso__set_sorted_by_name(struct dso *dso, enum map_type type);
+void dso__sort_by_name(struct dso *dso, enum map_type type);
+
+void dso__set_build_id(struct dso *dso, void *build_id);
+bool dso__build_id_equal(const struct dso *dso, u8 *build_id);
+void dso__read_running_kernel_build_id(struct dso *dso,
+				       struct machine *machine);
+int dso__kernel_module_get_build_id(struct dso *dso, const char *root_dir);
+
+char dso__symtab_origin(const struct dso *dso);
+int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type,
+				   char *root_dir, char *filename, size_t size);
+bool is_supported_compression(const char *ext);
+bool is_kernel_module(const char *pathname);
+bool decompress_to_file(const char *ext, const char *filename, int output_fd);
+bool dso__needs_decompress(struct dso *dso);
+
+struct kmod_path {
+	char *name;
+	char *ext;
+	bool  comp;
+	bool  kmod;
+};
+
+int __kmod_path__parse(struct kmod_path *m, const char *path,
+		       bool alloc_name, bool alloc_ext);
+
+#define kmod_path__parse(__m, __p)      __kmod_path__parse(__m, __p, false, false)
+#define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true, false)
+#define kmod_path__parse_ext(__m, __p)  __kmod_path__parse(__m, __p, false, true)
+
+/*
+ * The dso__data_* external interface provides the following functions:
+ *   dso__data_fd
+ *   dso__data_close
+ *   dso__data_size
+ *
dso__data_read_offset + * dso__data_read_addr + * + * Please refer to the dso.c object code for each function and + * arguments documentation. Following text tries to explain the + * dso file descriptor caching. + * + * The dso__data* interface allows caching of opened file descriptors + * to speed up the dso data accesses. The idea is to leave the file + * descriptor opened ideally for the whole life of the dso object. + * + * The current usage of the dso__data_* interface is as follows: + * + * Get DSO's fd: + * int fd = dso__data_fd(dso, machine); + * USE 'fd' SOMEHOW + * + * Read DSO's data: + * n = dso__data_read_offset(dso_0, &machine, 0, buf, BUFSIZE); + * n = dso__data_read_addr(dso_0, &machine, 0, buf, BUFSIZE); + * + * Eventually close DSO's fd: + * dso__data_close(dso); + * + * It is not necessary to close the DSO object data file. Each time new + * DSO data file is opened, the limit (RLIMIT_NOFILE/2) is checked. Once + * it is crossed, the oldest opened DSO object is closed. + * + * The dso__delete function calls close_dso function to ensure the + * data file descriptor gets closed/unmapped before the dso object + * is freed. + * + * TODO +*/ +int dso__data_fd(struct dso *dso, struct machine *machine); +void dso__data_close(struct dso *dso); + +off_t dso__data_size(struct dso *dso, struct machine *machine); +ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, + u64 offset, u8 *data, ssize_t size); +ssize_t dso__data_read_addr(struct dso *dso, struct map *map, + struct machine *machine, u64 addr, + u8 *data, ssize_t size); +bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by); + +struct map *dso__new_map(const char *name); +struct dso *dso__kernel_findnew(struct machine *machine, const char *name, + const char *short_name, int dso_type); + +void dsos__add(struct dsos *dsos, struct dso *dso); +struct dso *dsos__addnew(struct dsos *dsos, const char *name); +struct dso *dsos__find(const struct dsos *dsos, const char *name, + bool cmp_short); +struct dso *__dsos__findnew(struct dsos *dsos, const char *name); +bool __dsos__read_build_ids(struct list_head *head, bool with_hits); + +size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm); +size_t __dsos__fprintf(struct list_head *head, FILE *fp); + +size_t dso__fprintf_buildid(struct dso *dso, FILE *fp); +size_t dso__fprintf_symbols_by_name(struct dso *dso, + enum map_type type, FILE *fp); +size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp); + +static inline bool dso__is_vmlinux(struct dso *dso) +{ + return dso->binary_type == DSO_BINARY_TYPE__VMLINUX || + dso->binary_type == DSO_BINARY_TYPE__GUEST_VMLINUX; +} + +static inline bool dso__is_kcore(struct dso *dso) +{ + return dso->binary_type == DSO_BINARY_TYPE__KCORE || + dso->binary_type == DSO_BINARY_TYPE__GUEST_KCORE; +} + +void dso__free_a2l(struct dso *dso); + +enum dso_type dso__type(struct dso *dso, struct machine *machine); + +int dso__strerror_load(struct dso *dso, char *buf, size_t buflen); + +#endif /* __PERF_DSO */ diff --git a/kernel/tools/perf/util/dwarf-aux.c b/kernel/tools/perf/util/dwarf-aux.c new file mode 100644 index 000000000..c34e02402 --- /dev/null +++ b/kernel/tools/perf/util/dwarf-aux.c @@ -0,0 +1,905 @@ +/* + * dwarf-aux.c : libdw auxiliary interfaces + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either 
version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <stdbool.h>
+#include "util.h"
+#include "debug.h"
+#include "dwarf-aux.h"
+
+/**
+ * cu_find_realpath - Find the realpath of the target file
+ * @cu_die: a DIE (DWARF information entry) of a CU (compilation unit)
+ * @fname: the tail filename of the target file
+ *
+ * Find the real (long) path of @fname in @cu_die.
+ */
+const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname)
+{
+	Dwarf_Files *files;
+	size_t nfiles, i;
+	const char *src = NULL;
+	int ret;
+
+	if (!fname)
+		return NULL;
+
+	ret = dwarf_getsrcfiles(cu_die, &files, &nfiles);
+	if (ret != 0)
+		return NULL;
+
+	for (i = 0; i < nfiles; i++) {
+		src = dwarf_filesrc(files, i, NULL, NULL);
+		if (strtailcmp(src, fname) == 0)
+			break;
+	}
+	if (i == nfiles)
+		return NULL;
+	return src;
+}
+
+/**
+ * cu_get_comp_dir - Get the path of compilation directory
+ * @cu_die: a CU DIE
+ *
+ * Get the path of the compilation directory of given @cu_die.
+ * Since this depends on DW_AT_comp_dir, older gcc does not embed it.
+ * In that case, this returns NULL.
+ */
+const char *cu_get_comp_dir(Dwarf_Die *cu_die)
+{
+	Dwarf_Attribute attr;
+	if (dwarf_attr(cu_die, DW_AT_comp_dir, &attr) == NULL)
+		return NULL;
+	return dwarf_formstring(&attr);
+}
+
+/**
+ * cu_find_lineinfo - Get a line number and file name for given address
+ * @cu_die: a CU DIE
+ * @addr: an address
+ * @fname: a pointer which returns the file name string
+ * @lineno: a pointer which returns the line number
+ *
+ * Find a line number and file name for @addr in @cu_die.
+ */
+int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr,
+		     const char **fname, int *lineno)
+{
+	Dwarf_Line *line;
+	Dwarf_Addr laddr;
+
+	line = dwarf_getsrc_die(cu_die, (Dwarf_Addr)addr);
+	if (line && dwarf_lineaddr(line, &laddr) == 0 &&
+	    addr == (unsigned long)laddr && dwarf_lineno(line, lineno) == 0) {
+		*fname = dwarf_linesrc(line, NULL, NULL);
+		if (!*fname)
+			/* line number is useless without filename */
+			*lineno = 0;
+	}
+
+	return *lineno ?: -ENOENT;
+}
+
+static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data);
+
+/**
+ * cu_walk_functions_at - Walk on function DIEs at given address
+ * @cu_die: a CU DIE
+ * @addr: an address
+ * @callback: a callback which is called with found DIEs
+ * @data: user data passed to @callback
+ *
+ * Walk on function DIEs at given @addr in @cu_die. The passed DIEs are
+ * subprograms or inlined subroutines.
+ */
+int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
+			 int (*callback)(Dwarf_Die *, void *), void *data)
+{
+	Dwarf_Die die_mem;
+	Dwarf_Die *sc_die;
+	int ret = -ENOENT;
+
+	/* Inlined functions can be recursive. Trace until it fails. */
+	for (sc_die = die_find_realfunc(cu_die, addr, &die_mem);
+	     sc_die != NULL;
+	     sc_die = die_find_child(sc_die, __die_find_inline_cb, &addr,
+				     &die_mem)) {
+		ret = callback(sc_die, data);
+		if (ret)
+			break;
+	}
+
+	return ret;
+
+}
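
cu_walk_functions_at() first finds the real (outermost) function DIE covering the address, then keeps descending into inlined instances until the callback returns non-zero or no child scope matches. A hypothetical walker built on it (illustration only, not part of this patch; print_scope and dump_scopes are made up):

	#include <stdio.h>
	#include "util/dwarf-aux.h"

	/* Sketch: print every function scope (outer + inlined) covering addr. */
	static int print_scope(Dwarf_Die *sc_die, void *data)
	{
		(void)data;
		printf("scope: %s\n", dwarf_diename(sc_die));
		return 0;	/* 0 = keep descending into inlined instances */
	}

	static int dump_scopes(Dwarf_Die *cu_die, Dwarf_Addr addr)
	{
		return cu_walk_functions_at(cu_die, addr, print_scope, NULL);
	}
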
Trace it until fail */ + for (sc_die = die_find_realfunc(cu_die, addr, &die_mem); + sc_die != NULL; + sc_die = die_find_child(sc_die, __die_find_inline_cb, &addr, + &die_mem)) { + ret = callback(sc_die, data); + if (ret) + break; + } + + return ret; + +} + +/** + * die_compare_name - Compare diename and tname + * @dw_die: a DIE + * @tname: a string of target name + * + * Compare the name of @dw_die and @tname. Return false if @dw_die has no name. + */ +bool die_compare_name(Dwarf_Die *dw_die, const char *tname) +{ + const char *name; + name = dwarf_diename(dw_die); + return name ? (strcmp(tname, name) == 0) : false; +} + +/** + * die_get_call_lineno - Get callsite line number of inline-function instance + * @in_die: a DIE of an inlined function instance + * + * Get call-site line number of @in_die. This means from where the inline + * function is called. + */ +int die_get_call_lineno(Dwarf_Die *in_die) +{ + Dwarf_Attribute attr; + Dwarf_Word ret; + + if (!dwarf_attr(in_die, DW_AT_call_line, &attr)) + return -ENOENT; + + dwarf_formudata(&attr, &ret); + return (int)ret; +} + +/** + * die_get_type - Get type DIE + * @vr_die: a DIE of a variable + * @die_mem: where to store a type DIE + * + * Get a DIE of the type of given variable (@vr_die), and store + * it to die_mem. Return NULL if fails to get a type DIE. + */ +Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) +{ + Dwarf_Attribute attr; + + if (dwarf_attr_integrate(vr_die, DW_AT_type, &attr) && + dwarf_formref_die(&attr, die_mem)) + return die_mem; + else + return NULL; +} + +/* Get a type die, but skip qualifiers */ +static Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) +{ + int tag; + + do { + vr_die = die_get_type(vr_die, die_mem); + if (!vr_die) + break; + tag = dwarf_tag(vr_die); + } while (tag == DW_TAG_const_type || + tag == DW_TAG_restrict_type || + tag == DW_TAG_volatile_type || + tag == DW_TAG_shared_type); + + return vr_die; +} + +/** + * die_get_real_type - Get a type die, but skip qualifiers and typedef + * @vr_die: a DIE of a variable + * @die_mem: where to store a type DIE + * + * Get a DIE of the type of given variable (@vr_die), and store + * it to die_mem. Return NULL if fails to get a type DIE. + * If the type is qualifiers (e.g. const) or typedef, this skips it + * and tries to find real type (structure or basic types, e.g. int). + */ +Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) +{ + do { + vr_die = __die_get_real_type(vr_die, die_mem); + } while (vr_die && dwarf_tag(vr_die) == DW_TAG_typedef); + + return vr_die; +} + +/* Get attribute and translate it as a udata */ +static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name, + Dwarf_Word *result) +{ + Dwarf_Attribute attr; + + if (dwarf_attr(tp_die, attr_name, &attr) == NULL || + dwarf_formudata(&attr, result) != 0) + return -ENOENT; + + return 0; +} + +/* Get attribute and translate it as a sdata */ +static int die_get_attr_sdata(Dwarf_Die *tp_die, unsigned int attr_name, + Dwarf_Sword *result) +{ + Dwarf_Attribute attr; + + if (dwarf_attr(tp_die, attr_name, &attr) == NULL || + dwarf_formsdata(&attr, result) != 0) + return -ENOENT; + + return 0; +} + +/** + * die_is_signed_type - Check whether a type DIE is signed or not + * @tp_die: a DIE of a type + * + * Get the encoding of @tp_die and return true if the encoding + * is signed. 
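+ *
+ * A minimal usage sketch (hypothetical caller code; 'raw_val' and
+ * 'val' are assumed variables, and @type_die would come from e.g.
+ * die_get_real_type()):
+ *
+ *	if (die_is_signed_type(&type_die))
+ *		val = (s64)(s32)raw_val;	sign-extend a 32-bit value
+ *	else
+ *		val = (u32)raw_val;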
+ */ +bool die_is_signed_type(Dwarf_Die *tp_die) +{ + Dwarf_Word ret; + + if (die_get_attr_udata(tp_die, DW_AT_encoding, &ret)) + return false; + + return (ret == DW_ATE_signed_char || ret == DW_ATE_signed || + ret == DW_ATE_signed_fixed); +} + +/** + * die_is_func_def - Ensure that this DIE is a subprogram and definition + * @dw_die: a DIE + * + * Ensure that this DIE is a subprogram and NOT a declaration. This + * returns true if @dw_die is a function definition. + **/ +bool die_is_func_def(Dwarf_Die *dw_die) +{ + Dwarf_Attribute attr; + + return (dwarf_tag(dw_die) == DW_TAG_subprogram && + dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL); +} + +/** + * die_is_func_instance - Ensure that this DIE is an instance of a subprogram + * @dw_die: a DIE + * + * Ensure that this DIE is an instance (which has an entry address). + * This returns true if @dw_die is a function instance. If not, you need to + * call die_walk_instances() to find actual instances. + **/ +bool die_is_func_instance(Dwarf_Die *dw_die) +{ + Dwarf_Addr tmp; + + /* Actually gcc optimizes non-inline as like as inlined */ + return !dwarf_func_inline(dw_die) && dwarf_entrypc(dw_die, &tmp) == 0; +} +/** + * die_get_data_member_location - Get the data-member offset + * @mb_die: a DIE of a member of a data structure + * @offs: The offset of the member in the data structure + * + * Get the offset of @mb_die in the data structure including @mb_die, and + * stores result offset to @offs. If any error occurs this returns errno. + */ +int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs) +{ + Dwarf_Attribute attr; + Dwarf_Op *expr; + size_t nexpr; + int ret; + + if (dwarf_attr(mb_die, DW_AT_data_member_location, &attr) == NULL) + return -ENOENT; + + if (dwarf_formudata(&attr, offs) != 0) { + /* DW_AT_data_member_location should be DW_OP_plus_uconst */ + ret = dwarf_getlocation(&attr, &expr, &nexpr); + if (ret < 0 || nexpr == 0) + return -ENOENT; + + if (expr[0].atom != DW_OP_plus_uconst || nexpr != 1) { + pr_debug("Unable to get offset:Unexpected OP %x (%zd)\n", + expr[0].atom, nexpr); + return -ENOTSUP; + } + *offs = (Dwarf_Word)expr[0].number; + } + return 0; +} + +/* Get the call file index number in CU DIE */ +static int die_get_call_fileno(Dwarf_Die *in_die) +{ + Dwarf_Sword idx; + + if (die_get_attr_sdata(in_die, DW_AT_call_file, &idx) == 0) + return (int)idx; + else + return -ENOENT; +} + +/* Get the declared file index number in CU DIE */ +static int die_get_decl_fileno(Dwarf_Die *pdie) +{ + Dwarf_Sword idx; + + if (die_get_attr_sdata(pdie, DW_AT_decl_file, &idx) == 0) + return (int)idx; + else + return -ENOENT; +} + +/** + * die_get_call_file - Get callsite file name of inlined function instance + * @in_die: a DIE of an inlined function instance + * + * Get call-site file name of @in_die. This means from which file the inline + * function is called. + */ +const char *die_get_call_file(Dwarf_Die *in_die) +{ + Dwarf_Die cu_die; + Dwarf_Files *files; + int idx; + + idx = die_get_call_fileno(in_die); + if (idx < 0 || !dwarf_diecu(in_die, &cu_die, NULL, NULL) || + dwarf_getsrcfiles(&cu_die, &files, NULL) != 0) + return NULL; + + return dwarf_filesrc(files, idx, NULL, NULL); +} + + +/** + * die_find_child - Generic DIE search function in DIE tree + * @rt_die: a root DIE + * @callback: a callback function + * @data: a user data passed to the callback function + * @die_mem: a buffer for result DIE + * + * Trace DIE tree from @rt_die and call @callback for each child DIE. 
+ * If @callback returns DIE_FIND_CB_END, this stores the DIE into + * @die_mem and returns it. If @callback returns DIE_FIND_CB_CONTINUE, + * this continues to trace the tree. Optionally, @callback can return + * DIE_FIND_CB_CHILD and DIE_FIND_CB_SIBLING, those means trace only + * the children and trace only the siblings respectively. + * Returns NULL if @callback can't find any appropriate DIE. + */ +Dwarf_Die *die_find_child(Dwarf_Die *rt_die, + int (*callback)(Dwarf_Die *, void *), + void *data, Dwarf_Die *die_mem) +{ + Dwarf_Die child_die; + int ret; + + ret = dwarf_child(rt_die, die_mem); + if (ret != 0) + return NULL; + + do { + ret = callback(die_mem, data); + if (ret == DIE_FIND_CB_END) + return die_mem; + + if ((ret & DIE_FIND_CB_CHILD) && + die_find_child(die_mem, callback, data, &child_die)) { + memcpy(die_mem, &child_die, sizeof(Dwarf_Die)); + return die_mem; + } + } while ((ret & DIE_FIND_CB_SIBLING) && + dwarf_siblingof(die_mem, die_mem) == 0); + + return NULL; +} + +struct __addr_die_search_param { + Dwarf_Addr addr; + Dwarf_Die *die_mem; +}; + +/* die_find callback for non-inlined function search */ +static int __die_search_func_cb(Dwarf_Die *fn_die, void *data) +{ + struct __addr_die_search_param *ad = data; + + /* + * Since a declaration entry doesn't has given pc, this always returns + * function definition entry. + */ + if (dwarf_tag(fn_die) == DW_TAG_subprogram && + dwarf_haspc(fn_die, ad->addr)) { + memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die)); + return DWARF_CB_ABORT; + } + return DWARF_CB_OK; +} + +/** + * die_find_realfunc - Search a non-inlined function at given address + * @cu_die: a CU DIE which including @addr + * @addr: target address + * @die_mem: a buffer for result DIE + * + * Search a non-inlined function DIE which includes @addr. Stores the + * DIE to @die_mem and returns it if found. Returns NULL if failed. + */ +Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, + Dwarf_Die *die_mem) +{ + struct __addr_die_search_param ad; + ad.addr = addr; + ad.die_mem = die_mem; + /* dwarf_getscopes can't find subprogram. */ + if (!dwarf_getfuncs(cu_die, __die_search_func_cb, &ad, 0)) + return NULL; + else + return die_mem; +} + +/* die_find callback for inline function search */ +static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data) +{ + Dwarf_Addr *addr = data; + + if (dwarf_tag(die_mem) == DW_TAG_inlined_subroutine && + dwarf_haspc(die_mem, *addr)) + return DIE_FIND_CB_END; + + return DIE_FIND_CB_CONTINUE; +} + +/** + * die_find_top_inlinefunc - Search the top inlined function at given address + * @sp_die: a subprogram DIE which including @addr + * @addr: target address + * @die_mem: a buffer for result DIE + * + * Search an inlined function DIE which includes @addr. Stores the + * DIE to @die_mem and returns it if found. Returns NULL if failed. + * Even if several inlined functions are expanded recursively, this + * doesn't trace it down, and returns the topmost one. + */ +Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, + Dwarf_Die *die_mem) +{ + return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem); +} + +/** + * die_find_inlinefunc - Search an inlined function at given address + * @sp_die: a subprogram DIE which including @addr + * @addr: target address + * @die_mem: a buffer for result DIE + * + * Search an inlined function DIE which includes @addr. Stores the + * DIE to @die_mem and returns it if found. Returns NULL if failed. 
+ * If several inlined functions are expanded recursively, this traces + * it down and returns the deepest one. + */ +Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, + Dwarf_Die *die_mem) +{ + Dwarf_Die tmp_die; + + sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr, &tmp_die); + if (!sp_die) + return NULL; + + /* Inlined function could be recursive. Trace it until fail */ + while (sp_die) { + memcpy(die_mem, sp_die, sizeof(Dwarf_Die)); + sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr, + &tmp_die); + } + + return die_mem; +} + +struct __instance_walk_param { + void *addr; + int (*callback)(Dwarf_Die *, void *); + void *data; + int retval; +}; + +static int __die_walk_instances_cb(Dwarf_Die *inst, void *data) +{ + struct __instance_walk_param *iwp = data; + Dwarf_Attribute attr_mem; + Dwarf_Die origin_mem; + Dwarf_Attribute *attr; + Dwarf_Die *origin; + int tmp; + + attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem); + if (attr == NULL) + return DIE_FIND_CB_CONTINUE; + + origin = dwarf_formref_die(attr, &origin_mem); + if (origin == NULL || origin->addr != iwp->addr) + return DIE_FIND_CB_CONTINUE; + + /* Ignore redundant instances */ + if (dwarf_tag(inst) == DW_TAG_inlined_subroutine) { + dwarf_decl_line(origin, &tmp); + if (die_get_call_lineno(inst) == tmp) { + tmp = die_get_decl_fileno(origin); + if (die_get_call_fileno(inst) == tmp) + return DIE_FIND_CB_CONTINUE; + } + } + + iwp->retval = iwp->callback(inst, iwp->data); + + return (iwp->retval) ? DIE_FIND_CB_END : DIE_FIND_CB_CONTINUE; +} + +/** + * die_walk_instances - Walk on instances of given DIE + * @or_die: an abstract original DIE + * @callback: a callback function which is called with instance DIE + * @data: user data + * + * Walk on the instances of given @or_die. @or_die must be an inlined function + * declaration. This returns the return value of @callback if it returns + * non-zero value, or -ENOENT if there is no instance.
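+ *
+ * A hypothetical callback sketch that merely counts the instances
+ * ('data' is assumed to point at an int):
+ *
+ *	static int count_instance_cb(Dwarf_Die *inst, void *data)
+ *	{
+ *		(*(int *)data)++;
+ *		return 0;	keep walking
+ *	}
+ *
+ *	int nr = 0;
+ *	if (die_walk_instances(or_die, count_instance_cb, &nr) == -ENOENT)
+ *		nr = 0;	no instance was found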
+ */ +int die_walk_instances(Dwarf_Die *or_die, int (*callback)(Dwarf_Die *, void *), + void *data) +{ + Dwarf_Die cu_die; + Dwarf_Die die_mem; + struct __instance_walk_param iwp = { + .addr = or_die->addr, + .callback = callback, + .data = data, + .retval = -ENOENT, + }; + + if (dwarf_diecu(or_die, &cu_die, NULL, NULL) == NULL) + return -ENOENT; + + die_find_child(&cu_die, __die_walk_instances_cb, &iwp, &die_mem); + + return iwp.retval; +} + +/* Line walker internal parameters */ +struct __line_walk_param { + bool recursive; + line_walk_callback_t callback; + void *data; + int retval; +}; + +static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data) +{ + struct __line_walk_param *lw = data; + Dwarf_Addr addr = 0; + const char *fname; + int lineno; + + if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) { + fname = die_get_call_file(in_die); + lineno = die_get_call_lineno(in_die); + if (fname && lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) { + lw->retval = lw->callback(fname, lineno, addr, lw->data); + if (lw->retval != 0) + return DIE_FIND_CB_END; + } + } + if (!lw->recursive) + /* Don't need to search recursively */ + return DIE_FIND_CB_SIBLING; + + if (addr) { + fname = dwarf_decl_file(in_die); + if (fname && dwarf_decl_line(in_die, &lineno) == 0) { + lw->retval = lw->callback(fname, lineno, addr, lw->data); + if (lw->retval != 0) + return DIE_FIND_CB_END; + } + } + + /* Continue to search nested inlined function call-sites */ + return DIE_FIND_CB_CONTINUE; +} + +/* Walk on lines of blocks included in given DIE */ +static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive, + line_walk_callback_t callback, void *data) +{ + struct __line_walk_param lw = { + .recursive = recursive, + .callback = callback, + .data = data, + .retval = 0, + }; + Dwarf_Die die_mem; + Dwarf_Addr addr; + const char *fname; + int lineno; + + /* Handle function declaration line */ + fname = dwarf_decl_file(sp_die); + if (fname && dwarf_decl_line(sp_die, &lineno) == 0 && + dwarf_entrypc(sp_die, &addr) == 0) { + lw.retval = callback(fname, lineno, addr, data); + if (lw.retval != 0) + goto done; + } + die_find_child(sp_die, __die_walk_funclines_cb, &lw, &die_mem); +done: + return lw.retval; +} + +static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data) +{ + struct __line_walk_param *lw = data; + + lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data); + if (lw->retval != 0) + return DWARF_CB_ABORT; + + return DWARF_CB_OK; +} + +/** + * die_walk_lines - Walk on lines inside given DIE + * @rt_die: a root DIE (CU, subprogram or inlined_subroutine) + * @callback: callback routine + * @data: user data + * + * Walk on all lines inside given @rt_die and call @callback on each line. + * If the @rt_die is a function, walk only on the lines inside the function, + * otherwise @rt_die must be a CU DIE. + * Note that this walks not only dwarf line list, but also function entries + * and inline call-site. 
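+ *
+ * A hypothetical callback sketch matching line_walk_callback_t; it
+ * prints each line, and returning non-zero would stop the walk:
+ *
+ *	static int show_line_cb(const char *fname, int lineno,
+ *				Dwarf_Addr addr, void *data)
+ *	{
+ *		printf("%s:%d\t%#llx\n", fname, lineno,
+ *		       (unsigned long long)addr);
+ *		return 0;
+ *	}
+ *
+ *	die_walk_lines(cu_die, show_line_cb, NULL);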
+ */ +int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data) +{ + Dwarf_Lines *lines; + Dwarf_Line *line; + Dwarf_Addr addr; + const char *fname; + int lineno, ret = 0; + Dwarf_Die die_mem, *cu_die; + size_t nlines, i; + + /* Get the CU die */ + if (dwarf_tag(rt_die) != DW_TAG_compile_unit) + cu_die = dwarf_diecu(rt_die, &die_mem, NULL, NULL); + else + cu_die = rt_die; + if (!cu_die) { + pr_debug2("Failed to get CU from given DIE.\n"); + return -EINVAL; + } + + /* Get lines list in the CU */ + if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0) { + pr_debug2("Failed to get source lines on this CU.\n"); + return -ENOENT; + } + pr_debug2("Get %zd lines from this CU\n", nlines); + + /* Walk on the lines on lines list */ + for (i = 0; i < nlines; i++) { + line = dwarf_onesrcline(lines, i); + if (line == NULL || + dwarf_lineno(line, &lineno) != 0 || + dwarf_lineaddr(line, &addr) != 0) { + pr_debug2("Failed to get line info. " + "Possible error in debuginfo.\n"); + continue; + } + /* Filter lines based on address */ + if (rt_die != cu_die) + /* + * Address filtering + * The line is included in given function, and + * no inline block includes it. + */ + if (!dwarf_haspc(rt_die, addr) || + die_find_inlinefunc(rt_die, addr, &die_mem)) + continue; + /* Get source line */ + fname = dwarf_linesrc(line, NULL, NULL); + + ret = callback(fname, lineno, addr, data); + if (ret != 0) + return ret; + } + + /* + * Dwarf lines doesn't include function declarations and inlined + * subroutines. We have to check functions list or given function. + */ + if (rt_die != cu_die) + /* + * Don't need walk functions recursively, because nested + * inlined functions don't have lines of the specified DIE. + */ + ret = __die_walk_funclines(rt_die, false, callback, data); + else { + struct __line_walk_param param = { + .callback = callback, + .data = data, + .retval = 0, + }; + dwarf_getfuncs(cu_die, __die_walk_culines_cb, ¶m, 0); + ret = param.retval; + } + + return ret; +} + +struct __find_variable_param { + const char *name; + Dwarf_Addr addr; +}; + +static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data) +{ + struct __find_variable_param *fvp = data; + Dwarf_Attribute attr; + int tag; + + tag = dwarf_tag(die_mem); + if ((tag == DW_TAG_formal_parameter || + tag == DW_TAG_variable) && + die_compare_name(die_mem, fvp->name) && + /* Does the DIE have location information or external instance? */ + (dwarf_attr(die_mem, DW_AT_external, &attr) || + dwarf_attr(die_mem, DW_AT_location, &attr))) + return DIE_FIND_CB_END; + if (dwarf_haspc(die_mem, fvp->addr)) + return DIE_FIND_CB_CONTINUE; + else + return DIE_FIND_CB_SIBLING; +} + +/** + * die_find_variable_at - Find a given name variable at given address + * @sp_die: a function DIE + * @name: variable name + * @addr: address + * @die_mem: a buffer for result DIE + * + * Find a variable DIE called @name at @addr in @sp_die. 
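+ *
+ * A minimal usage sketch (hypothetical; 'pc' is assumed to be an
+ * address inside the function that @sp_die describes):
+ *
+ *	Dwarf_Die vr_die;
+ *	if (!die_find_variable_at(sp_die, "count", pc, &vr_die))
+ *		return -ENOENT;	"count" is not visible at pc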
+ */ +Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name, + Dwarf_Addr addr, Dwarf_Die *die_mem) +{ + struct __find_variable_param fvp = { .name = name, .addr = addr}; + + return die_find_child(sp_die, __die_find_variable_cb, (void *)&fvp, + die_mem); +} + +static int __die_find_member_cb(Dwarf_Die *die_mem, void *data) +{ + const char *name = data; + + if (dwarf_tag(die_mem) == DW_TAG_member) { + if (die_compare_name(die_mem, name)) + return DIE_FIND_CB_END; + else if (!dwarf_diename(die_mem)) { /* Unnamed structure */ + Dwarf_Die type_die, tmp_die; + if (die_get_type(die_mem, &type_die) && + die_find_member(&type_die, name, &tmp_die)) + return DIE_FIND_CB_END; + } + } + return DIE_FIND_CB_SIBLING; +} + +/** + * die_find_member - Find a given name member in a data structure + * @st_die: a data structure type DIE + * @name: member name + * @die_mem: a buffer for result DIE + * + * Find a member DIE called @name in @st_die. + */ +Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name, + Dwarf_Die *die_mem) +{ + return die_find_child(st_die, __die_find_member_cb, (void *)name, + die_mem); +} + +/** + * die_get_typename - Get the name of given variable DIE + * @vr_die: a variable DIE + * @buf: a buffer for result type name + * @len: a max-length of @buf + * + * Get the name of @vr_die and stores it to @buf. Return the actual length + * of type name if succeeded. Return -E2BIG if @len is not enough long, and + * Return -ENOENT if failed to find type name. + * Note that the result will stores typedef name if possible, and stores + * "*(function_type)" if the type is a function pointer. + */ +int die_get_typename(Dwarf_Die *vr_die, char *buf, int len) +{ + Dwarf_Die type; + int tag, ret, ret2; + const char *tmp = ""; + + if (__die_get_real_type(vr_die, &type) == NULL) + return -ENOENT; + + tag = dwarf_tag(&type); + if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type) + tmp = "*"; + else if (tag == DW_TAG_subroutine_type) { + /* Function pointer */ + ret = snprintf(buf, len, "(function_type)"); + return (ret >= len) ? -E2BIG : ret; + } else { + if (!dwarf_diename(&type)) + return -ENOENT; + if (tag == DW_TAG_union_type) + tmp = "union "; + else if (tag == DW_TAG_structure_type) + tmp = "struct "; + else if (tag == DW_TAG_enumeration_type) + tmp = "enum "; + /* Write a base name */ + ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type)); + return (ret >= len) ? -E2BIG : ret; + } + ret = die_get_typename(&type, buf, len); + if (ret > 0) { + ret2 = snprintf(buf + ret, len - ret, "%s", tmp); + ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret; + } + return ret; +} + +/** + * die_get_varname - Get the name and type of given variable DIE + * @vr_die: a variable DIE + * @buf: a buffer for type and variable name + * @len: the max-length of @buf + * + * Get the name and type of @vr_die and stores it in @buf as "type\tname". + */ +int die_get_varname(Dwarf_Die *vr_die, char *buf, int len) +{ + int ret, ret2; + + ret = die_get_typename(vr_die, buf, len); + if (ret < 0) { + pr_debug("Failed to get type, make it unknown.\n"); + ret = snprintf(buf, len, "(unknown_type)"); + } + if (ret > 0) { + ret2 = snprintf(buf + ret, len - ret, "\t%s", + dwarf_diename(vr_die)); + ret = (ret2 >= len - ret) ? 
-E2BIG : ret2 + ret; + } + return ret; +} + diff --git a/kernel/tools/perf/util/dwarf-aux.h b/kernel/tools/perf/util/dwarf-aux.h new file mode 100644 index 000000000..af7dbcd5f --- /dev/null +++ b/kernel/tools/perf/util/dwarf-aux.h @@ -0,0 +1,121 @@ +#ifndef _DWARF_AUX_H +#define _DWARF_AUX_H +/* + * dwarf-aux.h : libdw auxiliary interfaces + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#include <dwarf.h> +#include <elfutils/libdw.h> +#include <elfutils/libdwfl.h> +#include <elfutils/version.h> + +/* Find the realpath of the target file */ +extern const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname); + +/* Get DW_AT_comp_dir (should be NULL with older gcc) */ +extern const char *cu_get_comp_dir(Dwarf_Die *cu_die); + +/* Get a line number and file name for given address */ +extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr, + const char **fname, int *lineno); + +/* Walk on functions at given address */ +extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr, + int (*callback)(Dwarf_Die *, void *), void *data); + +/* Ensure that this DIE is a subprogram and definition (not declaration) */ +extern bool die_is_func_def(Dwarf_Die *dw_die); + +/* Ensure that this DIE is an instance of a subprogram */ +extern bool die_is_func_instance(Dwarf_Die *dw_die); + +/* Compare diename and tname */ +extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname); + +/* Get callsite line number of inline-function instance */ +extern int die_get_call_lineno(Dwarf_Die *in_die); + +/* Get callsite file name of inlined function instance */ +extern const char *die_get_call_file(Dwarf_Die *in_die); + +/* Get type die */ +extern Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem); + +/* Get a type die, but skip qualifiers and typedef */ +extern Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem); + +/* Check whether the DIE is signed or not */ +extern bool die_is_signed_type(Dwarf_Die *tp_die); + +/* Get data_member_location offset */ +extern int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs); + +/* Return values for die_find_child() callbacks */ +enum { + DIE_FIND_CB_END = 0, /* End of Search */ + DIE_FIND_CB_CHILD = 1, /* Search only children */ + DIE_FIND_CB_SIBLING = 2, /* Search only siblings */ + DIE_FIND_CB_CONTINUE = 3, /* Search children and siblings */ +}; + +/* Search child DIEs */ +extern Dwarf_Die *die_find_child(Dwarf_Die *rt_die, + int (*callback)(Dwarf_Die *, void *), + void *data, Dwarf_Die *die_mem); + +/* Search a non-inlined function including given address */ +extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, + Dwarf_Die *die_mem); + +/* Search the top inlined function including given address */ +extern Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, + Dwarf_Die *die_mem); + +/* Search the deepest inlined function including given address */ +extern
Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, + Dwarf_Die *die_mem); + +/* Walk on the instances of given DIE */ +extern int die_walk_instances(Dwarf_Die *in_die, + int (*callback)(Dwarf_Die *, void *), void *data); + +/* Walker on lines (Note: line number will not be sorted) */ +typedef int (* line_walk_callback_t) (const char *fname, int lineno, + Dwarf_Addr addr, void *data); + +/* + * Walk on lines inside given DIE. If the DIE is a subprogram, walk only on + * the lines inside the subprogram, otherwise the DIE must be a CU DIE. + */ +extern int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, + void *data); + +/* Find a variable called 'name' at given address */ +extern Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name, + Dwarf_Addr addr, Dwarf_Die *die_mem); + +/* Find a member called 'name' */ +extern Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name, + Dwarf_Die *die_mem); + +/* Get the name of given variable DIE */ +extern int die_get_typename(Dwarf_Die *vr_die, char *buf, int len); + +/* Get the name and type of given variable DIE, stored as "type\tname" */ +extern int die_get_varname(Dwarf_Die *vr_die, char *buf, int len); +#endif diff --git a/kernel/tools/perf/util/environment.c b/kernel/tools/perf/util/environment.c new file mode 100644 index 000000000..275b0ee34 --- /dev/null +++ b/kernel/tools/perf/util/environment.c @@ -0,0 +1,9 @@ +/* + * We put all the perf config variables in this same object + * file, so that programs can link against the config parser + * without having to link against all the rest of perf. + */ +#include "cache.h" + +const char *pager_program; +int pager_use_color = 1; diff --git a/kernel/tools/perf/util/event.c b/kernel/tools/perf/util/event.c new file mode 100644 index 000000000..ff866c4d2 --- /dev/null +++ b/kernel/tools/perf/util/event.c @@ -0,0 +1,978 @@ +#include +#include +#include "event.h" +#include "debug.h" +#include "hist.h" +#include "machine.h" +#include "sort.h" +#include "string.h" +#include "strlist.h" +#include "thread.h" +#include "thread_map.h" +#include "symbol/kallsyms.h" + +static const char *perf_event__names[] = { + [0] = "TOTAL", + [PERF_RECORD_MMAP] = "MMAP", + [PERF_RECORD_MMAP2] = "MMAP2", + [PERF_RECORD_LOST] = "LOST", + [PERF_RECORD_COMM] = "COMM", + [PERF_RECORD_EXIT] = "EXIT", + [PERF_RECORD_THROTTLE] = "THROTTLE", + [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE", + [PERF_RECORD_FORK] = "FORK", + [PERF_RECORD_READ] = "READ", + [PERF_RECORD_SAMPLE] = "SAMPLE", + [PERF_RECORD_HEADER_ATTR] = "ATTR", + [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE", + [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA", + [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID", + [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND", + [PERF_RECORD_ID_INDEX] = "ID_INDEX", +}; + +const char *perf_event__name(unsigned int id) +{ + if (id >= ARRAY_SIZE(perf_event__names)) + return "INVALID"; + if (!perf_event__names[id]) + return "UNKNOWN"; + return perf_event__names[id]; +} + +static struct perf_sample synth_sample = { + .pid = -1, + .tid = -1, + .time = -1, + .stream_id = -1, + .cpu = -1, + .period = 1, +}; + +/* + * Assumes that the first 4095 bytes of /proc/pid/stat contains + * the comm, tgid and ppid. 
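+ *
+ * Note that the code below actually parses /proc/PID/status, whose
+ * relevant lines look like this (example values):
+ *
+ *	Name:	bash
+ *	Tgid:	1234
+ *	PPid:	1000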
+ */ +static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len, + pid_t *tgid, pid_t *ppid) +{ + char filename[PATH_MAX]; + char bf[4096]; + int fd; + size_t size = 0, n; + char *nl, *name, *tgids, *ppids; + + *tgid = -1; + *ppid = -1; + + snprintf(filename, sizeof(filename), "/proc/%d/status", pid); + + fd = open(filename, O_RDONLY); + if (fd < 0) { + pr_debug("couldn't open %s\n", filename); + return -1; + } + + n = read(fd, bf, sizeof(bf) - 1); + close(fd); + if (n <= 0) { + pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n", + pid); + return -1; + } + bf[n] = '\0'; + + name = strstr(bf, "Name:"); + tgids = strstr(bf, "Tgid:"); + ppids = strstr(bf, "PPid:"); + + if (name) { + name += 5; /* strlen("Name:") */ + + while (*name && isspace(*name)) + ++name; + + nl = strchr(name, '\n'); + if (nl) + *nl = '\0'; + + size = strlen(name); + if (size >= len) + size = len - 1; + memcpy(comm, name, size); + comm[size] = '\0'; + } else { + pr_debug("Name: string not found for pid %d\n", pid); + } + + if (tgids) { + tgids += 5; /* strlen("Tgid:") */ + *tgid = atoi(tgids); + } else { + pr_debug("Tgid: string not found for pid %d\n", pid); + } + + if (ppids) { + ppids += 5; /* strlen("PPid:") */ + *ppid = atoi(ppids); + } else { + pr_debug("PPid: string not found for pid %d\n", pid); + } + + return 0; +} + +static int perf_event__prepare_comm(union perf_event *event, pid_t pid, + struct machine *machine, + pid_t *tgid, pid_t *ppid) +{ + size_t size; + + *ppid = -1; + + memset(&event->comm, 0, sizeof(event->comm)); + + if (machine__is_host(machine)) { + if (perf_event__get_comm_ids(pid, event->comm.comm, + sizeof(event->comm.comm), + tgid, ppid) != 0) { + return -1; + } + } else { + *tgid = machine->pid; + } + + if (*tgid < 0) + return -1; + + event->comm.pid = *tgid; + event->comm.header.type = PERF_RECORD_COMM; + + size = strlen(event->comm.comm) + 1; + size = PERF_ALIGN(size, sizeof(u64)); + memset(event->comm.comm + size, 0, machine->id_hdr_size); + event->comm.header.size = (sizeof(event->comm) - + (sizeof(event->comm.comm) - size) + + machine->id_hdr_size); + event->comm.tid = pid; + + return 0; +} + +static pid_t perf_event__synthesize_comm(struct perf_tool *tool, + union perf_event *event, pid_t pid, + perf_event__handler_t process, + struct machine *machine) +{ + pid_t tgid, ppid; + + if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0) + return -1; + + if (process(tool, event, &synth_sample, machine) != 0) + return -1; + + return tgid; +} + +static int perf_event__synthesize_fork(struct perf_tool *tool, + union perf_event *event, + pid_t pid, pid_t tgid, pid_t ppid, + perf_event__handler_t process, + struct machine *machine) +{ + memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size); + + /* + * for main thread set parent to ppid from status file. For other + * threads set parent pid to main thread. 
ie., assume main thread + * spawns all threads in a process + */ + if (tgid == pid) { + event->fork.ppid = ppid; + event->fork.ptid = ppid; + } else { + event->fork.ppid = tgid; + event->fork.ptid = tgid; + } + event->fork.pid = tgid; + event->fork.tid = pid; + event->fork.header.type = PERF_RECORD_FORK; + + event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); + + if (process(tool, event, &synth_sample, machine) != 0) + return -1; + + return 0; +} + +int perf_event__synthesize_mmap_events(struct perf_tool *tool, + union perf_event *event, + pid_t pid, pid_t tgid, + perf_event__handler_t process, + struct machine *machine, + bool mmap_data) +{ + char filename[PATH_MAX]; + FILE *fp; + int rc = 0; + + if (machine__is_default_guest(machine)) + return 0; + + snprintf(filename, sizeof(filename), "%s/proc/%d/maps", + machine->root_dir, pid); + + fp = fopen(filename, "r"); + if (fp == NULL) { + /* + * We raced with a task exiting - just return: + */ + pr_debug("couldn't open %s\n", filename); + return -1; + } + + event->header.type = PERF_RECORD_MMAP2; + + while (1) { + char bf[BUFSIZ]; + char prot[5]; + char execname[PATH_MAX]; + char anonstr[] = "//anon"; + unsigned int ino; + size_t size; + ssize_t n; + + if (fgets(bf, sizeof(bf), fp) == NULL) + break; + + /* ensure null termination since stack will be reused. */ + strcpy(execname, ""); + + /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ + n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n", + &event->mmap2.start, &event->mmap2.len, prot, + &event->mmap2.pgoff, &event->mmap2.maj, + &event->mmap2.min, + &ino, execname); + + /* + * Anon maps don't have the execname. + */ + if (n < 7) + continue; + + event->mmap2.ino = (u64)ino; + + /* + * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c + */ + if (machine__is_host(machine)) + event->header.misc = PERF_RECORD_MISC_USER; + else + event->header.misc = PERF_RECORD_MISC_GUEST_USER; + + /* map protection and flags bits */ + event->mmap2.prot = 0; + event->mmap2.flags = 0; + if (prot[0] == 'r') + event->mmap2.prot |= PROT_READ; + if (prot[1] == 'w') + event->mmap2.prot |= PROT_WRITE; + if (prot[2] == 'x') + event->mmap2.prot |= PROT_EXEC; + + if (prot[3] == 's') + event->mmap2.flags |= MAP_SHARED; + else + event->mmap2.flags |= MAP_PRIVATE; + + if (prot[2] != 'x') { + if (!mmap_data || prot[0] != 'r') + continue; + + event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; + } + + if (!strcmp(execname, "")) + strcpy(execname, anonstr); + + size = strlen(execname) + 1; + memcpy(event->mmap2.filename, execname, size); + size = PERF_ALIGN(size, sizeof(u64)); + event->mmap2.len -= event->mmap.start; + event->mmap2.header.size = (sizeof(event->mmap2) - + (sizeof(event->mmap2.filename) - size)); + memset(event->mmap2.filename + size, 0, machine->id_hdr_size); + event->mmap2.header.size += machine->id_hdr_size; + event->mmap2.pid = tgid; + event->mmap2.tid = pid; + + if (process(tool, event, &synth_sample, machine) != 0) { + rc = -1; + break; + } + } + + fclose(fp); + return rc; +} + +int perf_event__synthesize_modules(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine) +{ + int rc = 0; + struct rb_node *nd; + struct map_groups *kmaps = &machine->kmaps; + union perf_event *event = zalloc((sizeof(event->mmap) + + machine->id_hdr_size)); + if (event == NULL) { + pr_debug("Not enough memory synthesizing mmap event " + "for kernel modules\n"); + return -1; + } + + event->header.type = PERF_RECORD_MMAP; + + /* + * kernel uses 0 for 
user space maps, see kernel/perf_event.c + * __perf_event_mmap + */ + if (machine__is_host(machine)) + event->header.misc = PERF_RECORD_MISC_KERNEL; + else + event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; + + for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]); + nd; nd = rb_next(nd)) { + size_t size; + struct map *pos = rb_entry(nd, struct map, rb_node); + + if (pos->dso->kernel) + continue; + + size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); + event->mmap.header.type = PERF_RECORD_MMAP; + event->mmap.header.size = (sizeof(event->mmap) - + (sizeof(event->mmap.filename) - size)); + memset(event->mmap.filename + size, 0, machine->id_hdr_size); + event->mmap.header.size += machine->id_hdr_size; + event->mmap.start = pos->start; + event->mmap.len = pos->end - pos->start; + event->mmap.pid = machine->pid; + + memcpy(event->mmap.filename, pos->dso->long_name, + pos->dso->long_name_len + 1); + if (process(tool, event, &synth_sample, machine) != 0) { + rc = -1; + break; + } + } + + free(event); + return rc; +} + +static int __event__synthesize_thread(union perf_event *comm_event, + union perf_event *mmap_event, + union perf_event *fork_event, + pid_t pid, int full, + perf_event__handler_t process, + struct perf_tool *tool, + struct machine *machine, bool mmap_data) +{ + char filename[PATH_MAX]; + DIR *tasks; + struct dirent dirent, *next; + pid_t tgid, ppid; + int rc = 0; + + /* special case: only send one comm event using passed in pid */ + if (!full) { + tgid = perf_event__synthesize_comm(tool, comm_event, pid, + process, machine); + + if (tgid == -1) + return -1; + + return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, + process, machine, mmap_data); + } + + if (machine__is_default_guest(machine)) + return 0; + + snprintf(filename, sizeof(filename), "%s/proc/%d/task", + machine->root_dir, pid); + + tasks = opendir(filename); + if (tasks == NULL) { + pr_debug("couldn't open %s\n", filename); + return 0; + } + + while (!readdir_r(tasks, &dirent, &next) && next) { + char *end; + pid_t _pid; + + _pid = strtol(dirent.d_name, &end, 10); + if (*end) + continue; + + rc = -1; + if (perf_event__prepare_comm(comm_event, _pid, machine, + &tgid, &ppid) != 0) + break; + + if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid, + ppid, process, machine) < 0) + break; + /* + * Send the prepared comm event + */ + if (process(tool, comm_event, &synth_sample, machine) != 0) + break; + + rc = 0; + if (_pid == pid) { + /* process the parent's maps too */ + rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, + process, machine, mmap_data); + if (rc) + break; + } + } + + closedir(tasks); + return rc; +} + +int perf_event__synthesize_thread_map(struct perf_tool *tool, + struct thread_map *threads, + perf_event__handler_t process, + struct machine *machine, + bool mmap_data) +{ + union perf_event *comm_event, *mmap_event, *fork_event; + int err = -1, thread, j; + + comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); + if (comm_event == NULL) + goto out; + + mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size); + if (mmap_event == NULL) + goto out_free_comm; + + fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); + if (fork_event == NULL) + goto out_free_mmap; + + err = 0; + for (thread = 0; thread < threads->nr; ++thread) { + if (__event__synthesize_thread(comm_event, mmap_event, + fork_event, + threads->map[thread], 0, + process, tool, machine, + mmap_data)) { + err = -1; + break; + } + + /* + * comm.pid is set 
to thread group id by + * perf_event__synthesize_comm + */ + if ((int) comm_event->comm.pid != threads->map[thread]) { + bool need_leader = true; + + /* is thread group leader in thread_map? */ + for (j = 0; j < threads->nr; ++j) { + if ((int) comm_event->comm.pid == threads->map[j]) { + need_leader = false; + break; + } + } + + /* if not, generate events for it */ + if (need_leader && + __event__synthesize_thread(comm_event, mmap_event, + fork_event, + comm_event->comm.pid, 0, + process, tool, machine, + mmap_data)) { + err = -1; + break; + } + } + } + free(fork_event); +out_free_mmap: + free(mmap_event); +out_free_comm: + free(comm_event); +out: + return err; +} + +int perf_event__synthesize_threads(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine, bool mmap_data) +{ + DIR *proc; + char proc_path[PATH_MAX]; + struct dirent dirent, *next; + union perf_event *comm_event, *mmap_event, *fork_event; + int err = -1; + + if (machine__is_default_guest(machine)) + return 0; + + comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); + if (comm_event == NULL) + goto out; + + mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size); + if (mmap_event == NULL) + goto out_free_comm; + + fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); + if (fork_event == NULL) + goto out_free_mmap; + + snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir); + proc = opendir(proc_path); + + if (proc == NULL) + goto out_free_fork; + + while (!readdir_r(proc, &dirent, &next) && next) { + char *end; + pid_t pid = strtol(dirent.d_name, &end, 10); + + if (*end) /* only interested in proper numerical dirents */ + continue; + /* + * We may race with exiting thread, so don't stop just because + * one thread couldn't be synthesized. + */ + __event__synthesize_thread(comm_event, mmap_event, fork_event, pid, + 1, process, tool, machine, mmap_data); + } + + err = 0; + closedir(proc); +out_free_fork: + free(fork_event); +out_free_mmap: + free(mmap_event); +out_free_comm: + free(comm_event); +out: + return err; +} + +struct process_symbol_args { + const char *name; + u64 start; +}; + +static int find_symbol_cb(void *arg, const char *name, char type, + u64 start) +{ + struct process_symbol_args *args = arg; + + /* + * Must be a function or at least an alias, as in PARISC64, where "_text" is + * an 'A' to the same address as "_stext". + */ + if (!(symbol_type__is_a(type, MAP__FUNCTION) || + type == 'A') || strcmp(name, args->name)) + return 0; + + args->start = start; + return 1; +} + +u64 kallsyms__get_function_start(const char *kallsyms_filename, + const char *symbol_name) +{ + struct process_symbol_args args = { .name = symbol_name, }; + + if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0) + return 0; + + return args.start; +} + +int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine) +{ + size_t size; + const char *mmap_name; + char name_buff[PATH_MAX]; + struct map *map; + struct kmap *kmap; + int err; + union perf_event *event; + + if (machine->vmlinux_maps[0] == NULL) + return -1; + + /* + * We should get this from /sys/kernel/sections/.text, but till that is + * available use this, and after it is use this as a fallback for older + * kernels. 
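+ * Concretely, the fallback below stuffs the reference relocation
+ * symbol (typically "_text" or "_stext") and its address into the
+ * synthesized mmap event; see kallsyms__get_function_start() above
+ * for how such a symbol is resolved.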
+ */ + event = zalloc((sizeof(event->mmap) + machine->id_hdr_size)); + if (event == NULL) { + pr_debug("Not enough memory synthesizing mmap event " + "for kernel modules\n"); + return -1; + } + + mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff)); + if (machine__is_host(machine)) { + /* + * kernel uses PERF_RECORD_MISC_USER for user space maps, + * see kernel/perf_event.c __perf_event_mmap + */ + event->header.misc = PERF_RECORD_MISC_KERNEL; + } else { + event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; + } + + map = machine->vmlinux_maps[MAP__FUNCTION]; + kmap = map__kmap(map); + size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), + "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1; + size = PERF_ALIGN(size, sizeof(u64)); + event->mmap.header.type = PERF_RECORD_MMAP; + event->mmap.header.size = (sizeof(event->mmap) - + (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); + event->mmap.pgoff = kmap->ref_reloc_sym->addr; + event->mmap.start = map->start; + event->mmap.len = map->end - event->mmap.start; + event->mmap.pid = machine->pid; + + err = process(tool, event, &synth_sample, machine); + free(event); + + return err; +} + +size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp) +{ + const char *s; + + if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC) + s = " exec"; + else + s = ""; + + return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid); +} + +int perf_event__process_comm(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + return machine__process_comm_event(machine, event, sample); +} + +int perf_event__process_lost(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + return machine__process_lost_event(machine, event, sample); +} + +size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) +{ + return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n", + event->mmap.pid, event->mmap.tid, event->mmap.start, + event->mmap.len, event->mmap.pgoff, + (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x', + event->mmap.filename); +} + +size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp) +{ + return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 + " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n", + event->mmap2.pid, event->mmap2.tid, event->mmap2.start, + event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj, + event->mmap2.min, event->mmap2.ino, + event->mmap2.ino_generation, + (event->mmap2.prot & PROT_READ) ? 'r' : '-', + (event->mmap2.prot & PROT_WRITE) ? 'w' : '-', + (event->mmap2.prot & PROT_EXEC) ? 'x' : '-', + (event->mmap2.flags & MAP_SHARED) ? 
's' : 'p', + event->mmap2.filename); +} + +int perf_event__process_mmap(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + return machine__process_mmap_event(machine, event, sample); +} + +int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + return machine__process_mmap2_event(machine, event, sample); +} + +size_t perf_event__fprintf_task(union perf_event *event, FILE *fp) +{ + return fprintf(fp, "(%d:%d):(%d:%d)\n", + event->fork.pid, event->fork.tid, + event->fork.ppid, event->fork.ptid); +} + +int perf_event__process_fork(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + return machine__process_fork_event(machine, event, sample); +} + +int perf_event__process_exit(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + return machine__process_exit_event(machine, event, sample); +} + +size_t perf_event__fprintf(union perf_event *event, FILE *fp) +{ + size_t ret = fprintf(fp, "PERF_RECORD_%s", + perf_event__name(event->header.type)); + + switch (event->header.type) { + case PERF_RECORD_COMM: + ret += perf_event__fprintf_comm(event, fp); + break; + case PERF_RECORD_FORK: + case PERF_RECORD_EXIT: + ret += perf_event__fprintf_task(event, fp); + break; + case PERF_RECORD_MMAP: + ret += perf_event__fprintf_mmap(event, fp); + break; + case PERF_RECORD_MMAP2: + ret += perf_event__fprintf_mmap2(event, fp); + break; + default: + ret += fprintf(fp, "\n"); + } + + return ret; +} + +int perf_event__process(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + return machine__process_event(machine, event, sample); +} + +void thread__find_addr_map(struct thread *thread, u8 cpumode, + enum map_type type, u64 addr, + struct addr_location *al) +{ + struct map_groups *mg = thread->mg; + struct machine *machine = mg->machine; + bool load_map = false; + + al->machine = machine; + al->thread = thread; + al->addr = addr; + al->cpumode = cpumode; + al->filtered = 0; + + if (machine == NULL) { + al->map = NULL; + return; + } + + if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) { + al->level = 'k'; + mg = &machine->kmaps; + load_map = true; + } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) { + al->level = '.'; + } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) { + al->level = 'g'; + mg = &machine->kmaps; + load_map = true; + } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) { + al->level = 'u'; + } else { + al->level = 'H'; + al->map = NULL; + + if ((cpumode == PERF_RECORD_MISC_GUEST_USER || + cpumode == PERF_RECORD_MISC_GUEST_KERNEL) && + !perf_guest) + al->filtered |= (1 << HIST_FILTER__GUEST); + if ((cpumode == PERF_RECORD_MISC_USER || + cpumode == PERF_RECORD_MISC_KERNEL) && + !perf_host) + al->filtered |= (1 << HIST_FILTER__HOST); + + return; + } +try_again: + al->map = map_groups__find(mg, type, al->addr); + if (al->map == NULL) { + /* + * If this is outside of all known maps, and is a negative + * address, try to look it up in the kernel dso, as it might be + * a vsyscall or vdso (which executes in user-mode). 
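+ * (For example, the legacy x86_64 vsyscall page sits at the fixed
+ * kernel-range address 0xffffffffff600000 even though it executes
+ * in user mode.)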
+ * + * XXX This is nasty, we should have a symbol list in the + * "[vdso]" dso, but for now lets use the old trick of looking + * in the whole kernel symbol list. + */ + if (cpumode == PERF_RECORD_MISC_USER && machine && + mg != &machine->kmaps && + machine__kernel_ip(machine, al->addr)) { + mg = &machine->kmaps; + load_map = true; + goto try_again; + } + } else { + /* + * Kernel maps might be changed when loading symbols so loading + * must be done prior to using kernel maps. + */ + if (load_map) + map__load(al->map, machine->symbol_filter); + al->addr = al->map->map_ip(al->map, al->addr); + } +} + +void thread__find_addr_location(struct thread *thread, + u8 cpumode, enum map_type type, u64 addr, + struct addr_location *al) +{ + thread__find_addr_map(thread, cpumode, type, addr, al); + if (al->map != NULL) + al->sym = map__find_symbol(al->map, al->addr, + thread->mg->machine->symbol_filter); + else + al->sym = NULL; +} + +int perf_event__preprocess_sample(const union perf_event *event, + struct machine *machine, + struct addr_location *al, + struct perf_sample *sample) +{ + u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + struct thread *thread = machine__findnew_thread(machine, sample->pid, + sample->tid); + + if (thread == NULL) + return -1; + + dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid); + /* + * Have we already created the kernel maps for this machine? + * + * This should have happened earlier, when we processed the kernel MMAP + * events, but for older perf.data files there was no such thing, so do + * it now. + */ + if (cpumode == PERF_RECORD_MISC_KERNEL && + machine->vmlinux_maps[MAP__FUNCTION] == NULL) + machine__create_kernel_maps(machine); + + thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al); + dump_printf(" ...... dso: %s\n", + al->map ? al->map->dso->long_name : + al->level == 'H' ? 
"[hypervisor]" : ""); + + if (thread__is_filtered(thread)) + al->filtered |= (1 << HIST_FILTER__THREAD); + + al->sym = NULL; + al->cpu = sample->cpu; + + if (al->map) { + struct dso *dso = al->map->dso; + + if (symbol_conf.dso_list && + (!dso || !(strlist__has_entry(symbol_conf.dso_list, + dso->short_name) || + (dso->short_name != dso->long_name && + strlist__has_entry(symbol_conf.dso_list, + dso->long_name))))) { + al->filtered |= (1 << HIST_FILTER__DSO); + } + + al->sym = map__find_symbol(al->map, al->addr, + machine->symbol_filter); + } + + if (symbol_conf.sym_list && + (!al->sym || !strlist__has_entry(symbol_conf.sym_list, + al->sym->name))) { + al->filtered |= (1 << HIST_FILTER__SYMBOL); + } + + return 0; +} + +bool is_bts_event(struct perf_event_attr *attr) +{ + return attr->type == PERF_TYPE_HARDWARE && + (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && + attr->sample_period == 1; +} + +bool sample_addr_correlates_sym(struct perf_event_attr *attr) +{ + if (attr->type == PERF_TYPE_SOFTWARE && + (attr->config == PERF_COUNT_SW_PAGE_FAULTS || + attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN || + attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)) + return true; + + if (is_bts_event(attr)) + return true; + + return false; +} + +void perf_event__preprocess_sample_addr(union perf_event *event, + struct perf_sample *sample, + struct thread *thread, + struct addr_location *al) +{ + u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + + thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->addr, al); + if (!al->map) + thread__find_addr_map(thread, cpumode, MAP__VARIABLE, + sample->addr, al); + + al->cpu = sample->cpu; + al->sym = NULL; + + if (al->map) + al->sym = map__find_symbol(al->map, al->addr, NULL); +} diff --git a/kernel/tools/perf/util/event.h b/kernel/tools/perf/util/event.h new file mode 100644 index 000000000..09b9e8d3f --- /dev/null +++ b/kernel/tools/perf/util/event.h @@ -0,0 +1,395 @@ +#ifndef __PERF_RECORD_H +#define __PERF_RECORD_H + +#include +#include + +#include "../perf.h" +#include "map.h" +#include "build-id.h" +#include "perf_regs.h" + +struct mmap_event { + struct perf_event_header header; + u32 pid, tid; + u64 start; + u64 len; + u64 pgoff; + char filename[PATH_MAX]; +}; + +struct mmap2_event { + struct perf_event_header header; + u32 pid, tid; + u64 start; + u64 len; + u64 pgoff; + u32 maj; + u32 min; + u64 ino; + u64 ino_generation; + u32 prot; + u32 flags; + char filename[PATH_MAX]; +}; + +struct comm_event { + struct perf_event_header header; + u32 pid, tid; + char comm[16]; +}; + +struct fork_event { + struct perf_event_header header; + u32 pid, ppid; + u32 tid, ptid; + u64 time; +}; + +struct lost_event { + struct perf_event_header header; + u64 id; + u64 lost; +}; + +/* + * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID + */ +struct read_event { + struct perf_event_header header; + u32 pid, tid; + u64 value; + u64 time_enabled; + u64 time_running; + u64 id; +}; + +struct throttle_event { + struct perf_event_header header; + u64 time; + u64 id; + u64 stream_id; +}; + +#define PERF_SAMPLE_MASK \ + (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \ + PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \ + PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \ + PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | \ + PERF_SAMPLE_IDENTIFIER) + +/* perf sample has 16 bits size limit */ +#define PERF_SAMPLE_MAX_SIZE (1 << 16) + +struct sample_event { + struct perf_event_header header; + u64 array[]; +}; + +struct regs_dump { + u64 abi; + u64 mask; + u64 *regs; + + /* Cached 
values/mask filled by first register access. */ + u64 cache_regs[PERF_REGS_MAX]; + u64 cache_mask; +}; + +struct stack_dump { + u16 offset; + u64 size; + char *data; +}; + +struct sample_read_value { + u64 value; + u64 id; +}; + +struct sample_read { + u64 time_enabled; + u64 time_running; + union { + struct { + u64 nr; + struct sample_read_value *values; + } group; + struct sample_read_value one; + }; +}; + +struct ip_callchain { + u64 nr; + u64 ips[0]; +}; + +struct branch_flags { + u64 mispred:1; + u64 predicted:1; + u64 in_tx:1; + u64 abort:1; + u64 reserved:60; +}; + +struct branch_entry { + u64 from; + u64 to; + struct branch_flags flags; +}; + +struct branch_stack { + u64 nr; + struct branch_entry entries[0]; +}; + +enum { + PERF_IP_FLAG_BRANCH = 1ULL << 0, + PERF_IP_FLAG_CALL = 1ULL << 1, + PERF_IP_FLAG_RETURN = 1ULL << 2, + PERF_IP_FLAG_CONDITIONAL = 1ULL << 3, + PERF_IP_FLAG_SYSCALLRET = 1ULL << 4, + PERF_IP_FLAG_ASYNC = 1ULL << 5, + PERF_IP_FLAG_INTERRUPT = 1ULL << 6, + PERF_IP_FLAG_TX_ABORT = 1ULL << 7, + PERF_IP_FLAG_TRACE_BEGIN = 1ULL << 8, + PERF_IP_FLAG_TRACE_END = 1ULL << 9, + PERF_IP_FLAG_IN_TX = 1ULL << 10, +}; + +#define PERF_BRANCH_MASK (\ + PERF_IP_FLAG_BRANCH |\ + PERF_IP_FLAG_CALL |\ + PERF_IP_FLAG_RETURN |\ + PERF_IP_FLAG_CONDITIONAL |\ + PERF_IP_FLAG_SYSCALLRET |\ + PERF_IP_FLAG_ASYNC |\ + PERF_IP_FLAG_INTERRUPT |\ + PERF_IP_FLAG_TX_ABORT |\ + PERF_IP_FLAG_TRACE_BEGIN |\ + PERF_IP_FLAG_TRACE_END) + +struct perf_sample { + u64 ip; + u32 pid, tid; + u64 time; + u64 addr; + u64 id; + u64 stream_id; + u64 period; + u64 weight; + u64 transaction; + u32 cpu; + u32 raw_size; + u64 data_src; + u32 flags; + u16 insn_len; + void *raw_data; + struct ip_callchain *callchain; + struct branch_stack *branch_stack; + struct regs_dump user_regs; + struct regs_dump intr_regs; + struct stack_dump user_stack; + struct sample_read read; +}; + +#define PERF_MEM_DATA_SRC_NONE \ + (PERF_MEM_S(OP, NA) |\ + PERF_MEM_S(LVL, NA) |\ + PERF_MEM_S(SNOOP, NA) |\ + PERF_MEM_S(LOCK, NA) |\ + PERF_MEM_S(TLB, NA)) + +struct build_id_event { + struct perf_event_header header; + pid_t pid; + u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))]; + char filename[]; +}; + +enum perf_user_event_type { /* above any possible kernel type */ + PERF_RECORD_USER_TYPE_START = 64, + PERF_RECORD_HEADER_ATTR = 64, + PERF_RECORD_HEADER_EVENT_TYPE = 65, /* deprecated */ + PERF_RECORD_HEADER_TRACING_DATA = 66, + PERF_RECORD_HEADER_BUILD_ID = 67, + PERF_RECORD_FINISHED_ROUND = 68, + PERF_RECORD_ID_INDEX = 69, + PERF_RECORD_HEADER_MAX +}; + +/* + * The kernel collects the number of events it couldn't send in a stretch and + * when possible sends this number in a PERF_RECORD_LOST event. The number of + * such "chunks" of lost events is stored in .nr_events[PERF_EVENT_LOST] while + * total_lost tells exactly how many events the kernel in fact lost, i.e. it is + * the sum of all struct lost_event.lost fields reported. + * + * The total_period is needed because by default auto-freq is used, so + * multiplying nr_events[PERF_EVENT_SAMPLE] by a frequency isn't enough to get + * the total number of low level events; it is necessary to sum all struct + * sample_event.period values and stash the result in total_period.
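+ *
+ * For example, a hypothetical consumer could derive an average sample
+ * period as:
+ *
+ *	avg = stats->total_period / stats->nr_events[PERF_RECORD_SAMPLE];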
+ */ +struct events_stats { + u64 total_period; + u64 total_non_filtered_period; + u64 total_lost; + u64 total_invalid_chains; + u32 nr_events[PERF_RECORD_HEADER_MAX]; + u32 nr_non_filtered_samples; + u32 nr_lost_warned; + u32 nr_unknown_events; + u32 nr_invalid_chains; + u32 nr_unknown_id; + u32 nr_unprocessable_samples; +}; + +struct attr_event { + struct perf_event_header header; + struct perf_event_attr attr; + u64 id[]; +}; + +#define MAX_EVENT_NAME 64 + +struct perf_trace_event_type { + u64 event_id; + char name[MAX_EVENT_NAME]; +}; + +struct event_type_event { + struct perf_event_header header; + struct perf_trace_event_type event_type; +}; + +struct tracing_data_event { + struct perf_event_header header; + u32 size; +}; + +struct id_index_entry { + u64 id; + u64 idx; + u64 cpu; + u64 tid; +}; + +struct id_index_event { + struct perf_event_header header; + u64 nr; + struct id_index_entry entries[0]; +}; + +union perf_event { + struct perf_event_header header; + struct mmap_event mmap; + struct mmap2_event mmap2; + struct comm_event comm; + struct fork_event fork; + struct lost_event lost; + struct read_event read; + struct throttle_event throttle; + struct sample_event sample; + struct attr_event attr; + struct event_type_event event_type; + struct tracing_data_event tracing_data; + struct build_id_event build_id; + struct id_index_event id_index; +}; + +void perf_event__print_totals(void); + +struct perf_tool; +struct thread_map; + +typedef int (*perf_event__handler_t)(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); + +int perf_event__synthesize_thread_map(struct perf_tool *tool, + struct thread_map *threads, + perf_event__handler_t process, + struct machine *machine, bool mmap_data); +int perf_event__synthesize_threads(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine, bool mmap_data); +int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine); + +int perf_event__synthesize_modules(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine); + +int perf_event__process_comm(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process_lost(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process_mmap(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process_mmap2(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process_fork(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process_exit(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); + +struct addr_location; + +int perf_event__preprocess_sample(const union perf_event *event, + struct machine *machine, + struct addr_location *al, + struct perf_sample *sample); + +struct thread; + +bool is_bts_event(struct perf_event_attr *attr); +bool sample_addr_correlates_sym(struct perf_event_attr *attr); +void perf_event__preprocess_sample_addr(union perf_event *event, + struct perf_sample *sample, + 
struct thread *thread,
+					struct addr_location *al);
+
+const char *perf_event__name(unsigned int id);
+
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
+				     u64 read_format);
+int perf_event__synthesize_sample(union perf_event *event, u64 type,
+				  u64 read_format,
+				  const struct perf_sample *sample,
+				  bool swapped);
+
+int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+				       union perf_event *event,
+				       pid_t pid, pid_t tgid,
+				       perf_event__handler_t process,
+				       struct machine *machine,
+				       bool mmap_data);
+
+size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf(union perf_event *event, FILE *fp);
+
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+				 const char *symbol_name);
+
+#endif /* __PERF_RECORD_H */
diff --git a/kernel/tools/perf/util/evlist.c b/kernel/tools/perf/util/evlist.c
new file mode 100644
index 000000000..080be93ee
--- /dev/null
+++ b/kernel/tools/perf/util/evlist.c
@@ -0,0 +1,1570 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+#include "util.h"
+#include <api/fs/debugfs.h>
+#include <poll.h>
+#include "cpumap.h"
+#include "thread_map.h"
+#include "target.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "debug.h"
+#include <unistd.h>
+
+#include "parse-events.h"
+#include "parse-options.h"
+
+#include <sys/mman.h>
+
+#include <linux/bitops.h>
+#include <linux/hash.h>
+#include <linux/log2.h>
+
+static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
+static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
+
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
+
+void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
+		       struct thread_map *threads)
+{
+	int i;
+
+	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
+		INIT_HLIST_HEAD(&evlist->heads[i]);
+	INIT_LIST_HEAD(&evlist->entries);
+	perf_evlist__set_maps(evlist, cpus, threads);
+	fdarray__init(&evlist->pollfd, 64);
+	evlist->workload.pid = -1;
+}
+
+struct perf_evlist *perf_evlist__new(void)
+{
+	struct perf_evlist *evlist = zalloc(sizeof(*evlist));
+
+	if (evlist != NULL)
+		perf_evlist__init(evlist, NULL, NULL);
+
+	return evlist;
+}
+
+struct perf_evlist *perf_evlist__new_default(void)
+{
+	struct perf_evlist *evlist = perf_evlist__new();
+
+	if (evlist && perf_evlist__add_default(evlist)) {
+		perf_evlist__delete(evlist);
+		evlist = NULL;
+	}
+
+	return evlist;
+}
+
+/**
+ * perf_evlist__set_id_pos - set the positions of event ids.
+ * @evlist: selected event list
+ *
+ * Events with compatible sample types all have the same id_pos
+ * and is_pos. For convenience, put a copy on evlist.
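+ *
+ * For illustration: with sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
+ * PERF_SAMPLE_TIME | PERF_SAMPLE_ID, __perf_evsel__calc_id_pos() in
+ * evsel.c yields id_pos = 3, i.e. the id is the fourth u64 in the sample
+ * array, after IP, TID and TIME.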
+ */ +void perf_evlist__set_id_pos(struct perf_evlist *evlist) +{ + struct perf_evsel *first = perf_evlist__first(evlist); + + evlist->id_pos = first->id_pos; + evlist->is_pos = first->is_pos; +} + +static void perf_evlist__update_id_pos(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) + perf_evsel__calc_id_pos(evsel); + + perf_evlist__set_id_pos(evlist); +} + +static void perf_evlist__purge(struct perf_evlist *evlist) +{ + struct perf_evsel *pos, *n; + + evlist__for_each_safe(evlist, n, pos) { + list_del_init(&pos->node); + perf_evsel__delete(pos); + } + + evlist->nr_entries = 0; +} + +void perf_evlist__exit(struct perf_evlist *evlist) +{ + zfree(&evlist->mmap); + fdarray__exit(&evlist->pollfd); +} + +void perf_evlist__delete(struct perf_evlist *evlist) +{ + perf_evlist__munmap(evlist); + perf_evlist__close(evlist); + cpu_map__delete(evlist->cpus); + thread_map__delete(evlist->threads); + evlist->cpus = NULL; + evlist->threads = NULL; + perf_evlist__purge(evlist); + perf_evlist__exit(evlist); + free(evlist); +} + +void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) +{ + list_add_tail(&entry->node, &evlist->entries); + entry->idx = evlist->nr_entries; + entry->tracking = !entry->idx; + + if (!evlist->nr_entries++) + perf_evlist__set_id_pos(evlist); +} + +void perf_evlist__splice_list_tail(struct perf_evlist *evlist, + struct list_head *list, + int nr_entries) +{ + bool set_id_pos = !evlist->nr_entries; + + list_splice_tail(list, &evlist->entries); + evlist->nr_entries += nr_entries; + if (set_id_pos) + perf_evlist__set_id_pos(evlist); +} + +void __perf_evlist__set_leader(struct list_head *list) +{ + struct perf_evsel *evsel, *leader; + + leader = list_entry(list->next, struct perf_evsel, node); + evsel = list_entry(list->prev, struct perf_evsel, node); + + leader->nr_members = evsel->idx - leader->idx + 1; + + __evlist__for_each(list, evsel) { + evsel->leader = leader; + } +} + +void perf_evlist__set_leader(struct perf_evlist *evlist) +{ + if (evlist->nr_entries) { + evlist->nr_groups = evlist->nr_entries > 1 ? 
1 : 0; + __perf_evlist__set_leader(&evlist->entries); + } +} + +int perf_evlist__add_default(struct perf_evlist *evlist) +{ + struct perf_event_attr attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + }; + struct perf_evsel *evsel; + + event_attr_init(&attr); + + evsel = perf_evsel__new(&attr); + if (evsel == NULL) + goto error; + + /* use strdup() because free(evsel) assumes name is allocated */ + evsel->name = strdup("cycles"); + if (!evsel->name) + goto error_free; + + perf_evlist__add(evlist, evsel); + return 0; +error_free: + perf_evsel__delete(evsel); +error: + return -ENOMEM; +} + +static int perf_evlist__add_attrs(struct perf_evlist *evlist, + struct perf_event_attr *attrs, size_t nr_attrs) +{ + struct perf_evsel *evsel, *n; + LIST_HEAD(head); + size_t i; + + for (i = 0; i < nr_attrs; i++) { + evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i); + if (evsel == NULL) + goto out_delete_partial_list; + list_add_tail(&evsel->node, &head); + } + + perf_evlist__splice_list_tail(evlist, &head, nr_attrs); + + return 0; + +out_delete_partial_list: + __evlist__for_each_safe(&head, n, evsel) + perf_evsel__delete(evsel); + return -1; +} + +int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, + struct perf_event_attr *attrs, size_t nr_attrs) +{ + size_t i; + + for (i = 0; i < nr_attrs; i++) + event_attr_init(attrs + i); + + return perf_evlist__add_attrs(evlist, attrs, nr_attrs); +} + +struct perf_evsel * +perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + if (evsel->attr.type == PERF_TYPE_TRACEPOINT && + (int)evsel->attr.config == id) + return evsel; + } + + return NULL; +} + +struct perf_evsel * +perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist, + const char *name) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) && + (strcmp(evsel->name, name) == 0)) + return evsel; + } + + return NULL; +} + +int perf_evlist__add_newtp(struct perf_evlist *evlist, + const char *sys, const char *name, void *handler) +{ + struct perf_evsel *evsel = perf_evsel__newtp(sys, name); + + if (evsel == NULL) + return -1; + + evsel->handler = handler; + perf_evlist__add(evlist, evsel); + return 0; +} + +static int perf_evlist__nr_threads(struct perf_evlist *evlist, + struct perf_evsel *evsel) +{ + if (evsel->system_wide) + return 1; + else + return thread_map__nr(evlist->threads); +} + +void perf_evlist__disable(struct perf_evlist *evlist) +{ + int cpu, thread; + struct perf_evsel *pos; + int nr_cpus = cpu_map__nr(evlist->cpus); + int nr_threads; + + for (cpu = 0; cpu < nr_cpus; cpu++) { + evlist__for_each(evlist, pos) { + if (!perf_evsel__is_group_leader(pos) || !pos->fd) + continue; + nr_threads = perf_evlist__nr_threads(evlist, pos); + for (thread = 0; thread < nr_threads; thread++) + ioctl(FD(pos, cpu, thread), + PERF_EVENT_IOC_DISABLE, 0); + } + } +} + +void perf_evlist__enable(struct perf_evlist *evlist) +{ + int cpu, thread; + struct perf_evsel *pos; + int nr_cpus = cpu_map__nr(evlist->cpus); + int nr_threads; + + for (cpu = 0; cpu < nr_cpus; cpu++) { + evlist__for_each(evlist, pos) { + if (!perf_evsel__is_group_leader(pos) || !pos->fd) + continue; + nr_threads = perf_evlist__nr_threads(evlist, pos); + for (thread = 0; thread < nr_threads; thread++) + ioctl(FD(pos, cpu, thread), + PERF_EVENT_IOC_ENABLE, 0); + } + } +} + +int perf_evlist__disable_event(struct perf_evlist *evlist, + struct 
perf_evsel *evsel) +{ + int cpu, thread, err; + int nr_cpus = cpu_map__nr(evlist->cpus); + int nr_threads = perf_evlist__nr_threads(evlist, evsel); + + if (!evsel->fd) + return 0; + + for (cpu = 0; cpu < nr_cpus; cpu++) { + for (thread = 0; thread < nr_threads; thread++) { + err = ioctl(FD(evsel, cpu, thread), + PERF_EVENT_IOC_DISABLE, 0); + if (err) + return err; + } + } + return 0; +} + +int perf_evlist__enable_event(struct perf_evlist *evlist, + struct perf_evsel *evsel) +{ + int cpu, thread, err; + int nr_cpus = cpu_map__nr(evlist->cpus); + int nr_threads = perf_evlist__nr_threads(evlist, evsel); + + if (!evsel->fd) + return -EINVAL; + + for (cpu = 0; cpu < nr_cpus; cpu++) { + for (thread = 0; thread < nr_threads; thread++) { + err = ioctl(FD(evsel, cpu, thread), + PERF_EVENT_IOC_ENABLE, 0); + if (err) + return err; + } + } + return 0; +} + +static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist, + struct perf_evsel *evsel, int cpu) +{ + int thread, err; + int nr_threads = perf_evlist__nr_threads(evlist, evsel); + + if (!evsel->fd) + return -EINVAL; + + for (thread = 0; thread < nr_threads; thread++) { + err = ioctl(FD(evsel, cpu, thread), + PERF_EVENT_IOC_ENABLE, 0); + if (err) + return err; + } + return 0; +} + +static int perf_evlist__enable_event_thread(struct perf_evlist *evlist, + struct perf_evsel *evsel, + int thread) +{ + int cpu, err; + int nr_cpus = cpu_map__nr(evlist->cpus); + + if (!evsel->fd) + return -EINVAL; + + for (cpu = 0; cpu < nr_cpus; cpu++) { + err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); + if (err) + return err; + } + return 0; +} + +int perf_evlist__enable_event_idx(struct perf_evlist *evlist, + struct perf_evsel *evsel, int idx) +{ + bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus); + + if (per_cpu_mmaps) + return perf_evlist__enable_event_cpu(evlist, evsel, idx); + else + return perf_evlist__enable_event_thread(evlist, evsel, idx); +} + +int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) +{ + int nr_cpus = cpu_map__nr(evlist->cpus); + int nr_threads = thread_map__nr(evlist->threads); + int nfds = 0; + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + if (evsel->system_wide) + nfds += nr_cpus; + else + nfds += nr_cpus * nr_threads; + } + + if (fdarray__available_entries(&evlist->pollfd) < nfds && + fdarray__grow(&evlist->pollfd, nfds) < 0) + return -ENOMEM; + + return 0; +} + +static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx) +{ + int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP); + /* + * Save the idx so that when we filter out fds POLLHUP'ed we can + * close the associated evlist->mmap[] entry. 
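+	 *
+	 * Hypothetical consumer loop, modeled on builtin-record.c, showing
+	 * how poll and filter are meant to be combined:
+	 *
+	 *	while (!done) {
+	 *		if (perf_evlist__poll(evlist, timeout) > 0)
+	 *			... drain the mmaps ...
+	 *		perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP);
+	 *	}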
+ */ + if (pos >= 0) { + evlist->pollfd.priv[pos].idx = idx; + + fcntl(fd, F_SETFL, O_NONBLOCK); + } + + return pos; +} + +int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd) +{ + return __perf_evlist__add_pollfd(evlist, fd, -1); +} + +static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd) +{ + struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd); + + perf_evlist__mmap_put(evlist, fda->priv[fd].idx); +} + +int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask) +{ + return fdarray__filter(&evlist->pollfd, revents_and_mask, + perf_evlist__munmap_filtered); +} + +int perf_evlist__poll(struct perf_evlist *evlist, int timeout) +{ + return fdarray__poll(&evlist->pollfd, timeout); +} + +static void perf_evlist__id_hash(struct perf_evlist *evlist, + struct perf_evsel *evsel, + int cpu, int thread, u64 id) +{ + int hash; + struct perf_sample_id *sid = SID(evsel, cpu, thread); + + sid->id = id; + sid->evsel = evsel; + hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS); + hlist_add_head(&sid->node, &evlist->heads[hash]); +} + +void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, + int cpu, int thread, u64 id) +{ + perf_evlist__id_hash(evlist, evsel, cpu, thread, id); + evsel->id[evsel->ids++] = id; +} + +static int perf_evlist__id_add_fd(struct perf_evlist *evlist, + struct perf_evsel *evsel, + int cpu, int thread, int fd) +{ + u64 read_data[4] = { 0, }; + int id_idx = 1; /* The first entry is the counter value */ + u64 id; + int ret; + + ret = ioctl(fd, PERF_EVENT_IOC_ID, &id); + if (!ret) + goto add; + + if (errno != ENOTTY) + return -1; + + /* Legacy way to get event id.. All hail to old kernels! */ + + /* + * This way does not work with group format read, so bail + * out in that case. 
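+	 *
+	 * For a non-group counter the legacy read() layout is, at most,
+	 * { value, time_enabled, time_running, id }, which is why read_data
+	 * below has four u64 slots and id_idx starts at 1, bumped once per
+	 * PERF_FORMAT_TOTAL_TIME_* bit set in read_format.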
+ */ + if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP) + return -1; + + if (!(evsel->attr.read_format & PERF_FORMAT_ID) || + read(fd, &read_data, sizeof(read_data)) == -1) + return -1; + + if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + ++id_idx; + if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + ++id_idx; + + id = read_data[id_idx]; + + add: + perf_evlist__id_add(evlist, evsel, cpu, thread, id); + return 0; +} + +static void perf_evlist__set_sid_idx(struct perf_evlist *evlist, + struct perf_evsel *evsel, int idx, int cpu, + int thread) +{ + struct perf_sample_id *sid = SID(evsel, cpu, thread); + sid->idx = idx; + if (evlist->cpus && cpu >= 0) + sid->cpu = evlist->cpus->map[cpu]; + else + sid->cpu = -1; + if (!evsel->system_wide && evlist->threads && thread >= 0) + sid->tid = evlist->threads->map[thread]; + else + sid->tid = -1; +} + +struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id) +{ + struct hlist_head *head; + struct perf_sample_id *sid; + int hash; + + hash = hash_64(id, PERF_EVLIST__HLIST_BITS); + head = &evlist->heads[hash]; + + hlist_for_each_entry(sid, head, node) + if (sid->id == id) + return sid; + + return NULL; +} + +struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) +{ + struct perf_sample_id *sid; + + if (evlist->nr_entries == 1) + return perf_evlist__first(evlist); + + sid = perf_evlist__id2sid(evlist, id); + if (sid) + return sid->evsel; + + if (!perf_evlist__sample_id_all(evlist)) + return perf_evlist__first(evlist); + + return NULL; +} + +static int perf_evlist__event2id(struct perf_evlist *evlist, + union perf_event *event, u64 *id) +{ + const u64 *array = event->sample.array; + ssize_t n; + + n = (event->header.size - sizeof(event->header)) >> 3; + + if (event->header.type == PERF_RECORD_SAMPLE) { + if (evlist->id_pos >= n) + return -1; + *id = array[evlist->id_pos]; + } else { + if (evlist->is_pos > n) + return -1; + n -= evlist->is_pos; + *id = array[n]; + } + return 0; +} + +static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, + union perf_event *event) +{ + struct perf_evsel *first = perf_evlist__first(evlist); + struct hlist_head *head; + struct perf_sample_id *sid; + int hash; + u64 id; + + if (evlist->nr_entries == 1) + return first; + + if (!first->attr.sample_id_all && + event->header.type != PERF_RECORD_SAMPLE) + return first; + + if (perf_evlist__event2id(evlist, event, &id)) + return NULL; + + /* Synthesized events have an id of zero */ + if (!id) + return first; + + hash = hash_64(id, PERF_EVLIST__HLIST_BITS); + head = &evlist->heads[hash]; + + hlist_for_each_entry(sid, head, node) { + if (sid->id == id) + return sid->evsel; + } + return NULL; +} + +union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) +{ + struct perf_mmap *md = &evlist->mmap[idx]; + u64 head = perf_mmap__read_head(md); + u64 old = md->prev; + unsigned char *data = md->base + page_size; + union perf_event *event = NULL; + + if (evlist->overwrite) { + /* + * If we're further behind than half the buffer, there's a chance + * the writer will bite our tail and mess up the samples under us. + * + * If we somehow ended up ahead of the head, we got messed up. + * + * In either case, truncate and restart at head. + */ + int diff = head - old; + if (diff > md->mask / 2 || diff < 0) { + fprintf(stderr, "WARNING: failed to keep up with mmap data.\n"); + + /* + * head points to a known good entry, start there. 
+			 */
+			old = head;
+		}
+	}
+
+	if (old != head) {
+		size_t size;
+
+		event = (union perf_event *)&data[old & md->mask];
+		size = event->header.size;
+
+		/*
+		 * Event straddles the mmap boundary -- header should always
+		 * be inside due to u64 alignment of output.
+		 */
+		if ((old & md->mask) + size != ((old + size) & md->mask)) {
+			unsigned int offset = old;
+			unsigned int len = min(sizeof(*event), size), cpy;
+			void *dst = md->event_copy;
+
+			do {
+				cpy = min(md->mask + 1 - (offset & md->mask), len);
+				memcpy(dst, &data[offset & md->mask], cpy);
+				offset += cpy;
+				dst += cpy;
+				len -= cpy;
+			} while (len);
+
+			event = (union perf_event *) md->event_copy;
+		}
+
+		old += size;
+	}
+
+	md->prev = old;
+
+	return event;
+}
+
+static bool perf_mmap__empty(struct perf_mmap *md)
+{
+	return perf_mmap__read_head(md) == md->prev;
+}
+
+static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
+{
+	++evlist->mmap[idx].refcnt;
+}
+
+static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
+{
+	BUG_ON(evlist->mmap[idx].refcnt == 0);
+
+	if (--evlist->mmap[idx].refcnt == 0)
+		__perf_evlist__munmap(evlist, idx);
+}
+
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+{
+	struct perf_mmap *md = &evlist->mmap[idx];
+
+	if (!evlist->overwrite) {
+		u64 old = md->prev;
+
+		perf_mmap__write_tail(md, old);
+	}
+
+	if (md->refcnt == 1 && perf_mmap__empty(md))
+		perf_evlist__mmap_put(evlist, idx);
+}
+
+static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
+{
+	if (evlist->mmap[idx].base != NULL) {
+		munmap(evlist->mmap[idx].base, evlist->mmap_len);
+		evlist->mmap[idx].base = NULL;
+		evlist->mmap[idx].refcnt = 0;
+	}
+}
+
+void perf_evlist__munmap(struct perf_evlist *evlist)
+{
+	int i;
+
+	if (evlist->mmap == NULL)
+		return;
+
+	for (i = 0; i < evlist->nr_mmaps; i++)
+		__perf_evlist__munmap(evlist, i);
+
+	zfree(&evlist->mmap);
+}
+
+static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
+{
+	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
+	if (cpu_map__empty(evlist->cpus))
+		evlist->nr_mmaps = thread_map__nr(evlist->threads);
+	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+	return evlist->mmap != NULL ? 0 : -ENOMEM;
+}
+
+struct mmap_params {
+	int prot;
+	int mask;
+};
+
+static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
+			       struct mmap_params *mp, int fd)
+{
+	/*
+	 * The last one will be done at perf_evlist__mmap_consume(), so that we
+	 * make sure we don't prevent tools from consuming every last event in
+	 * the ring buffer.
+	 *
+	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
+	 * anymore, but the last events for it are still in the ring buffer,
+	 * waiting to be consumed.
+	 *
+	 * Tools can choose to ignore this at their own discretion, but the
+	 * evlist layer can't just drop it when filtering events in
+	 * perf_evlist__filter_pollfd().
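+	 *
+	 * The read side this pairs with, sketched (typical usage, see the
+	 * builtin tools for real instances):
+	 *
+	 *	union perf_event *event;
+	 *
+	 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
+	 *		... process event ...
+	 *		perf_evlist__mmap_consume(evlist, idx);
+	 *	}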
+ */ + evlist->mmap[idx].refcnt = 2; + evlist->mmap[idx].prev = 0; + evlist->mmap[idx].mask = mp->mask; + evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot, + MAP_SHARED, fd, 0); + if (evlist->mmap[idx].base == MAP_FAILED) { + pr_debug2("failed to mmap perf event ring buffer, error %d\n", + errno); + evlist->mmap[idx].base = NULL; + return -1; + } + + return 0; +} + +static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, + struct mmap_params *mp, int cpu, + int thread, int *output) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + int fd; + + if (evsel->system_wide && thread) + continue; + + fd = FD(evsel, cpu, thread); + + if (*output == -1) { + *output = fd; + if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0) + return -1; + } else { + if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0) + return -1; + + perf_evlist__mmap_get(evlist, idx); + } + + /* + * The system_wide flag causes a selected event to be opened + * always without a pid. Consequently it will never get a + * POLLHUP, but it is used for tracking in combination with + * other events, so it should not need to be polled anyway. + * Therefore don't add it for polling. + */ + if (!evsel->system_wide && + __perf_evlist__add_pollfd(evlist, fd, idx) < 0) { + perf_evlist__mmap_put(evlist, idx); + return -1; + } + + if (evsel->attr.read_format & PERF_FORMAT_ID) { + if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread, + fd) < 0) + return -1; + perf_evlist__set_sid_idx(evlist, evsel, idx, cpu, + thread); + } + } + + return 0; +} + +static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, + struct mmap_params *mp) +{ + int cpu, thread; + int nr_cpus = cpu_map__nr(evlist->cpus); + int nr_threads = thread_map__nr(evlist->threads); + + pr_debug2("perf event ring buffer mmapped per cpu\n"); + for (cpu = 0; cpu < nr_cpus; cpu++) { + int output = -1; + + for (thread = 0; thread < nr_threads; thread++) { + if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu, + thread, &output)) + goto out_unmap; + } + } + + return 0; + +out_unmap: + for (cpu = 0; cpu < nr_cpus; cpu++) + __perf_evlist__munmap(evlist, cpu); + return -1; +} + +static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, + struct mmap_params *mp) +{ + int thread; + int nr_threads = thread_map__nr(evlist->threads); + + pr_debug2("perf event ring buffer mmapped per thread\n"); + for (thread = 0; thread < nr_threads; thread++) { + int output = -1; + + if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, + &output)) + goto out_unmap; + } + + return 0; + +out_unmap: + for (thread = 0; thread < nr_threads; thread++) + __perf_evlist__munmap(evlist, thread); + return -1; +} + +static size_t perf_evlist__mmap_size(unsigned long pages) +{ + if (pages == UINT_MAX) { + int max; + + if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) { + /* + * Pick a once upon a time good value, i.e. things look + * strange since we can't read a sysctl value, but lets not + * die yet... 
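+	 * (Worked example, assuming 4 KiB pages: max = 512 gives
+	 * (512 * 1024) / 4096 = 128 data pages, already a power of two, so
+	 * the final map below is (128 + 1) * 4096 bytes, the +1 being the
+	 * control header page.)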
+ */ + max = 512; + } else { + max -= (page_size / 1024); + } + + pages = (max * 1024) / page_size; + if (!is_power_of_2(pages)) + pages = rounddown_pow_of_two(pages); + } else if (!is_power_of_2(pages)) + return 0; + + return (pages + 1) * page_size; +} + +static long parse_pages_arg(const char *str, unsigned long min, + unsigned long max) +{ + unsigned long pages, val; + static struct parse_tag tags[] = { + { .tag = 'B', .mult = 1 }, + { .tag = 'K', .mult = 1 << 10 }, + { .tag = 'M', .mult = 1 << 20 }, + { .tag = 'G', .mult = 1 << 30 }, + { .tag = 0 }, + }; + + if (str == NULL) + return -EINVAL; + + val = parse_tag_value(str, tags); + if (val != (unsigned long) -1) { + /* we got file size value */ + pages = PERF_ALIGN(val, page_size) / page_size; + } else { + /* we got pages count value */ + char *eptr; + pages = strtoul(str, &eptr, 10); + if (*eptr != '\0') + return -EINVAL; + } + + if (pages == 0 && min == 0) { + /* leave number of pages at 0 */ + } else if (!is_power_of_2(pages)) { + /* round pages up to next power of 2 */ + pages = roundup_pow_of_two(pages); + if (!pages) + return -EINVAL; + pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n", + pages * page_size, pages); + } + + if (pages > max) + return -EINVAL; + + return pages; +} + +int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, + int unset __maybe_unused) +{ + unsigned int *mmap_pages = opt->value; + unsigned long max = UINT_MAX; + long pages; + + if (max > SIZE_MAX / page_size) + max = SIZE_MAX / page_size; + + pages = parse_pages_arg(str, 1, max); + if (pages < 0) { + pr_err("Invalid argument for --mmap_pages/-m\n"); + return -1; + } + + *mmap_pages = pages; + return 0; +} + +/** + * perf_evlist__mmap - Create mmaps to receive events. + * @evlist: list of events + * @pages: map length in pages + * @overwrite: overwrite older events? + * + * If @overwrite is %false the user needs to signal event consumption using + * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this + * automatically. + * + * Return: %0 on success, negative error code otherwise. + */ +int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, + bool overwrite) +{ + struct perf_evsel *evsel; + const struct cpu_map *cpus = evlist->cpus; + const struct thread_map *threads = evlist->threads; + struct mmap_params mp = { + .prot = PROT_READ | (overwrite ? 
0 : PROT_WRITE), + }; + + if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) + return -ENOMEM; + + if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0) + return -ENOMEM; + + evlist->overwrite = overwrite; + evlist->mmap_len = perf_evlist__mmap_size(pages); + pr_debug("mmap size %zuB\n", evlist->mmap_len); + mp.mask = evlist->mmap_len - page_size - 1; + + evlist__for_each(evlist, evsel) { + if ((evsel->attr.read_format & PERF_FORMAT_ID) && + evsel->sample_id == NULL && + perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0) + return -ENOMEM; + } + + if (cpu_map__empty(cpus)) + return perf_evlist__mmap_per_thread(evlist, &mp); + + return perf_evlist__mmap_per_cpu(evlist, &mp); +} + +int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) +{ + evlist->threads = thread_map__new_str(target->pid, target->tid, + target->uid); + + if (evlist->threads == NULL) + return -1; + + if (target__uses_dummy_map(target)) + evlist->cpus = cpu_map__dummy_new(); + else + evlist->cpus = cpu_map__new(target->cpu_list); + + if (evlist->cpus == NULL) + goto out_delete_threads; + + return 0; + +out_delete_threads: + thread_map__delete(evlist->threads); + evlist->threads = NULL; + return -1; +} + +int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel) +{ + struct perf_evsel *evsel; + int err = 0; + const int ncpus = cpu_map__nr(evlist->cpus), + nthreads = thread_map__nr(evlist->threads); + + evlist__for_each(evlist, evsel) { + if (evsel->filter == NULL) + continue; + + err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter); + if (err) { + *err_evsel = evsel; + break; + } + } + + return err; +} + +int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) +{ + struct perf_evsel *evsel; + int err = 0; + const int ncpus = cpu_map__nr(evlist->cpus), + nthreads = thread_map__nr(evlist->threads); + + evlist__for_each(evlist, evsel) { + err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter); + if (err) + break; + } + + return err; +} + +int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids) +{ + char *filter; + int ret = -1; + size_t i; + + for (i = 0; i < npids; ++i) { + if (i == 0) { + if (asprintf(&filter, "common_pid != %d", pids[i]) < 0) + return -1; + } else { + char *tmp; + + if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0) + goto out_free; + + free(filter); + filter = tmp; + } + } + + ret = perf_evlist__set_filter(evlist, filter); +out_free: + free(filter); + return ret; +} + +int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid) +{ + return perf_evlist__set_filter_pids(evlist, 1, &pid); +} + +bool perf_evlist__valid_sample_type(struct perf_evlist *evlist) +{ + struct perf_evsel *pos; + + if (evlist->nr_entries == 1) + return true; + + if (evlist->id_pos < 0 || evlist->is_pos < 0) + return false; + + evlist__for_each(evlist, pos) { + if (pos->id_pos != evlist->id_pos || + pos->is_pos != evlist->is_pos) + return false; + } + + return true; +} + +u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + if (evlist->combined_sample_type) + return evlist->combined_sample_type; + + evlist__for_each(evlist, evsel) + evlist->combined_sample_type |= evsel->attr.sample_type; + + return evlist->combined_sample_type; +} + +u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist) +{ + evlist->combined_sample_type = 0; + return 
__perf_evlist__combined_sample_type(evlist);
+}
+
+bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
+{
+	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
+	u64 read_format = first->attr.read_format;
+	u64 sample_type = first->attr.sample_type;
+
+	evlist__for_each(evlist, pos) {
+		if (read_format != pos->attr.read_format)
+			return false;
+	}
+
+	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
+	if ((sample_type & PERF_SAMPLE_READ) &&
+	    !(read_format & PERF_FORMAT_ID)) {
+		return false;
+	}
+
+	return true;
+}
+
+u64 perf_evlist__read_format(struct perf_evlist *evlist)
+{
+	struct perf_evsel *first = perf_evlist__first(evlist);
+	return first->attr.read_format;
+}
+
+u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
+{
+	struct perf_evsel *first = perf_evlist__first(evlist);
+	struct perf_sample *data;
+	u64 sample_type;
+	u16 size = 0;
+
+	if (!first->attr.sample_id_all)
+		goto out;
+
+	sample_type = first->attr.sample_type;
+
+	if (sample_type & PERF_SAMPLE_TID)
+		size += sizeof(data->tid) * 2;
+
+	if (sample_type & PERF_SAMPLE_TIME)
+		size += sizeof(data->time);
+
+	if (sample_type & PERF_SAMPLE_ID)
+		size += sizeof(data->id);
+
+	if (sample_type & PERF_SAMPLE_STREAM_ID)
+		size += sizeof(data->stream_id);
+
+	if (sample_type & PERF_SAMPLE_CPU)
+		size += sizeof(data->cpu) * 2;
+
+	if (sample_type & PERF_SAMPLE_IDENTIFIER)
+		size += sizeof(data->id);
+out:
+	return size;
+}
+
+bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
+{
+	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
+
+	evlist__for_each_continue(evlist, pos) {
+		if (first->attr.sample_id_all != pos->attr.sample_id_all)
+			return false;
+	}
+
+	return true;
+}
+
+bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
+{
+	struct perf_evsel *first = perf_evlist__first(evlist);
+	return first->attr.sample_id_all;
+}
+
+void perf_evlist__set_selected(struct perf_evlist *evlist,
+			       struct perf_evsel *evsel)
+{
+	evlist->selected = evsel;
+}
+
+void perf_evlist__close(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel;
+	int ncpus = cpu_map__nr(evlist->cpus);
+	int nthreads = thread_map__nr(evlist->threads);
+	int n;
+
+	evlist__for_each_reverse(evlist, evsel) {
+		n = evsel->cpus ? evsel->cpus->nr : ncpus;
+		perf_evsel__close(evsel, n, nthreads);
+	}
+}
+
+static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
+{
+	int err = -ENOMEM;
+
+	/*
+	 * Try reading /sys/devices/system/cpu/online to get
+	 * an all cpus map.
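+	 * (cpu_map__new(NULL) falls back to parsing the online mask, so on
+	 * a four-CPU box it would build the map from a string like "0-3".)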
+ * + * FIXME: -ENOMEM is the best we can do here, the cpu_map + * code needs an overhaul to properly forward the + * error, and we may not want to do that fallback to a + * default cpu identity map :-\ + */ + evlist->cpus = cpu_map__new(NULL); + if (evlist->cpus == NULL) + goto out; + + evlist->threads = thread_map__new_dummy(); + if (evlist->threads == NULL) + goto out_free_cpus; + + err = 0; +out: + return err; +out_free_cpus: + cpu_map__delete(evlist->cpus); + evlist->cpus = NULL; + goto out; +} + +int perf_evlist__open(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + int err; + + /* + * Default: one fd per CPU, all threads, aka systemwide + * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL + */ + if (evlist->threads == NULL && evlist->cpus == NULL) { + err = perf_evlist__create_syswide_maps(evlist); + if (err < 0) + goto out_err; + } + + perf_evlist__update_id_pos(evlist); + + evlist__for_each(evlist, evsel) { + err = perf_evsel__open(evsel, evlist->cpus, evlist->threads); + if (err < 0) + goto out_err; + } + + return 0; +out_err: + perf_evlist__close(evlist); + errno = -err; + return err; +} + +int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target, + const char *argv[], bool pipe_output, + void (*exec_error)(int signo, siginfo_t *info, void *ucontext)) +{ + int child_ready_pipe[2], go_pipe[2]; + char bf; + + if (pipe(child_ready_pipe) < 0) { + perror("failed to create 'ready' pipe"); + return -1; + } + + if (pipe(go_pipe) < 0) { + perror("failed to create 'go' pipe"); + goto out_close_ready_pipe; + } + + evlist->workload.pid = fork(); + if (evlist->workload.pid < 0) { + perror("failed to fork"); + goto out_close_pipes; + } + + if (!evlist->workload.pid) { + int ret; + + if (pipe_output) + dup2(2, 1); + + signal(SIGTERM, SIG_DFL); + + close(child_ready_pipe[0]); + close(go_pipe[1]); + fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); + + /* + * Tell the parent we're ready to go + */ + close(child_ready_pipe[1]); + + /* + * Wait until the parent tells us to go. + */ + ret = read(go_pipe[0], &bf, 1); + /* + * The parent will ask for the execvp() to be performed by + * writing exactly one byte, in workload.cork_fd, usually via + * perf_evlist__start_workload(). + * + * For cancelling the workload without actually running it, + * the parent will just close workload.cork_fd, without writing + * anything, i.e. read will return zero and we just exit() + * here. 
+		 */
+		if (ret != 1) {
+			if (ret == -1)
+				perror("unable to read pipe");
+			exit(ret);
+		}
+
+		execvp(argv[0], (char **)argv);
+
+		if (exec_error) {
+			union sigval val;
+
+			val.sival_int = errno;
+			if (sigqueue(getppid(), SIGUSR1, val))
+				perror(argv[0]);
+		} else
+			perror(argv[0]);
+		exit(-1);
+	}
+
+	if (exec_error) {
+		struct sigaction act = {
+			.sa_flags     = SA_SIGINFO,
+			.sa_sigaction = exec_error,
+		};
+		sigaction(SIGUSR1, &act, NULL);
+	}
+
+	if (target__none(target)) {
+		if (evlist->threads == NULL) {
+			fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
+				__func__, __LINE__);
+			goto out_close_pipes;
+		}
+		evlist->threads->map[0] = evlist->workload.pid;
+	}
+
+	close(child_ready_pipe[1]);
+	close(go_pipe[0]);
+	/*
+	 * wait for child to settle
+	 */
+	if (read(child_ready_pipe[0], &bf, 1) == -1) {
+		perror("unable to read pipe");
+		goto out_close_pipes;
+	}
+
+	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
+	evlist->workload.cork_fd = go_pipe[1];
+	close(child_ready_pipe[0]);
+	return 0;
+
+out_close_pipes:
+	close(go_pipe[0]);
+	close(go_pipe[1]);
+out_close_ready_pipe:
+	close(child_ready_pipe[0]);
+	close(child_ready_pipe[1]);
+	return -1;
+}
+
+int perf_evlist__start_workload(struct perf_evlist *evlist)
+{
+	if (evlist->workload.cork_fd > 0) {
+		char bf = 0;
+		int ret;
+		/*
+		 * Remove the cork, let it rip!
+		 */
+		ret = write(evlist->workload.cork_fd, &bf, 1);
+		if (ret < 0)
+			perror("unable to write to pipe");
+
+		close(evlist->workload.cork_fd);
+		return ret;
+	}
+
+	return 0;
+}
+
+int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
+			      struct perf_sample *sample)
+{
+	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+	if (!evsel)
+		return -EFAULT;
+	return perf_evsel__parse_sample(evsel, event, sample);
+}
+
+size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
+{
+	struct perf_evsel *evsel;
+	size_t printed = 0;
+
+	evlist__for_each(evlist, evsel) {
+		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
+				   perf_evsel__name(evsel));
+	}
+
+	return printed + fprintf(fp, "\n");
+}
+
+int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
+			       int err, char *buf, size_t size)
+{
+	int printed, value;
+	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+
+	switch (err) {
+	case EACCES:
+	case EPERM:
+		printed = scnprintf(buf, size,
+				    "Error:\t%s.\n"
+				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
+
+		value = perf_event_paranoid();
+
+		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
+
+		if (value >= 2) {
+			printed += scnprintf(buf + printed, size - printed,
+					     "For your workloads it needs to be <= 1\nHint:\t");
+		}
+		printed += scnprintf(buf + printed, size - printed,
+				     "For system wide tracing it needs to be set to -1.\n");
+
+		printed += scnprintf(buf + printed, size - printed,
+				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
+				    "Hint:\tThe current value is %d.", value);
+		break;
+	default:
+		scnprintf(buf, size, "%s", emsg);
+		break;
+	}
+
+	return 0;
+}
+
+int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
+{
+	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
+
+	switch (err) {
+	case EPERM:
+		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
+		printed += scnprintf(buf + printed, size - printed,
+				     "Error:\t%s.\n"
+				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
+				     "Hint:\tTried using %zd kB.\n",
+				     emsg, pages_max_per_user, pages_attempted);
+
+		if (pages_attempted >= pages_max_per_user) {
+			printed += scnprintf(buf + printed, size - printed,
+					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
+					     pages_max_per_user + pages_attempted);
+		}
+
+		printed += scnprintf(buf + printed, size - printed,
+				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
+		break;
+	default:
+		scnprintf(buf, size, "%s", emsg);
+		break;
+	}
+
+	return 0;
+}
+
+void perf_evlist__to_front(struct perf_evlist *evlist,
+			   struct perf_evsel *move_evsel)
+{
+	struct perf_evsel *evsel, *n;
+	LIST_HEAD(move);
+
+	if (move_evsel == perf_evlist__first(evlist))
+		return;
+
+	evlist__for_each_safe(evlist, n, evsel) {
+		if (evsel->leader == move_evsel->leader)
+			list_move_tail(&evsel->node, &move);
+	}
+
+	list_splice(&move, &evlist->entries);
+}
+
+void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
+				     struct perf_evsel *tracking_evsel)
+{
+	struct perf_evsel *evsel;
+
+	if (tracking_evsel->tracking)
+		return;
+
+	evlist__for_each(evlist, evsel) {
+		if (evsel != tracking_evsel)
+			evsel->tracking = false;
+	}
+
+	tracking_evsel->tracking = true;
+}
diff --git a/kernel/tools/perf/util/evlist.h b/kernel/tools/perf/util/evlist.h
new file mode 100644
index 000000000..b5cce95d6
--- /dev/null
+++ b/kernel/tools/perf/util/evlist.h
@@ -0,0 +1,284 @@
+#ifndef __PERF_EVLIST_H
+#define __PERF_EVLIST_H 1
+
+#include <linux/list.h>
+#include <api/fd/array.h>
+#include <stdio.h>
+#include "../perf.h"
+#include "event.h"
+#include "evsel.h"
+#include "util.h"
+#include <unistd.h>
+
+struct pollfd;
+struct thread_map;
+struct cpu_map;
+struct record_opts;
+
+#define PERF_EVLIST__HLIST_BITS 8
+#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
+
+/**
+ * struct perf_mmap - perf's ring buffer mmap details
+ *
+ * @refcnt - e.g.
code using PERF_EVENT_IOC_SET_OUTPUT to share this + */ +struct perf_mmap { + void *base; + int mask; + int refcnt; + u64 prev; + char event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8))); +}; + +struct perf_evlist { + struct list_head entries; + struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; + int nr_entries; + int nr_groups; + int nr_mmaps; + size_t mmap_len; + int id_pos; + int is_pos; + u64 combined_sample_type; + struct { + int cork_fd; + pid_t pid; + } workload; + bool overwrite; + struct fdarray pollfd; + struct perf_mmap *mmap; + struct thread_map *threads; + struct cpu_map *cpus; + struct perf_evsel *selected; + struct events_stats stats; +}; + +struct perf_evsel_str_handler { + const char *name; + void *handler; +}; + +struct perf_evlist *perf_evlist__new(void); +struct perf_evlist *perf_evlist__new_default(void); +void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, + struct thread_map *threads); +void perf_evlist__exit(struct perf_evlist *evlist); +void perf_evlist__delete(struct perf_evlist *evlist); + +void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); +int perf_evlist__add_default(struct perf_evlist *evlist); +int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, + struct perf_event_attr *attrs, size_t nr_attrs); + +#define perf_evlist__add_default_attrs(evlist, array) \ + __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array)) + +int perf_evlist__add_newtp(struct perf_evlist *evlist, + const char *sys, const char *name, void *handler); + +int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter); +int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid); +int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids); + +struct perf_evsel * +perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id); + +struct perf_evsel * +perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist, + const char *name); + +void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, + int cpu, int thread, u64 id); + +int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd); +int perf_evlist__alloc_pollfd(struct perf_evlist *evlist); +int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask); + +int perf_evlist__poll(struct perf_evlist *evlist, int timeout); + +struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); + +struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id); + +union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx); + +void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx); + +int perf_evlist__open(struct perf_evlist *evlist); +void perf_evlist__close(struct perf_evlist *evlist); + +void perf_evlist__set_id_pos(struct perf_evlist *evlist); +bool perf_can_sample_identifier(void); +void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts); +int record_opts__config(struct record_opts *opts); + +int perf_evlist__prepare_workload(struct perf_evlist *evlist, + struct target *target, + const char *argv[], bool pipe_output, + void (*exec_error)(int signo, siginfo_t *info, + void *ucontext)); +int perf_evlist__start_workload(struct perf_evlist *evlist); + +struct option; + +int perf_evlist__parse_mmap_pages(const struct option *opt, + const char *str, + int unset); + +int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, + bool overwrite); +void 
perf_evlist__munmap(struct perf_evlist *evlist); + +void perf_evlist__disable(struct perf_evlist *evlist); +void perf_evlist__enable(struct perf_evlist *evlist); + +int perf_evlist__disable_event(struct perf_evlist *evlist, + struct perf_evsel *evsel); +int perf_evlist__enable_event(struct perf_evlist *evlist, + struct perf_evsel *evsel); +int perf_evlist__enable_event_idx(struct perf_evlist *evlist, + struct perf_evsel *evsel, int idx); + +void perf_evlist__set_selected(struct perf_evlist *evlist, + struct perf_evsel *evsel); + +static inline void perf_evlist__set_maps(struct perf_evlist *evlist, + struct cpu_map *cpus, + struct thread_map *threads) +{ + evlist->cpus = cpus; + evlist->threads = threads; +} + +int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target); +int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel); + +void __perf_evlist__set_leader(struct list_head *list); +void perf_evlist__set_leader(struct perf_evlist *evlist); + +u64 perf_evlist__read_format(struct perf_evlist *evlist); +u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist); +u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist); +bool perf_evlist__sample_id_all(struct perf_evlist *evlist); +u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist); + +int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, + struct perf_sample *sample); + +bool perf_evlist__valid_sample_type(struct perf_evlist *evlist); +bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist); +bool perf_evlist__valid_read_format(struct perf_evlist *evlist); + +void perf_evlist__splice_list_tail(struct perf_evlist *evlist, + struct list_head *list, + int nr_entries); + +static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist) +{ + return list_entry(evlist->entries.next, struct perf_evsel, node); +} + +static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist) +{ + return list_entry(evlist->entries.prev, struct perf_evsel, node); +} + +size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp); + +int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size); +int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size); + +static inline u64 perf_mmap__read_head(struct perf_mmap *mm) +{ + struct perf_event_mmap_page *pc = mm->base; + u64 head = ACCESS_ONCE(pc->data_head); + rmb(); + return head; +} + +static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail) +{ + struct perf_event_mmap_page *pc = md->base; + + /* + * ensure all reads are done before we write the tail out. 
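+	 * (This pairs with the ordering the kernel observes when it reads
+	 * ->data_tail before reusing ring buffer space; see the
+	 * struct perf_event_mmap_page comments in
+	 * include/uapi/linux/perf_event.h.)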
+ */ + mb(); + pc->data_tail = tail; +} + +bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str); +void perf_evlist__to_front(struct perf_evlist *evlist, + struct perf_evsel *move_evsel); + +/** + * __evlist__for_each - iterate thru all the evsels + * @list: list_head instance to iterate + * @evsel: struct evsel iterator + */ +#define __evlist__for_each(list, evsel) \ + list_for_each_entry(evsel, list, node) + +/** + * evlist__for_each - iterate thru all the evsels + * @evlist: evlist instance to iterate + * @evsel: struct evsel iterator + */ +#define evlist__for_each(evlist, evsel) \ + __evlist__for_each(&(evlist)->entries, evsel) + +/** + * __evlist__for_each_continue - continue iteration thru all the evsels + * @list: list_head instance to iterate + * @evsel: struct evsel iterator + */ +#define __evlist__for_each_continue(list, evsel) \ + list_for_each_entry_continue(evsel, list, node) + +/** + * evlist__for_each_continue - continue iteration thru all the evsels + * @evlist: evlist instance to iterate + * @evsel: struct evsel iterator + */ +#define evlist__for_each_continue(evlist, evsel) \ + __evlist__for_each_continue(&(evlist)->entries, evsel) + +/** + * __evlist__for_each_reverse - iterate thru all the evsels in reverse order + * @list: list_head instance to iterate + * @evsel: struct evsel iterator + */ +#define __evlist__for_each_reverse(list, evsel) \ + list_for_each_entry_reverse(evsel, list, node) + +/** + * evlist__for_each_reverse - iterate thru all the evsels in reverse order + * @evlist: evlist instance to iterate + * @evsel: struct evsel iterator + */ +#define evlist__for_each_reverse(evlist, evsel) \ + __evlist__for_each_reverse(&(evlist)->entries, evsel) + +/** + * __evlist__for_each_safe - safely iterate thru all the evsels + * @list: list_head instance to iterate + * @tmp: struct evsel temp iterator + * @evsel: struct evsel iterator + */ +#define __evlist__for_each_safe(list, tmp, evsel) \ + list_for_each_entry_safe(evsel, tmp, list, node) + +/** + * evlist__for_each_safe - safely iterate thru all the evsels + * @evlist: evlist instance to iterate + * @evsel: struct evsel iterator + * @tmp: struct evsel temp iterator + */ +#define evlist__for_each_safe(evlist, tmp, evsel) \ + __evlist__for_each_safe(&(evlist)->entries, tmp, evsel) + +void perf_evlist__set_tracking_event(struct perf_evlist *evlist, + struct perf_evsel *tracking_evsel); + +#endif /* __PERF_EVLIST_H */ diff --git a/kernel/tools/perf/util/evsel.c b/kernel/tools/perf/util/evsel.c new file mode 100644 index 000000000..33e3fd8c2 --- /dev/null +++ b/kernel/tools/perf/util/evsel.c @@ -0,0 +1,2190 @@ +/* + * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo + * + * Parts came from builtin-{top,stat,record}.c, see those files for further + * copyright notes. + * + * Released under the GPL v2. 
(and only v2, not any later version)
+ */
+
+#include <byteswap.h>
+#include <linux/bitops.h>
+#include <api/fs/debugfs.h>
+#include <traceevent/event-parse.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
+#include <sys/resource.h>
+#include "asm/bug.h"
+#include "callchain.h"
+#include "cgroup.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "util.h"
+#include "cpumap.h"
+#include "thread_map.h"
+#include "target.h"
+#include "perf_regs.h"
+#include "debug.h"
+#include "trace-event.h"
+
+static struct {
+	bool sample_id_all;
+	bool exclude_guest;
+	bool mmap2;
+	bool cloexec;
+	bool clockid;
+	bool clockid_wrong;
+} perf_missing_features;
+
+static clockid_t clockid;
+
+static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
+{
+	return 0;
+}
+
+static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
+{
+}
+
+static struct {
+	size_t	size;
+	int	(*init)(struct perf_evsel *evsel);
+	void	(*fini)(struct perf_evsel *evsel);
+} perf_evsel__object = {
+	.size = sizeof(struct perf_evsel),
+	.init = perf_evsel__no_extra_init,
+	.fini = perf_evsel__no_extra_fini,
+};
+
+int perf_evsel__object_config(size_t object_size,
+			      int (*init)(struct perf_evsel *evsel),
+			      void (*fini)(struct perf_evsel *evsel))
+{
+
+	if (object_size == 0)
+		goto set_methods;
+
+	if (perf_evsel__object.size > object_size)
+		return -EINVAL;
+
+	perf_evsel__object.size = object_size;
+
+set_methods:
+	if (init != NULL)
+		perf_evsel__object.init = init;
+
+	if (fini != NULL)
+		perf_evsel__object.fini = fini;
+
+	return 0;
+}
+
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+
+int __perf_evsel__sample_size(u64 sample_type)
+{
+	u64 mask = sample_type & PERF_SAMPLE_MASK;
+	int size = 0;
+	int i;
+
+	for (i = 0; i < 64; i++) {
+		if (mask & (1ULL << i))
+			size++;
+	}
+
+	size *= sizeof(u64);
+
+	return size;
+}
+
+/**
+ * __perf_evsel__calc_id_pos - calculate id_pos.
+ * @sample_type: sample type
+ *
+ * This function returns the position of the event id (PERF_SAMPLE_ID or
+ * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
+ * sample_event.
+ */
+static int __perf_evsel__calc_id_pos(u64 sample_type)
+{
+	int idx = 0;
+
+	if (sample_type & PERF_SAMPLE_IDENTIFIER)
+		return 0;
+
+	if (!(sample_type & PERF_SAMPLE_ID))
+		return -1;
+
+	if (sample_type & PERF_SAMPLE_IP)
+		idx += 1;
+
+	if (sample_type & PERF_SAMPLE_TID)
+		idx += 1;
+
+	if (sample_type & PERF_SAMPLE_TIME)
+		idx += 1;
+
+	if (sample_type & PERF_SAMPLE_ADDR)
+		idx += 1;
+
+	return idx;
+}
+
+/**
+ * __perf_evsel__calc_is_pos - calculate is_pos.
+ * @sample_type: sample type
+ *
+ * This function returns the position (counting backwards) of the event id
+ * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
+ * sample_id_all is used there is an id sample appended to non-sample events.
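+ *
+ * For illustration: with sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
+ * PERF_SAMPLE_ID | PERF_SAMPLE_CPU the appended id sample lays out as
+ * TID, TIME, ID, CPU, so this returns 2: the id is the second u64 counting
+ * back from the end of the event.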
+ */ +static int __perf_evsel__calc_is_pos(u64 sample_type) +{ + int idx = 1; + + if (sample_type & PERF_SAMPLE_IDENTIFIER) + return 1; + + if (!(sample_type & PERF_SAMPLE_ID)) + return -1; + + if (sample_type & PERF_SAMPLE_CPU) + idx += 1; + + if (sample_type & PERF_SAMPLE_STREAM_ID) + idx += 1; + + return idx; +} + +void perf_evsel__calc_id_pos(struct perf_evsel *evsel) +{ + evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type); + evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type); +} + +void __perf_evsel__set_sample_bit(struct perf_evsel *evsel, + enum perf_event_sample_format bit) +{ + if (!(evsel->attr.sample_type & bit)) { + evsel->attr.sample_type |= bit; + evsel->sample_size += sizeof(u64); + perf_evsel__calc_id_pos(evsel); + } +} + +void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, + enum perf_event_sample_format bit) +{ + if (evsel->attr.sample_type & bit) { + evsel->attr.sample_type &= ~bit; + evsel->sample_size -= sizeof(u64); + perf_evsel__calc_id_pos(evsel); + } +} + +void perf_evsel__set_sample_id(struct perf_evsel *evsel, + bool can_sample_identifier) +{ + if (can_sample_identifier) { + perf_evsel__reset_sample_bit(evsel, ID); + perf_evsel__set_sample_bit(evsel, IDENTIFIER); + } else { + perf_evsel__set_sample_bit(evsel, ID); + } + evsel->attr.read_format |= PERF_FORMAT_ID; +} + +void perf_evsel__init(struct perf_evsel *evsel, + struct perf_event_attr *attr, int idx) +{ + evsel->idx = idx; + evsel->tracking = !idx; + evsel->attr = *attr; + evsel->leader = evsel; + evsel->unit = ""; + evsel->scale = 1.0; + INIT_LIST_HEAD(&evsel->node); + perf_evsel__object.init(evsel); + evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); + perf_evsel__calc_id_pos(evsel); +} + +struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx) +{ + struct perf_evsel *evsel = zalloc(perf_evsel__object.size); + + if (evsel != NULL) + perf_evsel__init(evsel, attr, idx); + + return evsel; +} + +struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx) +{ + struct perf_evsel *evsel = zalloc(perf_evsel__object.size); + + if (evsel != NULL) { + struct perf_event_attr attr = { + .type = PERF_TYPE_TRACEPOINT, + .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | + PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD), + }; + + if (asprintf(&evsel->name, "%s:%s", sys, name) < 0) + goto out_free; + + evsel->tp_format = trace_event__tp_format(sys, name); + if (evsel->tp_format == NULL) + goto out_free; + + event_attr_init(&attr); + attr.config = evsel->tp_format->id; + attr.sample_period = 1; + perf_evsel__init(evsel, &attr, idx); + } + + return evsel; + +out_free: + zfree(&evsel->name); + free(evsel); + return NULL; +} + +const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = { + "cycles", + "instructions", + "cache-references", + "cache-misses", + "branches", + "branch-misses", + "bus-cycles", + "stalled-cycles-frontend", + "stalled-cycles-backend", + "ref-cycles", +}; + +static const char *__perf_evsel__hw_name(u64 config) +{ + if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config]) + return perf_evsel__hw_names[config]; + + return "unknown-hardware"; +} + +static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size) +{ + int colon = 0, r = 0; + struct perf_event_attr *attr = &evsel->attr; + bool exclude_guest_default = false; + +#define MOD_PRINT(context, mod) do { \ + if (!attr->exclude_##context) { \ + if (!colon) colon = ++r; \ + r += scnprintf(bf + r, size - r, "%c", mod); \ + } } 
while(0)
+
+	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
+		MOD_PRINT(kernel, 'k');
+		MOD_PRINT(user, 'u');
+		MOD_PRINT(hv, 'h');
+		exclude_guest_default = true;
+	}
+
+	if (attr->precise_ip) {
+		if (!colon)
+			colon = ++r;
+		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
+		exclude_guest_default = true;
+	}
+
+	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
+		MOD_PRINT(host, 'H');
+		MOD_PRINT(guest, 'G');
+	}
+#undef MOD_PRINT
+	if (colon)
+		bf[colon - 1] = ':';
+	return r;
+}
+
+static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
+	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
+	"cpu-clock",
+	"task-clock",
+	"page-faults",
+	"context-switches",
+	"cpu-migrations",
+	"minor-faults",
+	"major-faults",
+	"alignment-faults",
+	"emulation-faults",
+	"dummy",
+};
+
+static const char *__perf_evsel__sw_name(u64 config)
+{
+	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
+		return perf_evsel__sw_names[config];
+	return "unknown-software";
+}
+
+static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
+	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
+{
+	int r;
+
+	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
+
+	if (type & HW_BREAKPOINT_R)
+		r += scnprintf(bf + r, size - r, "r");
+
+	if (type & HW_BREAKPOINT_W)
+		r += scnprintf(bf + r, size - r, "w");
+
+	if (type & HW_BREAKPOINT_X)
+		r += scnprintf(bf + r, size - r, "x");
+
+	return r;
+}
+
+static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+	struct perf_event_attr *attr = &evsel->attr;
+	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
+	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_EVSEL__MAX_ALIASES] = {
+ { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
+ { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
+ { "LLC",	"L2",							},
+ { "dTLB",	"d-tlb",	"Data-TLB",				},
+ { "iTLB",	"i-tlb",	"Instruction-TLB",			},
+ { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
+ { "node",								},
+};
+
+const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
+				   [PERF_EVSEL__MAX_ALIASES] = {
+ { "load",	"loads",	"read",					},
+ { "store",	"stores",	"write",				},
+ { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
+};
+
+const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
+				       [PERF_EVSEL__MAX_ALIASES] = {
+ { "refs",	"Reference",	"ops",		"access",		},
+ { "misses",	"miss",							},
+};
+
+#define C(x)		PERF_COUNT_HW_CACHE_##x
+#define CACHE_READ	(1 << C(OP_READ))
+#define CACHE_WRITE	(1 << C(OP_WRITE))
+#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
+#define COP(x)		(1 << x)
+
+/*
+ * cache operation stat
+ * L1I : Read and prefetch only
+ * ITLB and BPU : Read-only
+ */
+static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
+ [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
+ [C(LL)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(ITLB)] = (CACHE_READ),
+ [C(BPU)] = (CACHE_READ),
+ [C(NODE)] = (CACHE_READ |
CACHE_WRITE | CACHE_PREFETCH), +}; + +bool perf_evsel__is_cache_op_valid(u8 type, u8 op) +{ + if (perf_evsel__hw_cache_stat[type] & COP(op)) + return true; /* valid */ + else + return false; /* invalid */ +} + +int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, + char *bf, size_t size) +{ + if (result) { + return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0], + perf_evsel__hw_cache_op[op][0], + perf_evsel__hw_cache_result[result][0]); + } + + return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0], + perf_evsel__hw_cache_op[op][1]); +} + +static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size) +{ + u8 op, result, type = (config >> 0) & 0xff; + const char *err = "unknown-ext-hardware-cache-type"; + + if (type > PERF_COUNT_HW_CACHE_MAX) + goto out_err; + + op = (config >> 8) & 0xff; + err = "unknown-ext-hardware-cache-op"; + if (op > PERF_COUNT_HW_CACHE_OP_MAX) + goto out_err; + + result = (config >> 16) & 0xff; + err = "unknown-ext-hardware-cache-result"; + if (result > PERF_COUNT_HW_CACHE_RESULT_MAX) + goto out_err; + + err = "invalid-cache"; + if (!perf_evsel__is_cache_op_valid(type, op)) + goto out_err; + + return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size); +out_err: + return scnprintf(bf, size, "%s", err); +} + +static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size) +{ + int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size); + return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret); +} + +static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size) +{ + int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config); + return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret); +} + +const char *perf_evsel__name(struct perf_evsel *evsel) +{ + char bf[128]; + + if (evsel->name) + return evsel->name; + + switch (evsel->attr.type) { + case PERF_TYPE_RAW: + perf_evsel__raw_name(evsel, bf, sizeof(bf)); + break; + + case PERF_TYPE_HARDWARE: + perf_evsel__hw_name(evsel, bf, sizeof(bf)); + break; + + case PERF_TYPE_HW_CACHE: + perf_evsel__hw_cache_name(evsel, bf, sizeof(bf)); + break; + + case PERF_TYPE_SOFTWARE: + perf_evsel__sw_name(evsel, bf, sizeof(bf)); + break; + + case PERF_TYPE_TRACEPOINT: + scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint"); + break; + + case PERF_TYPE_BREAKPOINT: + perf_evsel__bp_name(evsel, bf, sizeof(bf)); + break; + + default: + scnprintf(bf, sizeof(bf), "unknown attr type: %d", + evsel->attr.type); + break; + } + + evsel->name = strdup(bf); + + return evsel->name ?: "unknown"; +} + +const char *perf_evsel__group_name(struct perf_evsel *evsel) +{ + return evsel->group_name ?: "anon group"; +} + +int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size) +{ + int ret; + struct perf_evsel *pos; + const char *group_name = perf_evsel__group_name(evsel); + + ret = scnprintf(buf, size, "%s", group_name); + + ret += scnprintf(buf + ret, size - ret, " { %s", + perf_evsel__name(evsel)); + + for_each_group_member(pos, evsel) + ret += scnprintf(buf + ret, size - ret, ", %s", + perf_evsel__name(pos)); + + ret += scnprintf(buf + ret, size - ret, " }"); + + return ret; +} + +static void +perf_evsel__config_callgraph(struct perf_evsel *evsel, + struct record_opts *opts) +{ + bool function = perf_evsel__is_function_event(evsel); + struct perf_event_attr *attr = &evsel->attr; + + perf_evsel__set_sample_bit(evsel, CALLCHAIN); + + if (callchain_param.record_mode == CALLCHAIN_LBR) { + if 
(!opts->branch_stack) { + if (attr->exclude_user) { + pr_warning("LBR callstack option is only available " + "to get user callchain information. " + "Falling back to framepointers.\n"); + } else { + perf_evsel__set_sample_bit(evsel, BRANCH_STACK); + attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER | + PERF_SAMPLE_BRANCH_CALL_STACK; + } + } else + pr_warning("Cannot use LBR callstack with branch stack. " + "Falling back to framepointers.\n"); + } + + if (callchain_param.record_mode == CALLCHAIN_DWARF) { + if (!function) { + perf_evsel__set_sample_bit(evsel, REGS_USER); + perf_evsel__set_sample_bit(evsel, STACK_USER); + attr->sample_regs_user = PERF_REGS_MASK; + attr->sample_stack_user = callchain_param.dump_size; + attr->exclude_callchain_user = 1; + } else { + pr_info("Cannot use DWARF unwind for function trace event," + " falling back to framepointers.\n"); + } + } + + if (function) { + pr_info("Disabling user space callchains for function trace event.\n"); + attr->exclude_callchain_user = 1; + } +} + +/* + * The enable_on_exec/disabled value strategy: + * + * 1) For any type of traced program: + * - all independent events and group leaders are disabled + * - all group members are enabled + * + * Group members are ruled by group leaders. They need to + * be enabled, because the group scheduling relies on that. + * + * 2) For traced programs executed by perf: + * - all independent events and group leaders have + * enable_on_exec set + * - we don't specifically enable or disable any event during + * the record command + * + * Independent events and group leaders are initially disabled + * and get enabled by exec. Group members are ruled by group + * leaders as stated in 1). + * + * 3) For traced programs attached by perf (pid/tid): + * - we specifically enable or disable all events during + * the record command + * + * When attaching events to already running traced we + * enable/disable events specifically, as there's no + * initial traced exec call. + */ +void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts) +{ + struct perf_evsel *leader = evsel->leader; + struct perf_event_attr *attr = &evsel->attr; + int track = evsel->tracking; + bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread; + + attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1; + attr->inherit = !opts->no_inherit; + + perf_evsel__set_sample_bit(evsel, IP); + perf_evsel__set_sample_bit(evsel, TID); + + if (evsel->sample_read) { + perf_evsel__set_sample_bit(evsel, READ); + + /* + * We need ID even in case of single event, because + * PERF_SAMPLE_READ process ID specific data. + */ + perf_evsel__set_sample_id(evsel, false); + + /* + * Apply group format only if we belong to group + * with more than one members. + */ + if (leader->nr_members > 1) { + attr->read_format |= PERF_FORMAT_GROUP; + attr->inherit = 0; + } + } + + /* + * We default some events to have a default interval. But keep + * it a weak assumption overridable by the user. + */ + if (!attr->sample_period || (opts->user_freq != UINT_MAX || + opts->user_interval != ULLONG_MAX)) { + if (opts->freq) { + perf_evsel__set_sample_bit(evsel, PERIOD); + attr->freq = 1; + attr->sample_freq = opts->freq; + } else { + attr->sample_period = opts->default_interval; + } + } + + /* + * Disable sampling for all group members other + * than leader in case leader 'leads' the sampling. 
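+ * With PERF_SAMPLE_READ the leader's samples carry every member's + * value via PERF_FORMAT_GROUP (set above), so the members keep + * counting even though they emit no samples of their own.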
+ */ + if ((leader != evsel) && leader->sample_read) { + attr->sample_freq = 0; + attr->sample_period = 0; + } + + if (opts->no_samples) + attr->sample_freq = 0; + + if (opts->inherit_stat) + attr->inherit_stat = 1; + + if (opts->sample_address) { + perf_evsel__set_sample_bit(evsel, ADDR); + attr->mmap_data = track; + } + + /* + * We don't allow user space callchains for function trace + * event, due to issues with page faults while tracing the page + * fault handler and its overall tricky nature. + */ + if (perf_evsel__is_function_event(evsel)) + evsel->attr.exclude_callchain_user = 1; + + if (callchain_param.enabled && !evsel->no_aux_samples) + perf_evsel__config_callgraph(evsel, opts); + + if (opts->sample_intr_regs) { + attr->sample_regs_intr = PERF_REGS_MASK; + perf_evsel__set_sample_bit(evsel, REGS_INTR); + } + + if (target__has_cpu(&opts->target)) + perf_evsel__set_sample_bit(evsel, CPU); + + if (opts->period) + perf_evsel__set_sample_bit(evsel, PERIOD); + + /* + * When the user explicitly disabled time, don't force it here. + */ + if (opts->sample_time && + (!perf_missing_features.sample_id_all && + (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu))) + perf_evsel__set_sample_bit(evsel, TIME); + + if (opts->raw_samples && !evsel->no_aux_samples) { + perf_evsel__set_sample_bit(evsel, TIME); + perf_evsel__set_sample_bit(evsel, RAW); + perf_evsel__set_sample_bit(evsel, CPU); + } + + if (opts->sample_address) + perf_evsel__set_sample_bit(evsel, DATA_SRC); + + if (opts->no_buffering) { + attr->watermark = 0; + attr->wakeup_events = 1; + } + if (opts->branch_stack && !evsel->no_aux_samples) { + perf_evsel__set_sample_bit(evsel, BRANCH_STACK); + attr->branch_sample_type = opts->branch_stack; + } + + if (opts->sample_weight) + perf_evsel__set_sample_bit(evsel, WEIGHT); + + attr->task = track; + attr->mmap = track; + attr->mmap2 = track && !perf_missing_features.mmap2; + attr->comm = track; + + if (opts->sample_transaction) + perf_evsel__set_sample_bit(evsel, TRANSACTION); + + if (opts->running_time) { + evsel->attr.read_format |= + PERF_FORMAT_TOTAL_TIME_ENABLED | + PERF_FORMAT_TOTAL_TIME_RUNNING; + } + + /* + * XXX see the function comment above + * + * Disabling only independent events or group leaders, + * keeping group members enabled. + */ + if (perf_evsel__is_group_leader(evsel)) + attr->disabled = 1; + + /* + * Setting enable_on_exec for independent events and + * group leaders for traced programs executed by perf. + */ + if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) && + !opts->initial_delay) + attr->enable_on_exec = 1; + + if (evsel->immediate) { + attr->disabled = 0; + attr->enable_on_exec = 0; + } + + clockid = opts->clockid; + if (opts->use_clockid) { + attr->use_clockid = 1; + attr->clockid = opts->clockid; + } +} + +static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) +{ + int cpu, thread; + + if (evsel->system_wide) + nthreads = 1; + + evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); + + if (evsel->fd) { + for (cpu = 0; cpu < ncpus; cpu++) { + for (thread = 0; thread < nthreads; thread++) { + FD(evsel, cpu, thread) = -1; + } + } + } + + return evsel->fd != NULL ? 
0 : -ENOMEM; +} + +static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads, + int ioc, void *arg) +{ + int cpu, thread; + + if (evsel->system_wide) + nthreads = 1; + + for (cpu = 0; cpu < ncpus; cpu++) { + for (thread = 0; thread < nthreads; thread++) { + int fd = FD(evsel, cpu, thread), + err = ioctl(fd, ioc, arg); + + if (err) + return err; + } + } + + return 0; +} + +int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, + const char *filter) +{ + return perf_evsel__run_ioctl(evsel, ncpus, nthreads, + PERF_EVENT_IOC_SET_FILTER, + (void *)filter); +} + +int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads) +{ + return perf_evsel__run_ioctl(evsel, ncpus, nthreads, + PERF_EVENT_IOC_ENABLE, + 0); +} + +int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) +{ + if (ncpus == 0 || nthreads == 0) + return 0; + + if (evsel->system_wide) + nthreads = 1; + + evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); + if (evsel->sample_id == NULL) + return -ENOMEM; + + evsel->id = zalloc(ncpus * nthreads * sizeof(u64)); + if (evsel->id == NULL) { + xyarray__delete(evsel->sample_id); + evsel->sample_id = NULL; + return -ENOMEM; + } + + return 0; +} + +void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus) +{ + memset(evsel->counts, 0, (sizeof(*evsel->counts) + + (ncpus * sizeof(struct perf_counts_values)))); +} + +int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus) +{ + evsel->counts = zalloc((sizeof(*evsel->counts) + + (ncpus * sizeof(struct perf_counts_values)))); + return evsel->counts != NULL ? 0 : -ENOMEM; +} + +static void perf_evsel__free_fd(struct perf_evsel *evsel) +{ + xyarray__delete(evsel->fd); + evsel->fd = NULL; +} + +static void perf_evsel__free_id(struct perf_evsel *evsel) +{ + xyarray__delete(evsel->sample_id); + evsel->sample_id = NULL; + zfree(&evsel->id); +} + +void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) +{ + int cpu, thread; + + if (evsel->system_wide) + nthreads = 1; + + for (cpu = 0; cpu < ncpus; cpu++) + for (thread = 0; thread < nthreads; ++thread) { + close(FD(evsel, cpu, thread)); + FD(evsel, cpu, thread) = -1; + } +} + +void perf_evsel__free_counts(struct perf_evsel *evsel) +{ + zfree(&evsel->counts); +} + +void perf_evsel__exit(struct perf_evsel *evsel) +{ + assert(list_empty(&evsel->node)); + perf_evsel__free_fd(evsel); + perf_evsel__free_id(evsel); + close_cgroup(evsel->cgrp); + zfree(&evsel->group_name); + zfree(&evsel->name); + perf_evsel__object.fini(evsel); +} + +void perf_evsel__delete(struct perf_evsel *evsel) +{ + perf_evsel__exit(evsel); + free(evsel); +} + +void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, + struct perf_counts_values *count) +{ + struct perf_counts_values tmp; + + if (!evsel->prev_raw_counts) + return; + + if (cpu == -1) { + tmp = evsel->prev_raw_counts->aggr; + evsel->prev_raw_counts->aggr = *count; + } else { + tmp = evsel->prev_raw_counts->cpu[cpu]; + evsel->prev_raw_counts->cpu[cpu] = *count; + } + + count->val = count->val - tmp.val; + count->ena = count->ena - tmp.ena; + count->run = count->run - tmp.run; +} + +void perf_counts_values__scale(struct perf_counts_values *count, + bool scale, s8 *pscaled) +{ + s8 scaled = 0; + + if (scale) { + if (count->run == 0) { + scaled = -1; + count->val = 0; + } else if (count->run < count->ena) { + scaled = 1; + count->val = (u64)((double) count->val * count->ena / count->run + 0.5); + } + } else + count->ena = 
count->run = 0; + + if (pscaled) + *pscaled = scaled; +} + +int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread, + perf_evsel__read_cb_t cb) +{ + struct perf_counts_values count; + + memset(&count, 0, sizeof(count)); + + if (FD(evsel, cpu, thread) < 0) + return -EINVAL; + + if (readn(FD(evsel, cpu, thread), &count, sizeof(count)) < 0) + return -errno; + + return cb(evsel, cpu, thread, &count); +} + +int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, + int cpu, int thread, bool scale) +{ + struct perf_counts_values count; + size_t nv = scale ? 3 : 1; + + if (FD(evsel, cpu, thread) < 0) + return -EINVAL; + + if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0) + return -ENOMEM; + + if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0) + return -errno; + + perf_evsel__compute_deltas(evsel, cpu, &count); + perf_counts_values__scale(&count, scale, NULL); + evsel->counts->cpu[cpu] = count; + return 0; +} + +static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread) +{ + struct perf_evsel *leader = evsel->leader; + int fd; + + if (perf_evsel__is_group_leader(evsel)) + return -1; + + /* + * Leader must be already processed/open, + * if not it's a bug. + */ + BUG_ON(!leader->fd); + + fd = FD(leader, cpu, thread); + BUG_ON(fd == -1); + + return fd; +} + +struct bit_names { + int bit; + const char *name; +}; + +static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits) +{ + bool first_bit = true; + int i = 0; + + do { + if (value & bits[i].bit) { + buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name); + first_bit = false; + } + } while (bits[++i].name != NULL); +} + +static void __p_sample_type(char *buf, size_t size, u64 value) +{ +#define bit_name(n) { PERF_SAMPLE_##n, #n } + struct bit_names bits[] = { + bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR), + bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU), + bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW), + bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER), + bit_name(IDENTIFIER), bit_name(REGS_INTR), + { .name = NULL, } + }; +#undef bit_name + __p_bits(buf, size, value, bits); +} + +static void __p_read_format(char *buf, size_t size, u64 value) +{ +#define bit_name(n) { PERF_FORMAT_##n, #n } + struct bit_names bits[] = { + bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING), + bit_name(ID), bit_name(GROUP), + { .name = NULL, } + }; +#undef bit_name + __p_bits(buf, size, value, bits); +} + +#define BUF_SIZE 1024 + +#define p_hex(val) snprintf(buf, BUF_SIZE, "%"PRIx64, (uint64_t)(val)) +#define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val)) +#define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val)) +#define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val) +#define p_read_format(val) __p_read_format(buf, BUF_SIZE, val) + +#define PRINT_ATTRn(_n, _f, _p) \ +do { \ + if (attr->_f) { \ + _p(attr->_f); \ + ret += attr__fprintf(fp, _n, buf, priv);\ + } \ +} while (0) + +#define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p) + +int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr, + attr__fprintf_f attr__fprintf, void *priv) +{ + char buf[BUF_SIZE]; + int ret = 0; + + PRINT_ATTRf(type, p_unsigned); + PRINT_ATTRf(size, p_unsigned); + PRINT_ATTRf(config, p_hex); + PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned); + PRINT_ATTRf(sample_type, p_sample_type); + PRINT_ATTRf(read_format, p_read_format); + + 
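/* + * PRINT_ATTRf(field, printer) expands to PRINT_ATTRn(#field, field, printer), + * which formats the value into buf[] with the given p_* helper and hands it + * to the attr__fprintf() callback only when the field is non-zero, roughly: + * + * if (attr->freq) { p_unsigned(attr->freq); ret += attr__fprintf(fp, "freq", buf, priv); } + * + * so unset attributes are simply skipped in the dump. + */ +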
PRINT_ATTRf(disabled, p_unsigned); + PRINT_ATTRf(inherit, p_unsigned); + PRINT_ATTRf(pinned, p_unsigned); + PRINT_ATTRf(exclusive, p_unsigned); + PRINT_ATTRf(exclude_user, p_unsigned); + PRINT_ATTRf(exclude_kernel, p_unsigned); + PRINT_ATTRf(exclude_hv, p_unsigned); + PRINT_ATTRf(exclude_idle, p_unsigned); + PRINT_ATTRf(mmap, p_unsigned); + PRINT_ATTRf(comm, p_unsigned); + PRINT_ATTRf(freq, p_unsigned); + PRINT_ATTRf(inherit_stat, p_unsigned); + PRINT_ATTRf(enable_on_exec, p_unsigned); + PRINT_ATTRf(task, p_unsigned); + PRINT_ATTRf(watermark, p_unsigned); + PRINT_ATTRf(precise_ip, p_unsigned); + PRINT_ATTRf(mmap_data, p_unsigned); + PRINT_ATTRf(sample_id_all, p_unsigned); + PRINT_ATTRf(exclude_host, p_unsigned); + PRINT_ATTRf(exclude_guest, p_unsigned); + PRINT_ATTRf(exclude_callchain_kernel, p_unsigned); + PRINT_ATTRf(exclude_callchain_user, p_unsigned); + PRINT_ATTRf(mmap2, p_unsigned); + PRINT_ATTRf(comm_exec, p_unsigned); + PRINT_ATTRf(use_clockid, p_unsigned); + + PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned); + PRINT_ATTRf(bp_type, p_unsigned); + PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex); + PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex); + PRINT_ATTRf(sample_regs_user, p_hex); + PRINT_ATTRf(sample_stack_user, p_unsigned); + PRINT_ATTRf(clockid, p_signed); + PRINT_ATTRf(sample_regs_intr, p_hex); + + return ret; +} + +static int __open_attr__fprintf(FILE *fp, const char *name, const char *val, + void *priv __attribute__((unused))) +{ + return fprintf(fp, " %-32s %s\n", name, val); +} + +static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, + struct thread_map *threads) +{ + int cpu, thread, nthreads; + unsigned long flags = PERF_FLAG_FD_CLOEXEC; + int pid = -1, err; + enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; + + if (evsel->system_wide) + nthreads = 1; + else + nthreads = threads->nr; + + if (evsel->fd == NULL && + perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0) + return -ENOMEM; + + if (evsel->cgrp) { + flags |= PERF_FLAG_PID_CGROUP; + pid = evsel->cgrp->fd; + } + +fallback_missing_features: + if (perf_missing_features.clockid_wrong) + evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */ + if (perf_missing_features.clockid) { + evsel->attr.use_clockid = 0; + evsel->attr.clockid = 0; + } + if (perf_missing_features.cloexec) + flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC; + if (perf_missing_features.mmap2) + evsel->attr.mmap2 = 0; + if (perf_missing_features.exclude_guest) + evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; +retry_sample_id: + if (perf_missing_features.sample_id_all) + evsel->attr.sample_id_all = 0; + + if (verbose >= 2) { + fprintf(stderr, "%.60s\n", graph_dotted_line); + fprintf(stderr, "perf_event_attr:\n"); + perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL); + fprintf(stderr, "%.60s\n", graph_dotted_line); + } + + for (cpu = 0; cpu < cpus->nr; cpu++) { + + for (thread = 0; thread < nthreads; thread++) { + int group_fd; + + if (!evsel->cgrp && !evsel->system_wide) + pid = threads->map[thread]; + + group_fd = get_group_fd(evsel, cpu, thread); +retry_open: + pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n", + pid, cpus->map[cpu], group_fd, flags); + + FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, + pid, + cpus->map[cpu], + group_fd, flags); + if (FD(evsel, cpu, thread) < 0) { + err = -errno; + pr_debug2("sys_perf_event_open failed, error %d\n", + err); + goto try_fallback; + } + 
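/* + * A successful open resets the fd-limit escalation state, so a later + * EMFILE in this loop starts over in try_fallback below: first raising + * rlim_cur to rlim_max, then pushing past it, before retrying. + */ +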
set_rlimit = NO_CHANGE; + + /* + * If we succeeded but had to kill clockid, fail and + * have perf_evsel__open_strerror() print us a nice + * error. + */ + if (perf_missing_features.clockid || + perf_missing_features.clockid_wrong) { + err = -EINVAL; + goto out_close; + } + } + } + + return 0; + +try_fallback: + /* + * perf stat needs between 5 and 22 fds per CPU. When we run out + * of them try to increase the limits. + */ + if (err == -EMFILE && set_rlimit < INCREASED_MAX) { + struct rlimit l; + int old_errno = errno; + + if (getrlimit(RLIMIT_NOFILE, &l) == 0) { + if (set_rlimit == NO_CHANGE) + l.rlim_cur = l.rlim_max; + else { + l.rlim_cur = l.rlim_max + 1000; + l.rlim_max = l.rlim_cur; + } + if (setrlimit(RLIMIT_NOFILE, &l) == 0) { + set_rlimit++; + errno = old_errno; + goto retry_open; + } + } + errno = old_errno; + } + + if (err != -EINVAL || cpu > 0 || thread > 0) + goto out_close; + + /* + * Must probe features in the order they were added to the + * perf_event_attr interface. + */ + if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) { + perf_missing_features.clockid_wrong = true; + goto fallback_missing_features; + } else if (!perf_missing_features.clockid && evsel->attr.use_clockid) { + perf_missing_features.clockid = true; + goto fallback_missing_features; + } else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) { + perf_missing_features.cloexec = true; + goto fallback_missing_features; + } else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) { + perf_missing_features.mmap2 = true; + goto fallback_missing_features; + } else if (!perf_missing_features.exclude_guest && + (evsel->attr.exclude_guest || evsel->attr.exclude_host)) { + perf_missing_features.exclude_guest = true; + goto fallback_missing_features; + } else if (!perf_missing_features.sample_id_all) { + perf_missing_features.sample_id_all = true; + goto retry_sample_id; + } + +out_close: + do { + while (--thread >= 0) { + close(FD(evsel, cpu, thread)); + FD(evsel, cpu, thread) = -1; + } + thread = nthreads; + } while (--cpu >= 0); + return err; +} + +void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads) +{ + if (evsel->fd == NULL) + return; + + perf_evsel__close_fd(evsel, ncpus, nthreads); + perf_evsel__free_fd(evsel); +} + +static struct { + struct cpu_map map; + int cpus[1]; +} empty_cpu_map = { + .map.nr = 1, + .cpus = { -1, }, +}; + +static struct { + struct thread_map map; + int threads[1]; +} empty_thread_map = { + .map.nr = 1, + .threads = { -1, }, +}; + +int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, + struct thread_map *threads) +{ + if (cpus == NULL) { + /* Work around old compiler warnings about strict aliasing */ + cpus = &empty_cpu_map.map; + } + + if (threads == NULL) + threads = &empty_thread_map.map; + + return __perf_evsel__open(evsel, cpus, threads); +} + +int perf_evsel__open_per_cpu(struct perf_evsel *evsel, + struct cpu_map *cpus) +{ + return __perf_evsel__open(evsel, cpus, &empty_thread_map.map); +} + +int perf_evsel__open_per_thread(struct perf_evsel *evsel, + struct thread_map *threads) +{ + return __perf_evsel__open(evsel, &empty_cpu_map.map, threads); +} + +static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel, + const union perf_event *event, + struct perf_sample *sample) +{ + u64 type = evsel->attr.sample_type; + const u64 *array = event->sample.array; + bool swapped = evsel->needs_swap; + union u64_swap u; + + array += ((event->header.size - + sizeof(event->header)) / sizeof(u64)) - 1; + + 
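/* + * The id sample is appended at the tail of non-sample events, so it is + * parsed backwards: 'array' starts at the last u64 and each present + * field is pulled off in the reverse of the PERF_RECORD_SAMPLE order, + * e.g. with TID|TIME|ID|CPU set the tail is ... [pid/tid] [time] [id] [cpu]. + */ +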
if (type & PERF_SAMPLE_IDENTIFIER) { + sample->id = *array; + array--; + } + + if (type & PERF_SAMPLE_CPU) { + u.val64 = *array; + if (swapped) { + /* undo swap of u64, then swap on individual u32s */ + u.val64 = bswap_64(u.val64); + u.val32[0] = bswap_32(u.val32[0]); + } + + sample->cpu = u.val32[0]; + array--; + } + + if (type & PERF_SAMPLE_STREAM_ID) { + sample->stream_id = *array; + array--; + } + + if (type & PERF_SAMPLE_ID) { + sample->id = *array; + array--; + } + + if (type & PERF_SAMPLE_TIME) { + sample->time = *array; + array--; + } + + if (type & PERF_SAMPLE_TID) { + u.val64 = *array; + if (swapped) { + /* undo swap of u64, then swap on individual u32s */ + u.val64 = bswap_64(u.val64); + u.val32[0] = bswap_32(u.val32[0]); + u.val32[1] = bswap_32(u.val32[1]); + } + + sample->pid = u.val32[0]; + sample->tid = u.val32[1]; + array--; + } + + return 0; +} + +static inline bool overflow(const void *endp, u16 max_size, const void *offset, + u64 size) +{ + return size > max_size || offset + size > endp; +} + +#define OVERFLOW_CHECK(offset, size, max_size) \ + do { \ + if (overflow(endp, (max_size), (offset), (size))) \ + return -EFAULT; \ + } while (0) + +#define OVERFLOW_CHECK_u64(offset) \ + OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64)) + +int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, + struct perf_sample *data) +{ + u64 type = evsel->attr.sample_type; + bool swapped = evsel->needs_swap; + const u64 *array; + u16 max_size = event->header.size; + const void *endp = (void *)event + max_size; + u64 sz; + + /* + * used for cross-endian analysis. See git commit 65014ab3 + * for why this goofiness is needed. + */ + union u64_swap u; + + memset(data, 0, sizeof(*data)); + data->cpu = data->pid = data->tid = -1; + data->stream_id = data->id = data->time = -1ULL; + data->period = evsel->attr.sample_period; + data->weight = 0; + + if (event->header.type != PERF_RECORD_SAMPLE) { + if (!evsel->attr.sample_id_all) + return 0; + return perf_evsel__parse_id_sample(evsel, event, data); + } + + array = event->sample.array; + + /* + * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes + * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to + * check the format does not go past the end of the event. 
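+ * The fixed-size leading part is therefore validated once just below; + * every variable-sized field after it does its own OVERFLOW_CHECK().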
+ */ + if (evsel->sample_size + sizeof(event->header) > event->header.size) + return -EFAULT; + + data->id = -1ULL; + if (type & PERF_SAMPLE_IDENTIFIER) { + data->id = *array; + array++; + } + + if (type & PERF_SAMPLE_IP) { + data->ip = *array; + array++; + } + + if (type & PERF_SAMPLE_TID) { + u.val64 = *array; + if (swapped) { + /* undo swap of u64, then swap on individual u32s */ + u.val64 = bswap_64(u.val64); + u.val32[0] = bswap_32(u.val32[0]); + u.val32[1] = bswap_32(u.val32[1]); + } + + data->pid = u.val32[0]; + data->tid = u.val32[1]; + array++; + } + + if (type & PERF_SAMPLE_TIME) { + data->time = *array; + array++; + } + + data->addr = 0; + if (type & PERF_SAMPLE_ADDR) { + data->addr = *array; + array++; + } + + if (type & PERF_SAMPLE_ID) { + data->id = *array; + array++; + } + + if (type & PERF_SAMPLE_STREAM_ID) { + data->stream_id = *array; + array++; + } + + if (type & PERF_SAMPLE_CPU) { + + u.val64 = *array; + if (swapped) { + /* undo swap of u64, then swap on individual u32s */ + u.val64 = bswap_64(u.val64); + u.val32[0] = bswap_32(u.val32[0]); + } + + data->cpu = u.val32[0]; + array++; + } + + if (type & PERF_SAMPLE_PERIOD) { + data->period = *array; + array++; + } + + if (type & PERF_SAMPLE_READ) { + u64 read_format = evsel->attr.read_format; + + OVERFLOW_CHECK_u64(array); + if (read_format & PERF_FORMAT_GROUP) + data->read.group.nr = *array; + else + data->read.one.value = *array; + + array++; + + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + OVERFLOW_CHECK_u64(array); + data->read.time_enabled = *array; + array++; + } + + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + OVERFLOW_CHECK_u64(array); + data->read.time_running = *array; + array++; + } + + /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ + if (read_format & PERF_FORMAT_GROUP) { + const u64 max_group_nr = UINT64_MAX / + sizeof(struct sample_read_value); + + if (data->read.group.nr > max_group_nr) + return -EFAULT; + sz = data->read.group.nr * + sizeof(struct sample_read_value); + OVERFLOW_CHECK(array, sz, max_size); + data->read.group.values = + (struct sample_read_value *)array; + array = (void *)array + sz; + } else { + OVERFLOW_CHECK_u64(array); + data->read.one.id = *array; + array++; + } + } + + if (type & PERF_SAMPLE_CALLCHAIN) { + const u64 max_callchain_nr = UINT64_MAX / sizeof(u64); + + OVERFLOW_CHECK_u64(array); + data->callchain = (struct ip_callchain *)array++; + if (data->callchain->nr > max_callchain_nr) + return -EFAULT; + sz = data->callchain->nr * sizeof(u64); + OVERFLOW_CHECK(array, sz, max_size); + array = (void *)array + sz; + } + + if (type & PERF_SAMPLE_RAW) { + OVERFLOW_CHECK_u64(array); + u.val64 = *array; + if (WARN_ONCE(swapped, + "Endianness of raw data not corrected!\n")) { + /* undo swap of u64, then swap on individual u32s */ + u.val64 = bswap_64(u.val64); + u.val32[0] = bswap_32(u.val32[0]); + u.val32[1] = bswap_32(u.val32[1]); + } + data->raw_size = u.val32[0]; + array = (void *)array + sizeof(u32); + + OVERFLOW_CHECK(array, data->raw_size, max_size); + data->raw_data = (void *)array; + array = (void *)array + data->raw_size; + } + + if (type & PERF_SAMPLE_BRANCH_STACK) { + const u64 max_branch_nr = UINT64_MAX / + sizeof(struct branch_entry); + + OVERFLOW_CHECK_u64(array); + data->branch_stack = (struct branch_stack *)array++; + + if (data->branch_stack->nr > max_branch_nr) + return -EFAULT; + sz = data->branch_stack->nr * sizeof(struct branch_entry); + OVERFLOW_CHECK(array, sz, max_size); + array = (void *)array + sz; + } + + if (type & PERF_SAMPLE_REGS_USER) { + 
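/* + * Variable-size field: the leading u64 is the register-dump ABI; zero + * means no registers were captured, otherwise one u64 follows for each + * bit set in attr->sample_regs_user (hweight_long(mask) of them). + */ +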
OVERFLOW_CHECK_u64(array); + data->user_regs.abi = *array; + array++; + + if (data->user_regs.abi) { + u64 mask = evsel->attr.sample_regs_user; + + sz = hweight_long(mask) * sizeof(u64); + OVERFLOW_CHECK(array, sz, max_size); + data->user_regs.mask = mask; + data->user_regs.regs = (u64 *)array; + array = (void *)array + sz; + } + } + + if (type & PERF_SAMPLE_STACK_USER) { + OVERFLOW_CHECK_u64(array); + sz = *array++; + + data->user_stack.offset = ((char *)(array - 1) + - (char *) event); + + if (!sz) { + data->user_stack.size = 0; + } else { + OVERFLOW_CHECK(array, sz, max_size); + data->user_stack.data = (char *)array; + array = (void *)array + sz; + OVERFLOW_CHECK_u64(array); + data->user_stack.size = *array++; + if (WARN_ONCE(data->user_stack.size > sz, + "user stack dump failure\n")) + return -EFAULT; + } + } + + data->weight = 0; + if (type & PERF_SAMPLE_WEIGHT) { + OVERFLOW_CHECK_u64(array); + data->weight = *array; + array++; + } + + data->data_src = PERF_MEM_DATA_SRC_NONE; + if (type & PERF_SAMPLE_DATA_SRC) { + OVERFLOW_CHECK_u64(array); + data->data_src = *array; + array++; + } + + data->transaction = 0; + if (type & PERF_SAMPLE_TRANSACTION) { + OVERFLOW_CHECK_u64(array); + data->transaction = *array; + array++; + } + + data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE; + if (type & PERF_SAMPLE_REGS_INTR) { + OVERFLOW_CHECK_u64(array); + data->intr_regs.abi = *array; + array++; + + if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) { + u64 mask = evsel->attr.sample_regs_intr; + + sz = hweight_long(mask) * sizeof(u64); + OVERFLOW_CHECK(array, sz, max_size); + data->intr_regs.mask = mask; + data->intr_regs.regs = (u64 *)array; + array = (void *)array + sz; + } + } + + return 0; +} + +size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, + u64 read_format) +{ + size_t sz, result = sizeof(struct sample_event); + + if (type & PERF_SAMPLE_IDENTIFIER) + result += sizeof(u64); + + if (type & PERF_SAMPLE_IP) + result += sizeof(u64); + + if (type & PERF_SAMPLE_TID) + result += sizeof(u64); + + if (type & PERF_SAMPLE_TIME) + result += sizeof(u64); + + if (type & PERF_SAMPLE_ADDR) + result += sizeof(u64); + + if (type & PERF_SAMPLE_ID) + result += sizeof(u64); + + if (type & PERF_SAMPLE_STREAM_ID) + result += sizeof(u64); + + if (type & PERF_SAMPLE_CPU) + result += sizeof(u64); + + if (type & PERF_SAMPLE_PERIOD) + result += sizeof(u64); + + if (type & PERF_SAMPLE_READ) { + result += sizeof(u64); + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + result += sizeof(u64); + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + result += sizeof(u64); + /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ + if (read_format & PERF_FORMAT_GROUP) { + sz = sample->read.group.nr * + sizeof(struct sample_read_value); + result += sz; + } else { + result += sizeof(u64); + } + } + + if (type & PERF_SAMPLE_CALLCHAIN) { + sz = (sample->callchain->nr + 1) * sizeof(u64); + result += sz; + } + + if (type & PERF_SAMPLE_RAW) { + result += sizeof(u32); + result += sample->raw_size; + } + + if (type & PERF_SAMPLE_BRANCH_STACK) { + sz = sample->branch_stack->nr * sizeof(struct branch_entry); + sz += sizeof(u64); + result += sz; + } + + if (type & PERF_SAMPLE_REGS_USER) { + if (sample->user_regs.abi) { + result += sizeof(u64); + sz = hweight_long(sample->user_regs.mask) * sizeof(u64); + result += sz; + } else { + result += sizeof(u64); + } + } + + if (type & PERF_SAMPLE_STACK_USER) { + sz = sample->user_stack.size; + result += sizeof(u64); + if (sz) { + result += sz; + result += 
sizeof(u64); + } + } + + if (type & PERF_SAMPLE_WEIGHT) + result += sizeof(u64); + + if (type & PERF_SAMPLE_DATA_SRC) + result += sizeof(u64); + + if (type & PERF_SAMPLE_TRANSACTION) + result += sizeof(u64); + + if (type & PERF_SAMPLE_REGS_INTR) { + if (sample->intr_regs.abi) { + result += sizeof(u64); + sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); + result += sz; + } else { + result += sizeof(u64); + } + } + + return result; +} + +int perf_event__synthesize_sample(union perf_event *event, u64 type, + u64 read_format, + const struct perf_sample *sample, + bool swapped) +{ + u64 *array; + size_t sz; + /* + * used for cross-endian analysis. See git commit 65014ab3 + * for why this goofiness is needed. + */ + union u64_swap u; + + array = event->sample.array; + + if (type & PERF_SAMPLE_IDENTIFIER) { + *array = sample->id; + array++; + } + + if (type & PERF_SAMPLE_IP) { + *array = sample->ip; + array++; + } + + if (type & PERF_SAMPLE_TID) { + u.val32[0] = sample->pid; + u.val32[1] = sample->tid; + if (swapped) { + /* + * Inverse of what is done in perf_evsel__parse_sample + */ + u.val32[0] = bswap_32(u.val32[0]); + u.val32[1] = bswap_32(u.val32[1]); + u.val64 = bswap_64(u.val64); + } + + *array = u.val64; + array++; + } + + if (type & PERF_SAMPLE_TIME) { + *array = sample->time; + array++; + } + + if (type & PERF_SAMPLE_ADDR) { + *array = sample->addr; + array++; + } + + if (type & PERF_SAMPLE_ID) { + *array = sample->id; + array++; + } + + if (type & PERF_SAMPLE_STREAM_ID) { + *array = sample->stream_id; + array++; + } + + if (type & PERF_SAMPLE_CPU) { + u.val32[0] = sample->cpu; + if (swapped) { + /* + * Inverse of what is done in perf_evsel__parse_sample + */ + u.val32[0] = bswap_32(u.val32[0]); + u.val64 = bswap_64(u.val64); + } + *array = u.val64; + array++; + } + + if (type & PERF_SAMPLE_PERIOD) { + *array = sample->period; + array++; + } + + if (type & PERF_SAMPLE_READ) { + if (read_format & PERF_FORMAT_GROUP) + *array = sample->read.group.nr; + else + *array = sample->read.one.value; + array++; + + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + *array = sample->read.time_enabled; + array++; + } + + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + *array = sample->read.time_running; + array++; + } + + /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ + if (read_format & PERF_FORMAT_GROUP) { + sz = sample->read.group.nr * + sizeof(struct sample_read_value); + memcpy(array, sample->read.group.values, sz); + array = (void *)array + sz; + } else { + *array = sample->read.one.id; + array++; + } + } + + if (type & PERF_SAMPLE_CALLCHAIN) { + sz = (sample->callchain->nr + 1) * sizeof(u64); + memcpy(array, sample->callchain, sz); + array = (void *)array + sz; + } + + if (type & PERF_SAMPLE_RAW) { + u.val32[0] = sample->raw_size; + if (WARN_ONCE(swapped, + "Endianness of raw data not corrected!\n")) { + /* + * Inverse of what is done in perf_evsel__parse_sample + */ + u.val32[0] = bswap_32(u.val32[0]); + u.val32[1] = bswap_32(u.val32[1]); + u.val64 = bswap_64(u.val64); + } + *array = u.val64; + array = (void *)array + sizeof(u32); + + memcpy(array, sample->raw_data, sample->raw_size); + array = (void *)array + sample->raw_size; + } + + if (type & PERF_SAMPLE_BRANCH_STACK) { + sz = sample->branch_stack->nr * sizeof(struct branch_entry); + sz += sizeof(u64); + memcpy(array, sample->branch_stack, sz); + array = (void *)array + sz; + } + + if (type & PERF_SAMPLE_REGS_USER) { + if (sample->user_regs.abi) { + *array++ = sample->user_regs.abi; + sz = 
hweight_long(sample->user_regs.mask) * sizeof(u64); + memcpy(array, sample->user_regs.regs, sz); + array = (void *)array + sz; + } else { + *array++ = 0; + } + } + + if (type & PERF_SAMPLE_STACK_USER) { + sz = sample->user_stack.size; + *array++ = sz; + if (sz) { + memcpy(array, sample->user_stack.data, sz); + array = (void *)array + sz; + *array++ = sz; + } + } + + if (type & PERF_SAMPLE_WEIGHT) { + *array = sample->weight; + array++; + } + + if (type & PERF_SAMPLE_DATA_SRC) { + *array = sample->data_src; + array++; + } + + if (type & PERF_SAMPLE_TRANSACTION) { + *array = sample->transaction; + array++; + } + + if (type & PERF_SAMPLE_REGS_INTR) { + if (sample->intr_regs.abi) { + *array++ = sample->intr_regs.abi; + sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); + memcpy(array, sample->intr_regs.regs, sz); + array = (void *)array + sz; + } else { + *array++ = 0; + } + } + + return 0; +} + +struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name) +{ + return pevent_find_field(evsel->tp_format, name); +} + +void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample, + const char *name) +{ + struct format_field *field = perf_evsel__field(evsel, name); + int offset; + + if (!field) + return NULL; + + offset = field->offset; + + if (field->flags & FIELD_IS_DYNAMIC) { + offset = *(int *)(sample->raw_data + field->offset); + offset &= 0xffff; + } + + return sample->raw_data + offset; +} + +u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, + const char *name) +{ + struct format_field *field = perf_evsel__field(evsel, name); + void *ptr; + u64 value; + + if (!field) + return 0; + + ptr = sample->raw_data + field->offset; + + switch (field->size) { + case 1: + return *(u8 *)ptr; + case 2: + value = *(u16 *)ptr; + break; + case 4: + value = *(u32 *)ptr; + break; + case 8: + memcpy(&value, ptr, sizeof(u64)); + break; + default: + return 0; + } + + if (!evsel->needs_swap) + return value; + + switch (field->size) { + case 2: + return bswap_16(value); + case 4: + return bswap_32(value); + case 8: + return bswap_64(value); + default: + return 0; + } + + return 0; +} + +static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...) 
+{ + va_list args; + int ret = 0; + + if (!*first) { + ret += fprintf(fp, ","); + } else { + ret += fprintf(fp, ":"); + *first = false; + } + + va_start(args, fmt); + ret += vfprintf(fp, fmt, args); + va_end(args); + return ret; +} + +static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv) +{ + return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val); +} + +int perf_evsel__fprintf(struct perf_evsel *evsel, + struct perf_attr_details *details, FILE *fp) +{ + bool first = true; + int printed = 0; + + if (details->event_group) { + struct perf_evsel *pos; + + if (!perf_evsel__is_group_leader(evsel)) + return 0; + + if (evsel->nr_members > 1) + printed += fprintf(fp, "%s{", evsel->group_name ?: ""); + + printed += fprintf(fp, "%s", perf_evsel__name(evsel)); + for_each_group_member(pos, evsel) + printed += fprintf(fp, ",%s", perf_evsel__name(pos)); + + if (evsel->nr_members > 1) + printed += fprintf(fp, "}"); + goto out; + } + + printed += fprintf(fp, "%s", perf_evsel__name(evsel)); + + if (details->verbose) { + printed += perf_event_attr__fprintf(fp, &evsel->attr, + __print_attr__fprintf, &first); + } else if (details->freq) { + printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64, + (u64)evsel->attr.sample_freq); + } +out: + fputc('\n', fp); + return ++printed; +} + +bool perf_evsel__fallback(struct perf_evsel *evsel, int err, + char *msg, size_t msgsize) +{ + if ((err == ENOENT || err == ENXIO || err == ENODEV) && + evsel->attr.type == PERF_TYPE_HARDWARE && + evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) { + /* + * If it's cycles then fall back to hrtimer based + * cpu-clock-tick sw counter, which is always available even if + * no PMU support. + * + * PPC returns ENXIO until 2.6.37 (behavior changed with commit + * b0a873e). + */ + scnprintf(msg, msgsize, "%s", +"The cycles event is not supported, trying to fall back to cpu-clock-ticks"); + + evsel->attr.type = PERF_TYPE_SOFTWARE; + evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK; + + zfree(&evsel->name); + return true; + } + + return false; +} + +int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, + int err, char *msg, size_t size) +{ + char sbuf[STRERR_BUFSIZE]; + + switch (err) { + case EPERM: + case EACCES: + return scnprintf(msg, size, + "You may not have permission to collect %sstats.\n" + "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" + " -1 - Not paranoid at all\n" + " 0 - Disallow raw tracepoint access for unpriv\n" + " 1 - Disallow cpu events for unpriv\n" + " 2 - Disallow kernel profiling for unpriv", + target->system_wide ? "system-wide " : ""); + case ENOENT: + return scnprintf(msg, size, "The %s event is not supported.", + perf_evsel__name(evsel)); + case EMFILE: + return scnprintf(msg, size, "%s", + "Too many events are opened.\n" + "Try again after reducing the number of events."); + case ENODEV: + if (target->cpu_list) + return scnprintf(msg, size, "%s", + "No such device - did you specify an out-of-range profile CPU?\n"); + break; + case EOPNOTSUPP: + if (evsel->attr.precise_ip) + return scnprintf(msg, size, "%s", + "\'precise\' request may not be supported. Try removing 'p' modifier."); +#if defined(__i386__) || defined(__x86_64__) + if (evsel->attr.type == PERF_TYPE_HARDWARE) + return scnprintf(msg, size, "%s", + "No hardware sampling interrupt available.\n" + "No APIC? 
If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it."); +#endif + break; + case EBUSY: + if (find_process("oprofiled")) + return scnprintf(msg, size, + "The PMU counters are busy/taken by another profiler.\n" + "We found oprofile daemon running, please stop it and try again."); + break; + case EINVAL: + if (perf_missing_features.clockid) + return scnprintf(msg, size, "clockid feature not supported."); + if (perf_missing_features.clockid_wrong) + return scnprintf(msg, size, "wrong clockid (%d).", clockid); + break; + default: + break; + } + + return scnprintf(msg, size, + "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" + "/bin/dmesg may provide additional information.\n" + "No CONFIG_PERF_EVENTS=y kernel support configured?\n", + err, strerror_r(err, sbuf, sizeof(sbuf)), + perf_evsel__name(evsel)); +} diff --git a/kernel/tools/perf/util/evsel.h b/kernel/tools/perf/util/evsel.h new file mode 100644 index 000000000..e486151b0 --- /dev/null +++ b/kernel/tools/perf/util/evsel.h @@ -0,0 +1,369 @@ +#ifndef __PERF_EVSEL_H +#define __PERF_EVSEL_H 1 + +#include +#include +#include +#include +#include +#include "xyarray.h" +#include "symbol.h" + +struct perf_counts_values { + union { + struct { + u64 val; + u64 ena; + u64 run; + }; + u64 values[3]; + }; +}; + +struct perf_counts { + s8 scaled; + struct perf_counts_values aggr; + struct perf_counts_values cpu[]; +}; + +struct perf_evsel; + +/* + * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are + * more than one entry in the evlist. + */ +struct perf_sample_id { + struct hlist_node node; + u64 id; + struct perf_evsel *evsel; + int idx; + int cpu; + pid_t tid; + + /* Holds total ID period value for PERF_SAMPLE_READ processing. */ + u64 period; +}; + +struct cgroup_sel; + +/** struct perf_evsel - event selector + * + * @name - Can be set to retain the original event name passed by the user, + * so that when showing results in tools such as 'perf stat', we + * show the name used, not some alias. + * @id_pos: the position of the event id (PERF_SAMPLE_ID or + * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of + * struct sample_event + * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or + * PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. 
if sample_id_all + * is used there is an id sample appended to non-sample events + * @priv: And what is in its containing unnamed union are tool specific + */ +struct perf_evsel { + struct list_head node; + struct perf_event_attr attr; + char *filter; + struct xyarray *fd; + struct xyarray *sample_id; + u64 *id; + struct perf_counts *counts; + struct perf_counts *prev_raw_counts; + int idx; + u32 ids; + char *name; + double scale; + const char *unit; + bool snapshot; + struct event_format *tp_format; + union { + void *priv; + off_t id_offset; + u64 db_id; + }; + struct cgroup_sel *cgrp; + void *handler; + struct cpu_map *cpus; + unsigned int sample_size; + int id_pos; + int is_pos; + bool supported; + bool needs_swap; + bool no_aux_samples; + bool immediate; + bool system_wide; + bool tracking; + bool per_pkg; + unsigned long *per_pkg_mask; + /* parse modifier helper */ + int exclude_GH; + int nr_members; + int sample_read; + struct perf_evsel *leader; + char *group_name; +}; + +union u64_swap { + u64 val64; + u32 val32[2]; +}; + +struct cpu_map; +struct target; +struct thread_map; +struct perf_evlist; +struct record_opts; + +void perf_counts_values__scale(struct perf_counts_values *count, + bool scale, s8 *pscaled); + +void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, + struct perf_counts_values *count); + +int perf_evsel__object_config(size_t object_size, + int (*init)(struct perf_evsel *evsel), + void (*fini)(struct perf_evsel *evsel)); + +struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx); + +static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr) +{ + return perf_evsel__new_idx(attr, 0); +} + +struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx); + +static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name) +{ + return perf_evsel__newtp_idx(sys, name, 0); +} + +struct event_format *event_format__new(const char *sys, const char *name); + +void perf_evsel__init(struct perf_evsel *evsel, + struct perf_event_attr *attr, int idx); +void perf_evsel__exit(struct perf_evsel *evsel); +void perf_evsel__delete(struct perf_evsel *evsel); + +void perf_evsel__config(struct perf_evsel *evsel, + struct record_opts *opts); + +int __perf_evsel__sample_size(u64 sample_type); +void perf_evsel__calc_id_pos(struct perf_evsel *evsel); + +bool perf_evsel__is_cache_op_valid(u8 type, u8 op); + +#define PERF_EVSEL__MAX_ALIASES 8 + +extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] + [PERF_EVSEL__MAX_ALIASES]; +extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_EVSEL__MAX_ALIASES]; +extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] + [PERF_EVSEL__MAX_ALIASES]; +extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX]; +extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX]; +int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, + char *bf, size_t size); +const char *perf_evsel__name(struct perf_evsel *evsel); + +const char *perf_evsel__group_name(struct perf_evsel *evsel); +int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size); + +int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads); +int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus); +void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus); +void perf_evsel__free_counts(struct perf_evsel *evsel); +void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int 
nthreads); + +void __perf_evsel__set_sample_bit(struct perf_evsel *evsel, + enum perf_event_sample_format bit); +void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, + enum perf_event_sample_format bit); + +#define perf_evsel__set_sample_bit(evsel, bit) \ + __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit) + +#define perf_evsel__reset_sample_bit(evsel, bit) \ + __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit) + +void perf_evsel__set_sample_id(struct perf_evsel *evsel, + bool use_sample_identifier); + +int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, + const char *filter); +int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads); + +int perf_evsel__open_per_cpu(struct perf_evsel *evsel, + struct cpu_map *cpus); +int perf_evsel__open_per_thread(struct perf_evsel *evsel, + struct thread_map *threads); +int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, + struct thread_map *threads); +void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads); + +struct perf_sample; + +void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample, + const char *name); +u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, + const char *name); + +static inline char *perf_evsel__strval(struct perf_evsel *evsel, + struct perf_sample *sample, + const char *name) +{ + return perf_evsel__rawptr(evsel, sample, name); +} + +struct format_field; + +struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name); + +#define perf_evsel__match(evsel, t, c) \ + (evsel->attr.type == PERF_TYPE_##t && \ + evsel->attr.config == PERF_COUNT_##c) + +static inline bool perf_evsel__match2(struct perf_evsel *e1, + struct perf_evsel *e2) +{ + return (e1->attr.type == e2->attr.type) && + (e1->attr.config == e2->attr.config); +} + +#define perf_evsel__cmp(a, b) \ + ((a) && \ + (b) && \ + (a)->attr.type == (b)->attr.type && \ + (a)->attr.config == (b)->attr.config) + +typedef int (perf_evsel__read_cb_t)(struct perf_evsel *evsel, + int cpu, int thread, + struct perf_counts_values *count); + +int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread, + perf_evsel__read_cb_t cb); + +int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, + int cpu, int thread, bool scale); + +/** + * perf_evsel__read_on_cpu - Read out the results on a CPU and thread + * + * @evsel - event selector to read value + * @cpu - CPU of interest + * @thread - thread of interest + */ +static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel, + int cpu, int thread) +{ + return __perf_evsel__read_on_cpu(evsel, cpu, thread, false); +} + +/** + * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled + * + * @evsel - event selector to read value + * @cpu - CPU of interest + * @thread - thread of interest + */ +static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel, + int cpu, int thread) +{ + return __perf_evsel__read_on_cpu(evsel, cpu, thread, true); +} + +int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, + struct perf_sample *sample); + +static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel) +{ + return list_entry(evsel->node.next, struct perf_evsel, node); +} + +static inline struct perf_evsel *perf_evsel__prev(struct perf_evsel *evsel) +{ + return list_entry(evsel->node.prev, struct perf_evsel, node); +} + +/** + * perf_evsel__is_group_leader - Return whether given evsel is a leader event + 
* + * @evsel - evsel selector to be tested + * + * Return %true if @evsel is a group leader or a stand-alone event + */ +static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel) +{ + return evsel->leader == evsel; +} + +/** + * perf_evsel__is_group_event - Return whether given evsel is a group event + * + * @evsel - evsel selector to be tested + * + * Return %true iff event group view is enabled and @evsel is an actual group + * leader which has other members in the group + */ +static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel) +{ + if (!symbol_conf.event_group) + return false; + + return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1; +} + +/** + * perf_evsel__is_function_event - Return whether given evsel is a function + * trace event + * + * @evsel - evsel selector to be tested + * + * Return %true if event is function trace event + */ +static inline bool perf_evsel__is_function_event(struct perf_evsel *evsel) +{ +#define FUNCTION_EVENT "ftrace:function" + + return evsel->name && + !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT)); + +#undef FUNCTION_EVENT +} + +struct perf_attr_details { + bool freq; + bool verbose; + bool event_group; + bool force; +}; + +int perf_evsel__fprintf(struct perf_evsel *evsel, + struct perf_attr_details *details, FILE *fp); + +bool perf_evsel__fallback(struct perf_evsel *evsel, int err, + char *msg, size_t msgsize); +int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, + int err, char *msg, size_t size); + +static inline int perf_evsel__group_idx(struct perf_evsel *evsel) +{ + return evsel->idx - evsel->leader->idx; +} + +#define for_each_group_member(_evsel, _leader) \ +for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \ + (_evsel) && (_evsel)->leader == (_leader); \ + (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node)) + +static inline bool has_branch_callstack(struct perf_evsel *evsel) +{ + return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK; +} + +typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *); + +int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr, + attr__fprintf_f attr__fprintf, void *priv); + +#endif /* __PERF_EVSEL_H */ diff --git a/kernel/tools/perf/util/exec_cmd.c b/kernel/tools/perf/util/exec_cmd.c new file mode 100644 index 000000000..7adf4ad15 --- /dev/null +++ b/kernel/tools/perf/util/exec_cmd.c @@ -0,0 +1,148 @@ +#include "cache.h" +#include "exec_cmd.h" +#include "quote.h" + +#include <string.h> + +#define MAX_ARGS 32 + +static const char *argv_exec_path; +static const char *argv0_path; + +const char *system_path(const char *path) +{ + static const char *prefix = PREFIX; + struct strbuf d = STRBUF_INIT; + + if (is_absolute_path(path)) + return path; + + strbuf_addf(&d, "%s/%s", prefix, path); + path = strbuf_detach(&d, NULL); + return path; +} + +const char *perf_extract_argv0_path(const char *argv0) +{ + const char *slash; + + if (!argv0 || !*argv0) + return NULL; + slash = argv0 + strlen(argv0); + + while (argv0 <= slash && !is_dir_sep(*slash)) + slash--; + + if (slash >= argv0) { + argv0_path = strndup(argv0, slash - argv0); + return argv0_path ? slash + 1 : NULL; + } + + return argv0; +} + +void perf_set_argv_exec_path(const char *exec_path) +{ + argv_exec_path = exec_path; + /* + * Propagate this setting to external programs. + */ + setenv(EXEC_PATH_ENVIRONMENT, exec_path, 1); +} + + +/* Returns the highest-priority location to look for perf programs. 
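+ * Priority order: the path set via perf_set_argv_exec_path(), then the + * EXEC_PATH_ENVIRONMENT variable, then the compiled-in system path.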
*/ +const char *perf_exec_path(void) +{ + const char *env; + + if (argv_exec_path) + return argv_exec_path; + + env = getenv(EXEC_PATH_ENVIRONMENT); + if (env && *env) { + return env; + } + + return system_path(PERF_EXEC_PATH); +} + +static void add_path(struct strbuf *out, const char *path) +{ + if (path && *path) { + if (is_absolute_path(path)) + strbuf_addstr(out, path); + else + strbuf_addstr(out, make_nonrelative_path(path)); + + strbuf_addch(out, PATH_SEP); + } +} + +void setup_path(void) +{ + const char *old_path = getenv("PATH"); + struct strbuf new_path = STRBUF_INIT; + + add_path(&new_path, perf_exec_path()); + add_path(&new_path, argv0_path); + + if (old_path) + strbuf_addstr(&new_path, old_path); + else + strbuf_addstr(&new_path, "/usr/local/bin:/usr/bin:/bin"); + + setenv("PATH", new_path.buf, 1); + + strbuf_release(&new_path); +} + +static const char **prepare_perf_cmd(const char **argv) +{ + int argc; + const char **nargv; + + for (argc = 0; argv[argc]; argc++) + ; /* just counting */ + nargv = malloc(sizeof(*nargv) * (argc + 2)); + + nargv[0] = "perf"; + for (argc = 0; argv[argc]; argc++) + nargv[argc + 1] = argv[argc]; + nargv[argc + 1] = NULL; + return nargv; +} + +int execv_perf_cmd(const char **argv) { + const char **nargv = prepare_perf_cmd(argv); + + /* execvp() can only ever return if it fails */ + execvp("perf", (char **)nargv); + + free(nargv); + return -1; +} + + +int execl_perf_cmd(const char *cmd,...) +{ + int argc; + const char *argv[MAX_ARGS + 1]; + const char *arg; + va_list param; + + va_start(param, cmd); + argv[0] = cmd; + argc = 1; + while (argc < MAX_ARGS) { + arg = argv[argc++] = va_arg(param, char *); + if (!arg) + break; + } + va_end(param); + if (MAX_ARGS <= argc) + return error("too many args to run %s", cmd); + + argv[argc] = NULL; + return execv_perf_cmd(argv); +} diff --git a/kernel/tools/perf/util/exec_cmd.h b/kernel/tools/perf/util/exec_cmd.h new file mode 100644 index 000000000..bc4b91596 --- /dev/null +++ b/kernel/tools/perf/util/exec_cmd.h @@ -0,0 +1,12 @@ +#ifndef __PERF_EXEC_CMD_H +#define __PERF_EXEC_CMD_H + +extern void perf_set_argv_exec_path(const char *exec_path); +extern const char *perf_extract_argv0_path(const char *path); +extern const char *perf_exec_path(void); +extern void setup_path(void); +extern int execv_perf_cmd(const char **argv); /* NULL terminated */ +extern int execl_perf_cmd(const char *cmd, ...); +extern const char *system_path(const char *path); + +#endif /* __PERF_EXEC_CMD_H */ diff --git a/kernel/tools/perf/util/find-vdso-map.c b/kernel/tools/perf/util/find-vdso-map.c new file mode 100644 index 000000000..95ef1cffc --- /dev/null +++ b/kernel/tools/perf/util/find-vdso-map.c @@ -0,0 +1,30 @@ +static int find_vdso_map(void **start, void **end) +{ + FILE *maps; + char line[128]; + int found = 0; + + maps = fopen("/proc/self/maps", "r"); + if (!maps) { + fprintf(stderr, "vdso: cannot open maps\n"); + return -1; + } + + while (!found && fgets(line, sizeof(line), maps)) { + int m = -1; + + /* We care only about private r-x mappings. 
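+ * The trailing %n in the sscanf format records the offset where the + * mapping name begins, so it can be matched against VDSO__MAP_NAME below.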
*/
+ if (2 != sscanf(line, "%p-%p r-xp %*x %*x:%*x %*u %n",
+ start, end, &m))
+ continue;
+ if (m < 0)
+ continue;
+
+ if (!strncmp(&line[m], VDSO__MAP_NAME,
+ sizeof(VDSO__MAP_NAME) - 1))
+ found = 1;
+ }
+
+ fclose(maps);
+ return !found;
+}
diff --git a/kernel/tools/perf/util/generate-cmdlist.sh b/kernel/tools/perf/util/generate-cmdlist.sh
new file mode 100755
index 000000000..36a885d2c
--- /dev/null
+++ b/kernel/tools/perf/util/generate-cmdlist.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+echo "/* Automatically generated by $0 */
+struct cmdname_help
+{
+ char name[16];
+ char help[80];
+};
+
+static struct cmdname_help common_cmds[] = {"
+
+sed -n -e 's/^perf-\([^ ]*\)[ ].* common.*/\1/p' command-list.txt |
+sort |
+while read cmd
+do
+ sed -n '
+ /^NAME/,/perf-'"$cmd"'/H
+ ${
+ x
+ s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/
+ p
+ }' "Documentation/perf-$cmd.txt"
+done
+
+echo "#ifdef HAVE_LIBELF_SUPPORT"
+sed -n -e 's/^perf-\([^ ]*\)[ ].* full.*/\1/p' command-list.txt |
+sort |
+while read cmd
+do
+ sed -n '
+ /^NAME/,/perf-'"$cmd"'/H
+ ${
+ x
+ s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/
+ p
+ }' "Documentation/perf-$cmd.txt"
+done
+echo "#endif /* HAVE_LIBELF_SUPPORT */"
+echo "};"
diff --git a/kernel/tools/perf/util/header.c b/kernel/tools/perf/util/header.c
new file mode 100644
index 000000000..918fd8ae2
--- /dev/null
+++ b/kernel/tools/perf/util/header.c
@@ -0,0 +1,2781 @@
+#include "util.h"
+#include <sys/types.h>
+#include <byteswap.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <sys/utsname.h>
+
+#include "evlist.h"
+#include "evsel.h"
+#include "header.h"
+#include "../perf.h"
+#include "trace-event.h"
+#include "session.h"
+#include "symbol.h"
+#include "debug.h"
+#include "cpumap.h"
+#include "pmu.h"
+#include "vdso.h"
+#include "strbuf.h"
+#include "build-id.h"
+#include "data.h"
+
+static u32 header_argc;
+static const char **header_argv;
+
+/*
+ * magic2 = "PERFILE2"
+ * must be a numerical value to let the endianness
+ * determine the memory layout. That way we are able
+ * to detect endianness when reading the perf.data file
+ * back.
+ *
+ * We also check for the legacy (PERFFILE) format.
+ */
+static const char *__perf_magic1 = "PERFFILE";
+static const u64 __perf_magic2 = 0x32454c4946524550ULL;
+static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
+
+#define PERF_MAGIC __perf_magic2
+
+struct perf_file_attr {
+ struct perf_event_attr attr;
+ struct perf_file_section ids;
+};
+
+void perf_header__set_feat(struct perf_header *header, int feat)
+{
+ set_bit(feat, header->adds_features);
+}
+
+void perf_header__clear_feat(struct perf_header *header, int feat)
+{
+ clear_bit(feat, header->adds_features);
+}
+
+bool perf_header__has_feat(const struct perf_header *header, int feat)
+{
+ return test_bit(feat, header->adds_features);
+}
+
+static int do_write(int fd, const void *buf, size_t size)
+{
+ while (size) {
+ int ret = write(fd, buf, size);
+
+ if (ret < 0)
+ return -errno;
+
+ size -= ret;
+ buf += ret;
+ }
+
+ return 0;
+}
+
+int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
+{
+ static const char zero_buf[NAME_ALIGN];
+ int err = do_write(fd, bf, count);
+
+ if (!err)
+ err = do_write(fd, zero_buf, count_aligned - count);
+
+ return err;
+}
+
+static int do_write_string(int fd, const char *str)
+{
+ u32 len, olen;
+ int ret;
+
+ olen = strlen(str) + 1;
+ len = PERF_ALIGN(olen, NAME_ALIGN);
+
+ /* write len, incl.
\0 */ + ret = do_write(fd, &len, sizeof(len)); + if (ret < 0) + return ret; + + return write_padded(fd, str, olen, len); +} + +static char *do_read_string(int fd, struct perf_header *ph) +{ + ssize_t sz, ret; + u32 len; + char *buf; + + sz = readn(fd, &len, sizeof(len)); + if (sz < (ssize_t)sizeof(len)) + return NULL; + + if (ph->needs_swap) + len = bswap_32(len); + + buf = malloc(len); + if (!buf) + return NULL; + + ret = readn(fd, buf, len); + if (ret == (ssize_t)len) { + /* + * strings are padded by zeroes + * thus the actual strlen of buf + * may be less than len + */ + return buf; + } + + free(buf); + return NULL; +} + +int +perf_header__set_cmdline(int argc, const char **argv) +{ + int i; + + /* + * If header_argv has already been set, do not override it. + * This allows a command to set the cmdline, parse args and + * then call another builtin function that implements a + * command -- e.g, cmd_kvm calling cmd_record. + */ + if (header_argv) + return 0; + + header_argc = (u32)argc; + + /* do not include NULL termination */ + header_argv = calloc(argc, sizeof(char *)); + if (!header_argv) + return -ENOMEM; + + /* + * must copy argv contents because it gets moved + * around during option parsing + */ + for (i = 0; i < argc ; i++) + header_argv[i] = argv[i]; + + return 0; +} + +static int write_tracing_data(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist) +{ + return read_tracing_data(fd, &evlist->entries); +} + + +static int write_build_id(int fd, struct perf_header *h, + struct perf_evlist *evlist __maybe_unused) +{ + struct perf_session *session; + int err; + + session = container_of(h, struct perf_session, header); + + if (!perf_session__read_build_ids(session, true)) + return -1; + + err = perf_session__write_buildid_table(session, fd); + if (err < 0) { + pr_debug("failed to write buildid table\n"); + return err; + } + perf_session__cache_build_ids(session); + + return 0; +} + +static int write_hostname(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) +{ + struct utsname uts; + int ret; + + ret = uname(&uts); + if (ret < 0) + return -1; + + return do_write_string(fd, uts.nodename); +} + +static int write_osrelease(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) +{ + struct utsname uts; + int ret; + + ret = uname(&uts); + if (ret < 0) + return -1; + + return do_write_string(fd, uts.release); +} + +static int write_arch(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) +{ + struct utsname uts; + int ret; + + ret = uname(&uts); + if (ret < 0) + return -1; + + return do_write_string(fd, uts.machine); +} + +static int write_version(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) +{ + return do_write_string(fd, perf_version_string); +} + +static int __write_cpudesc(int fd, const char *cpuinfo_proc) +{ + FILE *file; + char *buf = NULL; + char *s, *p; + const char *search = cpuinfo_proc; + size_t len = 0; + int ret = -1; + + if (!search) + return -1; + + file = fopen("/proc/cpuinfo", "r"); + if (!file) + return -1; + + while (getline(&buf, &len, file) > 0) { + ret = strncmp(buf, search, strlen(search)); + if (!ret) + break; + } + + if (ret) { + ret = -1; + goto done; + } + + s = buf; + + p = strchr(buf, ':'); + if (p && *(p+1) == ' ' && *(p+2)) + s = p + 2; + p = strchr(s, '\n'); + if (p) + *p = '\0'; + + /* squash extra space characters (branding string) */ + p = s; + while (*p) { + if 
(isspace(*p)) {
+ char *r = p + 1;
+ char *q = r;
+ *p = ' ';
+ while (*q && isspace(*q))
+ q++;
+ if (q != (p+1))
+ while ((*r++ = *q++));
+ }
+ p++;
+ }
+ ret = do_write_string(fd, s);
+done:
+ free(buf);
+ fclose(file);
+ return ret;
+}
+
+static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+#ifndef CPUINFO_PROC
+#define CPUINFO_PROC {"model name", }
+#endif
+ const char *cpuinfo_procs[] = CPUINFO_PROC;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
+ int ret;
+ ret = __write_cpudesc(fd, cpuinfo_procs[i]);
+ if (ret >= 0)
+ return ret;
+ }
+ return -1;
+}
+
+
+static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ long nr;
+ u32 nrc, nra;
+ int ret;
+
+ nr = sysconf(_SC_NPROCESSORS_CONF);
+ if (nr < 0)
+ return -1;
+
+ nrc = (u32)(nr & UINT_MAX);
+
+ nr = sysconf(_SC_NPROCESSORS_ONLN);
+ if (nr < 0)
+ return -1;
+
+ nra = (u32)(nr & UINT_MAX);
+
+ ret = do_write(fd, &nrc, sizeof(nrc));
+ if (ret < 0)
+ return ret;
+
+ return do_write(fd, &nra, sizeof(nra));
+}
+
+static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ u32 nre, nri, sz;
+ int ret;
+
+ nre = evlist->nr_entries;
+
+ /*
+ * write number of events
+ */
+ ret = do_write(fd, &nre, sizeof(nre));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * size of perf_event_attr struct
+ */
+ sz = (u32)sizeof(evsel->attr);
+ ret = do_write(fd, &sz, sizeof(sz));
+ if (ret < 0)
+ return ret;
+
+ evlist__for_each(evlist, evsel) {
+ ret = do_write(fd, &evsel->attr, sz);
+ if (ret < 0)
+ return ret;
+ /*
+ * write number of unique ids per event;
+ * there is one id per instance of an event
+ *
+ * copy into an nri to be independent of the
+ * type of ids.
+ */
+ nri = evsel->ids;
+ ret = do_write(fd, &nri, sizeof(nri));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * write event string as passed on cmdline
+ */
+ ret = do_write_string(fd, perf_evsel__name(evsel));
+ if (ret < 0)
+ return ret;
+ /*
+ * write unique ids for this event
+ */
+ ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ char buf[MAXPATHLEN];
+ char proc[32];
+ u32 i, n;
+ int ret;
+
+ /*
+ * actual path to perf binary
+ */
+ sprintf(proc, "/proc/%d/exe", getpid());
+ ret = readlink(proc, buf, sizeof(buf));
+ if (ret <= 0)
+ return -1;
+
+ /* readlink() does not add null termination */
+ buf[ret] = '\0';
+
+ /* account for binary path */
+ n = header_argc + 1;
+
+ ret = do_write(fd, &n, sizeof(n));
+ if (ret < 0)
+ return ret;
+
+ ret = do_write_string(fd, buf);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0 ; i < header_argc; i++) {
+ ret = do_write_string(fd, header_argv[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+#define CORE_SIB_FMT \
+ "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
+#define THRD_SIB_FMT \
+ "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
+
+struct cpu_topo {
+ u32 core_sib;
+ u32 thread_sib;
+ char **core_siblings;
+ char **thread_siblings;
+};
+
+static int build_cpu_topo(struct cpu_topo *tp, int cpu)
+{
+ FILE *fp;
+ char filename[MAXPATHLEN];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ ssize_t sret;
+ u32 i = 0;
+ int ret = -1;
+
+ sprintf(filename, CORE_SIB_FMT, cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ goto
try_threads; + + sret = getline(&buf, &len, fp); + fclose(fp); + if (sret <= 0) + goto try_threads; + + p = strchr(buf, '\n'); + if (p) + *p = '\0'; + + for (i = 0; i < tp->core_sib; i++) { + if (!strcmp(buf, tp->core_siblings[i])) + break; + } + if (i == tp->core_sib) { + tp->core_siblings[i] = buf; + tp->core_sib++; + buf = NULL; + len = 0; + } + ret = 0; + +try_threads: + sprintf(filename, THRD_SIB_FMT, cpu); + fp = fopen(filename, "r"); + if (!fp) + goto done; + + if (getline(&buf, &len, fp) <= 0) + goto done; + + p = strchr(buf, '\n'); + if (p) + *p = '\0'; + + for (i = 0; i < tp->thread_sib; i++) { + if (!strcmp(buf, tp->thread_siblings[i])) + break; + } + if (i == tp->thread_sib) { + tp->thread_siblings[i] = buf; + tp->thread_sib++; + buf = NULL; + } + ret = 0; +done: + if(fp) + fclose(fp); + free(buf); + return ret; +} + +static void free_cpu_topo(struct cpu_topo *tp) +{ + u32 i; + + if (!tp) + return; + + for (i = 0 ; i < tp->core_sib; i++) + zfree(&tp->core_siblings[i]); + + for (i = 0 ; i < tp->thread_sib; i++) + zfree(&tp->thread_siblings[i]); + + free(tp); +} + +static struct cpu_topo *build_cpu_topology(void) +{ + struct cpu_topo *tp; + void *addr; + u32 nr, i; + size_t sz; + long ncpus; + int ret = -1; + + ncpus = sysconf(_SC_NPROCESSORS_CONF); + if (ncpus < 0) + return NULL; + + nr = (u32)(ncpus & UINT_MAX); + + sz = nr * sizeof(char *); + + addr = calloc(1, sizeof(*tp) + 2 * sz); + if (!addr) + return NULL; + + tp = addr; + + addr += sizeof(*tp); + tp->core_siblings = addr; + addr += sz; + tp->thread_siblings = addr; + + for (i = 0; i < nr; i++) { + ret = build_cpu_topo(tp, i); + if (ret < 0) + break; + } + if (ret) { + free_cpu_topo(tp); + tp = NULL; + } + return tp; +} + +static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) +{ + struct cpu_topo *tp; + u32 i; + int ret; + + tp = build_cpu_topology(); + if (!tp) + return -1; + + ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib)); + if (ret < 0) + goto done; + + for (i = 0; i < tp->core_sib; i++) { + ret = do_write_string(fd, tp->core_siblings[i]); + if (ret < 0) + goto done; + } + ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib)); + if (ret < 0) + goto done; + + for (i = 0; i < tp->thread_sib; i++) { + ret = do_write_string(fd, tp->thread_siblings[i]); + if (ret < 0) + break; + } +done: + free_cpu_topo(tp); + return ret; +} + + + +static int write_total_mem(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) +{ + char *buf = NULL; + FILE *fp; + size_t len = 0; + int ret = -1, n; + uint64_t mem; + + fp = fopen("/proc/meminfo", "r"); + if (!fp) + return -1; + + while (getline(&buf, &len, fp) > 0) { + ret = strncmp(buf, "MemTotal:", 9); + if (!ret) + break; + } + if (!ret) { + n = sscanf(buf, "%*s %"PRIu64, &mem); + if (n == 1) + ret = do_write(fd, &mem, sizeof(mem)); + } else + ret = -1; + free(buf); + fclose(fp); + return ret; +} + +static int write_topo_node(int fd, int node) +{ + char str[MAXPATHLEN]; + char field[32]; + char *buf = NULL, *p; + size_t len = 0; + FILE *fp; + u64 mem_total, mem_free, mem; + int ret = -1; + + sprintf(str, "/sys/devices/system/node/node%d/meminfo", node); + fp = fopen(str, "r"); + if (!fp) + return -1; + + while (getline(&buf, &len, fp) > 0) { + /* skip over invalid lines */ + if (!strchr(buf, ':')) + continue; + if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2) + goto done; + if (!strcmp(field, "MemTotal:")) + mem_total = mem; + if (!strcmp(field, "MemFree:")) + 
mem_free = mem; + } + + fclose(fp); + fp = NULL; + + ret = do_write(fd, &mem_total, sizeof(u64)); + if (ret) + goto done; + + ret = do_write(fd, &mem_free, sizeof(u64)); + if (ret) + goto done; + + ret = -1; + sprintf(str, "/sys/devices/system/node/node%d/cpulist", node); + + fp = fopen(str, "r"); + if (!fp) + goto done; + + if (getline(&buf, &len, fp) <= 0) + goto done; + + p = strchr(buf, '\n'); + if (p) + *p = '\0'; + + ret = do_write_string(fd, buf); +done: + free(buf); + if (fp) + fclose(fp); + return ret; +} + +static int write_numa_topology(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) +{ + char *buf = NULL; + size_t len = 0; + FILE *fp; + struct cpu_map *node_map = NULL; + char *c; + u32 nr, i, j; + int ret = -1; + + fp = fopen("/sys/devices/system/node/online", "r"); + if (!fp) + return -1; + + if (getline(&buf, &len, fp) <= 0) + goto done; + + c = strchr(buf, '\n'); + if (c) + *c = '\0'; + + node_map = cpu_map__new(buf); + if (!node_map) + goto done; + + nr = (u32)node_map->nr; + + ret = do_write(fd, &nr, sizeof(nr)); + if (ret < 0) + goto done; + + for (i = 0; i < nr; i++) { + j = (u32)node_map->map[i]; + ret = do_write(fd, &j, sizeof(j)); + if (ret < 0) + break; + + ret = write_topo_node(fd, i); + if (ret < 0) + break; + } +done: + free(buf); + fclose(fp); + free(node_map); + return ret; +} + +/* + * File format: + * + * struct pmu_mappings { + * u32 pmu_num; + * struct pmu_map { + * u32 type; + * char name[]; + * }[pmu_num]; + * }; + */ + +static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) +{ + struct perf_pmu *pmu = NULL; + off_t offset = lseek(fd, 0, SEEK_CUR); + __u32 pmu_num = 0; + int ret; + + /* write real pmu_num later */ + ret = do_write(fd, &pmu_num, sizeof(pmu_num)); + if (ret < 0) + return ret; + + while ((pmu = perf_pmu__scan(pmu))) { + if (!pmu->name) + continue; + pmu_num++; + + ret = do_write(fd, &pmu->type, sizeof(pmu->type)); + if (ret < 0) + return ret; + + ret = do_write_string(fd, pmu->name); + if (ret < 0) + return ret; + } + + if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) { + /* discard all */ + lseek(fd, offset, SEEK_SET); + return -1; + } + + return 0; +} + +/* + * File format: + * + * struct group_descs { + * u32 nr_groups; + * struct group_desc { + * char name[]; + * u32 leader_idx; + * u32 nr_members; + * }[nr_groups]; + * }; + */ +static int write_group_desc(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist) +{ + u32 nr_groups = evlist->nr_groups; + struct perf_evsel *evsel; + int ret; + + ret = do_write(fd, &nr_groups, sizeof(nr_groups)); + if (ret < 0) + return ret; + + evlist__for_each(evlist, evsel) { + if (perf_evsel__is_group_leader(evsel) && + evsel->nr_members > 1) { + const char *name = evsel->group_name ?: "{anon_group}"; + u32 leader_idx = evsel->idx; + u32 nr_members = evsel->nr_members; + + ret = do_write_string(fd, name); + if (ret < 0) + return ret; + + ret = do_write(fd, &leader_idx, sizeof(leader_idx)); + if (ret < 0) + return ret; + + ret = do_write(fd, &nr_members, sizeof(nr_members)); + if (ret < 0) + return ret; + } + } + return 0; +} + +/* + * default get_cpuid(): nothing gets recorded + * actual implementation must be in arch/$(ARCH)/util/header.c + */ +int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused, + size_t sz __maybe_unused) +{ + return -1; +} + +static int write_cpuid(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist 
__maybe_unused) +{ + char buffer[64]; + int ret; + + ret = get_cpuid(buffer, sizeof(buffer)); + if (!ret) + goto write_it; + + return -1; +write_it: + return do_write_string(fd, buffer); +} + +static int write_branch_stack(int fd __maybe_unused, + struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) +{ + return 0; +} + +static void print_hostname(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) +{ + fprintf(fp, "# hostname : %s\n", ph->env.hostname); +} + +static void print_osrelease(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) +{ + fprintf(fp, "# os release : %s\n", ph->env.os_release); +} + +static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp) +{ + fprintf(fp, "# arch : %s\n", ph->env.arch); +} + +static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) +{ + fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc); +} + +static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) +{ + fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online); + fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail); +} + +static void print_version(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) +{ + fprintf(fp, "# perf version : %s\n", ph->env.version); +} + +static void print_cmdline(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) +{ + int nr, i; + char *str; + + nr = ph->env.nr_cmdline; + str = ph->env.cmdline; + + fprintf(fp, "# cmdline : "); + + for (i = 0; i < nr; i++) { + fprintf(fp, "%s ", str); + str += strlen(str) + 1; + } + fputc('\n', fp); +} + +static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) +{ + int nr, i; + char *str; + + nr = ph->env.nr_sibling_cores; + str = ph->env.sibling_cores; + + for (i = 0; i < nr; i++) { + fprintf(fp, "# sibling cores : %s\n", str); + str += strlen(str) + 1; + } + + nr = ph->env.nr_sibling_threads; + str = ph->env.sibling_threads; + + for (i = 0; i < nr; i++) { + fprintf(fp, "# sibling threads : %s\n", str); + str += strlen(str) + 1; + } +} + +static void free_event_desc(struct perf_evsel *events) +{ + struct perf_evsel *evsel; + + if (!events) + return; + + for (evsel = events; evsel->attr.size; evsel++) { + zfree(&evsel->name); + zfree(&evsel->id); + } + + free(events); +} + +static struct perf_evsel * +read_event_desc(struct perf_header *ph, int fd) +{ + struct perf_evsel *evsel, *events = NULL; + u64 *id; + void *buf = NULL; + u32 nre, sz, nr, i, j; + ssize_t ret; + size_t msz; + + /* number of events */ + ret = readn(fd, &nre, sizeof(nre)); + if (ret != (ssize_t)sizeof(nre)) + goto error; + + if (ph->needs_swap) + nre = bswap_32(nre); + + ret = readn(fd, &sz, sizeof(sz)); + if (ret != (ssize_t)sizeof(sz)) + goto error; + + if (ph->needs_swap) + sz = bswap_32(sz); + + /* buffer to hold on file attr struct */ + buf = malloc(sz); + if (!buf) + goto error; + + /* the last event terminates with evsel->attr.size == 0: */ + events = calloc(nre + 1, sizeof(*events)); + if (!events) + goto error; + + msz = sizeof(evsel->attr); + if (sz < msz) + msz = sz; + + for (i = 0, evsel = events; i < nre; evsel++, i++) { + evsel->idx = i; + + /* + * must read entire on-file attr struct to + * sync up with layout. 
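+ * For example, if the file carries sz = 128 bytes per attr (a newer,
+ * hypothetical ABI revision) while our local struct perf_event_attr is
+ * only msz bytes, all sz bytes must still be consumed from the file so
+ * the next attr is read from the correct offset; only msz bytes are
+ * then copied into evsel->attr.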
+ */ + ret = readn(fd, buf, sz); + if (ret != (ssize_t)sz) + goto error; + + if (ph->needs_swap) + perf_event__attr_swap(buf); + + memcpy(&evsel->attr, buf, msz); + + ret = readn(fd, &nr, sizeof(nr)); + if (ret != (ssize_t)sizeof(nr)) + goto error; + + if (ph->needs_swap) { + nr = bswap_32(nr); + evsel->needs_swap = true; + } + + evsel->name = do_read_string(fd, ph); + + if (!nr) + continue; + + id = calloc(nr, sizeof(*id)); + if (!id) + goto error; + evsel->ids = nr; + evsel->id = id; + + for (j = 0 ; j < nr; j++) { + ret = readn(fd, id, sizeof(*id)); + if (ret != (ssize_t)sizeof(*id)) + goto error; + if (ph->needs_swap) + *id = bswap_64(*id); + id++; + } + } +out: + free(buf); + return events; +error: + if (events) + free_event_desc(events); + events = NULL; + goto out; +} + +static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val, + void *priv __attribute__((unused))) +{ + return fprintf(fp, ", %s = %s", name, val); +} + +static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) +{ + struct perf_evsel *evsel, *events = read_event_desc(ph, fd); + u32 j; + u64 *id; + + if (!events) { + fprintf(fp, "# event desc: not available or unable to read\n"); + return; + } + + for (evsel = events; evsel->attr.size; evsel++) { + fprintf(fp, "# event : name = %s, ", evsel->name); + + if (evsel->ids) { + fprintf(fp, ", id = {"); + for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) { + if (j) + fputc(',', fp); + fprintf(fp, " %"PRIu64, *id); + } + fprintf(fp, " }"); + } + + perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL); + + fputc('\n', fp); + } + + free_event_desc(events); +} + +static void print_total_mem(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) +{ + fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem); +} + +static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) +{ + u32 nr, c, i; + char *str, *tmp; + uint64_t mem_total, mem_free; + + /* nr nodes */ + nr = ph->env.nr_numa_nodes; + str = ph->env.numa_nodes; + + for (i = 0; i < nr; i++) { + /* node number */ + c = strtoul(str, &tmp, 0); + if (*tmp != ':') + goto error; + + str = tmp + 1; + mem_total = strtoull(str, &tmp, 0); + if (*tmp != ':') + goto error; + + str = tmp + 1; + mem_free = strtoull(str, &tmp, 0); + if (*tmp != ':') + goto error; + + fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB," + " free = %"PRIu64" kB\n", + c, mem_total, mem_free); + + str = tmp + 1; + fprintf(fp, "# node%u cpu list : %s\n", c, str); + + str += strlen(str) + 1; + } + return; +error: + fprintf(fp, "# numa topology : not available\n"); +} + +static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp) +{ + fprintf(fp, "# cpuid : %s\n", ph->env.cpuid); +} + +static void print_branch_stack(struct perf_header *ph __maybe_unused, + int fd __maybe_unused, FILE *fp) +{ + fprintf(fp, "# contains samples with branch stack\n"); +} + +static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) +{ + const char *delimiter = "# pmu mappings: "; + char *str, *tmp; + u32 pmu_num; + u32 type; + + pmu_num = ph->env.nr_pmu_mappings; + if (!pmu_num) { + fprintf(fp, "# pmu mappings: not available\n"); + return; + } + + str = ph->env.pmu_mappings; + + while (pmu_num) { + type = strtoul(str, &tmp, 0); + if (*tmp != ':') + goto error; + + str = tmp + 1; + fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type); + + delimiter = ", "; + str += strlen(str) + 1; + pmu_num--; + } + + fprintf(fp, "\n"); + + if (!pmu_num) + 
return; +error: + fprintf(fp, "# pmu mappings: unable to read\n"); +} + +static void print_group_desc(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) +{ + struct perf_session *session; + struct perf_evsel *evsel; + u32 nr = 0; + + session = container_of(ph, struct perf_session, header); + + evlist__for_each(session->evlist, evsel) { + if (perf_evsel__is_group_leader(evsel) && + evsel->nr_members > 1) { + fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", + perf_evsel__name(evsel)); + + nr = evsel->nr_members - 1; + } else if (nr) { + fprintf(fp, ",%s", perf_evsel__name(evsel)); + + if (--nr == 0) + fprintf(fp, "}\n"); + } + } +} + +static int __event_process_build_id(struct build_id_event *bev, + char *filename, + struct perf_session *session) +{ + int err = -1; + struct dsos *dsos; + struct machine *machine; + u16 misc; + struct dso *dso; + enum dso_kernel_type dso_type; + + machine = perf_session__findnew_machine(session, bev->pid); + if (!machine) + goto out; + + misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + + switch (misc) { + case PERF_RECORD_MISC_KERNEL: + dso_type = DSO_TYPE_KERNEL; + dsos = &machine->kernel_dsos; + break; + case PERF_RECORD_MISC_GUEST_KERNEL: + dso_type = DSO_TYPE_GUEST_KERNEL; + dsos = &machine->kernel_dsos; + break; + case PERF_RECORD_MISC_USER: + case PERF_RECORD_MISC_GUEST_USER: + dso_type = DSO_TYPE_USER; + dsos = &machine->user_dsos; + break; + default: + goto out; + } + + dso = __dsos__findnew(dsos, filename); + if (dso != NULL) { + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + + dso__set_build_id(dso, &bev->build_id); + + if (!is_kernel_module(filename)) + dso->kernel = dso_type; + + build_id__sprintf(dso->build_id, sizeof(dso->build_id), + sbuild_id); + pr_debug("build id event received for %s: %s\n", + dso->long_name, sbuild_id); + } + + err = 0; +out: + return err; +} + +static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, + int input, u64 offset, u64 size) +{ + struct perf_session *session = container_of(header, struct perf_session, header); + struct { + struct perf_event_header header; + u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))]; + char filename[0]; + } old_bev; + struct build_id_event bev; + char filename[PATH_MAX]; + u64 limit = offset + size; + + while (offset < limit) { + ssize_t len; + + if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) + return -1; + + if (header->needs_swap) + perf_event_header__bswap(&old_bev.header); + + len = old_bev.header.size - sizeof(old_bev); + if (readn(input, filename, len) != len) + return -1; + + bev.header = old_bev.header; + + /* + * As the pid is the missing value, we need to fill + * it properly. The header.misc value give us nice hint. 
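+ * In effect: events marked guest kernel/user get the default guest
+ * kernel pid, and everything else is attributed to the host kernel,
+ * since this old format predates the guest support added by a1645ce1.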
+ */ + bev.pid = HOST_KERNEL_ID; + if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER || + bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL) + bev.pid = DEFAULT_GUEST_KERNEL_ID; + + memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); + __event_process_build_id(&bev, filename, session); + + offset += bev.header.size; + } + + return 0; +} + +static int perf_header__read_build_ids(struct perf_header *header, + int input, u64 offset, u64 size) +{ + struct perf_session *session = container_of(header, struct perf_session, header); + struct build_id_event bev; + char filename[PATH_MAX]; + u64 limit = offset + size, orig_offset = offset; + int err = -1; + + while (offset < limit) { + ssize_t len; + + if (readn(input, &bev, sizeof(bev)) != sizeof(bev)) + goto out; + + if (header->needs_swap) + perf_event_header__bswap(&bev.header); + + len = bev.header.size - sizeof(bev); + if (readn(input, filename, len) != len) + goto out; + /* + * The a1645ce1 changeset: + * + * "perf: 'perf kvm' tool for monitoring guest performance from host" + * + * Added a field to struct build_id_event that broke the file + * format. + * + * Since the kernel build-id is the first entry, process the + * table using the old format if the well known + * '[kernel.kallsyms]' string for the kernel build-id has the + * first 4 characters chopped off (where the pid_t sits). + */ + if (memcmp(filename, "nel.kallsyms]", 13) == 0) { + if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1) + return -1; + return perf_header__read_build_ids_abi_quirk(header, input, offset, size); + } + + __event_process_build_id(&bev, filename, session); + + offset += bev.header.size; + } + err = 0; +out: + return err; +} + +static int process_tracing_data(struct perf_file_section *section __maybe_unused, + struct perf_header *ph __maybe_unused, + int fd, void *data) +{ + ssize_t ret = trace_report(fd, data, false); + return ret < 0 ? -1 : 0; +} + +static int process_build_id(struct perf_file_section *section, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) + pr_debug("Failed to read buildids, continuing...\n"); + return 0; +} + +static int process_hostname(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ph->env.hostname = do_read_string(fd, ph); + return ph->env.hostname ? 0 : -ENOMEM; +} + +static int process_osrelease(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ph->env.os_release = do_read_string(fd, ph); + return ph->env.os_release ? 0 : -ENOMEM; +} + +static int process_version(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ph->env.version = do_read_string(fd, ph); + return ph->env.version ? 0 : -ENOMEM; +} + +static int process_arch(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ph->env.arch = do_read_string(fd, ph); + return ph->env.arch ? 
0 : -ENOMEM; +} + +static int process_nrcpus(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ssize_t ret; + u32 nr; + + ret = readn(fd, &nr, sizeof(nr)); + if (ret != sizeof(nr)) + return -1; + + if (ph->needs_swap) + nr = bswap_32(nr); + + ph->env.nr_cpus_online = nr; + + ret = readn(fd, &nr, sizeof(nr)); + if (ret != sizeof(nr)) + return -1; + + if (ph->needs_swap) + nr = bswap_32(nr); + + ph->env.nr_cpus_avail = nr; + return 0; +} + +static int process_cpudesc(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ph->env.cpu_desc = do_read_string(fd, ph); + return ph->env.cpu_desc ? 0 : -ENOMEM; +} + +static int process_cpuid(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ph->env.cpuid = do_read_string(fd, ph); + return ph->env.cpuid ? 0 : -ENOMEM; +} + +static int process_total_mem(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + uint64_t mem; + ssize_t ret; + + ret = readn(fd, &mem, sizeof(mem)); + if (ret != sizeof(mem)) + return -1; + + if (ph->needs_swap) + mem = bswap_64(mem); + + ph->env.total_mem = mem; + return 0; +} + +static struct perf_evsel * +perf_evlist__find_by_index(struct perf_evlist *evlist, int idx) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + if (evsel->idx == idx) + return evsel; + } + + return NULL; +} + +static void +perf_evlist__set_event_name(struct perf_evlist *evlist, + struct perf_evsel *event) +{ + struct perf_evsel *evsel; + + if (!event->name) + return; + + evsel = perf_evlist__find_by_index(evlist, event->idx); + if (!evsel) + return; + + if (evsel->name) + return; + + evsel->name = strdup(event->name); +} + +static int +process_event_desc(struct perf_file_section *section __maybe_unused, + struct perf_header *header, int fd, + void *data __maybe_unused) +{ + struct perf_session *session; + struct perf_evsel *evsel, *events = read_event_desc(header, fd); + + if (!events) + return 0; + + session = container_of(header, struct perf_session, header); + for (evsel = events; evsel->attr.size; evsel++) + perf_evlist__set_event_name(session->evlist, evsel); + + free_event_desc(events); + + return 0; +} + +static int process_cmdline(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ssize_t ret; + char *str; + u32 nr, i; + struct strbuf sb; + + ret = readn(fd, &nr, sizeof(nr)); + if (ret != sizeof(nr)) + return -1; + + if (ph->needs_swap) + nr = bswap_32(nr); + + ph->env.nr_cmdline = nr; + strbuf_init(&sb, 128); + + for (i = 0; i < nr; i++) { + str = do_read_string(fd, ph); + if (!str) + goto error; + + /* include a NULL character at the end */ + strbuf_add(&sb, str, strlen(str) + 1); + free(str); + } + ph->env.cmdline = strbuf_detach(&sb, NULL); + return 0; + +error: + strbuf_release(&sb); + return -1; +} + +static int process_cpu_topology(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ssize_t ret; + u32 nr, i; + char *str; + struct strbuf sb; + + ret = readn(fd, &nr, sizeof(nr)); + if (ret != sizeof(nr)) + return -1; + + if (ph->needs_swap) + nr = bswap_32(nr); + + ph->env.nr_sibling_cores = nr; + strbuf_init(&sb, 128); + + for (i = 0; i < nr; i++) { + str = do_read_string(fd, ph); + if (!str) + goto error; + + /* include a NULL 
character at the end */ + strbuf_add(&sb, str, strlen(str) + 1); + free(str); + } + ph->env.sibling_cores = strbuf_detach(&sb, NULL); + + ret = readn(fd, &nr, sizeof(nr)); + if (ret != sizeof(nr)) + return -1; + + if (ph->needs_swap) + nr = bswap_32(nr); + + ph->env.nr_sibling_threads = nr; + + for (i = 0; i < nr; i++) { + str = do_read_string(fd, ph); + if (!str) + goto error; + + /* include a NULL character at the end */ + strbuf_add(&sb, str, strlen(str) + 1); + free(str); + } + ph->env.sibling_threads = strbuf_detach(&sb, NULL); + return 0; + +error: + strbuf_release(&sb); + return -1; +} + +static int process_numa_topology(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ssize_t ret; + u32 nr, node, i; + char *str; + uint64_t mem_total, mem_free; + struct strbuf sb; + + /* nr nodes */ + ret = readn(fd, &nr, sizeof(nr)); + if (ret != sizeof(nr)) + goto error; + + if (ph->needs_swap) + nr = bswap_32(nr); + + ph->env.nr_numa_nodes = nr; + strbuf_init(&sb, 256); + + for (i = 0; i < nr; i++) { + /* node number */ + ret = readn(fd, &node, sizeof(node)); + if (ret != sizeof(node)) + goto error; + + ret = readn(fd, &mem_total, sizeof(u64)); + if (ret != sizeof(u64)) + goto error; + + ret = readn(fd, &mem_free, sizeof(u64)); + if (ret != sizeof(u64)) + goto error; + + if (ph->needs_swap) { + node = bswap_32(node); + mem_total = bswap_64(mem_total); + mem_free = bswap_64(mem_free); + } + + strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":", + node, mem_total, mem_free); + + str = do_read_string(fd, ph); + if (!str) + goto error; + + /* include a NULL character at the end */ + strbuf_add(&sb, str, strlen(str) + 1); + free(str); + } + ph->env.numa_nodes = strbuf_detach(&sb, NULL); + return 0; + +error: + strbuf_release(&sb); + return -1; +} + +static int process_pmu_mappings(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ssize_t ret; + char *name; + u32 pmu_num; + u32 type; + struct strbuf sb; + + ret = readn(fd, &pmu_num, sizeof(pmu_num)); + if (ret != sizeof(pmu_num)) + return -1; + + if (ph->needs_swap) + pmu_num = bswap_32(pmu_num); + + if (!pmu_num) { + pr_debug("pmu mappings not available\n"); + return 0; + } + + ph->env.nr_pmu_mappings = pmu_num; + strbuf_init(&sb, 128); + + while (pmu_num) { + if (readn(fd, &type, sizeof(type)) != sizeof(type)) + goto error; + if (ph->needs_swap) + type = bswap_32(type); + + name = do_read_string(fd, ph); + if (!name) + goto error; + + strbuf_addf(&sb, "%u:%s", type, name); + /* include a NULL character at the end */ + strbuf_add(&sb, "", 1); + + free(name); + pmu_num--; + } + ph->env.pmu_mappings = strbuf_detach(&sb, NULL); + return 0; + +error: + strbuf_release(&sb); + return -1; +} + +static int process_group_desc(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + size_t ret = -1; + u32 i, nr, nr_groups; + struct perf_session *session; + struct perf_evsel *evsel, *leader = NULL; + struct group_desc { + char *name; + u32 leader_idx; + u32 nr_members; + } *desc; + + if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups)) + return -1; + + if (ph->needs_swap) + nr_groups = bswap_32(nr_groups); + + ph->env.nr_groups = nr_groups; + if (!nr_groups) { + pr_debug("group desc not available\n"); + return 0; + } + + desc = calloc(nr_groups, sizeof(*desc)); + if (!desc) + return -1; + + for (i = 0; i < nr_groups; i++) { + desc[i].name = do_read_string(fd, 
ph); + if (!desc[i].name) + goto out_free; + + if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32)) + goto out_free; + + if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32)) + goto out_free; + + if (ph->needs_swap) { + desc[i].leader_idx = bswap_32(desc[i].leader_idx); + desc[i].nr_members = bswap_32(desc[i].nr_members); + } + } + + /* + * Rebuild group relationship based on the group_desc + */ + session = container_of(ph, struct perf_session, header); + session->evlist->nr_groups = nr_groups; + + i = nr = 0; + evlist__for_each(session->evlist, evsel) { + if (evsel->idx == (int) desc[i].leader_idx) { + evsel->leader = evsel; + /* {anon_group} is a dummy name */ + if (strcmp(desc[i].name, "{anon_group}")) { + evsel->group_name = desc[i].name; + desc[i].name = NULL; + } + evsel->nr_members = desc[i].nr_members; + + if (i >= nr_groups || nr > 0) { + pr_debug("invalid group desc\n"); + goto out_free; + } + + leader = evsel; + nr = evsel->nr_members - 1; + i++; + } else if (nr) { + /* This is a group member */ + evsel->leader = leader; + + nr--; + } + } + + if (i != nr_groups || nr != 0) { + pr_debug("invalid group desc\n"); + goto out_free; + } + + ret = 0; +out_free: + for (i = 0; i < nr_groups; i++) + zfree(&desc[i].name); + free(desc); + + return ret; +} + +struct feature_ops { + int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); + void (*print)(struct perf_header *h, int fd, FILE *fp); + int (*process)(struct perf_file_section *section, + struct perf_header *h, int fd, void *data); + const char *name; + bool full_only; +}; + +#define FEAT_OPA(n, func) \ + [n] = { .name = #n, .write = write_##func, .print = print_##func } +#define FEAT_OPP(n, func) \ + [n] = { .name = #n, .write = write_##func, .print = print_##func, \ + .process = process_##func } +#define FEAT_OPF(n, func) \ + [n] = { .name = #n, .write = write_##func, .print = print_##func, \ + .process = process_##func, .full_only = true } + +/* feature_ops not implemented: */ +#define print_tracing_data NULL +#define print_build_id NULL + +static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { + FEAT_OPP(HEADER_TRACING_DATA, tracing_data), + FEAT_OPP(HEADER_BUILD_ID, build_id), + FEAT_OPP(HEADER_HOSTNAME, hostname), + FEAT_OPP(HEADER_OSRELEASE, osrelease), + FEAT_OPP(HEADER_VERSION, version), + FEAT_OPP(HEADER_ARCH, arch), + FEAT_OPP(HEADER_NRCPUS, nrcpus), + FEAT_OPP(HEADER_CPUDESC, cpudesc), + FEAT_OPP(HEADER_CPUID, cpuid), + FEAT_OPP(HEADER_TOTAL_MEM, total_mem), + FEAT_OPP(HEADER_EVENT_DESC, event_desc), + FEAT_OPP(HEADER_CMDLINE, cmdline), + FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology), + FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology), + FEAT_OPA(HEADER_BRANCH_STACK, branch_stack), + FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings), + FEAT_OPP(HEADER_GROUP_DESC, group_desc), +}; + +struct header_print_data { + FILE *fp; + bool full; /* extended list of headers */ +}; + +static int perf_file_section__fprintf_info(struct perf_file_section *section, + struct perf_header *ph, + int feat, int fd, void *data) +{ + struct header_print_data *hd = data; + + if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { + pr_debug("Failed to lseek to %" PRIu64 " offset for feature " + "%d, continuing...\n", section->offset, feat); + return 0; + } + if (feat >= HEADER_LAST_FEATURE) { + pr_warning("unknown feature %d\n", feat); + return 0; + } + if (!feat_ops[feat].print) + return 0; + + if (!feat_ops[feat].full_only || hd->full) + feat_ops[feat].print(ph, fd, hd->fp); + else + fprintf(hd->fp, "# %s 
info available, use -I to display\n",
+ feat_ops[feat].name);
+
+ return 0;
+}
+
+int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
+{
+ struct header_print_data hd;
+ struct perf_header *header = &session->header;
+ int fd = perf_data_file__fd(session->file);
+ hd.fp = fp;
+ hd.full = full;
+
+ perf_header__process_sections(header, fd, &hd,
+ perf_file_section__fprintf_info);
+ return 0;
+}
+
+static int do_write_feat(int fd, struct perf_header *h, int type,
+ struct perf_file_section **p,
+ struct perf_evlist *evlist)
+{
+ int err;
+ int ret = 0;
+
+ if (perf_header__has_feat(h, type)) {
+ if (!feat_ops[type].write)
+ return -1;
+
+ (*p)->offset = lseek(fd, 0, SEEK_CUR);
+
+ err = feat_ops[type].write(fd, h, evlist);
+ if (err < 0) {
+ pr_debug("failed to write feature %d\n", type);
+
+ /* undo anything written */
+ lseek(fd, (*p)->offset, SEEK_SET);
+
+ return -1;
+ }
+ (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
+ (*p)++;
+ }
+ return ret;
+}
+
+static int perf_header__adds_write(struct perf_header *header,
+ struct perf_evlist *evlist, int fd)
+{
+ int nr_sections;
+ struct perf_file_section *feat_sec, *p;
+ int sec_size;
+ u64 sec_start;
+ int feat;
+ int err;
+
+ nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
+ if (!nr_sections)
+ return 0;
+
+ feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
+ if (feat_sec == NULL)
+ return -ENOMEM;
+
+ sec_size = sizeof(*feat_sec) * nr_sections;
+
+ sec_start = header->feat_offset;
+ lseek(fd, sec_start + sec_size, SEEK_SET);
+
+ for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
+ if (do_write_feat(fd, header, feat, &p, evlist))
+ perf_header__clear_feat(header, feat);
+ }
+
+ lseek(fd, sec_start, SEEK_SET);
+ /*
+ * may write more than needed due to a dropped feature, but
+ * this is okay, the reader will skip the missing entries
+ */
+ err = do_write(fd, feat_sec, sec_size);
+ if (err < 0)
+ pr_debug("failed to write feature section\n");
+ free(feat_sec);
+ return err;
+}
+
+int perf_header__write_pipe(int fd)
+{
+ struct perf_pipe_file_header f_header;
+ int err;
+
+ f_header = (struct perf_pipe_file_header){
+ .magic = PERF_MAGIC,
+ .size = sizeof(f_header),
+ };
+
+ err = do_write(fd, &f_header, sizeof(f_header));
+ if (err < 0) {
+ pr_debug("failed to write perf pipe header\n");
+ return err;
+ }
+
+ return 0;
+}
+
+int perf_session__write_header(struct perf_session *session,
+ struct perf_evlist *evlist,
+ int fd, bool at_exit)
+{
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ struct perf_header *header = &session->header;
+ struct perf_evsel *evsel;
+ u64 attr_offset;
+ int err;
+
+ lseek(fd, sizeof(f_header), SEEK_SET);
+
+ evlist__for_each(session->evlist, evsel) {
+ evsel->id_offset = lseek(fd, 0, SEEK_CUR);
+ err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
+ if (err < 0) {
+ pr_debug("failed to write perf header\n");
+ return err;
+ }
+ }
+
+ attr_offset = lseek(fd, 0, SEEK_CUR);
+
+ evlist__for_each(evlist, evsel) {
+ f_attr = (struct perf_file_attr){
+ .attr = evsel->attr,
+ .ids = {
+ .offset = evsel->id_offset,
+ .size = evsel->ids * sizeof(u64),
+ }
+ };
+ err = do_write(fd, &f_attr, sizeof(f_attr));
+ if (err < 0) {
+ pr_debug("failed to write perf header attribute\n");
+ return err;
+ }
+ }
+
+ if (!header->data_offset)
+ header->data_offset = lseek(fd, 0, SEEK_CUR);
+ header->feat_offset = header->data_offset + header->data_size;
+
+ if (at_exit) {
+ err = perf_header__adds_write(header, evlist, fd);
+ if (err < 0)
+ return err;
+ }
+
+ f_header = (struct perf_file_header){
+ .magic = PERF_MAGIC,
+ .size = sizeof(f_header),
+ .attr_size = sizeof(f_attr),
+ .attrs = {
+ .offset = attr_offset,
+ .size = evlist->nr_entries * sizeof(f_attr),
+ },
+ .data = {
+ .offset = header->data_offset,
+ .size = header->data_size,
+ },
+ /* event_types is ignored, store zeros */
+ };
+
+ memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
+
+ lseek(fd, 0, SEEK_SET);
+ err = do_write(fd, &f_header, sizeof(f_header));
+ if (err < 0) {
+ pr_debug("failed to write perf header\n");
+ return err;
+ }
+ lseek(fd, header->data_offset + header->data_size, SEEK_SET);
+
+ return 0;
+}
+
+static int perf_header__getbuffer64(struct perf_header *header,
+ int fd, void *buf, size_t size)
+{
+ if (readn(fd, buf, size) <= 0)
+ return -1;
+
+ if (header->needs_swap)
+ mem_bswap_64(buf, size);
+
+ return 0;
+}
+
+int perf_header__process_sections(struct perf_header *header, int fd,
+ void *data,
+ int (*process)(struct perf_file_section *section,
+ struct perf_header *ph,
+ int feat, int fd, void *data))
+{
+ struct perf_file_section *feat_sec, *sec;
+ int nr_sections;
+ int sec_size;
+ int feat;
+ int err;
+
+ nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
+ if (!nr_sections)
+ return 0;
+
+ feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
+ if (!feat_sec)
+ return -1;
+
+ sec_size = sizeof(*feat_sec) * nr_sections;
+
+ lseek(fd, header->feat_offset, SEEK_SET);
+
+ err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
+ if (err < 0)
+ goto out_free;
+
+ for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
+ err = process(sec++, header, feat, fd, data);
+ if (err < 0)
+ goto out_free;
+ }
+ err = 0;
+out_free:
+ free(feat_sec);
+ return err;
+}
+
+static const int attr_file_abi_sizes[] = {
+ [0] = PERF_ATTR_SIZE_VER0,
+ [1] = PERF_ATTR_SIZE_VER1,
+ [2] = PERF_ATTR_SIZE_VER2,
+ [3] = PERF_ATTR_SIZE_VER3,
+ [4] = PERF_ATTR_SIZE_VER4,
+ 0,
+};
+
+/*
+ * In the legacy file format, the magic number is not used to encode endianness.
+ * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
+ * on ABI revisions, we need to try all combinations for both endiannesses to
+ * detect the endianness.
+ */
+static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
+{
+ uint64_t ref_size, attr_size;
+ int i;
+
+ for (i = 0 ; attr_file_abi_sizes[i]; i++) {
+ ref_size = attr_file_abi_sizes[i] +
+ sizeof(struct perf_file_section);
+ if (hdr_sz != ref_size) {
+ attr_size = bswap_64(hdr_sz);
+ if (attr_size != ref_size)
+ continue;
+
+ ph->needs_swap = true;
+ }
+ pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
+ i,
+ ph->needs_swap);
+ return 0;
+ }
+ /* could not determine endianness */
+ return -1;
+}
+
+#define PERF_PIPE_HDR_VER0 16
+
+static const size_t attr_pipe_abi_sizes[] = {
+ [0] = PERF_PIPE_HDR_VER0,
+ 0,
+};
+
+/*
+ * In the legacy pipe format, there is an implicit assumption that the
+ * endianness between the host recording the samples and the host parsing
+ * the samples is the same. This is not always the case given that the pipe
+ * output may be redirected into a file and analyzed on a different machine
+ * with possibly a different endianness and perf_event ABI revisions in the
+ * perf tool itself.
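+ * For example (illustrative): a pipe header written on a little-endian
+ * host stores size = 16 (PERF_PIPE_HDR_VER0); read back on a big-endian
+ * host, the same bytes parse as 0x1000000000000000, and only after
+ * bswap_64() do they match a known ABI size, which is how needs_swap
+ * gets detected below.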
+ */ +static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph) +{ + u64 attr_size; + int i; + + for (i = 0 ; attr_pipe_abi_sizes[i]; i++) { + if (hdr_sz != attr_pipe_abi_sizes[i]) { + attr_size = bswap_64(hdr_sz); + if (attr_size != hdr_sz) + continue; + + ph->needs_swap = true; + } + pr_debug("Pipe ABI%d perf.data file detected\n", i); + return 0; + } + return -1; +} + +bool is_perf_magic(u64 magic) +{ + if (!memcmp(&magic, __perf_magic1, sizeof(magic)) + || magic == __perf_magic2 + || magic == __perf_magic2_sw) + return true; + + return false; +} + +static int check_magic_endian(u64 magic, uint64_t hdr_sz, + bool is_pipe, struct perf_header *ph) +{ + int ret; + + /* check for legacy format */ + ret = memcmp(&magic, __perf_magic1, sizeof(magic)); + if (ret == 0) { + ph->version = PERF_HEADER_VERSION_1; + pr_debug("legacy perf.data format\n"); + if (is_pipe) + return try_all_pipe_abis(hdr_sz, ph); + + return try_all_file_abis(hdr_sz, ph); + } + /* + * the new magic number serves two purposes: + * - unique number to identify actual perf.data files + * - encode endianness of file + */ + ph->version = PERF_HEADER_VERSION_2; + + /* check magic number with one endianness */ + if (magic == __perf_magic2) + return 0; + + /* check magic number with opposite endianness */ + if (magic != __perf_magic2_sw) + return -1; + + ph->needs_swap = true; + + return 0; +} + +int perf_file_header__read(struct perf_file_header *header, + struct perf_header *ph, int fd) +{ + ssize_t ret; + + lseek(fd, 0, SEEK_SET); + + ret = readn(fd, header, sizeof(*header)); + if (ret <= 0) + return -1; + + if (check_magic_endian(header->magic, + header->attr_size, false, ph) < 0) { + pr_debug("magic/endian check failed\n"); + return -1; + } + + if (ph->needs_swap) { + mem_bswap_64(header, offsetof(struct perf_file_header, + adds_features)); + } + + if (header->size != sizeof(*header)) { + /* Support the previous format */ + if (header->size == offsetof(typeof(*header), adds_features)) + bitmap_zero(header->adds_features, HEADER_FEAT_BITS); + else + return -1; + } else if (ph->needs_swap) { + /* + * feature bitmap is declared as an array of unsigned longs -- + * not good since its size can differ between the host that + * generated the data file and the host analyzing the file. + * + * We need to handle endianness, but we don't know the size of + * the unsigned long where the file was generated. Take a best + * guess at determining it: try 64-bit swap first (ie., file + * created on a 64-bit host), and check if the hostname feature + * bit is set (this feature bit is forced on as of fbe96f2). + * If the bit is not, undo the 64-bit swap and try a 32-bit + * swap. If the hostname bit is still not set (e.g., older data + * file), punt and fallback to the original behavior -- + * clearing all feature bits and setting buildid. 
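+ * In short, the forced-on HEADER_HOSTNAME bit serves as a probe: try
+ * the 64-bit swap first, and if the bit still is not set, undo it and
+ * try the 32-bit swap; if neither reveals the bit, assume an old data
+ * file and fall back to build-id only.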
+ */ + mem_bswap_64(&header->adds_features, + BITS_TO_U64(HEADER_FEAT_BITS)); + + if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { + /* unswap as u64 */ + mem_bswap_64(&header->adds_features, + BITS_TO_U64(HEADER_FEAT_BITS)); + + /* unswap as u32 */ + mem_bswap_32(&header->adds_features, + BITS_TO_U32(HEADER_FEAT_BITS)); + } + + if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { + bitmap_zero(header->adds_features, HEADER_FEAT_BITS); + set_bit(HEADER_BUILD_ID, header->adds_features); + } + } + + memcpy(&ph->adds_features, &header->adds_features, + sizeof(ph->adds_features)); + + ph->data_offset = header->data.offset; + ph->data_size = header->data.size; + ph->feat_offset = header->data.offset + header->data.size; + return 0; +} + +static int perf_file_section__process(struct perf_file_section *section, + struct perf_header *ph, + int feat, int fd, void *data) +{ + if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { + pr_debug("Failed to lseek to %" PRIu64 " offset for feature " + "%d, continuing...\n", section->offset, feat); + return 0; + } + + if (feat >= HEADER_LAST_FEATURE) { + pr_debug("unknown feature %d, continuing...\n", feat); + return 0; + } + + if (!feat_ops[feat].process) + return 0; + + return feat_ops[feat].process(section, ph, fd, data); +} + +static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, + struct perf_header *ph, int fd, + bool repipe) +{ + ssize_t ret; + + ret = readn(fd, header, sizeof(*header)); + if (ret <= 0) + return -1; + + if (check_magic_endian(header->magic, header->size, true, ph) < 0) { + pr_debug("endian/magic failed\n"); + return -1; + } + + if (ph->needs_swap) + header->size = bswap_64(header->size); + + if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0) + return -1; + + return 0; +} + +static int perf_header__read_pipe(struct perf_session *session) +{ + struct perf_header *header = &session->header; + struct perf_pipe_file_header f_header; + + if (perf_file_header__read_pipe(&f_header, header, + perf_data_file__fd(session->file), + session->repipe) < 0) { + pr_debug("incompatible file format\n"); + return -EINVAL; + } + + return 0; +} + +static int read_attr(int fd, struct perf_header *ph, + struct perf_file_attr *f_attr) +{ + struct perf_event_attr *attr = &f_attr->attr; + size_t sz, left; + size_t our_sz = sizeof(f_attr->attr); + ssize_t ret; + + memset(f_attr, 0, sizeof(*f_attr)); + + /* read minimal guaranteed structure */ + ret = readn(fd, attr, PERF_ATTR_SIZE_VER0); + if (ret <= 0) { + pr_debug("cannot read %d bytes of header attr\n", + PERF_ATTR_SIZE_VER0); + return -1; + } + + /* on file perf_event_attr size */ + sz = attr->size; + + if (ph->needs_swap) + sz = bswap_32(sz); + + if (sz == 0) { + /* assume ABI0 */ + sz = PERF_ATTR_SIZE_VER0; + } else if (sz > our_sz) { + pr_debug("file uses a more recent and unsupported ABI" + " (%zu bytes extra)\n", sz - our_sz); + return -1; + } + /* what we have not yet read and that we know about */ + left = sz - PERF_ATTR_SIZE_VER0; + if (left) { + void *ptr = attr; + ptr += PERF_ATTR_SIZE_VER0; + + ret = readn(fd, ptr, left); + } + /* read perf_file_section, ids are read in caller */ + ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids)); + + return ret <= 0 ? 
-1 : 0; +} + +static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, + struct pevent *pevent) +{ + struct event_format *event; + char bf[128]; + + /* already prepared */ + if (evsel->tp_format) + return 0; + + if (pevent == NULL) { + pr_debug("broken or missing trace data\n"); + return -1; + } + + event = pevent_find_event(pevent, evsel->attr.config); + if (event == NULL) + return -1; + + if (!evsel->name) { + snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name); + evsel->name = strdup(bf); + if (evsel->name == NULL) + return -1; + } + + evsel->tp_format = event; + return 0; +} + +static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist, + struct pevent *pevent) +{ + struct perf_evsel *pos; + + evlist__for_each(evlist, pos) { + if (pos->attr.type == PERF_TYPE_TRACEPOINT && + perf_evsel__prepare_tracepoint_event(pos, pevent)) + return -1; + } + + return 0; +} + +int perf_session__read_header(struct perf_session *session) +{ + struct perf_data_file *file = session->file; + struct perf_header *header = &session->header; + struct perf_file_header f_header; + struct perf_file_attr f_attr; + u64 f_id; + int nr_attrs, nr_ids, i, j; + int fd = perf_data_file__fd(file); + + session->evlist = perf_evlist__new(); + if (session->evlist == NULL) + return -ENOMEM; + + if (perf_data_file__is_pipe(file)) + return perf_header__read_pipe(session); + + if (perf_file_header__read(&f_header, header, fd) < 0) + return -EINVAL; + + /* + * Sanity check that perf.data was written cleanly; data size is + * initialized to 0 and updated only if the on_exit function is run. + * If data size is still 0 then the file contains only partial + * information. Just warn user and process it as much as it can. + */ + if (f_header.data.size == 0) { + pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n" + "Was the 'perf record' command properly terminated?\n", + file->path); + } + + nr_attrs = f_header.attrs.size / f_header.attr_size; + lseek(fd, f_header.attrs.offset, SEEK_SET); + + for (i = 0; i < nr_attrs; i++) { + struct perf_evsel *evsel; + off_t tmp; + + if (read_attr(fd, header, &f_attr) < 0) + goto out_errno; + + if (header->needs_swap) { + f_attr.ids.size = bswap_64(f_attr.ids.size); + f_attr.ids.offset = bswap_64(f_attr.ids.offset); + perf_event__attr_swap(&f_attr.attr); + } + + tmp = lseek(fd, 0, SEEK_CUR); + evsel = perf_evsel__new(&f_attr.attr); + + if (evsel == NULL) + goto out_delete_evlist; + + evsel->needs_swap = header->needs_swap; + /* + * Do it before so that if perf_evsel__alloc_id fails, this + * entry gets purged too at perf_evlist__delete(). + */ + perf_evlist__add(session->evlist, evsel); + + nr_ids = f_attr.ids.size / sizeof(u64); + /* + * We don't have the cpu and thread maps on the header, so + * for allocating the perf_sample_id table we fake 1 cpu and + * hattr->ids threads. 
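+ * E.g. an event recorded with, say, 4 ids ends up with a 1 x 4 id
+ * table here; the exact cpu/thread split does not matter for mapping
+ * sample ids back to an evsel.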
+ */ + if (perf_evsel__alloc_id(evsel, 1, nr_ids)) + goto out_delete_evlist; + + lseek(fd, f_attr.ids.offset, SEEK_SET); + + for (j = 0; j < nr_ids; j++) { + if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id))) + goto out_errno; + + perf_evlist__id_add(session->evlist, evsel, 0, j, f_id); + } + + lseek(fd, tmp, SEEK_SET); + } + + symbol_conf.nr_events = nr_attrs; + + perf_header__process_sections(header, fd, &session->tevent, + perf_file_section__process); + + if (perf_evlist__prepare_tracepoint_events(session->evlist, + session->tevent.pevent)) + goto out_delete_evlist; + + return 0; +out_errno: + return -errno; + +out_delete_evlist: + perf_evlist__delete(session->evlist); + session->evlist = NULL; + return -ENOMEM; +} + +int perf_event__synthesize_attr(struct perf_tool *tool, + struct perf_event_attr *attr, u32 ids, u64 *id, + perf_event__handler_t process) +{ + union perf_event *ev; + size_t size; + int err; + + size = sizeof(struct perf_event_attr); + size = PERF_ALIGN(size, sizeof(u64)); + size += sizeof(struct perf_event_header); + size += ids * sizeof(u64); + + ev = malloc(size); + + if (ev == NULL) + return -ENOMEM; + + ev->attr.attr = *attr; + memcpy(ev->attr.id, id, ids * sizeof(u64)); + + ev->attr.header.type = PERF_RECORD_HEADER_ATTR; + ev->attr.header.size = (u16)size; + + if (ev->attr.header.size == size) + err = process(tool, ev, NULL, NULL); + else + err = -E2BIG; + + free(ev); + + return err; +} + +int perf_event__synthesize_attrs(struct perf_tool *tool, + struct perf_session *session, + perf_event__handler_t process) +{ + struct perf_evsel *evsel; + int err = 0; + + evlist__for_each(session->evlist, evsel) { + err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids, + evsel->id, process); + if (err) { + pr_debug("failed to create perf header attribute\n"); + return err; + } + } + + return err; +} + +int perf_event__process_attr(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_evlist **pevlist) +{ + u32 i, ids, n_ids; + struct perf_evsel *evsel; + struct perf_evlist *evlist = *pevlist; + + if (evlist == NULL) { + *pevlist = evlist = perf_evlist__new(); + if (evlist == NULL) + return -ENOMEM; + } + + evsel = perf_evsel__new(&event->attr.attr); + if (evsel == NULL) + return -ENOMEM; + + perf_evlist__add(evlist, evsel); + + ids = event->header.size; + ids -= (void *)&event->attr.id - (void *)event; + n_ids = ids / sizeof(u64); + /* + * We don't have the cpu and thread maps on the header, so + * for allocating the perf_sample_id table we fake 1 cpu and + * hattr->ids threads. + */ + if (perf_evsel__alloc_id(evsel, 1, n_ids)) + return -ENOMEM; + + for (i = 0; i < n_ids; i++) { + perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]); + } + + symbol_conf.nr_events = evlist->nr_entries; + + return 0; +} + +int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, + struct perf_evlist *evlist, + perf_event__handler_t process) +{ + union perf_event ev; + struct tracing_data *tdata; + ssize_t size = 0, aligned_size = 0, padding; + int err __maybe_unused = 0; + + /* + * We are going to store the size of the data followed + * by the data contents. Since the fd descriptor is a pipe, + * we cannot seek back to store the size of the data once + * we know it. 
Instead we: + * + * - write the tracing data to the temp file + * - get/write the data size to pipe + * - write the tracing data from the temp file + * to the pipe + */ + tdata = tracing_data_get(&evlist->entries, fd, true); + if (!tdata) + return -1; + + memset(&ev, 0, sizeof(ev)); + + ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; + size = tdata->size; + aligned_size = PERF_ALIGN(size, sizeof(u64)); + padding = aligned_size - size; + ev.tracing_data.header.size = sizeof(ev.tracing_data); + ev.tracing_data.size = aligned_size; + + process(tool, &ev, NULL, NULL); + + /* + * The put function will copy all the tracing data + * stored in temp file to the pipe. + */ + tracing_data_put(tdata); + + write_padded(fd, NULL, 0, padding); + + return aligned_size; +} + +int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_session *session) +{ + ssize_t size_read, padding, size = event->tracing_data.size; + int fd = perf_data_file__fd(session->file); + off_t offset = lseek(fd, 0, SEEK_CUR); + char buf[BUFSIZ]; + + /* setup for reading amidst mmap */ + lseek(fd, offset + sizeof(struct tracing_data_event), + SEEK_SET); + + size_read = trace_report(fd, &session->tevent, + session->repipe); + padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; + + if (readn(fd, buf, padding) < 0) { + pr_err("%s: reading input file", __func__); + return -1; + } + if (session->repipe) { + int retw = write(STDOUT_FILENO, buf, padding); + if (retw <= 0 || retw != padding) { + pr_err("%s: repiping tracing data padding", __func__); + return -1; + } + } + + if (size_read + padding != size) { + pr_err("%s: tracing data size mismatch", __func__); + return -1; + } + + perf_evlist__prepare_tracepoint_events(session->evlist, + session->tevent.pevent); + + return size_read + padding; +} + +int perf_event__synthesize_build_id(struct perf_tool *tool, + struct dso *pos, u16 misc, + perf_event__handler_t process, + struct machine *machine) +{ + union perf_event ev; + size_t len; + int err = 0; + + if (!pos->hit) + return err; + + memset(&ev, 0, sizeof(ev)); + + len = pos->long_name_len + 1; + len = PERF_ALIGN(len, NAME_ALIGN); + memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); + ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; + ev.build_id.header.misc = misc; + ev.build_id.pid = machine->pid; + ev.build_id.header.size = sizeof(ev.build_id) + len; + memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); + + err = process(tool, &ev, NULL, machine); + + return err; +} + +int perf_event__process_build_id(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_session *session) +{ + __event_process_build_id(&event->build_id, + event->build_id.filename, + session); + return 0; +} diff --git a/kernel/tools/perf/util/header.h b/kernel/tools/perf/util/header.h new file mode 100644 index 000000000..3bb90ac17 --- /dev/null +++ b/kernel/tools/perf/util/header.h @@ -0,0 +1,159 @@ +#ifndef __PERF_HEADER_H +#define __PERF_HEADER_H + +#include +#include +#include +#include +#include +#include "event.h" + + +enum { + HEADER_RESERVED = 0, /* always cleared */ + HEADER_FIRST_FEATURE = 1, + HEADER_TRACING_DATA = 1, + HEADER_BUILD_ID, + + HEADER_HOSTNAME, + HEADER_OSRELEASE, + HEADER_VERSION, + HEADER_ARCH, + HEADER_NRCPUS, + HEADER_CPUDESC, + HEADER_CPUID, + HEADER_TOTAL_MEM, + HEADER_CMDLINE, + HEADER_EVENT_DESC, + HEADER_CPU_TOPOLOGY, + HEADER_NUMA_TOPOLOGY, + HEADER_BRANCH_STACK, + HEADER_PMU_MAPPINGS, + 
HEADER_GROUP_DESC, + HEADER_LAST_FEATURE, + HEADER_FEAT_BITS = 256, +}; + +enum perf_header_version { + PERF_HEADER_VERSION_1, + PERF_HEADER_VERSION_2, +}; + +struct perf_file_section { + u64 offset; + u64 size; +}; + +struct perf_file_header { + u64 magic; + u64 size; + u64 attr_size; + struct perf_file_section attrs; + struct perf_file_section data; + /* event_types is ignored */ + struct perf_file_section event_types; + DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); +}; + +struct perf_pipe_file_header { + u64 magic; + u64 size; +}; + +struct perf_header; + +int perf_file_header__read(struct perf_file_header *header, + struct perf_header *ph, int fd); + +struct perf_session_env { + char *hostname; + char *os_release; + char *version; + char *arch; + int nr_cpus_online; + int nr_cpus_avail; + char *cpu_desc; + char *cpuid; + unsigned long long total_mem; + + int nr_cmdline; + int nr_sibling_cores; + int nr_sibling_threads; + int nr_numa_nodes; + int nr_pmu_mappings; + int nr_groups; + char *cmdline; + char *sibling_cores; + char *sibling_threads; + char *numa_nodes; + char *pmu_mappings; +}; + +struct perf_header { + enum perf_header_version version; + bool needs_swap; + u64 data_offset; + u64 data_size; + u64 feat_offset; + DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); + struct perf_session_env env; +}; + +struct perf_evlist; +struct perf_session; + +int perf_session__read_header(struct perf_session *session); +int perf_session__write_header(struct perf_session *session, + struct perf_evlist *evlist, + int fd, bool at_exit); +int perf_header__write_pipe(int fd); + +void perf_header__set_feat(struct perf_header *header, int feat); +void perf_header__clear_feat(struct perf_header *header, int feat); +bool perf_header__has_feat(const struct perf_header *header, int feat); + +int perf_header__set_cmdline(int argc, const char **argv); + +int perf_header__process_sections(struct perf_header *header, int fd, + void *data, + int (*process)(struct perf_file_section *section, + struct perf_header *ph, + int feat, int fd, void *data)); + +int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full); + +int perf_event__synthesize_attr(struct perf_tool *tool, + struct perf_event_attr *attr, u32 ids, u64 *id, + perf_event__handler_t process); +int perf_event__synthesize_attrs(struct perf_tool *tool, + struct perf_session *session, + perf_event__handler_t process); +int perf_event__process_attr(struct perf_tool *tool, union perf_event *event, + struct perf_evlist **pevlist); + +int perf_event__synthesize_tracing_data(struct perf_tool *tool, + int fd, struct perf_evlist *evlist, + perf_event__handler_t process); +int perf_event__process_tracing_data(struct perf_tool *tool, + union perf_event *event, + struct perf_session *session); + +int perf_event__synthesize_build_id(struct perf_tool *tool, + struct dso *pos, u16 misc, + perf_event__handler_t process, + struct machine *machine); +int perf_event__process_build_id(struct perf_tool *tool, + union perf_event *event, + struct perf_session *session); +bool is_perf_magic(u64 magic); + +#define NAME_ALIGN 64 + +int write_padded(int fd, const void *bf, size_t count, size_t count_aligned); + +/* + * arch specific callback + */ +int get_cpuid(char *buffer, size_t sz); + +#endif /* __PERF_HEADER_H */ diff --git a/kernel/tools/perf/util/help.c b/kernel/tools/perf/util/help.c new file mode 100644 index 000000000..86c37c472 --- /dev/null +++ b/kernel/tools/perf/util/help.c @@ -0,0 +1,339 @@ +#include "cache.h" +#include "../builtin.h" +#include 
"exec_cmd.h" +#include "levenshtein.h" +#include "help.h" +#include + +void add_cmdname(struct cmdnames *cmds, const char *name, size_t len) +{ + struct cmdname *ent = malloc(sizeof(*ent) + len + 1); + + ent->len = len; + memcpy(ent->name, name, len); + ent->name[len] = 0; + + ALLOC_GROW(cmds->names, cmds->cnt + 1, cmds->alloc); + cmds->names[cmds->cnt++] = ent; +} + +static void clean_cmdnames(struct cmdnames *cmds) +{ + unsigned int i; + + for (i = 0; i < cmds->cnt; ++i) + zfree(&cmds->names[i]); + zfree(&cmds->names); + cmds->cnt = 0; + cmds->alloc = 0; +} + +static int cmdname_compare(const void *a_, const void *b_) +{ + struct cmdname *a = *(struct cmdname **)a_; + struct cmdname *b = *(struct cmdname **)b_; + return strcmp(a->name, b->name); +} + +static void uniq(struct cmdnames *cmds) +{ + unsigned int i, j; + + if (!cmds->cnt) + return; + + for (i = j = 1; i < cmds->cnt; i++) + if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name)) + cmds->names[j++] = cmds->names[i]; + + cmds->cnt = j; +} + +void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes) +{ + size_t ci, cj, ei; + int cmp; + + ci = cj = ei = 0; + while (ci < cmds->cnt && ei < excludes->cnt) { + cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name); + if (cmp < 0) + cmds->names[cj++] = cmds->names[ci++]; + else if (cmp == 0) + ci++, ei++; + else if (cmp > 0) + ei++; + } + + while (ci < cmds->cnt) + cmds->names[cj++] = cmds->names[ci++]; + + cmds->cnt = cj; +} + +static void pretty_print_string_list(struct cmdnames *cmds, int longest) +{ + int cols = 1, rows; + int space = longest + 1; /* min 1 SP between words */ + struct winsize win; + int max_cols; + int i, j; + + get_term_dimensions(&win); + max_cols = win.ws_col - 1; /* don't print *on* the edge */ + + if (space < max_cols) + cols = max_cols / space; + rows = (cmds->cnt + cols - 1) / cols; + + for (i = 0; i < rows; i++) { + printf(" "); + + for (j = 0; j < cols; j++) { + unsigned int n = j * rows + i; + unsigned int size = space; + + if (n >= cmds->cnt) + break; + if (j == cols-1 || n + rows >= cmds->cnt) + size = 1; + printf("%-*s", size, cmds->names[n]->name); + } + putchar('\n'); + } +} + +static int is_executable(const char *name) +{ + struct stat st; + + if (stat(name, &st) || /* stat, not lstat */ + !S_ISREG(st.st_mode)) + return 0; + + return st.st_mode & S_IXUSR; +} + +static void list_commands_in_dir(struct cmdnames *cmds, + const char *path, + const char *prefix) +{ + int prefix_len; + DIR *dir = opendir(path); + struct dirent *de; + struct strbuf buf = STRBUF_INIT; + int len; + + if (!dir) + return; + if (!prefix) + prefix = "perf-"; + prefix_len = strlen(prefix); + + strbuf_addf(&buf, "%s/", path); + len = buf.len; + + while ((de = readdir(dir)) != NULL) { + int entlen; + + if (prefixcmp(de->d_name, prefix)) + continue; + + strbuf_setlen(&buf, len); + strbuf_addstr(&buf, de->d_name); + if (!is_executable(buf.buf)) + continue; + + entlen = strlen(de->d_name) - prefix_len; + if (has_extension(de->d_name, ".exe")) + entlen -= 4; + + add_cmdname(cmds, de->d_name + prefix_len, entlen); + } + closedir(dir); + strbuf_release(&buf); +} + +void load_command_list(const char *prefix, + struct cmdnames *main_cmds, + struct cmdnames *other_cmds) +{ + const char *env_path = getenv("PATH"); + const char *exec_path = perf_exec_path(); + + if (exec_path) { + list_commands_in_dir(main_cmds, exec_path, prefix); + qsort(main_cmds->names, main_cmds->cnt, + sizeof(*main_cmds->names), cmdname_compare); + uniq(main_cmds); + } + + if (env_path) { + char 
*paths, *path, *colon; + path = paths = strdup(env_path); + while (1) { + if ((colon = strchr(path, PATH_SEP))) + *colon = 0; + if (!exec_path || strcmp(path, exec_path)) + list_commands_in_dir(other_cmds, path, prefix); + + if (!colon) + break; + path = colon + 1; + } + free(paths); + + qsort(other_cmds->names, other_cmds->cnt, + sizeof(*other_cmds->names), cmdname_compare); + uniq(other_cmds); + } + exclude_cmds(other_cmds, main_cmds); +} + +void list_commands(const char *title, struct cmdnames *main_cmds, + struct cmdnames *other_cmds) +{ + unsigned int i, longest = 0; + + for (i = 0; i < main_cmds->cnt; i++) + if (longest < main_cmds->names[i]->len) + longest = main_cmds->names[i]->len; + for (i = 0; i < other_cmds->cnt; i++) + if (longest < other_cmds->names[i]->len) + longest = other_cmds->names[i]->len; + + if (main_cmds->cnt) { + const char *exec_path = perf_exec_path(); + printf("available %s in '%s'\n", title, exec_path); + printf("----------------"); + mput_char('-', strlen(title) + strlen(exec_path)); + putchar('\n'); + pretty_print_string_list(main_cmds, longest); + putchar('\n'); + } + + if (other_cmds->cnt) { + printf("%s available from elsewhere on your $PATH\n", title); + printf("---------------------------------------"); + mput_char('-', strlen(title)); + putchar('\n'); + pretty_print_string_list(other_cmds, longest); + putchar('\n'); + } +} + +int is_in_cmdlist(struct cmdnames *c, const char *s) +{ + unsigned int i; + + for (i = 0; i < c->cnt; i++) + if (!strcmp(s, c->names[i]->name)) + return 1; + return 0; +} + +static int autocorrect; +static struct cmdnames aliases; + +static int perf_unknown_cmd_config(const char *var, const char *value, void *cb) +{ + if (!strcmp(var, "help.autocorrect")) + autocorrect = perf_config_int(var,value); + /* Also use aliases for command lookup */ + if (!prefixcmp(var, "alias.")) + add_cmdname(&aliases, var + 6, strlen(var + 6)); + + return perf_default_config(var, value, cb); +} + +static int levenshtein_compare(const void *p1, const void *p2) +{ + const struct cmdname *const *c1 = p1, *const *c2 = p2; + const char *s1 = (*c1)->name, *s2 = (*c2)->name; + int l1 = (*c1)->len; + int l2 = (*c2)->len; + return l1 != l2 ? 
l1 - l2 : strcmp(s1, s2); +} + +static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old) +{ + unsigned int i; + + ALLOC_GROW(cmds->names, cmds->cnt + old->cnt, cmds->alloc); + + for (i = 0; i < old->cnt; i++) + cmds->names[cmds->cnt++] = old->names[i]; + zfree(&old->names); + old->cnt = 0; +} + +const char *help_unknown_cmd(const char *cmd) +{ + unsigned int i, n = 0, best_similarity = 0; + struct cmdnames main_cmds, other_cmds; + + memset(&main_cmds, 0, sizeof(main_cmds)); + memset(&other_cmds, 0, sizeof(main_cmds)); + memset(&aliases, 0, sizeof(aliases)); + + perf_config(perf_unknown_cmd_config, NULL); + + load_command_list("perf-", &main_cmds, &other_cmds); + + add_cmd_list(&main_cmds, &aliases); + add_cmd_list(&main_cmds, &other_cmds); + qsort(main_cmds.names, main_cmds.cnt, + sizeof(main_cmds.names), cmdname_compare); + uniq(&main_cmds); + + if (main_cmds.cnt) { + /* This reuses cmdname->len for similarity index */ + for (i = 0; i < main_cmds.cnt; ++i) + main_cmds.names[i]->len = + levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4); + + qsort(main_cmds.names, main_cmds.cnt, + sizeof(*main_cmds.names), levenshtein_compare); + + best_similarity = main_cmds.names[0]->len; + n = 1; + while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len) + ++n; + } + + if (autocorrect && n == 1) { + const char *assumed = main_cmds.names[0]->name; + + main_cmds.names[0] = NULL; + clean_cmdnames(&main_cmds); + fprintf(stderr, "WARNING: You called a perf program named '%s', " + "which does not exist.\n" + "Continuing under the assumption that you meant '%s'\n", + cmd, assumed); + if (autocorrect > 0) { + fprintf(stderr, "in %0.1f seconds automatically...\n", + (float)autocorrect/10.0); + poll(NULL, 0, autocorrect * 100); + } + return assumed; + } + + fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd); + + if (main_cmds.cnt && best_similarity < 6) { + fprintf(stderr, "\nDid you mean %s?\n", + n < 2 ? "this": "one of these"); + + for (i = 0; i < n; i++) + fprintf(stderr, "\t%s\n", main_cmds.names[i]->name); + } + + exit(1); +} + +int cmd_version(int argc __maybe_unused, const char **argv __maybe_unused, + const char *prefix __maybe_unused) +{ + printf("perf version %s\n", perf_version_string); + return 0; +} diff --git a/kernel/tools/perf/util/help.h b/kernel/tools/perf/util/help.h new file mode 100644 index 000000000..7f5c6dedd --- /dev/null +++ b/kernel/tools/perf/util/help.h @@ -0,0 +1,29 @@ +#ifndef __PERF_HELP_H +#define __PERF_HELP_H + +struct cmdnames { + size_t alloc; + size_t cnt; + struct cmdname { + size_t len; /* also used for similarity index in help.c */ + char name[FLEX_ARRAY]; + } **names; +}; + +static inline void mput_char(char c, unsigned int num) +{ + while(num--) + putchar(c); +} + +void load_command_list(const char *prefix, + struct cmdnames *main_cmds, + struct cmdnames *other_cmds); +void add_cmdname(struct cmdnames *cmds, const char *name, size_t len); +/* Here we require that excludes is a sorted list. 
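+ * exclude_cmds() walks both lists in lockstep, the way the merge
+ * step of a merge sort does, so the caller must sort (and uniq)
+ * them beforehand.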
*/ +void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes); +int is_in_cmdlist(struct cmdnames *c, const char *s); +void list_commands(const char *title, struct cmdnames *main_cmds, + struct cmdnames *other_cmds); + +#endif /* __PERF_HELP_H */ diff --git a/kernel/tools/perf/util/hist.c b/kernel/tools/perf/util/hist.c new file mode 100644 index 000000000..cc22b9158 --- /dev/null +++ b/kernel/tools/perf/util/hist.c @@ -0,0 +1,1482 @@ +#include "util.h" +#include "build-id.h" +#include "hist.h" +#include "session.h" +#include "sort.h" +#include "evlist.h" +#include "evsel.h" +#include "annotate.h" +#include "ui/progress.h" +#include + +static bool hists__filter_entry_by_dso(struct hists *hists, + struct hist_entry *he); +static bool hists__filter_entry_by_thread(struct hists *hists, + struct hist_entry *he); +static bool hists__filter_entry_by_symbol(struct hists *hists, + struct hist_entry *he); + +u16 hists__col_len(struct hists *hists, enum hist_column col) +{ + return hists->col_len[col]; +} + +void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len) +{ + hists->col_len[col] = len; +} + +bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len) +{ + if (len > hists__col_len(hists, col)) { + hists__set_col_len(hists, col, len); + return true; + } + return false; +} + +void hists__reset_col_len(struct hists *hists) +{ + enum hist_column col; + + for (col = 0; col < HISTC_NR_COLS; ++col) + hists__set_col_len(hists, col, 0); +} + +static void hists__set_unres_dso_col_len(struct hists *hists, int dso) +{ + const unsigned int unresolved_col_width = BITS_PER_LONG / 4; + + if (hists__col_len(hists, dso) < unresolved_col_width && + !symbol_conf.col_width_list_str && !symbol_conf.field_sep && + !symbol_conf.dso_list) + hists__set_col_len(hists, dso, unresolved_col_width); +} + +void hists__calc_col_len(struct hists *hists, struct hist_entry *h) +{ + const unsigned int unresolved_col_width = BITS_PER_LONG / 4; + int symlen; + u16 len; + + /* + * +4 accounts for '[x] ' priv level info + * +2 accounts for 0x prefix on raw addresses + * +3 accounts for ' y ' symtab origin info + */ + if (h->ms.sym) { + symlen = h->ms.sym->namelen + 4; + if (verbose) + symlen += BITS_PER_LONG / 4 + 2 + 3; + hists__new_col_len(hists, HISTC_SYMBOL, symlen); + } else { + symlen = unresolved_col_width + 4 + 2; + hists__new_col_len(hists, HISTC_SYMBOL, symlen); + hists__set_unres_dso_col_len(hists, HISTC_DSO); + } + + len = thread__comm_len(h->thread); + if (hists__new_col_len(hists, HISTC_COMM, len)) + hists__set_col_len(hists, HISTC_THREAD, len + 6); + + if (h->ms.map) { + len = dso__name_len(h->ms.map->dso); + hists__new_col_len(hists, HISTC_DSO, len); + } + + if (h->parent) + hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen); + + if (h->branch_info) { + if (h->branch_info->from.sym) { + symlen = (int)h->branch_info->from.sym->namelen + 4; + if (verbose) + symlen += BITS_PER_LONG / 4 + 2 + 3; + hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); + + symlen = dso__name_len(h->branch_info->from.map->dso); + hists__new_col_len(hists, HISTC_DSO_FROM, symlen); + } else { + symlen = unresolved_col_width + 4 + 2; + hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); + hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM); + } + + if (h->branch_info->to.sym) { + symlen = (int)h->branch_info->to.sym->namelen + 4; + if (verbose) + symlen += BITS_PER_LONG / 4 + 2 + 3; + hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); + + symlen = 
dso__name_len(h->branch_info->to.map->dso); + hists__new_col_len(hists, HISTC_DSO_TO, symlen); + } else { + symlen = unresolved_col_width + 4 + 2; + hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); + hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); + } + } + + if (h->mem_info) { + if (h->mem_info->daddr.sym) { + symlen = (int)h->mem_info->daddr.sym->namelen + 4 + + unresolved_col_width + 2; + hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, + symlen); + hists__new_col_len(hists, HISTC_MEM_DCACHELINE, + symlen + 1); + } else { + symlen = unresolved_col_width + 4 + 2; + hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, + symlen); + } + if (h->mem_info->daddr.map) { + symlen = dso__name_len(h->mem_info->daddr.map->dso); + hists__new_col_len(hists, HISTC_MEM_DADDR_DSO, + symlen); + } else { + symlen = unresolved_col_width + 4 + 2; + hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO); + } + } else { + symlen = unresolved_col_width + 4 + 2; + hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen); + hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO); + } + + hists__new_col_len(hists, HISTC_MEM_LOCKED, 6); + hists__new_col_len(hists, HISTC_MEM_TLB, 22); + hists__new_col_len(hists, HISTC_MEM_SNOOP, 12); + hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3); + hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12); + hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12); + + if (h->transaction) + hists__new_col_len(hists, HISTC_TRANSACTION, + hist_entry__transaction_len()); +} + +void hists__output_recalc_col_len(struct hists *hists, int max_rows) +{ + struct rb_node *next = rb_first(&hists->entries); + struct hist_entry *n; + int row = 0; + + hists__reset_col_len(hists); + + while (next && row++ < max_rows) { + n = rb_entry(next, struct hist_entry, rb_node); + if (!n->filtered) + hists__calc_col_len(hists, n); + next = rb_next(&n->rb_node); + } +} + +static void he_stat__add_cpumode_period(struct he_stat *he_stat, + unsigned int cpumode, u64 period) +{ + switch (cpumode) { + case PERF_RECORD_MISC_KERNEL: + he_stat->period_sys += period; + break; + case PERF_RECORD_MISC_USER: + he_stat->period_us += period; + break; + case PERF_RECORD_MISC_GUEST_KERNEL: + he_stat->period_guest_sys += period; + break; + case PERF_RECORD_MISC_GUEST_USER: + he_stat->period_guest_us += period; + break; + default: + break; + } +} + +static void he_stat__add_period(struct he_stat *he_stat, u64 period, + u64 weight) +{ + + he_stat->period += period; + he_stat->weight += weight; + he_stat->nr_events += 1; +} + +static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src) +{ + dest->period += src->period; + dest->period_sys += src->period_sys; + dest->period_us += src->period_us; + dest->period_guest_sys += src->period_guest_sys; + dest->period_guest_us += src->period_guest_us; + dest->nr_events += src->nr_events; + dest->weight += src->weight; +} + +static void he_stat__decay(struct he_stat *he_stat) +{ + he_stat->period = (he_stat->period * 7) / 8; + he_stat->nr_events = (he_stat->nr_events * 7) / 8; + /* XXX need decay for weight too? 
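+ * The 7/8 factor above is an exponential decay: a period of 1000
+ * becomes 875, 765, 669, 585, 511, ... halving in roughly five
+ * decay rounds.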
*/ +} + +static bool hists__decay_entry(struct hists *hists, struct hist_entry *he) +{ + u64 prev_period = he->stat.period; + u64 diff; + + if (prev_period == 0) + return true; + + he_stat__decay(&he->stat); + if (symbol_conf.cumulate_callchain) + he_stat__decay(he->stat_acc); + + diff = prev_period - he->stat.period; + + hists->stats.total_period -= diff; + if (!he->filtered) + hists->stats.total_non_filtered_period -= diff; + + return he->stat.period == 0; +} + +static void hists__delete_entry(struct hists *hists, struct hist_entry *he) +{ + rb_erase(&he->rb_node, &hists->entries); + + if (sort__need_collapse) + rb_erase(&he->rb_node_in, &hists->entries_collapsed); + + --hists->nr_entries; + if (!he->filtered) + --hists->nr_non_filtered_entries; + + hist_entry__delete(he); +} + +void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) +{ + struct rb_node *next = rb_first(&hists->entries); + struct hist_entry *n; + + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + if (((zap_user && n->level == '.') || + (zap_kernel && n->level != '.') || + hists__decay_entry(hists, n))) { + hists__delete_entry(hists, n); + } + } +} + +void hists__delete_entries(struct hists *hists) +{ + struct rb_node *next = rb_first(&hists->entries); + struct hist_entry *n; + + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + hists__delete_entry(hists, n); + } +} + +/* + * histogram, sorted on item, collects periods + */ + +static struct hist_entry *hist_entry__new(struct hist_entry *template, + bool sample_self) +{ + size_t callchain_size = 0; + struct hist_entry *he; + + if (symbol_conf.use_callchain) + callchain_size = sizeof(struct callchain_root); + + he = zalloc(sizeof(*he) + callchain_size); + + if (he != NULL) { + *he = *template; + + if (symbol_conf.cumulate_callchain) { + he->stat_acc = malloc(sizeof(he->stat)); + if (he->stat_acc == NULL) { + free(he); + return NULL; + } + memcpy(he->stat_acc, &he->stat, sizeof(he->stat)); + if (!sample_self) + memset(&he->stat, 0, sizeof(he->stat)); + } + + if (he->ms.map) + he->ms.map->referenced = true; + + if (he->branch_info) { + /* + * This branch info is (a part of) allocated from + * sample__resolve_bstack() and will be freed after + * adding new entries. So we need to save a copy. 
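+ * (At this point he->branch_info still points into the caller's
+ * array; the malloc()/memcpy() below give this entry its own copy.)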
+ */ + he->branch_info = malloc(sizeof(*he->branch_info)); + if (he->branch_info == NULL) { + free(he->stat_acc); + free(he); + return NULL; + } + + memcpy(he->branch_info, template->branch_info, + sizeof(*he->branch_info)); + + if (he->branch_info->from.map) + he->branch_info->from.map->referenced = true; + if (he->branch_info->to.map) + he->branch_info->to.map->referenced = true; + } + + if (he->mem_info) { + if (he->mem_info->iaddr.map) + he->mem_info->iaddr.map->referenced = true; + if (he->mem_info->daddr.map) + he->mem_info->daddr.map->referenced = true; + } + + if (symbol_conf.use_callchain) + callchain_init(he->callchain); + + INIT_LIST_HEAD(&he->pairs.node); + thread__get(he->thread); + } + + return he; +} + +static u8 symbol__parent_filter(const struct symbol *parent) +{ + if (symbol_conf.exclude_other && parent == NULL) + return 1 << HIST_FILTER__PARENT; + return 0; +} + +static struct hist_entry *add_hist_entry(struct hists *hists, + struct hist_entry *entry, + struct addr_location *al, + bool sample_self) +{ + struct rb_node **p; + struct rb_node *parent = NULL; + struct hist_entry *he; + int64_t cmp; + u64 period = entry->stat.period; + u64 weight = entry->stat.weight; + + p = &hists->entries_in->rb_node; + + while (*p != NULL) { + parent = *p; + he = rb_entry(parent, struct hist_entry, rb_node_in); + + /* + * Make sure that it receives arguments in a same order as + * hist_entry__collapse() so that we can use an appropriate + * function when searching an entry regardless which sort + * keys were used. + */ + cmp = hist_entry__cmp(he, entry); + + if (!cmp) { + if (sample_self) + he_stat__add_period(&he->stat, period, weight); + if (symbol_conf.cumulate_callchain) + he_stat__add_period(he->stat_acc, period, weight); + + /* + * This mem info was allocated from sample__resolve_mem + * and will not be used anymore. + */ + zfree(&entry->mem_info); + + /* If the map of an existing hist_entry has + * become out-of-date due to an exec() or + * similar, update it. Otherwise we will + * mis-adjust symbol addresses when computing + * the history counter to increment. 
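+ * Only ms.map is refreshed here: the sort keys already compared
+ * equal, so the entry itself keeps its place in the rbtree.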
+ */ + if (he->ms.map != entry->ms.map) { + he->ms.map = entry->ms.map; + if (he->ms.map) + he->ms.map->referenced = true; + } + goto out; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + he = hist_entry__new(entry, sample_self); + if (!he) + return NULL; + + hists->nr_entries++; + + rb_link_node(&he->rb_node_in, parent, p); + rb_insert_color(&he->rb_node_in, hists->entries_in); +out: + if (sample_self) + he_stat__add_cpumode_period(&he->stat, al->cpumode, period); + if (symbol_conf.cumulate_callchain) + he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period); + return he; +} + +struct hist_entry *__hists__add_entry(struct hists *hists, + struct addr_location *al, + struct symbol *sym_parent, + struct branch_info *bi, + struct mem_info *mi, + u64 period, u64 weight, u64 transaction, + bool sample_self) +{ + struct hist_entry entry = { + .thread = al->thread, + .comm = thread__comm(al->thread), + .ms = { + .map = al->map, + .sym = al->sym, + }, + .cpu = al->cpu, + .cpumode = al->cpumode, + .ip = al->addr, + .level = al->level, + .stat = { + .nr_events = 1, + .period = period, + .weight = weight, + }, + .parent = sym_parent, + .filtered = symbol__parent_filter(sym_parent) | al->filtered, + .hists = hists, + .branch_info = bi, + .mem_info = mi, + .transaction = transaction, + }; + + return add_hist_entry(hists, &entry, al, sample_self); +} + +static int +iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) +{ + return 0; +} + +static int +iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) +{ + return 0; +} + +static int +iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct perf_sample *sample = iter->sample; + struct mem_info *mi; + + mi = sample__resolve_mem(sample, al); + if (mi == NULL) + return -ENOMEM; + + iter->priv = mi; + return 0; +} + +static int +iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + u64 cost; + struct mem_info *mi = iter->priv; + struct hists *hists = evsel__hists(iter->evsel); + struct hist_entry *he; + + if (mi == NULL) + return -EINVAL; + + cost = iter->sample->weight; + if (!cost) + cost = 1; + + /* + * must pass period=weight in order to get the correct + * sorting from hists__collapse_resort() which is solely + * based on periods. We want sorting be done on nr_events * weight + * and this is indirectly achieved by passing period=weight here + * and the he_stat__add_period() function. + */ + he = __hists__add_entry(hists, al, iter->parent, NULL, mi, + cost, cost, 0, true); + if (!he) + return -ENOMEM; + + iter->he = he; + return 0; +} + +static int +iter_finish_mem_entry(struct hist_entry_iter *iter, + struct addr_location *al __maybe_unused) +{ + struct perf_evsel *evsel = iter->evsel; + struct hists *hists = evsel__hists(evsel); + struct hist_entry *he = iter->he; + int err = -EINVAL; + + if (he == NULL) + goto out; + + hists__inc_nr_samples(hists, he->filtered); + + err = hist_entry__append_callchain(he, iter->sample); + +out: + /* + * We don't need to free iter->priv (mem_info) here since + * the mem info was either already freed in add_hist_entry() or + * passed to a new hist entry by hist_entry__new(). 
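+ * Either way ownership has passed on, so clearing iter->priv
+ * below is the only cleanup this iterator still owes.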
+ */ + iter->priv = NULL; + + iter->he = NULL; + return err; +} + +static int +iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct branch_info *bi; + struct perf_sample *sample = iter->sample; + + bi = sample__resolve_bstack(sample, al); + if (!bi) + return -ENOMEM; + + iter->curr = 0; + iter->total = sample->branch_stack->nr; + + iter->priv = bi; + return 0; +} + +static int +iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) +{ + /* to avoid calling callback function */ + iter->he = NULL; + + return 0; +} + +static int +iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct branch_info *bi = iter->priv; + int i = iter->curr; + + if (bi == NULL) + return 0; + + if (iter->curr >= iter->total) + return 0; + + al->map = bi[i].to.map; + al->sym = bi[i].to.sym; + al->addr = bi[i].to.addr; + return 1; +} + +static int +iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct branch_info *bi; + struct perf_evsel *evsel = iter->evsel; + struct hists *hists = evsel__hists(evsel); + struct hist_entry *he = NULL; + int i = iter->curr; + int err = 0; + + bi = iter->priv; + + if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym)) + goto out; + + /* + * The report shows the percentage of total branches captured + * and not events sampled. Thus we use a pseudo period of 1. + */ + he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL, + 1, 1, 0, true); + if (he == NULL) + return -ENOMEM; + + hists__inc_nr_samples(hists, he->filtered); + +out: + iter->he = he; + iter->curr++; + return err; +} + +static int +iter_finish_branch_entry(struct hist_entry_iter *iter, + struct addr_location *al __maybe_unused) +{ + zfree(&iter->priv); + iter->he = NULL; + + return iter->curr >= iter->total ? 0 : -1; +} + +static int +iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) +{ + return 0; +} + +static int +iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct perf_evsel *evsel = iter->evsel; + struct perf_sample *sample = iter->sample; + struct hist_entry *he; + + he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL, + sample->period, sample->weight, + sample->transaction, true); + if (he == NULL) + return -ENOMEM; + + iter->he = he; + return 0; +} + +static int +iter_finish_normal_entry(struct hist_entry_iter *iter, + struct addr_location *al __maybe_unused) +{ + struct hist_entry *he = iter->he; + struct perf_evsel *evsel = iter->evsel; + struct perf_sample *sample = iter->sample; + + if (he == NULL) + return 0; + + iter->he = NULL; + + hists__inc_nr_samples(evsel__hists(evsel), he->filtered); + + return hist_entry__append_callchain(he, sample); +} + +static int +iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) +{ + struct hist_entry **he_cache; + + callchain_cursor_commit(&callchain_cursor); + + /* + * This is for detecting cycles or recursions so that they're + * cumulated only one time to prevent entries more than 100% + * overhead. 
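+ * The cache needs one slot per callchain node plus one for the
+ * sample itself, hence PERF_MAX_STACK_DEPTH + 1 entries.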
+ */ + he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1)); + if (he_cache == NULL) + return -ENOMEM; + + iter->priv = he_cache; + iter->curr = 0; + + return 0; +} + +static int +iter_add_single_cumulative_entry(struct hist_entry_iter *iter, + struct addr_location *al) +{ + struct perf_evsel *evsel = iter->evsel; + struct hists *hists = evsel__hists(evsel); + struct perf_sample *sample = iter->sample; + struct hist_entry **he_cache = iter->priv; + struct hist_entry *he; + int err = 0; + + he = __hists__add_entry(hists, al, iter->parent, NULL, NULL, + sample->period, sample->weight, + sample->transaction, true); + if (he == NULL) + return -ENOMEM; + + iter->he = he; + he_cache[iter->curr++] = he; + + hist_entry__append_callchain(he, sample); + + /* + * We need to re-initialize the cursor since callchain_append() + * advanced the cursor to the end. + */ + callchain_cursor_commit(&callchain_cursor); + + hists__inc_nr_samples(hists, he->filtered); + + return err; +} + +static int +iter_next_cumulative_entry(struct hist_entry_iter *iter, + struct addr_location *al) +{ + struct callchain_cursor_node *node; + + node = callchain_cursor_current(&callchain_cursor); + if (node == NULL) + return 0; + + return fill_callchain_info(al, node, iter->hide_unresolved); +} + +static int +iter_add_next_cumulative_entry(struct hist_entry_iter *iter, + struct addr_location *al) +{ + struct perf_evsel *evsel = iter->evsel; + struct perf_sample *sample = iter->sample; + struct hist_entry **he_cache = iter->priv; + struct hist_entry *he; + struct hist_entry he_tmp = { + .cpu = al->cpu, + .thread = al->thread, + .comm = thread__comm(al->thread), + .ip = al->addr, + .ms = { + .map = al->map, + .sym = al->sym, + }, + .parent = iter->parent, + }; + int i; + struct callchain_cursor cursor; + + callchain_cursor_snapshot(&cursor, &callchain_cursor); + + callchain_cursor_advance(&callchain_cursor); + + /* + * Check if there's duplicate entries in the callchain. + * It's possible that it has cycles or recursive calls. 
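+ * A match in he_cache below means the node was already accounted
+ * for this sample and must not be added a second time.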
+ */ + for (i = 0; i < iter->curr; i++) { + if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) { + /* to avoid calling callback function */ + iter->he = NULL; + return 0; + } + } + + he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL, + sample->period, sample->weight, + sample->transaction, false); + if (he == NULL) + return -ENOMEM; + + iter->he = he; + he_cache[iter->curr++] = he; + + if (symbol_conf.use_callchain) + callchain_append(he->callchain, &cursor, sample->period); + return 0; +} + +static int +iter_finish_cumulative_entry(struct hist_entry_iter *iter, + struct addr_location *al __maybe_unused) +{ + zfree(&iter->priv); + iter->he = NULL; + + return 0; +} + +const struct hist_iter_ops hist_iter_mem = { + .prepare_entry = iter_prepare_mem_entry, + .add_single_entry = iter_add_single_mem_entry, + .next_entry = iter_next_nop_entry, + .add_next_entry = iter_add_next_nop_entry, + .finish_entry = iter_finish_mem_entry, +}; + +const struct hist_iter_ops hist_iter_branch = { + .prepare_entry = iter_prepare_branch_entry, + .add_single_entry = iter_add_single_branch_entry, + .next_entry = iter_next_branch_entry, + .add_next_entry = iter_add_next_branch_entry, + .finish_entry = iter_finish_branch_entry, +}; + +const struct hist_iter_ops hist_iter_normal = { + .prepare_entry = iter_prepare_normal_entry, + .add_single_entry = iter_add_single_normal_entry, + .next_entry = iter_next_nop_entry, + .add_next_entry = iter_add_next_nop_entry, + .finish_entry = iter_finish_normal_entry, +}; + +const struct hist_iter_ops hist_iter_cumulative = { + .prepare_entry = iter_prepare_cumulative_entry, + .add_single_entry = iter_add_single_cumulative_entry, + .next_entry = iter_next_cumulative_entry, + .add_next_entry = iter_add_next_cumulative_entry, + .finish_entry = iter_finish_cumulative_entry, +}; + +int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, + struct perf_evsel *evsel, struct perf_sample *sample, + int max_stack_depth, void *arg) +{ + int err, err2; + + err = sample__resolve_callchain(sample, &iter->parent, evsel, al, + max_stack_depth); + if (err) + return err; + + iter->evsel = evsel; + iter->sample = sample; + + err = iter->ops->prepare_entry(iter, al); + if (err) + goto out; + + err = iter->ops->add_single_entry(iter, al); + if (err) + goto out; + + if (iter->he && iter->add_entry_cb) { + err = iter->add_entry_cb(iter, al, true, arg); + if (err) + goto out; + } + + while (iter->ops->next_entry(iter, al)) { + err = iter->ops->add_next_entry(iter, al); + if (err) + break; + + if (iter->he && iter->add_entry_cb) { + err = iter->add_entry_cb(iter, al, false, arg); + if (err) + goto out; + } + } + +out: + err2 = iter->ops->finish_entry(iter, al); + if (!err) + err = err2; + + return err; +} + +int64_t +hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct perf_hpp_fmt *fmt; + int64_t cmp = 0; + + perf_hpp__for_each_sort_list(fmt) { + if (perf_hpp__should_skip(fmt)) + continue; + + cmp = fmt->cmp(fmt, left, right); + if (cmp) + break; + } + + return cmp; +} + +int64_t +hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) +{ + struct perf_hpp_fmt *fmt; + int64_t cmp = 0; + + perf_hpp__for_each_sort_list(fmt) { + if (perf_hpp__should_skip(fmt)) + continue; + + cmp = fmt->collapse(fmt, left, right); + if (cmp) + break; + } + + return cmp; +} + +void hist_entry__delete(struct hist_entry *he) +{ + thread__zput(he->thread); + zfree(&he->branch_info); + zfree(&he->mem_info); + zfree(&he->stat_acc); + 
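+ /*
+ * srcline may point at the shared SRCLINE_UNKNOWN sentinel rather
+ * than heap memory, so it goes through free_srcline() instead of
+ * a plain zfree().
+ */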
free_srcline(he->srcline); + free_callchain(he->callchain); + free(he); +} + +/* + * collapse the histogram + */ + +static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused, + struct rb_root *root, + struct hist_entry *he) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + int64_t cmp; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct hist_entry, rb_node_in); + + cmp = hist_entry__collapse(iter, he); + + if (!cmp) { + he_stat__add_stat(&iter->stat, &he->stat); + if (symbol_conf.cumulate_callchain) + he_stat__add_stat(iter->stat_acc, he->stat_acc); + + if (symbol_conf.use_callchain) { + callchain_cursor_reset(&callchain_cursor); + callchain_merge(&callchain_cursor, + iter->callchain, + he->callchain); + } + hist_entry__delete(he); + return false; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + hists->nr_entries++; + + rb_link_node(&he->rb_node_in, parent, p); + rb_insert_color(&he->rb_node_in, root); + return true; +} + +static struct rb_root *hists__get_rotate_entries_in(struct hists *hists) +{ + struct rb_root *root; + + pthread_mutex_lock(&hists->lock); + + root = hists->entries_in; + if (++hists->entries_in > &hists->entries_in_array[1]) + hists->entries_in = &hists->entries_in_array[0]; + + pthread_mutex_unlock(&hists->lock); + + return root; +} + +static void hists__apply_filters(struct hists *hists, struct hist_entry *he) +{ + hists__filter_entry_by_dso(hists, he); + hists__filter_entry_by_thread(hists, he); + hists__filter_entry_by_symbol(hists, he); +} + +void hists__collapse_resort(struct hists *hists, struct ui_progress *prog) +{ + struct rb_root *root; + struct rb_node *next; + struct hist_entry *n; + + if (!sort__need_collapse) + return; + + hists->nr_entries = 0; + + root = hists__get_rotate_entries_in(hists); + + next = rb_first(root); + + while (next) { + if (session_done()) + break; + n = rb_entry(next, struct hist_entry, rb_node_in); + next = rb_next(&n->rb_node_in); + + rb_erase(&n->rb_node_in, root); + if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) { + /* + * If it wasn't combined with one of the entries already + * collapsed, we need to apply the filters that may have + * been set by, say, the hist_browser. 
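+ * (Entries that did merge into an existing collapsed entry were
+ * already freed by hists__collapse_insert_entry(), so only the
+ * survivors need their filter bits recomputed.)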
+ */ + hists__apply_filters(hists, n); + } + if (prog) + ui_progress__update(prog, 1); + } +} + +static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b) +{ + struct perf_hpp_fmt *fmt; + int64_t cmp = 0; + + perf_hpp__for_each_sort_list(fmt) { + if (perf_hpp__should_skip(fmt)) + continue; + + cmp = fmt->sort(fmt, a, b); + if (cmp) + break; + } + + return cmp; +} + +static void hists__reset_filter_stats(struct hists *hists) +{ + hists->nr_non_filtered_entries = 0; + hists->stats.total_non_filtered_period = 0; +} + +void hists__reset_stats(struct hists *hists) +{ + hists->nr_entries = 0; + hists->stats.total_period = 0; + + hists__reset_filter_stats(hists); +} + +static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h) +{ + hists->nr_non_filtered_entries++; + hists->stats.total_non_filtered_period += h->stat.period; +} + +void hists__inc_stats(struct hists *hists, struct hist_entry *h) +{ + if (!h->filtered) + hists__inc_filter_stats(hists, h); + + hists->nr_entries++; + hists->stats.total_period += h->stat.period; +} + +static void __hists__insert_output_entry(struct rb_root *entries, + struct hist_entry *he, + u64 min_callchain_hits) +{ + struct rb_node **p = &entries->rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + + if (symbol_conf.use_callchain) + callchain_param.sort(&he->sorted_chain, he->callchain, + min_callchain_hits, &callchain_param); + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct hist_entry, rb_node); + + if (hist_entry__sort(he, iter) > 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, entries); +} + +void hists__output_resort(struct hists *hists, struct ui_progress *prog) +{ + struct rb_root *root; + struct rb_node *next; + struct hist_entry *n; + u64 min_callchain_hits; + + min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100); + + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; + + next = rb_first(root); + hists->entries = RB_ROOT; + + hists__reset_stats(hists); + hists__reset_col_len(hists); + + while (next) { + n = rb_entry(next, struct hist_entry, rb_node_in); + next = rb_next(&n->rb_node_in); + + __hists__insert_output_entry(&hists->entries, n, min_callchain_hits); + hists__inc_stats(hists, n); + + if (!n->filtered) + hists__calc_col_len(hists, n); + + if (prog) + ui_progress__update(prog, 1); + } +} + +static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h, + enum hist_filter filter) +{ + h->filtered &= ~(1 << filter); + if (h->filtered) + return; + + /* force fold unfiltered entry for simplicity */ + h->ms.unfolded = false; + h->row_offset = 0; + h->nr_rows = 0; + + hists->stats.nr_non_filtered_samples += h->stat.nr_events; + + hists__inc_filter_stats(hists, h); + hists__calc_col_len(hists, h); +} + + +static bool hists__filter_entry_by_dso(struct hists *hists, + struct hist_entry *he) +{ + if (hists->dso_filter != NULL && + (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) { + he->filtered |= (1 << HIST_FILTER__DSO); + return true; + } + + return false; +} + +void hists__filter_by_dso(struct hists *hists) +{ + struct rb_node *nd; + + hists->stats.nr_non_filtered_samples = 0; + + hists__reset_filter_stats(hists); + hists__reset_col_len(hists); + + for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { + struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); + + if 
(symbol_conf.exclude_other && !h->parent) + continue; + + if (hists__filter_entry_by_dso(hists, h)) + continue; + + hists__remove_entry_filter(hists, h, HIST_FILTER__DSO); + } +} + +static bool hists__filter_entry_by_thread(struct hists *hists, + struct hist_entry *he) +{ + if (hists->thread_filter != NULL && + he->thread != hists->thread_filter) { + he->filtered |= (1 << HIST_FILTER__THREAD); + return true; + } + + return false; +} + +void hists__filter_by_thread(struct hists *hists) +{ + struct rb_node *nd; + + hists->stats.nr_non_filtered_samples = 0; + + hists__reset_filter_stats(hists); + hists__reset_col_len(hists); + + for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { + struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); + + if (hists__filter_entry_by_thread(hists, h)) + continue; + + hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD); + } +} + +static bool hists__filter_entry_by_symbol(struct hists *hists, + struct hist_entry *he) +{ + if (hists->symbol_filter_str != NULL && + (!he->ms.sym || strstr(he->ms.sym->name, + hists->symbol_filter_str) == NULL)) { + he->filtered |= (1 << HIST_FILTER__SYMBOL); + return true; + } + + return false; +} + +void hists__filter_by_symbol(struct hists *hists) +{ + struct rb_node *nd; + + hists->stats.nr_non_filtered_samples = 0; + + hists__reset_filter_stats(hists); + hists__reset_col_len(hists); + + for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { + struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); + + if (hists__filter_entry_by_symbol(hists, h)) + continue; + + hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL); + } +} + +void events_stats__inc(struct events_stats *stats, u32 type) +{ + ++stats->nr_events[0]; + ++stats->nr_events[type]; +} + +void hists__inc_nr_events(struct hists *hists, u32 type) +{ + events_stats__inc(&hists->stats, type); +} + +void hists__inc_nr_samples(struct hists *hists, bool filtered) +{ + events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE); + if (!filtered) + hists->stats.nr_non_filtered_samples++; +} + +static struct hist_entry *hists__add_dummy_entry(struct hists *hists, + struct hist_entry *pair) +{ + struct rb_root *root; + struct rb_node **p; + struct rb_node *parent = NULL; + struct hist_entry *he; + int64_t cmp; + + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; + + p = &root->rb_node; + + while (*p != NULL) { + parent = *p; + he = rb_entry(parent, struct hist_entry, rb_node_in); + + cmp = hist_entry__collapse(he, pair); + + if (!cmp) + goto out; + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + he = hist_entry__new(pair, true); + if (he) { + memset(&he->stat, 0, sizeof(he->stat)); + he->hists = hists; + rb_link_node(&he->rb_node_in, parent, p); + rb_insert_color(&he->rb_node_in, root); + hists__inc_stats(hists, he); + he->dummy = true; + } +out: + return he; +} + +static struct hist_entry *hists__find_entry(struct hists *hists, + struct hist_entry *he) +{ + struct rb_node *n; + + if (sort__need_collapse) + n = hists->entries_collapsed.rb_node; + else + n = hists->entries_in->rb_node; + + while (n) { + struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in); + int64_t cmp = hist_entry__collapse(iter, he); + + if (cmp < 0) + n = n->rb_left; + else if (cmp > 0) + n = n->rb_right; + else + return iter; + } + + return NULL; +} + +/* + * Look for pairs to link to the leader buckets (hist_entries): + */ +void hists__match(struct hists *leader, struct hists *other) +{ 
+ struct rb_root *root; + struct rb_node *nd; + struct hist_entry *pos, *pair; + + if (sort__need_collapse) + root = &leader->entries_collapsed; + else + root = leader->entries_in; + + for (nd = rb_first(root); nd; nd = rb_next(nd)) { + pos = rb_entry(nd, struct hist_entry, rb_node_in); + pair = hists__find_entry(other, pos); + + if (pair) + hist_entry__add_pair(pair, pos); + } +} + +/* + * Look for entries in the other hists that are not present in the leader, if + * we find them, just add a dummy entry on the leader hists, with period=0, + * nr_events=0, to serve as the list header. + */ +int hists__link(struct hists *leader, struct hists *other) +{ + struct rb_root *root; + struct rb_node *nd; + struct hist_entry *pos, *pair; + + if (sort__need_collapse) + root = &other->entries_collapsed; + else + root = other->entries_in; + + for (nd = rb_first(root); nd; nd = rb_next(nd)) { + pos = rb_entry(nd, struct hist_entry, rb_node_in); + + if (!hist_entry__has_pairs(pos)) { + pair = hists__add_dummy_entry(leader, pos); + if (pair == NULL) + return -1; + hist_entry__add_pair(pos, pair); + } + } + + return 0; +} + + +size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp) +{ + struct perf_evsel *pos; + size_t ret = 0; + + evlist__for_each(evlist, pos) { + ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos)); + ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp); + } + + return ret; +} + + +u64 hists__total_period(struct hists *hists) +{ + return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period : + hists->stats.total_period; +} + +int parse_filter_percentage(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused) +{ + if (!strcmp(arg, "relative")) + symbol_conf.filter_relative = true; + else if (!strcmp(arg, "absolute")) + symbol_conf.filter_relative = false; + else + return -1; + + return 0; +} + +int perf_hist_config(const char *var, const char *value) +{ + if (!strcmp(var, "hist.percentage")) + return parse_filter_percentage(NULL, value, 0); + + return 0; +} + +static int hists_evsel__init(struct perf_evsel *evsel) +{ + struct hists *hists = evsel__hists(evsel); + + memset(hists, 0, sizeof(*hists)); + hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT; + hists->entries_in = &hists->entries_in_array[0]; + hists->entries_collapsed = RB_ROOT; + hists->entries = RB_ROOT; + pthread_mutex_init(&hists->lock, NULL); + return 0; +} + +/* + * XXX We probably need a hists_evsel__exit() to free the hist_entries + * stored in the rbtree... 
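+ * Until such an exit hook exists, whatever is left in the rbtrees
+ * is simply leaked when the evsel goes away.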
+ */ + +int hists__init(void) +{ + int err = perf_evsel__object_config(sizeof(struct hists_evsel), + hists_evsel__init, NULL); + if (err) + fputs("FATAL ERROR: Couldn't setup hists class\n", stderr); + + return err; +} diff --git a/kernel/tools/perf/util/hist.h b/kernel/tools/perf/util/hist.h new file mode 100644 index 000000000..9f31b89a5 --- /dev/null +++ b/kernel/tools/perf/util/hist.h @@ -0,0 +1,358 @@ +#ifndef __PERF_HIST_H +#define __PERF_HIST_H + +#include +#include +#include "callchain.h" +#include "evsel.h" +#include "header.h" +#include "color.h" +#include "ui/progress.h" + +struct hist_entry; +struct addr_location; +struct symbol; + +enum hist_filter { + HIST_FILTER__DSO, + HIST_FILTER__THREAD, + HIST_FILTER__PARENT, + HIST_FILTER__SYMBOL, + HIST_FILTER__GUEST, + HIST_FILTER__HOST, +}; + +enum hist_column { + HISTC_SYMBOL, + HISTC_DSO, + HISTC_THREAD, + HISTC_COMM, + HISTC_PARENT, + HISTC_CPU, + HISTC_SRCLINE, + HISTC_MISPREDICT, + HISTC_IN_TX, + HISTC_ABORT, + HISTC_SYMBOL_FROM, + HISTC_SYMBOL_TO, + HISTC_DSO_FROM, + HISTC_DSO_TO, + HISTC_LOCAL_WEIGHT, + HISTC_GLOBAL_WEIGHT, + HISTC_MEM_DADDR_SYMBOL, + HISTC_MEM_DADDR_DSO, + HISTC_MEM_LOCKED, + HISTC_MEM_TLB, + HISTC_MEM_LVL, + HISTC_MEM_SNOOP, + HISTC_MEM_DCACHELINE, + HISTC_TRANSACTION, + HISTC_NR_COLS, /* Last entry */ +}; + +struct thread; +struct dso; + +struct hists { + struct rb_root entries_in_array[2]; + struct rb_root *entries_in; + struct rb_root entries; + struct rb_root entries_collapsed; + u64 nr_entries; + u64 nr_non_filtered_entries; + struct thread *thread_filter; + const struct dso *dso_filter; + const char *uid_filter_str; + const char *symbol_filter_str; + pthread_mutex_t lock; + struct events_stats stats; + u64 event_stream; + u16 col_len[HISTC_NR_COLS]; +}; + +struct hist_entry_iter; + +struct hist_iter_ops { + int (*prepare_entry)(struct hist_entry_iter *, struct addr_location *); + int (*add_single_entry)(struct hist_entry_iter *, struct addr_location *); + int (*next_entry)(struct hist_entry_iter *, struct addr_location *); + int (*add_next_entry)(struct hist_entry_iter *, struct addr_location *); + int (*finish_entry)(struct hist_entry_iter *, struct addr_location *); +}; + +struct hist_entry_iter { + int total; + int curr; + + bool hide_unresolved; + + struct perf_evsel *evsel; + struct perf_sample *sample; + struct hist_entry *he; + struct symbol *parent; + void *priv; + + const struct hist_iter_ops *ops; + /* user-defined callback function (optional) */ + int (*add_entry_cb)(struct hist_entry_iter *iter, + struct addr_location *al, bool single, void *arg); +}; + +extern const struct hist_iter_ops hist_iter_normal; +extern const struct hist_iter_ops hist_iter_branch; +extern const struct hist_iter_ops hist_iter_mem; +extern const struct hist_iter_ops hist_iter_cumulative; + +struct hist_entry *__hists__add_entry(struct hists *hists, + struct addr_location *al, + struct symbol *parent, + struct branch_info *bi, + struct mem_info *mi, u64 period, + u64 weight, u64 transaction, + bool sample_self); +int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, + struct perf_evsel *evsel, struct perf_sample *sample, + int max_stack_depth, void *arg); + +int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right); +int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right); +int hist_entry__transaction_len(void); +int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size, + struct hists *hists); +void hist_entry__delete(struct 
hist_entry *he); + +void hists__output_resort(struct hists *hists, struct ui_progress *prog); +void hists__collapse_resort(struct hists *hists, struct ui_progress *prog); + +void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel); +void hists__delete_entries(struct hists *hists); +void hists__output_recalc_col_len(struct hists *hists, int max_rows); + +u64 hists__total_period(struct hists *hists); +void hists__reset_stats(struct hists *hists); +void hists__inc_stats(struct hists *hists, struct hist_entry *h); +void hists__inc_nr_events(struct hists *hists, u32 type); +void hists__inc_nr_samples(struct hists *hists, bool filtered); +void events_stats__inc(struct events_stats *stats, u32 type); +size_t events_stats__fprintf(struct events_stats *stats, FILE *fp); + +size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, + int max_cols, float min_pcnt, FILE *fp); +size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp); + +void hists__filter_by_dso(struct hists *hists); +void hists__filter_by_thread(struct hists *hists); +void hists__filter_by_symbol(struct hists *hists); + +static inline bool hists__has_filter(struct hists *hists) +{ + return hists->thread_filter || hists->dso_filter || + hists->symbol_filter_str; +} + +u16 hists__col_len(struct hists *hists, enum hist_column col); +void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len); +bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len); +void hists__reset_col_len(struct hists *hists); +void hists__calc_col_len(struct hists *hists, struct hist_entry *he); + +void hists__match(struct hists *leader, struct hists *other); +int hists__link(struct hists *leader, struct hists *other); + +struct hists_evsel { + struct perf_evsel evsel; + struct hists hists; +}; + +static inline struct perf_evsel *hists_to_evsel(struct hists *hists) +{ + struct hists_evsel *hevsel = container_of(hists, struct hists_evsel, hists); + return &hevsel->evsel; +} + +static inline struct hists *evsel__hists(struct perf_evsel *evsel) +{ + struct hists_evsel *hevsel = (struct hists_evsel *)evsel; + return &hevsel->hists; +} + +int hists__init(void); + +struct perf_hpp { + char *buf; + size_t size; + const char *sep; + void *ptr; +}; + +struct perf_hpp_fmt { + const char *name; + int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct perf_evsel *evsel); + int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct perf_evsel *evsel); + int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he); + int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he); + int64_t (*cmp)(struct perf_hpp_fmt *fmt, + struct hist_entry *a, struct hist_entry *b); + int64_t (*collapse)(struct perf_hpp_fmt *fmt, + struct hist_entry *a, struct hist_entry *b); + int64_t (*sort)(struct perf_hpp_fmt *fmt, + struct hist_entry *a, struct hist_entry *b); + + struct list_head list; + struct list_head sort_list; + bool elide; + int len; + int user_len; +}; + +extern struct list_head perf_hpp__list; +extern struct list_head perf_hpp__sort_list; + +#define perf_hpp__for_each_format(format) \ + list_for_each_entry(format, &perf_hpp__list, list) + +#define perf_hpp__for_each_format_safe(format, tmp) \ + list_for_each_entry_safe(format, tmp, &perf_hpp__list, list) + +#define perf_hpp__for_each_sort_list(format) \ + list_for_each_entry(format, &perf_hpp__sort_list, sort_list) + +#define 
perf_hpp__for_each_sort_list_safe(format, tmp) \ + list_for_each_entry_safe(format, tmp, &perf_hpp__sort_list, sort_list) + +extern struct perf_hpp_fmt perf_hpp__format[]; + +enum { + /* Matches perf_hpp__format array. */ + PERF_HPP__OVERHEAD, + PERF_HPP__OVERHEAD_SYS, + PERF_HPP__OVERHEAD_US, + PERF_HPP__OVERHEAD_GUEST_SYS, + PERF_HPP__OVERHEAD_GUEST_US, + PERF_HPP__OVERHEAD_ACC, + PERF_HPP__SAMPLES, + PERF_HPP__PERIOD, + + PERF_HPP__MAX_INDEX +}; + +void perf_hpp__init(void); +void perf_hpp__column_register(struct perf_hpp_fmt *format); +void perf_hpp__column_unregister(struct perf_hpp_fmt *format); +void perf_hpp__column_enable(unsigned col); +void perf_hpp__column_disable(unsigned col); +void perf_hpp__cancel_cumulate(void); + +void perf_hpp__register_sort_field(struct perf_hpp_fmt *format); +void perf_hpp__setup_output_field(void); +void perf_hpp__reset_output_field(void); +void perf_hpp__append_sort_keys(void); + +bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format); +bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b); + +static inline bool perf_hpp__should_skip(struct perf_hpp_fmt *format) +{ + return format->elide; +} + +void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists); +void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists); +void perf_hpp__set_user_width(const char *width_list_str); + +typedef u64 (*hpp_field_fn)(struct hist_entry *he); +typedef int (*hpp_callback_fn)(struct perf_hpp *hpp, bool front); +typedef int (*hpp_snprint_fn)(struct perf_hpp *hpp, const char *fmt, ...); + +int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he, hpp_field_fn get_field, + const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent); +int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he, hpp_field_fn get_field, + const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent); + +static inline void advance_hpp(struct perf_hpp *hpp, int inc) +{ + hpp->buf += inc; + hpp->size -= inc; +} + +static inline size_t perf_hpp__use_color(void) +{ + return !symbol_conf.field_sep; +} + +static inline size_t perf_hpp__color_overhead(void) +{ + return perf_hpp__use_color() ? 
+ (COLOR_MAXLEN + sizeof(PERF_COLOR_RESET)) * PERF_HPP__MAX_INDEX + : 0; +} + +struct perf_evlist; + +struct hist_browser_timer { + void (*timer)(void *arg); + void *arg; + int refresh; +}; + +#ifdef HAVE_SLANG_SUPPORT +#include "../ui/keysyms.h" +int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel, + struct hist_browser_timer *hbt); + +int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel, + struct hist_browser_timer *hbt); + +int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, + struct hist_browser_timer *hbt, + float min_pcnt, + struct perf_session_env *env); +int script_browse(const char *script_opt); +#else +static inline +int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused, + const char *help __maybe_unused, + struct hist_browser_timer *hbt __maybe_unused, + float min_pcnt __maybe_unused, + struct perf_session_env *env __maybe_unused) +{ + return 0; +} +static inline int map_symbol__tui_annotate(struct map_symbol *ms __maybe_unused, + struct perf_evsel *evsel __maybe_unused, + struct hist_browser_timer *hbt __maybe_unused) +{ + return 0; +} + +static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused, + struct perf_evsel *evsel __maybe_unused, + struct hist_browser_timer *hbt __maybe_unused) +{ + return 0; +} + +static inline int script_browse(const char *script_opt __maybe_unused) +{ + return 0; +} + +#define K_LEFT -1000 +#define K_RIGHT -2000 +#define K_SWITCH_INPUT_DATA -3000 +#endif + +unsigned int hists__sort_list_width(struct hists *hists); + +struct option; +int parse_filter_percentage(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused); +int perf_hist_config(const char *var, const char *value); + +#endif /* __PERF_HIST_H */ diff --git a/kernel/tools/perf/util/include/asm/alternative-asm.h b/kernel/tools/perf/util/include/asm/alternative-asm.h new file mode 100644 index 000000000..3a3a0f164 --- /dev/null +++ b/kernel/tools/perf/util/include/asm/alternative-asm.h @@ -0,0 +1,9 @@ +#ifndef _PERF_ASM_ALTERNATIVE_ASM_H +#define _PERF_ASM_ALTERNATIVE_ASM_H + +/* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */ + +#define altinstruction_entry # +#define ALTERNATIVE_2 # + +#endif diff --git a/kernel/tools/perf/util/include/asm/asm-offsets.h b/kernel/tools/perf/util/include/asm/asm-offsets.h new file mode 100644 index 000000000..ed5389425 --- /dev/null +++ b/kernel/tools/perf/util/include/asm/asm-offsets.h @@ -0,0 +1 @@ +/* stub */ diff --git a/kernel/tools/perf/util/include/asm/byteorder.h b/kernel/tools/perf/util/include/asm/byteorder.h new file mode 100644 index 000000000..2a9bdc066 --- /dev/null +++ b/kernel/tools/perf/util/include/asm/byteorder.h @@ -0,0 +1,2 @@ +#include +#include "../../../../include/uapi/linux/swab.h" diff --git a/kernel/tools/perf/util/include/asm/cpufeature.h b/kernel/tools/perf/util/include/asm/cpufeature.h new file mode 100644 index 000000000..acffd5e4d --- /dev/null +++ b/kernel/tools/perf/util/include/asm/cpufeature.h @@ -0,0 +1,9 @@ + +#ifndef PERF_CPUFEATURE_H +#define PERF_CPUFEATURE_H + +/* cpufeature.h ... 
dummy header file for including arch/x86/lib/memcpy_64.S */ + +#define X86_FEATURE_REP_GOOD 0 + +#endif /* PERF_CPUFEATURE_H */ diff --git a/kernel/tools/perf/util/include/asm/dwarf2.h b/kernel/tools/perf/util/include/asm/dwarf2.h new file mode 100644 index 000000000..afe38199e --- /dev/null +++ b/kernel/tools/perf/util/include/asm/dwarf2.h @@ -0,0 +1,13 @@ + +#ifndef PERF_DWARF2_H +#define PERF_DWARF2_H + +/* dwarf2.h ... dummy header file for including arch/x86/lib/mem{cpy,set}_64.S */ + +#define CFI_STARTPROC +#define CFI_ENDPROC +#define CFI_REMEMBER_STATE +#define CFI_RESTORE_STATE + +#endif /* PERF_DWARF2_H */ + diff --git a/kernel/tools/perf/util/include/asm/swab.h b/kernel/tools/perf/util/include/asm/swab.h new file mode 100644 index 000000000..ed5389425 --- /dev/null +++ b/kernel/tools/perf/util/include/asm/swab.h @@ -0,0 +1 @@ +/* stub */ diff --git a/kernel/tools/perf/util/include/asm/system.h b/kernel/tools/perf/util/include/asm/system.h new file mode 100644 index 000000000..710cecca9 --- /dev/null +++ b/kernel/tools/perf/util/include/asm/system.h @@ -0,0 +1 @@ +/* Empty */ diff --git a/kernel/tools/perf/util/include/asm/uaccess.h b/kernel/tools/perf/util/include/asm/uaccess.h new file mode 100644 index 000000000..d0f72b8fc --- /dev/null +++ b/kernel/tools/perf/util/include/asm/uaccess.h @@ -0,0 +1,14 @@ +#ifndef _PERF_ASM_UACCESS_H_ +#define _PERF_ASM_UACCESS_H_ + +#define __get_user(src, dest) \ +({ \ + (src) = *dest; \ + 0; \ +}) + +#define get_user __get_user + +#define access_ok(type, addr, size) 1 + +#endif diff --git a/kernel/tools/perf/util/include/asm/unistd_32.h b/kernel/tools/perf/util/include/asm/unistd_32.h new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/kernel/tools/perf/util/include/asm/unistd_32.h @@ -0,0 +1 @@ + diff --git a/kernel/tools/perf/util/include/asm/unistd_64.h b/kernel/tools/perf/util/include/asm/unistd_64.h new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/kernel/tools/perf/util/include/asm/unistd_64.h @@ -0,0 +1 @@ + diff --git a/kernel/tools/perf/util/include/dwarf-regs.h b/kernel/tools/perf/util/include/dwarf-regs.h new file mode 100644 index 000000000..8f149655f --- /dev/null +++ b/kernel/tools/perf/util/include/dwarf-regs.h @@ -0,0 +1,8 @@ +#ifndef _PERF_DWARF_REGS_H_ +#define _PERF_DWARF_REGS_H_ + +#ifdef HAVE_DWARF_SUPPORT +const char *get_arch_regstr(unsigned int n); +#endif + +#endif diff --git a/kernel/tools/perf/util/include/linux/bitmap.h b/kernel/tools/perf/util/include/linux/bitmap.h new file mode 100644 index 000000000..40bd21488 --- /dev/null +++ b/kernel/tools/perf/util/include/linux/bitmap.h @@ -0,0 +1,66 @@ +#ifndef _PERF_BITOPS_H +#define _PERF_BITOPS_H + +#include +#include + +#define DECLARE_BITMAP(name,bits) \ + unsigned long name[BITS_TO_LONGS(bits)] + +int __bitmap_weight(const unsigned long *bitmap, int bits); +void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, int bits); + +#define BITMAP_LAST_WORD_MASK(nbits) \ +( \ + ((nbits) % BITS_PER_LONG) ? 
\ + (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ +) + +#define small_const_nbits(nbits) \ + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) + +static inline void bitmap_zero(unsigned long *dst, int nbits) +{ + if (small_const_nbits(nbits)) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} + +static inline int bitmap_weight(const unsigned long *src, int nbits) +{ + if (small_const_nbits(nbits)) + return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); + return __bitmap_weight(src, nbits); +} + +static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2, int nbits) +{ + if (small_const_nbits(nbits)) + *dst = *src1 | *src2; + else + __bitmap_or(dst, src1, src2, nbits); +} + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + */ +static inline int test_and_set_bit(int nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + + old = *p; + *p = old | mask; + + return (old & mask) != 0; +} + +#endif /* _PERF_BITOPS_H */ diff --git a/kernel/tools/perf/util/include/linux/const.h b/kernel/tools/perf/util/include/linux/const.h new file mode 100644 index 000000000..c10a35e1a --- /dev/null +++ b/kernel/tools/perf/util/include/linux/const.h @@ -0,0 +1 @@ +#include "../../../../include/uapi/linux/const.h" diff --git a/kernel/tools/perf/util/include/linux/ctype.h b/kernel/tools/perf/util/include/linux/ctype.h new file mode 100644 index 000000000..a53d4ee1e --- /dev/null +++ b/kernel/tools/perf/util/include/linux/ctype.h @@ -0,0 +1 @@ +#include "../util.h" diff --git a/kernel/tools/perf/util/include/linux/kernel.h b/kernel/tools/perf/util/include/linux/kernel.h new file mode 100644 index 000000000..09e8e7aea --- /dev/null +++ b/kernel/tools/perf/util/include/linux/kernel.h @@ -0,0 +1,107 @@ +#ifndef PERF_LINUX_KERNEL_H_ +#define PERF_LINUX_KERNEL_H_ + +#include +#include +#include +#include + +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) + +#define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1) +#define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) + +#ifndef offsetof +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) +#endif + +#ifndef container_of +/** + * container_of - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. + * + */ +#define container_of(ptr, type, member) ({ \ + const typeof(((type *)0)->member) * __mptr = (ptr); \ + (type *)((char *)__mptr - offsetof(type, member)); }) +#endif + +#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) + +#ifndef max +#define max(x, y) ({ \ + typeof(x) _max1 = (x); \ + typeof(y) _max2 = (y); \ + (void) (&_max1 == &_max2); \ + _max1 > _max2 ? _max1 : _max2; }) +#endif + +#ifndef min +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? 
_min1 : _min2; })
+#endif
+
+#ifndef roundup
+#define roundup(x, y) ( \
+{ \
+	const typeof(y) __y = y; \
+	(((x) + (__y - 1)) / __y) * __y; \
+} \
+)
+#endif
+
+#ifndef BUG_ON
+#ifdef NDEBUG
+#define BUG_ON(cond) do { if (cond) {} } while (0)
+#else
+#define BUG_ON(cond) assert(!(cond))
+#endif
+#endif
+
+/*
+ * Both need more care to handle endianness
+ * (Don't use bitmap_copy_le() for now)
+ */
+#define cpu_to_le64(x) (x)
+#define cpu_to_le32(x) (x)
+
+static inline int
+vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+	int i;
+	ssize_t ssize = size;
+
+	i = vsnprintf(buf, size, fmt, args);
+
+	return (i >= ssize) ? (ssize - 1) : i;
+}
+
+static inline int scnprintf(char *buf, size_t size, const char *fmt, ...)
+{
+	va_list args;
+	ssize_t ssize = size;
+	int i;
+
+	va_start(args, fmt);
+	i = vsnprintf(buf, size, fmt, args);
+	va_end(args);
+
+	return (i >= ssize) ? (ssize - 1) : i;
+}
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+#endif
diff --git a/kernel/tools/perf/util/include/linux/linkage.h b/kernel/tools/perf/util/include/linux/linkage.h
new file mode 100644
index 000000000..06387cffe
--- /dev/null
+++ b/kernel/tools/perf/util/include/linux/linkage.h
@@ -0,0 +1,13 @@
+
+#ifndef PERF_LINUX_LINKAGE_H_
+#define PERF_LINUX_LINKAGE_H_
+
+/* linkage.h ... for including arch/x86/lib/memcpy_64.S */
+
+#define ENTRY(name) \
+	.globl name; \
+	name:
+
+#define ENDPROC(name)
+
+#endif /* PERF_LINUX_LINKAGE_H_ */
diff --git a/kernel/tools/perf/util/include/linux/list.h b/kernel/tools/perf/util/include/linux/list.h
new file mode 100644
index 000000000..76ddbc726
--- /dev/null
+++ b/kernel/tools/perf/util/include/linux/list.h
@@ -0,0 +1,29 @@
+#include
+#include
+
+#include "../../../../include/linux/list.h"
+
+#ifndef PERF_LIST_H
+#define PERF_LIST_H
+/**
+ * list_del_range - deletes range of entries from list.
+ * @begin: first element in the range to delete from the list.
+ * @end: last element in the range to delete from the list.
+ * Note: list_empty on the range of entries does not return true after this,
+ * the entries are in an undefined state.
+ */
+static inline void list_del_range(struct list_head *begin,
+				  struct list_head *end)
+{
+	begin->prev->next = end->next;
+	end->next->prev = begin->prev;
+}
+
+/**
+ * list_for_each_from - iterate over a list from one of its nodes
+ * @pos: the &struct list_head to use as a loop cursor, from where to start
+ * @head: the head for your list.
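+ *
+ * Unlike list_for_each(), @pos is not initialised from @head, so a
+ * caller can park a cursor and resume later, e.g. (illustrative only,
+ * saved_cursor and do_something are hypothetical):
+ *	pos = saved_cursor;
+ *	list_for_each_from(pos, head)
+ *		do_something(pos);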
+ */ +#define list_for_each_from(pos, head) \ + for (; pos != (head); pos = pos->next) +#endif diff --git a/kernel/tools/perf/util/include/linux/poison.h b/kernel/tools/perf/util/include/linux/poison.h new file mode 100644 index 000000000..fef6dbc9c --- /dev/null +++ b/kernel/tools/perf/util/include/linux/poison.h @@ -0,0 +1 @@ +#include "../../../../include/linux/poison.h" diff --git a/kernel/tools/perf/util/include/linux/rbtree.h b/kernel/tools/perf/util/include/linux/rbtree.h new file mode 100644 index 000000000..2a030c5af --- /dev/null +++ b/kernel/tools/perf/util/include/linux/rbtree.h @@ -0,0 +1,2 @@ +#include +#include "../../../../include/linux/rbtree.h" diff --git a/kernel/tools/perf/util/include/linux/rbtree_augmented.h b/kernel/tools/perf/util/include/linux/rbtree_augmented.h new file mode 100644 index 000000000..9d6fcdf17 --- /dev/null +++ b/kernel/tools/perf/util/include/linux/rbtree_augmented.h @@ -0,0 +1,2 @@ +#include +#include "../../../../include/linux/rbtree_augmented.h" diff --git a/kernel/tools/perf/util/include/linux/string.h b/kernel/tools/perf/util/include/linux/string.h new file mode 100644 index 000000000..6f19c548e --- /dev/null +++ b/kernel/tools/perf/util/include/linux/string.h @@ -0,0 +1,3 @@ +#include + +void *memdup(const void *src, size_t len); diff --git a/kernel/tools/perf/util/intlist.c b/kernel/tools/perf/util/intlist.c new file mode 100644 index 000000000..89715b64a --- /dev/null +++ b/kernel/tools/perf/util/intlist.c @@ -0,0 +1,146 @@ +/* + * Based on intlist.c by: + * (c) 2009 Arnaldo Carvalho de Melo + * + * Licensed under the GPLv2. + */ + +#include +#include +#include + +#include "intlist.h" + +static struct rb_node *intlist__node_new(struct rblist *rblist __maybe_unused, + const void *entry) +{ + int i = (int)((long)entry); + struct rb_node *rc = NULL; + struct int_node *node = malloc(sizeof(*node)); + + if (node != NULL) { + node->i = i; + node->priv = NULL; + rc = &node->rb_node; + } + + return rc; +} + +static void int_node__delete(struct int_node *ilist) +{ + free(ilist); +} + +static void intlist__node_delete(struct rblist *rblist __maybe_unused, + struct rb_node *rb_node) +{ + struct int_node *node = container_of(rb_node, struct int_node, rb_node); + + int_node__delete(node); +} + +static int intlist__node_cmp(struct rb_node *rb_node, const void *entry) +{ + int i = (int)((long)entry); + struct int_node *node = container_of(rb_node, struct int_node, rb_node); + + return node->i - i; +} + +int intlist__add(struct intlist *ilist, int i) +{ + return rblist__add_node(&ilist->rblist, (void *)((long)i)); +} + +void intlist__remove(struct intlist *ilist, struct int_node *node) +{ + rblist__remove_node(&ilist->rblist, &node->rb_node); +} + +static struct int_node *__intlist__findnew(struct intlist *ilist, + int i, bool create) +{ + struct int_node *node = NULL; + struct rb_node *rb_node; + + if (ilist == NULL) + return NULL; + + if (create) + rb_node = rblist__findnew(&ilist->rblist, (void *)((long)i)); + else + rb_node = rblist__find(&ilist->rblist, (void *)((long)i)); + + if (rb_node) + node = container_of(rb_node, struct int_node, rb_node); + + return node; +} + +struct int_node *intlist__find(struct intlist *ilist, int i) +{ + return __intlist__findnew(ilist, i, false); +} + +struct int_node *intlist__findnew(struct intlist *ilist, int i) +{ + return __intlist__findnew(ilist, i, true); +} + +static int intlist__parse_list(struct intlist *ilist, const char *s) +{ + char *sep; + int err; + + do { + long value = strtol(s, &sep, 10); + err = 
-EINVAL;
+		if (*sep != ',' && *sep != '\0')
+			break;
+		err = intlist__add(ilist, value);
+		if (err)
+			break;
+		s = sep + 1;
+	} while (*sep != '\0');
+
+	return err;
+}
+
+struct intlist *intlist__new(const char *slist)
+{
+	struct intlist *ilist = malloc(sizeof(*ilist));
+
+	if (ilist != NULL) {
+		rblist__init(&ilist->rblist);
+		ilist->rblist.node_cmp = intlist__node_cmp;
+		ilist->rblist.node_new = intlist__node_new;
+		ilist->rblist.node_delete = intlist__node_delete;
+
+		if (slist && intlist__parse_list(ilist, slist))
+			goto out_delete;
+	}
+
+	return ilist;
+out_delete:
+	intlist__delete(ilist);
+	return NULL;
+}
+
+void intlist__delete(struct intlist *ilist)
+{
+	if (ilist != NULL)
+		rblist__delete(&ilist->rblist);
+}
+
+struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx)
+{
+	struct int_node *node = NULL;
+	struct rb_node *rb_node;
+
+	rb_node = rblist__entry(&ilist->rblist, idx);
+	if (rb_node)
+		node = container_of(rb_node, struct int_node, rb_node);
+
+	return node;
+}
diff --git a/kernel/tools/perf/util/intlist.h b/kernel/tools/perf/util/intlist.h
new file mode 100644
index 000000000..aa6877d36
--- /dev/null
+++ b/kernel/tools/perf/util/intlist.h
@@ -0,0 +1,77 @@
+#ifndef __PERF_INTLIST_H
+#define __PERF_INTLIST_H
+
+#include
+#include
+
+#include "rblist.h"
+
+struct int_node {
+	struct rb_node rb_node;
+	int i;
+	void *priv;
+};
+
+struct intlist {
+	struct rblist rblist;
+};
+
+struct intlist *intlist__new(const char *slist);
+void intlist__delete(struct intlist *ilist);
+
+void intlist__remove(struct intlist *ilist, struct int_node *in);
+int intlist__add(struct intlist *ilist, int i);
+
+struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx);
+struct int_node *intlist__find(struct intlist *ilist, int i);
+struct int_node *intlist__findnew(struct intlist *ilist, int i);
+
+static inline bool intlist__has_entry(struct intlist *ilist, int i)
+{
+	return intlist__find(ilist, i) != NULL;
+}
+
+static inline bool intlist__empty(const struct intlist *ilist)
+{
+	return rblist__empty(&ilist->rblist);
+}
+
+static inline unsigned int intlist__nr_entries(const struct intlist *ilist)
+{
+	return rblist__nr_entries(&ilist->rblist);
+}
+
+/* For intlist iteration */
+static inline struct int_node *intlist__first(struct intlist *ilist)
+{
+	struct rb_node *rn = rb_first(&ilist->rblist.entries);
+	return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
+}
+static inline struct int_node *intlist__next(struct int_node *in)
+{
+	struct rb_node *rn;
+	if (!in)
+		return NULL;
+	rn = rb_next(&in->rb_node);
+	return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
+}
+
+/**
+ * intlist__for_each - iterate over an intlist
+ * @pos: the &struct int_node to use as a loop cursor.
+ * @ilist: the &struct intlist for the loop.
+ */
+#define intlist__for_each(pos, ilist)	\
+	for (pos = intlist__first(ilist); pos; pos = intlist__next(pos))
+
+/**
+ * intlist__for_each_safe - iterate over an intlist safe against removal of
+ * int_node
+ * @pos: the &struct int_node to use as a loop cursor.
+ * @n: another &struct int_node to use as temporary storage.
+ * @ilist: the &struct intlist for the loop.
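+ *
+ * @n caches the successor before the loop body runs, so the body may
+ * safely free @pos, e.g. (illustrative only):
+ *	intlist__for_each_safe(pos, n, ilist)
+ *		intlist__remove(ilist, pos);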
+ */ +#define intlist__for_each_safe(pos, n, ilist) \ + for (pos = intlist__first(ilist), n = intlist__next(pos); pos;\ + pos = n, n = intlist__next(n)) +#endif /* __PERF_INTLIST_H */ diff --git a/kernel/tools/perf/util/kvm-stat.h b/kernel/tools/perf/util/kvm-stat.h new file mode 100644 index 000000000..ae825d4ec --- /dev/null +++ b/kernel/tools/perf/util/kvm-stat.h @@ -0,0 +1,140 @@ +#ifndef __PERF_KVM_STAT_H +#define __PERF_KVM_STAT_H + +#include "../perf.h" +#include "evsel.h" +#include "evlist.h" +#include "session.h" +#include "tool.h" +#include "stat.h" + +struct event_key { + #define INVALID_KEY (~0ULL) + u64 key; + int info; + struct exit_reasons_table *exit_reasons; +}; + +struct kvm_event_stats { + u64 time; + struct stats stats; +}; + +struct kvm_event { + struct list_head hash_entry; + struct rb_node rb; + + struct event_key key; + + struct kvm_event_stats total; + + #define DEFAULT_VCPU_NUM 8 + int max_vcpu; + struct kvm_event_stats *vcpu; +}; + +typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int); + +struct kvm_event_key { + const char *name; + key_cmp_fun key; +}; + +struct perf_kvm_stat; + +struct child_event_ops { + void (*get_key)(struct perf_evsel *evsel, + struct perf_sample *sample, + struct event_key *key); + const char *name; +}; + +struct kvm_events_ops { + bool (*is_begin_event)(struct perf_evsel *evsel, + struct perf_sample *sample, + struct event_key *key); + bool (*is_end_event)(struct perf_evsel *evsel, + struct perf_sample *sample, struct event_key *key); + struct child_event_ops *child_ops; + void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key, + char *decode); + const char *name; +}; + +struct exit_reasons_table { + unsigned long exit_code; + const char *reason; +}; + +#define EVENTS_BITS 12 +#define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS) + +struct perf_kvm_stat { + struct perf_tool tool; + struct record_opts opts; + struct perf_evlist *evlist; + struct perf_session *session; + + const char *file_name; + const char *report_event; + const char *sort_key; + int trace_vcpu; + + struct exit_reasons_table *exit_reasons; + const char *exit_reasons_isa; + + struct kvm_events_ops *events_ops; + key_cmp_fun compare; + struct list_head kvm_events_cache[EVENTS_CACHE_SIZE]; + + u64 total_time; + u64 total_count; + u64 lost_events; + u64 duration; + + struct intlist *pid_list; + + struct rb_root result; + + int timerfd; + unsigned int display_time; + bool live; + bool force; +}; + +struct kvm_reg_events_ops { + const char *name; + struct kvm_events_ops *ops; +}; + +void exit_event_get_key(struct perf_evsel *evsel, + struct perf_sample *sample, + struct event_key *key); +bool exit_event_begin(struct perf_evsel *evsel, + struct perf_sample *sample, + struct event_key *key); +bool exit_event_end(struct perf_evsel *evsel, + struct perf_sample *sample, + struct event_key *key); +void exit_event_decode_key(struct perf_kvm_stat *kvm, + struct event_key *key, + char *decode); + +bool kvm_exit_event(struct perf_evsel *evsel); +bool kvm_entry_event(struct perf_evsel *evsel); + +#define define_exit_reasons_table(name, symbols) \ + static struct exit_reasons_table name[] = { \ + symbols, { -1, NULL } \ + } + +/* + * arch specific callbacks and data structures + */ +int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid); + +extern const char * const kvm_events_tp[]; +extern struct kvm_reg_events_ops kvm_reg_events_ops[]; +extern const char * const kvm_skip_events[]; + +#endif /* __PERF_KVM_STAT_H */ diff --git 
a/kernel/tools/perf/util/levenshtein.c b/kernel/tools/perf/util/levenshtein.c
new file mode 100644
index 000000000..e521d1516
--- /dev/null
+++ b/kernel/tools/perf/util/levenshtein.c
@@ -0,0 +1,84 @@
+#include "cache.h"
+#include "levenshtein.h"
+
+/*
+ * This function implements the Damerau-Levenshtein algorithm to
+ * calculate a distance between strings.
+ *
+ * Basically, it says how many letters need to be swapped, substituted,
+ * deleted from, or added to string1, at least, to get string2.
+ *
+ * The idea is to build a distance matrix for the substrings of both
+ * strings. To avoid a large space complexity, only the last three rows
+ * are kept in memory (if swaps had the same or higher cost as one deletion
+ * plus one insertion, only two rows would be needed).
+ *
+ * At any stage, "i + 1" denotes the length of the current substring of
+ * string1 that the distance is calculated for.
+ *
+ * row2 holds the current row, row1 the previous row (i.e. for the substring
+ * of string1 of length "i"), and row0 the row before that.
+ *
+ * In other words, at the start of the big loop, row2[j + 1] contains the
+ * Damerau-Levenshtein distance between the substring of string1 of length
+ * "i" and the substring of string2 of length "j + 1".
+ *
+ * All the big loop does is determine the partial minimum-cost paths.
+ *
+ * It does so by calculating the costs of the path ending in characters
+ * i (in string1) and j (in string2), respectively, given that the last
+ * operation is a substitution, a swap, a deletion, or an insertion.
+ *
+ * This implementation allows the costs to be weighted:
+ *
+ * - w (as in "sWap")
+ * - s (as in "Substitution")
+ * - a (for insertion, AKA "Add")
+ * - d (as in "Deletion")
+ *
+ * Note that this algorithm calculates a distance _iff_ d == a.
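+ *
+ * A worked example, assuming weights w=1, s=2, a=2, d=2: for "perf"
+ * vs "pref" the cheapest edit is the single swap of the adjacent
+ * 'e' and 'r', so levenshtein("perf", "pref", 1, 2, 2, 2) returns 1,
+ * while the pure-substitution path would cost 4.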
+ */ +int levenshtein(const char *string1, const char *string2, + int w, int s, int a, int d) +{ + int len1 = strlen(string1), len2 = strlen(string2); + int *row0 = malloc(sizeof(int) * (len2 + 1)); + int *row1 = malloc(sizeof(int) * (len2 + 1)); + int *row2 = malloc(sizeof(int) * (len2 + 1)); + int i, j; + + for (j = 0; j <= len2; j++) + row1[j] = j * a; + for (i = 0; i < len1; i++) { + int *dummy; + + row2[0] = (i + 1) * d; + for (j = 0; j < len2; j++) { + /* substitution */ + row2[j + 1] = row1[j] + s * (string1[i] != string2[j]); + /* swap */ + if (i > 0 && j > 0 && string1[i - 1] == string2[j] && + string1[i] == string2[j - 1] && + row2[j + 1] > row0[j - 1] + w) + row2[j + 1] = row0[j - 1] + w; + /* deletion */ + if (row2[j + 1] > row1[j + 1] + d) + row2[j + 1] = row1[j + 1] + d; + /* insertion */ + if (row2[j + 1] > row2[j] + a) + row2[j + 1] = row2[j] + a; + } + + dummy = row0; + row0 = row1; + row1 = row2; + row2 = dummy; + } + + i = row1[len2]; + free(row0); + free(row1); + free(row2); + + return i; +} diff --git a/kernel/tools/perf/util/levenshtein.h b/kernel/tools/perf/util/levenshtein.h new file mode 100644 index 000000000..b0fcb6d8a --- /dev/null +++ b/kernel/tools/perf/util/levenshtein.h @@ -0,0 +1,8 @@ +#ifndef __PERF_LEVENSHTEIN_H +#define __PERF_LEVENSHTEIN_H + +int levenshtein(const char *string1, const char *string2, + int swap_penalty, int substition_penalty, + int insertion_penalty, int deletion_penalty); + +#endif /* __PERF_LEVENSHTEIN_H */ diff --git a/kernel/tools/perf/util/lzma.c b/kernel/tools/perf/util/lzma.c new file mode 100644 index 000000000..95a1acb61 --- /dev/null +++ b/kernel/tools/perf/util/lzma.c @@ -0,0 +1,95 @@ +#include +#include +#include +#include "util.h" +#include "debug.h" + +#define BUFSIZE 8192 + +static const char *lzma_strerror(lzma_ret ret) +{ + switch ((int) ret) { + case LZMA_MEM_ERROR: + return "Memory allocation failed"; + case LZMA_OPTIONS_ERROR: + return "Unsupported decompressor flags"; + case LZMA_FORMAT_ERROR: + return "The input is not in the .xz format"; + case LZMA_DATA_ERROR: + return "Compressed file is corrupt"; + case LZMA_BUF_ERROR: + return "Compressed file is truncated or otherwise corrupt"; + default: + return "Unknown error, possibly a bug"; + } +} + +int lzma_decompress_to_file(const char *input, int output_fd) +{ + lzma_action action = LZMA_RUN; + lzma_stream strm = LZMA_STREAM_INIT; + lzma_ret ret; + + u8 buf_in[BUFSIZE]; + u8 buf_out[BUFSIZE]; + FILE *infile; + + infile = fopen(input, "rb"); + if (!infile) { + pr_err("lzma: fopen failed on %s: '%s'\n", + input, strerror(errno)); + return -1; + } + + ret = lzma_stream_decoder(&strm, UINT64_MAX, LZMA_CONCATENATED); + if (ret != LZMA_OK) { + pr_err("lzma: lzma_stream_decoder failed %s (%d)\n", + lzma_strerror(ret), ret); + return -1; + } + + strm.next_in = NULL; + strm.avail_in = 0; + strm.next_out = buf_out; + strm.avail_out = sizeof(buf_out); + + while (1) { + if (strm.avail_in == 0 && !feof(infile)) { + strm.next_in = buf_in; + strm.avail_in = fread(buf_in, 1, sizeof(buf_in), infile); + + if (ferror(infile)) { + pr_err("lzma: read error: %s\n", strerror(errno)); + return -1; + } + + if (feof(infile)) + action = LZMA_FINISH; + } + + ret = lzma_code(&strm, action); + + if (strm.avail_out == 0 || ret == LZMA_STREAM_END) { + ssize_t write_size = sizeof(buf_out) - strm.avail_out; + + if (writen(output_fd, buf_out, write_size) != write_size) { + pr_err("lzma: write error: %s\n", strerror(errno)); + return -1; + } + + strm.next_out = buf_out; + strm.avail_out = 
sizeof(buf_out); + } + + if (ret != LZMA_OK) { + if (ret == LZMA_STREAM_END) + return 0; + + pr_err("lzma: failed %s\n", lzma_strerror(ret)); + return -1; + } + } + + fclose(infile); + return 0; +} diff --git a/kernel/tools/perf/util/machine.c b/kernel/tools/perf/util/machine.c new file mode 100644 index 000000000..527e032e2 --- /dev/null +++ b/kernel/tools/perf/util/machine.c @@ -0,0 +1,1847 @@ +#include "callchain.h" +#include "debug.h" +#include "event.h" +#include "evsel.h" +#include "hist.h" +#include "machine.h" +#include "map.h" +#include "sort.h" +#include "strlist.h" +#include "thread.h" +#include "vdso.h" +#include +#include +#include "unwind.h" +#include "linux/hash.h" + +static void dsos__init(struct dsos *dsos) +{ + INIT_LIST_HEAD(&dsos->head); + dsos->root = RB_ROOT; +} + +int machine__init(struct machine *machine, const char *root_dir, pid_t pid) +{ + map_groups__init(&machine->kmaps, machine); + RB_CLEAR_NODE(&machine->rb_node); + dsos__init(&machine->user_dsos); + dsos__init(&machine->kernel_dsos); + + machine->threads = RB_ROOT; + INIT_LIST_HEAD(&machine->dead_threads); + machine->last_match = NULL; + + machine->vdso_info = NULL; + + machine->pid = pid; + + machine->symbol_filter = NULL; + machine->id_hdr_size = 0; + machine->comm_exec = false; + machine->kernel_start = 0; + + machine->root_dir = strdup(root_dir); + if (machine->root_dir == NULL) + return -ENOMEM; + + if (pid != HOST_KERNEL_ID) { + struct thread *thread = machine__findnew_thread(machine, -1, + pid); + char comm[64]; + + if (thread == NULL) + return -ENOMEM; + + snprintf(comm, sizeof(comm), "[guest/%d]", pid); + thread__set_comm(thread, comm, 0); + } + + machine->current_tid = NULL; + + return 0; +} + +struct machine *machine__new_host(void) +{ + struct machine *machine = malloc(sizeof(*machine)); + + if (machine != NULL) { + machine__init(machine, "", HOST_KERNEL_ID); + + if (machine__create_kernel_maps(machine) < 0) + goto out_delete; + } + + return machine; +out_delete: + free(machine); + return NULL; +} + +static void dsos__delete(struct dsos *dsos) +{ + struct dso *pos, *n; + + list_for_each_entry_safe(pos, n, &dsos->head, node) { + RB_CLEAR_NODE(&pos->rb_node); + list_del(&pos->node); + dso__delete(pos); + } +} + +void machine__delete_threads(struct machine *machine) +{ + struct rb_node *nd = rb_first(&machine->threads); + + while (nd) { + struct thread *t = rb_entry(nd, struct thread, rb_node); + + nd = rb_next(nd); + machine__remove_thread(machine, t); + } +} + +void machine__exit(struct machine *machine) +{ + map_groups__exit(&machine->kmaps); + dsos__delete(&machine->user_dsos); + dsos__delete(&machine->kernel_dsos); + vdso__exit(machine); + zfree(&machine->root_dir); + zfree(&machine->current_tid); +} + +void machine__delete(struct machine *machine) +{ + machine__exit(machine); + free(machine); +} + +void machines__init(struct machines *machines) +{ + machine__init(&machines->host, "", HOST_KERNEL_ID); + machines->guests = RB_ROOT; + machines->symbol_filter = NULL; +} + +void machines__exit(struct machines *machines) +{ + machine__exit(&machines->host); + /* XXX exit guest */ +} + +struct machine *machines__add(struct machines *machines, pid_t pid, + const char *root_dir) +{ + struct rb_node **p = &machines->guests.rb_node; + struct rb_node *parent = NULL; + struct machine *pos, *machine = malloc(sizeof(*machine)); + + if (machine == NULL) + return NULL; + + if (machine__init(machine, root_dir, pid) != 0) { + free(machine); + return NULL; + } + + machine->symbol_filter = machines->symbol_filter; 
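+
+	/*
+	 * Standard rbtree insertion: walk down keyed by pid to find the
+	 * empty slot (equal pids descend right), then link the new node
+	 * and rebalance.
+	 */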
+ + while (*p != NULL) { + parent = *p; + pos = rb_entry(parent, struct machine, rb_node); + if (pid < pos->pid) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&machine->rb_node, parent, p); + rb_insert_color(&machine->rb_node, &machines->guests); + + return machine; +} + +void machines__set_symbol_filter(struct machines *machines, + symbol_filter_t symbol_filter) +{ + struct rb_node *nd; + + machines->symbol_filter = symbol_filter; + machines->host.symbol_filter = symbol_filter; + + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { + struct machine *machine = rb_entry(nd, struct machine, rb_node); + + machine->symbol_filter = symbol_filter; + } +} + +void machines__set_comm_exec(struct machines *machines, bool comm_exec) +{ + struct rb_node *nd; + + machines->host.comm_exec = comm_exec; + + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { + struct machine *machine = rb_entry(nd, struct machine, rb_node); + + machine->comm_exec = comm_exec; + } +} + +struct machine *machines__find(struct machines *machines, pid_t pid) +{ + struct rb_node **p = &machines->guests.rb_node; + struct rb_node *parent = NULL; + struct machine *machine; + struct machine *default_machine = NULL; + + if (pid == HOST_KERNEL_ID) + return &machines->host; + + while (*p != NULL) { + parent = *p; + machine = rb_entry(parent, struct machine, rb_node); + if (pid < machine->pid) + p = &(*p)->rb_left; + else if (pid > machine->pid) + p = &(*p)->rb_right; + else + return machine; + if (!machine->pid) + default_machine = machine; + } + + return default_machine; +} + +struct machine *machines__findnew(struct machines *machines, pid_t pid) +{ + char path[PATH_MAX]; + const char *root_dir = ""; + struct machine *machine = machines__find(machines, pid); + + if (machine && (machine->pid == pid)) + goto out; + + if ((pid != HOST_KERNEL_ID) && + (pid != DEFAULT_GUEST_KERNEL_ID) && + (symbol_conf.guestmount)) { + sprintf(path, "%s/%d", symbol_conf.guestmount, pid); + if (access(path, R_OK)) { + static struct strlist *seen; + + if (!seen) + seen = strlist__new(true, NULL); + + if (!strlist__has_entry(seen, path)) { + pr_err("Can't access file %s\n", path); + strlist__add(seen, path); + } + machine = NULL; + goto out; + } + root_dir = path; + } + + machine = machines__add(machines, pid, root_dir); +out: + return machine; +} + +void machines__process_guests(struct machines *machines, + machine__process_t process, void *data) +{ + struct rb_node *nd; + + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + process(pos, data); + } +} + +char *machine__mmap_name(struct machine *machine, char *bf, size_t size) +{ + if (machine__is_host(machine)) + snprintf(bf, size, "[%s]", "kernel.kallsyms"); + else if (machine__is_default_guest(machine)) + snprintf(bf, size, "[%s]", "guest.kernel.kallsyms"); + else { + snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", + machine->pid); + } + + return bf; +} + +void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size) +{ + struct rb_node *node; + struct machine *machine; + + machines->host.id_hdr_size = id_hdr_size; + + for (node = rb_first(&machines->guests); node; node = rb_next(node)) { + machine = rb_entry(node, struct machine, rb_node); + machine->id_hdr_size = id_hdr_size; + } + + return; +} + +static void machine__update_thread_pid(struct machine *machine, + struct thread *th, pid_t pid) +{ + struct thread *leader; + + if (pid == th->pid_ || pid == -1 || 
th->pid_ != -1)
+		return;
+
+	th->pid_ = pid;
+
+	if (th->pid_ == th->tid)
+		return;
+
+	leader = machine__findnew_thread(machine, th->pid_, th->pid_);
+	if (!leader)
+		goto out_err;
+
+	if (!leader->mg)
+		leader->mg = map_groups__new(machine);
+
+	if (!leader->mg)
+		goto out_err;
+
+	if (th->mg == leader->mg)
+		return;
+
+	if (th->mg) {
+		/*
+		 * Maps are created from MMAP events which provide the pid and
+		 * tid. Consequently there never should be any maps on a thread
+		 * with an unknown pid. Just print an error if there are.
+		 */
+		if (!map_groups__empty(th->mg))
+			pr_err("Discarding thread maps for %d:%d\n",
+			       th->pid_, th->tid);
+		map_groups__delete(th->mg);
+	}
+
+	th->mg = map_groups__get(leader->mg);
+
+	return;
+
+out_err:
+	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
+}
+
+static struct thread *__machine__findnew_thread(struct machine *machine,
+						pid_t pid, pid_t tid,
+						bool create)
+{
+	struct rb_node **p = &machine->threads.rb_node;
+	struct rb_node *parent = NULL;
+	struct thread *th;
+
+	/*
+	 * Front-end cache - TID lookups come in blocks,
+	 * so most of the time we don't have to look up
+	 * the full rbtree:
+	 */
+	th = machine->last_match;
+	if (th != NULL) {
+		if (th->tid == tid) {
+			machine__update_thread_pid(machine, th, pid);
+			return th;
+		}
+
+		thread__zput(machine->last_match);
+	}
+
+	while (*p != NULL) {
+		parent = *p;
+		th = rb_entry(parent, struct thread, rb_node);
+
+		if (th->tid == tid) {
+			machine->last_match = thread__get(th);
+			machine__update_thread_pid(machine, th, pid);
+			return th;
+		}
+
+		if (tid < th->tid)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	if (!create)
+		return NULL;
+
+	th = thread__new(pid, tid);
+	if (th != NULL) {
+		rb_link_node(&th->rb_node, parent, p);
+		rb_insert_color(&th->rb_node, &machine->threads);
+
+		/*
+		 * We have to initialize map_groups separately
+		 * after rb tree is updated.
+		 *
+		 * The reason is that we call machine__findnew_thread
+		 * within thread__init_map_groups to find the thread
+		 * leader and that would screw up the rb tree.
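+		 *
+		 * (thread__init_map_groups() may itself insert the group
+		 * leader, which must only happen once our own node is
+		 * fully linked.)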
+ */ + if (thread__init_map_groups(th, machine)) { + rb_erase(&th->rb_node, &machine->threads); + thread__delete(th); + return NULL; + } + /* + * It is now in the rbtree, get a ref + */ + thread__get(th); + machine->last_match = thread__get(th); + } + + return th; +} + +struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, + pid_t tid) +{ + return __machine__findnew_thread(machine, pid, tid, true); +} + +struct thread *machine__find_thread(struct machine *machine, pid_t pid, + pid_t tid) +{ + return __machine__findnew_thread(machine, pid, tid, false); +} + +struct comm *machine__thread_exec_comm(struct machine *machine, + struct thread *thread) +{ + if (machine->comm_exec) + return thread__exec_comm(thread); + else + return thread__comm(thread); +} + +int machine__process_comm_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample) +{ + struct thread *thread = machine__findnew_thread(machine, + event->comm.pid, + event->comm.tid); + bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC; + + if (exec) + machine->comm_exec = true; + + if (dump_trace) + perf_event__fprintf_comm(event, stdout); + + if (thread == NULL || + __thread__set_comm(thread, event->comm.comm, sample->time, exec)) { + dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); + return -1; + } + + return 0; +} + +int machine__process_lost_event(struct machine *machine __maybe_unused, + union perf_event *event, struct perf_sample *sample __maybe_unused) +{ + dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", + event->lost.id, event->lost.lost); + return 0; +} + +static struct dso* +machine__module_dso(struct machine *machine, struct kmod_path *m, + const char *filename) +{ + struct dso *dso; + + dso = dsos__find(&machine->kernel_dsos, m->name, true); + if (!dso) { + dso = dsos__addnew(&machine->kernel_dsos, m->name); + if (dso == NULL) + return NULL; + + if (machine__is_host(machine)) + dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; + else + dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; + + /* _KMODULE_COMP should be next to _KMODULE */ + if (m->kmod && m->comp) + dso->symtab_type++; + + dso__set_short_name(dso, strdup(m->name), true); + dso__set_long_name(dso, strdup(filename), true); + } + + return dso; +} + +struct map *machine__new_module(struct machine *machine, u64 start, + const char *filename) +{ + struct map *map = NULL; + struct dso *dso; + struct kmod_path m; + + if (kmod_path__parse_name(&m, filename)) + return NULL; + + map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION, + m.name); + if (map) + goto out; + + dso = machine__module_dso(machine, &m, filename); + if (dso == NULL) + goto out; + + map = map__new2(start, dso, MAP__FUNCTION); + if (map == NULL) + goto out; + + map_groups__insert(&machine->kmaps, map); + +out: + free(m.name); + return map; +} + +size_t machines__fprintf_dsos(struct machines *machines, FILE *fp) +{ + struct rb_node *nd; + size_t ret = __dsos__fprintf(&machines->host.kernel_dsos.head, fp) + + __dsos__fprintf(&machines->host.user_dsos.head, fp); + + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + ret += __dsos__fprintf(&pos->kernel_dsos.head, fp); + ret += __dsos__fprintf(&pos->user_dsos.head, fp); + } + + return ret; +} + +size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm) +{ + return __dsos__fprintf_buildid(&m->kernel_dsos.head, fp, skip, parm) + + 
__dsos__fprintf_buildid(&m->user_dsos.head, fp, skip, parm); +} + +size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm) +{ + struct rb_node *nd; + size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm); + + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm); + } + return ret; +} + +size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) +{ + int i; + size_t printed = 0; + struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso; + + if (kdso->has_build_id) { + char filename[PATH_MAX]; + if (dso__build_id_filename(kdso, filename, sizeof(filename))) + printed += fprintf(fp, "[0] %s\n", filename); + } + + for (i = 0; i < vmlinux_path__nr_entries; ++i) + printed += fprintf(fp, "[%d] %s\n", + i + kdso->has_build_id, vmlinux_path[i]); + + return printed; +} + +size_t machine__fprintf(struct machine *machine, FILE *fp) +{ + size_t ret = 0; + struct rb_node *nd; + + for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { + struct thread *pos = rb_entry(nd, struct thread, rb_node); + + ret += thread__fprintf(pos, fp); + } + + return ret; +} + +static struct dso *machine__get_kernel(struct machine *machine) +{ + const char *vmlinux_name = NULL; + struct dso *kernel; + + if (machine__is_host(machine)) { + vmlinux_name = symbol_conf.vmlinux_name; + if (!vmlinux_name) + vmlinux_name = "[kernel.kallsyms]"; + + kernel = dso__kernel_findnew(machine, vmlinux_name, + "[kernel]", + DSO_TYPE_KERNEL); + } else { + char bf[PATH_MAX]; + + if (machine__is_default_guest(machine)) + vmlinux_name = symbol_conf.default_guest_vmlinux_name; + if (!vmlinux_name) + vmlinux_name = machine__mmap_name(machine, bf, + sizeof(bf)); + + kernel = dso__kernel_findnew(machine, vmlinux_name, + "[guest.kernel]", + DSO_TYPE_GUEST_KERNEL); + } + + if (kernel != NULL && (!kernel->has_build_id)) + dso__read_running_kernel_build_id(kernel, machine); + + return kernel; +} + +struct process_args { + u64 start; +}; + +static void machine__get_kallsyms_filename(struct machine *machine, char *buf, + size_t bufsz) +{ + if (machine__is_default_guest(machine)) + scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms); + else + scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir); +} + +const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL}; + +/* Figure out the start address of kernel map from /proc/kallsyms. + * Returns the name of the start symbol in *symbol_name. Pass in NULL as + * symbol_name if it's not that important. 
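+ *
+ * Returns 0 when kallsyms is unreadable (e.g. due to kptr_restrict)
+ * or when none of the ref_reloc_sym_names symbols can be resolved.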
+ */ +static u64 machine__get_running_kernel_start(struct machine *machine, + const char **symbol_name) +{ + char filename[PATH_MAX]; + int i; + const char *name; + u64 addr = 0; + + machine__get_kallsyms_filename(machine, filename, PATH_MAX); + + if (symbol__restricted_filename(filename, "/proc/kallsyms")) + return 0; + + for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) { + addr = kallsyms__get_function_start(filename, name); + if (addr) + break; + } + + if (symbol_name) + *symbol_name = name; + + return addr; +} + +int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) +{ + enum map_type type; + u64 start = machine__get_running_kernel_start(machine, NULL); + + for (type = 0; type < MAP__NR_TYPES; ++type) { + struct kmap *kmap; + + machine->vmlinux_maps[type] = map__new2(start, kernel, type); + if (machine->vmlinux_maps[type] == NULL) + return -1; + + machine->vmlinux_maps[type]->map_ip = + machine->vmlinux_maps[type]->unmap_ip = + identity__map_ip; + kmap = map__kmap(machine->vmlinux_maps[type]); + if (!kmap) + return -1; + + kmap->kmaps = &machine->kmaps; + map_groups__insert(&machine->kmaps, + machine->vmlinux_maps[type]); + } + + return 0; +} + +void machine__destroy_kernel_maps(struct machine *machine) +{ + enum map_type type; + + for (type = 0; type < MAP__NR_TYPES; ++type) { + struct kmap *kmap; + + if (machine->vmlinux_maps[type] == NULL) + continue; + + kmap = map__kmap(machine->vmlinux_maps[type]); + map_groups__remove(&machine->kmaps, + machine->vmlinux_maps[type]); + if (kmap && kmap->ref_reloc_sym) { + /* + * ref_reloc_sym is shared among all maps, so free just + * on one of them. + */ + if (type == MAP__FUNCTION) { + zfree((char **)&kmap->ref_reloc_sym->name); + zfree(&kmap->ref_reloc_sym); + } else + kmap->ref_reloc_sym = NULL; + } + + map__delete(machine->vmlinux_maps[type]); + machine->vmlinux_maps[type] = NULL; + } +} + +int machines__create_guest_kernel_maps(struct machines *machines) +{ + int ret = 0; + struct dirent **namelist = NULL; + int i, items = 0; + char path[PATH_MAX]; + pid_t pid; + char *endp; + + if (symbol_conf.default_guest_vmlinux_name || + symbol_conf.default_guest_modules || + symbol_conf.default_guest_kallsyms) { + machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); + } + + if (symbol_conf.guestmount) { + items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL); + if (items <= 0) + return -ENOENT; + for (i = 0; i < items; i++) { + if (!isdigit(namelist[i]->d_name[0])) { + /* Filter out . and .. */ + continue; + } + pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10); + if ((*endp != '\0') || + (endp == namelist[i]->d_name) || + (errno == ERANGE)) { + pr_debug("invalid directory (%s). 
Skipping.\n",
+					 namelist[i]->d_name);
+				continue;
+			}
+			sprintf(path, "%s/%s/proc/kallsyms",
+				symbol_conf.guestmount,
+				namelist[i]->d_name);
+			ret = access(path, R_OK);
+			if (ret) {
+				pr_debug("Can't access file %s\n", path);
+				goto failure;
+			}
+			machines__create_kernel_maps(machines, pid);
+		}
+failure:
+		free(namelist);
+	}
+
+	return ret;
+}
+
+void machines__destroy_kernel_maps(struct machines *machines)
+{
+	struct rb_node *next = rb_first(&machines->guests);
+
+	machine__destroy_kernel_maps(&machines->host);
+
+	while (next) {
+		struct machine *pos = rb_entry(next, struct machine, rb_node);
+
+		next = rb_next(&pos->rb_node);
+		rb_erase(&pos->rb_node, &machines->guests);
+		machine__delete(pos);
+	}
+}
+
+int machines__create_kernel_maps(struct machines *machines, pid_t pid)
+{
+	struct machine *machine = machines__findnew(machines, pid);
+
+	if (machine == NULL)
+		return -1;
+
+	return machine__create_kernel_maps(machine);
+}
+
+int machine__load_kallsyms(struct machine *machine, const char *filename,
+			   enum map_type type, symbol_filter_t filter)
+{
+	struct map *map = machine->vmlinux_maps[type];
+	int ret = dso__load_kallsyms(map->dso, filename, map, filter);
+
+	if (ret > 0) {
+		dso__set_loaded(map->dso, type);
+		/*
+		 * Since /proc/kallsyms will have multiple sessions for the
+		 * kernel, with modules between them, fixup the end of all
+		 * sections.
+		 */
+		__map_groups__fixup_end(&machine->kmaps, type);
+	}
+
+	return ret;
+}
+
+int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
+			       symbol_filter_t filter)
+{
+	struct map *map = machine->vmlinux_maps[type];
+	int ret = dso__load_vmlinux_path(map->dso, map, filter);
+
+	if (ret > 0)
+		dso__set_loaded(map->dso, type);
+
+	return ret;
+}
+
+static void map_groups__fixup_end(struct map_groups *mg)
+{
+	int i;
+	for (i = 0; i < MAP__NR_TYPES; ++i)
+		__map_groups__fixup_end(mg, i);
+}
+
+static char *get_kernel_version(const char *root_dir)
+{
+	char version[PATH_MAX];
+	FILE *file;
+	char *name, *tmp;
+	const char *prefix = "Linux version ";
+
+	sprintf(version, "%s/proc/version", root_dir);
+	file = fopen(version, "r");
+	if (!file)
+		return NULL;
+
+	version[0] = '\0';
+	tmp = fgets(version, sizeof(version), file);
+	fclose(file);
+
+	name = strstr(version, prefix);
+	if (!name)
+		return NULL;
+	name += strlen(prefix);
+	tmp = strchr(name, ' ');
+	if (tmp)
+		*tmp = '\0';
+
+	return strdup(name);
+}
+
+static bool is_kmod_dso(struct dso *dso)
+{
+	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
+	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
+}
+
+static int map_groups__set_module_path(struct map_groups *mg, const char *path,
+				       struct kmod_path *m)
+{
+	struct map *map;
+	char *long_name;
+
+	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
+	if (map == NULL)
+		return 0;
+
+	long_name = strdup(path);
+	if (long_name == NULL)
+		return -ENOMEM;
+
+	dso__set_long_name(map->dso, long_name, true);
+	dso__kernel_module_get_build_id(map->dso, "");
+
+	/*
+	 * The full name could reveal kmod compression to us, so we
+	 * may need to update the symtab_type accordingly.
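+	 * (The *_KMODULE_COMP symtab types sit directly after their
+	 * *_KMODULE counterparts, which is why the bare increment
+	 * below is sufficient.)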
+ */ + if (m->comp && is_kmod_dso(map->dso)) + map->dso->symtab_type++; + + return 0; +} + +static int map_groups__set_modules_path_dir(struct map_groups *mg, + const char *dir_name, int depth) +{ + struct dirent *dent; + DIR *dir = opendir(dir_name); + int ret = 0; + + if (!dir) { + pr_debug("%s: cannot open %s dir\n", __func__, dir_name); + return -1; + } + + while ((dent = readdir(dir)) != NULL) { + char path[PATH_MAX]; + struct stat st; + + /*sshfs might return bad dent->d_type, so we have to stat*/ + snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); + if (stat(path, &st)) + continue; + + if (S_ISDIR(st.st_mode)) { + if (!strcmp(dent->d_name, ".") || + !strcmp(dent->d_name, "..")) + continue; + + /* Do not follow top-level source and build symlinks */ + if (depth == 0) { + if (!strcmp(dent->d_name, "source") || + !strcmp(dent->d_name, "build")) + continue; + } + + ret = map_groups__set_modules_path_dir(mg, path, + depth + 1); + if (ret < 0) + goto out; + } else { + struct kmod_path m; + + ret = kmod_path__parse_name(&m, dent->d_name); + if (ret) + goto out; + + if (m.kmod) + ret = map_groups__set_module_path(mg, path, &m); + + free(m.name); + + if (ret) + goto out; + } + } + +out: + closedir(dir); + return ret; +} + +static int machine__set_modules_path(struct machine *machine) +{ + char *version; + char modules_path[PATH_MAX]; + + version = get_kernel_version(machine->root_dir); + if (!version) + return -1; + + snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s", + machine->root_dir, version); + free(version); + + return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0); +} + +static int machine__create_module(void *arg, const char *name, u64 start) +{ + struct machine *machine = arg; + struct map *map; + + map = machine__new_module(machine, start, name); + if (map == NULL) + return -1; + + dso__kernel_module_get_build_id(map->dso, machine->root_dir); + + return 0; +} + +static int machine__create_modules(struct machine *machine) +{ + const char *modules; + char path[PATH_MAX]; + + if (machine__is_default_guest(machine)) { + modules = symbol_conf.default_guest_modules; + } else { + snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir); + modules = path; + } + + if (symbol__restricted_filename(modules, "/proc/modules")) + return -1; + + if (modules__parse(modules, machine, machine__create_module)) + return -1; + + if (!machine__set_modules_path(machine)) + return 0; + + pr_debug("Problems setting modules path maps, continuing anyway...\n"); + + return 0; +} + +int machine__create_kernel_maps(struct machine *machine) +{ + struct dso *kernel = machine__get_kernel(machine); + const char *name; + u64 addr = machine__get_running_kernel_start(machine, &name); + if (!addr) + return -1; + + if (kernel == NULL || + __machine__create_kernel_maps(machine, kernel) < 0) + return -1; + + if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { + if (machine__is_host(machine)) + pr_debug("Problems creating module maps, " + "continuing anyway...\n"); + else + pr_debug("Problems creating module maps for guest %d, " + "continuing anyway...\n", machine->pid); + } + + /* + * Now that we have all the maps created, just set the ->end of them: + */ + map_groups__fixup_end(&machine->kmaps); + + if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, + addr)) { + machine__destroy_kernel_maps(machine); + return -1; + } + + return 0; +} + +static void machine__set_kernel_mmap_len(struct machine *machine, + union perf_event *event) +{ + 
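+	/*
+	 * Stretch every map type (function and variable) to cover the
+	 * address range carried by the synthesized kernel MMAP event.
+	 */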
int i; + + for (i = 0; i < MAP__NR_TYPES; i++) { + machine->vmlinux_maps[i]->start = event->mmap.start; + machine->vmlinux_maps[i]->end = (event->mmap.start + + event->mmap.len); + /* + * Be a bit paranoid here, some perf.data file came with + * a zero sized synthesized MMAP event for the kernel. + */ + if (machine->vmlinux_maps[i]->end == 0) + machine->vmlinux_maps[i]->end = ~0ULL; + } +} + +static bool machine__uses_kcore(struct machine *machine) +{ + struct dso *dso; + + list_for_each_entry(dso, &machine->kernel_dsos.head, node) { + if (dso__is_kcore(dso)) + return true; + } + + return false; +} + +static int machine__process_kernel_mmap_event(struct machine *machine, + union perf_event *event) +{ + struct map *map; + char kmmap_prefix[PATH_MAX]; + enum dso_kernel_type kernel_type; + bool is_kernel_mmap; + + /* If we have maps from kcore then we do not need or want any others */ + if (machine__uses_kcore(machine)) + return 0; + + machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix)); + if (machine__is_host(machine)) + kernel_type = DSO_TYPE_KERNEL; + else + kernel_type = DSO_TYPE_GUEST_KERNEL; + + is_kernel_mmap = memcmp(event->mmap.filename, + kmmap_prefix, + strlen(kmmap_prefix) - 1) == 0; + if (event->mmap.filename[0] == '/' || + (!is_kernel_mmap && event->mmap.filename[0] == '[')) { + map = machine__new_module(machine, event->mmap.start, + event->mmap.filename); + if (map == NULL) + goto out_problem; + + map->end = map->start + event->mmap.len; + } else if (is_kernel_mmap) { + const char *symbol_name = (event->mmap.filename + + strlen(kmmap_prefix)); + /* + * Should be there already, from the build-id table in + * the header. + */ + struct dso *kernel = NULL; + struct dso *dso; + + list_for_each_entry(dso, &machine->kernel_dsos.head, node) { + if (is_kernel_module(dso->long_name)) + continue; + + kernel = dso; + break; + } + + if (kernel == NULL) + kernel = __dsos__findnew(&machine->kernel_dsos, + kmmap_prefix); + if (kernel == NULL) + goto out_problem; + + kernel->kernel = kernel_type; + if (__machine__create_kernel_maps(machine, kernel) < 0) + goto out_problem; + + if (strstr(kernel->long_name, "vmlinux")) + dso__set_short_name(kernel, "[kernel.vmlinux]", false); + + machine__set_kernel_mmap_len(machine, event); + + /* + * Avoid using a zero address (kptr_restrict) for the ref reloc + * symbol. Effectively having zero here means that at record + * time /proc/sys/kernel/kptr_restrict was non zero. 
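+ * A non-zero pgoff, by contrast, carries the genuine ref reloc
+ * symbol address captured at record time.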
+ */ + if (event->mmap.pgoff != 0) { + maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, + symbol_name, + event->mmap.pgoff); + } + + if (machine__is_default_guest(machine)) { + /* + * preload dso of guest kernel and modules + */ + dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION], + NULL); + } + } + return 0; +out_problem: + return -1; +} + +int machine__process_mmap2_event(struct machine *machine, + union perf_event *event, + struct perf_sample *sample __maybe_unused) +{ + u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + struct thread *thread; + struct map *map; + enum map_type type; + int ret = 0; + + if (dump_trace) + perf_event__fprintf_mmap2(event, stdout); + + if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL || + cpumode == PERF_RECORD_MISC_KERNEL) { + ret = machine__process_kernel_mmap_event(machine, event); + if (ret < 0) + goto out_problem; + return 0; + } + + thread = machine__findnew_thread(machine, event->mmap2.pid, + event->mmap2.tid); + if (thread == NULL) + goto out_problem; + + if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) + type = MAP__VARIABLE; + else + type = MAP__FUNCTION; + + map = map__new(machine, event->mmap2.start, + event->mmap2.len, event->mmap2.pgoff, + event->mmap2.pid, event->mmap2.maj, + event->mmap2.min, event->mmap2.ino, + event->mmap2.ino_generation, + event->mmap2.prot, + event->mmap2.flags, + event->mmap2.filename, type, thread); + + if (map == NULL) + goto out_problem; + + thread__insert_map(thread, map); + return 0; + +out_problem: + dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n"); + return 0; +} + +int machine__process_mmap_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample __maybe_unused) +{ + u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + struct thread *thread; + struct map *map; + enum map_type type; + int ret = 0; + + if (dump_trace) + perf_event__fprintf_mmap(event, stdout); + + if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL || + cpumode == PERF_RECORD_MISC_KERNEL) { + ret = machine__process_kernel_mmap_event(machine, event); + if (ret < 0) + goto out_problem; + return 0; + } + + thread = machine__findnew_thread(machine, event->mmap.pid, + event->mmap.tid); + if (thread == NULL) + goto out_problem; + + if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) + type = MAP__VARIABLE; + else + type = MAP__FUNCTION; + + map = map__new(machine, event->mmap.start, + event->mmap.len, event->mmap.pgoff, + event->mmap.pid, 0, 0, 0, 0, 0, 0, + event->mmap.filename, + type, thread); + + if (map == NULL) + goto out_problem; + + thread__insert_map(thread, map); + return 0; + +out_problem: + dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); + return 0; +} + +void machine__remove_thread(struct machine *machine, struct thread *th) +{ + if (machine->last_match == th) + thread__zput(machine->last_match); + + rb_erase(&th->rb_node, &machine->threads); + /* + * Move it first to the dead_threads list, then drop the reference, + * if this is the last reference, then the thread__delete destructor + * will be called and we will remove it from the dead_threads list. 
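+	 *
+	 * (Editor's note, not upstream text: dead_threads therefore only
+	 * keeps threads that are still referenced elsewhere, e.g. by
+	 * samples queued for processing; thread__put() below drops just
+	 * this machine's reference.)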
+ */ + list_add_tail(&th->node, &machine->dead_threads); + thread__put(th); +} + +int machine__process_fork_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample) +{ + struct thread *thread = machine__find_thread(machine, + event->fork.pid, + event->fork.tid); + struct thread *parent = machine__findnew_thread(machine, + event->fork.ppid, + event->fork.ptid); + + /* if a thread currently exists for the thread id remove it */ + if (thread != NULL) + machine__remove_thread(machine, thread); + + thread = machine__findnew_thread(machine, event->fork.pid, + event->fork.tid); + if (dump_trace) + perf_event__fprintf_task(event, stdout); + + if (thread == NULL || parent == NULL || + thread__fork(thread, parent, sample->time) < 0) { + dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); + return -1; + } + + return 0; +} + +int machine__process_exit_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample __maybe_unused) +{ + struct thread *thread = machine__find_thread(machine, + event->fork.pid, + event->fork.tid); + + if (dump_trace) + perf_event__fprintf_task(event, stdout); + + if (thread != NULL) + thread__exited(thread); + + return 0; +} + +int machine__process_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample) +{ + int ret; + + switch (event->header.type) { + case PERF_RECORD_COMM: + ret = machine__process_comm_event(machine, event, sample); break; + case PERF_RECORD_MMAP: + ret = machine__process_mmap_event(machine, event, sample); break; + case PERF_RECORD_MMAP2: + ret = machine__process_mmap2_event(machine, event, sample); break; + case PERF_RECORD_FORK: + ret = machine__process_fork_event(machine, event, sample); break; + case PERF_RECORD_EXIT: + ret = machine__process_exit_event(machine, event, sample); break; + case PERF_RECORD_LOST: + ret = machine__process_lost_event(machine, event, sample); break; + default: + ret = -1; + break; + } + + return ret; +} + +static bool symbol__match_regex(struct symbol *sym, regex_t *regex) +{ + if (sym->name && !regexec(regex, sym->name, 0, NULL, 0)) + return 1; + return 0; +} + +static void ip__resolve_ams(struct thread *thread, + struct addr_map_symbol *ams, + u64 ip) +{ + struct addr_location al; + + memset(&al, 0, sizeof(al)); + /* + * We cannot use the header.misc hint to determine whether a + * branch stack address is user, kernel, guest, hypervisor. + * Branches may straddle the kernel/user/hypervisor boundaries. + * Thus, we have to try consecutively until we find a match + * or else, the symbol is unknown + */ + thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al); + + ams->addr = ip; + ams->al_addr = al.addr; + ams->sym = al.sym; + ams->map = al.map; +} + +static void ip__resolve_data(struct thread *thread, + u8 m, struct addr_map_symbol *ams, u64 addr) +{ + struct addr_location al; + + memset(&al, 0, sizeof(al)); + + thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al); + if (al.map == NULL) { + /* + * some shared data regions have execute bit set which puts + * their mapping in the MAP__FUNCTION type array. + * Check there as a fallback option before dropping the sample. 
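+	 * (Editor's note, not upstream text: e.g. a segment mapped rwx for
+	 * JIT or shared-memory use lands in MAP__FUNCTION even when it is
+	 * sampled through a data access.)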
+ */
+		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
+	}
+
+	ams->addr = addr;
+	ams->al_addr = al.addr;
+	ams->sym = al.sym;
+	ams->map = al.map;
+}
+
+struct mem_info *sample__resolve_mem(struct perf_sample *sample,
+				     struct addr_location *al)
+{
+	struct mem_info *mi = zalloc(sizeof(*mi));
+
+	if (!mi)
+		return NULL;
+
+	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
+	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
+	mi->data_src.val = sample->data_src;
+
+	return mi;
+}
+
+static int add_callchain_ip(struct thread *thread,
+			    struct symbol **parent,
+			    struct addr_location *root_al,
+			    u8 *cpumode,
+			    u64 ip)
+{
+	struct addr_location al;
+
+	al.filtered = 0;
+	al.sym = NULL;
+	if (!cpumode) {
+		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
+						   ip, &al);
+	} else {
+		if (ip >= PERF_CONTEXT_MAX) {
+			switch (ip) {
+			case PERF_CONTEXT_HV:
+				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
+				break;
+			case PERF_CONTEXT_KERNEL:
+				*cpumode = PERF_RECORD_MISC_KERNEL;
+				break;
+			case PERF_CONTEXT_USER:
+				*cpumode = PERF_RECORD_MISC_USER;
+				break;
+			default:
+				pr_debug("invalid callchain context: "
+					 "%"PRId64"\n", (s64) ip);
+				/*
+				 * It seems the callchain is corrupted.
+				 * Discard all.
+				 */
+				callchain_cursor_reset(&callchain_cursor);
+				return 1;
+			}
+			return 0;
+		}
+		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
+					   ip, &al);
+	}
+
+	if (al.sym != NULL) {
+		if (sort__has_parent && !*parent &&
+		    symbol__match_regex(al.sym, &parent_regex))
+			*parent = al.sym;
+		else if (have_ignore_callees && root_al &&
+			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
+			/* Treat this symbol as the root,
+			   forgetting its callees. */
+			*root_al = al;
+			callchain_cursor_reset(&callchain_cursor);
+		}
+	}
+
+	return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
+}
+
+struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
+					   struct addr_location *al)
+{
+	unsigned int i;
+	const struct branch_stack *bs = sample->branch_stack;
+	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
+
+	if (!bi)
+		return NULL;
+
+	for (i = 0; i < bs->nr; i++) {
+		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
+		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
+		bi[i].flags = bs->entries[i].flags;
+	}
+	return bi;
+}
+
+#define CHASHSZ 127
+#define CHASHBITS 7
+#define NO_ENTRY 0xff
+
+#define PERF_MAX_BRANCH_DEPTH 127
+
+/* Remove loops. */
+static int remove_loops(struct branch_entry *l, int nr)
+{
+	int i, j, off;
+	unsigned char chash[CHASHSZ];
+
+	memset(chash, NO_ENTRY, sizeof(chash));
+
+	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
+
+	for (i = 0; i < nr; i++) {
+		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
+
+		/* no collision handling for now */
+		if (chash[h] == NO_ENTRY) {
+			chash[h] = i;
+		} else if (l[chash[h]].from == l[i].from) {
+			bool is_loop = true;
+			/* check if it is a real loop */
+			off = 0;
+			for (j = chash[h]; j < i && i + off < nr; j++, off++)
+				if (l[j].from != l[i + off].from) {
+					is_loop = false;
+					break;
+				}
+			if (is_loop) {
+				memmove(l + i, l + i + off,
+					(nr - (i + off)) * sizeof(*l));
+				nr -= off;
+			}
+		}
+	}
+	return nr;
+}
+
+/*
+ * Resolve LBR callstack chain sample
+ * Return:
+ * 1 on success (LBR callchain information resolved)
+ * 0 if no LBR callchain information is available; the caller should try fp
+ * negative error code on other errors.
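+ *
+ * (Editor's sketch, not upstream text: in callee order with i kernel
+ * entries, the merged chain built below is laid out as
+ *
+ *	chain->ips[0..i]	  kernel IPs, incl. the PERF_CONTEXT_USER
+ *				  marker at index i
+ *	lbr_stack->entries[0].to  the current user-space IP
+ *	entries[0..lbr_nr-1].from the user call sites, walking towards
+ *				  the outermost caller
+ *
+ * which is where mix_chain_nr = i + 1 + lbr_nr + 1 comes from.)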
+ */ +static int resolve_lbr_callchain_sample(struct thread *thread, + struct perf_sample *sample, + struct symbol **parent, + struct addr_location *root_al, + int max_stack) +{ + struct ip_callchain *chain = sample->callchain; + int chain_nr = min(max_stack, (int)chain->nr); + u8 cpumode = PERF_RECORD_MISC_USER; + int i, j, err; + u64 ip; + + for (i = 0; i < chain_nr; i++) { + if (chain->ips[i] == PERF_CONTEXT_USER) + break; + } + + /* LBR only affects the user callchain */ + if (i != chain_nr) { + struct branch_stack *lbr_stack = sample->branch_stack; + int lbr_nr = lbr_stack->nr; + /* + * LBR callstack can only get user call chain. + * The mix_chain_nr is kernel call chain + * number plus LBR user call chain number. + * i is kernel call chain number, + * 1 is PERF_CONTEXT_USER, + * lbr_nr + 1 is the user call chain number. + * For details, please refer to the comments + * in callchain__printf + */ + int mix_chain_nr = i + 1 + lbr_nr + 1; + + if (mix_chain_nr > PERF_MAX_STACK_DEPTH + PERF_MAX_BRANCH_DEPTH) { + pr_warning("corrupted callchain. skipping...\n"); + return 0; + } + + for (j = 0; j < mix_chain_nr; j++) { + if (callchain_param.order == ORDER_CALLEE) { + if (j < i + 1) + ip = chain->ips[j]; + else if (j > i + 1) + ip = lbr_stack->entries[j - i - 2].from; + else + ip = lbr_stack->entries[0].to; + } else { + if (j < lbr_nr) + ip = lbr_stack->entries[lbr_nr - j - 1].from; + else if (j > lbr_nr) + ip = chain->ips[i + 1 - (j - lbr_nr)]; + else + ip = lbr_stack->entries[0].to; + } + + err = add_callchain_ip(thread, parent, root_al, &cpumode, ip); + if (err) + return (err < 0) ? err : 0; + } + return 1; + } + + return 0; +} + +static int thread__resolve_callchain_sample(struct thread *thread, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct symbol **parent, + struct addr_location *root_al, + int max_stack) +{ + struct branch_stack *branch = sample->branch_stack; + struct ip_callchain *chain = sample->callchain; + int chain_nr = min(max_stack, (int)chain->nr); + u8 cpumode = PERF_RECORD_MISC_USER; + int i, j, err; + int skip_idx = -1; + int first_call = 0; + + callchain_cursor_reset(&callchain_cursor); + + if (has_branch_callstack(evsel)) { + err = resolve_lbr_callchain_sample(thread, sample, parent, + root_al, max_stack); + if (err) + return (err < 0) ? err : 0; + } + + /* + * Based on DWARF debug information, some architectures skip + * a callchain entry saved by the kernel. + */ + if (chain->nr < PERF_MAX_STACK_DEPTH) + skip_idx = arch_skip_callchain_idx(thread, chain); + + /* + * Add branches to call stack for easier browsing. This gives + * more context for a sample than just the callers. + * + * This uses individual histograms of paths compared to the + * aggregated histograms the normal LBR mode uses. + * + * Limitations for now: + * - No extra filters + * - No annotations (should annotate somehow) + */ + + if (branch && callchain_param.branch_callstack) { + int nr = min(max_stack, (int)branch->nr); + struct branch_entry be[nr]; + + if (branch->nr > PERF_MAX_BRANCH_DEPTH) { + pr_warning("corrupted branch chain. skipping...\n"); + goto check_calls; + } + + for (i = 0; i < nr; i++) { + if (callchain_param.order == ORDER_CALLEE) { + be[i] = branch->entries[i]; + /* + * Check for overlap into the callchain. + * The return address is one off compared to + * the branch entry. To adjust for this + * assume the calling instruction is not longer + * than 8 bytes. 
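+				 * (Editor's example, not upstream text: a
+				 * 5-byte call at 0x1000 pushes 0x1005 as
+				 * the return address, while the LBR "from"
+				 * is 0x1000; the 8-byte window below
+				 * matches the two.)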
+ */ + if (i == skip_idx || + chain->ips[first_call] >= PERF_CONTEXT_MAX) + first_call++; + else if (be[i].from < chain->ips[first_call] && + be[i].from >= chain->ips[first_call] - 8) + first_call++; + } else + be[i] = branch->entries[branch->nr - i - 1]; + } + + nr = remove_loops(be, nr); + + for (i = 0; i < nr; i++) { + err = add_callchain_ip(thread, parent, root_al, + NULL, be[i].to); + if (!err) + err = add_callchain_ip(thread, parent, root_al, + NULL, be[i].from); + if (err == -EINVAL) + break; + if (err) + return err; + } + chain_nr -= nr; + } + +check_calls: + if (chain->nr > PERF_MAX_STACK_DEPTH) { + pr_warning("corrupted callchain. skipping...\n"); + return 0; + } + + for (i = first_call; i < chain_nr; i++) { + u64 ip; + + if (callchain_param.order == ORDER_CALLEE) + j = i; + else + j = chain->nr - i - 1; + +#ifdef HAVE_SKIP_CALLCHAIN_IDX + if (j == skip_idx) + continue; +#endif + ip = chain->ips[j]; + + err = add_callchain_ip(thread, parent, root_al, &cpumode, ip); + + if (err) + return (err < 0) ? err : 0; + } + + return 0; +} + +static int unwind_entry(struct unwind_entry *entry, void *arg) +{ + struct callchain_cursor *cursor = arg; + return callchain_cursor_append(cursor, entry->ip, + entry->map, entry->sym); +} + +int thread__resolve_callchain(struct thread *thread, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct symbol **parent, + struct addr_location *root_al, + int max_stack) +{ + int ret = thread__resolve_callchain_sample(thread, evsel, + sample, parent, + root_al, max_stack); + if (ret) + return ret; + + /* Can we do dwarf post unwind? */ + if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) && + (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER))) + return 0; + + /* Bail out if nothing was captured. */ + if ((!sample->user_regs.regs) || + (!sample->user_stack.size)) + return 0; + + return unwind__get_entries(unwind_entry, &callchain_cursor, + thread, sample, max_stack); + +} + +int machine__for_each_thread(struct machine *machine, + int (*fn)(struct thread *thread, void *p), + void *priv) +{ + struct rb_node *nd; + struct thread *thread; + int rc = 0; + + for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { + thread = rb_entry(nd, struct thread, rb_node); + rc = fn(thread, priv); + if (rc != 0) + return rc; + } + + list_for_each_entry(thread, &machine->dead_threads, node) { + rc = fn(thread, priv); + if (rc != 0) + return rc; + } + return rc; +} + +int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, + struct target *target, struct thread_map *threads, + perf_event__handler_t process, bool data_mmap) +{ + if (target__has_task(target)) + return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap); + else if (target__has_cpu(target)) + return perf_event__synthesize_threads(tool, process, machine, data_mmap); + /* command specified */ + return 0; +} + +pid_t machine__get_current_tid(struct machine *machine, int cpu) +{ + if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid) + return -1; + + return machine->current_tid[cpu]; +} + +int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, + pid_t tid) +{ + struct thread *thread; + + if (cpu < 0) + return -EINVAL; + + if (!machine->current_tid) { + int i; + + machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t)); + if (!machine->current_tid) + return -ENOMEM; + for (i = 0; i < MAX_NR_CPUS; i++) + machine->current_tid[i] = -1; + } + + if (cpu >= MAX_NR_CPUS) { + pr_err("Requested CPU %d too large. 
", cpu); + pr_err("Consider raising MAX_NR_CPUS\n"); + return -EINVAL; + } + + machine->current_tid[cpu] = tid; + + thread = machine__findnew_thread(machine, pid, tid); + if (!thread) + return -ENOMEM; + + thread->cpu = cpu; + + return 0; +} + +int machine__get_kernel_start(struct machine *machine) +{ + struct map *map = machine__kernel_map(machine, MAP__FUNCTION); + int err = 0; + + /* + * The only addresses above 2^63 are kernel addresses of a 64-bit + * kernel. Note that addresses are unsigned so that on a 32-bit system + * all addresses including kernel addresses are less than 2^32. In + * that case (32-bit system), if the kernel mapping is unknown, all + * addresses will be assumed to be in user space - see + * machine__kernel_ip(). + */ + machine->kernel_start = 1ULL << 63; + if (map) { + err = map__load(map, machine->symbol_filter); + if (map->start) + machine->kernel_start = map->start; + } + return err; +} diff --git a/kernel/tools/perf/util/machine.h b/kernel/tools/perf/util/machine.h new file mode 100644 index 000000000..6d64cedb9 --- /dev/null +++ b/kernel/tools/perf/util/machine.h @@ -0,0 +1,227 @@ +#ifndef __PERF_MACHINE_H +#define __PERF_MACHINE_H + +#include +#include +#include "map.h" +#include "dso.h" +#include "event.h" + +struct addr_location; +struct branch_stack; +struct perf_evsel; +struct perf_sample; +struct symbol; +struct thread; +union perf_event; + +/* Native host kernel uses -1 as pid index in machine */ +#define HOST_KERNEL_ID (-1) +#define DEFAULT_GUEST_KERNEL_ID (0) + +extern const char *ref_reloc_sym_names[]; + +struct vdso_info; + +struct machine { + struct rb_node rb_node; + pid_t pid; + u16 id_hdr_size; + bool comm_exec; + char *root_dir; + struct rb_root threads; + struct list_head dead_threads; + struct thread *last_match; + struct vdso_info *vdso_info; + struct dsos user_dsos; + struct dsos kernel_dsos; + struct map_groups kmaps; + struct map *vmlinux_maps[MAP__NR_TYPES]; + u64 kernel_start; + symbol_filter_t symbol_filter; + pid_t *current_tid; + union { /* Tool specific area */ + void *priv; + u64 db_id; + }; +}; + +static inline +struct map *machine__kernel_map(struct machine *machine, enum map_type type) +{ + return machine->vmlinux_maps[type]; +} + +int machine__get_kernel_start(struct machine *machine); + +static inline u64 machine__kernel_start(struct machine *machine) +{ + if (!machine->kernel_start) + machine__get_kernel_start(machine); + return machine->kernel_start; +} + +static inline bool machine__kernel_ip(struct machine *machine, u64 ip) +{ + u64 kernel_start = machine__kernel_start(machine); + + return ip >= kernel_start; +} + +struct thread *machine__find_thread(struct machine *machine, pid_t pid, + pid_t tid); +struct comm *machine__thread_exec_comm(struct machine *machine, + struct thread *thread); + +int machine__process_comm_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample); +int machine__process_exit_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample); +int machine__process_fork_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample); +int machine__process_lost_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample); +int machine__process_mmap_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample); +int machine__process_mmap2_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample); +int machine__process_event(struct machine *machine, union 
perf_event *event, + struct perf_sample *sample); + +typedef void (*machine__process_t)(struct machine *machine, void *data); + +struct machines { + struct machine host; + struct rb_root guests; + symbol_filter_t symbol_filter; +}; + +void machines__init(struct machines *machines); +void machines__exit(struct machines *machines); + +void machines__process_guests(struct machines *machines, + machine__process_t process, void *data); + +struct machine *machines__add(struct machines *machines, pid_t pid, + const char *root_dir); +struct machine *machines__find_host(struct machines *machines); +struct machine *machines__find(struct machines *machines, pid_t pid); +struct machine *machines__findnew(struct machines *machines, pid_t pid); + +void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size); +char *machine__mmap_name(struct machine *machine, char *bf, size_t size); + +void machines__set_symbol_filter(struct machines *machines, + symbol_filter_t symbol_filter); +void machines__set_comm_exec(struct machines *machines, bool comm_exec); + +struct machine *machine__new_host(void); +int machine__init(struct machine *machine, const char *root_dir, pid_t pid); +void machine__exit(struct machine *machine); +void machine__delete_threads(struct machine *machine); +void machine__delete(struct machine *machine); +void machine__remove_thread(struct machine *machine, struct thread *th); + +struct branch_info *sample__resolve_bstack(struct perf_sample *sample, + struct addr_location *al); +struct mem_info *sample__resolve_mem(struct perf_sample *sample, + struct addr_location *al); +int thread__resolve_callchain(struct thread *thread, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct symbol **parent, + struct addr_location *root_al, + int max_stack); + +/* + * Default guest kernel is defined by parameter --guestkallsyms + * and --guestmodules + */ +static inline bool machine__is_default_guest(struct machine *machine) +{ + return machine ? machine->pid == DEFAULT_GUEST_KERNEL_ID : false; +} + +static inline bool machine__is_host(struct machine *machine) +{ + return machine ? 
machine->pid == HOST_KERNEL_ID : false; +} + +struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, + pid_t tid); + +size_t machine__fprintf(struct machine *machine, FILE *fp); + +static inline +struct symbol *machine__find_kernel_symbol(struct machine *machine, + enum map_type type, u64 addr, + struct map **mapp, + symbol_filter_t filter) +{ + return map_groups__find_symbol(&machine->kmaps, type, addr, + mapp, filter); +} + +static inline +struct symbol *machine__find_kernel_function(struct machine *machine, u64 addr, + struct map **mapp, + symbol_filter_t filter) +{ + return machine__find_kernel_symbol(machine, MAP__FUNCTION, addr, + mapp, filter); +} + +static inline +struct symbol *machine__find_kernel_function_by_name(struct machine *machine, + const char *name, + struct map **mapp, + symbol_filter_t filter) +{ + return map_groups__find_function_by_name(&machine->kmaps, name, mapp, + filter); +} + +struct map *machine__new_module(struct machine *machine, u64 start, + const char *filename); + +int machine__load_kallsyms(struct machine *machine, const char *filename, + enum map_type type, symbol_filter_t filter); +int machine__load_vmlinux_path(struct machine *machine, enum map_type type, + symbol_filter_t filter); + +size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm); +size_t machines__fprintf_dsos(struct machines *machines, FILE *fp); +size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm); + +void machine__destroy_kernel_maps(struct machine *machine); +int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); +int machine__create_kernel_maps(struct machine *machine); + +int machines__create_kernel_maps(struct machines *machines, pid_t pid); +int machines__create_guest_kernel_maps(struct machines *machines); +void machines__destroy_kernel_maps(struct machines *machines); + +size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); + +int machine__for_each_thread(struct machine *machine, + int (*fn)(struct thread *thread, void *p), + void *priv); + +int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, + struct target *target, struct thread_map *threads, + perf_event__handler_t process, bool data_mmap); +static inline +int machine__synthesize_threads(struct machine *machine, struct target *target, + struct thread_map *threads, bool data_mmap) +{ + return __machine__synthesize_threads(machine, NULL, target, threads, + perf_event__process, data_mmap); +} + +pid_t machine__get_current_tid(struct machine *machine, int cpu); +int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, + pid_t tid); + +#endif /* __PERF_MACHINE_H */ diff --git a/kernel/tools/perf/util/map.c b/kernel/tools/perf/util/map.c new file mode 100644 index 000000000..a14f08f41 --- /dev/null +++ b/kernel/tools/perf/util/map.c @@ -0,0 +1,800 @@ +#include "symbol.h" +#include +#include +#include +#include +#include +#include +#include +#include "map.h" +#include "thread.h" +#include "strlist.h" +#include "vdso.h" +#include "build-id.h" +#include "util.h" +#include "debug.h" +#include "machine.h" +#include + +const char *map_type__name[MAP__NR_TYPES] = { + [MAP__FUNCTION] = "Functions", + [MAP__VARIABLE] = "Variables", +}; + +static inline int is_anon_memory(const char *filename) +{ + return !strcmp(filename, "//anon") || + !strcmp(filename, "/dev/zero (deleted)") || + 
!strcmp(filename, "/anon_hugepage (deleted)"); +} + +static inline int is_no_dso_memory(const char *filename) +{ + return !strncmp(filename, "[stack", 6) || + !strncmp(filename, "/SYSV",5) || + !strcmp(filename, "[heap]"); +} + +static inline int is_android_lib(const char *filename) +{ + return !strncmp(filename, "/data/app-lib", 13) || + !strncmp(filename, "/system/lib", 11); +} + +static inline bool replace_android_lib(const char *filename, char *newfilename) +{ + const char *libname; + char *app_abi; + size_t app_abi_length, new_length; + size_t lib_length = 0; + + libname = strrchr(filename, '/'); + if (libname) + lib_length = strlen(libname); + + app_abi = getenv("APP_ABI"); + if (!app_abi) + return false; + + app_abi_length = strlen(app_abi); + + if (!strncmp(filename, "/data/app-lib", 13)) { + char *apk_path; + + if (!app_abi_length) + return false; + + new_length = 7 + app_abi_length + lib_length; + + apk_path = getenv("APK_PATH"); + if (apk_path) { + new_length += strlen(apk_path) + 1; + if (new_length > PATH_MAX) + return false; + snprintf(newfilename, new_length, + "%s/libs/%s/%s", apk_path, app_abi, libname); + } else { + if (new_length > PATH_MAX) + return false; + snprintf(newfilename, new_length, + "libs/%s/%s", app_abi, libname); + } + return true; + } + + if (!strncmp(filename, "/system/lib/", 11)) { + char *ndk, *app; + const char *arch; + size_t ndk_length; + size_t app_length; + + ndk = getenv("NDK_ROOT"); + app = getenv("APP_PLATFORM"); + + if (!(ndk && app)) + return false; + + ndk_length = strlen(ndk); + app_length = strlen(app); + + if (!(ndk_length && app_length && app_abi_length)) + return false; + + arch = !strncmp(app_abi, "arm", 3) ? "arm" : + !strncmp(app_abi, "mips", 4) ? "mips" : + !strncmp(app_abi, "x86", 3) ? "x86" : NULL; + + if (!arch) + return false; + + new_length = 27 + ndk_length + + app_length + lib_length + + strlen(arch); + + if (new_length > PATH_MAX) + return false; + snprintf(newfilename, new_length, + "%s/platforms/%s/arch-%s/usr/lib/%s", + ndk, app, arch, libname); + + return true; + } + return false; +} + +void map__init(struct map *map, enum map_type type, + u64 start, u64 end, u64 pgoff, struct dso *dso) +{ + map->type = type; + map->start = start; + map->end = end; + map->pgoff = pgoff; + map->reloc = 0; + map->dso = dso; + map->map_ip = map__map_ip; + map->unmap_ip = map__unmap_ip; + RB_CLEAR_NODE(&map->rb_node); + map->groups = NULL; + map->referenced = false; + map->erange_warned = false; +} + +struct map *map__new(struct machine *machine, u64 start, u64 len, + u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino, + u64 ino_gen, u32 prot, u32 flags, char *filename, + enum map_type type, struct thread *thread) +{ + struct map *map = malloc(sizeof(*map)); + + if (map != NULL) { + char newfilename[PATH_MAX]; + struct dso *dso; + int anon, no_dso, vdso, android; + + android = is_android_lib(filename); + anon = is_anon_memory(filename); + vdso = is_vdso_map(filename); + no_dso = is_no_dso_memory(filename); + + map->maj = d_maj; + map->min = d_min; + map->ino = ino; + map->ino_generation = ino_gen; + map->prot = prot; + map->flags = flags; + + if ((anon || no_dso) && type == MAP__FUNCTION) { + snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid); + filename = newfilename; + } + + if (android) { + if (replace_android_lib(filename, newfilename)) + filename = newfilename; + } + + if (vdso) { + pgoff = 0; + dso = vdso__dso_findnew(machine, thread); + } else + dso = __dsos__findnew(&machine->user_dsos, filename); + + if (dso == NULL) 
+ goto out_delete; + + map__init(map, type, start, start + len, pgoff, dso); + + if (anon || no_dso) { + map->map_ip = map->unmap_ip = identity__map_ip; + + /* + * Set memory without DSO as loaded. All map__find_* + * functions still return NULL, and we avoid the + * unnecessary map__load warning. + */ + if (type != MAP__FUNCTION) + dso__set_loaded(dso, map->type); + } + } + return map; +out_delete: + free(map); + return NULL; +} + +/* + * Constructor variant for modules (where we know from /proc/modules where + * they are loaded) and for vmlinux, where only after we load all the + * symbols we'll know where it starts and ends. + */ +struct map *map__new2(u64 start, struct dso *dso, enum map_type type) +{ + struct map *map = calloc(1, (sizeof(*map) + + (dso->kernel ? sizeof(struct kmap) : 0))); + if (map != NULL) { + /* + * ->end will be filled after we load all the symbols + */ + map__init(map, type, start, 0, 0, dso); + } + + return map; +} + +void map__delete(struct map *map) +{ + free(map); +} + +void map__fixup_start(struct map *map) +{ + struct rb_root *symbols = &map->dso->symbols[map->type]; + struct rb_node *nd = rb_first(symbols); + if (nd != NULL) { + struct symbol *sym = rb_entry(nd, struct symbol, rb_node); + map->start = sym->start; + } +} + +void map__fixup_end(struct map *map) +{ + struct rb_root *symbols = &map->dso->symbols[map->type]; + struct rb_node *nd = rb_last(symbols); + if (nd != NULL) { + struct symbol *sym = rb_entry(nd, struct symbol, rb_node); + map->end = sym->end; + } +} + +#define DSO__DELETED "(deleted)" + +int map__load(struct map *map, symbol_filter_t filter) +{ + const char *name = map->dso->long_name; + int nr; + + if (dso__loaded(map->dso, map->type)) + return 0; + + nr = dso__load(map->dso, map, filter); + if (nr < 0) { + if (map->dso->has_build_id) { + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + + build_id__sprintf(map->dso->build_id, + sizeof(map->dso->build_id), + sbuild_id); + pr_warning("%s with build id %s not found", + name, sbuild_id); + } else + pr_warning("Failed to open %s", name); + + pr_warning(", continuing without symbols\n"); + return -1; + } else if (nr == 0) { +#ifdef HAVE_LIBELF_SUPPORT + const size_t len = strlen(name); + const size_t real_len = len - sizeof(DSO__DELETED); + + if (len > sizeof(DSO__DELETED) && + strcmp(name + real_len + 1, DSO__DELETED) == 0) { + pr_warning("%.*s was updated (is prelink enabled?). 
" + "Restart the long running apps that use it!\n", + (int)real_len, name); + } else { + pr_warning("no symbols found in %s, maybe install " + "a debug package?\n", name); + } +#endif + return -1; + } + + return 0; +} + +struct symbol *map__find_symbol(struct map *map, u64 addr, + symbol_filter_t filter) +{ + if (map__load(map, filter) < 0) + return NULL; + + return dso__find_symbol(map->dso, map->type, addr); +} + +struct symbol *map__find_symbol_by_name(struct map *map, const char *name, + symbol_filter_t filter) +{ + if (map__load(map, filter) < 0) + return NULL; + + if (!dso__sorted_by_name(map->dso, map->type)) + dso__sort_by_name(map->dso, map->type); + + return dso__find_symbol_by_name(map->dso, map->type, name); +} + +struct map *map__clone(struct map *map) +{ + return memdup(map, sizeof(*map)); +} + +int map__overlap(struct map *l, struct map *r) +{ + if (l->start > r->start) { + struct map *t = l; + l = r; + r = t; + } + + if (l->end > r->start) + return 1; + + return 0; +} + +size_t map__fprintf(struct map *map, FILE *fp) +{ + return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", + map->start, map->end, map->pgoff, map->dso->name); +} + +size_t map__fprintf_dsoname(struct map *map, FILE *fp) +{ + const char *dsoname = "[unknown]"; + + if (map && map->dso && (map->dso->name || map->dso->long_name)) { + if (symbol_conf.show_kernel_path && map->dso->long_name) + dsoname = map->dso->long_name; + else if (map->dso->name) + dsoname = map->dso->name; + } + + return fprintf(fp, "%s", dsoname); +} + +int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, + FILE *fp) +{ + char *srcline; + int ret = 0; + + if (map && map->dso) { + srcline = get_srcline(map->dso, + map__rip_2objdump(map, addr), NULL, true); + if (srcline != SRCLINE_UNKNOWN) + ret = fprintf(fp, "%s%s", prefix, srcline); + free_srcline(srcline); + } + return ret; +} + +/** + * map__rip_2objdump - convert symbol start address to objdump address. + * @map: memory map + * @rip: symbol start address + * + * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN. + * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is + * relative to section start. + * + * Return: Address suitable for passing to "objdump --start-address=" + */ +u64 map__rip_2objdump(struct map *map, u64 rip) +{ + if (!map->dso->adjust_symbols) + return rip; + + if (map->dso->rel) + return rip - map->pgoff; + + return map->unmap_ip(map, rip) - map->reloc; +} + +/** + * map__objdump_2mem - convert objdump address to a memory address. + * @map: memory map + * @ip: objdump address + * + * Closely related to map__rip_2objdump(), this function takes an address from + * objdump and converts it to a memory address. Note this assumes that @map + * contains the address. To be sure the result is valid, check it forwards + * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip + * + * Return: Memory address. 
+ */ +u64 map__objdump_2mem(struct map *map, u64 ip) +{ + if (!map->dso->adjust_symbols) + return map->unmap_ip(map, ip); + + if (map->dso->rel) + return map->unmap_ip(map, ip + map->pgoff); + + return ip + map->reloc; +} + +void map_groups__init(struct map_groups *mg, struct machine *machine) +{ + int i; + for (i = 0; i < MAP__NR_TYPES; ++i) { + mg->maps[i] = RB_ROOT; + INIT_LIST_HEAD(&mg->removed_maps[i]); + } + mg->machine = machine; + mg->refcnt = 1; +} + +static void maps__delete(struct rb_root *maps) +{ + struct rb_node *next = rb_first(maps); + + while (next) { + struct map *pos = rb_entry(next, struct map, rb_node); + + next = rb_next(&pos->rb_node); + rb_erase(&pos->rb_node, maps); + map__delete(pos); + } +} + +static void maps__delete_removed(struct list_head *maps) +{ + struct map *pos, *n; + + list_for_each_entry_safe(pos, n, maps, node) { + list_del(&pos->node); + map__delete(pos); + } +} + +void map_groups__exit(struct map_groups *mg) +{ + int i; + + for (i = 0; i < MAP__NR_TYPES; ++i) { + maps__delete(&mg->maps[i]); + maps__delete_removed(&mg->removed_maps[i]); + } +} + +bool map_groups__empty(struct map_groups *mg) +{ + int i; + + for (i = 0; i < MAP__NR_TYPES; ++i) { + if (maps__first(&mg->maps[i])) + return false; + if (!list_empty(&mg->removed_maps[i])) + return false; + } + + return true; +} + +struct map_groups *map_groups__new(struct machine *machine) +{ + struct map_groups *mg = malloc(sizeof(*mg)); + + if (mg != NULL) + map_groups__init(mg, machine); + + return mg; +} + +void map_groups__delete(struct map_groups *mg) +{ + map_groups__exit(mg); + free(mg); +} + +void map_groups__put(struct map_groups *mg) +{ + if (--mg->refcnt == 0) + map_groups__delete(mg); +} + +void map_groups__flush(struct map_groups *mg) +{ + int type; + + for (type = 0; type < MAP__NR_TYPES; type++) { + struct rb_root *root = &mg->maps[type]; + struct rb_node *next = rb_first(root); + + while (next) { + struct map *pos = rb_entry(next, struct map, rb_node); + next = rb_next(&pos->rb_node); + rb_erase(&pos->rb_node, root); + /* + * We may have references to this map, for + * instance in some hist_entry instances, so + * just move them to a separate list. 
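+			 * (Editor's note: the removed_maps lists are only
+			 * drained in map_groups__exit(), see above, so the
+			 * moved entries stay valid for the lifetime of the
+			 * map_groups.)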
+ */ + list_add_tail(&pos->node, &mg->removed_maps[pos->type]); + } + } +} + +struct symbol *map_groups__find_symbol(struct map_groups *mg, + enum map_type type, u64 addr, + struct map **mapp, + symbol_filter_t filter) +{ + struct map *map = map_groups__find(mg, type, addr); + + /* Ensure map is loaded before using map->map_ip */ + if (map != NULL && map__load(map, filter) >= 0) { + if (mapp != NULL) + *mapp = map; + return map__find_symbol(map, map->map_ip(map, addr), filter); + } + + return NULL; +} + +struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, + enum map_type type, + const char *name, + struct map **mapp, + symbol_filter_t filter) +{ + struct rb_node *nd; + + for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { + struct map *pos = rb_entry(nd, struct map, rb_node); + struct symbol *sym = map__find_symbol_by_name(pos, name, filter); + + if (sym == NULL) + continue; + if (mapp != NULL) + *mapp = pos; + return sym; + } + + return NULL; +} + +int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter) +{ + if (ams->addr < ams->map->start || ams->addr >= ams->map->end) { + if (ams->map->groups == NULL) + return -1; + ams->map = map_groups__find(ams->map->groups, ams->map->type, + ams->addr); + if (ams->map == NULL) + return -1; + } + + ams->al_addr = ams->map->map_ip(ams->map, ams->addr); + ams->sym = map__find_symbol(ams->map, ams->al_addr, filter); + + return ams->sym ? 0 : -1; +} + +size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type, + FILE *fp) +{ + size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); + struct rb_node *nd; + + for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { + struct map *pos = rb_entry(nd, struct map, rb_node); + printed += fprintf(fp, "Map:"); + printed += map__fprintf(pos, fp); + if (verbose > 2) { + printed += dso__fprintf(pos->dso, type, fp); + printed += fprintf(fp, "--\n"); + } + } + + return printed; +} + +static size_t map_groups__fprintf_maps(struct map_groups *mg, FILE *fp) +{ + size_t printed = 0, i; + for (i = 0; i < MAP__NR_TYPES; ++i) + printed += __map_groups__fprintf_maps(mg, i, fp); + return printed; +} + +static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg, + enum map_type type, FILE *fp) +{ + struct map *pos; + size_t printed = 0; + + list_for_each_entry(pos, &mg->removed_maps[type], node) { + printed += fprintf(fp, "Map:"); + printed += map__fprintf(pos, fp); + if (verbose > 1) { + printed += dso__fprintf(pos->dso, type, fp); + printed += fprintf(fp, "--\n"); + } + } + return printed; +} + +static size_t map_groups__fprintf_removed_maps(struct map_groups *mg, + FILE *fp) +{ + size_t printed = 0, i; + for (i = 0; i < MAP__NR_TYPES; ++i) + printed += __map_groups__fprintf_removed_maps(mg, i, fp); + return printed; +} + +size_t map_groups__fprintf(struct map_groups *mg, FILE *fp) +{ + size_t printed = map_groups__fprintf_maps(mg, fp); + printed += fprintf(fp, "Removed maps:\n"); + return printed + map_groups__fprintf_removed_maps(mg, fp); +} + +int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, + FILE *fp) +{ + struct rb_root *root = &mg->maps[map->type]; + struct rb_node *next = rb_first(root); + int err = 0; + + while (next) { + struct map *pos = rb_entry(next, struct map, rb_node); + next = rb_next(&pos->rb_node); + + if (!map__overlap(pos, map)) + continue; + + if (verbose >= 2) { + fputs("overlapping maps:\n", fp); + map__fprintf(map, fp); + map__fprintf(pos, fp); + } + + rb_erase(&pos->rb_node, root); + /* + 
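+		 * (Editor's note, not upstream text: at this point pos
+		 * overlaps the incoming map and has already been unlinked
+		 * from the rb-tree.)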
* Now check if we need to create new maps for areas not
+		 * overlapped by the new map:
+		 */
+		if (map->start > pos->start) {
+			struct map *before = map__clone(pos);
+
+			if (before == NULL) {
+				err = -ENOMEM;
+				goto move_map;
+			}
+
+			before->end = map->start;
+			map_groups__insert(mg, before);
+			if (verbose >= 2)
+				map__fprintf(before, fp);
+		}
+
+		if (map->end < pos->end) {
+			struct map *after = map__clone(pos);
+
+			if (after == NULL) {
+				err = -ENOMEM;
+				goto move_map;
+			}
+
+			after->start = map->end;
+			map_groups__insert(mg, after);
+			if (verbose >= 2)
+				map__fprintf(after, fp);
+		}
+move_map:
+		/*
+		 * If we have references, just move them to a separate list.
+		 */
+		if (pos->referenced)
+			list_add_tail(&pos->node, &mg->removed_maps[map->type]);
+		else
+			map__delete(pos);
+
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/*
+ * XXX This should not really _copy_ the maps, but refcount them.
+ */
+int map_groups__clone(struct map_groups *mg,
+		      struct map_groups *parent, enum map_type type)
+{
+	struct rb_node *nd;
+	for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
+		struct map *map = rb_entry(nd, struct map, rb_node);
+		struct map *new = map__clone(map);
+		if (new == NULL)
+			return -ENOMEM;
+		map_groups__insert(mg, new);
+	}
+	return 0;
+}
+
+void maps__insert(struct rb_root *maps, struct map *map)
+{
+	struct rb_node **p = &maps->rb_node;
+	struct rb_node *parent = NULL;
+	const u64 ip = map->start;
+	struct map *m;
+
+	while (*p != NULL) {
+		parent = *p;
+		m = rb_entry(parent, struct map, rb_node);
+		if (ip < m->start)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	rb_link_node(&map->rb_node, parent, p);
+	rb_insert_color(&map->rb_node, maps);
+}
+
+void maps__remove(struct rb_root *maps, struct map *map)
+{
+	rb_erase(&map->rb_node, maps);
+}
+
+struct map *maps__find(struct rb_root *maps, u64 ip)
+{
+	struct rb_node **p = &maps->rb_node;
+	struct rb_node *parent = NULL;
+	struct map *m;
+
+	while (*p != NULL) {
+		parent = *p;
+		m = rb_entry(parent, struct map, rb_node);
+		if (ip < m->start)
+			p = &(*p)->rb_left;
+		else if (ip >= m->end)
+			p = &(*p)->rb_right;
+		else
+			return m;
+	}
+
+	return NULL;
+}
+
+struct map *maps__first(struct rb_root *maps)
+{
+	struct rb_node *first = rb_first(maps);
+
+	if (first)
+		return rb_entry(first, struct map, rb_node);
+	return NULL;
+}
+
+struct map *maps__next(struct map *map)
+{
+	struct rb_node *next = rb_next(&map->rb_node);
+
+	if (next)
+		return rb_entry(next, struct map, rb_node);
+	return NULL;
+}
+
+struct kmap *map__kmap(struct map *map)
+{
+	if (!map->dso || !map->dso->kernel) {
+		pr_err("Internal error: map__kmap with a non-kernel map\n");
+		return NULL;
+	}
+	return (struct kmap *)(map + 1);
+}
+
+struct map_groups *map__kmaps(struct map *map)
+{
+	struct kmap *kmap = map__kmap(map);
+
+	if (!kmap || !kmap->kmaps) {
+		pr_err("Internal error: map__kmaps with a non-kernel map\n");
+		return NULL;
+	}
+	return kmap->kmaps;
+}
diff --git a/kernel/tools/perf/util/map.h b/kernel/tools/perf/util/map.h
new file mode 100644
index 000000000..ec19c59ca
--- /dev/null
+++ b/kernel/tools/perf/util/map.h
@@ -0,0 +1,235 @@
+#ifndef __PERF_MAP_H
+#define __PERF_MAP_H
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <linux/types.h>
+
+enum map_type {
+	MAP__FUNCTION = 0,
+	MAP__VARIABLE,
+};
+
+#define MAP__NR_TYPES (MAP__VARIABLE + 1)
+
+extern const char *map_type__name[MAP__NR_TYPES];
+
+struct dso;
+struct ip_callchain;
+struct ref_reloc_sym;
+struct map_groups;
+struct machine;
+struct perf_evsel;
+
+struct map {
+	union {
+		struct rb_node	rb_node;
+		struct list_head node;
+	};
+	u64			start;
+	u64			end;
+	u8 /* enum map_type */	type;
+	bool			referenced;
+	bool			erange_warned;
+	u32			priv;
+	u32			prot;
+	u32			flags;
+	u64			pgoff;
+	u64			reloc;
+	u32			maj, min; /* only valid for MMAP2 record */
+	u64			ino;      /* only valid for MMAP2 record */
+	u64			ino_generation; /* only valid for MMAP2 record */
+
+	/* ip -> dso rip */
+	u64			(*map_ip)(struct map *, u64);
+	/* dso rip -> ip */
+	u64			(*unmap_ip)(struct map *, u64);
+
+	struct dso		*dso;
+	struct map_groups	*groups;
+};
+
+struct kmap {
+	struct ref_reloc_sym	*ref_reloc_sym;
+	struct map_groups	*kmaps;
+};
+
+struct map_groups {
+	struct rb_root	 maps[MAP__NR_TYPES];
+	struct list_head removed_maps[MAP__NR_TYPES];
+	struct machine	 *machine;
+	int		 refcnt;
+};
+
+struct map_groups *map_groups__new(struct machine *machine);
+void map_groups__delete(struct map_groups *mg);
+bool map_groups__empty(struct map_groups *mg);
+
+static inline struct map_groups *map_groups__get(struct map_groups *mg)
+{
+	++mg->refcnt;
+	return mg;
+}
+
+void map_groups__put(struct map_groups *mg);
+
+struct kmap *map__kmap(struct map *map);
+struct map_groups *map__kmaps(struct map *map);
+
+static inline u64 map__map_ip(struct map *map, u64 ip)
+{
+	return ip - map->start + map->pgoff;
+}
+
+static inline u64 map__unmap_ip(struct map *map, u64 ip)
+{
+	return ip + map->start - map->pgoff;
+}
+
+static inline u64 identity__map_ip(struct map *map __maybe_unused, u64 ip)
+{
+	return ip;
+}
+
+
+/* rip/ip <-> addr suitable for passing to `objdump --start-address=` */
+u64 map__rip_2objdump(struct map *map, u64 rip);
+
+/* objdump address -> memory address */
+u64 map__objdump_2mem(struct map *map, u64 ip);
+
+struct symbol;
+struct thread;
+
+/* map__for_each_symbol - iterate over the symbols in the given map
+ *
+ * @map: the 'struct map *' in which symbols are iterated
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @n: the 'struct rb_node *' to use as a temporary storage
+ * Note: caller must ensure map->dso is not NULL (map is loaded).
+ */
+#define map__for_each_symbol(map, pos, n)	\
+	dso__for_each_symbol(map->dso, pos, n, map->type)
+
+/* map__for_each_symbol_with_name - iterate over the symbols in the given map
+ * that have the given name
+ *
+ * @map: the 'struct map *' in which symbols are iterated
+ * @sym_name: the symbol name
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @filter: to use when loading the DSO
+ */
+#define __map__for_each_symbol_by_name(map, sym_name, pos, filter)	\
+	for (pos = map__find_symbol_by_name(map, sym_name, filter);	\
+	     pos && strcmp(pos->name, sym_name) == 0;			\
+	     pos = symbol__next_by_name(pos))
+
+#define map__for_each_symbol_by_name(map, sym_name, pos)		\
+	__map__for_each_symbol_by_name(map, sym_name, (pos), NULL)
+
+typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
+
+void map__init(struct map *map, enum map_type type,
+	       u64 start, u64 end, u64 pgoff, struct dso *dso);
+struct map *map__new(struct machine *machine, u64 start, u64 len,
+		     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
+		     u64 ino_gen, u32 prot, u32 flags,
+		     char *filename, enum map_type type, struct thread *thread);
+struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
+void map__delete(struct map *map);
+struct map *map__clone(struct map *map);
+int map__overlap(struct map *l, struct map *r);
+size_t map__fprintf(struct map *map, FILE *fp);
+size_t map__fprintf_dsoname(struct map *map, FILE *fp);
+int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
+			 FILE *fp);
+
+int map__load(struct map *map, symbol_filter_t filter);
+struct symbol *map__find_symbol(struct map *map,
+				u64 addr, symbol_filter_t filter);
+struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
+					symbol_filter_t filter);
+void map__fixup_start(struct map *map);
+void map__fixup_end(struct map *map);
+
+void map__reloc_vmlinux(struct map *map);
+
+size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
+				  FILE *fp);
+void maps__insert(struct rb_root *maps, struct map *map);
+void maps__remove(struct rb_root *maps, struct map *map);
+struct map *maps__find(struct rb_root *maps, u64 addr);
+struct map *maps__first(struct rb_root *maps);
+struct map *maps__next(struct map *map);
+void map_groups__init(struct map_groups *mg, struct machine *machine);
+void map_groups__exit(struct map_groups *mg);
+int map_groups__clone(struct map_groups *mg,
+		      struct map_groups *parent, enum map_type type);
+size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
+
+int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name,
+				     u64 addr);
+
+static inline void map_groups__insert(struct map_groups *mg, struct map *map)
+{
+	maps__insert(&mg->maps[map->type], map);
+	map->groups = mg;
+}
+
+static inline void map_groups__remove(struct map_groups *mg, struct map *map)
+{
+	maps__remove(&mg->maps[map->type], map);
+}
+
+static inline struct map *map_groups__find(struct map_groups *mg,
+					   enum map_type type, u64 addr)
+{
+	return maps__find(&mg->maps[type], addr);
+}
+
+static inline struct map *map_groups__first(struct map_groups *mg,
+					    enum map_type type)
+{
+	return maps__first(&mg->maps[type]);
+}
+
+static inline struct map *map_groups__next(struct map *map)
+{
+	return maps__next(map);
+}
+
+struct symbol *map_groups__find_symbol(struct map_groups *mg,
+				       enum map_type type, u64 addr,
+				       struct map **mapp,
+				       symbol_filter_t filter);
+
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
+					       enum map_type type,
+					       const char
*name, + struct map **mapp, + symbol_filter_t filter); + +struct addr_map_symbol; + +int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter); + +static inline +struct symbol *map_groups__find_function_by_name(struct map_groups *mg, + const char *name, struct map **mapp, + symbol_filter_t filter) +{ + return map_groups__find_symbol_by_name(mg, MAP__FUNCTION, name, mapp, filter); +} + +int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, + FILE *fp); + +struct map *map_groups__find_by_name(struct map_groups *mg, + enum map_type type, const char *name); + +void map_groups__flush(struct map_groups *mg); + +#endif /* __PERF_MAP_H */ diff --git a/kernel/tools/perf/util/ordered-events.c b/kernel/tools/perf/util/ordered-events.c new file mode 100644 index 000000000..52be201b9 --- /dev/null +++ b/kernel/tools/perf/util/ordered-events.c @@ -0,0 +1,307 @@ +#include +#include +#include +#include "ordered-events.h" +#include "session.h" +#include "asm/bug.h" +#include "debug.h" + +#define pr_N(n, fmt, ...) \ + eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__) + +#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__) + +static void queue_event(struct ordered_events *oe, struct ordered_event *new) +{ + struct ordered_event *last = oe->last; + u64 timestamp = new->timestamp; + struct list_head *p; + + ++oe->nr_events; + oe->last = new; + + pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events); + + if (!last) { + list_add(&new->list, &oe->events); + oe->max_timestamp = timestamp; + return; + } + + /* + * last event might point to some random place in the list as it's + * the last queued event. We expect that the new event is close to + * this. + */ + if (last->timestamp <= timestamp) { + while (last->timestamp <= timestamp) { + p = last->list.next; + if (p == &oe->events) { + list_add_tail(&new->list, &oe->events); + oe->max_timestamp = timestamp; + return; + } + last = list_entry(p, struct ordered_event, list); + } + list_add_tail(&new->list, &last->list); + } else { + while (last->timestamp > timestamp) { + p = last->list.prev; + if (p == &oe->events) { + list_add(&new->list, &oe->events); + return; + } + last = list_entry(p, struct ordered_event, list); + } + list_add(&new->list, &last->list); + } +} + +static union perf_event *__dup_event(struct ordered_events *oe, + union perf_event *event) +{ + union perf_event *new_event = NULL; + + if (oe->cur_alloc_size < oe->max_alloc_size) { + new_event = memdup(event, event->header.size); + if (new_event) + oe->cur_alloc_size += event->header.size; + } + + return new_event; +} + +static union perf_event *dup_event(struct ordered_events *oe, + union perf_event *event) +{ + return oe->copy_on_queue ? 
__dup_event(oe, event) : event; +} + +static void free_dup_event(struct ordered_events *oe, union perf_event *event) +{ + if (oe->copy_on_queue) { + oe->cur_alloc_size -= event->header.size; + free(event); + } +} + +#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event)) +static struct ordered_event *alloc_event(struct ordered_events *oe, + union perf_event *event) +{ + struct list_head *cache = &oe->cache; + struct ordered_event *new = NULL; + union perf_event *new_event; + + new_event = dup_event(oe, event); + if (!new_event) + return NULL; + + if (!list_empty(cache)) { + new = list_entry(cache->next, struct ordered_event, list); + list_del(&new->list); + } else if (oe->buffer) { + new = oe->buffer + oe->buffer_idx; + if (++oe->buffer_idx == MAX_SAMPLE_BUFFER) + oe->buffer = NULL; + } else if (oe->cur_alloc_size < oe->max_alloc_size) { + size_t size = MAX_SAMPLE_BUFFER * sizeof(*new); + + oe->buffer = malloc(size); + if (!oe->buffer) { + free_dup_event(oe, new_event); + return NULL; + } + + pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n", + oe->cur_alloc_size, size, oe->max_alloc_size); + + oe->cur_alloc_size += size; + list_add(&oe->buffer->list, &oe->to_free); + + /* First entry is abused to maintain the to_free list. */ + oe->buffer_idx = 2; + new = oe->buffer + 1; + } else { + pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size); + } + + new->event = new_event; + return new; +} + +static struct ordered_event * +ordered_events__new_event(struct ordered_events *oe, u64 timestamp, + union perf_event *event) +{ + struct ordered_event *new; + + new = alloc_event(oe, event); + if (new) { + new->timestamp = timestamp; + queue_event(oe, new); + } + + return new; +} + +void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event) +{ + list_move(&event->list, &oe->cache); + oe->nr_events--; + free_dup_event(oe, event->event); +} + +int ordered_events__queue(struct ordered_events *oe, union perf_event *event, + struct perf_sample *sample, u64 file_offset) +{ + u64 timestamp = sample->time; + struct ordered_event *oevent; + + if (!timestamp || timestamp == ~0ULL) + return -ETIME; + + if (timestamp < oe->last_flush) { + pr_oe_time(timestamp, "out of order event\n"); + pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n", + oe->last_flush_type); + + oe->nr_unordered_events++; + } + + oevent = ordered_events__new_event(oe, timestamp, event); + if (!oevent) { + ordered_events__flush(oe, OE_FLUSH__HALF); + oevent = ordered_events__new_event(oe, timestamp, event); + } + + if (!oevent) + return -ENOMEM; + + oevent->file_offset = file_offset; + return 0; +} + +static int __ordered_events__flush(struct ordered_events *oe) +{ + struct list_head *head = &oe->events; + struct ordered_event *tmp, *iter; + u64 limit = oe->next_flush; + u64 last_ts = oe->last ? 
oe->last->timestamp : 0ULL; + bool show_progress = limit == ULLONG_MAX; + struct ui_progress prog; + int ret; + + if (!limit) + return 0; + + if (show_progress) + ui_progress__init(&prog, oe->nr_events, "Processing time ordered events..."); + + list_for_each_entry_safe(iter, tmp, head, list) { + if (session_done()) + return 0; + + if (iter->timestamp > limit) + break; + ret = oe->deliver(oe, iter); + if (ret) + return ret; + + ordered_events__delete(oe, iter); + oe->last_flush = iter->timestamp; + + if (show_progress) + ui_progress__update(&prog, 1); + } + + if (list_empty(head)) + oe->last = NULL; + else if (last_ts <= limit) + oe->last = list_entry(head->prev, struct ordered_event, list); + + return 0; +} + +int ordered_events__flush(struct ordered_events *oe, enum oe_flush how) +{ + static const char * const str[] = { + "NONE", + "FINAL", + "ROUND", + "HALF ", + }; + int err; + + if (oe->nr_events == 0) + return 0; + + switch (how) { + case OE_FLUSH__FINAL: + oe->next_flush = ULLONG_MAX; + break; + + case OE_FLUSH__HALF: + { + struct ordered_event *first, *last; + struct list_head *head = &oe->events; + + first = list_entry(head->next, struct ordered_event, list); + last = oe->last; + + /* Warn if we are called before any event got allocated. */ + if (WARN_ONCE(!last || list_empty(head), "empty queue")) + return 0; + + oe->next_flush = first->timestamp; + oe->next_flush += (last->timestamp - first->timestamp) / 2; + break; + } + + case OE_FLUSH__ROUND: + case OE_FLUSH__NONE: + default: + break; + }; + + pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n", + str[how], oe->nr_events); + pr_oe_time(oe->max_timestamp, "max_timestamp\n"); + + err = __ordered_events__flush(oe); + + if (!err) { + if (how == OE_FLUSH__ROUND) + oe->next_flush = oe->max_timestamp; + + oe->last_flush_type = how; + } + + pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n", + str[how], oe->nr_events); + pr_oe_time(oe->last_flush, "last_flush\n"); + + return err; +} + +void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver) +{ + INIT_LIST_HEAD(&oe->events); + INIT_LIST_HEAD(&oe->cache); + INIT_LIST_HEAD(&oe->to_free); + oe->max_alloc_size = (u64) -1; + oe->cur_alloc_size = 0; + oe->deliver = deliver; +} + +void ordered_events__free(struct ordered_events *oe) +{ + while (!list_empty(&oe->to_free)) { + struct ordered_event *event; + + event = list_entry(oe->to_free.next, struct ordered_event, list); + list_del(&event->list); + free_dup_event(oe, event->event); + free(event); + } +} diff --git a/kernel/tools/perf/util/ordered-events.h b/kernel/tools/perf/util/ordered-events.h new file mode 100644 index 000000000..f403991e3 --- /dev/null +++ b/kernel/tools/perf/util/ordered-events.h @@ -0,0 +1,64 @@ +#ifndef __ORDERED_EVENTS_H +#define __ORDERED_EVENTS_H + +#include + +struct perf_sample; + +struct ordered_event { + u64 timestamp; + u64 file_offset; + union perf_event *event; + struct list_head list; +}; + +enum oe_flush { + OE_FLUSH__NONE, + OE_FLUSH__FINAL, + OE_FLUSH__ROUND, + OE_FLUSH__HALF, +}; + +struct ordered_events; + +typedef int (*ordered_events__deliver_t)(struct ordered_events *oe, + struct ordered_event *event); + +struct ordered_events { + u64 last_flush; + u64 next_flush; + u64 max_timestamp; + u64 max_alloc_size; + u64 cur_alloc_size; + struct list_head events; + struct list_head cache; + struct list_head to_free; + struct ordered_event *buffer; + struct ordered_event *last; + ordered_events__deliver_t 
diff --git a/kernel/tools/perf/util/ordered-events.h b/kernel/tools/perf/util/ordered-events.h
new file mode 100644
index 000000000..f403991e3
--- /dev/null
+++ b/kernel/tools/perf/util/ordered-events.h
@@ -0,0 +1,64 @@
+#ifndef __ORDERED_EVENTS_H
+#define __ORDERED_EVENTS_H
+
+#include <linux/types.h>
+
+struct perf_sample;
+
+struct ordered_event {
+ u64 timestamp;
+ u64 file_offset;
+ union perf_event *event;
+ struct list_head list;
+};
+
+enum oe_flush {
+ OE_FLUSH__NONE,
+ OE_FLUSH__FINAL,
+ OE_FLUSH__ROUND,
+ OE_FLUSH__HALF,
+};
+
+struct ordered_events;
+
+typedef int (*ordered_events__deliver_t)(struct ordered_events *oe,
+ struct ordered_event *event);
+
+struct ordered_events {
+ u64 last_flush;
+ u64 next_flush;
+ u64 max_timestamp;
+ u64 max_alloc_size;
+ u64 cur_alloc_size;
+ struct list_head events;
+ struct list_head cache;
+ struct list_head to_free;
+ struct ordered_event *buffer;
+ struct ordered_event *last;
+ ordered_events__deliver_t deliver;
+ int buffer_idx;
+ unsigned int nr_events;
+ enum oe_flush last_flush_type;
+ u32 nr_unordered_events;
+ bool copy_on_queue;
+};
+
+int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
+ struct perf_sample *sample, u64 file_offset);
+void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
+int ordered_events__flush(struct ordered_events *oe, enum oe_flush how);
+void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver);
+void ordered_events__free(struct ordered_events *oe);
+
+static inline
+void ordered_events__set_alloc_size(struct ordered_events *oe, u64 size)
+{
+ oe->max_alloc_size = size;
+}
+
+static inline
+void ordered_events__set_copy_on_queue(struct ordered_events *oe, bool copy)
+{
+ oe->copy_on_queue = copy;
+}
+#endif /* __ORDERED_EVENTS_H */
diff --git a/kernel/tools/perf/util/pager.c b/kernel/tools/perf/util/pager.c
new file mode 100644
index 000000000..31ee02d4e
--- /dev/null
+++ b/kernel/tools/perf/util/pager.c
@@ -0,0 +1,100 @@
+#include "cache.h"
+#include "run-command.h"
+#include "sigchain.h"
+
+/*
+ * This is split up from the rest of git so that we can do
+ * something different on Windows.
+ */
+
+static int spawned_pager;
+
+static void pager_preexec(void)
+{
+ /*
+ * Work around bug in "less" by not starting it until we
+ * have real input
+ */
+ fd_set in;
+
+ FD_ZERO(&in);
+ FD_SET(0, &in);
+ select(1, &in, NULL, &in, NULL);
+
+ setenv("LESS", "FRSX", 0);
+}
+
+static const char *pager_argv[] = { "sh", "-c", NULL, NULL };
+static struct child_process pager_process;
+
+static void wait_for_pager(void)
+{
+ fflush(stdout);
+ fflush(stderr);
+ /* signal EOF to pager */
+ close(1);
+ close(2);
+ finish_command(&pager_process);
+}
+
+static void wait_for_pager_signal(int signo)
+{
+ wait_for_pager();
+ sigchain_pop(signo);
+ raise(signo);
+}
+
+void setup_pager(void)
+{
+ const char *pager = getenv("PERF_PAGER");
+
+ if (!isatty(1))
+ return;
+ if (!pager) {
+ if (!pager_program)
+ perf_config(perf_default_config, NULL);
+ pager = pager_program;
+ }
+ if (!pager)
+ pager = getenv("PAGER");
+ if (!(pager || access("/usr/bin/pager", X_OK)))
+ pager = "/usr/bin/pager";
+ if (!(pager || access("/usr/bin/less", X_OK)))
+ pager = "/usr/bin/less";
+ if (!pager)
+ pager = "cat";
+ if (!*pager || !strcmp(pager, "cat"))
+ return;
+
+ spawned_pager = 1; /* means we are emitting to terminal */
+
+ /* spawn the pager */
+ pager_argv[2] = pager;
+ pager_process.argv = pager_argv;
+ pager_process.in = -1;
+ pager_process.preexec_cb = pager_preexec;
+
+ if (start_command(&pager_process))
+ return;
+
+ /* original process continues, but writes to the pipe */
+ dup2(pager_process.in, 1);
+ if (isatty(2))
+ dup2(pager_process.in, 2);
+ close(pager_process.in);
+
+ /* this makes sure that the parent terminates after the pager */
+ sigchain_push_common(wait_for_pager_signal);
+ atexit(wait_for_pager);
+}
+
+int pager_in_use(void)
+{
+ const char *env;
+
+ if (spawned_pager)
+ return 1;
+
+ env = getenv("PERF_PAGER_IN_USE");
+ return env ? perf_config_bool("PERF_PAGER_IN_USE", env) : 0;
+}
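setup_pager() above amounts to: spawn the pager with its stdin on a pipe, point the parent's stdout (and stderr, when it is a tty) at that pipe, and make sure the parent only exits after the pager does. A stripped-down, self-contained illustration of that pattern in plain POSIX C — no perf helpers, error handling mostly elided:

#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	int fds[2];
	pid_t pid;

	if (pipe(fds) < 0)
		return 1;
	pid = fork();
	if (pid == 0) {			/* child: becomes the pager */
		dup2(fds[0], 0);	/* pipe read end -> pager stdin */
		close(fds[0]);
		close(fds[1]);
		execlp("less", "less", "-FRSX", (char *)NULL);
		_exit(127);
	}
	dup2(fds[1], 1);		/* parent output now goes to the pager */
	close(fds[0]);
	close(fds[1]);
	printf("hello via the pager\n");
	fflush(stdout);
	close(1);			/* EOF tells the pager it can exit */
	waitpid(pid, NULL, 0);		/* like wait_for_pager(): outlive the pager, not vice versa */
	return 0;
}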
diff --git a/kernel/tools/perf/util/parse-events.c b/kernel/tools/perf/util/parse-events.c
new file mode 100644
index 000000000..be0655388
--- /dev/null
+++ b/kernel/tools/perf/util/parse-events.c
@@ -0,0 +1,1537 @@
+#include <linux/hw_breakpoint.h>
+#include "util.h"
+#include "../perf.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "parse-options.h"
+#include "parse-events.h"
+#include "exec_cmd.h"
+#include "string.h"
+#include "symbol.h"
+#include "cache.h"
+#include "header.h"
+#include "debug.h"
+#include <api/fs/debugfs.h>
+#include "parse-events-bison.h"
+#define YY_EXTRA_TYPE int
+#include "parse-events-flex.h"
+#include "pmu.h"
+#include "thread_map.h"
+
+#define MAX_NAME_LEN 100
+
+#ifdef PARSER_DEBUG
+extern int parse_events_debug;
+#endif
+int parse_events_parse(void *data, void *scanner);
+
+static struct perf_pmu_event_symbol *perf_pmu_events_list;
+/*
+ * The variable indicates the number of supported pmu event symbols.
+ * 0 means not initialized and ready to init
+ * -1 means failed to init, don't try anymore
+ * >0 is the number of supported pmu event symbols
+ */
+static int perf_pmu_events_list_num;
+
+struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = {
+ .symbol = "cpu-cycles",
+ .alias = "cycles",
+ },
+ [PERF_COUNT_HW_INSTRUCTIONS] = {
+ .symbol = "instructions",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = {
+ .symbol = "cache-references",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_CACHE_MISSES] = {
+ .symbol = "cache-misses",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
+ .symbol = "branch-instructions",
+ .alias = "branches",
+ },
+ [PERF_COUNT_HW_BRANCH_MISSES] = {
+ .symbol = "branch-misses",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_BUS_CYCLES] = {
+ .symbol = "bus-cycles",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
+ .symbol = "stalled-cycles-frontend",
+ .alias = "idle-cycles-frontend",
+ },
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
+ .symbol = "stalled-cycles-backend",
+ .alias = "idle-cycles-backend",
+ },
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = {
+ .symbol = "ref-cycles",
+ .alias = "",
+ },
+};
+
+struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
+ [PERF_COUNT_SW_CPU_CLOCK] = {
+ .symbol = "cpu-clock",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_TASK_CLOCK] = {
+ .symbol = "task-clock",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_PAGE_FAULTS] = {
+ .symbol = "page-faults",
+ .alias = "faults",
+ },
+ [PERF_COUNT_SW_CONTEXT_SWITCHES] = {
+ .symbol = "context-switches",
+ .alias = "cs",
+ },
+ [PERF_COUNT_SW_CPU_MIGRATIONS] = {
+ .symbol = "cpu-migrations",
+ .alias = "migrations",
+ },
+ [PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
+ .symbol = "minor-faults",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
+ .symbol = "major-faults",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
+ .symbol = "alignment-faults",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_EMULATION_FAULTS] = {
+ .symbol = "emulation-faults",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_DUMMY] = {
+ .symbol = "dummy",
+ .alias = "",
+ },
+};
+
+#define __PERF_EVENT_FIELD(config, name) \
+ ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
+
+#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
+#define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
+#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
+#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
+
+#define for_each_subsystem(sys_dir, sys_dirent, sys_next) \
+ while 
(!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \ + if (sys_dirent.d_type == DT_DIR && \ + (strcmp(sys_dirent.d_name, ".")) && \ + (strcmp(sys_dirent.d_name, ".."))) + +static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) +{ + char evt_path[MAXPATHLEN]; + int fd; + + snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path, + sys_dir->d_name, evt_dir->d_name); + fd = open(evt_path, O_RDONLY); + if (fd < 0) + return -EINVAL; + close(fd); + + return 0; +} + +#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \ + while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \ + if (evt_dirent.d_type == DT_DIR && \ + (strcmp(evt_dirent.d_name, ".")) && \ + (strcmp(evt_dirent.d_name, "..")) && \ + (!tp_event_has_id(&sys_dirent, &evt_dirent))) + +#define MAX_EVENT_LENGTH 512 + + +struct tracepoint_path *tracepoint_id_to_path(u64 config) +{ + struct tracepoint_path *path = NULL; + DIR *sys_dir, *evt_dir; + struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; + char id_buf[24]; + int fd; + u64 id; + char evt_path[MAXPATHLEN]; + char dir_path[MAXPATHLEN]; + + sys_dir = opendir(tracing_events_path); + if (!sys_dir) + return NULL; + + for_each_subsystem(sys_dir, sys_dirent, sys_next) { + + snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, + sys_dirent.d_name); + evt_dir = opendir(dir_path); + if (!evt_dir) + continue; + + for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { + + snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, + evt_dirent.d_name); + fd = open(evt_path, O_RDONLY); + if (fd < 0) + continue; + if (read(fd, id_buf, sizeof(id_buf)) < 0) { + close(fd); + continue; + } + close(fd); + id = atoll(id_buf); + if (id == config) { + closedir(evt_dir); + closedir(sys_dir); + path = zalloc(sizeof(*path)); + path->system = malloc(MAX_EVENT_LENGTH); + if (!path->system) { + free(path); + return NULL; + } + path->name = malloc(MAX_EVENT_LENGTH); + if (!path->name) { + zfree(&path->system); + free(path); + return NULL; + } + strncpy(path->system, sys_dirent.d_name, + MAX_EVENT_LENGTH); + strncpy(path->name, evt_dirent.d_name, + MAX_EVENT_LENGTH); + return path; + } + } + closedir(evt_dir); + } + + closedir(sys_dir); + return NULL; +} + +struct tracepoint_path *tracepoint_name_to_path(const char *name) +{ + struct tracepoint_path *path = zalloc(sizeof(*path)); + char *str = strchr(name, ':'); + + if (path == NULL || str == NULL) { + free(path); + return NULL; + } + + path->system = strndup(name, str - name); + path->name = strdup(str+1); + + if (path->system == NULL || path->name == NULL) { + zfree(&path->system); + zfree(&path->name); + free(path); + path = NULL; + } + + return path; +} + +const char *event_type(int type) +{ + switch (type) { + case PERF_TYPE_HARDWARE: + return "hardware"; + + case PERF_TYPE_SOFTWARE: + return "software"; + + case PERF_TYPE_TRACEPOINT: + return "tracepoint"; + + case PERF_TYPE_HW_CACHE: + return "hardware-cache"; + + default: + break; + } + + return "unknown"; +} + + + +static struct perf_evsel * +__add_event(struct list_head *list, int *idx, + struct perf_event_attr *attr, + char *name, struct cpu_map *cpus) +{ + struct perf_evsel *evsel; + + event_attr_init(attr); + + evsel = perf_evsel__new_idx(attr, (*idx)++); + if (!evsel) + return NULL; + + evsel->cpus = cpus; + if (name) + evsel->name = strdup(name); + list_add_tail(&evsel->node, list); + return evsel; +} + +static int add_event(struct list_head *list, int *idx, + struct perf_event_attr *attr, char *name) +{ + return 
__add_event(list, idx, attr, name, NULL) ? 0 : -ENOMEM; +} + +static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size) +{ + int i, j; + int n, longest = -1; + + for (i = 0; i < size; i++) { + for (j = 0; j < PERF_EVSEL__MAX_ALIASES && names[i][j]; j++) { + n = strlen(names[i][j]); + if (n > longest && !strncasecmp(str, names[i][j], n)) + longest = n; + } + if (longest > 0) + return i; + } + + return -1; +} + +int parse_events_add_cache(struct list_head *list, int *idx, + char *type, char *op_result1, char *op_result2) +{ + struct perf_event_attr attr; + char name[MAX_NAME_LEN]; + int cache_type = -1, cache_op = -1, cache_result = -1; + char *op_result[2] = { op_result1, op_result2 }; + int i, n; + + /* + * No fallback - if we cannot get a clear cache type + * then bail out: + */ + cache_type = parse_aliases(type, perf_evsel__hw_cache, + PERF_COUNT_HW_CACHE_MAX); + if (cache_type == -1) + return -EINVAL; + + n = snprintf(name, MAX_NAME_LEN, "%s", type); + + for (i = 0; (i < 2) && (op_result[i]); i++) { + char *str = op_result[i]; + + n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str); + + if (cache_op == -1) { + cache_op = parse_aliases(str, perf_evsel__hw_cache_op, + PERF_COUNT_HW_CACHE_OP_MAX); + if (cache_op >= 0) { + if (!perf_evsel__is_cache_op_valid(cache_type, cache_op)) + return -EINVAL; + continue; + } + } + + if (cache_result == -1) { + cache_result = parse_aliases(str, perf_evsel__hw_cache_result, + PERF_COUNT_HW_CACHE_RESULT_MAX); + if (cache_result >= 0) + continue; + } + } + + /* + * Fall back to reads: + */ + if (cache_op == -1) + cache_op = PERF_COUNT_HW_CACHE_OP_READ; + + /* + * Fall back to accesses: + */ + if (cache_result == -1) + cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS; + + memset(&attr, 0, sizeof(attr)); + attr.config = cache_type | (cache_op << 8) | (cache_result << 16); + attr.type = PERF_TYPE_HW_CACHE; + return add_event(list, idx, &attr, name); +} + +static int add_tracepoint(struct list_head *list, int *idx, + char *sys_name, char *evt_name) +{ + struct perf_evsel *evsel; + + evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++); + if (!evsel) + return -ENOMEM; + + list_add_tail(&evsel->node, list); + + return 0; +} + +static int add_tracepoint_multi_event(struct list_head *list, int *idx, + char *sys_name, char *evt_name) +{ + char evt_path[MAXPATHLEN]; + struct dirent *evt_ent; + DIR *evt_dir; + int ret = 0; + + snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name); + evt_dir = opendir(evt_path); + if (!evt_dir) { + perror("Can't open event dir"); + return -1; + } + + while (!ret && (evt_ent = readdir(evt_dir))) { + if (!strcmp(evt_ent->d_name, ".") + || !strcmp(evt_ent->d_name, "..") + || !strcmp(evt_ent->d_name, "enable") + || !strcmp(evt_ent->d_name, "filter")) + continue; + + if (!strglobmatch(evt_ent->d_name, evt_name)) + continue; + + ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name); + } + + closedir(evt_dir); + return ret; +} + +static int add_tracepoint_event(struct list_head *list, int *idx, + char *sys_name, char *evt_name) +{ + return strpbrk(evt_name, "*?") ? 
+ add_tracepoint_multi_event(list, idx, sys_name, evt_name) : + add_tracepoint(list, idx, sys_name, evt_name); +} + +static int add_tracepoint_multi_sys(struct list_head *list, int *idx, + char *sys_name, char *evt_name) +{ + struct dirent *events_ent; + DIR *events_dir; + int ret = 0; + + events_dir = opendir(tracing_events_path); + if (!events_dir) { + perror("Can't open event dir"); + return -1; + } + + while (!ret && (events_ent = readdir(events_dir))) { + if (!strcmp(events_ent->d_name, ".") + || !strcmp(events_ent->d_name, "..") + || !strcmp(events_ent->d_name, "enable") + || !strcmp(events_ent->d_name, "header_event") + || !strcmp(events_ent->d_name, "header_page")) + continue; + + if (!strglobmatch(events_ent->d_name, sys_name)) + continue; + + ret = add_tracepoint_event(list, idx, events_ent->d_name, + evt_name); + } + + closedir(events_dir); + return ret; +} + +int parse_events_add_tracepoint(struct list_head *list, int *idx, + char *sys, char *event) +{ + if (strpbrk(sys, "*?")) + return add_tracepoint_multi_sys(list, idx, sys, event); + else + return add_tracepoint_event(list, idx, sys, event); +} + +static int +parse_breakpoint_type(const char *type, struct perf_event_attr *attr) +{ + int i; + + for (i = 0; i < 3; i++) { + if (!type || !type[i]) + break; + +#define CHECK_SET_TYPE(bit) \ +do { \ + if (attr->bp_type & bit) \ + return -EINVAL; \ + else \ + attr->bp_type |= bit; \ +} while (0) + + switch (type[i]) { + case 'r': + CHECK_SET_TYPE(HW_BREAKPOINT_R); + break; + case 'w': + CHECK_SET_TYPE(HW_BREAKPOINT_W); + break; + case 'x': + CHECK_SET_TYPE(HW_BREAKPOINT_X); + break; + default: + return -EINVAL; + } + } + +#undef CHECK_SET_TYPE + + if (!attr->bp_type) /* Default */ + attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; + + return 0; +} + +int parse_events_add_breakpoint(struct list_head *list, int *idx, + void *ptr, char *type, u64 len) +{ + struct perf_event_attr attr; + + memset(&attr, 0, sizeof(attr)); + attr.bp_addr = (unsigned long) ptr; + + if (parse_breakpoint_type(type, &attr)) + return -EINVAL; + + /* Provide some defaults if len is not specified */ + if (!len) { + if (attr.bp_type == HW_BREAKPOINT_X) + len = sizeof(long); + else + len = HW_BREAKPOINT_LEN_4; + } + + attr.bp_len = len; + + attr.type = PERF_TYPE_BREAKPOINT; + attr.sample_period = 1; + + return add_event(list, idx, &attr, NULL); +} + +static int config_term(struct perf_event_attr *attr, + struct parse_events_term *term) +{ +#define CHECK_TYPE_VAL(type) \ +do { \ + if (PARSE_EVENTS__TERM_TYPE_ ## type != term->type_val) \ + return -EINVAL; \ +} while (0) + + switch (term->type_term) { + case PARSE_EVENTS__TERM_TYPE_CONFIG: + CHECK_TYPE_VAL(NUM); + attr->config = term->val.num; + break; + case PARSE_EVENTS__TERM_TYPE_CONFIG1: + CHECK_TYPE_VAL(NUM); + attr->config1 = term->val.num; + break; + case PARSE_EVENTS__TERM_TYPE_CONFIG2: + CHECK_TYPE_VAL(NUM); + attr->config2 = term->val.num; + break; + case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD: + CHECK_TYPE_VAL(NUM); + attr->sample_period = term->val.num; + break; + case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE: + /* + * TODO uncomment when the field is available + * attr->branch_sample_type = term->val.num; + */ + break; + case PARSE_EVENTS__TERM_TYPE_NAME: + CHECK_TYPE_VAL(STR); + break; + default: + return -EINVAL; + } + + return 0; +#undef CHECK_TYPE_VAL +} + +static int config_attr(struct perf_event_attr *attr, + struct list_head *head, int fail) +{ + struct parse_events_term *term; + + list_for_each_entry(term, head, list) + if (config_term(attr, 
term) && fail) + return -EINVAL; + + return 0; +} + +int parse_events_add_numeric(struct list_head *list, int *idx, + u32 type, u64 config, + struct list_head *head_config) +{ + struct perf_event_attr attr; + + memset(&attr, 0, sizeof(attr)); + attr.type = type; + attr.config = config; + + if (head_config && + config_attr(&attr, head_config, 1)) + return -EINVAL; + + return add_event(list, idx, &attr, NULL); +} + +static int parse_events__is_name_term(struct parse_events_term *term) +{ + return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME; +} + +static char *pmu_event_name(struct list_head *head_terms) +{ + struct parse_events_term *term; + + list_for_each_entry(term, head_terms, list) + if (parse_events__is_name_term(term)) + return term->val.str; + + return NULL; +} + +int parse_events_add_pmu(struct list_head *list, int *idx, + char *name, struct list_head *head_config) +{ + struct perf_event_attr attr; + struct perf_pmu_info info; + struct perf_pmu *pmu; + struct perf_evsel *evsel; + + pmu = perf_pmu__find(name); + if (!pmu) + return -EINVAL; + + if (pmu->default_config) { + memcpy(&attr, pmu->default_config, + sizeof(struct perf_event_attr)); + } else { + memset(&attr, 0, sizeof(attr)); + } + + if (!head_config) { + attr.type = pmu->type; + evsel = __add_event(list, idx, &attr, NULL, pmu->cpus); + return evsel ? 0 : -ENOMEM; + } + + if (perf_pmu__check_alias(pmu, head_config, &info)) + return -EINVAL; + + /* + * Configure hardcoded terms first, no need to check + * return value when called with fail == 0 ;) + */ + config_attr(&attr, head_config, 0); + + if (perf_pmu__config(pmu, &attr, head_config)) + return -EINVAL; + + evsel = __add_event(list, idx, &attr, pmu_event_name(head_config), + pmu->cpus); + if (evsel) { + evsel->unit = info.unit; + evsel->scale = info.scale; + evsel->per_pkg = info.per_pkg; + evsel->snapshot = info.snapshot; + } + + return evsel ? 0 : -ENOMEM; +} + +int parse_events__modifier_group(struct list_head *list, + char *event_mod) +{ + return parse_events__modifier_event(list, event_mod, true); +} + +void parse_events__set_leader(char *name, struct list_head *list) +{ + struct perf_evsel *leader; + + __perf_evlist__set_leader(list); + leader = list_entry(list->next, struct perf_evsel, node); + leader->group_name = name ? strdup(name) : NULL; +} + +/* list_event is assumed to point to malloc'ed memory */ +void parse_events_update_lists(struct list_head *list_event, + struct list_head *list_all) +{ + /* + * Called for single event definition. Update the + * 'all event' list, and reinit the 'single event' + * list, for next event definition. + */ + list_splice_tail(list_event, list_all); + free(list_event); +} + +struct event_modifier { + int eu; + int ek; + int eh; + int eH; + int eG; + int eI; + int precise; + int exclude_GH; + int sample_read; + int pinned; +}; + +static int get_event_modifier(struct event_modifier *mod, char *str, + struct perf_evsel *evsel) +{ + int eu = evsel ? evsel->attr.exclude_user : 0; + int ek = evsel ? evsel->attr.exclude_kernel : 0; + int eh = evsel ? evsel->attr.exclude_hv : 0; + int eH = evsel ? evsel->attr.exclude_host : 0; + int eG = evsel ? evsel->attr.exclude_guest : 0; + int eI = evsel ? evsel->attr.exclude_idle : 0; + int precise = evsel ? evsel->attr.precise_ip : 0; + int sample_read = 0; + int pinned = evsel ? evsel->attr.pinned : 0; + + int exclude = eu | ek | eh; + int exclude_GH = evsel ? 
evsel->exclude_GH : 0; + + memset(mod, 0, sizeof(*mod)); + + while (*str) { + if (*str == 'u') { + if (!exclude) + exclude = eu = ek = eh = 1; + eu = 0; + } else if (*str == 'k') { + if (!exclude) + exclude = eu = ek = eh = 1; + ek = 0; + } else if (*str == 'h') { + if (!exclude) + exclude = eu = ek = eh = 1; + eh = 0; + } else if (*str == 'G') { + if (!exclude_GH) + exclude_GH = eG = eH = 1; + eG = 0; + } else if (*str == 'H') { + if (!exclude_GH) + exclude_GH = eG = eH = 1; + eH = 0; + } else if (*str == 'I') { + eI = 1; + } else if (*str == 'p') { + precise++; + /* use of precise requires exclude_guest */ + if (!exclude_GH) + eG = 1; + } else if (*str == 'S') { + sample_read = 1; + } else if (*str == 'D') { + pinned = 1; + } else + break; + + ++str; + } + + /* + * precise ip: + * + * 0 - SAMPLE_IP can have arbitrary skid + * 1 - SAMPLE_IP must have constant skid + * 2 - SAMPLE_IP requested to have 0 skid + * 3 - SAMPLE_IP must have 0 skid + * + * See also PERF_RECORD_MISC_EXACT_IP + */ + if (precise > 3) + return -EINVAL; + + mod->eu = eu; + mod->ek = ek; + mod->eh = eh; + mod->eH = eH; + mod->eG = eG; + mod->eI = eI; + mod->precise = precise; + mod->exclude_GH = exclude_GH; + mod->sample_read = sample_read; + mod->pinned = pinned; + + return 0; +} + +/* + * Basic modifier sanity check to validate it contains only one + * instance of any modifier (apart from 'p') present. + */ +static int check_modifier(char *str) +{ + char *p = str; + + /* The sizeof includes 0 byte as well. */ + if (strlen(str) > (sizeof("ukhGHpppSDI") - 1)) + return -1; + + while (*p) { + if (*p != 'p' && strchr(p + 1, *p)) + return -1; + p++; + } + + return 0; +} + +int parse_events__modifier_event(struct list_head *list, char *str, bool add) +{ + struct perf_evsel *evsel; + struct event_modifier mod; + + if (str == NULL) + return 0; + + if (check_modifier(str)) + return -EINVAL; + + if (!add && get_event_modifier(&mod, str, NULL)) + return -EINVAL; + + __evlist__for_each(list, evsel) { + if (add && get_event_modifier(&mod, str, evsel)) + return -EINVAL; + + evsel->attr.exclude_user = mod.eu; + evsel->attr.exclude_kernel = mod.ek; + evsel->attr.exclude_hv = mod.eh; + evsel->attr.precise_ip = mod.precise; + evsel->attr.exclude_host = mod.eH; + evsel->attr.exclude_guest = mod.eG; + evsel->attr.exclude_idle = mod.eI; + evsel->exclude_GH = mod.exclude_GH; + evsel->sample_read = mod.sample_read; + + if (perf_evsel__is_group_leader(evsel)) + evsel->attr.pinned = mod.pinned; + } + + return 0; +} + +int parse_events_name(struct list_head *list, char *name) +{ + struct perf_evsel *evsel; + + __evlist__for_each(list, evsel) { + if (!evsel->name) + evsel->name = strdup(name); + } + + return 0; +} + +static int +comp_pmu(const void *p1, const void *p2) +{ + struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1; + struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2; + + return strcmp(pmu1->symbol, pmu2->symbol); +} + +static void perf_pmu__parse_cleanup(void) +{ + if (perf_pmu_events_list_num > 0) { + struct perf_pmu_event_symbol *p; + int i; + + for (i = 0; i < perf_pmu_events_list_num; i++) { + p = perf_pmu_events_list + i; + free(p->symbol); + } + free(perf_pmu_events_list); + perf_pmu_events_list = NULL; + perf_pmu_events_list_num = 0; + } +} + +#define SET_SYMBOL(str, stype) \ +do { \ + p->symbol = str; \ + if (!p->symbol) \ + goto err; \ + p->type = stype; \ +} while (0) + +/* + * Read the pmu events list from sysfs + * Save it into perf_pmu_events_list + */ +static void 
perf_pmu__parse_init(void) +{ + + struct perf_pmu *pmu = NULL; + struct perf_pmu_alias *alias; + int len = 0; + + pmu = perf_pmu__find("cpu"); + if ((pmu == NULL) || list_empty(&pmu->aliases)) { + perf_pmu_events_list_num = -1; + return; + } + list_for_each_entry(alias, &pmu->aliases, list) { + if (strchr(alias->name, '-')) + len++; + len++; + } + perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len); + if (!perf_pmu_events_list) + return; + perf_pmu_events_list_num = len; + + len = 0; + list_for_each_entry(alias, &pmu->aliases, list) { + struct perf_pmu_event_symbol *p = perf_pmu_events_list + len; + char *tmp = strchr(alias->name, '-'); + + if (tmp != NULL) { + SET_SYMBOL(strndup(alias->name, tmp - alias->name), + PMU_EVENT_SYMBOL_PREFIX); + p++; + SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX); + len += 2; + } else { + SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL); + len++; + } + } + qsort(perf_pmu_events_list, len, + sizeof(struct perf_pmu_event_symbol), comp_pmu); + + return; +err: + perf_pmu__parse_cleanup(); +} + +enum perf_pmu_event_symbol_type +perf_pmu__parse_check(const char *name) +{ + struct perf_pmu_event_symbol p, *r; + + /* scan kernel pmu events from sysfs if needed */ + if (perf_pmu_events_list_num == 0) + perf_pmu__parse_init(); + /* + * name "cpu" could be prefix of cpu-cycles or cpu// events. + * cpu-cycles has been handled by hardcode. + * So it must be cpu// events, not kernel pmu event. + */ + if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu")) + return PMU_EVENT_SYMBOL_ERR; + + p.symbol = strdup(name); + r = bsearch(&p, perf_pmu_events_list, + (size_t) perf_pmu_events_list_num, + sizeof(struct perf_pmu_event_symbol), comp_pmu); + free(p.symbol); + return r ? r->type : PMU_EVENT_SYMBOL_ERR; +} + +static int parse_events__scanner(const char *str, void *data, int start_token) +{ + YY_BUFFER_STATE buffer; + void *scanner; + int ret; + + ret = parse_events_lex_init_extra(start_token, &scanner); + if (ret) + return ret; + + buffer = parse_events__scan_string(str, scanner); + +#ifdef PARSER_DEBUG + parse_events_debug = 1; +#endif + ret = parse_events_parse(data, scanner); + + parse_events__flush_buffer(buffer, scanner); + parse_events__delete_buffer(buffer, scanner); + parse_events_lex_destroy(scanner); + return ret; +} + +/* + * parse event config string, return a list of event terms. + */ +int parse_events_terms(struct list_head *terms, const char *str) +{ + struct parse_events_terms data = { + .terms = NULL, + }; + int ret; + + ret = parse_events__scanner(str, &data, PE_START_TERMS); + if (!ret) { + list_splice(data.terms, terms); + zfree(&data.terms); + return 0; + } + + if (data.terms) + parse_events__free_terms(data.terms); + return ret; +} + +int parse_events(struct perf_evlist *evlist, const char *str) +{ + struct parse_events_evlist data = { + .list = LIST_HEAD_INIT(data.list), + .idx = evlist->nr_entries, + }; + int ret; + + ret = parse_events__scanner(str, &data, PE_START_EVENTS); + perf_pmu__parse_cleanup(); + if (!ret) { + int entries = data.idx - evlist->nr_entries; + perf_evlist__splice_list_tail(evlist, &data.list, entries); + evlist->nr_groups += data.nr_groups; + return 0; + } + + /* + * There are 2 users - builtin-record and builtin-test objects. + * Both call perf_evlist__delete in case of error, so we dont + * need to bother. 
+ */
+ return ret;
+}
+
+int parse_events_option(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
+ int ret = parse_events(evlist, str);
+
+ if (ret) {
+ fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
+ fprintf(stderr, "Run 'perf list' for a list of valid events\n");
+ }
+ return ret;
+}
+
+int parse_filter(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
+ struct perf_evsel *last = NULL;
+
+ if (evlist->nr_entries > 0)
+ last = perf_evlist__last(evlist);
+
+ if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
+ fprintf(stderr,
+ "--filter option should follow a -e tracepoint option\n");
+ return -1;
+ }
+
+ last->filter = strdup(str);
+ if (last->filter == NULL) {
+ fprintf(stderr, "not enough memory to hold filter string\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static const char * const event_type_descriptors[] = {
+ "Hardware event",
+ "Software event",
+ "Tracepoint event",
+ "Hardware cache event",
+ "Raw hardware event descriptor",
+ "Hardware breakpoint",
+};
+
+static int cmp_string(const void *a, const void *b)
+{
+ const char * const *as = a;
+ const char * const *bs = b;
+
+ return strcmp(*as, *bs);
+}
+
+/*
+ * Print the events from <debugfs_mount_point>/tracing/events
+ */
+
+void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
+ bool name_only)
+{
+ DIR *sys_dir, *evt_dir;
+ struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+ char evt_path[MAXPATHLEN];
+ char dir_path[MAXPATHLEN];
+ char **evt_list = NULL;
+ unsigned int evt_i = 0, evt_num = 0;
+ bool evt_num_known = false;
+
+restart:
+ sys_dir = opendir(tracing_events_path);
+ if (!sys_dir)
+ return;
+
+ if (evt_num_known) {
+ evt_list = zalloc(sizeof(char *) * evt_num);
+ if (!evt_list)
+ goto out_close_sys_dir;
+ }
+
+ for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+ if (subsys_glob != NULL &&
+ !strglobmatch(sys_dirent.d_name, subsys_glob))
+ continue;
+
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
+ sys_dirent.d_name);
+ evt_dir = opendir(dir_path);
+ if (!evt_dir)
+ continue;
+
+ for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+ if (event_glob != NULL &&
+ !strglobmatch(evt_dirent.d_name, event_glob))
+ continue;
+
+ if (!evt_num_known) {
+ evt_num++;
+ continue;
+ }
+
+ snprintf(evt_path, MAXPATHLEN, "%s:%s",
+ sys_dirent.d_name, evt_dirent.d_name);
+
+ evt_list[evt_i] = strdup(evt_path);
+ if (evt_list[evt_i] == NULL)
+ goto out_close_evt_dir;
+ evt_i++;
+ }
+ closedir(evt_dir);
+ }
+ closedir(sys_dir);
+
+ if (!evt_num_known) {
+ evt_num_known = true;
+ goto restart;
+ }
+ qsort(evt_list, evt_num, sizeof(char *), cmp_string);
+ evt_i = 0;
+ while (evt_i < evt_num) {
+ if (name_only) {
+ printf("%s ", evt_list[evt_i++]);
+ continue;
+ }
+ printf(" %-50s [%s]\n", evt_list[evt_i++],
+ event_type_descriptors[PERF_TYPE_TRACEPOINT]);
+ }
+ if (evt_num)
+ printf("\n");
+
+out_free:
+ evt_num = evt_i;
+ for (evt_i = 0; evt_i < evt_num; evt_i++)
+ zfree(&evt_list[evt_i]);
+ zfree(&evt_list);
+ return;
+
+out_close_evt_dir:
+ closedir(evt_dir);
+out_close_sys_dir:
+ closedir(sys_dir);
+
+ printf("FATAL: not enough memory to print %s\n",
+ event_type_descriptors[PERF_TYPE_TRACEPOINT]);
+ if (evt_list)
+ goto out_free;
+}
+
+/*
+ * Check whether event is in <debugfs_mount_point>/tracing/events
+ */
+
+int is_valid_tracepoint(const char *event_string)
+{
+ DIR *sys_dir, *evt_dir;
+ struct 
dirent *sys_next, *evt_next, sys_dirent, evt_dirent; + char evt_path[MAXPATHLEN]; + char dir_path[MAXPATHLEN]; + + sys_dir = opendir(tracing_events_path); + if (!sys_dir) + return 0; + + for_each_subsystem(sys_dir, sys_dirent, sys_next) { + + snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, + sys_dirent.d_name); + evt_dir = opendir(dir_path); + if (!evt_dir) + continue; + + for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { + snprintf(evt_path, MAXPATHLEN, "%s:%s", + sys_dirent.d_name, evt_dirent.d_name); + if (!strcmp(evt_path, event_string)) { + closedir(evt_dir); + closedir(sys_dir); + return 1; + } + } + closedir(evt_dir); + } + closedir(sys_dir); + return 0; +} + +static bool is_event_supported(u8 type, unsigned config) +{ + bool ret = true; + int open_return; + struct perf_evsel *evsel; + struct perf_event_attr attr = { + .type = type, + .config = config, + .disabled = 1, + }; + struct { + struct thread_map map; + int threads[1]; + } tmap = { + .map.nr = 1, + .threads = { 0 }, + }; + + evsel = perf_evsel__new(&attr); + if (evsel) { + open_return = perf_evsel__open(evsel, NULL, &tmap.map); + ret = open_return >= 0; + + if (open_return == -EACCES) { + /* + * This happens if the paranoid value + * /proc/sys/kernel/perf_event_paranoid is set to 2 + * Re-run with exclude_kernel set; we don't do that + * by default as some ARM machines do not support it. + * + */ + evsel->attr.exclude_kernel = 1; + ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0; + } + perf_evsel__delete(evsel); + } + + return ret; +} + +int print_hwcache_events(const char *event_glob, bool name_only) +{ + unsigned int type, op, i, evt_i = 0, evt_num = 0; + char name[64]; + char **evt_list = NULL; + bool evt_num_known = false; + +restart: + if (evt_num_known) { + evt_list = zalloc(sizeof(char *) * evt_num); + if (!evt_list) + goto out_enomem; + } + + for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { + for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { + /* skip invalid cache type */ + if (!perf_evsel__is_cache_op_valid(type, op)) + continue; + + for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { + __perf_evsel__hw_cache_type_op_res_name(type, op, i, + name, sizeof(name)); + if (event_glob != NULL && !strglobmatch(name, event_glob)) + continue; + + if (!is_event_supported(PERF_TYPE_HW_CACHE, + type | (op << 8) | (i << 16))) + continue; + + if (!evt_num_known) { + evt_num++; + continue; + } + + evt_list[evt_i] = strdup(name); + if (evt_list[evt_i] == NULL) + goto out_enomem; + evt_i++; + } + } + } + + if (!evt_num_known) { + evt_num_known = true; + goto restart; + } + qsort(evt_list, evt_num, sizeof(char *), cmp_string); + evt_i = 0; + while (evt_i < evt_num) { + if (name_only) { + printf("%s ", evt_list[evt_i++]); + continue; + } + printf(" %-50s [%s]\n", evt_list[evt_i++], + event_type_descriptors[PERF_TYPE_HW_CACHE]); + } + if (evt_num) + printf("\n"); + +out_free: + evt_num = evt_i; + for (evt_i = 0; evt_i < evt_num; evt_i++) + zfree(&evt_list[evt_i]); + zfree(&evt_list); + return evt_num; + +out_enomem: + printf("FATAL: not enough memory to print %s\n", event_type_descriptors[PERF_TYPE_HW_CACHE]); + if (evt_list) + goto out_free; + return evt_num; +} + +void print_symbol_events(const char *event_glob, unsigned type, + struct event_symbol *syms, unsigned max, + bool name_only) +{ + unsigned int i, evt_i = 0, evt_num = 0; + char name[MAX_NAME_LEN]; + char **evt_list = NULL; + bool evt_num_known = false; + +restart: + if (evt_num_known) { + evt_list = zalloc(sizeof(char *) * evt_num); 
+ if (!evt_list)
+ goto out_enomem;
+ syms -= max;
+ }
+
+ for (i = 0; i < max; i++, syms++) {
+
+ if (event_glob != NULL &&
+ !(strglobmatch(syms->symbol, event_glob) ||
+ (syms->alias && strglobmatch(syms->alias, event_glob))))
+ continue;
+
+ if (!is_event_supported(type, i))
+ continue;
+
+ if (!evt_num_known) {
+ evt_num++;
+ continue;
+ }
+
+ if (!name_only && strlen(syms->alias))
+ snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
+ else
+ strncpy(name, syms->symbol, MAX_NAME_LEN);
+
+ evt_list[evt_i] = strdup(name);
+ if (evt_list[evt_i] == NULL)
+ goto out_enomem;
+ evt_i++;
+ }
+
+ if (!evt_num_known) {
+ evt_num_known = true;
+ goto restart;
+ }
+ qsort(evt_list, evt_num, sizeof(char *), cmp_string);
+ evt_i = 0;
+ while (evt_i < evt_num) {
+ if (name_only) {
+ printf("%s ", evt_list[evt_i++]);
+ continue;
+ }
+ printf(" %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[type]);
+ }
+ if (evt_num)
+ printf("\n");
+
+out_free:
+ evt_num = evt_i;
+ for (evt_i = 0; evt_i < evt_num; evt_i++)
+ zfree(&evt_list[evt_i]);
+ zfree(&evt_list);
+ return;
+
+out_enomem:
+ printf("FATAL: not enough memory to print %s\n", event_type_descriptors[type]);
+ if (evt_list)
+ goto out_free;
+}
+
+/*
+ * Print the help text for the event symbols:
+ */
+void print_events(const char *event_glob, bool name_only)
+{
+ print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
+ event_symbols_hw, PERF_COUNT_HW_MAX, name_only);
+
+ print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
+ event_symbols_sw, PERF_COUNT_SW_MAX, name_only);
+
+ print_hwcache_events(event_glob, name_only);
+
+ print_pmu_events(event_glob, name_only);
+
+ if (event_glob != NULL)
+ return;
+
+ if (!name_only) {
+ printf(" %-50s [%s]\n",
+ "rNNN",
+ event_type_descriptors[PERF_TYPE_RAW]);
+ printf(" %-50s [%s]\n",
+ "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
+ event_type_descriptors[PERF_TYPE_RAW]);
+ printf(" (see 'man perf-list' on how to encode it)\n");
+ printf("\n");
+
+ printf(" %-50s [%s]\n",
+ "mem:<addr>[/len][:access]",
+ event_type_descriptors[PERF_TYPE_BREAKPOINT]);
+ printf("\n");
+ }
+
+ print_tracepoint_events(NULL, NULL, name_only);
+}
+
+int parse_events__is_hardcoded_term(struct parse_events_term *term)
+{
+ return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
+}
+
+static int new_term(struct parse_events_term **_term, int type_val,
+ int type_term, char *config,
+ char *str, u64 num)
+{
+ struct parse_events_term *term;
+
+ term = zalloc(sizeof(*term));
+ if (!term)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&term->list);
+ term->type_val = type_val;
+ term->type_term = type_term;
+ term->config = config;
+
+ switch (type_val) {
+ case PARSE_EVENTS__TERM_TYPE_NUM:
+ term->val.num = num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_STR:
+ term->val.str = str;
+ break;
+ default:
+ free(term);
+ return -EINVAL;
+ }
+
+ *_term = term;
+ return 0;
+}
+
+int parse_events_term__num(struct parse_events_term **term,
+ int type_term, char *config, u64 num)
+{
+ return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
+ config, NULL, num);
+}
+
+int parse_events_term__str(struct parse_events_term **term,
+ int type_term, char *config, char *str)
+{
+ return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
+ config, str, 0);
+}
+int parse_events_term__sym_hw(struct parse_events_term **term,
+ char *config, unsigned idx)
+{
+ struct event_symbol *sym;
+
+ BUG_ON(idx >= PERF_COUNT_HW_MAX);
+ sym = &event_symbols_hw[idx];
+
+ if (config)
+ return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
+ PARSE_EVENTS__TERM_TYPE_USER, config,
+ (char *) sym->symbol, 0);
+ else
+ return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
+ PARSE_EVENTS__TERM_TYPE_USER,
+ (char *) "event", (char *) sym->symbol, 0);
+}
+
+int parse_events_term__clone(struct parse_events_term **new,
+ struct parse_events_term *term)
+{
+ return new_term(new, term->type_val, term->type_term, term->config,
+ term->val.str, term->val.num);
+}
+
+void parse_events__free_terms(struct list_head *terms)
+{
+ struct parse_events_term *term, *h;
+
+ list_for_each_entry_safe(term, h, terms, list)
+ free(term);
+}
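The new_term()/parse_events_term__*() helpers above build a linked list of tagged key=value nodes (number or string) that the grammar later hands to the PMU configuration code. A freestanding sketch of the same tagged-union pattern — all names here are illustrative, nothing below is perf API:

#include <stdio.h>
#include <stdlib.h>

enum term_type { TERM_NUM, TERM_STR };	/* like the NUM/STR type_val tags */

struct term {
	enum term_type type;
	const char *key;
	union { unsigned long long num; char *str; } val;
	struct term *next;
};

static struct term *term_num(const char *key, unsigned long long num)
{
	struct term *t = calloc(1, sizeof(*t));

	if (t) {
		t->type = TERM_NUM;
		t->key = key;
		t->val.num = num;
	}
	return t;
}

int main(void)
{
	/* "config=0x1234,period=1000" would parse into nodes like these: */
	struct term *head = term_num("config", 0x1234);
	struct term *t;

	if (!head)
		return 1;
	head->next = term_num("period", 1000);
	for (t = head; t; t = t->next)
		printf("%s = %llu\n", t->key, t->val.num);
	while (head) {		/* free the list, as parse_events__free_terms() does */
		t = head->next;
		free(head);
		head = t;
	}
	return 0;
}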
diff --git a/kernel/tools/perf/util/parse-events.h b/kernel/tools/perf/util/parse-events.h
new file mode 100644
index 000000000..52a2dda4f
--- /dev/null
+++ b/kernel/tools/perf/util/parse-events.h
@@ -0,0 +1,136 @@
+#ifndef __PERF_PARSE_EVENTS_H
+#define __PERF_PARSE_EVENTS_H
+/*
+ * Parse symbolic events/counts passed in as options:
+ */
+
+#include <linux/list.h>
+#include <stdbool.h>
+#include <linux/types.h>
+#include <linux/perf_event.h>
+
+struct list_head;
+struct perf_evsel;
+struct perf_evlist;
+
+struct option;
+
+struct tracepoint_path {
+ char *system;
+ char *name;
+ struct tracepoint_path *next;
+};
+
+extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
+extern struct tracepoint_path *tracepoint_name_to_path(const char *name);
+extern bool have_tracepoints(struct list_head *evlist);
+
+const char *event_type(int type);
+
+extern int parse_events_option(const struct option *opt, const char *str,
+ int unset);
+extern int parse_events(struct perf_evlist *evlist, const char *str);
+extern int parse_events_terms(struct list_head *terms, const char *str);
+extern int parse_filter(const struct option *opt, const char *str, int unset);
+
+#define EVENTS_HELP_MAX (128*1024)
+
+enum perf_pmu_event_symbol_type {
+ PMU_EVENT_SYMBOL_ERR, /* not a PMU EVENT */
+ PMU_EVENT_SYMBOL, /* normal style PMU event */
+ PMU_EVENT_SYMBOL_PREFIX, /* prefix of pre-suf style event */
+ PMU_EVENT_SYMBOL_SUFFIX, /* suffix of pre-suf style event */
+};
+
+struct perf_pmu_event_symbol {
+ char *symbol;
+ enum perf_pmu_event_symbol_type type;
+};
+
+enum {
+ PARSE_EVENTS__TERM_TYPE_NUM,
+ PARSE_EVENTS__TERM_TYPE_STR,
+};
+
+enum {
+ PARSE_EVENTS__TERM_TYPE_USER,
+ PARSE_EVENTS__TERM_TYPE_CONFIG,
+ PARSE_EVENTS__TERM_TYPE_CONFIG1,
+ PARSE_EVENTS__TERM_TYPE_CONFIG2,
+ PARSE_EVENTS__TERM_TYPE_NAME,
+ PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD,
+ PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
+};
+
+struct parse_events_term {
+ char *config;
+ union {
+ char *str;
+ u64 num;
+ } val;
+ int type_val;
+ int type_term;
+ struct list_head list;
+ bool used;
+};
+
+struct parse_events_evlist {
+ struct list_head list;
+ int idx;
+ int nr_groups;
+};
+
+struct parse_events_terms {
+ struct list_head *terms;
+};
+
+int parse_events__is_hardcoded_term(struct parse_events_term *term);
+int parse_events_term__num(struct parse_events_term **_term,
+ int type_term, char *config, u64 num);
+int parse_events_term__str(struct parse_events_term **_term,
+ int type_term, char *config, char *str);
+int parse_events_term__sym_hw(struct parse_events_term **term,
+ char *config, unsigned idx);
+int parse_events_term__clone(struct parse_events_term **new,
+ struct parse_events_term *term);
+void parse_events__free_terms(struct list_head *terms);
+int parse_events__modifier_event(struct list_head *list, char *str, bool add);
+int parse_events__modifier_group(struct list_head *list, char *event_mod);
+int parse_events_name(struct list_head *list, char *name);
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
+ char *sys, char *event);
+int parse_events_add_numeric(struct list_head *list, int *idx,
+ u32 type, u64 config,
+ struct list_head *head_config);
+int parse_events_add_cache(struct list_head *list, int *idx,
+ char *type, char *op_result1, char *op_result2);
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
+ void *ptr, char *type, u64 len);
+int parse_events_add_pmu(struct list_head *list, int *idx,
+ char *pmu , struct list_head *head_config);
+enum perf_pmu_event_symbol_type
+perf_pmu__parse_check(const char *name);
+void parse_events__set_leader(char *name, struct list_head *list);
+void parse_events_update_lists(struct list_head *list_event,
+ struct list_head *list_all);
+void parse_events_error(void *data, void *scanner, char const *msg);
+
+void print_events(const char *event_glob, bool name_only);
+
+struct event_symbol {
+ const char *symbol;
+ const char *alias;
+};
+extern struct event_symbol event_symbols_hw[];
+extern struct event_symbol event_symbols_sw[];
+void print_symbol_events(const char *event_glob, unsigned type,
+ struct event_symbol *syms, unsigned max,
+ bool name_only);
+void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
+ bool name_only);
+int print_hwcache_events(const char *event_glob, bool name_only);
+extern int is_valid_tracepoint(const char *event_string);
+
+int valid_event_mount(const char *eventfs);
+
+#endif /* __PERF_PARSE_EVENTS_H */
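In the lexer that follows, the __value() helper converts numeric tokens with strtoull() and relies on the C library convention that errno is only written on failure, so it must be cleared before the call. A standalone check of that idiom in plain C, independent of the scanner (parse_u64 is an illustrative name):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Mirrors __value(): clear errno, convert, treat a set errno as failure. */
static int parse_u64(const char *str, int base, unsigned long long *out)
{
	errno = 0;
	*out = strtoull(str, NULL, base);
	return errno ? -1 : 0;
}

int main(void)
{
	unsigned long long v;

	if (parse_u64("1a2b", 16, &v) == 0)	/* raw event "r1a2b" minus the 'r' */
		printf("0x%llx\n", v);
	if (parse_u64("99999999999999999999999", 10, &v) < 0)
		printf("overflow rejected, the case where the scanner returns PE_ERROR\n");
	return 0;
}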
diff --git a/kernel/tools/perf/util/parse-events.l b/kernel/tools/perf/util/parse-events.l
new file mode 100644
index 000000000..8895cf313
--- /dev/null
+++ b/kernel/tools/perf/util/parse-events.l
@@ -0,0 +1,247 @@
+
+%option reentrant
+%option bison-bridge
+%option prefix="parse_events_"
+%option stack
+
+%{
+#include <errno.h>
+#include "../perf.h"
+#include "parse-events-bison.h"
+#include "parse-events.h"
+
+char *parse_events_get_text(yyscan_t yyscanner);
+YYSTYPE *parse_events_get_lval(yyscan_t yyscanner);
+
+static int __value(YYSTYPE *yylval, char *str, int base, int token)
+{
+ u64 num;
+
+ errno = 0;
+ num = strtoull(str, NULL, base);
+ if (errno)
+ return PE_ERROR;
+
+ yylval->num = num;
+ return token;
+}
+
+static int value(yyscan_t scanner, int base)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+ char *text = parse_events_get_text(scanner);
+
+ return __value(yylval, text, base, PE_VALUE);
+}
+
+static int raw(yyscan_t scanner)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+ char *text = parse_events_get_text(scanner);
+
+ return __value(yylval, text + 1, 16, PE_RAW);
+}
+
+static int str(yyscan_t scanner, int token)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+ char *text = parse_events_get_text(scanner);
+
+ yylval->str = strdup(text);
+ return token;
+}
+
+static int pmu_str_check(yyscan_t scanner)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+ char *text = parse_events_get_text(scanner);
+
+ yylval->str = strdup(text);
+ switch (perf_pmu__parse_check(text)) {
+ case PMU_EVENT_SYMBOL_PREFIX:
+ return PE_PMU_EVENT_PRE;
+ case PMU_EVENT_SYMBOL_SUFFIX:
+ return PE_PMU_EVENT_SUF;
+ case PMU_EVENT_SYMBOL:
+ return PE_KERNEL_PMU_EVENT;
+ default:
+ return PE_NAME;
+ }
+}
+
+static int sym(yyscan_t scanner, int type, int config)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+
+ yylval->num = (type << 16) + config;
+ return type == PERF_TYPE_HARDWARE ? PE_VALUE_SYM_HW : PE_VALUE_SYM_SW;
+}
+
+static int term(yyscan_t scanner, int type)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+
+ yylval->num = type;
+ return PE_TERM;
+}
+
+%}
+
+%x mem
+%s config
+%x event
+
+group [^,{}/]*[{][^}]*[}][^,{}/]*
+event_pmu [^,{}/]+[/][^/]*[/][^,{}/]*
+event [^,{}/]+
+
+num_dec [0-9]+
+num_hex 0x[a-fA-F0-9]+
+num_raw_hex [a-fA-F0-9]+
+name [a-zA-Z_*?][a-zA-Z0-9_*?]*
+name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]*
+/* If you add a modifier you need to update check_modifier() */
+modifier_event [ukhpGHSDI]+
+modifier_bp [rwx]{1,3}
+
+%%
+
+%{
+ {
+ int start_token;
+
+ start_token = parse_events_get_extra(yyscanner);
+
+ if (start_token == PE_START_TERMS)
+ BEGIN(config);
+ else if (start_token == PE_START_EVENTS)
+ BEGIN(event);
+
+ if (start_token) {
+ parse_events_set_extra(NULL, yyscanner);
+ return start_token;
+ }
+ }
+%}
+
+<event>{
+
+{group} {
+ BEGIN(INITIAL); yyless(0);
+ }
+
+{event_pmu} |
+{event} {
+ str(yyscanner, PE_EVENT_NAME);
+ BEGIN(INITIAL); yyless(0);
+ return PE_EVENT_NAME;
+ }
+
+. |
+<<EOF>> {
+ BEGIN(INITIAL); yyless(0);
+ }
+
+}
+
+<config>{
+config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); }
+config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); }
+config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); }
+name { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); }
+period { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
+branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
+, { return ','; }
+"/" { BEGIN(INITIAL); return '/'; }
+{name_minus} { return str(yyscanner, PE_NAME); }
+}
+
+<mem>{
+{modifier_bp} { return str(yyscanner, PE_MODIFIER_BP); }
+: { return ':'; }
+"/" { return '/'; }
+{num_dec} { return value(yyscanner, 10); }
+{num_hex} { return value(yyscanner, 16); }
+ /*
+ * We need to separate 'mem:' scanner part, in order to get specific
+ * modifier bits parsed out. Otherwise we would need to handle PE_NAME
+ * and we'd need to parse it manually. During the escape from <mem>
+ * state we need to put the escaping char back, so we dont miss it.
+ */
+. { unput(*yytext); BEGIN(INITIAL); }
+ /*
+ * We destroy the scanner after reaching EOF,
+ * but anyway just to be sure get back to INIT state.
+ */
+<<EOF>> { BEGIN(INITIAL); }
+}
+
+cpu-cycles|cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
+stalled-cycles-frontend|idle-cycles-frontend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
+stalled-cycles-backend|idle-cycles-backend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
+instructions { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); }
+cache-references { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); }
+cache-misses { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); }
+branch-instructions|branches { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
+branch-misses { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); }
+bus-cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); }
+ref-cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
+cpu-clock { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); }
+task-clock { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); }
+page-faults|faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); }
+minor-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); }
+major-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); }
+context-switches|cs { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); }
+cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
+alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
+emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
+dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
+
+ /*
+ * We have to handle the kernel PMU event cycles-ct/cycles-t/mem-loads/mem-stores separately.
+ * Because the prefix cycles is mixed up with cpu-cycles.
+ * loads and stores are mixed up with cache event
+ */
+cycles-ct { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
+cycles-t { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
+mem-loads { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
+mem-stores { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
+
+L1-dcache|l1-d|l1d|L1-data |
+L1-icache|l1-i|l1i|L1-instruction |
+LLC|L2 |
+dTLB|d-tlb|Data-TLB |
+iTLB|i-tlb|Instruction-TLB |
+branch|branches|bpu|btb|bpc |
+node { return str(yyscanner, PE_NAME_CACHE_TYPE); }
+
+load|loads|read |
+store|stores|write |
+prefetch|prefetches |
+speculative-read|speculative-load |
+refs|Reference|ops|access |
+misses|miss { return str(yyscanner, PE_NAME_CACHE_OP_RESULT); }
+
+mem: { BEGIN(mem); return PE_PREFIX_MEM; }
+r{num_raw_hex} { return raw(yyscanner); }
+{num_dec} { return value(yyscanner, 10); }
+{num_hex} { return value(yyscanner, 16); }
+
+{modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); }
+{name} { return pmu_str_check(yyscanner); }
+"/" { BEGIN(config); return '/'; }
+- { return '-'; }
+, { BEGIN(event); return ','; }
+: { return ':'; }
+"{" { BEGIN(event); return '{'; }
+"}" { return '}'; }
+= { return '='; }
+\n { }
+. { }
+
+%%
+
+int parse_events_wrap(void *scanner __maybe_unused)
+{
+ return 1;
+}
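The scanner above leans on flex start conditions (<event>, <config>, <mem>) so the same characters tokenize differently by context; for example, '/' either enters or leaves the config section. A hand-rolled C miniature of that mode-switching idea — illustrative only, not generated from the .l file:

#include <stdio.h>

enum mode { MODE_INITIAL, MODE_CONFIG };	/* stand-ins for start conditions */

int main(void)
{
	const char *input = "cpu/config=0x42/u";
	enum mode m = MODE_INITIAL;
	const char *p;

	for (p = input; *p; p++) {
		if (*p == '/') {	/* same trigger the real rules use for BEGIN(config) */
			m = (m == MODE_INITIAL) ? MODE_CONFIG : MODE_INITIAL;
			printf("mode switch at '/'\n");
		} else {
			printf("%s: %c\n",
			       m == MODE_CONFIG ? "config token char" : "event name char", *p);
		}
	}
	return 0;
}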
diff --git a/kernel/tools/perf/util/parse-events.y b/kernel/tools/perf/util/parse-events.y
new file mode 100644
index 000000000..72def077d
--- /dev/null
+++ b/kernel/tools/perf/util/parse-events.y
@@ -0,0 +1,526 @@
+%pure-parser
+%parse-param {void *_data}
+%parse-param {void *scanner}
+%lex-param {void* scanner}
+
+%{
+
+#define YYDEBUG 1
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include "util.h"
+#include "parse-events.h"
+#include "parse-events-bison.h"
+
+extern int parse_events_lex (YYSTYPE* lvalp, void* scanner);
+
+#define ABORT_ON(val) \
+do { \
+ if (val) \
+ YYABORT; \
+} while (0)
+
+#define ALLOC_LIST(list) \
+do { \
+ list = malloc(sizeof(*list)); \
+ ABORT_ON(!list); \
+ INIT_LIST_HEAD(list); \
+} while (0)
+
+static inc_group_count(struct list_head *list,
+ struct parse_events_evlist *data)
+{
+ /* Count groups only have more than 1 members */
+ if (!list_is_last(list->next, list))
+ data->nr_groups++;
+}
+
+%}
+
+%token PE_START_EVENTS PE_START_TERMS
+%token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM
+%token PE_EVENT_NAME
+%token PE_NAME
+%token PE_MODIFIER_EVENT PE_MODIFIER_BP
+%token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
+%token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP
+%token PE_ERROR
+%token PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT
+%type <num> PE_VALUE
+%type <num> PE_VALUE_SYM_HW
+%type <num> PE_VALUE_SYM_SW
+%type <num> PE_RAW
+%type <num> PE_TERM
+%type <str> PE_NAME
+%type <str> PE_NAME_CACHE_TYPE
+%type <str> PE_NAME_CACHE_OP_RESULT
+%type <str> PE_MODIFIER_EVENT
+%type <str> PE_MODIFIER_BP
+%type <str> PE_EVENT_NAME
+%type <str> PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT
+%type <num> value_sym
+%type <head> event_config
+%type <term> event_term
+%type <head> event_pmu
+%type <head> event_legacy_symbol
+%type <head> event_legacy_cache
+%type <head> event_legacy_mem
+%type <head> event_legacy_tracepoint
+%type <head> event_legacy_numeric
+%type <head> event_legacy_raw
+%type <head> event_def
+%type <head> event_mod
+%type <head> event_name
+%type <head> event
+%type <head> events
+%type <head> group_def
+%type <head> group
+%type <head> groups
+
+%union
+{
+ char *str;
+ u64 num;
+ struct list_head *head;
+ struct parse_events_term *term;
+}
+%%
+
+start:
+PE_START_EVENTS start_events
+|
+PE_START_TERMS start_terms
+
+start_events: groups
+{
+ struct parse_events_evlist *data = _data;
+
+ parse_events_update_lists($1, &data->list);
+}
+
+groups:
+groups ',' group
+{
+ struct list_head *list = $1;
+ struct list_head *group = $3;
+
+ parse_events_update_lists(group, list);
+ $$ = list;
+}
+|
+groups ',' event
+{
+ struct list_head *list = $1;
+ struct list_head *event = $3;
+
+ parse_events_update_lists(event, list);
+ $$ = list;
+}
+|
+group
+|
+event
+
+group:
+group_def ':' PE_MODIFIER_EVENT
+{
+ struct list_head *list = $1;
+
+ ABORT_ON(parse_events__modifier_group(list, $3));
+ $$ = list;
+}
+|
+group_def
+
+group_def:
+PE_NAME '{' events '}'
+{
+ struct list_head *list = $3;
+
+ inc_group_count(list, _data);
+ parse_events__set_leader($1, list);
+ $$ = list;
+}
+|
+'{' events '}'
+{
+ struct list_head *list = $2;
+
+ inc_group_count(list, _data);
+ parse_events__set_leader(NULL, list);
+ $$ = list;
+}
+
+events:
+events ',' event
+{
+ struct list_head *event = $3;
+ struct list_head *list = $1;
+
+ parse_events_update_lists(event, list);
+ $$ = list;
+}
+|
+event
+
+event: event_mod
+
+event_mod:
+event_name PE_MODIFIER_EVENT
+{
+ struct list_head *list = $1;
+
+ /*
+ * Apply modifier on all events added by single event definition
+ * (there could be more events added for multiple tracepoint
+ * definitions via '*?'.
+ */ + ABORT_ON(parse_events__modifier_event(list, $2, false)); + $$ = list; +} +| +event_name + +event_name: +PE_EVENT_NAME event_def +{ + ABORT_ON(parse_events_name($2, $1)); + free($1); + $$ = $2; +} +| +event_def + +event_def: event_pmu | + event_legacy_symbol | + event_legacy_cache sep_dc | + event_legacy_mem | + event_legacy_tracepoint sep_dc | + event_legacy_numeric sep_dc | + event_legacy_raw sep_dc + +event_pmu: +PE_NAME '/' event_config '/' +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_pmu(list, &data->idx, $1, $3)); + parse_events__free_terms($3); + $$ = list; +} +| +PE_NAME '/' '/' +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_pmu(list, &data->idx, $1, NULL)); + $$ = list; +} +| +PE_KERNEL_PMU_EVENT sep_dc +{ + struct parse_events_evlist *data = _data; + struct list_head *head; + struct parse_events_term *term; + struct list_head *list; + + ALLOC_LIST(head); + ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, + $1, 1)); + list_add_tail(&term->list, head); + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_pmu(list, &data->idx, "cpu", head)); + parse_events__free_terms(head); + $$ = list; +} +| +PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc +{ + struct parse_events_evlist *data = _data; + struct list_head *head; + struct parse_events_term *term; + struct list_head *list; + char pmu_name[128]; + snprintf(&pmu_name, 128, "%s-%s", $1, $3); + + ALLOC_LIST(head); + ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, + &pmu_name, 1)); + list_add_tail(&term->list, head); + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_pmu(list, &data->idx, "cpu", head)); + parse_events__free_terms(head); + $$ = list; +} + +value_sym: +PE_VALUE_SYM_HW +| +PE_VALUE_SYM_SW + +event_legacy_symbol: +value_sym '/' event_config '/' +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + int type = $1 >> 16; + int config = $1 & 255; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_numeric(list, &data->idx, + type, config, $3)); + parse_events__free_terms($3); + $$ = list; +} +| +value_sym sep_slash_dc +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + int type = $1 >> 16; + int config = $1 & 255; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_numeric(list, &data->idx, + type, config, NULL)); + $$ = list; +} + +event_legacy_cache: +PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, $5)); + $$ = list; +} +| +PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, NULL)); + $$ = list; +} +| +PE_NAME_CACHE_TYPE +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_cache(list, &data->idx, $1, NULL, NULL)); + $$ = list; +} + +event_legacy_mem: +PE_PREFIX_MEM PE_VALUE '/' PE_VALUE ':' PE_MODIFIER_BP sep_dc +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_breakpoint(list, &data->idx, + (void *) $2, $6, $4)); + $$ = list; +} +| +PE_PREFIX_MEM PE_VALUE '/' PE_VALUE sep_dc +{ + struct parse_events_evlist *data = _data; + struct 
list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_breakpoint(list, &data->idx, + (void *) $2, NULL, $4)); + $$ = list; +} +| +PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_breakpoint(list, &data->idx, + (void *) $2, $4, 0)); + $$ = list; +} +| +PE_PREFIX_MEM PE_VALUE sep_dc +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_breakpoint(list, &data->idx, + (void *) $2, NULL, 0)); + $$ = list; +} + +event_legacy_tracepoint: +PE_NAME '-' PE_NAME ':' PE_NAME +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + char sys_name[128]; + snprintf(sys_name, sizeof(sys_name), "%s-%s", $1, $3); + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_tracepoint(list, &data->idx, sys_name, $5)); + $$ = list; +} +| +PE_NAME ':' PE_NAME +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_tracepoint(list, &data->idx, $1, $3)); + $$ = list; +} + +event_legacy_numeric: +PE_VALUE ':' PE_VALUE +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_numeric(list, &data->idx, (u32)$1, $3, NULL)); + $$ = list; +} + +event_legacy_raw: +PE_RAW +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_numeric(list, &data->idx, + PERF_TYPE_RAW, $1, NULL)); + $$ = list; +} + +start_terms: event_config +{ + struct parse_events_terms *data = _data; + data->terms = $1; +} + +event_config: +event_config ',' event_term +{ + struct list_head *head = $1; + struct parse_events_term *term = $3; + + ABORT_ON(!head); + list_add_tail(&term->list, head); + $$ = $1; +} +| +event_term +{ + struct list_head *head = malloc(sizeof(*head)); + struct parse_events_term *term = $1; + + ABORT_ON(!head); + INIT_LIST_HEAD(head); + list_add_tail(&term->list, head); + $$ = head; +} + +event_term: +PE_NAME '=' PE_NAME +{ + struct parse_events_term *term; + + ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, + $1, $3)); + $$ = term; +} +| +PE_NAME '=' PE_VALUE +{ + struct parse_events_term *term; + + ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, + $1, $3)); + $$ = term; +} +| +PE_NAME '=' PE_VALUE_SYM_HW +{ + struct parse_events_term *term; + int config = $3 & 255; + + ABORT_ON(parse_events_term__sym_hw(&term, $1, config)); + $$ = term; +} +| +PE_NAME +{ + struct parse_events_term *term; + + ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, + $1, 1)); + $$ = term; +} +| +PE_VALUE_SYM_HW +{ + struct parse_events_term *term; + int config = $1 & 255; + + ABORT_ON(parse_events_term__sym_hw(&term, NULL, config)); + $$ = term; +} +| +PE_TERM '=' PE_NAME +{ + struct parse_events_term *term; + + ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3)); + $$ = term; +} +| +PE_TERM '=' PE_VALUE +{ + struct parse_events_term *term; + + ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3)); + $$ = term; +} +| +PE_TERM +{ + struct parse_events_term *term; + + ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1)); + $$ = term; +} + +sep_dc: ':' | + +sep_slash_dc: '/' | ':' | + +%% + +void parse_events_error(void *data __maybe_unused, void *scanner __maybe_unused, + char const *msg __maybe_unused) +{ +} diff --git a/kernel/tools/perf/util/parse-options.c
b/kernel/tools/perf/util/parse-options.c new file mode 100644 index 000000000..01626be2a --- /dev/null +++ b/kernel/tools/perf/util/parse-options.c @@ -0,0 +1,757 @@ +#include "util.h" +#include "parse-options.h" +#include "cache.h" +#include "header.h" + +#define OPT_SHORT 1 +#define OPT_UNSET 2 + +static int opterror(const struct option *opt, const char *reason, int flags) +{ + if (flags & OPT_SHORT) + return error("switch `%c' %s", opt->short_name, reason); + if (flags & OPT_UNSET) + return error("option `no-%s' %s", opt->long_name, reason); + return error("option `%s' %s", opt->long_name, reason); +} + +static int get_arg(struct parse_opt_ctx_t *p, const struct option *opt, + int flags, const char **arg) +{ + if (p->opt) { + *arg = p->opt; + p->opt = NULL; + } else if ((opt->flags & PARSE_OPT_LASTARG_DEFAULT) && (p->argc == 1 || + **(p->argv + 1) == '-')) { + *arg = (const char *)opt->defval; + } else if (p->argc > 1) { + p->argc--; + *arg = *++p->argv; + } else + return opterror(opt, "requires a value", flags); + return 0; +} + +static int get_value(struct parse_opt_ctx_t *p, + const struct option *opt, int flags) +{ + const char *s, *arg = NULL; + const int unset = flags & OPT_UNSET; + int err; + + if (unset && p->opt) + return opterror(opt, "takes no value", flags); + if (unset && (opt->flags & PARSE_OPT_NONEG)) + return opterror(opt, "isn't available", flags); + if (opt->flags & PARSE_OPT_DISABLED) + return opterror(opt, "is not usable", flags); + + if (opt->flags & PARSE_OPT_EXCLUSIVE) { + if (p->excl_opt && p->excl_opt != opt) { + char msg[128]; + + if (((flags & OPT_SHORT) && p->excl_opt->short_name) || + p->excl_opt->long_name == NULL) { + scnprintf(msg, sizeof(msg), "cannot be used with switch `%c'", + p->excl_opt->short_name); + } else { + scnprintf(msg, sizeof(msg), "cannot be used with %s", + p->excl_opt->long_name); + } + opterror(opt, msg, flags); + return -3; + } + p->excl_opt = opt; + } + if (!(flags & OPT_SHORT) && p->opt) { + switch (opt->type) { + case OPTION_CALLBACK: + if (!(opt->flags & PARSE_OPT_NOARG)) + break; + /* FALLTHROUGH */ + case OPTION_BOOLEAN: + case OPTION_INCR: + case OPTION_BIT: + case OPTION_SET_UINT: + case OPTION_SET_PTR: + return opterror(opt, "takes no value", flags); + case OPTION_END: + case OPTION_ARGUMENT: + case OPTION_GROUP: + case OPTION_STRING: + case OPTION_INTEGER: + case OPTION_UINTEGER: + case OPTION_LONG: + case OPTION_U64: + default: + break; + } + } + + switch (opt->type) { + case OPTION_BIT: + if (unset) + *(int *)opt->value &= ~opt->defval; + else + *(int *)opt->value |= opt->defval; + return 0; + + case OPTION_BOOLEAN: + *(bool *)opt->value = unset ? false : true; + if (opt->set) + *(bool *)opt->set = true; + return 0; + + case OPTION_INCR: + *(int *)opt->value = unset ? 0 : *(int *)opt->value + 1; + return 0; + + case OPTION_SET_UINT: + *(unsigned int *)opt->value = unset ? 0 : opt->defval; + return 0; + + case OPTION_SET_PTR: + *(void **)opt->value = unset ? NULL : (void *)opt->defval; + return 0; + + case OPTION_STRING: + err = 0; + if (unset) + *(const char **)opt->value = NULL; + else if (opt->flags & PARSE_OPT_OPTARG && !p->opt) + *(const char **)opt->value = (const char *)opt->defval; + else + err = get_arg(p, opt, flags, (const char **)opt->value); + + /* PARSE_OPT_NOEMPTY: Allow NULL but disallow empty string. */ + if (opt->flags & PARSE_OPT_NOEMPTY) { + const char *val = *(const char **)opt->value; + + if (!val) + return err; + + /* Similar to unset if we are given an empty string. 
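+ * E.g., for a hypothetical OPT_STRING_NOEMPTY 'output' option, + * '--output=' resets the value to NULL instead of storing "".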
*/ + if (val[0] == '\0') { + *(const char **)opt->value = NULL; + return 0; + } + } + + return err; + + case OPTION_CALLBACK: + if (unset) + return (*opt->callback)(opt, NULL, 1) ? (-1) : 0; + if (opt->flags & PARSE_OPT_NOARG) + return (*opt->callback)(opt, NULL, 0) ? (-1) : 0; + if (opt->flags & PARSE_OPT_OPTARG && !p->opt) + return (*opt->callback)(opt, NULL, 0) ? (-1) : 0; + if (get_arg(p, opt, flags, &arg)) + return -1; + return (*opt->callback)(opt, arg, 0) ? (-1) : 0; + + case OPTION_INTEGER: + if (unset) { + *(int *)opt->value = 0; + return 0; + } + if (opt->flags & PARSE_OPT_OPTARG && !p->opt) { + *(int *)opt->value = opt->defval; + return 0; + } + if (get_arg(p, opt, flags, &arg)) + return -1; + *(int *)opt->value = strtol(arg, (char **)&s, 10); + if (*s) + return opterror(opt, "expects a numerical value", flags); + return 0; + + case OPTION_UINTEGER: + if (unset) { + *(unsigned int *)opt->value = 0; + return 0; + } + if (opt->flags & PARSE_OPT_OPTARG && !p->opt) { + *(unsigned int *)opt->value = opt->defval; + return 0; + } + if (get_arg(p, opt, flags, &arg)) + return -1; + *(unsigned int *)opt->value = strtol(arg, (char **)&s, 10); + if (*s) + return opterror(opt, "expects a numerical value", flags); + return 0; + + case OPTION_LONG: + if (unset) { + *(long *)opt->value = 0; + return 0; + } + if (opt->flags & PARSE_OPT_OPTARG && !p->opt) { + *(long *)opt->value = opt->defval; + return 0; + } + if (get_arg(p, opt, flags, &arg)) + return -1; + *(long *)opt->value = strtol(arg, (char **)&s, 10); + if (*s) + return opterror(opt, "expects a numerical value", flags); + return 0; + + case OPTION_U64: + if (unset) { + *(u64 *)opt->value = 0; + return 0; + } + if (opt->flags & PARSE_OPT_OPTARG && !p->opt) { + *(u64 *)opt->value = opt->defval; + return 0; + } + if (get_arg(p, opt, flags, &arg)) + return -1; + *(u64 *)opt->value = strtoull(arg, (char **)&s, 10); + if (*s) + return opterror(opt, "expects a numerical value", flags); + return 0; + + case OPTION_END: + case OPTION_ARGUMENT: + case OPTION_GROUP: + default: + die("should not happen, someone must be hit on the forehead"); + } +} + +static int parse_short_opt(struct parse_opt_ctx_t *p, const struct option *options) +{ + for (; options->type != OPTION_END; options++) { + if (options->short_name == *p->opt) { + p->opt = p->opt[1] ? p->opt + 1 : NULL; + return get_value(p, options, OPT_SHORT); + } + } + return -2; +} + +static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg, + const struct option *options) +{ + const char *arg_end = strchr(arg, '='); + const struct option *abbrev_option = NULL, *ambiguous_option = NULL; + int abbrev_flags = 0, ambiguous_flags = 0; + + if (!arg_end) + arg_end = arg + strlen(arg); + + for (; options->type != OPTION_END; options++) { + const char *rest; + int flags = 0; + + if (!options->long_name) + continue; + + rest = skip_prefix(arg, options->long_name); + if (options->type == OPTION_ARGUMENT) { + if (!rest) + continue; + if (*rest == '=') + return opterror(options, "takes no value", flags); + if (*rest) + continue; + p->out[p->cpidx++] = arg - 2; + return 0; + } + if (!rest) { + if (!prefixcmp(options->long_name, "no-")) { + /* + * The long name itself starts with "no-", so + * accept the option without "no-" so that users + * do not have to enter "no-no-" to get the + * negation. 
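+ * E.g., a hypothetical option with long_name "no-inline" can be + * negated as plain "--inline".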
+ */ + rest = skip_prefix(arg, options->long_name + 3); + if (rest) { + flags |= OPT_UNSET; + goto match; + } + /* Abbreviated case */ + if (!prefixcmp(options->long_name + 3, arg)) { + flags |= OPT_UNSET; + goto is_abbreviated; + } + } + /* abbreviated? */ + if (!strncmp(options->long_name, arg, arg_end - arg)) { +is_abbreviated: + if (abbrev_option) { + /* + * If this is abbreviated, it is + * ambiguous. So when there is no + * exact match later, we need to + * error out. + */ + ambiguous_option = abbrev_option; + ambiguous_flags = abbrev_flags; + } + if (!(flags & OPT_UNSET) && *arg_end) + p->opt = arg_end + 1; + abbrev_option = options; + abbrev_flags = flags; + continue; + } + /* negated and abbreviated very much? */ + if (!prefixcmp("no-", arg)) { + flags |= OPT_UNSET; + goto is_abbreviated; + } + /* negated? */ + if (strncmp(arg, "no-", 3)) + continue; + flags |= OPT_UNSET; + rest = skip_prefix(arg + 3, options->long_name); + /* abbreviated and negated? */ + if (!rest && !prefixcmp(options->long_name, arg + 3)) + goto is_abbreviated; + if (!rest) + continue; + } +match: + if (*rest) { + if (*rest != '=') + continue; + p->opt = rest + 1; + } + return get_value(p, options, flags); + } + + if (ambiguous_option) + return error("Ambiguous option: %s " + "(could be --%s%s or --%s%s)", + arg, + (ambiguous_flags & OPT_UNSET) ? "no-" : "", + ambiguous_option->long_name, + (abbrev_flags & OPT_UNSET) ? "no-" : "", + abbrev_option->long_name); + if (abbrev_option) + return get_value(p, abbrev_option, abbrev_flags); + return -2; +} + +static void check_typos(const char *arg, const struct option *options) +{ + if (strlen(arg) < 3) + return; + + if (!prefixcmp(arg, "no-")) { + error ("did you mean `--%s` (with two dashes ?)", arg); + exit(129); + } + + for (; options->type != OPTION_END; options++) { + if (!options->long_name) + continue; + if (!prefixcmp(options->long_name, arg)) { + error ("did you mean `--%s` (with two dashes ?)", arg); + exit(129); + } + } +} + +void parse_options_start(struct parse_opt_ctx_t *ctx, + int argc, const char **argv, int flags) +{ + memset(ctx, 0, sizeof(*ctx)); + ctx->argc = argc - 1; + ctx->argv = argv + 1; + ctx->out = argv; + ctx->cpidx = ((flags & PARSE_OPT_KEEP_ARGV0) != 0); + ctx->flags = flags; + if ((flags & PARSE_OPT_KEEP_UNKNOWN) && + (flags & PARSE_OPT_STOP_AT_NON_OPTION)) + die("STOP_AT_NON_OPTION and KEEP_UNKNOWN don't go together"); +} + +static int usage_with_options_internal(const char * const *, + const struct option *, int); + +int parse_options_step(struct parse_opt_ctx_t *ctx, + const struct option *options, + const char * const usagestr[]) +{ + int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP); + int excl_short_opt = 1; + const char *arg; + + /* we must reset ->opt, unknown short option leave it dangling */ + ctx->opt = NULL; + + for (; ctx->argc; ctx->argc--, ctx->argv++) { + arg = ctx->argv[0]; + if (*arg != '-' || !arg[1]) { + if (ctx->flags & PARSE_OPT_STOP_AT_NON_OPTION) + break; + ctx->out[ctx->cpidx++] = ctx->argv[0]; + continue; + } + + if (arg[1] != '-') { + ctx->opt = ++arg; + if (internal_help && *ctx->opt == 'h') + return usage_with_options_internal(usagestr, options, 0); + switch (parse_short_opt(ctx, options)) { + case -1: + return parse_options_usage(usagestr, options, arg, 1); + case -2: + goto unknown; + case -3: + goto exclusive; + default: + break; + } + if (ctx->opt) + check_typos(arg, options); + while (ctx->opt) { + if (internal_help && *ctx->opt == 'h') + return usage_with_options_internal(usagestr, options, 
0); + arg = ctx->opt; + switch (parse_short_opt(ctx, options)) { + case -1: + return parse_options_usage(usagestr, options, arg, 1); + case -2: + /* fake a short option thing to hide the fact that we may have + * started to parse aggregated stuff + * + * This is leaky, too bad. + */ + ctx->argv[0] = strdup(ctx->opt - 1); + *(char *)ctx->argv[0] = '-'; + goto unknown; + case -3: + goto exclusive; + default: + break; + } + } + continue; + } + + if (!arg[2]) { /* "--" */ + if (!(ctx->flags & PARSE_OPT_KEEP_DASHDASH)) { + ctx->argc--; + ctx->argv++; + } + break; + } + + arg += 2; + if (internal_help && !strcmp(arg, "help-all")) + return usage_with_options_internal(usagestr, options, 1); + if (internal_help && !strcmp(arg, "help")) + return usage_with_options_internal(usagestr, options, 0); + if (!strcmp(arg, "list-opts")) + return PARSE_OPT_LIST_OPTS; + if (!strcmp(arg, "list-cmds")) + return PARSE_OPT_LIST_SUBCMDS; + switch (parse_long_opt(ctx, arg, options)) { + case -1: + return parse_options_usage(usagestr, options, arg, 0); + case -2: + goto unknown; + case -3: + excl_short_opt = 0; + goto exclusive; + default: + break; + } + continue; +unknown: + if (!(ctx->flags & PARSE_OPT_KEEP_UNKNOWN)) + return PARSE_OPT_UNKNOWN; + ctx->out[ctx->cpidx++] = ctx->argv[0]; + ctx->opt = NULL; + } + return PARSE_OPT_DONE; + +exclusive: + parse_options_usage(usagestr, options, arg, excl_short_opt); + if ((excl_short_opt && ctx->excl_opt->short_name) || + ctx->excl_opt->long_name == NULL) { + char opt = ctx->excl_opt->short_name; + parse_options_usage(NULL, options, &opt, 1); + } else { + parse_options_usage(NULL, options, ctx->excl_opt->long_name, 0); + } + return PARSE_OPT_HELP; +} + +int parse_options_end(struct parse_opt_ctx_t *ctx) +{ + memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out)); + ctx->out[ctx->cpidx + ctx->argc] = NULL; + return ctx->cpidx + ctx->argc; +} + +int parse_options_subcommand(int argc, const char **argv, const struct option *options, + const char *const subcommands[], const char *usagestr[], int flags) +{ + struct parse_opt_ctx_t ctx; + + perf_header__set_cmdline(argc, argv); + + /* build usage string if it's not provided */ + if (subcommands && !usagestr[0]) { + struct strbuf buf = STRBUF_INIT; + + strbuf_addf(&buf, "perf %s [<options>] {", argv[0]); + for (int i = 0; subcommands[i]; i++) { + if (i) + strbuf_addstr(&buf, "|"); + strbuf_addstr(&buf, subcommands[i]); + } + strbuf_addstr(&buf, "}"); + + usagestr[0] = strdup(buf.buf); + strbuf_release(&buf); + } + + parse_options_start(&ctx, argc, argv, flags); + switch (parse_options_step(&ctx, options, usagestr)) { + case PARSE_OPT_HELP: + exit(129); + case PARSE_OPT_DONE: + break; + case PARSE_OPT_LIST_OPTS: + while (options->type != OPTION_END) { + if (options->long_name) + printf("--%s ", options->long_name); + options++; + } + putchar('\n'); + exit(130); + case PARSE_OPT_LIST_SUBCMDS: + if (subcommands) { + for (int i = 0; subcommands[i]; i++) + printf("%s ", subcommands[i]); + } + putchar('\n'); + exit(130); + default: /* PARSE_OPT_UNKNOWN */ + if (ctx.argv[0][1] == '-') { + error("unknown option `%s'", ctx.argv[0] + 2); + } else { + error("unknown switch `%c'", *ctx.opt); + } + usage_with_options(usagestr, options); + } + + return parse_options_end(&ctx); +} + +int parse_options(int argc, const char **argv, const struct option *options, + const char * const usagestr[], int flags) +{ + return parse_options_subcommand(argc, argv, options, NULL, + (const char **) usagestr, flags); +} + +#define USAGE_OPTS_WIDTH 24
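+/* + * Illustrative help-line layout (option names here are made up): the + * option column is padded to USAGE_OPTS_WIDTH columns, USAGE_GAP + * spaces follow, so the help text starts at column 26: + * + * -f, --foo <n> number of foos + */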
+#define USAGE_GAP 2 + +static void print_option_help(const struct option *opts, int full) +{ + size_t pos; + int pad; + + if (opts->type == OPTION_GROUP) { + fputc('\n', stderr); + if (*opts->help) + fprintf(stderr, "%s\n", opts->help); + return; + } + if (!full && (opts->flags & PARSE_OPT_HIDDEN)) + return; + if (opts->flags & PARSE_OPT_DISABLED) + return; + + pos = fprintf(stderr, " "); + if (opts->short_name) + pos += fprintf(stderr, "-%c", opts->short_name); + else + pos += fprintf(stderr, " "); + + if (opts->long_name && opts->short_name) + pos += fprintf(stderr, ", "); + if (opts->long_name) + pos += fprintf(stderr, "--%s", opts->long_name); + + switch (opts->type) { + case OPTION_ARGUMENT: + break; + case OPTION_LONG: + case OPTION_U64: + case OPTION_INTEGER: + case OPTION_UINTEGER: + if (opts->flags & PARSE_OPT_OPTARG) + if (opts->long_name) + pos += fprintf(stderr, "[=<n>]"); + else + pos += fprintf(stderr, "[<n>]"); + else + pos += fprintf(stderr, " <n>"); + break; + case OPTION_CALLBACK: + if (opts->flags & PARSE_OPT_NOARG) + break; + /* FALLTHROUGH */ + case OPTION_STRING: + if (opts->argh) { + if (opts->flags & PARSE_OPT_OPTARG) + if (opts->long_name) + pos += fprintf(stderr, "[=<%s>]", opts->argh); + else + pos += fprintf(stderr, "[<%s>]", opts->argh); + else + pos += fprintf(stderr, " <%s>", opts->argh); + } else { + if (opts->flags & PARSE_OPT_OPTARG) + if (opts->long_name) + pos += fprintf(stderr, "[=...]"); + else + pos += fprintf(stderr, "[...]"); + else + pos += fprintf(stderr, " ..."); + } + break; + default: /* OPTION_{BIT,BOOLEAN,SET_UINT,SET_PTR} */ + case OPTION_END: + case OPTION_GROUP: + case OPTION_BIT: + case OPTION_BOOLEAN: + case OPTION_INCR: + case OPTION_SET_UINT: + case OPTION_SET_PTR: + break; + } + + if (pos <= USAGE_OPTS_WIDTH) + pad = USAGE_OPTS_WIDTH - pos; + else { + fputc('\n', stderr); + pad = USAGE_OPTS_WIDTH; + } + fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help); +} + +int usage_with_options_internal(const char * const *usagestr, + const struct option *opts, int full) +{ + if (!usagestr) + return PARSE_OPT_HELP; + + fprintf(stderr, "\n usage: %s\n", *usagestr++); + while (*usagestr && **usagestr) + fprintf(stderr, " or: %s\n", *usagestr++); + while (*usagestr) { + fprintf(stderr, "%s%s\n", + **usagestr ? " " : "", + *usagestr); + usagestr++; + } + + if (opts->type != OPTION_GROUP) + fputc('\n', stderr); + + for ( ; opts->type != OPTION_END; opts++) + print_option_help(opts, full); + + fputc('\n', stderr); + + return PARSE_OPT_HELP; +} + +void usage_with_options(const char * const *usagestr, + const struct option *opts) +{ + exit_browser(false); + usage_with_options_internal(usagestr, opts, 0); + exit(129); +} + +int parse_options_usage(const char * const *usagestr, + const struct option *opts, + const char *optstr, bool short_opt) +{ + if (!usagestr) + goto opt; + + fprintf(stderr, "\n usage: %s\n", *usagestr++); + while (*usagestr && **usagestr) + fprintf(stderr, " or: %s\n", *usagestr++); + while (*usagestr) { + fprintf(stderr, "%s%s\n", + **usagestr ?
" " : "", + *usagestr); + usagestr++; + } + fputc('\n', stderr); + +opt: + for ( ; opts->type != OPTION_END; opts++) { + if (short_opt) { + if (opts->short_name == *optstr) + break; + continue; + } + + if (opts->long_name == NULL) + continue; + + if (!prefixcmp(optstr, opts->long_name)) + break; + if (!prefixcmp(optstr, "no-") && + !prefixcmp(optstr + 3, opts->long_name)) + break; + } + + if (opts->type != OPTION_END) + print_option_help(opts, 0); + + return PARSE_OPT_HELP; +} + + +int parse_opt_verbosity_cb(const struct option *opt, + const char *arg __maybe_unused, + int unset) +{ + int *target = opt->value; + + if (unset) + /* --no-quiet, --no-verbose */ + *target = 0; + else if (opt->short_name == 'v') { + if (*target >= 0) + (*target)++; + else + *target = 1; + } else { + if (*target <= 0) + (*target)--; + else + *target = -1; + } + return 0; +} + +void set_option_flag(struct option *opts, int shortopt, const char *longopt, + int flag) +{ + for (; opts->type != OPTION_END; opts++) { + if ((shortopt && opts->short_name == shortopt) || + (opts->long_name && longopt && + !strcmp(opts->long_name, longopt))) { + opts->flags |= flag; + break; + } + } +} diff --git a/kernel/tools/perf/util/parse-options.h b/kernel/tools/perf/util/parse-options.h new file mode 100644 index 000000000..59561fd86 --- /dev/null +++ b/kernel/tools/perf/util/parse-options.h @@ -0,0 +1,220 @@ +#ifndef __PERF_PARSE_OPTIONS_H +#define __PERF_PARSE_OPTIONS_H + +#include +#include + +enum parse_opt_type { + /* special types */ + OPTION_END, + OPTION_ARGUMENT, + OPTION_GROUP, + /* options with no arguments */ + OPTION_BIT, + OPTION_BOOLEAN, + OPTION_INCR, + OPTION_SET_UINT, + OPTION_SET_PTR, + /* options with arguments (usually) */ + OPTION_STRING, + OPTION_INTEGER, + OPTION_LONG, + OPTION_CALLBACK, + OPTION_U64, + OPTION_UINTEGER, +}; + +enum parse_opt_flags { + PARSE_OPT_KEEP_DASHDASH = 1, + PARSE_OPT_STOP_AT_NON_OPTION = 2, + PARSE_OPT_KEEP_ARGV0 = 4, + PARSE_OPT_KEEP_UNKNOWN = 8, + PARSE_OPT_NO_INTERNAL_HELP = 16, +}; + +enum parse_opt_option_flags { + PARSE_OPT_OPTARG = 1, + PARSE_OPT_NOARG = 2, + PARSE_OPT_NONEG = 4, + PARSE_OPT_HIDDEN = 8, + PARSE_OPT_LASTARG_DEFAULT = 16, + PARSE_OPT_DISABLED = 32, + PARSE_OPT_EXCLUSIVE = 64, + PARSE_OPT_NOEMPTY = 128, +}; + +struct option; +typedef int parse_opt_cb(const struct option *, const char *arg, int unset); + +/* + * `type`:: + * holds the type of the option, you must have an OPTION_END last in your + * array. + * + * `short_name`:: + * the character to use as a short option name, '\0' if none. + * + * `long_name`:: + * the long option name, without the leading dashes, NULL if none. + * + * `value`:: + * stores pointers to the values to be filled. + * + * `argh`:: + * token to explain the kind of argument this option wants. Keep it + * homogenous across the repository. + * + * `help`:: + * the short help associated to what the option does. + * Must never be NULL (except for OPTION_END). + * OPTION_GROUP uses this pointer to store the group header. + * + * `flags`:: + * mask of parse_opt_option_flags. + * PARSE_OPT_OPTARG: says that the argument is optionnal (not for BOOLEANs) + * PARSE_OPT_NOARG: says that this option takes no argument, for CALLBACKs + * PARSE_OPT_NONEG: says that this option cannot be negated + * PARSE_OPT_HIDDEN this option is skipped in the default usage, showed in + * the long one. + * + * `callback`:: + * pointer to the callback to use for OPTION_CALLBACK. + * + * `defval`:: + * default value to fill (*->value) with for PARSE_OPT_OPTARG. 
+ * OPTION_{BIT,SET_UINT,SET_PTR} store the {mask,integer,pointer} to put in + * the value when met. + * CALLBACKS can use it like they want. + * + * `set`:: + * whether an option was set by the user + */ +struct option { + enum parse_opt_type type; + int short_name; + const char *long_name; + void *value; + const char *argh; + const char *help; + + int flags; + parse_opt_cb *callback; + intptr_t defval; + bool *set; + void *data; +}; + +#define check_vtype(v, type) ( BUILD_BUG_ON_ZERO(!__builtin_types_compatible_p(typeof(v), type)) + v ) + +#define OPT_END() { .type = OPTION_END } +#define OPT_ARGUMENT(l, h) { .type = OPTION_ARGUMENT, .long_name = (l), .help = (h) } +#define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) } +#define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h), .defval = (b) } +#define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h) } +#define OPT_BOOLEAN_SET(s, l, v, os, h) \ + { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), \ + .value = check_vtype(v, bool *), .help = (h), \ + .set = check_vtype(os, bool *)} +#define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) } +#define OPT_SET_UINT(s, l, v, h, i) { .type = OPTION_SET_UINT, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h), .defval = (i) } +#define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) } +#define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) } +#define OPT_UINTEGER(s, l, v, h) { .type = OPTION_UINTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h) } +#define OPT_LONG(s, l, v, h) { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, long *), .help = (h) } +#define OPT_U64(s, l, v, h) { .type = OPTION_U64, .short_name = (s), .long_name = (l), .value = check_vtype(v, u64 *), .help = (h) } +#define OPT_STRING(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h) } +#define OPT_STRING_NOEMPTY(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h), .flags = PARSE_OPT_NOEMPTY} +#define OPT_DATE(s, l, v, h) \ + { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb } +#define OPT_CALLBACK(s, l, v, a, h, f) \ + { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f) } +#define OPT_CALLBACK_NOOPT(s, l, v, a, h, f) \ + { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG } +#define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \ + { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT } +#define OPT_CALLBACK_DEFAULT_NOOPT(s, l, v, a, h, f, d) \ + { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l),\ + .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d,\ + .flags = 
PARSE_OPT_LASTARG_DEFAULT | PARSE_OPT_NOARG} +#define OPT_CALLBACK_OPTARG(s, l, v, d, a, h, f) \ + { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), \ + .value = (v), (a), .help = (h), .callback = (f), \ + .flags = PARSE_OPT_OPTARG, .data = (d) } + +/* parse_options() will filter out the processed options and leave the + * non-option arguments in argv[]. + * Returns the number of arguments left in argv[]. + */ +extern int parse_options(int argc, const char **argv, + const struct option *options, + const char * const usagestr[], int flags); + +extern int parse_options_subcommand(int argc, const char **argv, + const struct option *options, + const char *const subcommands[], + const char *usagestr[], int flags); + +extern NORETURN void usage_with_options(const char * const *usagestr, + const struct option *options); + +/*----- incremental advanced APIs -----*/ + +enum { + PARSE_OPT_HELP = -1, + PARSE_OPT_DONE, + PARSE_OPT_LIST_OPTS, + PARSE_OPT_LIST_SUBCMDS, + PARSE_OPT_UNKNOWN, +}; + +/* + * It's okay for the caller to consume argv/argc in the usual way. + * Other fields of that structure are private to parse-options and should not + * be modified in any way. + */ +struct parse_opt_ctx_t { + const char **argv; + const char **out; + int argc, cpidx; + const char *opt; + const struct option *excl_opt; + int flags; +}; + +extern int parse_options_usage(const char * const *usagestr, + const struct option *opts, + const char *optstr, + bool short_opt); + +extern void parse_options_start(struct parse_opt_ctx_t *ctx, + int argc, const char **argv, int flags); + +extern int parse_options_step(struct parse_opt_ctx_t *ctx, + const struct option *options, + const char * const usagestr[]); + +extern int parse_options_end(struct parse_opt_ctx_t *ctx); + + +/*----- some often used options -----*/ +extern int parse_opt_abbrev_cb(const struct option *, const char *, int); +extern int parse_opt_approxidate_cb(const struct option *, const char *, int); +extern int parse_opt_verbosity_cb(const struct option *, const char *, int); + +#define OPT__VERBOSE(var) OPT_BOOLEAN('v', "verbose", (var), "be verbose") +#define OPT__QUIET(var) OPT_BOOLEAN('q', "quiet", (var), "be quiet") +#define OPT__VERBOSITY(var) \ + { OPTION_CALLBACK, 'v', "verbose", (var), NULL, "be more verbose", \ + PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 }, \ + { OPTION_CALLBACK, 'q', "quiet", (var), NULL, "be more quiet", \ + PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 } +#define OPT__DRY_RUN(var) OPT_BOOLEAN('n', "dry-run", (var), "dry run") +#define OPT__ABBREV(var) \ + { OPTION_CALLBACK, 0, "abbrev", (var), "n", \ + "use <n> digits to display SHA-1s", \ + PARSE_OPT_OPTARG, &parse_opt_abbrev_cb, 0 } + +extern const char *parse_options_fix_filename(const char *prefix, const char *file); + +void set_option_flag(struct option *opts, int sopt, const char *lopt, int flag); +#endif /* __PERF_PARSE_OPTIONS_H */ diff --git a/kernel/tools/perf/util/path.c b/kernel/tools/perf/util/path.c new file mode 100644 index 000000000..5d13cb45b --- /dev/null +++ b/kernel/tools/perf/util/path.c @@ -0,0 +1,161 @@ +/* + * I'm tired of doing "vsnprintf()" etc just to open a + * file, so here's a "return static buffer with printf" + * interface for paths. + * + * It's obviously not thread-safe. Sue me. But it's quite + * useful for doing things like + * + * f = open(mkpath("%s/%s.perf", base, name), O_RDONLY); + * + * which is what it's designed for.
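+ * + * Another call in the same vein (hypothetical caller): + * + * unlink(mkpath("%s/%s.tmp", base, name)); + * + * Note that up to four results stay valid at a time, because + * get_pathname() below rotates through four static buffers.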
+ */ +#include "cache.h" + +static char bad_path[] = "/bad-path/"; +/* + * Two hacks: + */ + +static const char *get_perf_dir(void) +{ + return "."; +} + +/* + * If libc has strlcpy() then that version will override this + * implementation: + */ +size_t __weak strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + + memcpy(dest, src, len); + dest[len] = '\0'; + } + + return ret; +} + +static char *get_pathname(void) +{ + static char pathname_array[4][PATH_MAX]; + static int idx; + + return pathname_array[3 & ++idx]; +} + +static char *cleanup_path(char *path) +{ + /* Clean it up */ + if (!memcmp(path, "./", 2)) { + path += 2; + while (*path == '/') + path++; + } + return path; +} + +static char *perf_vsnpath(char *buf, size_t n, const char *fmt, va_list args) +{ + const char *perf_dir = get_perf_dir(); + size_t len; + + len = strlen(perf_dir); + if (n < len + 1) + goto bad; + memcpy(buf, perf_dir, len); + if (len && !is_dir_sep(perf_dir[len-1])) + buf[len++] = '/'; + len += vsnprintf(buf + len, n - len, fmt, args); + if (len >= n) + goto bad; + return cleanup_path(buf); +bad: + strlcpy(buf, bad_path, n); + return buf; +} + +char *perf_pathdup(const char *fmt, ...) +{ + char path[PATH_MAX]; + va_list args; + va_start(args, fmt); + (void)perf_vsnpath(path, sizeof(path), fmt, args); + va_end(args); + return xstrdup(path); +} + +char *mkpath(const char *fmt, ...) +{ + va_list args; + unsigned len; + char *pathname = get_pathname(); + + va_start(args, fmt); + len = vsnprintf(pathname, PATH_MAX, fmt, args); + va_end(args); + if (len >= PATH_MAX) + return bad_path; + return cleanup_path(pathname); +} + +char *perf_path(const char *fmt, ...) +{ + const char *perf_dir = get_perf_dir(); + char *pathname = get_pathname(); + va_list args; + unsigned len; + + len = strlen(perf_dir); + if (len > PATH_MAX-100) + return bad_path; + memcpy(pathname, perf_dir, len); + if (len && perf_dir[len-1] != '/') + pathname[len++] = '/'; + va_start(args, fmt); + len += vsnprintf(pathname + len, PATH_MAX - len, fmt, args); + va_end(args); + if (len >= PATH_MAX) + return bad_path; + return cleanup_path(pathname); +} + +/* strip arbitrary amount of directory separators at end of path */ +static inline int chomp_trailing_dir_sep(const char *path, int len) +{ + while (len && is_dir_sep(path[len - 1])) + len--; + return len; +} + +/* + * If path ends with suffix (complete path components), returns the + * part before suffix (sans trailing directory separators). + * Otherwise returns NULL. 
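+ * + * For instance, strip_path_suffix("/usr/lib/perf", "lib/perf") is + * expected to return "/usr", while a non-matching suffix yields NULL.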
+ */ +char *strip_path_suffix(const char *path, const char *suffix) +{ + int path_len = strlen(path), suffix_len = strlen(suffix); + + while (suffix_len) { + if (!path_len) + return NULL; + + if (is_dir_sep(path[path_len - 1])) { + if (!is_dir_sep(suffix[suffix_len - 1])) + return NULL; + path_len = chomp_trailing_dir_sep(path, path_len); + suffix_len = chomp_trailing_dir_sep(suffix, suffix_len); + } + else if (path[--path_len] != suffix[--suffix_len]) + return NULL; + } + + if (path_len && !is_dir_sep(path[path_len - 1])) + return NULL; + return strndup(path, chomp_trailing_dir_sep(path, path_len)); +} diff --git a/kernel/tools/perf/util/perf_regs.c b/kernel/tools/perf/util/perf_regs.c new file mode 100644 index 000000000..43168fb0d --- /dev/null +++ b/kernel/tools/perf/util/perf_regs.c @@ -0,0 +1,27 @@ +#include +#include "perf_regs.h" +#include "event.h" + +int perf_reg_value(u64 *valp, struct regs_dump *regs, int id) +{ + int i, idx = 0; + u64 mask = regs->mask; + + if (regs->cache_mask & (1 << id)) + goto out; + + if (!(mask & (1 << id))) + return -EINVAL; + + for (i = 0; i < id; i++) { + if (mask & (1 << i)) + idx++; + } + + regs->cache_mask |= (1 << id); + regs->cache_regs[id] = regs->regs[idx]; + +out: + *valp = regs->cache_regs[id]; + return 0; +} diff --git a/kernel/tools/perf/util/perf_regs.h b/kernel/tools/perf/util/perf_regs.h new file mode 100644 index 000000000..980dbf76b --- /dev/null +++ b/kernel/tools/perf/util/perf_regs.h @@ -0,0 +1,29 @@ +#ifndef __PERF_REGS_H +#define __PERF_REGS_H + +#include + +struct regs_dump; + +#ifdef HAVE_PERF_REGS_SUPPORT +#include + +int perf_reg_value(u64 *valp, struct regs_dump *regs, int id); + +#else +#define PERF_REGS_MASK 0 +#define PERF_REGS_MAX 0 + +static inline const char *perf_reg_name(int id __maybe_unused) +{ + return NULL; +} + +static inline int perf_reg_value(u64 *valp __maybe_unused, + struct regs_dump *regs __maybe_unused, + int id __maybe_unused) +{ + return 0; +} +#endif /* HAVE_PERF_REGS_SUPPORT */ +#endif /* __PERF_REGS_H */ diff --git a/kernel/tools/perf/util/pmu.c b/kernel/tools/perf/util/pmu.c new file mode 100644 index 000000000..48411674d --- /dev/null +++ b/kernel/tools/perf/util/pmu.c @@ -0,0 +1,992 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "util.h" +#include "pmu.h" +#include "parse-events.h" +#include "cpumap.h" + +struct perf_pmu_format { + char *name; + int value; + DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS); + struct list_head list; +}; + +#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/" + +int perf_pmu_parse(struct list_head *list, char *name); +extern FILE *perf_pmu_in; + +static LIST_HEAD(pmus); + +/* + * Parse & process all the sysfs attributes located under + * the directory specified in 'dir' parameter. 
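+ * + * Each file under the format directory describes one field, e.g. a + * file named "event" containing a bit-range spec such as: + * + * config:0-7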
+ */ +int perf_pmu__format_parse(char *dir, struct list_head *head) +{ + struct dirent *evt_ent; + DIR *format_dir; + int ret = 0; + + format_dir = opendir(dir); + if (!format_dir) + return -EINVAL; + + while (!ret && (evt_ent = readdir(format_dir))) { + char path[PATH_MAX]; + char *name = evt_ent->d_name; + FILE *file; + + if (!strcmp(name, ".") || !strcmp(name, "..")) + continue; + + snprintf(path, PATH_MAX, "%s/%s", dir, name); + + ret = -EINVAL; + file = fopen(path, "r"); + if (!file) + break; + + perf_pmu_in = file; + ret = perf_pmu_parse(head, name); + fclose(file); + } + + closedir(format_dir); + return ret; +} + +/* + * Reading/parsing the default pmu format definition, which should be + * located at: + * /sys/bus/event_source/devices//format as sysfs group attributes. + */ +static int pmu_format(const char *name, struct list_head *format) +{ + struct stat st; + char path[PATH_MAX]; + const char *sysfs = sysfs__mountpoint(); + + if (!sysfs) + return -1; + + snprintf(path, PATH_MAX, + "%s" EVENT_SOURCE_DEVICE_PATH "%s/format", sysfs, name); + + if (stat(path, &st) < 0) + return 0; /* no error if format does not exist */ + + if (perf_pmu__format_parse(path, format)) + return -1; + + return 0; +} + +static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *name) +{ + struct stat st; + ssize_t sret; + char scale[128]; + int fd, ret = -1; + char path[PATH_MAX]; + const char *lc; + + snprintf(path, PATH_MAX, "%s/%s.scale", dir, name); + + fd = open(path, O_RDONLY); + if (fd == -1) + return -1; + + if (fstat(fd, &st) < 0) + goto error; + + sret = read(fd, scale, sizeof(scale)-1); + if (sret < 0) + goto error; + + scale[sret] = '\0'; + /* + * save current locale + */ + lc = setlocale(LC_NUMERIC, NULL); + + /* + * force to C locale to ensure kernel + * scale string is converted correctly. + * kernel uses default C locale. 
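+ * For example, a scale file may read "3.0517578125e-05" (an assumed + * sample value); under a locale with a ',' decimal separator strtod() + * would stop at the '.', hence the temporary switch to "C".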
+ */ + setlocale(LC_NUMERIC, "C"); + + alias->scale = strtod(scale, NULL); + + /* restore locale */ + setlocale(LC_NUMERIC, lc); + + ret = 0; +error: + close(fd); + return ret; +} + +static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *name) +{ + char path[PATH_MAX]; + ssize_t sret; + int fd; + + snprintf(path, PATH_MAX, "%s/%s.unit", dir, name); + + fd = open(path, O_RDONLY); + if (fd == -1) + return -1; + + sret = read(fd, alias->unit, UNIT_MAX_LEN); + if (sret < 0) + goto error; + + close(fd); + + alias->unit[sret] = '\0'; + + return 0; +error: + close(fd); + alias->unit[0] = '\0'; + return -1; +} + +static int +perf_pmu__parse_per_pkg(struct perf_pmu_alias *alias, char *dir, char *name) +{ + char path[PATH_MAX]; + int fd; + + snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name); + + fd = open(path, O_RDONLY); + if (fd == -1) + return -1; + + close(fd); + + alias->per_pkg = true; + return 0; +} + +static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias, + char *dir, char *name) +{ + char path[PATH_MAX]; + int fd; + + snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name); + + fd = open(path, O_RDONLY); + if (fd == -1) + return -1; + + alias->snapshot = true; + close(fd); + return 0; +} + +static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FILE *file) +{ + struct perf_pmu_alias *alias; + char buf[256]; + int ret; + + /* leave room for the terminating NUL appended below */ + ret = fread(buf, 1, sizeof(buf) - 1, file); + if (ret == 0) + return -EINVAL; + buf[ret] = 0; + + alias = malloc(sizeof(*alias)); + if (!alias) + return -ENOMEM; + + INIT_LIST_HEAD(&alias->terms); + alias->scale = 1.0; + alias->unit[0] = '\0'; + alias->per_pkg = false; + + ret = parse_events_terms(&alias->terms, buf); + if (ret) { + free(alias); + return ret; + } + + alias->name = strdup(name); + /* + * load unit name and scale if available + */ + perf_pmu__parse_unit(alias, dir, name); + perf_pmu__parse_scale(alias, dir, name); + perf_pmu__parse_per_pkg(alias, dir, name); + perf_pmu__parse_snapshot(alias, dir, name); + + list_add_tail(&alias->list, list); + + return 0; +} + +static inline bool pmu_alias_info_file(char *name) +{ + size_t len; + + len = strlen(name); + if (len > 5 && !strcmp(name + len - 5, ".unit")) + return true; + if (len > 6 && !strcmp(name + len - 6, ".scale")) + return true; + if (len > 8 && !strcmp(name + len - 8, ".per-pkg")) + return true; + if (len > 9 && !strcmp(name + len - 9, ".snapshot")) + return true; + + return false; +} + +/* + * Process all the sysfs attributes located under the directory + * specified in 'dir' parameter. + */ +static int pmu_aliases_parse(char *dir, struct list_head *head) +{ + struct dirent *evt_ent; + DIR *event_dir; + int ret = 0; + + event_dir = opendir(dir); + if (!event_dir) + return -EINVAL; + + while (!ret && (evt_ent = readdir(event_dir))) { + char path[PATH_MAX]; + char *name = evt_ent->d_name; + FILE *file; + + if (!strcmp(name, ".") || !strcmp(name, "..")) + continue; + + /* + * skip info files parsed in perf_pmu__new_alias() + */ + if (pmu_alias_info_file(name)) + continue; + + snprintf(path, PATH_MAX, "%s/%s", dir, name); + + ret = -EINVAL; + file = fopen(path, "r"); + if (!file) + break; + + ret = perf_pmu__new_alias(head, dir, name, file); + fclose(file); + } + + closedir(event_dir); + return ret; +} + +/* + * Reading the pmu event aliases definition, which should be located at: + * /sys/bus/event_source/devices/<dev>/events as sysfs group attributes.
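+ * + * A typical alias file, e.g. "cache-misses" on an x86 'cpu' PMU, holds + * a term list such as (illustrative values): + * + * event=0x2e,umask=0x41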
+ */ +static int pmu_aliases(const char *name, struct list_head *head) +{ + struct stat st; + char path[PATH_MAX]; + const char *sysfs = sysfs__mountpoint(); + + if (!sysfs) + return -1; + + snprintf(path, PATH_MAX, + "%s/bus/event_source/devices/%s/events", sysfs, name); + + if (stat(path, &st) < 0) + return 0; /* no error if 'events' does not exist */ + + if (pmu_aliases_parse(path, head)) + return -1; + + return 0; +} + +static int pmu_alias_terms(struct perf_pmu_alias *alias, + struct list_head *terms) +{ + struct parse_events_term *term, *cloned; + LIST_HEAD(list); + int ret; + + list_for_each_entry(term, &alias->terms, list) { + ret = parse_events_term__clone(&cloned, term); + if (ret) { + parse_events__free_terms(&list); + return ret; + } + list_add_tail(&cloned->list, &list); + } + list_splice(&list, terms); + return 0; +} + +/* + * Reading/parsing the default pmu type value, which should be + * located at: + * /sys/bus/event_source/devices//type as sysfs attribute. + */ +static int pmu_type(const char *name, __u32 *type) +{ + struct stat st; + char path[PATH_MAX]; + FILE *file; + int ret = 0; + const char *sysfs = sysfs__mountpoint(); + + if (!sysfs) + return -1; + + snprintf(path, PATH_MAX, + "%s" EVENT_SOURCE_DEVICE_PATH "%s/type", sysfs, name); + + if (stat(path, &st) < 0) + return -1; + + file = fopen(path, "r"); + if (!file) + return -EINVAL; + + if (1 != fscanf(file, "%u", type)) + ret = -1; + + fclose(file); + return ret; +} + +/* Add all pmus in sysfs to pmu list: */ +static void pmu_read_sysfs(void) +{ + char path[PATH_MAX]; + DIR *dir; + struct dirent *dent; + const char *sysfs = sysfs__mountpoint(); + + if (!sysfs) + return; + + snprintf(path, PATH_MAX, + "%s" EVENT_SOURCE_DEVICE_PATH, sysfs); + + dir = opendir(path); + if (!dir) + return; + + while ((dent = readdir(dir))) { + if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, "..")) + continue; + /* add to static LIST_HEAD(pmus): */ + perf_pmu__find(dent->d_name); + } + + closedir(dir); +} + +static struct cpu_map *pmu_cpumask(const char *name) +{ + struct stat st; + char path[PATH_MAX]; + FILE *file; + struct cpu_map *cpus; + const char *sysfs = sysfs__mountpoint(); + + if (!sysfs) + return NULL; + + snprintf(path, PATH_MAX, + "%s/bus/event_source/devices/%s/cpumask", sysfs, name); + + if (stat(path, &st) < 0) + return NULL; + + file = fopen(path, "r"); + if (!file) + return NULL; + + cpus = cpu_map__read(file); + fclose(file); + return cpus; +} + +struct perf_event_attr *__attribute__((weak)) +perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused) +{ + return NULL; +} + +static struct perf_pmu *pmu_lookup(const char *name) +{ + struct perf_pmu *pmu; + LIST_HEAD(format); + LIST_HEAD(aliases); + __u32 type; + + /* + * The pmu data we store & need consists of the pmu + * type value and format definitions. Load both right + * now. 
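+ * A later lookup of the same name (say, perf_pmu__find("cpu")) is then + * served from the cached 'pmus' list instead of re-reading sysfs.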
+ */ + if (pmu_format(name, &format)) + return NULL; + + if (pmu_aliases(name, &aliases)) + return NULL; + + if (pmu_type(name, &type)) + return NULL; + + pmu = zalloc(sizeof(*pmu)); + if (!pmu) + return NULL; + + pmu->cpus = pmu_cpumask(name); + + INIT_LIST_HEAD(&pmu->format); + INIT_LIST_HEAD(&pmu->aliases); + list_splice(&format, &pmu->format); + list_splice(&aliases, &pmu->aliases); + pmu->name = strdup(name); + pmu->type = type; + list_add_tail(&pmu->list, &pmus); + + pmu->default_config = perf_pmu__get_default_config(pmu); + + return pmu; +} + +static struct perf_pmu *pmu_find(const char *name) +{ + struct perf_pmu *pmu; + + list_for_each_entry(pmu, &pmus, list) + if (!strcmp(pmu->name, name)) + return pmu; + + return NULL; +} + +struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu) +{ + /* + * pmu iterator: If pmu is NULL, we start at the begin, + * otherwise return the next pmu. Returns NULL on end. + */ + if (!pmu) { + pmu_read_sysfs(); + pmu = list_prepare_entry(pmu, &pmus, list); + } + list_for_each_entry_continue(pmu, &pmus, list) + return pmu; + return NULL; +} + +struct perf_pmu *perf_pmu__find(const char *name) +{ + struct perf_pmu *pmu; + + /* + * Once PMU is loaded it stays in the list, + * so we keep us from multiple reading/parsing + * the pmu format definitions. + */ + pmu = pmu_find(name); + if (pmu) + return pmu; + + return pmu_lookup(name); +} + +static struct perf_pmu_format * +pmu_find_format(struct list_head *formats, char *name) +{ + struct perf_pmu_format *format; + + list_for_each_entry(format, formats, list) + if (!strcmp(format->name, name)) + return format; + + return NULL; +} + +/* + * Sets value based on the format definition (format parameter) + * and unformated value (value parameter). + */ +static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v, + bool zero) +{ + unsigned long fbit, vbit; + + for (fbit = 0, vbit = 0; fbit < PERF_PMU_FORMAT_BITS; fbit++) { + + if (!test_bit(fbit, format)) + continue; + + if (value & (1llu << vbit++)) + *v |= (1llu << fbit); + else if (zero) + *v &= ~(1llu << fbit); + } +} + +/* + * Term is a string term, and might be a param-term. Try to look up it's value + * in the remaining terms. + * - We have a term like "base-or-format-term=param-term", + * - We need to find the value supplied for "param-term" (with param-term named + * in a config string) later on in the term list. + */ +static int pmu_resolve_param_term(struct parse_events_term *term, + struct list_head *head_terms, + __u64 *value) +{ + struct parse_events_term *t; + + list_for_each_entry(t, head_terms, list) { + if (t->type_val == PARSE_EVENTS__TERM_TYPE_NUM) { + if (!strcmp(t->config, term->config)) { + t->used = true; + *value = t->val.num; + return 0; + } + } + } + + if (verbose) + printf("Required parameter '%s' not specified\n", term->config); + + return -1; +} + +/* + * Setup one of config[12] attr members based on the + * user input data - term parameter. + */ +static int pmu_config_term(struct list_head *formats, + struct perf_event_attr *attr, + struct parse_events_term *term, + struct list_head *head_terms, + bool zero) +{ + struct perf_pmu_format *format; + __u64 *vp; + __u64 val; + + /* + * If this is a parameter we've already used for parameterized-eval, + * skip it in normal eval. + */ + if (term->used) + return 0; + + /* + * Hardcoded terms should be already in, so nothing + * to be done for them. 
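+ * ('Hardcoded' terms are the generic ones such as "config" or "period" + * that the common event code consumes; only PMU format fields are + * translated below.)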
+ */ + if (parse_events__is_hardcoded_term(term)) + return 0; + + format = pmu_find_format(formats, term->config); + if (!format) { + if (verbose) + printf("Invalid event/parameter '%s'\n", term->config); + return -EINVAL; + } + + switch (format->value) { + case PERF_PMU_FORMAT_VALUE_CONFIG: + vp = &attr->config; + break; + case PERF_PMU_FORMAT_VALUE_CONFIG1: + vp = &attr->config1; + break; + case PERF_PMU_FORMAT_VALUE_CONFIG2: + vp = &attr->config2; + break; + default: + return -EINVAL; + } + + /* + * Either directly use a numeric term, or try to translate string terms + * using event parameters. + */ + if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) + val = term->val.num; + else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) { + if (strcmp(term->val.str, "?")) { + if (verbose) + pr_info("Invalid sysfs entry %s=%s\n", + term->config, term->val.str); + return -EINVAL; + } + + if (pmu_resolve_param_term(term, head_terms, &val)) + return -EINVAL; + } else + return -EINVAL; + + pmu_format_value(format->bits, val, vp, zero); + return 0; +} + +int perf_pmu__config_terms(struct list_head *formats, + struct perf_event_attr *attr, + struct list_head *head_terms, + bool zero) +{ + struct parse_events_term *term; + + list_for_each_entry(term, head_terms, list) { + if (pmu_config_term(formats, attr, term, head_terms, zero)) + return -EINVAL; + } + + return 0; +} + +/* + * Configures event's 'attr' parameter based on the: + * 1) users input - specified in terms parameter + * 2) pmu format definitions - specified by pmu parameter + */ +int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, + struct list_head *head_terms) +{ + bool zero = !!pmu->default_config; + + attr->type = pmu->type; + return perf_pmu__config_terms(&pmu->format, attr, head_terms, zero); +} + +static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu, + struct parse_events_term *term) +{ + struct perf_pmu_alias *alias; + char *name; + + if (parse_events__is_hardcoded_term(term)) + return NULL; + + if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) { + if (term->val.num != 1) + return NULL; + if (pmu_find_format(&pmu->format, term->config)) + return NULL; + name = term->config; + } else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) { + if (strcasecmp(term->config, "event")) + return NULL; + name = term->val.str; + } else { + return NULL; + } + + list_for_each_entry(alias, &pmu->aliases, list) { + if (!strcasecmp(alias->name, name)) + return alias; + } + return NULL; +} + + +static int check_info_data(struct perf_pmu_alias *alias, + struct perf_pmu_info *info) +{ + /* + * Only one term in event definition can + * define unit, scale and snapshot, fail + * if there's more than one. 
+ */ + if ((info->unit && alias->unit) || + (info->scale && alias->scale) || + (info->snapshot && alias->snapshot)) + return -EINVAL; + + if (alias->unit) + info->unit = alias->unit; + + if (alias->scale) + info->scale = alias->scale; + + if (alias->snapshot) + info->snapshot = alias->snapshot; + + return 0; +} + +/* + * Find alias in the terms list and replace it with the terms + * defined for the alias + */ +int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms, + struct perf_pmu_info *info) +{ + struct parse_events_term *term, *h; + struct perf_pmu_alias *alias; + int ret; + + info->per_pkg = false; + + /* + * Mark unit and scale as not set + * (different from default values, see below) + */ + info->unit = NULL; + info->scale = 0.0; + info->snapshot = false; + + list_for_each_entry_safe(term, h, head_terms, list) { + alias = pmu_find_alias(pmu, term); + if (!alias) + continue; + ret = pmu_alias_terms(alias, &term->list); + if (ret) + return ret; + + ret = check_info_data(alias, info); + if (ret) + return ret; + + if (alias->per_pkg) + info->per_pkg = true; + + list_del(&term->list); + free(term); + } + + /* + * if no unit or scale was found in the aliases, then + * set defaults as for evsel; + * the unit cannot be left NULL + */ + if (info->unit == NULL) + info->unit = ""; + + if (info->scale == 0.0) + info->scale = 1.0; + + return 0; +} + +int perf_pmu__new_format(struct list_head *list, char *name, + int config, unsigned long *bits) +{ + struct perf_pmu_format *format; + + format = zalloc(sizeof(*format)); + if (!format) + return -ENOMEM; + + format->name = strdup(name); + format->value = config; + memcpy(format->bits, bits, sizeof(format->bits)); + + list_add_tail(&format->list, list); + return 0; +} + +void perf_pmu__set_format(unsigned long *bits, long from, long to) +{ + long b; + + if (!to) + to = from; + + memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS)); + for (b = from; b <= to; b++) + set_bit(b, bits); +} + +static int sub_non_neg(int a, int b) +{ + if (b > a) + return 0; + return a - b; +} + +static char *format_alias(char *buf, int len, struct perf_pmu *pmu, + struct perf_pmu_alias *alias) +{ + struct parse_events_term *term; + int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name); + + list_for_each_entry(term, &alias->terms, list) { + if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) + used += snprintf(buf + used, sub_non_neg(len, used), + ",%s=%s", term->config, + term->val.str); + } + + if (sub_non_neg(len, used) > 0) { + buf[used] = '/'; + used++; + } + if (sub_non_neg(len, used) > 0) { + buf[used] = '\0'; + used++; + } else + buf[len - 1] = '\0'; + + return buf; +} + +static char *format_alias_or(char *buf, int len, struct perf_pmu *pmu, + struct perf_pmu_alias *alias) +{ + snprintf(buf, len, "%s OR %s/%s/", alias->name, pmu->name, alias->name); + return buf; +} + +static int cmp_string(const void *a, const void *b) +{ + const char * const *as = a; + const char * const *bs = b; + return strcmp(*as, *bs); +} + +void print_pmu_events(const char *event_glob, bool name_only) +{ + struct perf_pmu *pmu; + struct perf_pmu_alias *alias; + char buf[1024]; + int printed = 0; + int len, j; + char **aliases; + + pmu = NULL; + len = 0; + while ((pmu = perf_pmu__scan(pmu)) != NULL) { + list_for_each_entry(alias, &pmu->aliases, list) + len++; + if (pmu->selectable) + len++; + } + aliases = zalloc(sizeof(char *) * len); + if (!aliases) + goto out_enomem; + pmu = NULL; + j = 0; + while ((pmu = perf_pmu__scan(pmu)) != NULL) { + list_for_each_entry(alias,
&pmu->aliases, list) { + char *name = format_alias(buf, sizeof(buf), pmu, alias); + bool is_cpu = !strcmp(pmu->name, "cpu"); + + if (event_glob != NULL && + !(strglobmatch(name, event_glob) || + (!is_cpu && strglobmatch(alias->name, + event_glob)))) + continue; + + if (is_cpu && !name_only) + name = format_alias_or(buf, sizeof(buf), pmu, alias); + + aliases[j] = strdup(name); + if (aliases[j] == NULL) + goto out_enomem; + j++; + } + if (pmu->selectable) { + char *s; + if (asprintf(&s, "%s//", pmu->name) < 0) + goto out_enomem; + aliases[j] = s; + j++; + } + } + len = j; + qsort(aliases, len, sizeof(char *), cmp_string); + for (j = 0; j < len; j++) { + if (name_only) { + printf("%s ", aliases[j]); + continue; + } + printf(" %-50s [Kernel PMU event]\n", aliases[j]); + printed++; + } + if (printed) + printf("\n"); +out_free: + for (j = 0; j < len; j++) + zfree(&aliases[j]); + zfree(&aliases); + return; + +out_enomem: + printf("FATAL: not enough memory to print PMU events\n"); + if (aliases) + goto out_free; +} + +bool pmu_have_event(const char *pname, const char *name) +{ + struct perf_pmu *pmu; + struct perf_pmu_alias *alias; + + pmu = NULL; + while ((pmu = perf_pmu__scan(pmu)) != NULL) { + if (strcmp(pname, pmu->name)) + continue; + list_for_each_entry(alias, &pmu->aliases, list) + if (!strcmp(alias->name, name)) + return true; + } + return false; +} + +static FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name) +{ + struct stat st; + char path[PATH_MAX]; + const char *sysfs; + + sysfs = sysfs__mountpoint(); + if (!sysfs) + return NULL; + + snprintf(path, PATH_MAX, + "%s" EVENT_SOURCE_DEVICE_PATH "%s/%s", sysfs, pmu->name, name); + + if (stat(path, &st) < 0) + return NULL; + + return fopen(path, "r"); +} + +int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, + ...) 
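+/* + * Usage sketch (the sysfs attribute name here is hypothetical): + * + * int n; + * if (perf_pmu__scan_file(pmu, "nr_counters", "%d", &n) == 1) + * ...use n... + */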
+{ + va_list args; + FILE *file; + int ret = EOF; + + va_start(args, fmt); + file = perf_pmu__open_file(pmu, name); + if (file) { + ret = vfscanf(file, fmt, args); + fclose(file); + } + va_end(args); + return ret; +} diff --git a/kernel/tools/perf/util/pmu.h b/kernel/tools/perf/util/pmu.h new file mode 100644 index 000000000..6b1249fbd --- /dev/null +++ b/kernel/tools/perf/util/pmu.h @@ -0,0 +1,79 @@ +#ifndef __PMU_H +#define __PMU_H + +#include <linux/bitmap.h> +#include <linux/perf_event.h> +#include <stdbool.h> + +enum { + PERF_PMU_FORMAT_VALUE_CONFIG, + PERF_PMU_FORMAT_VALUE_CONFIG1, + PERF_PMU_FORMAT_VALUE_CONFIG2, +}; + +#define PERF_PMU_FORMAT_BITS 64 + +struct perf_event_attr; + +struct perf_pmu { + char *name; + __u32 type; + bool selectable; + struct perf_event_attr *default_config; + struct cpu_map *cpus; + struct list_head format; /* HEAD struct perf_pmu_format -> list */ + struct list_head aliases; /* HEAD struct perf_pmu_alias -> list */ + struct list_head list; /* ELEM */ +}; + +struct perf_pmu_info { + const char *unit; + double scale; + bool per_pkg; + bool snapshot; +}; + +#define UNIT_MAX_LEN 31 /* max length for event unit name */ + +struct perf_pmu_alias { + char *name; + struct list_head terms; /* HEAD struct parse_events_term -> list */ + struct list_head list; /* ELEM */ + char unit[UNIT_MAX_LEN+1]; + double scale; + bool per_pkg; + bool snapshot; +}; + +struct perf_pmu *perf_pmu__find(const char *name); +int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, + struct list_head *head_terms); +int perf_pmu__config_terms(struct list_head *formats, + struct perf_event_attr *attr, + struct list_head *head_terms, + bool zero); +int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms, + struct perf_pmu_info *info); +struct list_head *perf_pmu__alias(struct perf_pmu *pmu, + struct list_head *head_terms); +int perf_pmu_wrap(void); +void perf_pmu_error(struct list_head *list, char *name, char const *msg); + +int perf_pmu__new_format(struct list_head *list, char *name, + int config, unsigned long *bits); +void perf_pmu__set_format(unsigned long *bits, long from, long to); +int perf_pmu__format_parse(char *dir, struct list_head *head); + +struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu); + +void print_pmu_events(const char *event_glob, bool name_only); +bool pmu_have_event(const char *pname, const char *name); + +int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, + ...) __attribute__((format(scanf, 3, 4))); + +int perf_pmu__test(void); + +struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu); + +#endif /* __PMU_H */ diff --git a/kernel/tools/perf/util/pmu.l b/kernel/tools/perf/util/pmu.l new file mode 100644 index 000000000..a15d9fbd7 --- /dev/null +++ b/kernel/tools/perf/util/pmu.l @@ -0,0 +1,43 @@ +%option prefix="perf_pmu_" + +%{ +#include <stdlib.h> +#include <linux/bitops.h> +#include "pmu.h" +#include "pmu-bison.h" + +static int value(int base) +{ + long num; + + errno = 0; + num = strtoul(perf_pmu_text, NULL, base); + if (errno) + return PP_ERROR; + + perf_pmu_lval.num = num; + return PP_VALUE; +} + +%} + +num_dec [0-9]+ + +%% + +{num_dec} { return value(10); } +config { return PP_CONFIG; } +config1 { return PP_CONFIG1; } +config2 { return PP_CONFIG2; } +- { return '-'; } +: { return ':'; } +, { return ','; } +.
+\n		{ ; }
+
+%%
+
+int perf_pmu_wrap(void)
+{
+	return 1;
+}
diff --git a/kernel/tools/perf/util/pmu.y b/kernel/tools/perf/util/pmu.y
new file mode 100644
index 000000000..bfd7e8509
--- /dev/null
+++ b/kernel/tools/perf/util/pmu.y
@@ -0,0 +1,92 @@
+
+%parse-param {struct list_head *format}
+%parse-param {char *name}
+
+%{
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/bitmap.h>
+#include <string.h>
+#include "pmu.h"
+
+extern int perf_pmu_lex (void);
+
+#define ABORT_ON(val) \
+do { \
+	if (val) \
+		YYABORT; \
+} while (0)
+
+%}
+
+%token PP_CONFIG PP_CONFIG1 PP_CONFIG2
+%token PP_VALUE PP_ERROR
+%type <num> PP_VALUE
+%type <bits> bit_term
+%type <bits> bits
+
+%union
+{
+	unsigned long num;
+	DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
+}
+
+%%
+
+format:
+format format_term
+|
+format_term
+
+format_term:
+PP_CONFIG ':' bits
+{
+	ABORT_ON(perf_pmu__new_format(format, name,
+				      PERF_PMU_FORMAT_VALUE_CONFIG,
+				      $3));
+}
+|
+PP_CONFIG1 ':' bits
+{
+	ABORT_ON(perf_pmu__new_format(format, name,
+				      PERF_PMU_FORMAT_VALUE_CONFIG1,
+				      $3));
+}
+|
+PP_CONFIG2 ':' bits
+{
+	ABORT_ON(perf_pmu__new_format(format, name,
+				      PERF_PMU_FORMAT_VALUE_CONFIG2,
+				      $3));
+}
+
+bits:
+bits ',' bit_term
+{
+	bitmap_or($$, $1, $3, 64);
+}
+|
+bit_term
+{
+	memcpy($$, $1, sizeof($1));
+}
+
+bit_term:
+PP_VALUE '-' PP_VALUE
+{
+	perf_pmu__set_format($$, $1, $3);
+}
+|
+PP_VALUE
+{
+	perf_pmu__set_format($$, $1, 0);
+}
+
+%%
+
+void perf_pmu_error(struct list_head *list __maybe_unused,
+		    char *name __maybe_unused,
+		    char const *msg __maybe_unused)
+{
+}
diff --git a/kernel/tools/perf/util/probe-event.c b/kernel/tools/perf/util/probe-event.c
new file mode 100644
index 000000000..d05b77cf3
--- /dev/null
+++ b/kernel/tools/perf/util/probe-event.c
@@ -0,0 +1,2847 @@
+/*
+ * probe-event.c : perf-probe definition to probe_events format converter
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <sys/utsname.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <limits.h>
+#include <elf.h>
+
+#include "util.h"
+#include "event.h"
+#include "strlist.h"
+#include "debug.h"
+#include "cache.h"
+#include "color.h"
+#include "symbol.h"
+#include "thread.h"
+#include <api/fs/debugfs.h>
+#include <api/fs/tracefs.h>
+#include "trace-event.h"	/* For __maybe_unused */
+#include "probe-event.h"
+#include "probe-finder.h"
+#include "session.h"
+
+#define MAX_CMDLEN 256
+#define PERFPROBE_GROUP "probe"
+
+bool probe_event_dry_run;	/* Dry run flag */
+
+#define semantic_error(msg ...) pr_err("Semantic error :" msg)
+
+/* If there is no space to write, returns -E2BIG. */
+static int e_snprintf(char *str, size_t size, const char *format, ...)
+	__attribute__((format(printf, 3, 4)));
+
+static int e_snprintf(char *str, size_t size, const char *format, ...)
+{ + int ret; + va_list ap; + va_start(ap, format); + ret = vsnprintf(str, size, format, ap); + va_end(ap); + if (ret >= (int)size) + ret = -E2BIG; + return ret; +} + +static char *synthesize_perf_probe_point(struct perf_probe_point *pp); +static void clear_probe_trace_event(struct probe_trace_event *tev); +static struct machine *host_machine; + +/* Initialize symbol maps and path of vmlinux/modules */ +static int init_symbol_maps(bool user_only) +{ + int ret; + + symbol_conf.sort_by_name = true; + symbol_conf.allow_aliases = true; + ret = symbol__init(NULL); + if (ret < 0) { + pr_debug("Failed to init symbol map.\n"); + goto out; + } + + if (host_machine || user_only) /* already initialized */ + return 0; + + if (symbol_conf.vmlinux_name) + pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name); + + host_machine = machine__new_host(); + if (!host_machine) { + pr_debug("machine__new_host() failed.\n"); + symbol__exit(); + ret = -1; + } +out: + if (ret < 0) + pr_warning("Failed to init vmlinux path.\n"); + return ret; +} + +static void exit_symbol_maps(void) +{ + if (host_machine) { + machine__delete(host_machine); + host_machine = NULL; + } + symbol__exit(); +} + +static struct symbol *__find_kernel_function_by_name(const char *name, + struct map **mapp) +{ + return machine__find_kernel_function_by_name(host_machine, name, mapp, + NULL); +} + +static struct symbol *__find_kernel_function(u64 addr, struct map **mapp) +{ + return machine__find_kernel_function(host_machine, addr, mapp, NULL); +} + +static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) +{ + /* kmap->ref_reloc_sym should be set if host_machine is initialized */ + struct kmap *kmap; + + if (map__load(host_machine->vmlinux_maps[MAP__FUNCTION], NULL) < 0) + return NULL; + + kmap = map__kmap(host_machine->vmlinux_maps[MAP__FUNCTION]); + if (!kmap) + return NULL; + return kmap->ref_reloc_sym; +} + +static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc) +{ + struct ref_reloc_sym *reloc_sym; + struct symbol *sym; + struct map *map; + + /* ref_reloc_sym is just a label. Need a special fix*/ + reloc_sym = kernel_get_ref_reloc_sym(); + if (reloc_sym && strcmp(name, reloc_sym->name) == 0) + return (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr; + else { + sym = __find_kernel_function_by_name(name, &map); + if (sym) + return map->unmap_ip(map, sym->start) - + ((reloc) ? 
0 : map->reloc); + } + return 0; +} + +static struct map *kernel_get_module_map(const char *module) +{ + struct rb_node *nd; + struct map_groups *grp = &host_machine->kmaps; + + /* A file path -- this is an offline module */ + if (module && strchr(module, '/')) + return machine__new_module(host_machine, 0, module); + + if (!module) + module = "kernel"; + + for (nd = rb_first(&grp->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) { + struct map *pos = rb_entry(nd, struct map, rb_node); + if (strncmp(pos->dso->short_name + 1, module, + pos->dso->short_name_len - 2) == 0) { + return pos; + } + } + return NULL; +} + +static struct map *get_target_map(const char *target, bool user) +{ + /* Init maps of given executable or kernel */ + if (user) + return dso__new_map(target); + else + return kernel_get_module_map(target); +} + +static void put_target_map(struct map *map, bool user) +{ + if (map && user) { + /* Only the user map needs to be released */ + dso__delete(map->dso); + map__delete(map); + } +} + + +static struct dso *kernel_get_module_dso(const char *module) +{ + struct dso *dso; + struct map *map; + const char *vmlinux_name; + + if (module) { + list_for_each_entry(dso, &host_machine->kernel_dsos.head, + node) { + if (strncmp(dso->short_name + 1, module, + dso->short_name_len - 2) == 0) + goto found; + } + pr_debug("Failed to find module %s.\n", module); + return NULL; + } + + map = host_machine->vmlinux_maps[MAP__FUNCTION]; + dso = map->dso; + + vmlinux_name = symbol_conf.vmlinux_name; + if (vmlinux_name) { + if (dso__load_vmlinux(dso, map, vmlinux_name, false, NULL) <= 0) + return NULL; + } else { + if (dso__load_vmlinux_path(dso, map, NULL) <= 0) { + pr_debug("Failed to load kernel map.\n"); + return NULL; + } + } +found: + return dso; +} + +const char *kernel_get_module_path(const char *module) +{ + struct dso *dso = kernel_get_module_dso(module); + return (dso) ? dso->long_name : NULL; +} + +static int convert_exec_to_group(const char *exec, char **result) +{ + char *ptr1, *ptr2, *exec_copy; + char buf[64]; + int ret; + + exec_copy = strdup(exec); + if (!exec_copy) + return -ENOMEM; + + ptr1 = basename(exec_copy); + if (!ptr1) { + ret = -EINVAL; + goto out; + } + + ptr2 = strpbrk(ptr1, "-._"); + if (ptr2) + *ptr2 = '\0'; + ret = e_snprintf(buf, 64, "%s_%s", PERFPROBE_GROUP, ptr1); + if (ret < 0) + goto out; + + *result = strdup(buf); + ret = *result ? 0 : -ENOMEM; + +out: + free(exec_copy); + return ret; +} + +static void clear_perf_probe_point(struct perf_probe_point *pp) +{ + free(pp->file); + free(pp->function); + free(pp->lazy_line); +} + +static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs) +{ + int i; + + for (i = 0; i < ntevs; i++) + clear_probe_trace_event(tevs + i); +} + +#ifdef HAVE_DWARF_SUPPORT +/* + * Some binaries like glibc have special symbols which are on the symbol + * table, but not in the debuginfo. If we can find the address of the + * symbol from map, we can translate the address back to the probe point. 
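
In other words, the helper that follows does a symtab-to-debuginfo round trip: resolve the name through the map's symbol table (which still works when the debuginfo has no entry for it), convert the symbol to an address, then ask the debuginfo layer to map that address back to a source-level probe point. A rough sketch of the same round trip, assuming map and dinfo are already set up; the alias name is hypothetical:

	struct symbol *sym;
	struct perf_probe_point result = { .function = NULL };
	u64 addr = 0;

	map__for_each_symbol_by_name(map, "memcpy_alias", sym) {
		addr = map->unmap_ip(map, sym->start);	/* symbol -> address */
		break;
	}
	if (addr)	/* address -> file:line, via debuginfo */
		debuginfo__find_probe_point(dinfo, (unsigned long)addr, &result);
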
+ */ +static int find_alternative_probe_point(struct debuginfo *dinfo, + struct perf_probe_point *pp, + struct perf_probe_point *result, + const char *target, bool uprobes) +{ + struct map *map = NULL; + struct symbol *sym; + u64 address = 0; + int ret = -ENOENT; + + /* This can work only for function-name based one */ + if (!pp->function || pp->file) + return -ENOTSUP; + + map = get_target_map(target, uprobes); + if (!map) + return -EINVAL; + + /* Find the address of given function */ + map__for_each_symbol_by_name(map, pp->function, sym) { + if (uprobes) + address = sym->start; + else + address = map->unmap_ip(map, sym->start); + break; + } + if (!address) { + ret = -ENOENT; + goto out; + } + pr_debug("Symbol %s address found : %" PRIx64 "\n", + pp->function, address); + + ret = debuginfo__find_probe_point(dinfo, (unsigned long)address, + result); + if (ret <= 0) + ret = (!ret) ? -ENOENT : ret; + else { + result->offset += pp->offset; + result->line += pp->line; + result->retprobe = pp->retprobe; + ret = 0; + } + +out: + put_target_map(map, uprobes); + return ret; + +} + +static int get_alternative_probe_event(struct debuginfo *dinfo, + struct perf_probe_event *pev, + struct perf_probe_point *tmp, + const char *target) +{ + int ret; + + memcpy(tmp, &pev->point, sizeof(*tmp)); + memset(&pev->point, 0, sizeof(pev->point)); + ret = find_alternative_probe_point(dinfo, tmp, &pev->point, + target, pev->uprobes); + if (ret < 0) + memcpy(&pev->point, tmp, sizeof(*tmp)); + + return ret; +} + +static int get_alternative_line_range(struct debuginfo *dinfo, + struct line_range *lr, + const char *target, bool user) +{ + struct perf_probe_point pp = { .function = lr->function, + .file = lr->file, + .line = lr->start }; + struct perf_probe_point result; + int ret, len = 0; + + memset(&result, 0, sizeof(result)); + + if (lr->end != INT_MAX) + len = lr->end - lr->start; + ret = find_alternative_probe_point(dinfo, &pp, &result, + target, user); + if (!ret) { + lr->function = result.function; + lr->file = result.file; + lr->start = result.line; + if (lr->end != INT_MAX) + lr->end = lr->start + len; + clear_perf_probe_point(&pp); + } + return ret; +} + +/* Open new debuginfo of given module */ +static struct debuginfo *open_debuginfo(const char *module, bool silent) +{ + const char *path = module; + struct debuginfo *ret; + + if (!module || !strchr(module, '/')) { + path = kernel_get_module_path(module); + if (!path) { + if (!silent) + pr_err("Failed to find path of %s module.\n", + module ?: "kernel"); + return NULL; + } + } + ret = debuginfo__new(path); + if (!ret && !silent) { + pr_warning("The %s file has no debug information.\n", path); + if (!module || !strtailcmp(path, ".ko")) + pr_warning("Rebuild with CONFIG_DEBUG_INFO=y, "); + else + pr_warning("Rebuild with -g, "); + pr_warning("or install an appropriate debuginfo package.\n"); + } + return ret; +} + + +static int get_text_start_address(const char *exec, unsigned long *address) +{ + Elf *elf; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + int fd, ret = -ENOENT; + + fd = open(exec, O_RDONLY); + if (fd < 0) + return -errno; + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) + return -EINVAL; + + if (gelf_getehdr(elf, &ehdr) == NULL) + goto out; + + if (!elf_section_by_name(elf, &ehdr, &shdr, ".text", NULL)) + goto out; + + *address = shdr.sh_addr - shdr.sh_offset; + ret = 0; +out: + elf_end(elf); + return ret; +} + +/* + * Convert trace point to probe point with debuginfo + */ +static int find_perf_probe_point_from_dwarf(struct 
probe_trace_point *tp, + struct perf_probe_point *pp, + bool is_kprobe) +{ + struct debuginfo *dinfo = NULL; + unsigned long stext = 0; + u64 addr = tp->address; + int ret = -ENOENT; + + /* convert the address to dwarf address */ + if (!is_kprobe) { + if (!addr) { + ret = -EINVAL; + goto error; + } + ret = get_text_start_address(tp->module, &stext); + if (ret < 0) + goto error; + addr += stext; + } else { + addr = kernel_get_symbol_address_by_name(tp->symbol, false); + if (addr == 0) + goto error; + addr += tp->offset; + } + + pr_debug("try to find information at %" PRIx64 " in %s\n", addr, + tp->module ? : "kernel"); + + dinfo = open_debuginfo(tp->module, verbose == 0); + if (dinfo) { + ret = debuginfo__find_probe_point(dinfo, + (unsigned long)addr, pp); + debuginfo__delete(dinfo); + } else + ret = -ENOENT; + + if (ret > 0) { + pp->retprobe = tp->retprobe; + return 0; + } +error: + pr_debug("Failed to find corresponding probes from debuginfo.\n"); + return ret ? : -ENOENT; +} + +static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs, + int ntevs, const char *exec) +{ + int i, ret = 0; + unsigned long stext = 0; + + if (!exec) + return 0; + + ret = get_text_start_address(exec, &stext); + if (ret < 0) + return ret; + + for (i = 0; i < ntevs && ret >= 0; i++) { + /* point.address is the addres of point.symbol + point.offset */ + tevs[i].point.address -= stext; + tevs[i].point.module = strdup(exec); + if (!tevs[i].point.module) { + ret = -ENOMEM; + break; + } + tevs[i].uprobes = true; + } + + return ret; +} + +static int add_module_to_probe_trace_events(struct probe_trace_event *tevs, + int ntevs, const char *module) +{ + int i, ret = 0; + char *tmp; + + if (!module) + return 0; + + tmp = strrchr(module, '/'); + if (tmp) { + /* This is a module path -- get the module name */ + module = strdup(tmp + 1); + if (!module) + return -ENOMEM; + tmp = strchr(module, '.'); + if (tmp) + *tmp = '\0'; + tmp = (char *)module; /* For free() */ + } + + for (i = 0; i < ntevs; i++) { + tevs[i].point.module = strdup(module); + if (!tevs[i].point.module) { + ret = -ENOMEM; + break; + } + } + + free(tmp); + return ret; +} + +/* Post processing the probe events */ +static int post_process_probe_trace_events(struct probe_trace_event *tevs, + int ntevs, const char *module, + bool uprobe) +{ + struct ref_reloc_sym *reloc_sym; + char *tmp; + int i; + + if (uprobe) + return add_exec_to_probe_trace_events(tevs, ntevs, module); + + /* Note that currently ref_reloc_sym based probe is not for drivers */ + if (module) + return add_module_to_probe_trace_events(tevs, ntevs, module); + + reloc_sym = kernel_get_ref_reloc_sym(); + if (!reloc_sym) { + pr_warning("Relocated base symbol is not found!\n"); + return -EINVAL; + } + + for (i = 0; i < ntevs; i++) { + if (tevs[i].point.address && !tevs[i].point.retprobe) { + tmp = strdup(reloc_sym->name); + if (!tmp) + return -ENOMEM; + free(tevs[i].point.symbol); + tevs[i].point.symbol = tmp; + tevs[i].point.offset = tevs[i].point.address - + reloc_sym->unrelocated_addr; + } + } + return 0; +} + +/* Try to find perf_probe_event with debuginfo */ +static int try_to_find_probe_trace_events(struct perf_probe_event *pev, + struct probe_trace_event **tevs, + int max_tevs, const char *target) +{ + bool need_dwarf = perf_probe_event_need_dwarf(pev); + struct perf_probe_point tmp; + struct debuginfo *dinfo; + int ntevs, ret = 0; + + dinfo = open_debuginfo(target, !need_dwarf); + + if (!dinfo) { + if (need_dwarf) + return -ENOENT; + pr_debug("Could not open debuginfo. 
Try to use symbols.\n"); + return 0; + } + + pr_debug("Try to find probe point from debuginfo.\n"); + /* Searching trace events corresponding to a probe event */ + ntevs = debuginfo__find_trace_events(dinfo, pev, tevs, max_tevs); + + if (ntevs == 0) { /* Not found, retry with an alternative */ + ret = get_alternative_probe_event(dinfo, pev, &tmp, target); + if (!ret) { + ntevs = debuginfo__find_trace_events(dinfo, pev, + tevs, max_tevs); + /* + * Write back to the original probe_event for + * setting appropriate (user given) event name + */ + clear_perf_probe_point(&pev->point); + memcpy(&pev->point, &tmp, sizeof(tmp)); + } + } + + debuginfo__delete(dinfo); + + if (ntevs > 0) { /* Succeeded to find trace events */ + pr_debug("Found %d probe_trace_events.\n", ntevs); + ret = post_process_probe_trace_events(*tevs, ntevs, + target, pev->uprobes); + if (ret < 0) { + clear_probe_trace_events(*tevs, ntevs); + zfree(tevs); + } + return ret < 0 ? ret : ntevs; + } + + if (ntevs == 0) { /* No error but failed to find probe point. */ + pr_warning("Probe point '%s' not found.\n", + synthesize_perf_probe_point(&pev->point)); + return -ENOENT; + } + /* Error path : ntevs < 0 */ + pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs); + if (ntevs == -EBADF) { + pr_warning("Warning: No dwarf info found in the vmlinux - " + "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n"); + if (!need_dwarf) { + pr_debug("Trying to use symbols.\n"); + return 0; + } + } + return ntevs; +} + +#define LINEBUF_SIZE 256 +#define NR_ADDITIONAL_LINES 2 + +static int __show_one_line(FILE *fp, int l, bool skip, bool show_num) +{ + char buf[LINEBUF_SIZE], sbuf[STRERR_BUFSIZE]; + const char *color = show_num ? "" : PERF_COLOR_BLUE; + const char *prefix = NULL; + + do { + if (fgets(buf, LINEBUF_SIZE, fp) == NULL) + goto error; + if (skip) + continue; + if (!prefix) { + prefix = show_num ? "%7d " : " "; + color_fprintf(stdout, color, prefix, l); + } + color_fprintf(stdout, color, "%s", buf); + + } while (strchr(buf, '\n') == NULL); + + return 1; +error: + if (ferror(fp)) { + pr_warning("File read error: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); + return -1; + } + return 0; +} + +static int _show_one_line(FILE *fp, int l, bool skip, bool show_num) +{ + int rv = __show_one_line(fp, l, skip, show_num); + if (rv == 0) { + pr_warning("Source file is shorter than expected.\n"); + rv = -1; + } + return rv; +} + +#define show_one_line_with_num(f,l) _show_one_line(f,l,false,true) +#define show_one_line(f,l) _show_one_line(f,l,false,false) +#define skip_one_line(f,l) _show_one_line(f,l,true,false) +#define show_one_line_or_eof(f,l) __show_one_line(f,l,false,false) + +/* + * Show line-range always requires debuginfo to find source file and + * line number. 
+ */ +static int __show_line_range(struct line_range *lr, const char *module, + bool user) +{ + int l = 1; + struct int_node *ln; + struct debuginfo *dinfo; + FILE *fp; + int ret; + char *tmp; + char sbuf[STRERR_BUFSIZE]; + + /* Search a line range */ + dinfo = open_debuginfo(module, false); + if (!dinfo) + return -ENOENT; + + ret = debuginfo__find_line_range(dinfo, lr); + if (!ret) { /* Not found, retry with an alternative */ + ret = get_alternative_line_range(dinfo, lr, module, user); + if (!ret) + ret = debuginfo__find_line_range(dinfo, lr); + } + debuginfo__delete(dinfo); + if (ret == 0 || ret == -ENOENT) { + pr_warning("Specified source line is not found.\n"); + return -ENOENT; + } else if (ret < 0) { + pr_warning("Debuginfo analysis failed.\n"); + return ret; + } + + /* Convert source file path */ + tmp = lr->path; + ret = get_real_path(tmp, lr->comp_dir, &lr->path); + + /* Free old path when new path is assigned */ + if (tmp != lr->path) + free(tmp); + + if (ret < 0) { + pr_warning("Failed to find source file path.\n"); + return ret; + } + + setup_pager(); + + if (lr->function) + fprintf(stdout, "<%s@%s:%d>\n", lr->function, lr->path, + lr->start - lr->offset); + else + fprintf(stdout, "<%s:%d>\n", lr->path, lr->start); + + fp = fopen(lr->path, "r"); + if (fp == NULL) { + pr_warning("Failed to open %s: %s\n", lr->path, + strerror_r(errno, sbuf, sizeof(sbuf))); + return -errno; + } + /* Skip to starting line number */ + while (l < lr->start) { + ret = skip_one_line(fp, l++); + if (ret < 0) + goto end; + } + + intlist__for_each(ln, lr->line_list) { + for (; ln->i > l; l++) { + ret = show_one_line(fp, l - lr->offset); + if (ret < 0) + goto end; + } + ret = show_one_line_with_num(fp, l++ - lr->offset); + if (ret < 0) + goto end; + } + + if (lr->end == INT_MAX) + lr->end = l + NR_ADDITIONAL_LINES; + while (l <= lr->end) { + ret = show_one_line_or_eof(fp, l++ - lr->offset); + if (ret <= 0) + break; + } +end: + fclose(fp); + return ret; +} + +int show_line_range(struct line_range *lr, const char *module, bool user) +{ + int ret; + + ret = init_symbol_maps(user); + if (ret < 0) + return ret; + ret = __show_line_range(lr, module, user); + exit_symbol_maps(); + + return ret; +} + +static int show_available_vars_at(struct debuginfo *dinfo, + struct perf_probe_event *pev, + int max_vls, struct strfilter *_filter, + bool externs, const char *target) +{ + char *buf; + int ret, i, nvars; + struct str_node *node; + struct variable_list *vls = NULL, *vl; + struct perf_probe_point tmp; + const char *var; + + buf = synthesize_perf_probe_point(&pev->point); + if (!buf) + return -EINVAL; + pr_debug("Searching variables at %s\n", buf); + + ret = debuginfo__find_available_vars_at(dinfo, pev, &vls, + max_vls, externs); + if (!ret) { /* Not found, retry with an alternative */ + ret = get_alternative_probe_event(dinfo, pev, &tmp, target); + if (!ret) { + ret = debuginfo__find_available_vars_at(dinfo, pev, + &vls, max_vls, externs); + /* Release the old probe_point */ + clear_perf_probe_point(&tmp); + } + } + if (ret <= 0) { + if (ret == 0 || ret == -ENOENT) { + pr_err("Failed to find the address of %s\n", buf); + ret = -ENOENT; + } else + pr_warning("Debuginfo analysis failed.\n"); + goto end; + } + + /* Some variables are found */ + fprintf(stdout, "Available variables at %s\n", buf); + for (i = 0; i < ret; i++) { + vl = &vls[i]; + /* + * A probe point might be converted to + * several trace points. 
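
Because a single probe point may expand to several trace points (for instance a function inlined at more than one call site), the loop below prints one variable block per expansion. Assembled from the format strings used below, the output shape is roughly as follows; the symbols, offsets, and variables are invented for illustration:

	Available variables at my_helper
		@<caller_a+24>
			int	flags
		@<caller_b+112>
			(No matched variables)
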
+ */ + fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol, + vl->point.offset); + zfree(&vl->point.symbol); + nvars = 0; + if (vl->vars) { + strlist__for_each(node, vl->vars) { + var = strchr(node->s, '\t') + 1; + if (strfilter__compare(_filter, var)) { + fprintf(stdout, "\t\t%s\n", node->s); + nvars++; + } + } + strlist__delete(vl->vars); + } + if (nvars == 0) + fprintf(stdout, "\t\t(No matched variables)\n"); + } + free(vls); +end: + free(buf); + return ret; +} + +/* Show available variables on given probe point */ +int show_available_vars(struct perf_probe_event *pevs, int npevs, + int max_vls, const char *module, + struct strfilter *_filter, bool externs) +{ + int i, ret = 0; + struct debuginfo *dinfo; + + ret = init_symbol_maps(pevs->uprobes); + if (ret < 0) + return ret; + + dinfo = open_debuginfo(module, false); + if (!dinfo) { + ret = -ENOENT; + goto out; + } + + setup_pager(); + + for (i = 0; i < npevs && ret >= 0; i++) + ret = show_available_vars_at(dinfo, &pevs[i], max_vls, _filter, + externs, module); + + debuginfo__delete(dinfo); +out: + exit_symbol_maps(); + return ret; +} + +#else /* !HAVE_DWARF_SUPPORT */ + +static int +find_perf_probe_point_from_dwarf(struct probe_trace_point *tp __maybe_unused, + struct perf_probe_point *pp __maybe_unused, + bool is_kprobe __maybe_unused) +{ + return -ENOSYS; +} + +static int try_to_find_probe_trace_events(struct perf_probe_event *pev, + struct probe_trace_event **tevs __maybe_unused, + int max_tevs __maybe_unused, + const char *target __maybe_unused) +{ + if (perf_probe_event_need_dwarf(pev)) { + pr_warning("Debuginfo-analysis is not supported.\n"); + return -ENOSYS; + } + + return 0; +} + +int show_line_range(struct line_range *lr __maybe_unused, + const char *module __maybe_unused, + bool user __maybe_unused) +{ + pr_warning("Debuginfo-analysis is not supported.\n"); + return -ENOSYS; +} + +int show_available_vars(struct perf_probe_event *pevs __maybe_unused, + int npevs __maybe_unused, int max_vls __maybe_unused, + const char *module __maybe_unused, + struct strfilter *filter __maybe_unused, + bool externs __maybe_unused) +{ + pr_warning("Debuginfo-analysis is not supported.\n"); + return -ENOSYS; +} +#endif + +void line_range__clear(struct line_range *lr) +{ + free(lr->function); + free(lr->file); + free(lr->path); + free(lr->comp_dir); + intlist__delete(lr->line_list); + memset(lr, 0, sizeof(*lr)); +} + +int line_range__init(struct line_range *lr) +{ + memset(lr, 0, sizeof(*lr)); + lr->line_list = intlist__new(NULL); + if (!lr->line_list) + return -ENOMEM; + else + return 0; +} + +static int parse_line_num(char **ptr, int *val, const char *what) +{ + const char *start = *ptr; + + errno = 0; + *val = strtol(*ptr, ptr, 0); + if (errno || *ptr == start) { + semantic_error("'%s' is not a valid number.\n", what); + return -EINVAL; + } + return 0; +} + +/* + * Stuff 'lr' according to the line range described by 'arg'. 
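
Two worked examples of the grammar spelled out just below may help; the specs are hypothetical, and the commented field values are what parse_line_range_desc() would produce for them:

	struct line_range lr;

	line_range__init(&lr);
	parse_line_range_desc("sched.c:100+10", &lr);
	/* SRC:SLN+NUM -> lr.file = "sched.c", lr.start = 100, lr.end = 109;
	 * +NUM counts lines, so end = start + NUM - 1 */
	line_range__clear(&lr);

	line_range__init(&lr);
	parse_line_range_desc("schedule@kernel/sched/core.c:10-20", &lr);
	/* FNC@SRC:SLN-ELN -> lr.function = "schedule",
	 * lr.file = "kernel/sched/core.c", lr.start = 10, lr.end = 20 */
	line_range__clear(&lr);
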
+ * The line range syntax is described by: + * + * SRC[:SLN[+NUM|-ELN]] + * FNC[@SRC][:SLN[+NUM|-ELN]] + */ +int parse_line_range_desc(const char *arg, struct line_range *lr) +{ + char *range, *file, *name = strdup(arg); + int err; + + if (!name) + return -ENOMEM; + + lr->start = 0; + lr->end = INT_MAX; + + range = strchr(name, ':'); + if (range) { + *range++ = '\0'; + + err = parse_line_num(&range, &lr->start, "start line"); + if (err) + goto err; + + if (*range == '+' || *range == '-') { + const char c = *range++; + + err = parse_line_num(&range, &lr->end, "end line"); + if (err) + goto err; + + if (c == '+') { + lr->end += lr->start; + /* + * Adjust the number of lines here. + * If the number of lines == 1, the + * the end of line should be equal to + * the start of line. + */ + lr->end--; + } + } + + pr_debug("Line range is %d to %d\n", lr->start, lr->end); + + err = -EINVAL; + if (lr->start > lr->end) { + semantic_error("Start line must be smaller" + " than end line.\n"); + goto err; + } + if (*range != '\0') { + semantic_error("Tailing with invalid str '%s'.\n", range); + goto err; + } + } + + file = strchr(name, '@'); + if (file) { + *file = '\0'; + lr->file = strdup(++file); + if (lr->file == NULL) { + err = -ENOMEM; + goto err; + } + lr->function = name; + } else if (strchr(name, '.')) + lr->file = name; + else + lr->function = name; + + return 0; +err: + free(name); + return err; +} + +/* Check the name is good for event/group */ +static bool check_event_name(const char *name) +{ + if (!isalpha(*name) && *name != '_') + return false; + while (*++name != '\0') { + if (!isalpha(*name) && !isdigit(*name) && *name != '_') + return false; + } + return true; +} + +/* Parse probepoint definition. */ +static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev) +{ + struct perf_probe_point *pp = &pev->point; + char *ptr, *tmp; + char c, nc = 0; + /* + * + * perf probe [EVENT=]SRC[:LN|;PTN] + * perf probe [EVENT=]FUNC[@SRC][+OFFS|%return|:LN|;PAT] + * + * TODO:Group name support + */ + if (!arg) + return -EINVAL; + + ptr = strpbrk(arg, ";=@+%"); + if (ptr && *ptr == '=') { /* Event name */ + *ptr = '\0'; + tmp = ptr + 1; + if (strchr(arg, ':')) { + semantic_error("Group name is not supported yet.\n"); + return -ENOTSUP; + } + if (!check_event_name(arg)) { + semantic_error("%s is bad for event name -it must " + "follow C symbol-naming rule.\n", arg); + return -EINVAL; + } + pev->event = strdup(arg); + if (pev->event == NULL) + return -ENOMEM; + pev->group = NULL; + arg = tmp; + } + + ptr = strpbrk(arg, ";:+@%"); + if (ptr) { + nc = *ptr; + *ptr++ = '\0'; + } + + tmp = strdup(arg); + if (tmp == NULL) + return -ENOMEM; + + /* Check arg is function or file and copy it */ + if (strchr(tmp, '.')) /* File */ + pp->file = tmp; + else /* Function */ + pp->function = tmp; + + /* Parse other options */ + while (ptr) { + arg = ptr; + c = nc; + if (c == ';') { /* Lazy pattern must be the last part */ + pp->lazy_line = strdup(arg); + if (pp->lazy_line == NULL) + return -ENOMEM; + break; + } + ptr = strpbrk(arg, ";:+@%"); + if (ptr) { + nc = *ptr; + *ptr++ = '\0'; + } + switch (c) { + case ':': /* Line number */ + pp->line = strtoul(arg, &tmp, 0); + if (*tmp != '\0') { + semantic_error("There is non-digit char" + " in line number.\n"); + return -EINVAL; + } + break; + case '+': /* Byte offset from a symbol */ + pp->offset = strtoul(arg, &tmp, 0); + if (*tmp != '\0') { + semantic_error("There is non-digit character" + " in offset.\n"); + return -EINVAL; + } + break; + case '@': /* File name 
*/ + if (pp->file) { + semantic_error("SRC@SRC is not allowed.\n"); + return -EINVAL; + } + pp->file = strdup(arg); + if (pp->file == NULL) + return -ENOMEM; + break; + case '%': /* Probe places */ + if (strcmp(arg, "return") == 0) { + pp->retprobe = 1; + } else { /* Others not supported yet */ + semantic_error("%%%s is not supported.\n", arg); + return -ENOTSUP; + } + break; + default: /* Buggy case */ + pr_err("This program has a bug at %s:%d.\n", + __FILE__, __LINE__); + return -ENOTSUP; + break; + } + } + + /* Exclusion check */ + if (pp->lazy_line && pp->line) { + semantic_error("Lazy pattern can't be used with" + " line number.\n"); + return -EINVAL; + } + + if (pp->lazy_line && pp->offset) { + semantic_error("Lazy pattern can't be used with offset.\n"); + return -EINVAL; + } + + if (pp->line && pp->offset) { + semantic_error("Offset can't be used with line number.\n"); + return -EINVAL; + } + + if (!pp->line && !pp->lazy_line && pp->file && !pp->function) { + semantic_error("File always requires line number or " + "lazy pattern.\n"); + return -EINVAL; + } + + if (pp->offset && !pp->function) { + semantic_error("Offset requires an entry function.\n"); + return -EINVAL; + } + + if (pp->retprobe && !pp->function) { + semantic_error("Return probe requires an entry function.\n"); + return -EINVAL; + } + + if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe) { + semantic_error("Offset/Line/Lazy pattern can't be used with " + "return probe.\n"); + return -EINVAL; + } + + pr_debug("symbol:%s file:%s line:%d offset:%lu return:%d lazy:%s\n", + pp->function, pp->file, pp->line, pp->offset, pp->retprobe, + pp->lazy_line); + return 0; +} + +/* Parse perf-probe event argument */ +static int parse_perf_probe_arg(char *str, struct perf_probe_arg *arg) +{ + char *tmp, *goodname; + struct perf_probe_arg_field **fieldp; + + pr_debug("parsing arg: %s into ", str); + + tmp = strchr(str, '='); + if (tmp) { + arg->name = strndup(str, tmp - str); + if (arg->name == NULL) + return -ENOMEM; + pr_debug("name:%s ", arg->name); + str = tmp + 1; + } + + tmp = strchr(str, ':'); + if (tmp) { /* Type setting */ + *tmp = '\0'; + arg->type = strdup(tmp + 1); + if (arg->type == NULL) + return -ENOMEM; + pr_debug("type:%s ", arg->type); + } + + tmp = strpbrk(str, "-.["); + if (!is_c_varname(str) || !tmp) { + /* A variable, register, symbol or special value */ + arg->var = strdup(str); + if (arg->var == NULL) + return -ENOMEM; + pr_debug("%s\n", arg->var); + return 0; + } + + /* Structure fields or array element */ + arg->var = strndup(str, tmp - str); + if (arg->var == NULL) + return -ENOMEM; + goodname = arg->var; + pr_debug("%s, ", arg->var); + fieldp = &arg->field; + + do { + *fieldp = zalloc(sizeof(struct perf_probe_arg_field)); + if (*fieldp == NULL) + return -ENOMEM; + if (*tmp == '[') { /* Array */ + str = tmp; + (*fieldp)->index = strtol(str + 1, &tmp, 0); + (*fieldp)->ref = true; + if (*tmp != ']' || tmp == str + 1) { + semantic_error("Array index must be a" + " number.\n"); + return -EINVAL; + } + tmp++; + if (*tmp == '\0') + tmp = NULL; + } else { /* Structure */ + if (*tmp == '.') { + str = tmp + 1; + (*fieldp)->ref = false; + } else if (tmp[1] == '>') { + str = tmp + 2; + (*fieldp)->ref = true; + } else { + semantic_error("Argument parse error: %s\n", + str); + return -EINVAL; + } + tmp = strpbrk(str, "-.["); + } + if (tmp) { + (*fieldp)->name = strndup(str, tmp - str); + if ((*fieldp)->name == NULL) + return -ENOMEM; + if (*str != '[') + goodname = (*fieldp)->name; + pr_debug("%s(%d), ", 
(*fieldp)->name, (*fieldp)->ref); + fieldp = &(*fieldp)->next; + } + } while (tmp); + (*fieldp)->name = strdup(str); + if ((*fieldp)->name == NULL) + return -ENOMEM; + if (*str != '[') + goodname = (*fieldp)->name; + pr_debug("%s(%d)\n", (*fieldp)->name, (*fieldp)->ref); + + /* If no name is specified, set the last field name (not array index)*/ + if (!arg->name) { + arg->name = strdup(goodname); + if (arg->name == NULL) + return -ENOMEM; + } + return 0; +} + +/* Parse perf-probe event command */ +int parse_perf_probe_command(const char *cmd, struct perf_probe_event *pev) +{ + char **argv; + int argc, i, ret = 0; + + argv = argv_split(cmd, &argc); + if (!argv) { + pr_debug("Failed to split arguments.\n"); + return -ENOMEM; + } + if (argc - 1 > MAX_PROBE_ARGS) { + semantic_error("Too many probe arguments (%d).\n", argc - 1); + ret = -ERANGE; + goto out; + } + /* Parse probe point */ + ret = parse_perf_probe_point(argv[0], pev); + if (ret < 0) + goto out; + + /* Copy arguments and ensure return probe has no C argument */ + pev->nargs = argc - 1; + pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs); + if (pev->args == NULL) { + ret = -ENOMEM; + goto out; + } + for (i = 0; i < pev->nargs && ret >= 0; i++) { + ret = parse_perf_probe_arg(argv[i + 1], &pev->args[i]); + if (ret >= 0 && + is_c_varname(pev->args[i].var) && pev->point.retprobe) { + semantic_error("You can't specify local variable for" + " kretprobe.\n"); + ret = -EINVAL; + } + } +out: + argv_free(argv); + + return ret; +} + +/* Return true if this perf_probe_event requires debuginfo */ +bool perf_probe_event_need_dwarf(struct perf_probe_event *pev) +{ + int i; + + if (pev->point.file || pev->point.line || pev->point.lazy_line) + return true; + + for (i = 0; i < pev->nargs; i++) + if (is_c_varname(pev->args[i].var)) + return true; + + return false; +} + +/* Parse probe_events event into struct probe_point */ +static int parse_probe_trace_command(const char *cmd, + struct probe_trace_event *tev) +{ + struct probe_trace_point *tp = &tev->point; + char pr; + char *p; + char *argv0_str = NULL, *fmt, *fmt1_str, *fmt2_str, *fmt3_str; + int ret, i, argc; + char **argv; + + pr_debug("Parsing probe_events: %s\n", cmd); + argv = argv_split(cmd, &argc); + if (!argv) { + pr_debug("Failed to split arguments.\n"); + return -ENOMEM; + } + if (argc < 2) { + semantic_error("Too few probe arguments.\n"); + ret = -ERANGE; + goto out; + } + + /* Scan event and group name. 
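
For reference, parse_probe_trace_command() below consumes lines in the kernel's [ku]probe_events format, p|r:GROUP/EVENT SYMBOL[+OFFSET]|ADDRESS [ARG...]. Given this sample (invented) kprobe line:

	p:probe/vfs_read vfs_read+4 count=%dx

the strtok_r() calls below yield pr = 'p' (so not a retprobe), tev->group = "probe", tev->event = "vfs_read", then tp->symbol = "vfs_read", tp->offset = 4, and a single argument with name "count" and value "%dx".
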
*/ + argv0_str = strdup(argv[0]); + if (argv0_str == NULL) { + ret = -ENOMEM; + goto out; + } + fmt1_str = strtok_r(argv0_str, ":", &fmt); + fmt2_str = strtok_r(NULL, "/", &fmt); + fmt3_str = strtok_r(NULL, " \t", &fmt); + if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL + || fmt3_str == NULL) { + semantic_error("Failed to parse event name: %s\n", argv[0]); + ret = -EINVAL; + goto out; + } + pr = fmt1_str[0]; + tev->group = strdup(fmt2_str); + tev->event = strdup(fmt3_str); + if (tev->group == NULL || tev->event == NULL) { + ret = -ENOMEM; + goto out; + } + pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr); + + tp->retprobe = (pr == 'r'); + + /* Scan module name(if there), function name and offset */ + p = strchr(argv[1], ':'); + if (p) { + tp->module = strndup(argv[1], p - argv[1]); + p++; + } else + p = argv[1]; + fmt1_str = strtok_r(p, "+", &fmt); + if (fmt1_str[0] == '0') /* only the address started with 0x */ + tp->address = strtoul(fmt1_str, NULL, 0); + else { + /* Only the symbol-based probe has offset */ + tp->symbol = strdup(fmt1_str); + if (tp->symbol == NULL) { + ret = -ENOMEM; + goto out; + } + fmt2_str = strtok_r(NULL, "", &fmt); + if (fmt2_str == NULL) + tp->offset = 0; + else + tp->offset = strtoul(fmt2_str, NULL, 10); + } + + tev->nargs = argc - 2; + tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs); + if (tev->args == NULL) { + ret = -ENOMEM; + goto out; + } + for (i = 0; i < tev->nargs; i++) { + p = strchr(argv[i + 2], '='); + if (p) /* We don't need which register is assigned. */ + *p++ = '\0'; + else + p = argv[i + 2]; + tev->args[i].name = strdup(argv[i + 2]); + /* TODO: parse regs and offset */ + tev->args[i].value = strdup(p); + if (tev->args[i].name == NULL || tev->args[i].value == NULL) { + ret = -ENOMEM; + goto out; + } + } + ret = 0; +out: + free(argv0_str); + argv_free(argv); + return ret; +} + +/* Compose only probe arg */ +int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, size_t len) +{ + struct perf_probe_arg_field *field = pa->field; + int ret; + char *tmp = buf; + + if (pa->name && pa->var) + ret = e_snprintf(tmp, len, "%s=%s", pa->name, pa->var); + else + ret = e_snprintf(tmp, len, "%s", pa->name ? pa->name : pa->var); + if (ret <= 0) + goto error; + tmp += ret; + len -= ret; + + while (field) { + if (field->name[0] == '[') + ret = e_snprintf(tmp, len, "%s", field->name); + else + ret = e_snprintf(tmp, len, "%s%s", + field->ref ? "->" : ".", field->name); + if (ret <= 0) + goto error; + tmp += ret; + len -= ret; + field = field->next; + } + + if (pa->type) { + ret = e_snprintf(tmp, len, ":%s", pa->type); + if (ret <= 0) + goto error; + tmp += ret; + len -= ret; + } + + return tmp - buf; +error: + pr_debug("Failed to synthesize perf probe argument: %d\n", ret); + return ret; +} + +/* Compose only probe point (not argument) */ +static char *synthesize_perf_probe_point(struct perf_probe_point *pp) +{ + char *buf, *tmp; + char offs[32] = "", line[32] = "", file[32] = ""; + int ret, len; + + buf = zalloc(MAX_CMDLEN); + if (buf == NULL) { + ret = -ENOMEM; + goto error; + } + if (pp->offset) { + ret = e_snprintf(offs, 32, "+%lu", pp->offset); + if (ret <= 0) + goto error; + } + if (pp->line) { + ret = e_snprintf(line, 32, ":%d", pp->line); + if (ret <= 0) + goto error; + } + if (pp->file) { + tmp = pp->file; + len = strlen(tmp); + if (len > 30) { + tmp = strchr(pp->file + len - 30, '/'); + tmp = tmp ? 
tmp + 1 : pp->file + len - 30; + } + ret = e_snprintf(file, 32, "@%s", tmp); + if (ret <= 0) + goto error; + } + + if (pp->function) + ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s%s", pp->function, + offs, pp->retprobe ? "%return" : "", line, + file); + else + ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", file, line); + if (ret <= 0) + goto error; + + return buf; +error: + pr_debug("Failed to synthesize perf probe point: %d\n", ret); + free(buf); + return NULL; +} + +#if 0 +char *synthesize_perf_probe_command(struct perf_probe_event *pev) +{ + char *buf; + int i, len, ret; + + buf = synthesize_perf_probe_point(&pev->point); + if (!buf) + return NULL; + + len = strlen(buf); + for (i = 0; i < pev->nargs; i++) { + ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s", + pev->args[i].name); + if (ret <= 0) { + free(buf); + return NULL; + } + len += ret; + } + + return buf; +} +#endif + +static int __synthesize_probe_trace_arg_ref(struct probe_trace_arg_ref *ref, + char **buf, size_t *buflen, + int depth) +{ + int ret; + if (ref->next) { + depth = __synthesize_probe_trace_arg_ref(ref->next, buf, + buflen, depth + 1); + if (depth < 0) + goto out; + } + + ret = e_snprintf(*buf, *buflen, "%+ld(", ref->offset); + if (ret < 0) + depth = ret; + else { + *buf += ret; + *buflen -= ret; + } +out: + return depth; + +} + +static int synthesize_probe_trace_arg(struct probe_trace_arg *arg, + char *buf, size_t buflen) +{ + struct probe_trace_arg_ref *ref = arg->ref; + int ret, depth = 0; + char *tmp = buf; + + /* Argument name or separator */ + if (arg->name) + ret = e_snprintf(buf, buflen, " %s=", arg->name); + else + ret = e_snprintf(buf, buflen, " "); + if (ret < 0) + return ret; + buf += ret; + buflen -= ret; + + /* Special case: @XXX */ + if (arg->value[0] == '@' && arg->ref) + ref = ref->next; + + /* Dereferencing arguments */ + if (ref) { + depth = __synthesize_probe_trace_arg_ref(ref, &buf, + &buflen, 1); + if (depth < 0) + return depth; + } + + /* Print argument value */ + if (arg->value[0] == '@' && arg->ref) + ret = e_snprintf(buf, buflen, "%s%+ld", arg->value, + arg->ref->offset); + else + ret = e_snprintf(buf, buflen, "%s", arg->value); + if (ret < 0) + return ret; + buf += ret; + buflen -= ret; + + /* Closing */ + while (depth--) { + ret = e_snprintf(buf, buflen, ")"); + if (ret < 0) + return ret; + buf += ret; + buflen -= ret; + } + /* Print argument type */ + if (arg->type) { + ret = e_snprintf(buf, buflen, ":%s", arg->type); + if (ret <= 0) + return ret; + buf += ret; + } + + return buf - tmp; +} + +char *synthesize_probe_trace_command(struct probe_trace_event *tev) +{ + struct probe_trace_point *tp = &tev->point; + char *buf; + int i, len, ret; + + buf = zalloc(MAX_CMDLEN); + if (buf == NULL) + return NULL; + + len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s ", tp->retprobe ? 'r' : 'p', + tev->group, tev->event); + if (len <= 0) + goto error; + + /* Uprobes must have tp->address and tp->module */ + if (tev->uprobes && (!tp->address || !tp->module)) + goto error; + + /* Use the tp->address for uprobes */ + if (tev->uprobes) + ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s:0x%lx", + tp->module, tp->address); + else + ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s%s%s+%lu", + tp->module ?: "", tp->module ? 
":" : "", + tp->symbol, tp->offset); + + if (ret <= 0) + goto error; + len += ret; + + for (i = 0; i < tev->nargs; i++) { + ret = synthesize_probe_trace_arg(&tev->args[i], buf + len, + MAX_CMDLEN - len); + if (ret <= 0) + goto error; + len += ret; + } + + return buf; +error: + free(buf); + return NULL; +} + +static int find_perf_probe_point_from_map(struct probe_trace_point *tp, + struct perf_probe_point *pp, + bool is_kprobe) +{ + struct symbol *sym = NULL; + struct map *map; + u64 addr; + int ret = -ENOENT; + + if (!is_kprobe) { + map = dso__new_map(tp->module); + if (!map) + goto out; + addr = tp->address; + sym = map__find_symbol(map, addr, NULL); + } else { + addr = kernel_get_symbol_address_by_name(tp->symbol, true); + if (addr) { + addr += tp->offset; + sym = __find_kernel_function(addr, &map); + } + } + if (!sym) + goto out; + + pp->retprobe = tp->retprobe; + pp->offset = addr - map->unmap_ip(map, sym->start); + pp->function = strdup(sym->name); + ret = pp->function ? 0 : -ENOMEM; + +out: + if (map && !is_kprobe) { + dso__delete(map->dso); + map__delete(map); + } + + return ret; +} + +static int convert_to_perf_probe_point(struct probe_trace_point *tp, + struct perf_probe_point *pp, + bool is_kprobe) +{ + char buf[128]; + int ret; + + ret = find_perf_probe_point_from_dwarf(tp, pp, is_kprobe); + if (!ret) + return 0; + ret = find_perf_probe_point_from_map(tp, pp, is_kprobe); + if (!ret) + return 0; + + pr_debug("Failed to find probe point from both of dwarf and map.\n"); + + if (tp->symbol) { + pp->function = strdup(tp->symbol); + pp->offset = tp->offset; + } else if (!tp->module && !is_kprobe) { + ret = e_snprintf(buf, 128, "0x%" PRIx64, (u64)tp->address); + if (ret < 0) + return ret; + pp->function = strdup(buf); + pp->offset = 0; + } + if (pp->function == NULL) + return -ENOMEM; + + pp->retprobe = tp->retprobe; + + return 0; +} + +static int convert_to_perf_probe_event(struct probe_trace_event *tev, + struct perf_probe_event *pev, bool is_kprobe) +{ + char buf[64] = ""; + int i, ret; + + /* Convert event/group name */ + pev->event = strdup(tev->event); + pev->group = strdup(tev->group); + if (pev->event == NULL || pev->group == NULL) + return -ENOMEM; + + /* Convert trace_point to probe_point */ + ret = convert_to_perf_probe_point(&tev->point, &pev->point, is_kprobe); + if (ret < 0) + return ret; + + /* Convert trace_arg to probe_arg */ + pev->nargs = tev->nargs; + pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs); + if (pev->args == NULL) + return -ENOMEM; + for (i = 0; i < tev->nargs && ret >= 0; i++) { + if (tev->args[i].name) + pev->args[i].name = strdup(tev->args[i].name); + else { + ret = synthesize_probe_trace_arg(&tev->args[i], + buf, 64); + pev->args[i].name = strdup(buf); + } + if (pev->args[i].name == NULL && ret >= 0) + ret = -ENOMEM; + } + + if (ret < 0) + clear_perf_probe_event(pev); + + return ret; +} + +void clear_perf_probe_event(struct perf_probe_event *pev) +{ + struct perf_probe_arg_field *field, *next; + int i; + + free(pev->event); + free(pev->group); + free(pev->target); + clear_perf_probe_point(&pev->point); + + for (i = 0; i < pev->nargs; i++) { + free(pev->args[i].name); + free(pev->args[i].var); + free(pev->args[i].type); + field = pev->args[i].field; + while (field) { + next = field->next; + zfree(&field->name); + free(field); + field = next; + } + } + free(pev->args); + memset(pev, 0, sizeof(*pev)); +} + +static void clear_probe_trace_event(struct probe_trace_event *tev) +{ + struct probe_trace_arg_ref *ref, *next; + int i; + + 
free(tev->event); + free(tev->group); + free(tev->point.symbol); + free(tev->point.module); + for (i = 0; i < tev->nargs; i++) { + free(tev->args[i].name); + free(tev->args[i].value); + free(tev->args[i].type); + ref = tev->args[i].ref; + while (ref) { + next = ref->next; + free(ref); + ref = next; + } + } + free(tev->args); + memset(tev, 0, sizeof(*tev)); +} + +static void print_open_warning(int err, bool is_kprobe) +{ + char sbuf[STRERR_BUFSIZE]; + + if (err == -ENOENT) { + const char *config; + + if (!is_kprobe) + config = "CONFIG_UPROBE_EVENTS"; + else + config = "CONFIG_KPROBE_EVENTS"; + + pr_warning("%cprobe_events file does not exist" + " - please rebuild kernel with %s.\n", + is_kprobe ? 'k' : 'u', config); + } else if (err == -ENOTSUP) + pr_warning("Tracefs or debugfs is not mounted.\n"); + else + pr_warning("Failed to open %cprobe_events: %s\n", + is_kprobe ? 'k' : 'u', + strerror_r(-err, sbuf, sizeof(sbuf))); +} + +static void print_both_open_warning(int kerr, int uerr) +{ + /* Both kprobes and uprobes are disabled, warn it. */ + if (kerr == -ENOTSUP && uerr == -ENOTSUP) + pr_warning("Tracefs or debugfs is not mounted.\n"); + else if (kerr == -ENOENT && uerr == -ENOENT) + pr_warning("Please rebuild kernel with CONFIG_KPROBE_EVENTS " + "or/and CONFIG_UPROBE_EVENTS.\n"); + else { + char sbuf[STRERR_BUFSIZE]; + pr_warning("Failed to open kprobe events: %s.\n", + strerror_r(-kerr, sbuf, sizeof(sbuf))); + pr_warning("Failed to open uprobe events: %s.\n", + strerror_r(-uerr, sbuf, sizeof(sbuf))); + } +} + +static int open_probe_events(const char *trace_file, bool readwrite) +{ + char buf[PATH_MAX]; + const char *__debugfs; + const char *tracing_dir = ""; + int ret; + + __debugfs = tracefs_find_mountpoint(); + if (__debugfs == NULL) { + tracing_dir = "tracing/"; + + __debugfs = debugfs_find_mountpoint(); + if (__debugfs == NULL) + return -ENOTSUP; + } + + ret = e_snprintf(buf, PATH_MAX, "%s/%s%s", + __debugfs, tracing_dir, trace_file); + if (ret >= 0) { + pr_debug("Opening %s write=%d\n", buf, readwrite); + if (readwrite && !probe_event_dry_run) + ret = open(buf, O_RDWR, O_APPEND); + else + ret = open(buf, O_RDONLY, 0); + + if (ret < 0) + ret = -errno; + } + return ret; +} + +static int open_kprobe_events(bool readwrite) +{ + return open_probe_events("kprobe_events", readwrite); +} + +static int open_uprobe_events(bool readwrite) +{ + return open_probe_events("uprobe_events", readwrite); +} + +/* Get raw string list of current kprobe_events or uprobe_events */ +static struct strlist *get_probe_trace_command_rawlist(int fd) +{ + int ret, idx; + FILE *fp; + char buf[MAX_CMDLEN]; + char *p; + struct strlist *sl; + + sl = strlist__new(true, NULL); + + fp = fdopen(dup(fd), "r"); + while (!feof(fp)) { + p = fgets(buf, MAX_CMDLEN, fp); + if (!p) + break; + + idx = strlen(p) - 1; + if (p[idx] == '\n') + p[idx] = '\0'; + ret = strlist__add(sl, buf); + if (ret < 0) { + pr_debug("strlist__add failed (%d)\n", ret); + strlist__delete(sl); + return NULL; + } + } + fclose(fp); + + return sl; +} + +struct kprobe_blacklist_node { + struct list_head list; + unsigned long start; + unsigned long end; + char *symbol; +}; + +static void kprobe_blacklist__delete(struct list_head *blacklist) +{ + struct kprobe_blacklist_node *node; + + while (!list_empty(blacklist)) { + node = list_first_entry(blacklist, + struct kprobe_blacklist_node, list); + list_del(&node->list); + free(node->symbol); + free(node); + } +} + +static int kprobe_blacklist__load(struct list_head *blacklist) +{ + struct kprobe_blacklist_node 
*node; + const char *__debugfs = debugfs_find_mountpoint(); + char buf[PATH_MAX], *p; + FILE *fp; + int ret; + + if (__debugfs == NULL) + return -ENOTSUP; + + ret = e_snprintf(buf, PATH_MAX, "%s/kprobes/blacklist", __debugfs); + if (ret < 0) + return ret; + + fp = fopen(buf, "r"); + if (!fp) + return -errno; + + ret = 0; + while (fgets(buf, PATH_MAX, fp)) { + node = zalloc(sizeof(*node)); + if (!node) { + ret = -ENOMEM; + break; + } + INIT_LIST_HEAD(&node->list); + list_add_tail(&node->list, blacklist); + if (sscanf(buf, "0x%lx-0x%lx", &node->start, &node->end) != 2) { + ret = -EINVAL; + break; + } + p = strchr(buf, '\t'); + if (p) { + p++; + if (p[strlen(p) - 1] == '\n') + p[strlen(p) - 1] = '\0'; + } else + p = (char *)"unknown"; + node->symbol = strdup(p); + if (!node->symbol) { + ret = -ENOMEM; + break; + } + pr_debug2("Blacklist: 0x%lx-0x%lx, %s\n", + node->start, node->end, node->symbol); + ret++; + } + if (ret < 0) + kprobe_blacklist__delete(blacklist); + fclose(fp); + + return ret; +} + +static struct kprobe_blacklist_node * +kprobe_blacklist__find_by_address(struct list_head *blacklist, + unsigned long address) +{ + struct kprobe_blacklist_node *node; + + list_for_each_entry(node, blacklist, list) { + if (node->start <= address && address <= node->end) + return node; + } + + return NULL; +} + +/* Show an event */ +static int show_perf_probe_event(struct perf_probe_event *pev, + const char *module) +{ + int i, ret; + char buf[128]; + char *place; + + /* Synthesize only event probe point */ + place = synthesize_perf_probe_point(&pev->point); + if (!place) + return -EINVAL; + + ret = e_snprintf(buf, 128, "%s:%s", pev->group, pev->event); + if (ret < 0) + return ret; + + pr_info(" %-20s (on %s", buf, place); + if (module) + pr_info(" in %s", module); + + if (pev->nargs > 0) { + pr_info(" with"); + for (i = 0; i < pev->nargs; i++) { + ret = synthesize_perf_probe_arg(&pev->args[i], + buf, 128); + if (ret < 0) + break; + pr_info(" %s", buf); + } + } + pr_info(")\n"); + free(place); + return ret; +} + +static int __show_perf_probe_events(int fd, bool is_kprobe) +{ + int ret = 0; + struct probe_trace_event tev; + struct perf_probe_event pev; + struct strlist *rawlist; + struct str_node *ent; + + memset(&tev, 0, sizeof(tev)); + memset(&pev, 0, sizeof(pev)); + + rawlist = get_probe_trace_command_rawlist(fd); + if (!rawlist) + return -ENOMEM; + + strlist__for_each(ent, rawlist) { + ret = parse_probe_trace_command(ent->s, &tev); + if (ret >= 0) { + ret = convert_to_perf_probe_event(&tev, &pev, + is_kprobe); + if (ret >= 0) + ret = show_perf_probe_event(&pev, + tev.point.module); + } + clear_perf_probe_event(&pev); + clear_probe_trace_event(&tev); + if (ret < 0) + break; + } + strlist__delete(rawlist); + + return ret; +} + +/* List up current perf-probe events */ +int show_perf_probe_events(void) +{ + int kp_fd, up_fd, ret; + + setup_pager(); + + ret = init_symbol_maps(false); + if (ret < 0) + return ret; + + kp_fd = open_kprobe_events(false); + if (kp_fd >= 0) { + ret = __show_perf_probe_events(kp_fd, true); + close(kp_fd); + if (ret < 0) + goto out; + } + + up_fd = open_uprobe_events(false); + if (kp_fd < 0 && up_fd < 0) { + print_both_open_warning(kp_fd, up_fd); + ret = kp_fd; + goto out; + } + + if (up_fd >= 0) { + ret = __show_perf_probe_events(up_fd, false); + close(up_fd); + } +out: + exit_symbol_maps(); + return ret; +} + +/* Get current perf-probe event names */ +static struct strlist *get_probe_trace_event_names(int fd, bool include_group) +{ + char buf[128]; + struct strlist *sl, 
*rawlist; + struct str_node *ent; + struct probe_trace_event tev; + int ret = 0; + + memset(&tev, 0, sizeof(tev)); + rawlist = get_probe_trace_command_rawlist(fd); + if (!rawlist) + return NULL; + sl = strlist__new(true, NULL); + strlist__for_each(ent, rawlist) { + ret = parse_probe_trace_command(ent->s, &tev); + if (ret < 0) + break; + if (include_group) { + ret = e_snprintf(buf, 128, "%s:%s", tev.group, + tev.event); + if (ret >= 0) + ret = strlist__add(sl, buf); + } else + ret = strlist__add(sl, tev.event); + clear_probe_trace_event(&tev); + if (ret < 0) + break; + } + strlist__delete(rawlist); + + if (ret < 0) { + strlist__delete(sl); + return NULL; + } + return sl; +} + +static int write_probe_trace_event(int fd, struct probe_trace_event *tev) +{ + int ret = 0; + char *buf = synthesize_probe_trace_command(tev); + char sbuf[STRERR_BUFSIZE]; + + if (!buf) { + pr_debug("Failed to synthesize probe trace event.\n"); + return -EINVAL; + } + + pr_debug("Writing event: %s\n", buf); + if (!probe_event_dry_run) { + ret = write(fd, buf, strlen(buf)); + if (ret <= 0) { + ret = -errno; + pr_warning("Failed to write event: %s\n", + strerror_r(errno, sbuf, sizeof(sbuf))); + } + } + free(buf); + return ret; +} + +static int get_new_event_name(char *buf, size_t len, const char *base, + struct strlist *namelist, bool allow_suffix) +{ + int i, ret; + + /* Try no suffix */ + ret = e_snprintf(buf, len, "%s", base); + if (ret < 0) { + pr_debug("snprintf() failed: %d\n", ret); + return ret; + } + if (!strlist__has_entry(namelist, buf)) + return 0; + + if (!allow_suffix) { + pr_warning("Error: event \"%s\" already exists. " + "(Use -f to force duplicates.)\n", base); + return -EEXIST; + } + + /* Try to add suffix */ + for (i = 1; i < MAX_EVENT_INDEX; i++) { + ret = e_snprintf(buf, len, "%s_%d", base, i); + if (ret < 0) { + pr_debug("snprintf() failed: %d\n", ret); + return ret; + } + if (!strlist__has_entry(namelist, buf)) + break; + } + if (i == MAX_EVENT_INDEX) { + pr_warning("Too many events are on the same function.\n"); + ret = -ERANGE; + } + + return ret; +} + +/* Warn if the current kernel's uprobe implementation is old */ +static void warn_uprobe_event_compat(struct probe_trace_event *tev) +{ + int i; + char *buf = synthesize_probe_trace_command(tev); + + /* Old uprobe event doesn't support memory dereference */ + if (!tev->uprobes || tev->nargs == 0 || !buf) + goto out; + + for (i = 0; i < tev->nargs; i++) + if (strglobmatch(tev->args[i].value, "[$@+-]*")) { + pr_warning("Please upgrade your kernel to at least " + "3.14 to have access to feature %s\n", + tev->args[i].value); + break; + } +out: + free(buf); +} + +static int __add_probe_trace_events(struct perf_probe_event *pev, + struct probe_trace_event *tevs, + int ntevs, bool allow_suffix) +{ + int i, fd, ret; + struct probe_trace_event *tev = NULL; + char buf[64]; + const char *event, *group; + struct strlist *namelist; + LIST_HEAD(blacklist); + struct kprobe_blacklist_node *node; + + if (pev->uprobes) + fd = open_uprobe_events(true); + else + fd = open_kprobe_events(true); + + if (fd < 0) { + print_open_warning(fd, !pev->uprobes); + return fd; + } + + /* Get current event names */ + namelist = get_probe_trace_event_names(fd, false); + if (!namelist) { + pr_debug("Failed to get current event list.\n"); + return -EIO; + } + /* Get kprobe blacklist if exists */ + if (!pev->uprobes) { + ret = kprobe_blacklist__load(&blacklist); + if (ret < 0) + pr_debug("No kprobe blacklist support, ignored\n"); + } + + ret = 0; + pr_info("Added new event%s\n", 
(ntevs > 1) ? "s:" : ":"); + for (i = 0; i < ntevs; i++) { + tev = &tevs[i]; + /* Ensure that the address is NOT blacklisted */ + node = kprobe_blacklist__find_by_address(&blacklist, + tev->point.address); + if (node) { + pr_warning("Warning: Skipped probing on blacklisted function: %s\n", node->symbol); + continue; + } + + if (pev->event) + event = pev->event; + else + if (pev->point.function) + event = pev->point.function; + else + event = tev->point.symbol; + if (pev->group) + group = pev->group; + else + group = PERFPROBE_GROUP; + + /* Get an unused new event name */ + ret = get_new_event_name(buf, 64, event, + namelist, allow_suffix); + if (ret < 0) + break; + event = buf; + + tev->event = strdup(event); + tev->group = strdup(group); + if (tev->event == NULL || tev->group == NULL) { + ret = -ENOMEM; + break; + } + ret = write_probe_trace_event(fd, tev); + if (ret < 0) + break; + /* Add added event name to namelist */ + strlist__add(namelist, event); + + /* Trick here - save current event/group */ + event = pev->event; + group = pev->group; + pev->event = tev->event; + pev->group = tev->group; + show_perf_probe_event(pev, tev->point.module); + /* Trick here - restore current event/group */ + pev->event = (char *)event; + pev->group = (char *)group; + + /* + * Probes after the first probe which comes from same + * user input are always allowed to add suffix, because + * there might be several addresses corresponding to + * one code line. + */ + allow_suffix = true; + } + if (ret == -EINVAL && pev->uprobes) + warn_uprobe_event_compat(tev); + + /* Note that it is possible to skip all events because of blacklist */ + if (ret >= 0 && tev->event) { + /* Show how to use the event. */ + pr_info("\nYou can now use it in all perf tools, such as:\n\n"); + pr_info("\tperf record -e %s:%s -aR sleep 1\n\n", tev->group, + tev->event); + } + + kprobe_blacklist__delete(&blacklist); + strlist__delete(namelist); + close(fd); + return ret; +} + +static int find_probe_functions(struct map *map, char *name) +{ + int found = 0; + struct symbol *sym; + + map__for_each_symbol_by_name(map, name, sym) { + found++; + } + + return found; +} + +#define strdup_or_goto(str, label) \ + ({ char *__p = strdup(str); if (!__p) goto label; __p; }) + +/* + * Find probe function addresses from map. + * Return an error or the number of found probe_trace_event + */ +static int find_probe_trace_events_from_map(struct perf_probe_event *pev, + struct probe_trace_event **tevs, + int max_tevs, const char *target) +{ + struct map *map = NULL; + struct ref_reloc_sym *reloc_sym = NULL; + struct symbol *sym; + struct probe_trace_event *tev; + struct perf_probe_point *pp = &pev->point; + struct probe_trace_point *tp; + int num_matched_functions; + int ret, i; + + map = get_target_map(target, pev->uprobes); + if (!map) { + ret = -EINVAL; + goto out; + } + + /* + * Load matched symbols: Since the different local symbols may have + * same name but different addresses, this lists all the symbols. + */ + num_matched_functions = find_probe_functions(map, pp->function); + if (num_matched_functions == 0) { + pr_err("Failed to find symbol %s in %s\n", pp->function, + target ? : "kernel"); + ret = -ENOENT; + goto out; + } else if (num_matched_functions > max_tevs) { + pr_err("Too many functions matched in %s\n", + target ? 
: "kernel"); + ret = -E2BIG; + goto out; + } + + if (!pev->uprobes && !pp->retprobe) { + reloc_sym = kernel_get_ref_reloc_sym(); + if (!reloc_sym) { + pr_warning("Relocated base symbol is not found!\n"); + ret = -EINVAL; + goto out; + } + } + + /* Setup result trace-probe-events */ + *tevs = zalloc(sizeof(*tev) * num_matched_functions); + if (!*tevs) { + ret = -ENOMEM; + goto out; + } + + ret = 0; + + map__for_each_symbol_by_name(map, pp->function, sym) { + tev = (*tevs) + ret; + tp = &tev->point; + if (ret == num_matched_functions) { + pr_warning("Too many symbols are listed. Skip it.\n"); + break; + } + ret++; + + if (pp->offset > sym->end - sym->start) { + pr_warning("Offset %ld is bigger than the size of %s\n", + pp->offset, sym->name); + ret = -ENOENT; + goto err_out; + } + /* Add one probe point */ + tp->address = map->unmap_ip(map, sym->start) + pp->offset; + if (reloc_sym) { + tp->symbol = strdup_or_goto(reloc_sym->name, nomem_out); + tp->offset = tp->address - reloc_sym->addr; + } else { + tp->symbol = strdup_or_goto(sym->name, nomem_out); + tp->offset = pp->offset; + } + tp->retprobe = pp->retprobe; + if (target) + tev->point.module = strdup_or_goto(target, nomem_out); + tev->uprobes = pev->uprobes; + tev->nargs = pev->nargs; + if (tev->nargs) { + tev->args = zalloc(sizeof(struct probe_trace_arg) * + tev->nargs); + if (tev->args == NULL) + goto nomem_out; + } + for (i = 0; i < tev->nargs; i++) { + if (pev->args[i].name) + tev->args[i].name = + strdup_or_goto(pev->args[i].name, + nomem_out); + + tev->args[i].value = strdup_or_goto(pev->args[i].var, + nomem_out); + if (pev->args[i].type) + tev->args[i].type = + strdup_or_goto(pev->args[i].type, + nomem_out); + } + } + +out: + put_target_map(map, pev->uprobes); + return ret; + +nomem_out: + ret = -ENOMEM; +err_out: + clear_probe_trace_events(*tevs, num_matched_functions); + zfree(tevs); + goto out; +} + +static int convert_to_probe_trace_events(struct perf_probe_event *pev, + struct probe_trace_event **tevs, + int max_tevs, const char *target) +{ + int ret; + + if (pev->uprobes && !pev->group) { + /* Replace group name if not given */ + ret = convert_exec_to_group(target, &pev->group); + if (ret != 0) { + pr_warning("Failed to make a group name.\n"); + return ret; + } + } + + /* Convert perf_probe_event with debuginfo */ + ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, target); + if (ret != 0) + return ret; /* Found in debuginfo or got an error */ + + return find_probe_trace_events_from_map(pev, tevs, max_tevs, target); +} + +struct __event_package { + struct perf_probe_event *pev; + struct probe_trace_event *tevs; + int ntevs; +}; + +int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, + int max_tevs, bool force_add) +{ + int i, j, ret; + struct __event_package *pkgs; + + ret = 0; + pkgs = zalloc(sizeof(struct __event_package) * npevs); + + if (pkgs == NULL) + return -ENOMEM; + + ret = init_symbol_maps(pevs->uprobes); + if (ret < 0) { + free(pkgs); + return ret; + } + + /* Loop 1: convert all events */ + for (i = 0; i < npevs; i++) { + pkgs[i].pev = &pevs[i]; + /* Convert with or without debuginfo */ + ret = convert_to_probe_trace_events(pkgs[i].pev, + &pkgs[i].tevs, + max_tevs, + pkgs[i].pev->target); + if (ret < 0) + goto end; + pkgs[i].ntevs = ret; + } + + /* Loop 2: add all events */ + for (i = 0; i < npevs; i++) { + ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs, + pkgs[i].ntevs, force_add); + if (ret < 0) + break; + } +end: + /* Loop 3: cleanup and free trace events */ + for (i = 0; i < 
npevs; i++) { + for (j = 0; j < pkgs[i].ntevs; j++) + clear_probe_trace_event(&pkgs[i].tevs[j]); + zfree(&pkgs[i].tevs); + } + free(pkgs); + exit_symbol_maps(); + + return ret; +} + +static int __del_trace_probe_event(int fd, struct str_node *ent) +{ + char *p; + char buf[128]; + int ret; + + /* Convert from perf-probe event to trace-probe event */ + ret = e_snprintf(buf, 128, "-:%s", ent->s); + if (ret < 0) + goto error; + + p = strchr(buf + 2, ':'); + if (!p) { + pr_debug("Internal error: %s should have ':' but not.\n", + ent->s); + ret = -ENOTSUP; + goto error; + } + *p = '/'; + + pr_debug("Writing event: %s\n", buf); + ret = write(fd, buf, strlen(buf)); + if (ret < 0) { + ret = -errno; + goto error; + } + + pr_info("Removed event: %s\n", ent->s); + return 0; +error: + pr_warning("Failed to delete event: %s\n", + strerror_r(-ret, buf, sizeof(buf))); + return ret; +} + +static int del_trace_probe_event(int fd, const char *buf, + struct strlist *namelist) +{ + struct str_node *ent, *n; + int ret = -1; + + if (strpbrk(buf, "*?")) { /* Glob-exp */ + strlist__for_each_safe(ent, n, namelist) + if (strglobmatch(ent->s, buf)) { + ret = __del_trace_probe_event(fd, ent); + if (ret < 0) + break; + strlist__remove(namelist, ent); + } + } else { + ent = strlist__find(namelist, buf); + if (ent) { + ret = __del_trace_probe_event(fd, ent); + if (ret >= 0) + strlist__remove(namelist, ent); + } + } + + return ret; +} + +int del_perf_probe_events(struct strlist *dellist) +{ + int ret = -1, ufd = -1, kfd = -1; + char buf[128]; + const char *group, *event; + char *p, *str; + struct str_node *ent; + struct strlist *namelist = NULL, *unamelist = NULL; + + /* Get current event names */ + kfd = open_kprobe_events(true); + if (kfd >= 0) + namelist = get_probe_trace_event_names(kfd, true); + + ufd = open_uprobe_events(true); + if (ufd >= 0) + unamelist = get_probe_trace_event_names(ufd, true); + + if (kfd < 0 && ufd < 0) { + print_both_open_warning(kfd, ufd); + goto error; + } + + if (namelist == NULL && unamelist == NULL) + goto error; + + strlist__for_each(ent, dellist) { + str = strdup(ent->s); + if (str == NULL) { + ret = -ENOMEM; + goto error; + } + pr_debug("Parsing: %s\n", str); + p = strchr(str, ':'); + if (p) { + group = str; + *p = '\0'; + event = p + 1; + } else { + group = "*"; + event = str; + } + + ret = e_snprintf(buf, 128, "%s:%s", group, event); + if (ret < 0) { + pr_err("Failed to copy event."); + free(str); + goto error; + } + + pr_debug("Group: %s, Event: %s\n", group, event); + + if (namelist) + ret = del_trace_probe_event(kfd, buf, namelist); + + if (unamelist && ret != 0) + ret = del_trace_probe_event(ufd, buf, unamelist); + + if (ret != 0) + pr_info("Info: Event \"%s\" does not exist.\n", buf); + + free(str); + } + +error: + if (kfd >= 0) { + strlist__delete(namelist); + close(kfd); + } + + if (ufd >= 0) { + strlist__delete(unamelist); + close(ufd); + } + + return ret; +} + +/* TODO: don't use a global variable for filter ... */ +static struct strfilter *available_func_filter; + +/* + * If a symbol corresponds to a function with global binding and + * matches filter return 0. For all others return 1. 
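+ * (map__load() uses this as its symbol filter; a non-zero return discards the symbol, so 0 keeps a match and 1 drops everything else.)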
+ */ +static int filter_available_functions(struct map *map __maybe_unused, + struct symbol *sym) +{ + if (strfilter__compare(available_func_filter, sym->name)) + return 0; + return 1; +} + +int show_available_funcs(const char *target, struct strfilter *_filter, + bool user) +{ + struct map *map; + int ret; + + ret = init_symbol_maps(user); + if (ret < 0) + return ret; + + /* Get a symbol map */ + if (user) + map = dso__new_map(target); + else + map = kernel_get_module_map(target); + if (!map) { + pr_err("Failed to get a map for %s\n", (target) ? : "kernel"); + return -EINVAL; + } + + /* Load symbols with given filter */ + available_func_filter = _filter; + if (map__load(map, filter_available_functions)) { + pr_err("Failed to load symbols in %s\n", (target) ? : "kernel"); + goto end; + } + if (!dso__sorted_by_name(map->dso, map->type)) + dso__sort_by_name(map->dso, map->type); + + /* Show all (filtered) symbols */ + setup_pager(); + dso__fprintf_symbols_by_name(map->dso, map->type, stdout); +end: + if (user) { + dso__delete(map->dso); + map__delete(map); + } + exit_symbol_maps(); + + return ret; +} + diff --git a/kernel/tools/perf/util/probe-event.h b/kernel/tools/perf/util/probe-event.h new file mode 100644 index 000000000..d6b783447 --- /dev/null +++ b/kernel/tools/perf/util/probe-event.h @@ -0,0 +1,142 @@ +#ifndef _PROBE_EVENT_H +#define _PROBE_EVENT_H + +#include +#include "intlist.h" +#include "strlist.h" +#include "strfilter.h" + +extern bool probe_event_dry_run; + +/* kprobe-tracer and uprobe-tracer tracing point */ +struct probe_trace_point { + char *symbol; /* Base symbol */ + char *module; /* Module name */ + unsigned long offset; /* Offset from symbol */ + unsigned long address; /* Actual address of the trace point */ + bool retprobe; /* Return probe flag */ +}; + +/* probe-tracer tracing argument referencing offset */ +struct probe_trace_arg_ref { + struct probe_trace_arg_ref *next; /* Next reference */ + long offset; /* Offset value */ +}; + +/* kprobe-tracer and uprobe-tracer tracing argument */ +struct probe_trace_arg { + char *name; /* Argument name */ + char *value; /* Base value */ + char *type; /* Type name */ + struct probe_trace_arg_ref *ref; /* Referencing offset */ +}; + +/* kprobe-tracer and uprobe-tracer tracing event (point + arg) */ +struct probe_trace_event { + char *event; /* Event name */ + char *group; /* Group name */ + struct probe_trace_point point; /* Trace point */ + int nargs; /* Number of args */ + bool uprobes; /* uprobes only */ + struct probe_trace_arg *args; /* Arguments */ +}; + +/* Perf probe probing point */ +struct perf_probe_point { + char *file; /* File path */ + char *function; /* Function name */ + int line; /* Line number */ + bool retprobe; /* Return probe flag */ + char *lazy_line; /* Lazy matching pattern */ + unsigned long offset; /* Offset from function entry */ +}; + +/* Perf probe probing argument field chain */ +struct perf_probe_arg_field { + struct perf_probe_arg_field *next; /* Next field */ + char *name; /* Name of the field */ + long index; /* Array index number */ + bool ref; /* Referencing flag */ +}; + +/* Perf probe probing argument */ +struct perf_probe_arg { + char *name; /* Argument name */ + char *var; /* Variable name */ + char *type; /* Type name */ + struct perf_probe_arg_field *field; /* Structure fields */ +}; + +/* Perf probe probing event (point + arg) */ +struct perf_probe_event { + char *event; /* Event name */ + char *group; /* Group name */ + struct perf_probe_point point; /* Probe point */ + int nargs; /* 
Number of arguments */ + bool uprobes; /* Uprobe event flag */ + char *target; /* Target binary */ + struct perf_probe_arg *args; /* Arguments */ +}; + +/* Line range */ +struct line_range { + char *file; /* File name */ + char *function; /* Function name */ + int start; /* Start line number */ + int end; /* End line number */ + int offset; /* Start line offset */ + char *path; /* Real path name */ + char *comp_dir; /* Compile directory */ + struct intlist *line_list; /* Visible lines */ +}; + +/* List of variables */ +struct variable_list { + struct probe_trace_point point; /* Actual probepoint */ + struct strlist *vars; /* Available variables */ +}; + +/* Command string to events */ +extern int parse_perf_probe_command(const char *cmd, + struct perf_probe_event *pev); + +/* Events to command string */ +extern char *synthesize_perf_probe_command(struct perf_probe_event *pev); +extern char *synthesize_probe_trace_command(struct probe_trace_event *tev); +extern int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, + size_t len); + +/* Check the perf_probe_event needs debuginfo */ +extern bool perf_probe_event_need_dwarf(struct perf_probe_event *pev); + +/* Release event contents */ +extern void clear_perf_probe_event(struct perf_probe_event *pev); + +/* Command string to line-range */ +extern int parse_line_range_desc(const char *cmd, struct line_range *lr); + +/* Release line range members */ +extern void line_range__clear(struct line_range *lr); + +/* Initialize line range */ +extern int line_range__init(struct line_range *lr); + +/* Internal use: Return kernel/module path */ +extern const char *kernel_get_module_path(const char *module); + +extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, + int max_probe_points, bool force_add); +extern int del_perf_probe_events(struct strlist *dellist); +extern int show_perf_probe_events(void); +extern int show_line_range(struct line_range *lr, const char *module, + bool user); +extern int show_available_vars(struct perf_probe_event *pevs, int npevs, + int max_probe_points, const char *module, + struct strfilter *filter, bool externs); +extern int show_available_funcs(const char *module, struct strfilter *filter, + bool user); + +/* Maximum index number of event-name postfix */ +#define MAX_EVENT_INDEX 1024 + +#endif /*_PROBE_EVENT_H */ diff --git a/kernel/tools/perf/util/probe-finder.c b/kernel/tools/perf/util/probe-finder.c new file mode 100644 index 000000000..2a76e14db --- /dev/null +++ b/kernel/tools/perf/util/probe-finder.c @@ -0,0 +1,1695 @@ +/* + * probe-finder.c : C expression to kprobe event converter + * + * Written by Masami Hiramatsu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + +#include <sys/utsname.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <errno.h> +#include <stdio.h> +#include <unistd.h> +#include <stdlib.h> +#include <string.h> +#include <stdarg.h> +#include <dwarf-regs.h> + +#include <linux/bitops.h> +#include "event.h" +#include "dso.h" +#include "debug.h" +#include "intlist.h" +#include "util.h" +#include "symbol.h" +#include "probe-finder.h" + +/* Kprobe tracer basic type is up to u64 */ +#define MAX_BASIC_TYPE_BITS 64 + +/* Dwarf FL wrappers */ +static char *debuginfo_path; /* Currently dummy */ + +static const Dwfl_Callbacks offline_callbacks = { + .find_debuginfo = dwfl_standard_find_debuginfo, + .debuginfo_path = &debuginfo_path, + + .section_address = dwfl_offline_section_address, + + /* We use this table for core files too. */ + .find_elf = dwfl_build_id_find_elf, +}; + +/* Get a Dwarf from offline image */ +static int debuginfo__init_offline_dwarf(struct debuginfo *dbg, + const char *path) +{ + int fd; + + fd = open(path, O_RDONLY); + if (fd < 0) + return fd; + + dbg->dwfl = dwfl_begin(&offline_callbacks); + if (!dbg->dwfl) + goto error; + + dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd); + if (!dbg->mod) + goto error; + + dbg->dbg = dwfl_module_getdwarf(dbg->mod, &dbg->bias); + if (!dbg->dbg) + goto error; + + return 0; +error: + if (dbg->dwfl) + dwfl_end(dbg->dwfl); + else + close(fd); + memset(dbg, 0, sizeof(*dbg)); + + return -ENOENT; +} + +static struct debuginfo *__debuginfo__new(const char *path) +{ + struct debuginfo *dbg = zalloc(sizeof(*dbg)); + if (!dbg) + return NULL; + + if (debuginfo__init_offline_dwarf(dbg, path) < 0) + zfree(&dbg); + if (dbg) + pr_debug("Open Debuginfo file: %s\n", path); + return dbg; +} + +enum dso_binary_type distro_dwarf_types[] = { + DSO_BINARY_TYPE__FEDORA_DEBUGINFO, + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__NOT_FOUND, +}; + +struct debuginfo *debuginfo__new(const char *path) +{ + enum dso_binary_type *type; + char buf[PATH_MAX], nil = '\0'; + struct dso *dso; + struct debuginfo *dinfo = NULL; + + /* Try to open distro debuginfo files */ + dso = dso__new(path); + if (!dso) + goto out; + + for (type = distro_dwarf_types; + !dinfo && *type != DSO_BINARY_TYPE__NOT_FOUND; + type++) { + if (dso__read_binary_type_filename(dso, *type, &nil, + buf, PATH_MAX) < 0) + continue; + dinfo = __debuginfo__new(buf); + } + dso__delete(dso); + +out: + /* If we failed to open all distro debuginfo, open the given binary */ + return dinfo ? : __debuginfo__new(path); +} + +void debuginfo__delete(struct debuginfo *dbg) +{ + if (dbg) { + if (dbg->dwfl) + dwfl_end(dbg->dwfl); + free(dbg); + } +} + +/* + * Probe finder related functions + */ + +static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs) +{ + struct probe_trace_arg_ref *ref; + ref = zalloc(sizeof(struct probe_trace_arg_ref)); + if (ref != NULL) + ref->offset = offs; + return ref; +} + +/* + * Convert a location into trace_arg. + * If tvar == NULL, this just checks whether the variable can be converted. + * If fentry == true and vr_die is a parameter, do a heuristic search + * for the location fuzzed by function entry mcount.
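+ * (In that case the DWARF location list may not cover the function entry address itself, so the code below scans forward from the entry until dwarf_getlocation_addr() succeeds.)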
+ */ +static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr, + Dwarf_Op *fb_ops, Dwarf_Die *sp_die, + struct probe_trace_arg *tvar) +{ + Dwarf_Attribute attr; + Dwarf_Addr tmp = 0; + Dwarf_Op *op; + size_t nops; + unsigned int regn; + Dwarf_Word offs = 0; + bool ref = false; + const char *regs; + int ret; + + if (dwarf_attr(vr_die, DW_AT_external, &attr) != NULL) + goto static_var; + + /* TODO: handle more than 1 exprs */ + if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL) + return -EINVAL; /* Broken DIE ? */ + if (dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0) { + ret = dwarf_entrypc(sp_die, &tmp); + if (ret || addr != tmp || + dwarf_tag(vr_die) != DW_TAG_formal_parameter || + dwarf_highpc(sp_die, &tmp)) + return -ENOENT; + /* + * This is fuzzed by fentry mcount. We try to find the + * parameter location at the earliest address. + */ + for (addr += 1; addr <= tmp; addr++) { + if (dwarf_getlocation_addr(&attr, addr, &op, + &nops, 1) > 0) + goto found; + } + return -ENOENT; + } +found: + if (nops == 0) + /* TODO: Support const_value */ + return -ENOENT; + + if (op->atom == DW_OP_addr) { +static_var: + if (!tvar) + return 0; + /* Static variables on memory (not stack), make @varname */ + ret = strlen(dwarf_diename(vr_die)); + tvar->value = zalloc(ret + 2); + if (tvar->value == NULL) + return -ENOMEM; + snprintf(tvar->value, ret + 2, "@%s", dwarf_diename(vr_die)); + tvar->ref = alloc_trace_arg_ref((long)offs); + if (tvar->ref == NULL) + return -ENOMEM; + return 0; + } + + /* If this is based on frame buffer, set the offset */ + if (op->atom == DW_OP_fbreg) { + if (fb_ops == NULL) + return -ENOTSUP; + ref = true; + offs = op->number; + op = &fb_ops[0]; + } + + if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) { + regn = op->atom - DW_OP_breg0; + offs += op->number; + ref = true; + } else if (op->atom >= DW_OP_reg0 && op->atom <= DW_OP_reg31) { + regn = op->atom - DW_OP_reg0; + } else if (op->atom == DW_OP_bregx) { + regn = op->number; + offs += op->number2; + ref = true; + } else if (op->atom == DW_OP_regx) { + regn = op->number; + } else { + pr_debug("DW_OP %x is not supported.\n", op->atom); + return -ENOTSUP; + } + + if (!tvar) + return 0; + + regs = get_arch_regstr(regn); + if (!regs) { + /* This should be a bug in DWARF or this tool */ + pr_warning("Mapping for the register number %u " + "missing on this architecture.\n", regn); + return -ERANGE; + } + + tvar->value = strdup(regs); + if (tvar->value == NULL) + return -ENOMEM; + + if (ref) { + tvar->ref = alloc_trace_arg_ref((long)offs); + if (tvar->ref == NULL) + return -ENOMEM; + } + return 0; +} + +#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long)) + +static int convert_variable_type(Dwarf_Die *vr_die, + struct probe_trace_arg *tvar, + const char *cast) +{ + struct probe_trace_arg_ref **ref_ptr = &tvar->ref; + Dwarf_Die type; + char buf[16]; + char sbuf[STRERR_BUFSIZE]; + int bsize, boffs, total; + int ret; + + /* TODO: check all types */ + if (cast && strcmp(cast, "string") != 0) { + /* Non string type is OK */ + tvar->type = strdup(cast); + return (tvar->type == NULL) ? 
-ENOMEM : 0; + } + + bsize = dwarf_bitsize(vr_die); + if (bsize > 0) { + /* This is a bitfield */ + boffs = dwarf_bitoffset(vr_die); + total = dwarf_bytesize(vr_die); + if (boffs < 0 || total < 0) + return -ENOENT; + ret = snprintf(buf, 16, "b%d@%d/%zd", bsize, boffs, + BYTES_TO_BITS(total)); + goto formatted; + } + + if (die_get_real_type(vr_die, &type) == NULL) { + pr_warning("Failed to get a type information of %s.\n", + dwarf_diename(vr_die)); + return -ENOENT; + } + + pr_debug("%s type is %s.\n", + dwarf_diename(vr_die), dwarf_diename(&type)); + + if (cast && strcmp(cast, "string") == 0) { /* String type */ + ret = dwarf_tag(&type); + if (ret != DW_TAG_pointer_type && + ret != DW_TAG_array_type) { + pr_warning("Failed to cast into string: " + "%s(%s) is not a pointer nor array.\n", + dwarf_diename(vr_die), dwarf_diename(&type)); + return -EINVAL; + } + if (die_get_real_type(&type, &type) == NULL) { + pr_warning("Failed to get a type" + " information.\n"); + return -ENOENT; + } + if (ret == DW_TAG_pointer_type) { + while (*ref_ptr) + ref_ptr = &(*ref_ptr)->next; + /* Add new reference with offset +0 */ + *ref_ptr = zalloc(sizeof(struct probe_trace_arg_ref)); + if (*ref_ptr == NULL) { + pr_warning("Out of memory error\n"); + return -ENOMEM; + } + } + if (!die_compare_name(&type, "char") && + !die_compare_name(&type, "unsigned char")) { + pr_warning("Failed to cast into string: " + "%s is not (unsigned) char *.\n", + dwarf_diename(vr_die)); + return -EINVAL; + } + tvar->type = strdup(cast); + return (tvar->type == NULL) ? -ENOMEM : 0; + } + + ret = dwarf_bytesize(&type); + if (ret <= 0) + /* No size ... try to use default type */ + return 0; + ret = BYTES_TO_BITS(ret); + + /* Check the bitwidth */ + if (ret > MAX_BASIC_TYPE_BITS) { + pr_info("%s exceeds max-bitwidth. Cut down to %d bits.\n", + dwarf_diename(&type), MAX_BASIC_TYPE_BITS); + ret = MAX_BASIC_TYPE_BITS; + } + ret = snprintf(buf, 16, "%c%d", + die_is_signed_type(&type) ? 
's' : 'u', ret); + +formatted: + if (ret < 0 || ret >= 16) { + if (ret >= 16) + ret = -E2BIG; + pr_warning("Failed to convert variable type: %s\n", + strerror_r(-ret, sbuf, sizeof(sbuf))); + return ret; + } + tvar->type = strdup(buf); + if (tvar->type == NULL) + return -ENOMEM; + return 0; +} + +static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname, + struct perf_probe_arg_field *field, + struct probe_trace_arg_ref **ref_ptr, + Dwarf_Die *die_mem) +{ + struct probe_trace_arg_ref *ref = *ref_ptr; + Dwarf_Die type; + Dwarf_Word offs; + int ret, tag; + + pr_debug("converting %s in %s\n", field->name, varname); + if (die_get_real_type(vr_die, &type) == NULL) { + pr_warning("Failed to get the type of %s.\n", varname); + return -ENOENT; + } + pr_debug2("Var real type: (%x)\n", (unsigned)dwarf_dieoffset(&type)); + tag = dwarf_tag(&type); + + if (field->name[0] == '[' && + (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)) { + if (field->next) + /* Save original type for next field */ + memcpy(die_mem, &type, sizeof(*die_mem)); + /* Get the type of this array */ + if (die_get_real_type(&type, &type) == NULL) { + pr_warning("Failed to get the type of %s.\n", varname); + return -ENOENT; + } + pr_debug2("Array real type: (%x)\n", + (unsigned)dwarf_dieoffset(&type)); + if (tag == DW_TAG_pointer_type) { + ref = zalloc(sizeof(struct probe_trace_arg_ref)); + if (ref == NULL) + return -ENOMEM; + if (*ref_ptr) + (*ref_ptr)->next = ref; + else + *ref_ptr = ref; + } + ref->offset += dwarf_bytesize(&type) * field->index; + if (!field->next) + /* Save vr_die for converting types */ + memcpy(die_mem, vr_die, sizeof(*die_mem)); + goto next; + } else if (tag == DW_TAG_pointer_type) { + /* Check the pointer and dereference */ + if (!field->ref) { + pr_err("Semantic error: %s must be referred by '->'\n", + field->name); + return -EINVAL; + } + /* Get the type pointed by this pointer */ + if (die_get_real_type(&type, &type) == NULL) { + pr_warning("Failed to get the type of %s.\n", varname); + return -ENOENT; + } + /* Verify it is a data structure */ + tag = dwarf_tag(&type); + if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) { + pr_warning("%s is not a data structure nor an union.\n", + varname); + return -EINVAL; + } + + ref = zalloc(sizeof(struct probe_trace_arg_ref)); + if (ref == NULL) + return -ENOMEM; + if (*ref_ptr) + (*ref_ptr)->next = ref; + else + *ref_ptr = ref; + } else { + /* Verify it is a data structure */ + if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) { + pr_warning("%s is not a data structure nor an union.\n", + varname); + return -EINVAL; + } + if (field->name[0] == '[') { + pr_err("Semantic error: %s is not a pointer" + " nor array.\n", varname); + return -EINVAL; + } + /* While processing an unnamed field, we don't care about this */ + if (field->ref && dwarf_diename(vr_die)) { + pr_err("Semantic error: %s must be referred by '.'\n", + field->name); + return -EINVAL; + } + if (!ref) { + pr_warning("Structure on a register is not " + "supported yet.\n"); + return -ENOTSUP; + } + } + + if (die_find_member(&type, field->name, die_mem) == NULL) { + pr_warning("%s(type:%s) has no member %s.\n", varname, + dwarf_diename(&type), field->name); + return -EINVAL; + } + + /* Get the offset of the field */ + if (tag == DW_TAG_union_type) { + offs = 0; + } else { + ret = die_get_data_member_location(die_mem, &offs); + if (ret < 0) { + pr_warning("Failed to get the offset of %s.\n", + field->name); + return ret; + } + } + ref->offset += (long)offs; + + /*
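+	 * Anonymous struct/union members carry no DW_AT_name, so dwarf_diename() returns NULL for them.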
If this member is unnamed, we need to reuse this field */ + if (!dwarf_diename(die_mem)) + return convert_variable_fields(die_mem, varname, field, + &ref, die_mem); + +next: + /* Converting next field */ + if (field->next) + return convert_variable_fields(die_mem, field->name, + field->next, &ref, die_mem); + else + return 0; +} + +/* Show a variable in kprobe event format */ +static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf) +{ + Dwarf_Die die_mem; + int ret; + + pr_debug("Converting variable %s into trace event.\n", + dwarf_diename(vr_die)); + + ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops, + &pf->sp_die, pf->tvar); + if (ret == -ENOENT || ret == -EINVAL) + pr_err("Failed to find the location of %s at this address.\n" + " Perhaps, it has been optimized out.\n", pf->pvar->var); + else if (ret == -ENOTSUP) + pr_err("Sorry, we don't support this variable location yet.\n"); + else if (ret == 0 && pf->pvar->field) { + ret = convert_variable_fields(vr_die, pf->pvar->var, + pf->pvar->field, &pf->tvar->ref, + &die_mem); + vr_die = &die_mem; + } + if (ret == 0) + ret = convert_variable_type(vr_die, pf->tvar, pf->pvar->type); + /* *expr will be cached in libdw. Don't free it. */ + return ret; +} + +/* Find a variable in a scope DIE */ +static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf) +{ + Dwarf_Die vr_die; + char buf[32], *ptr; + int ret = 0; + + if (!is_c_varname(pf->pvar->var)) { + /* Copy raw parameters */ + pf->tvar->value = strdup(pf->pvar->var); + if (pf->tvar->value == NULL) + return -ENOMEM; + if (pf->pvar->type) { + pf->tvar->type = strdup(pf->pvar->type); + if (pf->tvar->type == NULL) + return -ENOMEM; + } + if (pf->pvar->name) { + pf->tvar->name = strdup(pf->pvar->name); + if (pf->tvar->name == NULL) + return -ENOMEM; + } else + pf->tvar->name = NULL; + return 0; + } + + if (pf->pvar->name) + pf->tvar->name = strdup(pf->pvar->name); + else { + ret = synthesize_perf_probe_arg(pf->pvar, buf, 32); + if (ret < 0) + return ret; + ptr = strchr(buf, ':'); /* Change type separator to _ */ + if (ptr) + *ptr = '_'; + pf->tvar->name = strdup(buf); + } + if (pf->tvar->name == NULL) + return -ENOMEM; + + pr_debug("Searching '%s' variable in context.\n", pf->pvar->var); + /* Search child die for local variables and parameters.
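+	 * Locals and parameters take precedence; if the variable is not found in this scope DIE, the CU DIE is searched below as a fallback for globals.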
*/ + if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) { + /* Search again in global variables */ + if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, + 0, &vr_die)) { + pr_warning("Failed to find '%s' in this function.\n", + pf->pvar->var); + ret = -ENOENT; + } + } + if (ret >= 0) + ret = convert_variable(&vr_die, pf); + + return ret; +} + +/* Convert subprogram DIE to trace point */ +static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod, + Dwarf_Addr paddr, bool retprobe, + struct probe_trace_point *tp) +{ + Dwarf_Addr eaddr, highaddr; + GElf_Sym sym; + const char *symbol; + + /* Verify the address is correct */ + if (dwarf_entrypc(sp_die, &eaddr) != 0) { + pr_warning("Failed to get entry address of %s\n", + dwarf_diename(sp_die)); + return -ENOENT; + } + if (dwarf_highpc(sp_die, &highaddr) != 0) { + pr_warning("Failed to get end address of %s\n", + dwarf_diename(sp_die)); + return -ENOENT; + } + if (paddr > highaddr) { + pr_warning("Offset specified is greater than size of %s\n", + dwarf_diename(sp_die)); + return -EINVAL; + } + + symbol = dwarf_diename(sp_die); + if (!symbol) { + /* Try to get the symbol name from symtab */ + symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL); + if (!symbol) { + pr_warning("Failed to find symbol at 0x%lx\n", + (unsigned long)paddr); + return -ENOENT; + } + eaddr = sym.st_value; + } + tp->offset = (unsigned long)(paddr - eaddr); + tp->address = (unsigned long)paddr; + tp->symbol = strdup(symbol); + if (!tp->symbol) + return -ENOMEM; + + /* Return probe must be on the head of a subprogram */ + if (retprobe) { + if (eaddr != paddr) { + pr_warning("Return probe must be on the head of" + " a real function.\n"); + return -EINVAL; + } + tp->retprobe = true; + } + + return 0; +} + +/* Call probe_finder callback with scope DIE */ +static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf) +{ + Dwarf_Attribute fb_attr; + size_t nops; + int ret; + + if (!sc_die) { + pr_err("Caller must pass a scope DIE. Program error.\n"); + return -EINVAL; + } + + /* If not a real subprogram, find a real one */ + if (!die_is_func_def(sc_die)) { + if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) { + pr_warning("Failed to find probe point in any " + "functions.\n"); + return -ENOENT; + } + } else + memcpy(&pf->sp_die, sc_die, sizeof(Dwarf_Die)); + + /* Get the frame base attribute/ops from subprogram */ + dwarf_attr(&pf->sp_die, DW_AT_frame_base, &fb_attr); + ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1); + if (ret <= 0 || nops == 0) { + pf->fb_ops = NULL; +#if _ELFUTILS_PREREQ(0, 142) + } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa && + pf->cfi != NULL) { + Dwarf_Frame *frame; + if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 || + dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) { + pr_warning("Failed to get call frame on 0x%jx\n", + (uintmax_t)pf->addr); + return -ENOENT; + } +#endif + } + + /* Call finder's callback handler */ + ret = pf->callback(sc_die, pf); + + /* *pf->fb_ops will be cached in libdw. Don't free it. 
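+	 * Only the pointer is reset here; the memory returned by dwarf_getlocation_addr() and dwarf_frame_cfa() above is owned by libdw.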
*/ + pf->fb_ops = NULL; + + return ret; +} + +struct find_scope_param { + const char *function; + const char *file; + int line; + int diff; + Dwarf_Die *die_mem; + bool found; +}; + +static int find_best_scope_cb(Dwarf_Die *fn_die, void *data) +{ + struct find_scope_param *fsp = data; + const char *file; + int lno; + + /* Skip if declared file name does not match */ + if (fsp->file) { + file = dwarf_decl_file(fn_die); + if (!file || strcmp(fsp->file, file) != 0) + return 0; + } + /* If the function name is given, that's what user expects */ + if (fsp->function) { + if (die_compare_name(fn_die, fsp->function)) { + memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die)); + fsp->found = true; + return 1; + } + } else { + /* With the line number, find the nearest declared DIE */ + dwarf_decl_line(fn_die, &lno); + if (lno < fsp->line && fsp->diff > fsp->line - lno) { + /* Keep a candidate and continue */ + fsp->diff = fsp->line - lno; + memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die)); + fsp->found = true; + } + } + return 0; +} + +/* Find an appropriate scope fits to given conditions */ +static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem) +{ + struct find_scope_param fsp = { + .function = pf->pev->point.function, + .file = pf->fname, + .line = pf->lno, + .diff = INT_MAX, + .die_mem = die_mem, + .found = false, + }; + + cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, &fsp); + + return fsp.found ? die_mem : NULL; +} + +static int probe_point_line_walker(const char *fname, int lineno, + Dwarf_Addr addr, void *data) +{ + struct probe_finder *pf = data; + Dwarf_Die *sc_die, die_mem; + int ret; + + if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0) + return 0; + + pf->addr = addr; + sc_die = find_best_scope(pf, &die_mem); + if (!sc_die) { + pr_warning("Failed to find scope of probe point.\n"); + return -ENOENT; + } + + ret = call_probe_finder(sc_die, pf); + + /* Continue if no error, because the line will be in inline function */ + return ret < 0 ? 
ret : 0; +} + +/* Find probe point from its line number */ +static int find_probe_point_by_line(struct probe_finder *pf) +{ + return die_walk_lines(&pf->cu_die, probe_point_line_walker, pf); +} + +/* Find lines which match lazy pattern */ +static int find_lazy_match_lines(struct intlist *list, + const char *fname, const char *pat) +{ + FILE *fp; + char *line = NULL; + size_t line_len; + ssize_t len; + int count = 0, linenum = 1; + char sbuf[STRERR_BUFSIZE]; + + fp = fopen(fname, "r"); + if (!fp) { + pr_warning("Failed to open %s: %s\n", fname, + strerror_r(errno, sbuf, sizeof(sbuf))); + return -errno; + } + + while ((len = getline(&line, &line_len, fp)) > 0) { + + if (line[len - 1] == '\n') + line[len - 1] = '\0'; + + if (strlazymatch(line, pat)) { + intlist__add(list, linenum); + count++; + } + linenum++; + } + + if (ferror(fp)) + count = -errno; + free(line); + fclose(fp); + + if (count == 0) + pr_debug("No matched lines found in %s.\n", fname); + return count; +} + +static int probe_point_lazy_walker(const char *fname, int lineno, + Dwarf_Addr addr, void *data) +{ + struct probe_finder *pf = data; + Dwarf_Die *sc_die, die_mem; + int ret; + + if (!intlist__has_entry(pf->lcache, lineno) || + strtailcmp(fname, pf->fname) != 0) + return 0; + + pr_debug("Probe line found: line:%d addr:0x%llx\n", + lineno, (unsigned long long)addr); + pf->addr = addr; + pf->lno = lineno; + sc_die = find_best_scope(pf, &die_mem); + if (!sc_die) { + pr_warning("Failed to find scope of probe point.\n"); + return -ENOENT; + } + + ret = call_probe_finder(sc_die, pf); + + /* + * Continue if no error, because the lazy pattern will match + * to other lines + */ + return ret < 0 ? ret : 0; +} + +/* Find probe points from lazy pattern */ +static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf) +{ + int ret = 0; + char *fpath; + + if (intlist__empty(pf->lcache)) { + const char *comp_dir; + + comp_dir = cu_get_comp_dir(&pf->cu_die); + ret = get_real_path(pf->fname, comp_dir, &fpath); + if (ret < 0) { + pr_warning("Failed to find source file path.\n"); + return ret; + } + + /* Matching lazy line pattern */ + ret = find_lazy_match_lines(pf->lcache, fpath, + pf->pev->point.lazy_line); + free(fpath); + if (ret <= 0) + return ret; + } + + return die_walk_lines(sp_die, probe_point_lazy_walker, pf); +} + +static int probe_point_inline_cb(Dwarf_Die *in_die, void *data) +{ + struct probe_finder *pf = data; + struct perf_probe_point *pp = &pf->pev->point; + Dwarf_Addr addr; + int ret; + + if (pp->lazy_line) + ret = find_probe_point_lazy(in_die, pf); + else { + /* Get probe address */ + if (dwarf_entrypc(in_die, &addr) != 0) { + pr_warning("Failed to get entry address of %s.\n", + dwarf_diename(in_die)); + return -ENOENT; + } + pf->addr = addr; + pf->addr += pp->offset; + pr_debug("found inline addr: 0x%jx\n", + (uintmax_t)pf->addr); + + ret = call_probe_finder(in_die, pf); + } + + return ret; +} + +/* Callback parameter with return value for libdw */ +struct dwarf_callback_param { + void *data; + int retval; +}; + +/* Search function from function name */ +static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) +{ + struct dwarf_callback_param *param = data; + struct probe_finder *pf = param->data; + struct perf_probe_point *pp = &pf->pev->point; + + /* Check tag and diename */ + if (!die_is_func_def(sp_die) || + !die_compare_name(sp_die, pp->function)) + return DWARF_CB_OK; + + /* Check declared file */ + if (pp->file && strtailcmp(pp->file, dwarf_decl_file(sp_die))) + return DWARF_CB_OK; + + 
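+	/* The function DIE matched: record its source file, then locate the probe point by relative line, lazy pattern, entry offset or inlined instance */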
pf->fname = dwarf_decl_file(sp_die); + if (pp->line) { /* Function relative line */ + dwarf_decl_line(sp_die, &pf->lno); + pf->lno += pp->line; + param->retval = find_probe_point_by_line(pf); + } else if (die_is_func_instance(sp_die)) { + /* Instances always have the entry address */ + dwarf_entrypc(sp_die, &pf->addr); + /* Real function */ + if (pp->lazy_line) + param->retval = find_probe_point_lazy(sp_die, pf); + else { + pf->addr += pp->offset; + /* TODO: Check the address in this function */ + param->retval = call_probe_finder(sp_die, pf); + } + } else + /* Inlined function: search instances */ + param->retval = die_walk_instances(sp_die, + probe_point_inline_cb, (void *)pf); + + return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */ +} + +static int find_probe_point_by_func(struct probe_finder *pf) +{ + struct dwarf_callback_param _param = {.data = (void *)pf, + .retval = 0}; + dwarf_getfuncs(&pf->cu_die, probe_point_search_cb, &_param, 0); + return _param.retval; +} + +struct pubname_callback_param { + char *function; + char *file; + Dwarf_Die *cu_die; + Dwarf_Die *sp_die; + int found; +}; + +static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data) +{ + struct pubname_callback_param *param = data; + + if (dwarf_offdie(dbg, gl->die_offset, param->sp_die)) { + if (dwarf_tag(param->sp_die) != DW_TAG_subprogram) + return DWARF_CB_OK; + + if (die_compare_name(param->sp_die, param->function)) { + if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die)) + return DWARF_CB_OK; + + if (param->file && + strtailcmp(param->file, dwarf_decl_file(param->sp_die))) + return DWARF_CB_OK; + + param->found = 1; + return DWARF_CB_ABORT; + } + } + + return DWARF_CB_OK; +} + +/* Find probe points from debuginfo */ +static int debuginfo__find_probes(struct debuginfo *dbg, + struct probe_finder *pf) +{ + struct perf_probe_point *pp = &pf->pev->point; + Dwarf_Off off, noff; + size_t cuhl; + Dwarf_Die *diep; + int ret = 0; + +#if _ELFUTILS_PREREQ(0, 142) + Elf *elf; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + + /* Get the call frame information from this dwarf */ + elf = dwarf_getelf(dbg->dbg); + if (elf == NULL) + return -EINVAL; + + if (gelf_getehdr(elf, &ehdr) == NULL) + return -EINVAL; + + if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) && + shdr.sh_type == SHT_PROGBITS) { + pf->cfi = dwarf_getcfi_elf(elf); + } else { + pf->cfi = dwarf_getcfi(dbg->dbg); + } +#endif + + off = 0; + pf->lcache = intlist__new(NULL); + if (!pf->lcache) + return -ENOMEM; + + /* Fastpath: lookup by function name from .debug_pubnames section */ + if (pp->function) { + struct pubname_callback_param pubname_param = { + .function = pp->function, + .file = pp->file, + .cu_die = &pf->cu_die, + .sp_die = &pf->sp_die, + .found = 0, + }; + struct dwarf_callback_param probe_param = { + .data = pf, + }; + + dwarf_getpubnames(dbg->dbg, pubname_search_cb, + &pubname_param, 0); + if (pubname_param.found) { + ret = probe_point_search_cb(&pf->sp_die, &probe_param); + if (ret) + goto found; + } + } + + /* Loop on CUs (Compilation Unit) */ + while (!dwarf_nextcu(dbg->dbg, off, &noff, &cuhl, NULL, NULL, NULL)) { + /* Get the DIE(Debugging Information Entry) of this CU */ + diep = dwarf_offdie(dbg->dbg, off + cuhl, &pf->cu_die); + if (!diep) + continue; + + /* Check if target file is included. 
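+ * cu_find_realpath() returns NULL when this CU does not contain pp->file, so such CUs are skipped below.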
*/ + if (pp->file) + pf->fname = cu_find_realpath(&pf->cu_die, pp->file); + else + pf->fname = NULL; + + if (!pp->file || pf->fname) { + if (pp->function) + ret = find_probe_point_by_func(pf); + else if (pp->lazy_line) + ret = find_probe_point_lazy(&pf->cu_die, pf); + else { + pf->lno = pp->line; + ret = find_probe_point_by_line(pf); + } + if (ret < 0) + break; + } + off = noff; + } + +found: + intlist__delete(pf->lcache); + pf->lcache = NULL; + + return ret; +} + +struct local_vars_finder { + struct probe_finder *pf; + struct perf_probe_arg *args; + int max_args; + int nargs; + int ret; +}; + +/* Collect available variables in this scope */ +static int copy_variables_cb(Dwarf_Die *die_mem, void *data) +{ + struct local_vars_finder *vf = data; + struct probe_finder *pf = vf->pf; + int tag; + + tag = dwarf_tag(die_mem); + if (tag == DW_TAG_formal_parameter || + tag == DW_TAG_variable) { + if (convert_variable_location(die_mem, vf->pf->addr, + vf->pf->fb_ops, &pf->sp_die, + NULL) == 0) { + vf->args[vf->nargs].var = (char *)dwarf_diename(die_mem); + if (vf->args[vf->nargs].var == NULL) { + vf->ret = -ENOMEM; + return DIE_FIND_CB_END; + } + pr_debug(" %s", vf->args[vf->nargs].var); + vf->nargs++; + } + } + + if (dwarf_haspc(die_mem, vf->pf->addr)) + return DIE_FIND_CB_CONTINUE; + else + return DIE_FIND_CB_SIBLING; +} + +static int expand_probe_args(Dwarf_Die *sc_die, struct probe_finder *pf, + struct perf_probe_arg *args) +{ + Dwarf_Die die_mem; + int i; + int n = 0; + struct local_vars_finder vf = {.pf = pf, .args = args, + .max_args = MAX_PROBE_ARGS, .ret = 0}; + + for (i = 0; i < pf->pev->nargs; i++) { + /* var never be NULL */ + if (strcmp(pf->pev->args[i].var, "$vars") == 0) { + pr_debug("Expanding $vars into:"); + vf.nargs = n; + /* Special local variables */ + die_find_child(sc_die, copy_variables_cb, (void *)&vf, + &die_mem); + pr_debug(" (%d)\n", vf.nargs - n); + if (vf.ret < 0) + return vf.ret; + n = vf.nargs; + } else { + /* Copy normal argument */ + args[n] = pf->pev->args[i]; + n++; + } + } + return n; +} + +/* Add a found probe point into trace event list */ +static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) +{ + struct trace_event_finder *tf = + container_of(pf, struct trace_event_finder, pf); + struct probe_trace_event *tev; + struct perf_probe_arg *args; + int ret, i; + + /* Check number of tevs */ + if (tf->ntevs == tf->max_tevs) { + pr_warning("Too many( > %d) probe point found.\n", + tf->max_tevs); + return -ERANGE; + } + tev = &tf->tevs[tf->ntevs++]; + + /* Trace point should be converted from subprogram DIE */ + ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr, + pf->pev->point.retprobe, &tev->point); + if (ret < 0) + return ret; + + pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, + tev->point.offset); + + /* Expand special probe argument if exist */ + args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS); + if (args == NULL) + return -ENOMEM; + + ret = expand_probe_args(sc_die, pf, args); + if (ret < 0) + goto end; + + tev->nargs = ret; + tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs); + if (tev->args == NULL) { + ret = -ENOMEM; + goto end; + } + + /* Find each argument */ + for (i = 0; i < tev->nargs; i++) { + pf->pvar = &args[i]; + pf->tvar = &tev->args[i]; + /* Variable should be found from scope DIE */ + ret = find_variable(sc_die, pf); + if (ret != 0) + break; + } + +end: + free(args); + return ret; +} + +/* Find probe_trace_events specified by perf_probe_event from debuginfo */ +int 
debuginfo__find_trace_events(struct debuginfo *dbg, + struct perf_probe_event *pev, + struct probe_trace_event **tevs, int max_tevs) +{ + struct trace_event_finder tf = { + .pf = {.pev = pev, .callback = add_probe_trace_event}, + .mod = dbg->mod, .max_tevs = max_tevs}; + int ret; + + /* Allocate result tevs array */ + *tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs); + if (*tevs == NULL) + return -ENOMEM; + + tf.tevs = *tevs; + tf.ntevs = 0; + + ret = debuginfo__find_probes(dbg, &tf.pf); + if (ret < 0) { + zfree(tevs); + return ret; + } + + return (ret < 0) ? ret : tf.ntevs; +} + +#define MAX_VAR_LEN 64 + +/* Collect available variables in this scope */ +static int collect_variables_cb(Dwarf_Die *die_mem, void *data) +{ + struct available_var_finder *af = data; + struct variable_list *vl; + char buf[MAX_VAR_LEN]; + int tag, ret; + + vl = &af->vls[af->nvls - 1]; + + tag = dwarf_tag(die_mem); + if (tag == DW_TAG_formal_parameter || + tag == DW_TAG_variable) { + ret = convert_variable_location(die_mem, af->pf.addr, + af->pf.fb_ops, &af->pf.sp_die, + NULL); + if (ret == 0) { + ret = die_get_varname(die_mem, buf, MAX_VAR_LEN); + pr_debug2("Add new var: %s\n", buf); + if (ret > 0) + strlist__add(vl->vars, buf); + } + } + + if (af->child && dwarf_haspc(die_mem, af->pf.addr)) + return DIE_FIND_CB_CONTINUE; + else + return DIE_FIND_CB_SIBLING; +} + +/* Add a found vars into available variables list */ +static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf) +{ + struct available_var_finder *af = + container_of(pf, struct available_var_finder, pf); + struct variable_list *vl; + Dwarf_Die die_mem; + int ret; + + /* Check number of tevs */ + if (af->nvls == af->max_vls) { + pr_warning("Too many( > %d) probe point found.\n", af->max_vls); + return -ERANGE; + } + vl = &af->vls[af->nvls++]; + + /* Trace point should be converted from subprogram DIE */ + ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr, + pf->pev->point.retprobe, &vl->point); + if (ret < 0) + return ret; + + pr_debug("Probe point found: %s+%lu\n", vl->point.symbol, + vl->point.offset); + + /* Find local variables */ + vl->vars = strlist__new(true, NULL); + if (vl->vars == NULL) + return -ENOMEM; + af->child = true; + die_find_child(sc_die, collect_variables_cb, (void *)af, &die_mem); + + /* Find external variables */ + if (!af->externs) + goto out; + /* Don't need to search child DIE for externs. */ + af->child = false; + die_find_child(&pf->cu_die, collect_variables_cb, (void *)af, &die_mem); + +out: + if (strlist__empty(vl->vars)) { + strlist__delete(vl->vars); + vl->vars = NULL; + } + + return ret; +} + +/* + * Find available variables at given probe point + * Return the number of found probe points. Return 0 if there is no + * matched probe point. Return <0 if an error occurs. 
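+ * + * A minimal caller sketch (illustrative only; assumes "dinfo" and "pev" were prepared earlier, e.g. with debuginfo__new()): + * + *	struct variable_list *vls = NULL; + *	int n = debuginfo__find_available_vars_at(dinfo, pev, &vls, + *						  MAX_PROBES, false); + *	// n < 0 is an error; otherwise vls[0..n-1] hold the found lists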
+ */ +int debuginfo__find_available_vars_at(struct debuginfo *dbg, + struct perf_probe_event *pev, + struct variable_list **vls, + int max_vls, bool externs) +{ + struct available_var_finder af = { + .pf = {.pev = pev, .callback = add_available_vars}, + .mod = dbg->mod, + .max_vls = max_vls, .externs = externs}; + int ret; + + /* Allocate result vls array */ + *vls = zalloc(sizeof(struct variable_list) * max_vls); + if (*vls == NULL) + return -ENOMEM; + + af.vls = *vls; + af.nvls = 0; + + ret = debuginfo__find_probes(dbg, &af.pf); + if (ret < 0) { + /* Free vlist for error */ + while (af.nvls--) { + zfree(&af.vls[af.nvls].point.symbol); + strlist__delete(af.vls[af.nvls].vars); + } + zfree(vls); + return ret; + } + + return (ret < 0) ? ret : af.nvls; +} + +/* Reverse search */ +int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, + struct perf_probe_point *ppt) +{ + Dwarf_Die cudie, spdie, indie; + Dwarf_Addr _addr = 0, baseaddr = 0; + const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp; + int baseline = 0, lineno = 0, ret = 0; + + /* Find cu die */ + if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { + pr_warning("Failed to find debug information for address %lx\n", + addr); + ret = -EINVAL; + goto end; + } + + /* Find a corresponding line (filename and lineno) */ + cu_find_lineinfo(&cudie, addr, &fname, &lineno); + /* Don't care whether it failed or not */ + + /* Find a corresponding function (name, baseline and baseaddr) */ + if (die_find_realfunc(&cudie, (Dwarf_Addr)addr, &spdie)) { + /* Get function entry information */ + func = basefunc = dwarf_diename(&spdie); + if (!func || + dwarf_entrypc(&spdie, &baseaddr) != 0 || + dwarf_decl_line(&spdie, &baseline) != 0) { + lineno = 0; + goto post; + } + + fname = dwarf_decl_file(&spdie); + if (addr == (unsigned long)baseaddr) { + /* Function entry - Relative line number is 0 */ + lineno = baseline; + goto post; + } + + /* Track down the inline functions step by step */ + while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr, + &indie)) { + /* There is an inline function */ + if (dwarf_entrypc(&indie, &_addr) == 0 && + _addr == addr) { + /* + * addr is at an inline function entry. + * In this case, lineno should be the call-site + * line number. (overwrite lineinfo) + */ + lineno = die_get_call_lineno(&indie); + fname = die_get_call_file(&indie); + break; + } else { + /* + * addr is in an inline function body. + * Since lineno points one of the lines + * of the inline function, baseline should + * be the entry line of the inline function. 
+ */ + tmp = dwarf_diename(&indie); + if (!tmp || + dwarf_decl_line(&indie, &baseline) != 0) + break; + func = tmp; + spdie = indie; + } + } + /* Verify the lineno and baseline are in a same file */ + tmp = dwarf_decl_file(&spdie); + if (!tmp || strcmp(tmp, fname) != 0) + lineno = 0; + } + +post: + /* Make a relative line number or an offset */ + if (lineno) + ppt->line = lineno - baseline; + else if (basefunc) { + ppt->offset = addr - (unsigned long)baseaddr; + func = basefunc; + } + + /* Duplicate strings */ + if (func) { + ppt->function = strdup(func); + if (ppt->function == NULL) { + ret = -ENOMEM; + goto end; + } + } + if (fname) { + ppt->file = strdup(fname); + if (ppt->file == NULL) { + zfree(&ppt->function); + ret = -ENOMEM; + goto end; + } + } +end: + if (ret == 0 && (fname || func)) + ret = 1; /* Found a point */ + return ret; +} + +/* Add a line and store the src path */ +static int line_range_add_line(const char *src, unsigned int lineno, + struct line_range *lr) +{ + /* Copy source path */ + if (!lr->path) { + lr->path = strdup(src); + if (lr->path == NULL) + return -ENOMEM; + } + return intlist__add(lr->line_list, lineno); +} + +static int line_range_walk_cb(const char *fname, int lineno, + Dwarf_Addr addr __maybe_unused, + void *data) +{ + struct line_finder *lf = data; + int err; + + if ((strtailcmp(fname, lf->fname) != 0) || + (lf->lno_s > lineno || lf->lno_e < lineno)) + return 0; + + err = line_range_add_line(fname, lineno, lf->lr); + if (err < 0 && err != -EEXIST) + return err; + + return 0; +} + +/* Find line range from its line number */ +static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf) +{ + int ret; + + ret = die_walk_lines(sp_die ?: &lf->cu_die, line_range_walk_cb, lf); + + /* Update status */ + if (ret >= 0) + if (!intlist__empty(lf->lr->line_list)) + ret = lf->found = 1; + else + ret = 0; /* Lines are not found */ + else { + zfree(&lf->lr->path); + } + return ret; +} + +static int line_range_inline_cb(Dwarf_Die *in_die, void *data) +{ + int ret = find_line_range_by_line(in_die, data); + + /* + * We have to check all instances of inlined function, because + * some execution paths can be optimized out depends on the + * function argument of instances. However, if an error occurs, + * it should be handled by the caller. + */ + return ret < 0 ? 
ret : 0; +} + +/* Search function definition from function name */ +static int line_range_search_cb(Dwarf_Die *sp_die, void *data) +{ + struct dwarf_callback_param *param = data; + struct line_finder *lf = param->data; + struct line_range *lr = lf->lr; + + /* Check declared file */ + if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die))) + return DWARF_CB_OK; + + if (die_is_func_def(sp_die) && + die_compare_name(sp_die, lr->function)) { + lf->fname = dwarf_decl_file(sp_die); + dwarf_decl_line(sp_die, &lr->offset); + pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset); + lf->lno_s = lr->offset + lr->start; + if (lf->lno_s < 0) /* Overflow */ + lf->lno_s = INT_MAX; + lf->lno_e = lr->offset + lr->end; + if (lf->lno_e < 0) /* Overflow */ + lf->lno_e = INT_MAX; + pr_debug("New line range: %d to %d\n", lf->lno_s, lf->lno_e); + lr->start = lf->lno_s; + lr->end = lf->lno_e; + if (!die_is_func_instance(sp_die)) + param->retval = die_walk_instances(sp_die, + line_range_inline_cb, lf); + else + param->retval = find_line_range_by_line(sp_die, lf); + return DWARF_CB_ABORT; + } + return DWARF_CB_OK; +} + +static int find_line_range_by_func(struct line_finder *lf) +{ + struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0}; + dwarf_getfuncs(&lf->cu_die, line_range_search_cb, &param, 0); + return param.retval; +} + +int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr) +{ + struct line_finder lf = {.lr = lr, .found = 0}; + int ret = 0; + Dwarf_Off off = 0, noff; + size_t cuhl; + Dwarf_Die *diep; + const char *comp_dir; + + /* Fastpath: lookup by function name from .debug_pubnames section */ + if (lr->function) { + struct pubname_callback_param pubname_param = { + .function = lr->function, .file = lr->file, + .cu_die = &lf.cu_die, .sp_die = &lf.sp_die, .found = 0}; + struct dwarf_callback_param line_range_param = { + .data = (void *)&lf, .retval = 0}; + + dwarf_getpubnames(dbg->dbg, pubname_search_cb, + &pubname_param, 0); + if (pubname_param.found) { + line_range_search_cb(&lf.sp_die, &line_range_param); + if (lf.found) + goto found; + } + } + + /* Loop on CUs (Compilation Unit) */ + while (!lf.found && ret >= 0) { + if (dwarf_nextcu(dbg->dbg, off, &noff, &cuhl, + NULL, NULL, NULL) != 0) + break; + + /* Get the DIE(Debugging Information Entry) of this CU */ + diep = dwarf_offdie(dbg->dbg, off + cuhl, &lf.cu_die); + if (!diep) + continue; + + /* Check if target file is included. */ + if (lr->file) + lf.fname = cu_find_realpath(&lf.cu_die, lr->file); + else + lf.fname = 0; + + if (!lr->file || lf.fname) { + if (lr->function) + ret = find_line_range_by_func(&lf); + else { + lf.lno_s = lr->start; + lf.lno_e = lr->end; + ret = find_line_range_by_line(NULL, &lf); + } + } + off = noff; + } + +found: + /* Store comp_dir */ + if (lf.found) { + comp_dir = cu_get_comp_dir(&lf.cu_die); + if (comp_dir) { + lr->comp_dir = strdup(comp_dir); + if (!lr->comp_dir) + ret = -ENOMEM; + } + } + + pr_debug("path: %s\n", lr->path); + return (ret < 0) ? ret : lf.found; +} + +/* + * Find a src file from a DWARF tag path. Prepend optional source path prefix + * and chop off leading directories that do not exist. Result is passed back as + * a newly allocated path on success. + * Return 0 if file was found and readable, -errno otherwise.
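+ * + * For example (illustrative values): with symbol_conf.source_prefix set to "/usr/src/debug" and raw_path "arch/x86/kernel/setup.c", the first candidate tried is "/usr/src/debug/arch/x86/kernel/setup.c"; if that fails with ENOENT, the leading "arch" component is chopped off and the search retried.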
+ */ +int get_real_path(const char *raw_path, const char *comp_dir, + char **new_path) +{ + const char *prefix = symbol_conf.source_prefix; + + if (!prefix) { + if (raw_path[0] != '/' && comp_dir) + /* If not an absolute path, try to use comp_dir */ + prefix = comp_dir; + else { + if (access(raw_path, R_OK) == 0) { + *new_path = strdup(raw_path); + return *new_path ? 0 : -ENOMEM; + } else + return -errno; + } + } + + *new_path = malloc((strlen(prefix) + strlen(raw_path) + 2)); + if (!*new_path) + return -ENOMEM; + + for (;;) { + sprintf(*new_path, "%s/%s", prefix, raw_path); + + if (access(*new_path, R_OK) == 0) + return 0; + + if (!symbol_conf.source_prefix) { + /* In case of searching comp_dir, don't retry */ + zfree(new_path); + return -errno; + } + + switch (errno) { + case ENAMETOOLONG: + case ENOENT: + case EROFS: + case EFAULT: + raw_path = strchr(++raw_path, '/'); + if (!raw_path) { + zfree(new_path); + return -ENOENT; + } + continue; + + default: + zfree(new_path); + return -errno; + } + } +} diff --git a/kernel/tools/perf/util/probe-finder.h b/kernel/tools/perf/util/probe-finder.h new file mode 100644 index 000000000..ebf8c8c81 --- /dev/null +++ b/kernel/tools/perf/util/probe-finder.h @@ -0,0 +1,116 @@ +#ifndef _PROBE_FINDER_H +#define _PROBE_FINDER_H + +#include +#include "util.h" +#include "intlist.h" +#include "probe-event.h" + +#define MAX_PROBE_BUFFER 1024 +#define MAX_PROBES 128 +#define MAX_PROBE_ARGS 128 + +static inline int is_c_varname(const char *name) +{ + /* TODO */ + return isalpha(name[0]) || name[0] == '_'; +} + +#ifdef HAVE_DWARF_SUPPORT + +#include "dwarf-aux.h" + +/* TODO: export debuginfo data structure even if no dwarf support */ + +/* debug information structure */ +struct debuginfo { + Dwarf *dbg; + Dwfl_Module *mod; + Dwfl *dwfl; + Dwarf_Addr bias; +}; + +/* This also tries to open distro debuginfo */ +extern struct debuginfo *debuginfo__new(const char *path); +extern void debuginfo__delete(struct debuginfo *dbg); + +/* Find probe_trace_events specified by perf_probe_event from debuginfo */ +extern int debuginfo__find_trace_events(struct debuginfo *dbg, + struct perf_probe_event *pev, + struct probe_trace_event **tevs, + int max_tevs); + +/* Find a perf_probe_point from debuginfo */ +extern int debuginfo__find_probe_point(struct debuginfo *dbg, + unsigned long addr, + struct perf_probe_point *ppt); + +/* Find a line range */ +extern int debuginfo__find_line_range(struct debuginfo *dbg, + struct line_range *lr); + +/* Find available variables */ +extern int debuginfo__find_available_vars_at(struct debuginfo *dbg, + struct perf_probe_event *pev, + struct variable_list **vls, + int max_points, bool externs); + +/* Find a src file from a DWARF tag path */ +int get_real_path(const char *raw_path, const char *comp_dir, + char **new_path); + +struct probe_finder { + struct perf_probe_event *pev; /* Target probe event */ + + /* Callback when a probe point is found */ + int (*callback)(Dwarf_Die *sc_die, struct probe_finder *pf); + + /* For function searching */ + int lno; /* Line number */ + Dwarf_Addr addr; /* Address */ + const char *fname; /* Real file name */ + Dwarf_Die cu_die; /* Current CU */ + Dwarf_Die sp_die; + struct intlist *lcache; /* Line cache for lazy match */ + + /* For variable searching */ +#if _ELFUTILS_PREREQ(0, 142) + Dwarf_CFI *cfi; /* Call Frame Information */ +#endif + Dwarf_Op *fb_ops; /* Frame base attribute */ + struct perf_probe_arg *pvar; /* Current target variable */ + struct probe_trace_arg *tvar; /* Current result variable */ +}; + 
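+/* + * The finder types below embed struct probe_finder; the callbacks in + * probe-finder.c recover the outer object from the embedded pf, e.g. + * (sketch of the existing pattern): + * + *	struct trace_event_finder *tf = + *		container_of(pf, struct trace_event_finder, pf); + */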
+struct trace_event_finder { + struct probe_finder pf; + Dwfl_Module *mod; /* For solving symbols */ + struct probe_trace_event *tevs; /* Found trace events */ + int ntevs; /* Number of trace events */ + int max_tevs; /* Max number of trace events */ +}; + +struct available_var_finder { + struct probe_finder pf; + Dwfl_Module *mod; /* For solving symbols */ + struct variable_list *vls; /* Found variable lists */ + int nvls; /* Number of variable lists */ + int max_vls; /* Max no. of variable lists */ + bool externs; /* Find external vars too */ + bool child; /* Search child scopes */ +}; + +struct line_finder { + struct line_range *lr; /* Target line range */ + + const char *fname; /* File name */ + int lno_s; /* Start line number */ + int lno_e; /* End line number */ + Dwarf_Die cu_die; /* Current CU */ + Dwarf_Die sp_die; + int found; +}; + +#endif /* HAVE_DWARF_SUPPORT */ + +#endif /*_PROBE_FINDER_H */ diff --git a/kernel/tools/perf/util/pstack.c b/kernel/tools/perf/util/pstack.c new file mode 100644 index 000000000..a126e6cc6 --- /dev/null +++ b/kernel/tools/perf/util/pstack.c @@ -0,0 +1,76 @@ +/* + * Simple pointer stack + * + * (c) 2010 Arnaldo Carvalho de Melo + */ + +#include "util.h" +#include "pstack.h" +#include "debug.h" +#include +#include + +struct pstack { + unsigned short top; + unsigned short max_nr_entries; + void *entries[0]; +}; + +struct pstack *pstack__new(unsigned short max_nr_entries) +{ + struct pstack *pstack = zalloc((sizeof(*pstack) + + max_nr_entries * sizeof(void *))); + if (pstack != NULL) + pstack->max_nr_entries = max_nr_entries; + return pstack; +} + +void pstack__delete(struct pstack *pstack) +{ + free(pstack); +} + +bool pstack__empty(const struct pstack *pstack) +{ + return pstack->top == 0; +} + +void pstack__remove(struct pstack *pstack, void *key) +{ + unsigned short i = pstack->top, last_index = pstack->top - 1; + + while (i-- != 0) { + if (pstack->entries[i] == key) { + if (i < last_index) + memmove(pstack->entries + i, + pstack->entries + i + 1, + (last_index - i) * sizeof(void *)); + --pstack->top; + return; + } + } + pr_err("%s: %p not on the pstack!\n", __func__, key); +} + +void pstack__push(struct pstack *pstack, void *key) +{ + if (pstack->top == pstack->max_nr_entries) { + pr_err("%s: top=%d, overflow!\n", __func__, pstack->top); + return; + } + pstack->entries[pstack->top++] = key; +} + +void *pstack__pop(struct pstack *pstack) +{ + void *ret; + + if (pstack->top == 0) { + pr_err("%s: underflow!\n", __func__); + return NULL; + } + + ret = pstack->entries[--pstack->top]; + pstack->entries[pstack->top] = NULL; + return ret; +} diff --git a/kernel/tools/perf/util/pstack.h b/kernel/tools/perf/util/pstack.h new file mode 100644 index 000000000..c3cb6584d --- /dev/null +++ b/kernel/tools/perf/util/pstack.h @@ -0,0 +1,14 @@ +#ifndef _PERF_PSTACK_ +#define _PERF_PSTACK_ + +#include + +struct pstack; +struct pstack *pstack__new(unsigned short max_nr_entries); +void pstack__delete(struct pstack *pstack); +bool pstack__empty(const struct pstack *pstack); +void pstack__remove(struct pstack *pstack, void *key); +void pstack__push(struct pstack *pstack, void *key); +void *pstack__pop(struct pstack *pstack); + +#endif /* _PERF_PSTACK_ */ diff --git a/kernel/tools/perf/util/python-ext-sources b/kernel/tools/perf/util/python-ext-sources new file mode 100644 index 000000000..4d28624a1 --- /dev/null +++ b/kernel/tools/perf/util/python-ext-sources @@ -0,0 +1,21 @@ +# +# List of files needed by perf python extension +# +# Each source file must be placed on its 
own line so that it can be +# processed by Makefile and util/setup.py accordingly. +# + +util/python.c +util/ctype.c +util/evlist.c +util/evsel.c +util/cpumap.c +../../lib/hweight.c +util/thread_map.c +util/util.c +util/xyarray.c +util/cgroup.c +util/rblist.c +util/strlist.c +util/trace-event.c +../../lib/rbtree.c diff --git a/kernel/tools/perf/util/python.c b/kernel/tools/perf/util/python.c new file mode 100644 index 000000000..d906d0ad5 --- /dev/null +++ b/kernel/tools/perf/util/python.c @@ -0,0 +1,1074 @@ +#include +#include +#include +#include +#include "evlist.h" +#include "evsel.h" +#include "event.h" +#include "cpumap.h" +#include "thread_map.h" + +/* + * Support debug printing even though util/debug.c is not linked. That means + * implementing 'verbose' and 'eprintf'. + */ +int verbose; + +int eprintf(int level, int var, const char *fmt, ...) +{ + va_list args; + int ret = 0; + + if (var >= level) { + va_start(args, fmt); + ret = vfprintf(stderr, fmt, args); + va_end(args); + } + + return ret; +} + +/* Define PyVarObject_HEAD_INIT for python 2.5 */ +#ifndef PyVarObject_HEAD_INIT +# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, +#endif + +PyMODINIT_FUNC initperf(void); + +#define member_def(type, member, ptype, help) \ + { #member, ptype, \ + offsetof(struct pyrf_event, event) + offsetof(struct type, member), \ + 0, help } + +#define sample_member_def(name, member, ptype, help) \ + { #name, ptype, \ + offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \ + 0, help } + +struct pyrf_event { + PyObject_HEAD + struct perf_sample sample; + union perf_event event; +}; + +#define sample_members \ + sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"), \ + sample_member_def(sample_pid, pid, T_INT, "event pid"), \ + sample_member_def(sample_tid, tid, T_INT, "event tid"), \ + sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \ + sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \ + sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \ + sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \ + sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \ + sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"), + +static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object."); + +static PyMemberDef pyrf_mmap_event__members[] = { + sample_members + member_def(perf_event_header, type, T_UINT, "event type"), + member_def(mmap_event, pid, T_UINT, "event pid"), + member_def(mmap_event, tid, T_UINT, "event tid"), + member_def(mmap_event, start, T_ULONGLONG, "start of the map"), + member_def(mmap_event, len, T_ULONGLONG, "map length"), + member_def(mmap_event, pgoff, T_ULONGLONG, "page offset"), + member_def(mmap_event, filename, T_STRING_INPLACE, "backing store"), + { .name = NULL, }, +}; + +static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent) +{ + PyObject *ret; + char *s; + + if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRIx64 ", " + "length: %#" PRIx64 ", offset: %#" PRIx64 ", " + "filename: %s }", + pevent->event.mmap.pid, pevent->event.mmap.tid, + pevent->event.mmap.start, pevent->event.mmap.len, + pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) { + ret = PyErr_NoMemory(); + } else { + ret = PyString_FromString(s); + free(s); + } + return ret; +} + +static PyTypeObject pyrf_mmap_event__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.mmap_event", + .tp_basicsize = sizeof(struct pyrf_event), + 
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_doc = pyrf_mmap_event__doc, + .tp_members = pyrf_mmap_event__members, + .tp_repr = (reprfunc)pyrf_mmap_event__repr, +}; + +static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object."); + +static PyMemberDef pyrf_task_event__members[] = { + sample_members + member_def(perf_event_header, type, T_UINT, "event type"), + member_def(fork_event, pid, T_UINT, "event pid"), + member_def(fork_event, ppid, T_UINT, "event ppid"), + member_def(fork_event, tid, T_UINT, "event tid"), + member_def(fork_event, ptid, T_UINT, "event ptid"), + member_def(fork_event, time, T_ULONGLONG, "timestamp"), + { .name = NULL, }, +}; + +static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent) +{ + return PyString_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, " + "ptid: %u, time: %" PRIu64 "}", + pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit", + pevent->event.fork.pid, + pevent->event.fork.ppid, + pevent->event.fork.tid, + pevent->event.fork.ptid, + pevent->event.fork.time); +} + +static PyTypeObject pyrf_task_event__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.task_event", + .tp_basicsize = sizeof(struct pyrf_event), + .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_doc = pyrf_task_event__doc, + .tp_members = pyrf_task_event__members, + .tp_repr = (reprfunc)pyrf_task_event__repr, +}; + +static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object."); + +static PyMemberDef pyrf_comm_event__members[] = { + sample_members + member_def(perf_event_header, type, T_UINT, "event type"), + member_def(comm_event, pid, T_UINT, "event pid"), + member_def(comm_event, tid, T_UINT, "event tid"), + member_def(comm_event, comm, T_STRING_INPLACE, "process name"), + { .name = NULL, }, +}; + +static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent) +{ + return PyString_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }", + pevent->event.comm.pid, + pevent->event.comm.tid, + pevent->event.comm.comm); +} + +static PyTypeObject pyrf_comm_event__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.comm_event", + .tp_basicsize = sizeof(struct pyrf_event), + .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_doc = pyrf_comm_event__doc, + .tp_members = pyrf_comm_event__members, + .tp_repr = (reprfunc)pyrf_comm_event__repr, +}; + +static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object."); + +static PyMemberDef pyrf_throttle_event__members[] = { + sample_members + member_def(perf_event_header, type, T_UINT, "event type"), + member_def(throttle_event, time, T_ULONGLONG, "timestamp"), + member_def(throttle_event, id, T_ULONGLONG, "event id"), + member_def(throttle_event, stream_id, T_ULONGLONG, "event stream id"), + { .name = NULL, }, +}; + +static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent) +{ + struct throttle_event *te = (struct throttle_event *)(&pevent->event.header + 1); + + return PyString_FromFormat("{ type: %sthrottle, time: %" PRIu64 ", id: %" PRIu64 + ", stream_id: %" PRIu64 " }", + pevent->event.header.type == PERF_RECORD_THROTTLE ? 
"" : "un", + te->time, te->id, te->stream_id); +} + +static PyTypeObject pyrf_throttle_event__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.throttle_event", + .tp_basicsize = sizeof(struct pyrf_event), + .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_doc = pyrf_throttle_event__doc, + .tp_members = pyrf_throttle_event__members, + .tp_repr = (reprfunc)pyrf_throttle_event__repr, +}; + +static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object."); + +static PyMemberDef pyrf_lost_event__members[] = { + sample_members + member_def(lost_event, id, T_ULONGLONG, "event id"), + member_def(lost_event, lost, T_ULONGLONG, "number of lost events"), + { .name = NULL, }, +}; + +static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent) +{ + PyObject *ret; + char *s; + + if (asprintf(&s, "{ type: lost, id: %#" PRIx64 ", " + "lost: %#" PRIx64 " }", + pevent->event.lost.id, pevent->event.lost.lost) < 0) { + ret = PyErr_NoMemory(); + } else { + ret = PyString_FromString(s); + free(s); + } + return ret; +} + +static PyTypeObject pyrf_lost_event__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.lost_event", + .tp_basicsize = sizeof(struct pyrf_event), + .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_doc = pyrf_lost_event__doc, + .tp_members = pyrf_lost_event__members, + .tp_repr = (reprfunc)pyrf_lost_event__repr, +}; + +static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object."); + +static PyMemberDef pyrf_read_event__members[] = { + sample_members + member_def(read_event, pid, T_UINT, "event pid"), + member_def(read_event, tid, T_UINT, "event tid"), + { .name = NULL, }, +}; + +static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent) +{ + return PyString_FromFormat("{ type: read, pid: %u, tid: %u }", + pevent->event.read.pid, + pevent->event.read.tid); + /* + * FIXME: return the array of read values, + * making this method useful ;-) + */ +} + +static PyTypeObject pyrf_read_event__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.read_event", + .tp_basicsize = sizeof(struct pyrf_event), + .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_doc = pyrf_read_event__doc, + .tp_members = pyrf_read_event__members, + .tp_repr = (reprfunc)pyrf_read_event__repr, +}; + +static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object."); + +static PyMemberDef pyrf_sample_event__members[] = { + sample_members + member_def(perf_event_header, type, T_UINT, "event type"), + { .name = NULL, }, +}; + +static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent) +{ + PyObject *ret; + char *s; + + if (asprintf(&s, "{ type: sample }") < 0) { + ret = PyErr_NoMemory(); + } else { + ret = PyString_FromString(s); + free(s); + } + return ret; +} + +static PyTypeObject pyrf_sample_event__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.sample_event", + .tp_basicsize = sizeof(struct pyrf_event), + .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_doc = pyrf_sample_event__doc, + .tp_members = pyrf_sample_event__members, + .tp_repr = (reprfunc)pyrf_sample_event__repr, +}; + +static int pyrf_event__setup_types(void) +{ + int err; + pyrf_mmap_event__type.tp_new = + pyrf_task_event__type.tp_new = + pyrf_comm_event__type.tp_new = + pyrf_lost_event__type.tp_new = + pyrf_read_event__type.tp_new = + pyrf_sample_event__type.tp_new = + pyrf_throttle_event__type.tp_new = PyType_GenericNew; + err = PyType_Ready(&pyrf_mmap_event__type); + if (err < 0) + goto out; + err = 
PyType_Ready(&pyrf_lost_event__type); + if (err < 0) + goto out; + err = PyType_Ready(&pyrf_task_event__type); + if (err < 0) + goto out; + err = PyType_Ready(&pyrf_comm_event__type); + if (err < 0) + goto out; + err = PyType_Ready(&pyrf_throttle_event__type); + if (err < 0) + goto out; + err = PyType_Ready(&pyrf_read_event__type); + if (err < 0) + goto out; + err = PyType_Ready(&pyrf_sample_event__type); + if (err < 0) + goto out; +out: + return err; +} + +static PyTypeObject *pyrf_event__type[] = { + [PERF_RECORD_MMAP] = &pyrf_mmap_event__type, + [PERF_RECORD_LOST] = &pyrf_lost_event__type, + [PERF_RECORD_COMM] = &pyrf_comm_event__type, + [PERF_RECORD_EXIT] = &pyrf_task_event__type, + [PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type, + [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type, + [PERF_RECORD_FORK] = &pyrf_task_event__type, + [PERF_RECORD_READ] = &pyrf_read_event__type, + [PERF_RECORD_SAMPLE] = &pyrf_sample_event__type, +}; + +static PyObject *pyrf_event__new(union perf_event *event) +{ + struct pyrf_event *pevent; + PyTypeObject *ptype; + + if (event->header.type < PERF_RECORD_MMAP || + event->header.type > PERF_RECORD_SAMPLE) + return NULL; + + ptype = pyrf_event__type[event->header.type]; + pevent = PyObject_New(struct pyrf_event, ptype); + if (pevent != NULL) + memcpy(&pevent->event, event, event->header.size); + return (PyObject *)pevent; +} + +struct pyrf_cpu_map { + PyObject_HEAD + + struct cpu_map *cpus; +}; + +static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus, + PyObject *args, PyObject *kwargs) +{ + static char *kwlist[] = { "cpustr", NULL }; + char *cpustr = NULL; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s", + kwlist, &cpustr)) + return -1; + + pcpus->cpus = cpu_map__new(cpustr); + if (pcpus->cpus == NULL) + return -1; + return 0; +} + +static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus) +{ + cpu_map__delete(pcpus->cpus); + pcpus->ob_type->tp_free((PyObject*)pcpus); +} + +static Py_ssize_t pyrf_cpu_map__length(PyObject *obj) +{ + struct pyrf_cpu_map *pcpus = (void *)obj; + + return pcpus->cpus->nr; +} + +static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i) +{ + struct pyrf_cpu_map *pcpus = (void *)obj; + + if (i >= pcpus->cpus->nr) + return NULL; + + return Py_BuildValue("i", pcpus->cpus->map[i]); +} + +static PySequenceMethods pyrf_cpu_map__sequence_methods = { + .sq_length = pyrf_cpu_map__length, + .sq_item = pyrf_cpu_map__item, +}; + +static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object."); + +static PyTypeObject pyrf_cpu_map__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.cpu_map", + .tp_basicsize = sizeof(struct pyrf_cpu_map), + .tp_dealloc = (destructor)pyrf_cpu_map__delete, + .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_doc = pyrf_cpu_map__doc, + .tp_as_sequence = &pyrf_cpu_map__sequence_methods, + .tp_init = (initproc)pyrf_cpu_map__init, +}; + +static int pyrf_cpu_map__setup_types(void) +{ + pyrf_cpu_map__type.tp_new = PyType_GenericNew; + return PyType_Ready(&pyrf_cpu_map__type); +} + +struct pyrf_thread_map { + PyObject_HEAD + + struct thread_map *threads; +}; + +static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads, + PyObject *args, PyObject *kwargs) +{ + static char *kwlist[] = { "pid", "tid", "uid", NULL }; + int pid = -1, tid = -1, uid = UINT_MAX; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii", + kwlist, &pid, &tid, &uid)) + return -1; + + pthreads->threads = thread_map__new(pid, tid, uid); + if (pthreads->threads == NULL) + return -1; + return 0; +} + 
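(Editorial aside: pyrf_event__new() earlier in this file is a textbook type-indexed dispatch: after a range check, the perf record type selects a PyTypeObject straight out of pyrf_event__type[]. The standalone analogue below shows the same shape with plain function pointers; record_kind and the handle_* names are invented for illustration and are not perf API.)

#include <stdio.h>

/* Mirrors PERF_RECORD_* starting at 1, so index 0 stays empty. */
enum record_kind { REC_MMAP = 1, REC_LOST, REC_COMM, REC_MAX };

typedef void (*record_handler)(int payload);

static void handle_mmap(int payload) { printf("mmap: %d\n", payload); }
static void handle_lost(int payload) { printf("lost: %d\n", payload); }
static void handle_comm(int payload) { printf("comm: %d\n", payload); }

/* Designated initializers, exactly as pyrf_event__type[] uses them. */
static const record_handler handlers[REC_MAX] = {
	[REC_MMAP] = handle_mmap,
	[REC_LOST] = handle_lost,
	[REC_COMM] = handle_comm,
};

static void dispatch(int kind, int payload)
{
	/* Same guard as pyrf_event__new(): never index outside the table. */
	if (kind < REC_MMAP || kind >= REC_MAX || !handlers[kind])
		return;
	handlers[kind](payload);
}

int main(void)
{
	dispatch(REC_COMM, 42);
	dispatch(99, 0);	/* out of range: dropped, like an unknown event */
	return 0;
}

(The bounds check is what keeps an unexpected header.type from indexing past the table, which is why pyrf_event__new() rejects anything outside PERF_RECORD_MMAP..PERF_RECORD_SAMPLE before the lookup.)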
+static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads) +{ + thread_map__delete(pthreads->threads); + pthreads->ob_type->tp_free((PyObject*)pthreads); +} + +static Py_ssize_t pyrf_thread_map__length(PyObject *obj) +{ + struct pyrf_thread_map *pthreads = (void *)obj; + + return pthreads->threads->nr; +} + +static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i) +{ + struct pyrf_thread_map *pthreads = (void *)obj; + + if (i >= pthreads->threads->nr) + return NULL; + + return Py_BuildValue("i", pthreads->threads->map[i]); +} + +static PySequenceMethods pyrf_thread_map__sequence_methods = { + .sq_length = pyrf_thread_map__length, + .sq_item = pyrf_thread_map__item, +}; + +static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object."); + +static PyTypeObject pyrf_thread_map__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.thread_map", + .tp_basicsize = sizeof(struct pyrf_thread_map), + .tp_dealloc = (destructor)pyrf_thread_map__delete, + .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_doc = pyrf_thread_map__doc, + .tp_as_sequence = &pyrf_thread_map__sequence_methods, + .tp_init = (initproc)pyrf_thread_map__init, +}; + +static int pyrf_thread_map__setup_types(void) +{ + pyrf_thread_map__type.tp_new = PyType_GenericNew; + return PyType_Ready(&pyrf_thread_map__type); +} + +struct pyrf_evsel { + PyObject_HEAD + + struct perf_evsel evsel; +}; + +static int pyrf_evsel__init(struct pyrf_evsel *pevsel, + PyObject *args, PyObject *kwargs) +{ + struct perf_event_attr attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID, + }; + static char *kwlist[] = { + "type", + "config", + "sample_freq", + "sample_period", + "sample_type", + "read_format", + "disabled", + "inherit", + "pinned", + "exclusive", + "exclude_user", + "exclude_kernel", + "exclude_hv", + "exclude_idle", + "mmap", + "comm", + "freq", + "inherit_stat", + "enable_on_exec", + "task", + "watermark", + "precise_ip", + "mmap_data", + "sample_id_all", + "wakeup_events", + "bp_type", + "bp_addr", + "bp_len", + NULL + }; + u64 sample_period = 0; + u32 disabled = 0, + inherit = 0, + pinned = 0, + exclusive = 0, + exclude_user = 0, + exclude_kernel = 0, + exclude_hv = 0, + exclude_idle = 0, + mmap = 0, + comm = 0, + freq = 1, + inherit_stat = 0, + enable_on_exec = 0, + task = 0, + watermark = 0, + precise_ip = 0, + mmap_data = 0, + sample_id_all = 1; + int idx = 0; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, + "|iKiKKiiiiiiiiiiiiiiiiiiiiiKK", kwlist, + &attr.type, &attr.config, &attr.sample_freq, + &sample_period, &attr.sample_type, + &attr.read_format, &disabled, &inherit, + &pinned, &exclusive, &exclude_user, + &exclude_kernel, &exclude_hv, &exclude_idle, + &mmap, &comm, &freq, &inherit_stat, + &enable_on_exec, &task, &watermark, + &precise_ip, &mmap_data, &sample_id_all, + &attr.wakeup_events, &attr.bp_type, + &attr.bp_addr, &attr.bp_len, &idx)) + return -1; + + /* union... 
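(editorial note: the union in question lives in struct perf_event_attr, where sample_period and sample_freq share storage; only one of the two can meaningfully be set, which is why the code just below returns -1 when a caller passes both.)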
*/ + if (sample_period != 0) { + if (attr.sample_freq != 0) + return -1; /* FIXME: throw right exception */ + attr.sample_period = sample_period; + } + + /* Bitfields */ + attr.disabled = disabled; + attr.inherit = inherit; + attr.pinned = pinned; + attr.exclusive = exclusive; + attr.exclude_user = exclude_user; + attr.exclude_kernel = exclude_kernel; + attr.exclude_hv = exclude_hv; + attr.exclude_idle = exclude_idle; + attr.mmap = mmap; + attr.comm = comm; + attr.freq = freq; + attr.inherit_stat = inherit_stat; + attr.enable_on_exec = enable_on_exec; + attr.task = task; + attr.watermark = watermark; + attr.precise_ip = precise_ip; + attr.mmap_data = mmap_data; + attr.sample_id_all = sample_id_all; + + perf_evsel__init(&pevsel->evsel, &attr, idx); + return 0; +} + +static void pyrf_evsel__delete(struct pyrf_evsel *pevsel) +{ + perf_evsel__exit(&pevsel->evsel); + pevsel->ob_type->tp_free((PyObject*)pevsel); +} + +static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel, + PyObject *args, PyObject *kwargs) +{ + struct perf_evsel *evsel = &pevsel->evsel; + struct cpu_map *cpus = NULL; + struct thread_map *threads = NULL; + PyObject *pcpus = NULL, *pthreads = NULL; + int group = 0, inherit = 0; + static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL }; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, + &pcpus, &pthreads, &group, &inherit)) + return NULL; + + if (pthreads != NULL) + threads = ((struct pyrf_thread_map *)pthreads)->threads; + + if (pcpus != NULL) + cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; + + evsel->attr.inherit = inherit; + /* + * This will group just the fds for this single evsel, to group + * multiple events, use evlist.open(). + */ + if (perf_evsel__open(evsel, cpus, threads) < 0) { + PyErr_SetFromErrno(PyExc_OSError); + return NULL; + } + + Py_INCREF(Py_None); + return Py_None; +} + +static PyMethodDef pyrf_evsel__methods[] = { + { + .ml_name = "open", + .ml_meth = (PyCFunction)pyrf_evsel__open, + .ml_flags = METH_VARARGS | METH_KEYWORDS, + .ml_doc = PyDoc_STR("open the event selector file descriptor table.") + }, + { .ml_name = NULL, } +}; + +static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object."); + +static PyTypeObject pyrf_evsel__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.evsel", + .tp_basicsize = sizeof(struct pyrf_evsel), + .tp_dealloc = (destructor)pyrf_evsel__delete, + .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_doc = pyrf_evsel__doc, + .tp_methods = pyrf_evsel__methods, + .tp_init = (initproc)pyrf_evsel__init, +}; + +static int pyrf_evsel__setup_types(void) +{ + pyrf_evsel__type.tp_new = PyType_GenericNew; + return PyType_Ready(&pyrf_evsel__type); +} + +struct pyrf_evlist { + PyObject_HEAD + + struct perf_evlist evlist; +}; + +static int pyrf_evlist__init(struct pyrf_evlist *pevlist, + PyObject *args, PyObject *kwargs __maybe_unused) +{ + PyObject *pcpus = NULL, *pthreads = NULL; + struct cpu_map *cpus; + struct thread_map *threads; + + if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads)) + return -1; + + threads = ((struct pyrf_thread_map *)pthreads)->threads; + cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; + perf_evlist__init(&pevlist->evlist, cpus, threads); + return 0; +} + +static void pyrf_evlist__delete(struct pyrf_evlist *pevlist) +{ + perf_evlist__exit(&pevlist->evlist); + pevlist->ob_type->tp_free((PyObject*)pevlist); +} + +static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist, + PyObject *args, PyObject *kwargs) +{ + struct perf_evlist *evlist = 
&pevlist->evlist; + static char *kwlist[] = { "pages", "overwrite", NULL }; + int pages = 128, overwrite = false; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist, + &pages, &overwrite)) + return NULL; + + if (perf_evlist__mmap(evlist, pages, overwrite) < 0) { + PyErr_SetFromErrno(PyExc_OSError); + return NULL; + } + + Py_INCREF(Py_None); + return Py_None; +} + +static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist, + PyObject *args, PyObject *kwargs) +{ + struct perf_evlist *evlist = &pevlist->evlist; + static char *kwlist[] = { "timeout", NULL }; + int timeout = -1, n; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout)) + return NULL; + + n = perf_evlist__poll(evlist, timeout); + if (n < 0) { + PyErr_SetFromErrno(PyExc_OSError); + return NULL; + } + + return Py_BuildValue("i", n); +} + +static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist, + PyObject *args __maybe_unused, + PyObject *kwargs __maybe_unused) +{ + struct perf_evlist *evlist = &pevlist->evlist; + PyObject *list = PyList_New(0); + int i; + + for (i = 0; i < evlist->pollfd.nr; ++i) { + PyObject *file; + FILE *fp = fdopen(evlist->pollfd.entries[i].fd, "r"); + + if (fp == NULL) + goto free_list; + + file = PyFile_FromFile(fp, "perf", "r", NULL); + if (file == NULL) + goto free_list; + + if (PyList_Append(list, file) != 0) { + Py_DECREF(file); + goto free_list; + } + + Py_DECREF(file); + } + + return list; +free_list: + return PyErr_NoMemory(); +} + + +static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist, + PyObject *args, + PyObject *kwargs __maybe_unused) +{ + struct perf_evlist *evlist = &pevlist->evlist; + PyObject *pevsel; + struct perf_evsel *evsel; + + if (!PyArg_ParseTuple(args, "O", &pevsel)) + return NULL; + + Py_INCREF(pevsel); + evsel = &((struct pyrf_evsel *)pevsel)->evsel; + evsel->idx = evlist->nr_entries; + perf_evlist__add(evlist, evsel); + + return Py_BuildValue("i", evlist->nr_entries); +} + +static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, + PyObject *args, PyObject *kwargs) +{ + struct perf_evlist *evlist = &pevlist->evlist; + union perf_event *event; + int sample_id_all = 1, cpu; + static char *kwlist[] = { "cpu", "sample_id_all", NULL }; + int err; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist, + &cpu, &sample_id_all)) + return NULL; + + event = perf_evlist__mmap_read(evlist, cpu); + if (event != NULL) { + PyObject *pyevent = pyrf_event__new(event); + struct pyrf_event *pevent = (struct pyrf_event *)pyevent; + + perf_evlist__mmap_consume(evlist, cpu); + + if (pyevent == NULL) + return PyErr_NoMemory(); + + err = perf_evlist__parse_sample(evlist, event, &pevent->sample); + if (err) + return PyErr_Format(PyExc_OSError, + "perf: can't parse sample, err=%d", err); + return pyevent; + } + + Py_INCREF(Py_None); + return Py_None; +} + +static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist, + PyObject *args, PyObject *kwargs) +{ + struct perf_evlist *evlist = &pevlist->evlist; + int group = 0; + static char *kwlist[] = { "group", NULL }; + + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, &group)) + return NULL; + + if (group) + perf_evlist__set_leader(evlist); + + if (perf_evlist__open(evlist) < 0) { + PyErr_SetFromErrno(PyExc_OSError); + return NULL; + } + + Py_INCREF(Py_None); + return Py_None; +} + +static PyMethodDef pyrf_evlist__methods[] = { + { + .ml_name = "mmap", + .ml_meth = (PyCFunction)pyrf_evlist__mmap, + .ml_flags = METH_VARARGS | METH_KEYWORDS, + .ml_doc = 
PyDoc_STR("mmap the file descriptor table.") + }, + { + .ml_name = "open", + .ml_meth = (PyCFunction)pyrf_evlist__open, + .ml_flags = METH_VARARGS | METH_KEYWORDS, + .ml_doc = PyDoc_STR("open the file descriptors.") + }, + { + .ml_name = "poll", + .ml_meth = (PyCFunction)pyrf_evlist__poll, + .ml_flags = METH_VARARGS | METH_KEYWORDS, + .ml_doc = PyDoc_STR("poll the file descriptor table.") + }, + { + .ml_name = "get_pollfd", + .ml_meth = (PyCFunction)pyrf_evlist__get_pollfd, + .ml_flags = METH_VARARGS | METH_KEYWORDS, + .ml_doc = PyDoc_STR("get the poll file descriptor table.") + }, + { + .ml_name = "add", + .ml_meth = (PyCFunction)pyrf_evlist__add, + .ml_flags = METH_VARARGS | METH_KEYWORDS, + .ml_doc = PyDoc_STR("adds an event selector to the list.") + }, + { + .ml_name = "read_on_cpu", + .ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu, + .ml_flags = METH_VARARGS | METH_KEYWORDS, + .ml_doc = PyDoc_STR("reads an event.") + }, + { .ml_name = NULL, } +}; + +static Py_ssize_t pyrf_evlist__length(PyObject *obj) +{ + struct pyrf_evlist *pevlist = (void *)obj; + + return pevlist->evlist.nr_entries; +} + +static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i) +{ + struct pyrf_evlist *pevlist = (void *)obj; + struct perf_evsel *pos; + + if (i >= pevlist->evlist.nr_entries) + return NULL; + + evlist__for_each(&pevlist->evlist, pos) { + if (i-- == 0) + break; + } + + return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel)); +} + +static PySequenceMethods pyrf_evlist__sequence_methods = { + .sq_length = pyrf_evlist__length, + .sq_item = pyrf_evlist__item, +}; + +static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object."); + +static PyTypeObject pyrf_evlist__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.evlist", + .tp_basicsize = sizeof(struct pyrf_evlist), + .tp_dealloc = (destructor)pyrf_evlist__delete, + .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_as_sequence = &pyrf_evlist__sequence_methods, + .tp_doc = pyrf_evlist__doc, + .tp_methods = pyrf_evlist__methods, + .tp_init = (initproc)pyrf_evlist__init, +}; + +static int pyrf_evlist__setup_types(void) +{ + pyrf_evlist__type.tp_new = PyType_GenericNew; + return PyType_Ready(&pyrf_evlist__type); +} + +static struct { + const char *name; + int value; +} perf__constants[] = { + { "TYPE_HARDWARE", PERF_TYPE_HARDWARE }, + { "TYPE_SOFTWARE", PERF_TYPE_SOFTWARE }, + { "TYPE_TRACEPOINT", PERF_TYPE_TRACEPOINT }, + { "TYPE_HW_CACHE", PERF_TYPE_HW_CACHE }, + { "TYPE_RAW", PERF_TYPE_RAW }, + { "TYPE_BREAKPOINT", PERF_TYPE_BREAKPOINT }, + + { "COUNT_HW_CPU_CYCLES", PERF_COUNT_HW_CPU_CYCLES }, + { "COUNT_HW_INSTRUCTIONS", PERF_COUNT_HW_INSTRUCTIONS }, + { "COUNT_HW_CACHE_REFERENCES", PERF_COUNT_HW_CACHE_REFERENCES }, + { "COUNT_HW_CACHE_MISSES", PERF_COUNT_HW_CACHE_MISSES }, + { "COUNT_HW_BRANCH_INSTRUCTIONS", PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, + { "COUNT_HW_BRANCH_MISSES", PERF_COUNT_HW_BRANCH_MISSES }, + { "COUNT_HW_BUS_CYCLES", PERF_COUNT_HW_BUS_CYCLES }, + { "COUNT_HW_CACHE_L1D", PERF_COUNT_HW_CACHE_L1D }, + { "COUNT_HW_CACHE_L1I", PERF_COUNT_HW_CACHE_L1I }, + { "COUNT_HW_CACHE_LL", PERF_COUNT_HW_CACHE_LL }, + { "COUNT_HW_CACHE_DTLB", PERF_COUNT_HW_CACHE_DTLB }, + { "COUNT_HW_CACHE_ITLB", PERF_COUNT_HW_CACHE_ITLB }, + { "COUNT_HW_CACHE_BPU", PERF_COUNT_HW_CACHE_BPU }, + { "COUNT_HW_CACHE_OP_READ", PERF_COUNT_HW_CACHE_OP_READ }, + { "COUNT_HW_CACHE_OP_WRITE", PERF_COUNT_HW_CACHE_OP_WRITE }, + { "COUNT_HW_CACHE_OP_PREFETCH", PERF_COUNT_HW_CACHE_OP_PREFETCH }, + { 
"COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS }, + { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS }, + + { "COUNT_HW_STALLED_CYCLES_FRONTEND", PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, + { "COUNT_HW_STALLED_CYCLES_BACKEND", PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, + + { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK }, + { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK }, + { "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS }, + { "COUNT_SW_CONTEXT_SWITCHES", PERF_COUNT_SW_CONTEXT_SWITCHES }, + { "COUNT_SW_CPU_MIGRATIONS", PERF_COUNT_SW_CPU_MIGRATIONS }, + { "COUNT_SW_PAGE_FAULTS_MIN", PERF_COUNT_SW_PAGE_FAULTS_MIN }, + { "COUNT_SW_PAGE_FAULTS_MAJ", PERF_COUNT_SW_PAGE_FAULTS_MAJ }, + { "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS }, + { "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS }, + { "COUNT_SW_DUMMY", PERF_COUNT_SW_DUMMY }, + + { "SAMPLE_IP", PERF_SAMPLE_IP }, + { "SAMPLE_TID", PERF_SAMPLE_TID }, + { "SAMPLE_TIME", PERF_SAMPLE_TIME }, + { "SAMPLE_ADDR", PERF_SAMPLE_ADDR }, + { "SAMPLE_READ", PERF_SAMPLE_READ }, + { "SAMPLE_CALLCHAIN", PERF_SAMPLE_CALLCHAIN }, + { "SAMPLE_ID", PERF_SAMPLE_ID }, + { "SAMPLE_CPU", PERF_SAMPLE_CPU }, + { "SAMPLE_PERIOD", PERF_SAMPLE_PERIOD }, + { "SAMPLE_STREAM_ID", PERF_SAMPLE_STREAM_ID }, + { "SAMPLE_RAW", PERF_SAMPLE_RAW }, + + { "FORMAT_TOTAL_TIME_ENABLED", PERF_FORMAT_TOTAL_TIME_ENABLED }, + { "FORMAT_TOTAL_TIME_RUNNING", PERF_FORMAT_TOTAL_TIME_RUNNING }, + { "FORMAT_ID", PERF_FORMAT_ID }, + { "FORMAT_GROUP", PERF_FORMAT_GROUP }, + + { "RECORD_MMAP", PERF_RECORD_MMAP }, + { "RECORD_LOST", PERF_RECORD_LOST }, + { "RECORD_COMM", PERF_RECORD_COMM }, + { "RECORD_EXIT", PERF_RECORD_EXIT }, + { "RECORD_THROTTLE", PERF_RECORD_THROTTLE }, + { "RECORD_UNTHROTTLE", PERF_RECORD_UNTHROTTLE }, + { "RECORD_FORK", PERF_RECORD_FORK }, + { "RECORD_READ", PERF_RECORD_READ }, + { "RECORD_SAMPLE", PERF_RECORD_SAMPLE }, + { .name = NULL, }, +}; + +static PyMethodDef perf__methods[] = { + { .ml_name = NULL, } +}; + +PyMODINIT_FUNC initperf(void) +{ + PyObject *obj; + int i; + PyObject *dict, *module = Py_InitModule("perf", perf__methods); + + if (module == NULL || + pyrf_event__setup_types() < 0 || + pyrf_evlist__setup_types() < 0 || + pyrf_evsel__setup_types() < 0 || + pyrf_thread_map__setup_types() < 0 || + pyrf_cpu_map__setup_types() < 0) + return; + + /* The page_size is placed in util object. */ + page_size = sysconf(_SC_PAGE_SIZE); + + Py_INCREF(&pyrf_evlist__type); + PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type); + + Py_INCREF(&pyrf_evsel__type); + PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type); + + Py_INCREF(&pyrf_thread_map__type); + PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type); + + Py_INCREF(&pyrf_cpu_map__type); + PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type); + + dict = PyModule_GetDict(module); + if (dict == NULL) + goto error; + + for (i = 0; perf__constants[i].name != NULL; i++) { + obj = PyInt_FromLong(perf__constants[i].value); + if (obj == NULL) + goto error; + PyDict_SetItemString(dict, perf__constants[i].name, obj); + Py_DECREF(obj); + } + +error: + if (PyErr_Occurred()) + PyErr_SetString(PyExc_ImportError, "perf: Init failed!"); +} + +/* + * Dummy, to avoid dragging all the test_attr infrastructure in the python + * binding. 
+ */
+void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
+		     int fd, int group_fd, unsigned long flags)
+{
+}
diff --git a/kernel/tools/perf/util/quote.c b/kernel/tools/perf/util/quote.c
new file mode 100644
index 000000000..01f03242b
--- /dev/null
+++ b/kernel/tools/perf/util/quote.c
@@ -0,0 +1,54 @@
+#include "cache.h"
+#include "quote.h"
+
+/* Help to copy the thing properly quoted for the shell safety.
+ * any single quote is replaced with '\'', any exclamation point
+ * is replaced with '\!', and the whole thing is enclosed in a
+ * single quote pair.
+ *
+ * E.g.
+ *  original     sq_quote     result
+ *  name     ==> name      ==> 'name'
+ *  a b      ==> a b       ==> 'a b'
+ *  a'b      ==> a'\''b    ==> 'a'\''b'
+ *  a!b      ==> a'\!'b    ==> 'a'\!'b'
+ */
+static inline int need_bs_quote(char c)
+{
+	return (c == '\'' || c == '!');
+}
+
+static void sq_quote_buf(struct strbuf *dst, const char *src)
+{
+	char *to_free = NULL;
+
+	if (dst->buf == src)
+		to_free = strbuf_detach(dst, NULL);
+
+	strbuf_addch(dst, '\'');
+	while (*src) {
+		size_t len = strcspn(src, "'!");
+		strbuf_add(dst, src, len);
+		src += len;
+		while (need_bs_quote(*src)) {
+			strbuf_addstr(dst, "'\\");
+			strbuf_addch(dst, *src++);
+			strbuf_addch(dst, '\'');
+		}
+	}
+	strbuf_addch(dst, '\'');
+	free(to_free);
+}
+
+void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
+{
+	int i;
+
+	/* Copy into destination buffer. */
+	strbuf_grow(dst, 255);
+	for (i = 0; argv[i]; ++i) {
+		strbuf_addch(dst, ' ');
+		sq_quote_buf(dst, argv[i]);
+		if (maxlen && dst->len > maxlen)
+			die("Too many or long arguments");
+	}
+}
diff --git a/kernel/tools/perf/util/quote.h b/kernel/tools/perf/util/quote.h
new file mode 100644
index 000000000..172889ea2
--- /dev/null
+++ b/kernel/tools/perf/util/quote.h
@@ -0,0 +1,29 @@
+#ifndef __PERF_QUOTE_H
+#define __PERF_QUOTE_H
+
+#include <stddef.h>
+#include <stdio.h>
+
+/* Help to copy the thing properly quoted for the shell safety.
+ * any single quote is replaced with '\'', any exclamation point
+ * is replaced with '\!', and the whole thing is enclosed in a
+ * single quote pair.
+ *
+ * For example, if you are passing the result to system() as an
+ * argument:
+ *
+ * sprintf(cmd, "foobar %s %s", sq_quote(arg0), sq_quote(arg1))
+ *
+ * would be appropriate. If the system() is going to call ssh to
+ * run the command on the other side:
+ *
+ * sprintf(cmd, "git-diff-tree %s %s", sq_quote(arg0), sq_quote(arg1));
+ * sprintf(rcmd, "ssh %s %s", sq_quote(host), sq_quote(cmd));
+ *
+ * Note that the above examples leak memory! Remember to free result from
+ * sq_quote() in a real application.
+ */
+
+extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
+
+#endif /* __PERF_QUOTE_H */
diff --git a/kernel/tools/perf/util/rblist.c b/kernel/tools/perf/util/rblist.c
new file mode 100644
index 000000000..0dfe27d99
--- /dev/null
+++ b/kernel/tools/perf/util/rblist.c
@@ -0,0 +1,128 @@
+/*
+ * Based on strlist.c by:
+ * (c) 2009 Arnaldo Carvalho de Melo
+ *
+ * Licensed under the GPLv2.
+ */ + +#include +#include +#include + +#include "rblist.h" + +int rblist__add_node(struct rblist *rblist, const void *new_entry) +{ + struct rb_node **p = &rblist->entries.rb_node; + struct rb_node *parent = NULL, *new_node; + + while (*p != NULL) { + int rc; + + parent = *p; + + rc = rblist->node_cmp(parent, new_entry); + if (rc > 0) + p = &(*p)->rb_left; + else if (rc < 0) + p = &(*p)->rb_right; + else + return -EEXIST; + } + + new_node = rblist->node_new(rblist, new_entry); + if (new_node == NULL) + return -ENOMEM; + + rb_link_node(new_node, parent, p); + rb_insert_color(new_node, &rblist->entries); + ++rblist->nr_entries; + + return 0; +} + +void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node) +{ + rb_erase(rb_node, &rblist->entries); + --rblist->nr_entries; + rblist->node_delete(rblist, rb_node); +} + +static struct rb_node *__rblist__findnew(struct rblist *rblist, + const void *entry, + bool create) +{ + struct rb_node **p = &rblist->entries.rb_node; + struct rb_node *parent = NULL, *new_node = NULL; + + while (*p != NULL) { + int rc; + + parent = *p; + + rc = rblist->node_cmp(parent, entry); + if (rc > 0) + p = &(*p)->rb_left; + else if (rc < 0) + p = &(*p)->rb_right; + else + return parent; + } + + if (create) { + new_node = rblist->node_new(rblist, entry); + if (new_node) { + rb_link_node(new_node, parent, p); + rb_insert_color(new_node, &rblist->entries); + ++rblist->nr_entries; + } + } + + return new_node; +} + +struct rb_node *rblist__find(struct rblist *rblist, const void *entry) +{ + return __rblist__findnew(rblist, entry, false); +} + +struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry) +{ + return __rblist__findnew(rblist, entry, true); +} + +void rblist__init(struct rblist *rblist) +{ + if (rblist != NULL) { + rblist->entries = RB_ROOT; + rblist->nr_entries = 0; + } + + return; +} + +void rblist__delete(struct rblist *rblist) +{ + if (rblist != NULL) { + struct rb_node *pos, *next = rb_first(&rblist->entries); + + while (next) { + pos = next; + next = rb_next(pos); + rblist__remove_node(rblist, pos); + } + free(rblist); + } +} + +struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx) +{ + struct rb_node *node; + + for (node = rb_first(&rblist->entries); node; node = rb_next(node)) { + if (!idx--) + return node; + } + + return NULL; +} diff --git a/kernel/tools/perf/util/rblist.h b/kernel/tools/perf/util/rblist.h new file mode 100644 index 000000000..ff9913b99 --- /dev/null +++ b/kernel/tools/perf/util/rblist.h @@ -0,0 +1,48 @@ +#ifndef __PERF_RBLIST_H +#define __PERF_RBLIST_H + +#include +#include + +/* + * create node structs of the form: + * struct my_node { + * struct rb_node rb_node; + * ... my data ... + * }; + * + * create list structs of the form: + * struct mylist { + * struct rblist rblist; + * ... my data ... 
+ * }; + */ + +struct rblist { + struct rb_root entries; + unsigned int nr_entries; + + int (*node_cmp)(struct rb_node *rbn, const void *entry); + struct rb_node *(*node_new)(struct rblist *rlist, const void *new_entry); + void (*node_delete)(struct rblist *rblist, struct rb_node *rb_node); +}; + +void rblist__init(struct rblist *rblist); +void rblist__delete(struct rblist *rblist); +int rblist__add_node(struct rblist *rblist, const void *new_entry); +void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node); +struct rb_node *rblist__find(struct rblist *rblist, const void *entry); +struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry); +struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx); + +static inline bool rblist__empty(const struct rblist *rblist) +{ + return rblist->nr_entries == 0; +} + +static inline unsigned int rblist__nr_entries(const struct rblist *rblist) +{ + return rblist->nr_entries; +} + +#endif /* __PERF_RBLIST_H */ diff --git a/kernel/tools/perf/util/record.c b/kernel/tools/perf/util/record.c new file mode 100644 index 000000000..8acd0df88 --- /dev/null +++ b/kernel/tools/perf/util/record.c @@ -0,0 +1,243 @@ +#include "evlist.h" +#include "evsel.h" +#include "cpumap.h" +#include "parse-events.h" +#include +#include "util.h" +#include "cloexec.h" + +typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel); + +static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str) +{ + struct perf_evlist *evlist; + struct perf_evsel *evsel; + unsigned long flags = perf_event_open_cloexec_flag(); + int err = -EAGAIN, fd; + static pid_t pid = -1; + + evlist = perf_evlist__new(); + if (!evlist) + return -ENOMEM; + + if (parse_events(evlist, str)) + goto out_delete; + + evsel = perf_evlist__first(evlist); + + while (1) { + fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags); + if (fd < 0) { + if (pid == -1 && errno == EACCES) { + pid = 0; + continue; + } + goto out_delete; + } + break; + } + close(fd); + + fn(evsel); + + fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags); + if (fd < 0) { + if (errno == EINVAL) + err = -EINVAL; + goto out_delete; + } + close(fd); + err = 0; + +out_delete: + perf_evlist__delete(evlist); + return err; +} + +static bool perf_probe_api(setup_probe_fn_t fn) +{ + const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL}; + struct cpu_map *cpus; + int cpu, ret, i = 0; + + cpus = cpu_map__new(NULL); + if (!cpus) + return false; + cpu = cpus->map[0]; + cpu_map__delete(cpus); + + do { + ret = perf_do_probe_api(fn, cpu, try[i++]); + if (!ret) + return true; + } while (ret == -EAGAIN && try[i]); + + return false; +} + +static void perf_probe_sample_identifier(struct perf_evsel *evsel) +{ + evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER; +} + +static void perf_probe_comm_exec(struct perf_evsel *evsel) +{ + evsel->attr.comm_exec = 1; +} + +bool perf_can_sample_identifier(void) +{ + return perf_probe_api(perf_probe_sample_identifier); +} + +static bool perf_can_comm_exec(void) +{ + return perf_probe_api(perf_probe_comm_exec); +} + +void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts) +{ + struct perf_evsel *evsel; + bool use_sample_identifier = false; + bool use_comm_exec; + + /* + * Set the evsel leader links before we configure attributes, + * since some might depend on this info. 
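+ * (Editorial note: one such dependency in this 4.1 import is
+ * perf_evsel__config(), which consults evsel->leader, e.g. to add
+ * PERF_FORMAT_GROUP for grouped events, so the leader links must be
+ * in place before the evlist__for_each() loop below runs.)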
+ */ + if (opts->group) + perf_evlist__set_leader(evlist); + + if (evlist->cpus->map[0] < 0) + opts->no_inherit = true; + + use_comm_exec = perf_can_comm_exec(); + + evlist__for_each(evlist, evsel) { + perf_evsel__config(evsel, opts); + if (evsel->tracking && use_comm_exec) + evsel->attr.comm_exec = 1; + } + + if (evlist->nr_entries > 1) { + struct perf_evsel *first = perf_evlist__first(evlist); + + evlist__for_each(evlist, evsel) { + if (evsel->attr.sample_type == first->attr.sample_type) + continue; + use_sample_identifier = perf_can_sample_identifier(); + break; + } + evlist__for_each(evlist, evsel) + perf_evsel__set_sample_id(evsel, use_sample_identifier); + } + + perf_evlist__set_id_pos(evlist); +} + +static int get_max_rate(unsigned int *rate) +{ + return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate); +} + +static int record_opts__config_freq(struct record_opts *opts) +{ + bool user_freq = opts->user_freq != UINT_MAX; + unsigned int max_rate; + + if (opts->user_interval != ULLONG_MAX) + opts->default_interval = opts->user_interval; + if (user_freq) + opts->freq = opts->user_freq; + + /* + * User specified count overrides default frequency. + */ + if (opts->default_interval) + opts->freq = 0; + else if (opts->freq) { + opts->default_interval = opts->freq; + } else { + pr_err("frequency and count are zero, aborting\n"); + return -1; + } + + if (get_max_rate(&max_rate)) + return 0; + + /* + * User specified frequency is over current maximum. + */ + if (user_freq && (max_rate < opts->freq)) { + pr_err("Maximum frequency rate (%u) reached.\n" + "Please use -F freq option with lower value or consider\n" + "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n", + max_rate); + return -1; + } + + /* + * Default frequency is over current maximum. + */ + if (max_rate < opts->freq) { + pr_warning("Lowering default frequency rate to %u.\n" + "Please consider tweaking " + "/proc/sys/kernel/perf_event_max_sample_rate.\n", + max_rate); + opts->freq = max_rate; + } + + return 0; +} + +int record_opts__config(struct record_opts *opts) +{ + return record_opts__config_freq(opts); +} + +bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str) +{ + struct perf_evlist *temp_evlist; + struct perf_evsel *evsel; + int err, fd, cpu; + bool ret = false; + pid_t pid = -1; + + temp_evlist = perf_evlist__new(); + if (!temp_evlist) + return false; + + err = parse_events(temp_evlist, str); + if (err) + goto out_delete; + + evsel = perf_evlist__last(temp_evlist); + + if (!evlist || cpu_map__empty(evlist->cpus)) { + struct cpu_map *cpus = cpu_map__new(NULL); + + cpu = cpus ? 
cpus->map[0] : 0; + cpu_map__delete(cpus); + } else { + cpu = evlist->cpus->map[0]; + } + + while (1) { + fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, + perf_event_open_cloexec_flag()); + if (fd < 0) { + if (pid == -1 && errno == EACCES) { + pid = 0; + continue; + } + goto out_delete; + } + break; + } + close(fd); + ret = true; + +out_delete: + perf_evlist__delete(temp_evlist); + return ret; +} diff --git a/kernel/tools/perf/util/run-command.c b/kernel/tools/perf/util/run-command.c new file mode 100644 index 000000000..34622b53e --- /dev/null +++ b/kernel/tools/perf/util/run-command.c @@ -0,0 +1,219 @@ +#include "cache.h" +#include "run-command.h" +#include "exec_cmd.h" +#include "debug.h" + +static inline void close_pair(int fd[2]) +{ + close(fd[0]); + close(fd[1]); +} + +static inline void dup_devnull(int to) +{ + int fd = open("/dev/null", O_RDWR); + dup2(fd, to); + close(fd); +} + +int start_command(struct child_process *cmd) +{ + int need_in, need_out, need_err; + int fdin[2], fdout[2], fderr[2]; + char sbuf[STRERR_BUFSIZE]; + + /* + * In case of errors we must keep the promise to close FDs + * that have been passed in via ->in and ->out. + */ + + need_in = !cmd->no_stdin && cmd->in < 0; + if (need_in) { + if (pipe(fdin) < 0) { + if (cmd->out > 0) + close(cmd->out); + return -ERR_RUN_COMMAND_PIPE; + } + cmd->in = fdin[1]; + } + + need_out = !cmd->no_stdout + && !cmd->stdout_to_stderr + && cmd->out < 0; + if (need_out) { + if (pipe(fdout) < 0) { + if (need_in) + close_pair(fdin); + else if (cmd->in) + close(cmd->in); + return -ERR_RUN_COMMAND_PIPE; + } + cmd->out = fdout[0]; + } + + need_err = !cmd->no_stderr && cmd->err < 0; + if (need_err) { + if (pipe(fderr) < 0) { + if (need_in) + close_pair(fdin); + else if (cmd->in) + close(cmd->in); + if (need_out) + close_pair(fdout); + else if (cmd->out) + close(cmd->out); + return -ERR_RUN_COMMAND_PIPE; + } + cmd->err = fderr[0]; + } + + fflush(NULL); + cmd->pid = fork(); + if (!cmd->pid) { + if (cmd->no_stdin) + dup_devnull(0); + else if (need_in) { + dup2(fdin[0], 0); + close_pair(fdin); + } else if (cmd->in) { + dup2(cmd->in, 0); + close(cmd->in); + } + + if (cmd->no_stderr) + dup_devnull(2); + else if (need_err) { + dup2(fderr[1], 2); + close_pair(fderr); + } + + if (cmd->no_stdout) + dup_devnull(1); + else if (cmd->stdout_to_stderr) + dup2(2, 1); + else if (need_out) { + dup2(fdout[1], 1); + close_pair(fdout); + } else if (cmd->out > 1) { + dup2(cmd->out, 1); + close(cmd->out); + } + + if (cmd->dir && chdir(cmd->dir)) + die("exec %s: cd to %s failed (%s)", cmd->argv[0], + cmd->dir, strerror_r(errno, sbuf, sizeof(sbuf))); + if (cmd->env) { + for (; *cmd->env; cmd->env++) { + if (strchr(*cmd->env, '=')) + putenv((char*)*cmd->env); + else + unsetenv(*cmd->env); + } + } + if (cmd->preexec_cb) + cmd->preexec_cb(); + if (cmd->perf_cmd) { + execv_perf_cmd(cmd->argv); + } else { + execvp(cmd->argv[0], (char *const*) cmd->argv); + } + exit(127); + } + + if (cmd->pid < 0) { + int err = errno; + if (need_in) + close_pair(fdin); + else if (cmd->in) + close(cmd->in); + if (need_out) + close_pair(fdout); + else if (cmd->out) + close(cmd->out); + if (need_err) + close_pair(fderr); + return err == ENOENT ? 
+ -ERR_RUN_COMMAND_EXEC : + -ERR_RUN_COMMAND_FORK; + } + + if (need_in) + close(fdin[0]); + else if (cmd->in) + close(cmd->in); + + if (need_out) + close(fdout[1]); + else if (cmd->out) + close(cmd->out); + + if (need_err) + close(fderr[1]); + + return 0; +} + +static int wait_or_whine(pid_t pid) +{ + char sbuf[STRERR_BUFSIZE]; + + for (;;) { + int status, code; + pid_t waiting = waitpid(pid, &status, 0); + + if (waiting < 0) { + if (errno == EINTR) + continue; + error("waitpid failed (%s)", + strerror_r(errno, sbuf, sizeof(sbuf))); + return -ERR_RUN_COMMAND_WAITPID; + } + if (waiting != pid) + return -ERR_RUN_COMMAND_WAITPID_WRONG_PID; + if (WIFSIGNALED(status)) + return -ERR_RUN_COMMAND_WAITPID_SIGNAL; + + if (!WIFEXITED(status)) + return -ERR_RUN_COMMAND_WAITPID_NOEXIT; + code = WEXITSTATUS(status); + switch (code) { + case 127: + return -ERR_RUN_COMMAND_EXEC; + case 0: + return 0; + default: + return -code; + } + } +} + +int finish_command(struct child_process *cmd) +{ + return wait_or_whine(cmd->pid); +} + +int run_command(struct child_process *cmd) +{ + int code = start_command(cmd); + if (code) + return code; + return finish_command(cmd); +} + +static void prepare_run_command_v_opt(struct child_process *cmd, + const char **argv, + int opt) +{ + memset(cmd, 0, sizeof(*cmd)); + cmd->argv = argv; + cmd->no_stdin = opt & RUN_COMMAND_NO_STDIN ? 1 : 0; + cmd->perf_cmd = opt & RUN_PERF_CMD ? 1 : 0; + cmd->stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 1 : 0; +} + +int run_command_v_opt(const char **argv, int opt) +{ + struct child_process cmd; + prepare_run_command_v_opt(&cmd, argv, opt); + return run_command(&cmd); +} diff --git a/kernel/tools/perf/util/run-command.h b/kernel/tools/perf/util/run-command.h new file mode 100644 index 000000000..1ef264d50 --- /dev/null +++ b/kernel/tools/perf/util/run-command.h @@ -0,0 +1,58 @@ +#ifndef __PERF_RUN_COMMAND_H +#define __PERF_RUN_COMMAND_H + +enum { + ERR_RUN_COMMAND_FORK = 10000, + ERR_RUN_COMMAND_EXEC, + ERR_RUN_COMMAND_PIPE, + ERR_RUN_COMMAND_WAITPID, + ERR_RUN_COMMAND_WAITPID_WRONG_PID, + ERR_RUN_COMMAND_WAITPID_SIGNAL, + ERR_RUN_COMMAND_WAITPID_NOEXIT, +}; +#define IS_RUN_COMMAND_ERR(x) (-(x) >= ERR_RUN_COMMAND_FORK) + +struct child_process { + const char **argv; + pid_t pid; + /* + * Using .in, .out, .err: + * - Specify 0 for no redirections (child inherits stdin, stdout, + * stderr from parent). + * - Specify -1 to have a pipe allocated as follows: + * .in: returns the writable pipe end; parent writes to it, + * the readable pipe end becomes child's stdin + * .out, .err: returns the readable pipe end; parent reads from + * it, the writable pipe end becomes child's stdout/stderr + * The caller of start_command() must close the returned FDs + * after it has completed reading from/writing to it! + * - Specify > 0 to set a channel to a particular FD as follows: + * .in: a readable FD, becomes child's stdin + * .out: a writable FD, becomes child's stdout/stderr + * .err > 0 not supported + * The specified FD is closed by start_command(), even in case + * of errors! 
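+ *
+ * (Editorial usage sketch, not upstream text: reading a child's
+ * stdout through a pipe under the rules above:
+ *
+ *	struct child_process cmd = { .argv = argv, .out = -1 };
+ *
+ *	if (!start_command(&cmd)) {
+ *		FILE *f = fdopen(cmd.out, "r");
+ *		... read from f, then fclose(f), which closes cmd.out ...
+ *		finish_command(&cmd);
+ *	}
+ *
+ * the pipe's readable end comes back in cmd.out, and it is the
+ * caller's job to close it before collecting the exit status.)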
+ */ + int in; + int out; + int err; + const char *dir; + const char *const *env; + unsigned no_stdin:1; + unsigned no_stdout:1; + unsigned no_stderr:1; + unsigned perf_cmd:1; /* if this is to be perf sub-command */ + unsigned stdout_to_stderr:1; + void (*preexec_cb)(void); +}; + +int start_command(struct child_process *); +int finish_command(struct child_process *); +int run_command(struct child_process *); + +#define RUN_COMMAND_NO_STDIN 1 +#define RUN_PERF_CMD 2 /*If this is to be perf sub-command */ +#define RUN_COMMAND_STDOUT_TO_STDERR 4 +int run_command_v_opt(const char **argv, int opt); + +#endif /* __PERF_RUN_COMMAND_H */ diff --git a/kernel/tools/perf/util/scripting-engines/Build b/kernel/tools/perf/util/scripting-engines/Build new file mode 100644 index 000000000..6516e220c --- /dev/null +++ b/kernel/tools/perf/util/scripting-engines/Build @@ -0,0 +1,6 @@ +libperf-$(CONFIG_LIBPERL) += trace-event-perl.o +libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o + +CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default + +CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow diff --git a/kernel/tools/perf/util/scripting-engines/trace-event-perl.c b/kernel/tools/perf/util/scripting-engines/trace-event-perl.c new file mode 100644 index 000000000..430b5d278 --- /dev/null +++ b/kernel/tools/perf/util/scripting-engines/trace-event-perl.c @@ -0,0 +1,632 @@ +/* + * trace-event-perl. Feed perf script events to an embedded Perl interpreter. + * + * Copyright (C) 2009 Tom Zanussi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include + +#include "../util.h" +#include +#include + +#include "../../perf.h" +#include "../thread.h" +#include "../event.h" +#include "../trace-event.h" +#include "../evsel.h" +#include "../debug.h" + +void boot_Perf__Trace__Context(pTHX_ CV *cv); +void boot_DynaLoader(pTHX_ CV *cv); +typedef PerlInterpreter * INTERP; + +void xs_init(pTHX); + +void xs_init(pTHX) +{ + const char *file = __FILE__; + dXSUB_SYS; + + newXS("Perf::Trace::Context::bootstrap", boot_Perf__Trace__Context, + file); + newXS("DynaLoader::boot_DynaLoader", boot_DynaLoader, file); +} + +INTERP my_perl; + +#define FTRACE_MAX_EVENT \ + ((1 << (sizeof(unsigned short) * 8)) - 1) + +static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT); + +extern struct scripting_context *scripting_context; + +static char *cur_field_name; +static int zero_flag_atom; + +static void define_symbolic_value(const char *ev_name, + const char *field_name, + const char *field_value, + const char *field_str) +{ + unsigned long long value; + dSP; + + value = eval_flag(field_value); + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); + XPUSHs(sv_2mortal(newSVpv(field_name, 0))); + XPUSHs(sv_2mortal(newSVuv(value))); + XPUSHs(sv_2mortal(newSVpv(field_str, 0))); + + PUTBACK; + if (get_cv("main::define_symbolic_value", 0)) + call_pv("main::define_symbolic_value", G_SCALAR); + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void define_symbolic_values(struct print_flag_sym *field, + const char *ev_name, + const char *field_name) +{ + define_symbolic_value(ev_name, field_name, field->value, field->str); + if (field->next) + define_symbolic_values(field->next, ev_name, field_name); +} + +static void define_symbolic_field(const char *ev_name, + const char *field_name) +{ + dSP; + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); + XPUSHs(sv_2mortal(newSVpv(field_name, 0))); + + PUTBACK; + if (get_cv("main::define_symbolic_field", 0)) + call_pv("main::define_symbolic_field", G_SCALAR); + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void define_flag_value(const char *ev_name, + const char *field_name, + const char *field_value, + const char *field_str) +{ + unsigned long long value; + dSP; + + value = eval_flag(field_value); + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); + XPUSHs(sv_2mortal(newSVpv(field_name, 0))); + XPUSHs(sv_2mortal(newSVuv(value))); + XPUSHs(sv_2mortal(newSVpv(field_str, 0))); + + PUTBACK; + if (get_cv("main::define_flag_value", 0)) + call_pv("main::define_flag_value", G_SCALAR); + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void define_flag_values(struct print_flag_sym *field, + const char *ev_name, + const char *field_name) +{ + define_flag_value(ev_name, field_name, field->value, field->str); + if (field->next) + define_flag_values(field->next, ev_name, field_name); +} + +static void define_flag_field(const char *ev_name, + const char *field_name, + const char *delim) +{ + dSP; + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); + XPUSHs(sv_2mortal(newSVpv(field_name, 0))); + XPUSHs(sv_2mortal(newSVpv(delim, 0))); + + PUTBACK; + if (get_cv("main::define_flag_field", 0)) + call_pv("main::define_flag_field", 
G_SCALAR); + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void define_event_symbols(struct event_format *event, + const char *ev_name, + struct print_arg *args) +{ + switch (args->type) { + case PRINT_NULL: + break; + case PRINT_ATOM: + define_flag_value(ev_name, cur_field_name, "0", + args->atom.atom); + zero_flag_atom = 0; + break; + case PRINT_FIELD: + free(cur_field_name); + cur_field_name = strdup(args->field.name); + break; + case PRINT_FLAGS: + define_event_symbols(event, ev_name, args->flags.field); + define_flag_field(ev_name, cur_field_name, args->flags.delim); + define_flag_values(args->flags.flags, ev_name, cur_field_name); + break; + case PRINT_SYMBOL: + define_event_symbols(event, ev_name, args->symbol.field); + define_symbolic_field(ev_name, cur_field_name); + define_symbolic_values(args->symbol.symbols, ev_name, + cur_field_name); + break; + case PRINT_HEX: + define_event_symbols(event, ev_name, args->hex.field); + define_event_symbols(event, ev_name, args->hex.size); + break; + case PRINT_INT_ARRAY: + define_event_symbols(event, ev_name, args->int_array.field); + define_event_symbols(event, ev_name, args->int_array.count); + define_event_symbols(event, ev_name, args->int_array.el_size); + break; + case PRINT_BSTRING: + case PRINT_DYNAMIC_ARRAY: + case PRINT_STRING: + case PRINT_BITMASK: + break; + case PRINT_TYPE: + define_event_symbols(event, ev_name, args->typecast.item); + break; + case PRINT_OP: + if (strcmp(args->op.op, ":") == 0) + zero_flag_atom = 1; + define_event_symbols(event, ev_name, args->op.left); + define_event_symbols(event, ev_name, args->op.right); + break; + case PRINT_FUNC: + default: + pr_err("Unsupported print arg type\n"); + /* we should warn... */ + return; + } + + if (args->next) + define_event_symbols(event, ev_name, args->next); +} + +static void perl_process_tracepoint(struct perf_sample *sample, + struct perf_evsel *evsel, + struct thread *thread) +{ + struct event_format *event = evsel->tp_format; + struct format_field *field; + static char handler[256]; + unsigned long long val; + unsigned long s, ns; + int pid; + int cpu = sample->cpu; + void *data = sample->raw_data; + unsigned long long nsecs = sample->time; + const char *comm = thread__comm_str(thread); + + dSP; + + if (evsel->attr.type != PERF_TYPE_TRACEPOINT) + return; + + if (!event) + die("ug! 
no event found for type %" PRIu64, (u64)evsel->attr.config); + + pid = raw_field_value(event, "common_pid", data); + + sprintf(handler, "%s::%s", event->system, event->name); + + if (!test_and_set_bit(event->id, events_defined)) + define_event_symbols(event, handler, event->print_fmt.args); + + s = nsecs / NSECS_PER_SEC; + ns = nsecs - s * NSECS_PER_SEC; + + scripting_context->event_data = data; + scripting_context->pevent = evsel->tp_format->pevent; + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(handler, 0))); + XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); + XPUSHs(sv_2mortal(newSVuv(cpu))); + XPUSHs(sv_2mortal(newSVuv(s))); + XPUSHs(sv_2mortal(newSVuv(ns))); + XPUSHs(sv_2mortal(newSViv(pid))); + XPUSHs(sv_2mortal(newSVpv(comm, 0))); + + /* common fields other than pid can be accessed via xsub fns */ + + for (field = event->format.fields; field; field = field->next) { + if (field->flags & FIELD_IS_STRING) { + int offset; + if (field->flags & FIELD_IS_DYNAMIC) { + offset = *(int *)(data + field->offset); + offset &= 0xffff; + } else + offset = field->offset; + XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0))); + } else { /* FIELD_IS_NUMERIC */ + val = read_size(event, data + field->offset, + field->size); + if (field->flags & FIELD_IS_SIGNED) { + XPUSHs(sv_2mortal(newSViv(val))); + } else { + XPUSHs(sv_2mortal(newSVuv(val))); + } + } + } + + PUTBACK; + + if (get_cv(handler, 0)) + call_pv(handler, G_SCALAR); + else if (get_cv("main::trace_unhandled", 0)) { + XPUSHs(sv_2mortal(newSVpv(handler, 0))); + XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); + XPUSHs(sv_2mortal(newSVuv(cpu))); + XPUSHs(sv_2mortal(newSVuv(nsecs))); + XPUSHs(sv_2mortal(newSViv(pid))); + XPUSHs(sv_2mortal(newSVpv(comm, 0))); + call_pv("main::trace_unhandled", G_SCALAR); + } + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void perl_process_event_generic(union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel) +{ + dSP; + + if (!get_cv("process_event", 0)) + return; + + ENTER; + SAVETMPS; + PUSHMARK(SP); + XPUSHs(sv_2mortal(newSVpvn((const char *)event, event->header.size))); + XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr)))); + XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample)))); + XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size))); + PUTBACK; + call_pv("process_event", G_SCALAR); + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void perl_process_event(union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct addr_location *al) +{ + perl_process_tracepoint(sample, evsel, al->thread); + perl_process_event_generic(event, sample, evsel); +} + +static void run_start_sub(void) +{ + dSP; /* access to Perl stack */ + PUSHMARK(SP); + + if (get_cv("main::trace_begin", 0)) + call_pv("main::trace_begin", G_DISCARD | G_NOARGS); +} + +/* + * Start trace script + */ +static int perl_start_script(const char *script, int argc, const char **argv) +{ + const char **command_line; + int i, err = 0; + + command_line = malloc((argc + 2) * sizeof(const char *)); + command_line[0] = ""; + command_line[1] = script; + for (i = 2; i < argc + 2; i++) + command_line[i] = argv[i - 2]; + + my_perl = perl_alloc(); + perl_construct(my_perl); + + if (perl_parse(my_perl, xs_init, argc + 2, (char **)command_line, + (char **)NULL)) { + err = -1; + goto error; + } + + if (perl_run(my_perl)) { + err = -1; + goto error; + } + + if (SvTRUE(ERRSV)) { + err = -1; + goto 
error; + } + + run_start_sub(); + + free(command_line); + return 0; +error: + perl_free(my_perl); + free(command_line); + + return err; +} + +static int perl_flush_script(void) +{ + return 0; +} + +/* + * Stop trace script + */ +static int perl_stop_script(void) +{ + dSP; /* access to Perl stack */ + PUSHMARK(SP); + + if (get_cv("main::trace_end", 0)) + call_pv("main::trace_end", G_DISCARD | G_NOARGS); + + perl_destruct(my_perl); + perl_free(my_perl); + + return 0; +} + +static int perl_generate_script(struct pevent *pevent, const char *outfile) +{ + struct event_format *event = NULL; + struct format_field *f; + char fname[PATH_MAX]; + int not_first, count; + FILE *ofp; + + sprintf(fname, "%s.pl", outfile); + ofp = fopen(fname, "w"); + if (ofp == NULL) { + fprintf(stderr, "couldn't open %s\n", fname); + return -1; + } + + fprintf(ofp, "# perf script event handlers, " + "generated by perf script -g perl\n"); + + fprintf(ofp, "# Licensed under the terms of the GNU GPL" + " License version 2\n\n"); + + fprintf(ofp, "# The common_* event handler fields are the most useful " + "fields common to\n"); + + fprintf(ofp, "# all events. They don't necessarily correspond to " + "the 'common_*' fields\n"); + + fprintf(ofp, "# in the format files. Those fields not available as " + "handler params can\n"); + + fprintf(ofp, "# be retrieved using Perl functions of the form " + "common_*($context).\n"); + + fprintf(ofp, "# See Context.pm for the list of available " + "functions.\n\n"); + + fprintf(ofp, "use lib \"$ENV{'PERF_EXEC_PATH'}/scripts/perl/" + "Perf-Trace-Util/lib\";\n"); + + fprintf(ofp, "use lib \"./Perf-Trace-Util/lib\";\n"); + fprintf(ofp, "use Perf::Trace::Core;\n"); + fprintf(ofp, "use Perf::Trace::Context;\n"); + fprintf(ofp, "use Perf::Trace::Util;\n\n"); + + fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n"); + fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n"); + + while ((event = trace_find_next_event(pevent, event))) { + fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name); + fprintf(ofp, "\tmy ("); + + fprintf(ofp, "$event_name, "); + fprintf(ofp, "$context, "); + fprintf(ofp, "$common_cpu, "); + fprintf(ofp, "$common_secs, "); + fprintf(ofp, "$common_nsecs,\n"); + fprintf(ofp, "\t $common_pid, "); + fprintf(ofp, "$common_comm,\n\t "); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + if (++count % 5 == 0) + fprintf(ofp, "\n\t "); + + fprintf(ofp, "$%s", f->name); + } + fprintf(ofp, ") = @_;\n\n"); + + fprintf(ofp, "\tprint_header($event_name, $common_cpu, " + "$common_secs, $common_nsecs,\n\t " + "$common_pid, $common_comm);\n\n"); + + fprintf(ofp, "\tprintf(\""); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + if (count && count % 4 == 0) { + fprintf(ofp, "\".\n\t \""); + } + count++; + + fprintf(ofp, "%s=", f->name); + if (f->flags & FIELD_IS_STRING || + f->flags & FIELD_IS_FLAG || + f->flags & FIELD_IS_SYMBOLIC) + fprintf(ofp, "%%s"); + else if (f->flags & FIELD_IS_SIGNED) + fprintf(ofp, "%%d"); + else + fprintf(ofp, "%%u"); + } + + fprintf(ofp, "\\n\",\n\t "); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + + if (++count % 5 == 0) + fprintf(ofp, "\n\t "); + + if (f->flags & FIELD_IS_FLAG) { + if ((count - 1) % 5 != 0) { + fprintf(ofp, "\n\t "); + count = 4; + } + fprintf(ofp, "flag_str(\""); + fprintf(ofp, "%s::%s\", ", 
event->system, + event->name); + fprintf(ofp, "\"%s\", $%s)", f->name, + f->name); + } else if (f->flags & FIELD_IS_SYMBOLIC) { + if ((count - 1) % 5 != 0) { + fprintf(ofp, "\n\t "); + count = 4; + } + fprintf(ofp, "symbol_str(\""); + fprintf(ofp, "%s::%s\", ", event->system, + event->name); + fprintf(ofp, "\"%s\", $%s)", f->name, + f->name); + } else + fprintf(ofp, "$%s", f->name); + } + + fprintf(ofp, ");\n"); + fprintf(ofp, "}\n\n"); + } + + fprintf(ofp, "sub trace_unhandled\n{\n\tmy ($event_name, $context, " + "$common_cpu, $common_secs, $common_nsecs,\n\t " + "$common_pid, $common_comm) = @_;\n\n"); + + fprintf(ofp, "\tprint_header($event_name, $common_cpu, " + "$common_secs, $common_nsecs,\n\t $common_pid, " + "$common_comm);\n}\n\n"); + + fprintf(ofp, "sub print_header\n{\n" + "\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n" + "\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t " + "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}\n"); + + fprintf(ofp, + "\n# Packed byte string args of process_event():\n" + "#\n" + "# $event:\tunion perf_event\tutil/event.h\n" + "# $attr:\tstruct perf_event_attr\tlinux/perf_event.h\n" + "# $sample:\tstruct perf_sample\tutil/event.h\n" + "# $raw_data:\tperf_sample->raw_data\tutil/event.h\n" + "\n" + "sub process_event\n" + "{\n" + "\tmy ($event, $attr, $sample, $raw_data) = @_;\n" + "\n" + "\tmy @event\t= unpack(\"LSS\", $event);\n" + "\tmy @attr\t= unpack(\"LLQQQQQLLQQ\", $attr);\n" + "\tmy @sample\t= unpack(\"QLLQQQQQLL\", $sample);\n" + "\tmy @raw_data\t= unpack(\"C*\", $raw_data);\n" + "\n" + "\tuse Data::Dumper;\n" + "\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n" + "}\n"); + + fclose(ofp); + + fprintf(stderr, "generated Perl script: %s\n", fname); + + return 0; +} + +struct scripting_ops perl_scripting_ops = { + .name = "Perl", + .start_script = perl_start_script, + .flush_script = perl_flush_script, + .stop_script = perl_stop_script, + .process_event = perl_process_event, + .generate_script = perl_generate_script, +}; diff --git a/kernel/tools/perf/util/scripting-engines/trace-event-python.c b/kernel/tools/perf/util/scripting-engines/trace-event-python.c new file mode 100644 index 000000000..5544b8cdd --- /dev/null +++ b/kernel/tools/perf/util/scripting-engines/trace-event-python.c @@ -0,0 +1,1209 @@ +/* + * trace-event-python. Feed trace events to an embedded Python interpreter. + * + * Copyright (C) 2010 Tom Zanussi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "../../perf.h"
+#include "../debug.h"
+#include "../callchain.h"
+#include "../evsel.h"
+#include "../util.h"
+#include "../event.h"
+#include "../thread.h"
+#include "../comm.h"
+#include "../machine.h"
+#include "../db-export.h"
+#include "../thread-stack.h"
+#include "../trace-event.h"
+#include "../machine.h"
+
+PyMODINIT_FUNC initperf_trace_context(void);
+
+#define FTRACE_MAX_EVENT				\
+	((1 << (sizeof(unsigned short) * 8)) - 1)
+
+static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT);
+
+#define MAX_FIELDS	64
+#define N_COMMON_FIELDS	7
+
+extern struct scripting_context *scripting_context;
+
+static char *cur_field_name;
+static int zero_flag_atom;
+
+static PyObject *main_module, *main_dict;
+
+struct tables {
+	struct db_export	dbe;
+	PyObject		*evsel_handler;
+	PyObject		*machine_handler;
+	PyObject		*thread_handler;
+	PyObject		*comm_handler;
+	PyObject		*comm_thread_handler;
+	PyObject		*dso_handler;
+	PyObject		*symbol_handler;
+	PyObject		*branch_type_handler;
+	PyObject		*sample_handler;
+	PyObject		*call_path_handler;
+	PyObject		*call_return_handler;
+	bool			db_export_mode;
+};
+
+static struct tables tables_global;
+
+static void handler_call_die(const char *handler_name) NORETURN;
+static void handler_call_die(const char *handler_name)
+{
+	PyErr_Print();
+	Py_FatalError("problem in Python trace event handler");
+	// Py_FatalError does not return
+	// but we have to make the compiler happy
+	abort();
+}
+
+/*
+ * Insert val into the dictionary and decrement the reference counter.
+ * This is necessary for dictionaries since PyDict_SetItemString() does not
+ * steal a reference, as opposed to PyTuple_SetItem().
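+ *
+ * For example, PyInt_FromLong() returns a new reference and the
+ * dictionary then takes a reference of its own, so the caller must
+ * drop the one it owns to avoid a leak:
+ *
+ *	val = PyInt_FromLong(42);		/* new reference */
+ *	PyDict_SetItemString(dict, "n", val);	/* dict INCREFs val */
+ *	Py_DECREF(val);				/* drop our reference */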
+ */ +static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val) +{ + PyDict_SetItemString(dict, key, val); + Py_DECREF(val); +} + +static PyObject *get_handler(const char *handler_name) +{ + PyObject *handler; + + handler = PyDict_GetItemString(main_dict, handler_name); + if (handler && !PyCallable_Check(handler)) + return NULL; + return handler; +} + +static void call_object(PyObject *handler, PyObject *args, const char *die_msg) +{ + PyObject *retval; + + retval = PyObject_CallObject(handler, args); + if (retval == NULL) + handler_call_die(die_msg); + Py_DECREF(retval); +} + +static void try_call_object(const char *handler_name, PyObject *args) +{ + PyObject *handler; + + handler = get_handler(handler_name); + if (handler) + call_object(handler, args, handler_name); +} + +static void define_value(enum print_arg_type field_type, + const char *ev_name, + const char *field_name, + const char *field_value, + const char *field_str) +{ + const char *handler_name = "define_flag_value"; + PyObject *t; + unsigned long long value; + unsigned n = 0; + + if (field_type == PRINT_SYMBOL) + handler_name = "define_symbolic_value"; + + t = PyTuple_New(4); + if (!t) + Py_FatalError("couldn't create Python tuple"); + + value = eval_flag(field_value); + + PyTuple_SetItem(t, n++, PyString_FromString(ev_name)); + PyTuple_SetItem(t, n++, PyString_FromString(field_name)); + PyTuple_SetItem(t, n++, PyInt_FromLong(value)); + PyTuple_SetItem(t, n++, PyString_FromString(field_str)); + + try_call_object(handler_name, t); + + Py_DECREF(t); +} + +static void define_values(enum print_arg_type field_type, + struct print_flag_sym *field, + const char *ev_name, + const char *field_name) +{ + define_value(field_type, ev_name, field_name, field->value, + field->str); + + if (field->next) + define_values(field_type, field->next, ev_name, field_name); +} + +static void define_field(enum print_arg_type field_type, + const char *ev_name, + const char *field_name, + const char *delim) +{ + const char *handler_name = "define_flag_field"; + PyObject *t; + unsigned n = 0; + + if (field_type == PRINT_SYMBOL) + handler_name = "define_symbolic_field"; + + if (field_type == PRINT_FLAGS) + t = PyTuple_New(3); + else + t = PyTuple_New(2); + if (!t) + Py_FatalError("couldn't create Python tuple"); + + PyTuple_SetItem(t, n++, PyString_FromString(ev_name)); + PyTuple_SetItem(t, n++, PyString_FromString(field_name)); + if (field_type == PRINT_FLAGS) + PyTuple_SetItem(t, n++, PyString_FromString(delim)); + + try_call_object(handler_name, t); + + Py_DECREF(t); +} + +static void define_event_symbols(struct event_format *event, + const char *ev_name, + struct print_arg *args) +{ + switch (args->type) { + case PRINT_NULL: + break; + case PRINT_ATOM: + define_value(PRINT_FLAGS, ev_name, cur_field_name, "0", + args->atom.atom); + zero_flag_atom = 0; + break; + case PRINT_FIELD: + free(cur_field_name); + cur_field_name = strdup(args->field.name); + break; + case PRINT_FLAGS: + define_event_symbols(event, ev_name, args->flags.field); + define_field(PRINT_FLAGS, ev_name, cur_field_name, + args->flags.delim); + define_values(PRINT_FLAGS, args->flags.flags, ev_name, + cur_field_name); + break; + case PRINT_SYMBOL: + define_event_symbols(event, ev_name, args->symbol.field); + define_field(PRINT_SYMBOL, ev_name, cur_field_name, NULL); + define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name, + cur_field_name); + break; + case PRINT_HEX: + define_event_symbols(event, ev_name, args->hex.field); + 
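/*
+		 * args->hex.size is itself a print_arg (an atom or a
+		 * field reference), so it gets the same recursive
+		 * treatment as the value field above.
+		 */
+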
define_event_symbols(event, ev_name, args->hex.size); + break; + case PRINT_INT_ARRAY: + define_event_symbols(event, ev_name, args->int_array.field); + define_event_symbols(event, ev_name, args->int_array.count); + define_event_symbols(event, ev_name, args->int_array.el_size); + break; + case PRINT_STRING: + break; + case PRINT_TYPE: + define_event_symbols(event, ev_name, args->typecast.item); + break; + case PRINT_OP: + if (strcmp(args->op.op, ":") == 0) + zero_flag_atom = 1; + define_event_symbols(event, ev_name, args->op.left); + define_event_symbols(event, ev_name, args->op.right); + break; + default: + /* gcc warns for these? */ + case PRINT_BSTRING: + case PRINT_DYNAMIC_ARRAY: + case PRINT_FUNC: + case PRINT_BITMASK: + /* we should warn... */ + return; + } + + if (args->next) + define_event_symbols(event, ev_name, args->next); +} + +static PyObject *get_field_numeric_entry(struct event_format *event, + struct format_field *field, void *data) +{ + bool is_array = field->flags & FIELD_IS_ARRAY; + PyObject *obj, *list = NULL; + unsigned long long val; + unsigned int item_size, n_items, i; + + if (is_array) { + list = PyList_New(field->arraylen); + item_size = field->size / field->arraylen; + n_items = field->arraylen; + } else { + item_size = field->size; + n_items = 1; + } + + for (i = 0; i < n_items; i++) { + + val = read_size(event, data + field->offset + i * item_size, + item_size); + if (field->flags & FIELD_IS_SIGNED) { + if ((long long)val >= LONG_MIN && + (long long)val <= LONG_MAX) + obj = PyInt_FromLong(val); + else + obj = PyLong_FromLongLong(val); + } else { + if (val <= LONG_MAX) + obj = PyInt_FromLong(val); + else + obj = PyLong_FromUnsignedLongLong(val); + } + if (is_array) + PyList_SET_ITEM(list, i, obj); + } + if (is_array) + obj = list; + return obj; +} + + +static PyObject *python_process_callchain(struct perf_sample *sample, + struct perf_evsel *evsel, + struct addr_location *al) +{ + PyObject *pylist; + + pylist = PyList_New(0); + if (!pylist) + Py_FatalError("couldn't create Python list"); + + if (!symbol_conf.use_callchain || !sample->callchain) + goto exit; + + if (thread__resolve_callchain(al->thread, evsel, + sample, NULL, NULL, + PERF_MAX_STACK_DEPTH) != 0) { + pr_err("Failed to resolve callchain. 
Skipping\n"); + goto exit; + } + callchain_cursor_commit(&callchain_cursor); + + + while (1) { + PyObject *pyelem; + struct callchain_cursor_node *node; + node = callchain_cursor_current(&callchain_cursor); + if (!node) + break; + + pyelem = PyDict_New(); + if (!pyelem) + Py_FatalError("couldn't create Python dictionary"); + + + pydict_set_item_string_decref(pyelem, "ip", + PyLong_FromUnsignedLongLong(node->ip)); + + if (node->sym) { + PyObject *pysym = PyDict_New(); + if (!pysym) + Py_FatalError("couldn't create Python dictionary"); + pydict_set_item_string_decref(pysym, "start", + PyLong_FromUnsignedLongLong(node->sym->start)); + pydict_set_item_string_decref(pysym, "end", + PyLong_FromUnsignedLongLong(node->sym->end)); + pydict_set_item_string_decref(pysym, "binding", + PyInt_FromLong(node->sym->binding)); + pydict_set_item_string_decref(pysym, "name", + PyString_FromStringAndSize(node->sym->name, + node->sym->namelen)); + pydict_set_item_string_decref(pyelem, "sym", pysym); + } + + if (node->map) { + struct map *map = node->map; + const char *dsoname = "[unknown]"; + if (map && map->dso && (map->dso->name || map->dso->long_name)) { + if (symbol_conf.show_kernel_path && map->dso->long_name) + dsoname = map->dso->long_name; + else if (map->dso->name) + dsoname = map->dso->name; + } + pydict_set_item_string_decref(pyelem, "dso", + PyString_FromString(dsoname)); + } + + callchain_cursor_advance(&callchain_cursor); + PyList_Append(pylist, pyelem); + Py_DECREF(pyelem); + } + +exit: + return pylist; +} + + +static void python_process_tracepoint(struct perf_sample *sample, + struct perf_evsel *evsel, + struct addr_location *al) +{ + struct event_format *event = evsel->tp_format; + PyObject *handler, *context, *t, *obj, *callchain; + PyObject *dict = NULL; + static char handler_name[256]; + struct format_field *field; + unsigned long s, ns; + unsigned n = 0; + int pid; + int cpu = sample->cpu; + void *data = sample->raw_data; + unsigned long long nsecs = sample->time; + const char *comm = thread__comm_str(al->thread); + + t = PyTuple_New(MAX_FIELDS); + if (!t) + Py_FatalError("couldn't create Python tuple"); + + if (!event) + die("ug! 
no event found for type %d", (int)evsel->attr.config); + + pid = raw_field_value(event, "common_pid", data); + + sprintf(handler_name, "%s__%s", event->system, event->name); + + if (!test_and_set_bit(event->id, events_defined)) + define_event_symbols(event, handler_name, event->print_fmt.args); + + handler = get_handler(handler_name); + if (!handler) { + dict = PyDict_New(); + if (!dict) + Py_FatalError("couldn't create Python dict"); + } + s = nsecs / NSECS_PER_SEC; + ns = nsecs - s * NSECS_PER_SEC; + + scripting_context->event_data = data; + scripting_context->pevent = evsel->tp_format->pevent; + + context = PyCObject_FromVoidPtr(scripting_context, NULL); + + PyTuple_SetItem(t, n++, PyString_FromString(handler_name)); + PyTuple_SetItem(t, n++, context); + + /* ip unwinding */ + callchain = python_process_callchain(sample, evsel, al); + + if (handler) { + PyTuple_SetItem(t, n++, PyInt_FromLong(cpu)); + PyTuple_SetItem(t, n++, PyInt_FromLong(s)); + PyTuple_SetItem(t, n++, PyInt_FromLong(ns)); + PyTuple_SetItem(t, n++, PyInt_FromLong(pid)); + PyTuple_SetItem(t, n++, PyString_FromString(comm)); + PyTuple_SetItem(t, n++, callchain); + } else { + pydict_set_item_string_decref(dict, "common_cpu", PyInt_FromLong(cpu)); + pydict_set_item_string_decref(dict, "common_s", PyInt_FromLong(s)); + pydict_set_item_string_decref(dict, "common_ns", PyInt_FromLong(ns)); + pydict_set_item_string_decref(dict, "common_pid", PyInt_FromLong(pid)); + pydict_set_item_string_decref(dict, "common_comm", PyString_FromString(comm)); + pydict_set_item_string_decref(dict, "common_callchain", callchain); + } + for (field = event->format.fields; field; field = field->next) { + if (field->flags & FIELD_IS_STRING) { + int offset; + if (field->flags & FIELD_IS_DYNAMIC) { + offset = *(int *)(data + field->offset); + offset &= 0xffff; + } else + offset = field->offset; + obj = PyString_FromString((char *)data + offset); + } else { /* FIELD_IS_NUMERIC */ + obj = get_field_numeric_entry(event, field, data); + } + if (handler) + PyTuple_SetItem(t, n++, obj); + else + pydict_set_item_string_decref(dict, field->name, obj); + + } + + if (!handler) + PyTuple_SetItem(t, n++, dict); + + if (_PyTuple_Resize(&t, n) == -1) + Py_FatalError("error resizing Python tuple"); + + if (handler) { + call_object(handler, t, handler_name); + } else { + try_call_object("trace_unhandled", t); + Py_DECREF(dict); + } + + Py_DECREF(t); +} + +static PyObject *tuple_new(unsigned int sz) +{ + PyObject *t; + + t = PyTuple_New(sz); + if (!t) + Py_FatalError("couldn't create Python tuple"); + return t; +} + +static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val) +{ +#if BITS_PER_LONG == 64 + return PyTuple_SetItem(t, pos, PyInt_FromLong(val)); +#endif +#if BITS_PER_LONG == 32 + return PyTuple_SetItem(t, pos, PyLong_FromLongLong(val)); +#endif +} + +static int tuple_set_s32(PyObject *t, unsigned int pos, s32 val) +{ + return PyTuple_SetItem(t, pos, PyInt_FromLong(val)); +} + +static int tuple_set_string(PyObject *t, unsigned int pos, const char *s) +{ + return PyTuple_SetItem(t, pos, PyString_FromString(s)); +} + +static int python_export_evsel(struct db_export *dbe, struct perf_evsel *evsel) +{ + struct tables *tables = container_of(dbe, struct tables, dbe); + PyObject *t; + + t = tuple_new(2); + + tuple_set_u64(t, 0, evsel->db_id); + tuple_set_string(t, 1, perf_evsel__name(evsel)); + + call_object(tables->evsel_handler, t, "evsel_table"); + + Py_DECREF(t); + + return 0; +} + +static int python_export_machine(struct db_export *dbe, + struct machine 
*machine) +{ + struct tables *tables = container_of(dbe, struct tables, dbe); + PyObject *t; + + t = tuple_new(3); + + tuple_set_u64(t, 0, machine->db_id); + tuple_set_s32(t, 1, machine->pid); + tuple_set_string(t, 2, machine->root_dir ? machine->root_dir : ""); + + call_object(tables->machine_handler, t, "machine_table"); + + Py_DECREF(t); + + return 0; +} + +static int python_export_thread(struct db_export *dbe, struct thread *thread, + u64 main_thread_db_id, struct machine *machine) +{ + struct tables *tables = container_of(dbe, struct tables, dbe); + PyObject *t; + + t = tuple_new(5); + + tuple_set_u64(t, 0, thread->db_id); + tuple_set_u64(t, 1, machine->db_id); + tuple_set_u64(t, 2, main_thread_db_id); + tuple_set_s32(t, 3, thread->pid_); + tuple_set_s32(t, 4, thread->tid); + + call_object(tables->thread_handler, t, "thread_table"); + + Py_DECREF(t); + + return 0; +} + +static int python_export_comm(struct db_export *dbe, struct comm *comm) +{ + struct tables *tables = container_of(dbe, struct tables, dbe); + PyObject *t; + + t = tuple_new(2); + + tuple_set_u64(t, 0, comm->db_id); + tuple_set_string(t, 1, comm__str(comm)); + + call_object(tables->comm_handler, t, "comm_table"); + + Py_DECREF(t); + + return 0; +} + +static int python_export_comm_thread(struct db_export *dbe, u64 db_id, + struct comm *comm, struct thread *thread) +{ + struct tables *tables = container_of(dbe, struct tables, dbe); + PyObject *t; + + t = tuple_new(3); + + tuple_set_u64(t, 0, db_id); + tuple_set_u64(t, 1, comm->db_id); + tuple_set_u64(t, 2, thread->db_id); + + call_object(tables->comm_thread_handler, t, "comm_thread_table"); + + Py_DECREF(t); + + return 0; +} + +static int python_export_dso(struct db_export *dbe, struct dso *dso, + struct machine *machine) +{ + struct tables *tables = container_of(dbe, struct tables, dbe); + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + PyObject *t; + + build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); + + t = tuple_new(5); + + tuple_set_u64(t, 0, dso->db_id); + tuple_set_u64(t, 1, machine->db_id); + tuple_set_string(t, 2, dso->short_name); + tuple_set_string(t, 3, dso->long_name); + tuple_set_string(t, 4, sbuild_id); + + call_object(tables->dso_handler, t, "dso_table"); + + Py_DECREF(t); + + return 0; +} + +static int python_export_symbol(struct db_export *dbe, struct symbol *sym, + struct dso *dso) +{ + struct tables *tables = container_of(dbe, struct tables, dbe); + u64 *sym_db_id = symbol__priv(sym); + PyObject *t; + + t = tuple_new(6); + + tuple_set_u64(t, 0, *sym_db_id); + tuple_set_u64(t, 1, dso->db_id); + tuple_set_u64(t, 2, sym->start); + tuple_set_u64(t, 3, sym->end); + tuple_set_s32(t, 4, sym->binding); + tuple_set_string(t, 5, sym->name); + + call_object(tables->symbol_handler, t, "symbol_table"); + + Py_DECREF(t); + + return 0; +} + +static int python_export_branch_type(struct db_export *dbe, u32 branch_type, + const char *name) +{ + struct tables *tables = container_of(dbe, struct tables, dbe); + PyObject *t; + + t = tuple_new(2); + + tuple_set_s32(t, 0, branch_type); + tuple_set_string(t, 1, name); + + call_object(tables->branch_type_handler, t, "branch_type_table"); + + Py_DECREF(t); + + return 0; +} + +static int python_export_sample(struct db_export *dbe, + struct export_sample *es) +{ + struct tables *tables = container_of(dbe, struct tables, dbe); + PyObject *t; + + t = tuple_new(21); + + tuple_set_u64(t, 0, es->db_id); + tuple_set_u64(t, 1, es->evsel->db_id); + tuple_set_u64(t, 2, es->al->machine->db_id); + tuple_set_u64(t, 3, 
es->al->thread->db_id); + tuple_set_u64(t, 4, es->comm_db_id); + tuple_set_u64(t, 5, es->dso_db_id); + tuple_set_u64(t, 6, es->sym_db_id); + tuple_set_u64(t, 7, es->offset); + tuple_set_u64(t, 8, es->sample->ip); + tuple_set_u64(t, 9, es->sample->time); + tuple_set_s32(t, 10, es->sample->cpu); + tuple_set_u64(t, 11, es->addr_dso_db_id); + tuple_set_u64(t, 12, es->addr_sym_db_id); + tuple_set_u64(t, 13, es->addr_offset); + tuple_set_u64(t, 14, es->sample->addr); + tuple_set_u64(t, 15, es->sample->period); + tuple_set_u64(t, 16, es->sample->weight); + tuple_set_u64(t, 17, es->sample->transaction); + tuple_set_u64(t, 18, es->sample->data_src); + tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK); + tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX)); + + call_object(tables->sample_handler, t, "sample_table"); + + Py_DECREF(t); + + return 0; +} + +static int python_export_call_path(struct db_export *dbe, struct call_path *cp) +{ + struct tables *tables = container_of(dbe, struct tables, dbe); + PyObject *t; + u64 parent_db_id, sym_db_id; + + parent_db_id = cp->parent ? cp->parent->db_id : 0; + sym_db_id = cp->sym ? *(u64 *)symbol__priv(cp->sym) : 0; + + t = tuple_new(4); + + tuple_set_u64(t, 0, cp->db_id); + tuple_set_u64(t, 1, parent_db_id); + tuple_set_u64(t, 2, sym_db_id); + tuple_set_u64(t, 3, cp->ip); + + call_object(tables->call_path_handler, t, "call_path_table"); + + Py_DECREF(t); + + return 0; +} + +static int python_export_call_return(struct db_export *dbe, + struct call_return *cr) +{ + struct tables *tables = container_of(dbe, struct tables, dbe); + u64 comm_db_id = cr->comm ? cr->comm->db_id : 0; + PyObject *t; + + t = tuple_new(11); + + tuple_set_u64(t, 0, cr->db_id); + tuple_set_u64(t, 1, cr->thread->db_id); + tuple_set_u64(t, 2, comm_db_id); + tuple_set_u64(t, 3, cr->cp->db_id); + tuple_set_u64(t, 4, cr->call_time); + tuple_set_u64(t, 5, cr->return_time); + tuple_set_u64(t, 6, cr->branch_count); + tuple_set_u64(t, 7, cr->call_ref); + tuple_set_u64(t, 8, cr->return_ref); + tuple_set_u64(t, 9, cr->cp->parent->db_id); + tuple_set_s32(t, 10, cr->flags); + + call_object(tables->call_return_handler, t, "call_return_table"); + + Py_DECREF(t); + + return 0; +} + +static int python_process_call_return(struct call_return *cr, void *data) +{ + struct db_export *dbe = data; + + return db_export__call_return(dbe, cr); +} + +static void python_process_general_event(struct perf_sample *sample, + struct perf_evsel *evsel, + struct addr_location *al) +{ + PyObject *handler, *t, *dict, *callchain, *dict_sample; + static char handler_name[64]; + unsigned n = 0; + + /* + * Use the MAX_FIELDS to make the function expandable, though + * currently there is only one item for the tuple. 
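+ *
+ * The tuple is therefore over-allocated up front and shrunk with
+ * _PyTuple_Resize() to the number of slots actually filled before
+ * the handler is called:
+ *
+ *	t = PyTuple_New(MAX_FIELDS);
+ *	...
+ *	if (_PyTuple_Resize(&t, n) == -1)
+ *		Py_FatalError("error resizing Python tuple");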
+ */ + t = PyTuple_New(MAX_FIELDS); + if (!t) + Py_FatalError("couldn't create Python tuple"); + + dict = PyDict_New(); + if (!dict) + Py_FatalError("couldn't create Python dictionary"); + + dict_sample = PyDict_New(); + if (!dict_sample) + Py_FatalError("couldn't create Python dictionary"); + + snprintf(handler_name, sizeof(handler_name), "%s", "process_event"); + + handler = get_handler(handler_name); + if (!handler) + goto exit; + + pydict_set_item_string_decref(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel))); + pydict_set_item_string_decref(dict, "attr", PyString_FromStringAndSize( + (const char *)&evsel->attr, sizeof(evsel->attr))); + + pydict_set_item_string_decref(dict_sample, "pid", + PyInt_FromLong(sample->pid)); + pydict_set_item_string_decref(dict_sample, "tid", + PyInt_FromLong(sample->tid)); + pydict_set_item_string_decref(dict_sample, "cpu", + PyInt_FromLong(sample->cpu)); + pydict_set_item_string_decref(dict_sample, "ip", + PyLong_FromUnsignedLongLong(sample->ip)); + pydict_set_item_string_decref(dict_sample, "time", + PyLong_FromUnsignedLongLong(sample->time)); + pydict_set_item_string_decref(dict_sample, "period", + PyLong_FromUnsignedLongLong(sample->period)); + pydict_set_item_string_decref(dict, "sample", dict_sample); + + pydict_set_item_string_decref(dict, "raw_buf", PyString_FromStringAndSize( + (const char *)sample->raw_data, sample->raw_size)); + pydict_set_item_string_decref(dict, "comm", + PyString_FromString(thread__comm_str(al->thread))); + if (al->map) { + pydict_set_item_string_decref(dict, "dso", + PyString_FromString(al->map->dso->name)); + } + if (al->sym) { + pydict_set_item_string_decref(dict, "symbol", + PyString_FromString(al->sym->name)); + } + + /* ip unwinding */ + callchain = python_process_callchain(sample, evsel, al); + pydict_set_item_string_decref(dict, "callchain", callchain); + + PyTuple_SetItem(t, n++, dict); + if (_PyTuple_Resize(&t, n) == -1) + Py_FatalError("error resizing Python tuple"); + + call_object(handler, t, handler_name); +exit: + Py_DECREF(dict); + Py_DECREF(t); +} + +static void python_process_event(union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct addr_location *al) +{ + struct tables *tables = &tables_global; + + switch (evsel->attr.type) { + case PERF_TYPE_TRACEPOINT: + python_process_tracepoint(sample, evsel, al); + break; + /* Reserve for future process_hw/sw/raw APIs */ + default: + if (tables->db_export_mode) + db_export__sample(&tables->dbe, event, sample, evsel, al); + else + python_process_general_event(sample, evsel, al); + } +} + +static int run_start_sub(void) +{ + main_module = PyImport_AddModule("__main__"); + if (main_module == NULL) + return -1; + Py_INCREF(main_module); + + main_dict = PyModule_GetDict(main_module); + if (main_dict == NULL) + goto error; + Py_INCREF(main_dict); + + try_call_object("trace_begin", NULL); + + return 0; + +error: + Py_XDECREF(main_dict); + Py_XDECREF(main_module); + return -1; +} + +#define SET_TABLE_HANDLER_(name, handler_name, table_name) do { \ + tables->handler_name = get_handler(#table_name); \ + if (tables->handler_name) \ + tables->dbe.export_ ## name = python_export_ ## name; \ +} while (0) + +#define SET_TABLE_HANDLER(name) \ + SET_TABLE_HANDLER_(name, name ## _handler, name ## _table) + +static void set_table_handlers(struct tables *tables) +{ + const char *perf_db_export_mode = "perf_db_export_mode"; + const char *perf_db_export_calls = "perf_db_export_calls"; + PyObject *db_export_mode, *db_export_calls; + bool 
export_calls = false; + int ret; + + memset(tables, 0, sizeof(struct tables)); + if (db_export__init(&tables->dbe)) + Py_FatalError("failed to initialize export"); + + db_export_mode = PyDict_GetItemString(main_dict, perf_db_export_mode); + if (!db_export_mode) + return; + + ret = PyObject_IsTrue(db_export_mode); + if (ret == -1) + handler_call_die(perf_db_export_mode); + if (!ret) + return; + + tables->dbe.crp = NULL; + db_export_calls = PyDict_GetItemString(main_dict, perf_db_export_calls); + if (db_export_calls) { + ret = PyObject_IsTrue(db_export_calls); + if (ret == -1) + handler_call_die(perf_db_export_calls); + export_calls = !!ret; + } + + if (export_calls) { + tables->dbe.crp = + call_return_processor__new(python_process_call_return, + &tables->dbe); + if (!tables->dbe.crp) + Py_FatalError("failed to create calls processor"); + } + + tables->db_export_mode = true; + /* + * Reserve per symbol space for symbol->db_id via symbol__priv() + */ + symbol_conf.priv_size = sizeof(u64); + + SET_TABLE_HANDLER(evsel); + SET_TABLE_HANDLER(machine); + SET_TABLE_HANDLER(thread); + SET_TABLE_HANDLER(comm); + SET_TABLE_HANDLER(comm_thread); + SET_TABLE_HANDLER(dso); + SET_TABLE_HANDLER(symbol); + SET_TABLE_HANDLER(branch_type); + SET_TABLE_HANDLER(sample); + SET_TABLE_HANDLER(call_path); + SET_TABLE_HANDLER(call_return); +} + +/* + * Start trace script + */ +static int python_start_script(const char *script, int argc, const char **argv) +{ + struct tables *tables = &tables_global; + const char **command_line; + char buf[PATH_MAX]; + int i, err = 0; + FILE *fp; + + command_line = malloc((argc + 1) * sizeof(const char *)); + command_line[0] = script; + for (i = 1; i < argc + 1; i++) + command_line[i] = argv[i - 1]; + + Py_Initialize(); + + initperf_trace_context(); + + PySys_SetArgv(argc + 1, (char **)command_line); + + fp = fopen(script, "r"); + if (!fp) { + sprintf(buf, "Can't open python script \"%s\"", script); + perror(buf); + err = -1; + goto error; + } + + err = PyRun_SimpleFile(fp, script); + if (err) { + fprintf(stderr, "Error running python script %s\n", script); + goto error; + } + + err = run_start_sub(); + if (err) { + fprintf(stderr, "Error starting python script %s\n", script); + goto error; + } + + free(command_line); + + set_table_handlers(tables); + + if (tables->db_export_mode) { + err = db_export__branch_types(&tables->dbe); + if (err) + goto error; + } + + return err; +error: + Py_Finalize(); + free(command_line); + + return err; +} + +static int python_flush_script(void) +{ + struct tables *tables = &tables_global; + + return db_export__flush(&tables->dbe); +} + +/* + * Stop trace script + */ +static int python_stop_script(void) +{ + struct tables *tables = &tables_global; + + try_call_object("trace_end", NULL); + + db_export__exit(&tables->dbe); + + Py_XDECREF(main_dict); + Py_XDECREF(main_module); + Py_Finalize(); + + return 0; +} + +static int python_generate_script(struct pevent *pevent, const char *outfile) +{ + struct event_format *event = NULL; + struct format_field *f; + char fname[PATH_MAX]; + int not_first, count; + FILE *ofp; + + sprintf(fname, "%s.py", outfile); + ofp = fopen(fname, "w"); + if (ofp == NULL) { + fprintf(stderr, "couldn't open %s\n", fname); + return -1; + } + fprintf(ofp, "# perf script event handlers, " + "generated by perf script -g python\n"); + + fprintf(ofp, "# Licensed under the terms of the GNU GPL" + " License version 2\n\n"); + + fprintf(ofp, "# The common_* event handler fields are the most useful " + "fields common to\n"); + + fprintf(ofp, 
"# all events. They don't necessarily correspond to " + "the 'common_*' fields\n"); + + fprintf(ofp, "# in the format files. Those fields not available as " + "handler params can\n"); + + fprintf(ofp, "# be retrieved using Python functions of the form " + "common_*(context).\n"); + + fprintf(ofp, "# See the perf-trace-python Documentation for the list " + "of available functions.\n\n"); + + fprintf(ofp, "import os\n"); + fprintf(ofp, "import sys\n\n"); + + fprintf(ofp, "sys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n"); + fprintf(ofp, "\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n"); + fprintf(ofp, "\nfrom perf_trace_context import *\n"); + fprintf(ofp, "from Core import *\n\n\n"); + + fprintf(ofp, "def trace_begin():\n"); + fprintf(ofp, "\tprint \"in trace_begin\"\n\n"); + + fprintf(ofp, "def trace_end():\n"); + fprintf(ofp, "\tprint \"in trace_end\"\n\n"); + + while ((event = trace_find_next_event(pevent, event))) { + fprintf(ofp, "def %s__%s(", event->system, event->name); + fprintf(ofp, "event_name, "); + fprintf(ofp, "context, "); + fprintf(ofp, "common_cpu,\n"); + fprintf(ofp, "\tcommon_secs, "); + fprintf(ofp, "common_nsecs, "); + fprintf(ofp, "common_pid, "); + fprintf(ofp, "common_comm,\n\t"); + fprintf(ofp, "common_callchain, "); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + if (++count % 5 == 0) + fprintf(ofp, "\n\t"); + + fprintf(ofp, "%s", f->name); + } + fprintf(ofp, "):\n"); + + fprintf(ofp, "\t\tprint_header(event_name, common_cpu, " + "common_secs, common_nsecs,\n\t\t\t" + "common_pid, common_comm)\n\n"); + + fprintf(ofp, "\t\tprint \""); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + if (count && count % 3 == 0) { + fprintf(ofp, "\" \\\n\t\t\""); + } + count++; + + fprintf(ofp, "%s=", f->name); + if (f->flags & FIELD_IS_STRING || + f->flags & FIELD_IS_FLAG || + f->flags & FIELD_IS_ARRAY || + f->flags & FIELD_IS_SYMBOLIC) + fprintf(ofp, "%%s"); + else if (f->flags & FIELD_IS_SIGNED) + fprintf(ofp, "%%d"); + else + fprintf(ofp, "%%u"); + } + + fprintf(ofp, "\" %% \\\n\t\t("); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + + if (++count % 5 == 0) + fprintf(ofp, "\n\t\t"); + + if (f->flags & FIELD_IS_FLAG) { + if ((count - 1) % 5 != 0) { + fprintf(ofp, "\n\t\t"); + count = 4; + } + fprintf(ofp, "flag_str(\""); + fprintf(ofp, "%s__%s\", ", event->system, + event->name); + fprintf(ofp, "\"%s\", %s)", f->name, + f->name); + } else if (f->flags & FIELD_IS_SYMBOLIC) { + if ((count - 1) % 5 != 0) { + fprintf(ofp, "\n\t\t"); + count = 4; + } + fprintf(ofp, "symbol_str(\""); + fprintf(ofp, "%s__%s\", ", event->system, + event->name); + fprintf(ofp, "\"%s\", %s)", f->name, + f->name); + } else + fprintf(ofp, "%s", f->name); + } + + fprintf(ofp, ")\n\n"); + + fprintf(ofp, "\t\tfor node in common_callchain:"); + fprintf(ofp, "\n\t\t\tif 'sym' in node:"); + fprintf(ofp, "\n\t\t\t\tprint \"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name'])"); + fprintf(ofp, "\n\t\t\telse:"); + fprintf(ofp, "\n\t\t\t\tprint \"\t[%%x]\" %% (node['ip'])\n\n"); + fprintf(ofp, "\t\tprint \"\\n\"\n\n"); + + } + + fprintf(ofp, "def trace_unhandled(event_name, context, " + "event_fields_dict):\n"); + + fprintf(ofp, "\t\tprint ' '.join(['%%s=%%s'%%(k,str(v))" + "for k,v in sorted(event_fields_dict.items())])\n\n"); + + fprintf(ofp, "def print_header(" + 
"event_name, cpu, secs, nsecs, pid, comm):\n" + "\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t" + "(event_name, cpu, secs, nsecs, pid, comm),\n"); + + fclose(ofp); + + fprintf(stderr, "generated Python script: %s\n", fname); + + return 0; +} + +struct scripting_ops python_scripting_ops = { + .name = "Python", + .start_script = python_start_script, + .flush_script = python_flush_script, + .stop_script = python_stop_script, + .process_event = python_process_event, + .generate_script = python_generate_script, +}; diff --git a/kernel/tools/perf/util/session.c b/kernel/tools/perf/util/session.c new file mode 100644 index 000000000..0c7401257 --- /dev/null +++ b/kernel/tools/perf/util/session.c @@ -0,0 +1,1818 @@ +#include +#include + +#include +#include +#include +#include + +#include "evlist.h" +#include "evsel.h" +#include "session.h" +#include "tool.h" +#include "sort.h" +#include "util.h" +#include "cpumap.h" +#include "perf_regs.h" +#include "asm/bug.h" + +static int machines__deliver_event(struct machines *machines, + struct perf_evlist *evlist, + union perf_event *event, + struct perf_sample *sample, + struct perf_tool *tool, u64 file_offset); + +static int perf_session__open(struct perf_session *session) +{ + struct perf_data_file *file = session->file; + + if (perf_session__read_header(session) < 0) { + pr_err("incompatible file format (rerun with -v to learn more)"); + return -1; + } + + if (perf_data_file__is_pipe(file)) + return 0; + + if (!perf_evlist__valid_sample_type(session->evlist)) { + pr_err("non matching sample_type"); + return -1; + } + + if (!perf_evlist__valid_sample_id_all(session->evlist)) { + pr_err("non matching sample_id_all"); + return -1; + } + + if (!perf_evlist__valid_read_format(session->evlist)) { + pr_err("non matching read_format"); + return -1; + } + + return 0; +} + +void perf_session__set_id_hdr_size(struct perf_session *session) +{ + u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); + + machines__set_id_hdr_size(&session->machines, id_hdr_size); +} + +int perf_session__create_kernel_maps(struct perf_session *session) +{ + int ret = machine__create_kernel_maps(&session->machines.host); + + if (ret >= 0) + ret = machines__create_guest_kernel_maps(&session->machines); + return ret; +} + +static void perf_session__destroy_kernel_maps(struct perf_session *session) +{ + machines__destroy_kernel_maps(&session->machines); +} + +static bool perf_session__has_comm_exec(struct perf_session *session) +{ + struct perf_evsel *evsel; + + evlist__for_each(session->evlist, evsel) { + if (evsel->attr.comm_exec) + return true; + } + + return false; +} + +static void perf_session__set_comm_exec(struct perf_session *session) +{ + bool comm_exec = perf_session__has_comm_exec(session); + + machines__set_comm_exec(&session->machines, comm_exec); +} + +static int ordered_events__deliver_event(struct ordered_events *oe, + struct ordered_event *event) +{ + struct perf_sample sample; + struct perf_session *session = container_of(oe, struct perf_session, + ordered_events); + int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample); + + if (ret) { + pr_err("Can't parse sample, err = %d\n", ret); + return ret; + } + + return machines__deliver_event(&session->machines, session->evlist, event->event, + &sample, session->tool, event->file_offset); +} + +struct perf_session *perf_session__new(struct perf_data_file *file, + bool repipe, struct perf_tool *tool) +{ + struct perf_session *session = zalloc(sizeof(*session)); + + if (!session) + goto 
out; + + session->repipe = repipe; + session->tool = tool; + machines__init(&session->machines); + ordered_events__init(&session->ordered_events, ordered_events__deliver_event); + + if (file) { + if (perf_data_file__open(file)) + goto out_delete; + + session->file = file; + + if (perf_data_file__is_read(file)) { + if (perf_session__open(session) < 0) + goto out_close; + + perf_session__set_id_hdr_size(session); + perf_session__set_comm_exec(session); + } + } + + if (!file || perf_data_file__is_write(file)) { + /* + * In O_RDONLY mode this will be performed when reading the + * kernel MMAP event, in perf_event__process_mmap(). + */ + if (perf_session__create_kernel_maps(session) < 0) + pr_warning("Cannot read kernel map\n"); + } + + if (tool && tool->ordering_requires_timestamps && + tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) { + dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); + tool->ordered_events = false; + } + + return session; + + out_close: + perf_data_file__close(file); + out_delete: + perf_session__delete(session); + out: + return NULL; +} + +static void perf_session__delete_threads(struct perf_session *session) +{ + machine__delete_threads(&session->machines.host); +} + +static void perf_session_env__delete(struct perf_session_env *env) +{ + zfree(&env->hostname); + zfree(&env->os_release); + zfree(&env->version); + zfree(&env->arch); + zfree(&env->cpu_desc); + zfree(&env->cpuid); + + zfree(&env->cmdline); + zfree(&env->sibling_cores); + zfree(&env->sibling_threads); + zfree(&env->numa_nodes); + zfree(&env->pmu_mappings); +} + +void perf_session__delete(struct perf_session *session) +{ + perf_session__destroy_kernel_maps(session); + perf_session__delete_threads(session); + perf_session_env__delete(&session->header.env); + machines__exit(&session->machines); + if (session->file) + perf_data_file__close(session->file); + free(session); +} + +static int process_event_synth_tracing_data_stub(struct perf_tool *tool + __maybe_unused, + union perf_event *event + __maybe_unused, + struct perf_session *session + __maybe_unused) +{ + dump_printf(": unhandled!\n"); + return 0; +} + +static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct perf_evlist **pevlist + __maybe_unused) +{ + dump_printf(": unhandled!\n"); + return 0; +} + +static int process_event_sample_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct perf_sample *sample __maybe_unused, + struct perf_evsel *evsel __maybe_unused, + struct machine *machine __maybe_unused) +{ + dump_printf(": unhandled!\n"); + return 0; +} + +static int process_event_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) +{ + dump_printf(": unhandled!\n"); + return 0; +} + +static int process_build_id_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct perf_session *session __maybe_unused) +{ + dump_printf(": unhandled!\n"); + return 0; +} + +static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct ordered_events *oe __maybe_unused) +{ + dump_printf(": unhandled!\n"); + return 0; +} + +static int process_finished_round(struct perf_tool *tool, + union perf_event *event, + struct ordered_events *oe); + +static int 
process_id_index_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct perf_session *perf_session + __maybe_unused) +{ + dump_printf(": unhandled!\n"); + return 0; +} + +void perf_tool__fill_defaults(struct perf_tool *tool) +{ + if (tool->sample == NULL) + tool->sample = process_event_sample_stub; + if (tool->mmap == NULL) + tool->mmap = process_event_stub; + if (tool->mmap2 == NULL) + tool->mmap2 = process_event_stub; + if (tool->comm == NULL) + tool->comm = process_event_stub; + if (tool->fork == NULL) + tool->fork = process_event_stub; + if (tool->exit == NULL) + tool->exit = process_event_stub; + if (tool->lost == NULL) + tool->lost = perf_event__process_lost; + if (tool->read == NULL) + tool->read = process_event_sample_stub; + if (tool->throttle == NULL) + tool->throttle = process_event_stub; + if (tool->unthrottle == NULL) + tool->unthrottle = process_event_stub; + if (tool->attr == NULL) + tool->attr = process_event_synth_attr_stub; + if (tool->tracing_data == NULL) + tool->tracing_data = process_event_synth_tracing_data_stub; + if (tool->build_id == NULL) + tool->build_id = process_build_id_stub; + if (tool->finished_round == NULL) { + if (tool->ordered_events) + tool->finished_round = process_finished_round; + else + tool->finished_round = process_finished_round_stub; + } + if (tool->id_index == NULL) + tool->id_index = process_id_index_stub; +} + +static void swap_sample_id_all(union perf_event *event, void *data) +{ + void *end = (void *) event + event->header.size; + int size = end - data; + + BUG_ON(size % sizeof(u64)); + mem_bswap_64(data, size); +} + +static void perf_event__all64_swap(union perf_event *event, + bool sample_id_all __maybe_unused) +{ + struct perf_event_header *hdr = &event->header; + mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); +} + +static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) +{ + event->comm.pid = bswap_32(event->comm.pid); + event->comm.tid = bswap_32(event->comm.tid); + + if (sample_id_all) { + void *data = &event->comm.comm; + + data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); + swap_sample_id_all(event, data); + } +} + +static void perf_event__mmap_swap(union perf_event *event, + bool sample_id_all) +{ + event->mmap.pid = bswap_32(event->mmap.pid); + event->mmap.tid = bswap_32(event->mmap.tid); + event->mmap.start = bswap_64(event->mmap.start); + event->mmap.len = bswap_64(event->mmap.len); + event->mmap.pgoff = bswap_64(event->mmap.pgoff); + + if (sample_id_all) { + void *data = &event->mmap.filename; + + data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); + swap_sample_id_all(event, data); + } +} + +static void perf_event__mmap2_swap(union perf_event *event, + bool sample_id_all) +{ + event->mmap2.pid = bswap_32(event->mmap2.pid); + event->mmap2.tid = bswap_32(event->mmap2.tid); + event->mmap2.start = bswap_64(event->mmap2.start); + event->mmap2.len = bswap_64(event->mmap2.len); + event->mmap2.pgoff = bswap_64(event->mmap2.pgoff); + event->mmap2.maj = bswap_32(event->mmap2.maj); + event->mmap2.min = bswap_32(event->mmap2.min); + event->mmap2.ino = bswap_64(event->mmap2.ino); + + if (sample_id_all) { + void *data = &event->mmap2.filename; + + data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); + swap_sample_id_all(event, data); + } +} +static void perf_event__task_swap(union perf_event *event, bool sample_id_all) +{ + event->fork.pid = bswap_32(event->fork.pid); + event->fork.tid = bswap_32(event->fork.tid); + event->fork.ppid = bswap_32(event->fork.ppid); + 
event->fork.ptid = bswap_32(event->fork.ptid);
+	event->fork.time = bswap_64(event->fork.time);
+
+	if (sample_id_all)
+		swap_sample_id_all(event, &event->fork + 1);
+}
+
+static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
+{
+	event->read.pid		 = bswap_32(event->read.pid);
+	event->read.tid		 = bswap_32(event->read.tid);
+	event->read.value	 = bswap_64(event->read.value);
+	event->read.time_enabled = bswap_64(event->read.time_enabled);
+	event->read.time_running = bswap_64(event->read.time_running);
+	event->read.id		 = bswap_64(event->read.id);
+
+	if (sample_id_all)
+		swap_sample_id_all(event, &event->read + 1);
+}
+
+static void perf_event__throttle_swap(union perf_event *event,
+				      bool sample_id_all)
+{
+	event->throttle.time	  = bswap_64(event->throttle.time);
+	event->throttle.id	  = bswap_64(event->throttle.id);
+	event->throttle.stream_id = bswap_64(event->throttle.stream_id);
+
+	if (sample_id_all)
+		swap_sample_id_all(event, &event->throttle + 1);
+}
+
+static u8 revbyte(u8 b)
+{
+	int rev = (b >> 4) | ((b & 0xf) << 4);
+	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
+	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
+	return (u8) rev;
+}
+
+/*
+ * XXX this is a hack in an attempt to carry the flags bitfield
+ * through the endian village. The ABI says:
+ *
+ * Bit-fields are allocated from right to left (least to most significant)
+ * on little-endian implementations and from left to right (most to least
+ * significant) on big-endian implementations.
+ *
+ * The above seems to be byte specific, so we need to reverse each
+ * byte of the bitfield. 'Internet' also says this might be implementation
+ * specific and we probably need a proper fix that carries the
+ * perf_event_attr bitfield flags in a separate data file FEAT_ section.
+ * Though this seems to work for now.
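+ *
+ * As a worked example, revbyte() above mirrors the bit order of a
+ * single byte: revbyte(0x01) == 0x80 and revbyte(0x03) == 0xc0.
+ * swap_bitfield() below applies it to each byte of the bitfield,
+ * reversing bits in place without moving the bytes themselves.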
+ */
+static void swap_bitfield(u8 *p, unsigned len)
+{
+	unsigned i;
+
+	for (i = 0; i < len; i++) {
+		*p = revbyte(*p);
+		p++;
+	}
+}
+
+/* exported for swapping attributes in file header */
+void perf_event__attr_swap(struct perf_event_attr *attr)
+{
+	attr->type		= bswap_32(attr->type);
+	attr->size		= bswap_32(attr->size);
+	attr->config		= bswap_64(attr->config);
+	attr->sample_period	= bswap_64(attr->sample_period);
+	attr->sample_type	= bswap_64(attr->sample_type);
+	attr->read_format	= bswap_64(attr->read_format);
+	attr->wakeup_events	= bswap_32(attr->wakeup_events);
+	attr->bp_type		= bswap_32(attr->bp_type);
+	attr->bp_addr		= bswap_64(attr->bp_addr);
+	attr->bp_len		= bswap_64(attr->bp_len);
+	attr->branch_sample_type = bswap_64(attr->branch_sample_type);
+	attr->sample_regs_user	= bswap_64(attr->sample_regs_user);
+	attr->sample_stack_user = bswap_32(attr->sample_stack_user);
+
+	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
+}
+
+static void perf_event__hdr_attr_swap(union perf_event *event,
+				      bool sample_id_all __maybe_unused)
+{
+	size_t size;
+
+	perf_event__attr_swap(&event->attr.attr);
+
+	size = event->header.size;
+	size -= (void *)&event->attr.id - (void *)event;
+	mem_bswap_64(event->attr.id, size);
+}
+
+static void perf_event__event_type_swap(union perf_event *event,
+					bool sample_id_all __maybe_unused)
+{
+	event->event_type.event_type.event_id =
+		bswap_64(event->event_type.event_type.event_id);
+}
+
+static void perf_event__tracing_data_swap(union perf_event *event,
+					  bool sample_id_all __maybe_unused)
+{
+	event->tracing_data.size = bswap_32(event->tracing_data.size);
+}
+
+typedef void (*perf_event__swap_op)(union perf_event *event,
+				    bool sample_id_all);
+
+static perf_event__swap_op perf_event__swap_ops[] = {
+	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
+	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
+	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
+	[PERF_RECORD_FORK]		  = perf_event__task_swap,
+	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
+	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
+	[PERF_RECORD_READ]		  = perf_event__read_swap,
+	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
+	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
+	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
+	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
+	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
+	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
+	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
+	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
+	[PERF_RECORD_HEADER_MAX]	  = NULL,
+};
+
+/*
+ * When perf record finishes a pass on all buffers, it records this pseudo
+ * event.
+ * We record the max timestamp t found in pass n.
+ * Assuming these timestamps are monotonic across cpus, we know that if
+ * a buffer still has events with timestamps below t, they will all be
+ * available and then read in pass n + 1.
+ * Hence when we start to read pass n + 2, we can safely flush all
+ * events with timestamps below t.
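+ *
+ * In the example below, pass n records a max timestamp of 4, so
+ * everything below 4 can be flushed once pass n + 1 has been read;
+ * pass n + 1 records a max of 7, so everything below 7 can be
+ * flushed after pass n + 2, and so on: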
+ * + * ============ PASS n ================= + * CPU 0 | CPU 1 + * | + * cnt1 timestamps | cnt2 timestamps + * 1 | 2 + * 2 | 3 + * - | 4 <--- max recorded + * + * ============ PASS n + 1 ============== + * CPU 0 | CPU 1 + * | + * cnt1 timestamps | cnt2 timestamps + * 3 | 5 + * 4 | 6 + * 5 | 7 <---- max recorded + * + * Flush all events below timestamp 4 + * + * ============ PASS n + 2 ============== + * CPU 0 | CPU 1 + * | + * cnt1 timestamps | cnt2 timestamps + * 6 | 8 + * 7 | 9 + * - | 10 + * + * Flush all events below timestamp 7 + * etc... + */ +static int process_finished_round(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct ordered_events *oe) +{ + return ordered_events__flush(oe, OE_FLUSH__ROUND); +} + +int perf_session__queue_event(struct perf_session *s, union perf_event *event, + struct perf_sample *sample, u64 file_offset) +{ + return ordered_events__queue(&s->ordered_events, event, sample, file_offset); +} + +static void callchain__lbr_callstack_printf(struct perf_sample *sample) +{ + struct ip_callchain *callchain = sample->callchain; + struct branch_stack *lbr_stack = sample->branch_stack; + u64 kernel_callchain_nr = callchain->nr; + unsigned int i; + + for (i = 0; i < kernel_callchain_nr; i++) { + if (callchain->ips[i] == PERF_CONTEXT_USER) + break; + } + + if ((i != kernel_callchain_nr) && lbr_stack->nr) { + u64 total_nr; + /* + * The LBR callstack holds only the user call chain; + * i is the number of kernel call chain entries and + * the extra 1 is the PERF_CONTEXT_USER marker. + * + * The user call chain is stored in LBR registers. + * Each LBR entry is a register pair: the caller is + * stored in the "from" register, while the callee is + * stored in the "to" register. + * For example, for the call stack + * "A"->"B"->"C"->"D", + * the LBR entries record + * "C"->"D", "B"->"C", "A"->"B". + * So only the first "to" register and all "from" + * registers are needed to construct the whole stack. + */ + total_nr = i + 1 + lbr_stack->nr + 1; + kernel_callchain_nr = i + 1; + + printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr); + + for (i = 0; i < kernel_callchain_nr; i++) + printf("..... %2d: %016" PRIx64 "\n", + i, callchain->ips[i]); + + printf("..... %2d: %016" PRIx64 "\n", + (int)(kernel_callchain_nr), lbr_stack->entries[0].to); + for (i = 0; i < lbr_stack->nr; i++) + printf("..... %2d: %016" PRIx64 "\n", + (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from); + } +} + +static void callchain__printf(struct perf_evsel *evsel, + struct perf_sample *sample) +{ + unsigned int i; + struct ip_callchain *callchain = sample->callchain; + + if (has_branch_callstack(evsel)) + callchain__lbr_callstack_printf(sample); + + printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr); + + for (i = 0; i < callchain->nr; i++) + printf("..... %2d: %016" PRIx64 "\n", + i, callchain->ips[i]); +} + +static void branch_stack__printf(struct perf_sample *sample) +{ + uint64_t i; + + printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr); + + for (i = 0; i < sample->branch_stack->nr; i++) + printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n", + i, sample->branch_stack->entries[i].from, + sample->branch_stack->entries[i].to); +} + +static void regs_dump__printf(u64 mask, u64 *regs) +{ + unsigned rid, i = 0; + + for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) { + u64 val = regs[i++]; + + printf("....
%-5s 0x%" PRIx64 "\n", + perf_reg_name(rid), val); + } +} + +static const char *regs_abi[] = { + [PERF_SAMPLE_REGS_ABI_NONE] = "none", + [PERF_SAMPLE_REGS_ABI_32] = "32-bit", + [PERF_SAMPLE_REGS_ABI_64] = "64-bit", +}; + +static inline const char *regs_dump_abi(struct regs_dump *d) +{ + if (d->abi > PERF_SAMPLE_REGS_ABI_64) + return "unknown"; + + return regs_abi[d->abi]; +} + +static void regs__printf(const char *type, struct regs_dump *regs) +{ + u64 mask = regs->mask; + + printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n", + type, + mask, + regs_dump_abi(regs)); + + regs_dump__printf(mask, regs->regs); +} + +static void regs_user__printf(struct perf_sample *sample) +{ + struct regs_dump *user_regs = &sample->user_regs; + + if (user_regs->regs) + regs__printf("user", user_regs); +} + +static void regs_intr__printf(struct perf_sample *sample) +{ + struct regs_dump *intr_regs = &sample->intr_regs; + + if (intr_regs->regs) + regs__printf("intr", intr_regs); +} + +static void stack_user__printf(struct stack_dump *dump) +{ + printf("... ustack: size %" PRIu64 ", offset 0x%x\n", + dump->size, dump->offset); +} + +static void perf_evlist__print_tstamp(struct perf_evlist *evlist, + union perf_event *event, + struct perf_sample *sample) +{ + u64 sample_type = __perf_evlist__combined_sample_type(evlist); + + if (event->header.type != PERF_RECORD_SAMPLE && + !perf_evlist__sample_id_all(evlist)) { + fputs("-1 -1 ", stdout); + return; + } + + if ((sample_type & PERF_SAMPLE_CPU)) + printf("%u ", sample->cpu); + + if (sample_type & PERF_SAMPLE_TIME) + printf("%" PRIu64 " ", sample->time); +} + +static void sample_read__printf(struct perf_sample *sample, u64 read_format) +{ + printf("... sample_read:\n"); + + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + printf("...... time enabled %016" PRIx64 "\n", + sample->read.time_enabled); + + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + printf("...... time running %016" PRIx64 "\n", + sample->read.time_running); + + if (read_format & PERF_FORMAT_GROUP) { + u64 i; + + printf(".... group nr %" PRIu64 "\n", sample->read.group.nr); + + for (i = 0; i < sample->read.group.nr; i++) { + struct sample_read_value *value; + + value = &sample->read.group.values[i]; + printf("..... id %016" PRIx64 + ", value %016" PRIx64 "\n", + value->id, value->value); + } + } else + printf("..... 
id %016" PRIx64 ", value %016" PRIx64 "\n", + sample->read.one.id, sample->read.one.value); +} + +static void dump_event(struct perf_evlist *evlist, union perf_event *event, + u64 file_offset, struct perf_sample *sample) +{ + if (!dump_trace) + return; + + printf("\n%#" PRIx64 " [%#x]: event: %d\n", + file_offset, event->header.size, event->header.type); + + trace_event(event); + + if (sample) + perf_evlist__print_tstamp(evlist, event, sample); + + printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, + event->header.size, perf_event__name(event->header.type)); +} + +static void dump_sample(struct perf_evsel *evsel, union perf_event *event, + struct perf_sample *sample) +{ + u64 sample_type; + + if (!dump_trace) + return; + + printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", + event->header.misc, sample->pid, sample->tid, sample->ip, + sample->period, sample->addr); + + sample_type = evsel->attr.sample_type; + + if (sample_type & PERF_SAMPLE_CALLCHAIN) + callchain__printf(evsel, sample); + + if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !has_branch_callstack(evsel)) + branch_stack__printf(sample); + + if (sample_type & PERF_SAMPLE_REGS_USER) + regs_user__printf(sample); + + if (sample_type & PERF_SAMPLE_REGS_INTR) + regs_intr__printf(sample); + + if (sample_type & PERF_SAMPLE_STACK_USER) + stack_user__printf(&sample->user_stack); + + if (sample_type & PERF_SAMPLE_WEIGHT) + printf("... weight: %" PRIu64 "\n", sample->weight); + + if (sample_type & PERF_SAMPLE_DATA_SRC) + printf(" . data_src: 0x%"PRIx64"\n", sample->data_src); + + if (sample_type & PERF_SAMPLE_TRANSACTION) + printf("... transaction: %" PRIx64 "\n", sample->transaction); + + if (sample_type & PERF_SAMPLE_READ) + sample_read__printf(sample, evsel->attr.read_format); +} + +static struct machine *machines__find_for_cpumode(struct machines *machines, + union perf_event *event, + struct perf_sample *sample) +{ + const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + struct machine *machine; + + if (perf_guest && + ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || + (cpumode == PERF_RECORD_MISC_GUEST_USER))) { + u32 pid; + + if (event->header.type == PERF_RECORD_MMAP + || event->header.type == PERF_RECORD_MMAP2) + pid = event->mmap.pid; + else + pid = sample->pid; + + machine = machines__find(machines, pid); + if (!machine) + machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID); + return machine; + } + + return &machines->host; +} + +static int deliver_sample_value(struct perf_evlist *evlist, + struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct sample_read_value *v, + struct machine *machine) +{ + struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id); + + if (sid) { + sample->id = v->id; + sample->period = v->value - sid->period; + sid->period = v->value; + } + + if (!sid || sid->evsel == NULL) { + ++evlist->stats.nr_unknown_id; + return 0; + } + + return tool->sample(tool, event, sample, sid->evsel, machine); +} + +static int deliver_sample_group(struct perf_evlist *evlist, + struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + int ret = -EINVAL; + u64 i; + + for (i = 0; i < sample->read.group.nr; i++) { + ret = deliver_sample_value(evlist, tool, event, sample, + &sample->read.group.values[i], + machine); + if (ret) + break; + } + + return ret; +} + +static int + perf_evlist__deliver_sample(struct perf_evlist *evlist, + struct perf_tool *tool, + union perf_event 
*event, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine) +{ + /* We know evsel != NULL. */ + u64 sample_type = evsel->attr.sample_type; + u64 read_format = evsel->attr.read_format; + + /* Standard sample delivery. */ + if (!(sample_type & PERF_SAMPLE_READ)) + return tool->sample(tool, event, sample, evsel, machine); + + /* For PERF_SAMPLE_READ we have either single or group mode. */ + if (read_format & PERF_FORMAT_GROUP) + return deliver_sample_group(evlist, tool, event, sample, + machine); + else + return deliver_sample_value(evlist, tool, event, sample, + &sample->read.one, machine); +} + +static int machines__deliver_event(struct machines *machines, + struct perf_evlist *evlist, + union perf_event *event, + struct perf_sample *sample, + struct perf_tool *tool, u64 file_offset) +{ + struct perf_evsel *evsel; + struct machine *machine; + + dump_event(evlist, event, file_offset, sample); + + evsel = perf_evlist__id2evsel(evlist, sample->id); + + machine = machines__find_for_cpumode(machines, event, sample); + + switch (event->header.type) { + case PERF_RECORD_SAMPLE: + dump_sample(evsel, event, sample); + if (evsel == NULL) { + ++evlist->stats.nr_unknown_id; + return 0; + } + if (machine == NULL) { + ++evlist->stats.nr_unprocessable_samples; + return 0; + } + return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine); + case PERF_RECORD_MMAP: + return tool->mmap(tool, event, sample, machine); + case PERF_RECORD_MMAP2: + return tool->mmap2(tool, event, sample, machine); + case PERF_RECORD_COMM: + return tool->comm(tool, event, sample, machine); + case PERF_RECORD_FORK: + return tool->fork(tool, event, sample, machine); + case PERF_RECORD_EXIT: + return tool->exit(tool, event, sample, machine); + case PERF_RECORD_LOST: + if (tool->lost == perf_event__process_lost) + evlist->stats.total_lost += event->lost.lost; + return tool->lost(tool, event, sample, machine); + case PERF_RECORD_READ: + return tool->read(tool, event, sample, evsel, machine); + case PERF_RECORD_THROTTLE: + return tool->throttle(tool, event, sample, machine); + case PERF_RECORD_UNTHROTTLE: + return tool->unthrottle(tool, event, sample, machine); + default: + ++evlist->stats.nr_unknown_events; + return -1; + } +} + +static s64 perf_session__process_user_event(struct perf_session *session, + union perf_event *event, + u64 file_offset) +{ + struct ordered_events *oe = &session->ordered_events; + struct perf_tool *tool = session->tool; + int fd = perf_data_file__fd(session->file); + int err; + + dump_event(session->evlist, event, file_offset, NULL); + + /* These events are processed right away */ + switch (event->header.type) { + case PERF_RECORD_HEADER_ATTR: + err = tool->attr(tool, event, &session->evlist); + if (err == 0) { + perf_session__set_id_hdr_size(session); + perf_session__set_comm_exec(session); + } + return err; + case PERF_RECORD_HEADER_EVENT_TYPE: + /* + * Deprecated, but we need to handle it for the sake + * of old data files created in pipe mode. 
+ */ + return 0; + case PERF_RECORD_HEADER_TRACING_DATA: + /* setup for reading amidst mmap */ + lseek(fd, file_offset, SEEK_SET); + return tool->tracing_data(tool, event, session); + case PERF_RECORD_HEADER_BUILD_ID: + return tool->build_id(tool, event, session); + case PERF_RECORD_FINISHED_ROUND: + return tool->finished_round(tool, event, oe); + case PERF_RECORD_ID_INDEX: + return tool->id_index(tool, event, session); + default: + return -EINVAL; + } +} + +int perf_session__deliver_synth_event(struct perf_session *session, + union perf_event *event, + struct perf_sample *sample) +{ + struct perf_evlist *evlist = session->evlist; + struct perf_tool *tool = session->tool; + + events_stats__inc(&evlist->stats, event->header.type); + + if (event->header.type >= PERF_RECORD_USER_TYPE_START) + return perf_session__process_user_event(session, event, 0); + + return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0); +} + +static void event_swap(union perf_event *event, bool sample_id_all) +{ + perf_event__swap_op swap; + + swap = perf_event__swap_ops[event->header.type]; + if (swap) + swap(event, sample_id_all); +} + +int perf_session__peek_event(struct perf_session *session, off_t file_offset, + void *buf, size_t buf_sz, + union perf_event **event_ptr, + struct perf_sample *sample) +{ + union perf_event *event; + size_t hdr_sz, rest; + int fd; + + if (session->one_mmap && !session->header.needs_swap) { + event = file_offset - session->one_mmap_offset + + session->one_mmap_addr; + goto out_parse_sample; + } + + if (perf_data_file__is_pipe(session->file)) + return -1; + + fd = perf_data_file__fd(session->file); + hdr_sz = sizeof(struct perf_event_header); + + if (buf_sz < hdr_sz) + return -1; + + if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 || + readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz) + return -1; + + event = (union perf_event *)buf; + + if (session->header.needs_swap) + perf_event_header__bswap(&event->header); + + if (event->header.size < hdr_sz || event->header.size > buf_sz) + return -1; + + rest = event->header.size - hdr_sz; + + if (readn(fd, buf + hdr_sz, rest) != (ssize_t)rest) + return -1; + + if (session->header.needs_swap) + event_swap(event, perf_evlist__sample_id_all(session->evlist)); + +out_parse_sample: + + if (sample && event->header.type < PERF_RECORD_USER_TYPE_START && + perf_evlist__parse_sample(session->evlist, event, sample)) + return -1; + + *event_ptr = event; + + return 0; +} + +static s64 perf_session__process_event(struct perf_session *session, + union perf_event *event, u64 file_offset) +{ + struct perf_evlist *evlist = session->evlist; + struct perf_tool *tool = session->tool; + struct perf_sample sample; + int ret; + + if (session->header.needs_swap) + event_swap(event, perf_evlist__sample_id_all(evlist)); + + if (event->header.type >= PERF_RECORD_HEADER_MAX) + return -EINVAL; + + events_stats__inc(&evlist->stats, event->header.type); + + if (event->header.type >= PERF_RECORD_USER_TYPE_START) + return perf_session__process_user_event(session, event, file_offset); + + /* + * For all kernel events we get the sample data + */ + ret = perf_evlist__parse_sample(evlist, event, &sample); + if (ret) + return ret; + + if (tool->ordered_events) { + ret = perf_session__queue_event(session, event, &sample, file_offset); + if (ret != -ETIME) + return ret; + } + + return machines__deliver_event(&session->machines, evlist, event, + &sample, tool, file_offset); +} + +void perf_event_header__bswap(struct perf_event_header *hdr) +{ + hdr->type = bswap_32(hdr->type); + hdr->misc = 
bswap_16(hdr->misc); + hdr->size = bswap_16(hdr->size); +} + +struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) +{ + return machine__findnew_thread(&session->machines.host, -1, pid); +} + +static struct thread *perf_session__register_idle_thread(struct perf_session *session) +{ + struct thread *thread; + + thread = machine__findnew_thread(&session->machines.host, 0, 0); + if (thread == NULL || thread__set_comm(thread, "swapper", 0)) { + pr_err("problem inserting idle task.\n"); + thread = NULL; + } + + return thread; +} + +static void perf_session__warn_about_errors(const struct perf_session *session) +{ + const struct events_stats *stats = &session->evlist->stats; + const struct ordered_events *oe = &session->ordered_events; + + if (session->tool->lost == perf_event__process_lost && + stats->nr_events[PERF_RECORD_LOST] != 0) { + ui__warning("Processed %d events and lost %d chunks!\n\n" + "Check IO/CPU overload!\n\n", + stats->nr_events[0], + stats->nr_events[PERF_RECORD_LOST]); + } + + if (stats->nr_unknown_events != 0) { + ui__warning("Found %u unknown events!\n\n" + "Is this an older tool processing a perf.data " + "file generated by a more recent tool?\n\n" + "If that is not the case, consider " + "reporting to linux-kernel@vger.kernel.org.\n\n", + stats->nr_unknown_events); + } + + if (stats->nr_unknown_id != 0) { + ui__warning("%u samples with id not present in the header\n", + stats->nr_unknown_id); + } + + if (stats->nr_invalid_chains != 0) { + ui__warning("Found invalid callchains!\n\n" + "%u out of %u events were discarded for this reason.\n\n" + "Consider reporting to linux-kernel@vger.kernel.org.\n\n", + stats->nr_invalid_chains, + stats->nr_events[PERF_RECORD_SAMPLE]); + } + + if (stats->nr_unprocessable_samples != 0) { + ui__warning("%u unprocessable samples recorded.\n" + "Do you have a KVM guest running and not using 'perf kvm'?\n", + stats->nr_unprocessable_samples); + } + + if (oe->nr_unordered_events != 0) + ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events); +} + +volatile int session_done; + +static int __perf_session__process_pipe_events(struct perf_session *session) +{ + struct ordered_events *oe = &session->ordered_events; + struct perf_tool *tool = session->tool; + int fd = perf_data_file__fd(session->file); + union perf_event *event; + uint32_t size, cur_size = 0; + void *buf = NULL; + s64 skip = 0; + u64 head; + ssize_t err; + void *p; + + perf_tool__fill_defaults(tool); + + head = 0; + cur_size = sizeof(union perf_event); + + buf = malloc(cur_size); + if (!buf) + return -errno; +more: + event = buf; + err = readn(fd, event, sizeof(struct perf_event_header)); + if (err <= 0) { + if (err == 0) + goto done; + + pr_err("failed to read event header\n"); + goto out_err; + } + + if (session->header.needs_swap) + perf_event_header__bswap(&event->header); + + size = event->header.size; + if (size < sizeof(struct perf_event_header)) { + pr_err("bad event header size\n"); + goto out_err; + } + + if (size > cur_size) { + void *new = realloc(buf, size); + if (!new) { + pr_err("failed to allocate memory to read event\n"); + goto out_err; + } + buf = new; + cur_size = size; + event = buf; + } + p = event; + p += sizeof(struct perf_event_header); + + if (size - sizeof(struct perf_event_header)) { + err = readn(fd, p, size - sizeof(struct perf_event_header)); + if (err <= 0) { + if (err == 0) { + pr_err("unexpected end of event stream\n"); + goto done; + } + + pr_err("failed to read event data\n"); + goto out_err; + } + } + + 
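/* + * buf now holds one complete event (header plus payload); + * perf_session__process_event() byte-swaps it if needed and + * dispatches it to the tool callbacks. + */ +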
if ((skip = perf_session__process_event(session, event, head)) < 0) { + pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", + head, event->header.size, event->header.type); + err = -EINVAL; + goto out_err; + } + + head += size; + + if (skip > 0) + head += skip; + + if (!session_done()) + goto more; +done: + /* do the final flush for ordered samples */ + err = ordered_events__flush(oe, OE_FLUSH__FINAL); +out_err: + free(buf); + perf_session__warn_about_errors(session); + ordered_events__free(&session->ordered_events); + return err; +} + +static union perf_event * +fetch_mmaped_event(struct perf_session *session, + u64 head, size_t mmap_size, char *buf) +{ + union perf_event *event; + + /* + * Ensure we have enough space remaining to read + * the size of the event in the headers. + */ + if (head + sizeof(event->header) > mmap_size) + return NULL; + + event = (union perf_event *)(buf + head); + + if (session->header.needs_swap) + perf_event_header__bswap(&event->header); + + if (head + event->header.size > mmap_size) { + /* We're not fetching the event so swap back again */ + if (session->header.needs_swap) + perf_event_header__bswap(&event->header); + return NULL; + } + + return event; +} + +/* + * On 64bit we can mmap the data file in one go. No need for tiny mmap + * slices. On 32bit we use 32MB. + */ +#if BITS_PER_LONG == 64 +#define MMAP_SIZE ULLONG_MAX +#define NUM_MMAPS 1 +#else +#define MMAP_SIZE (32 * 1024 * 1024ULL) +#define NUM_MMAPS 128 +#endif + +static int __perf_session__process_events(struct perf_session *session, + u64 data_offset, u64 data_size, + u64 file_size) +{ + struct ordered_events *oe = &session->ordered_events; + struct perf_tool *tool = session->tool; + int fd = perf_data_file__fd(session->file); + u64 head, page_offset, file_offset, file_pos, size; + int err, mmap_prot, mmap_flags, map_idx = 0; + size_t mmap_size; + char *buf, *mmaps[NUM_MMAPS]; + union perf_event *event; + struct ui_progress prog; + s64 skip; + + perf_tool__fill_defaults(tool); + + page_offset = page_size * (data_offset / page_size); + file_offset = page_offset; + head = data_offset - page_offset; + + if (data_size && (data_offset + data_size < file_size)) + file_size = data_offset + data_size; + + ui_progress__init(&prog, file_size, "Processing events..."); + + mmap_size = MMAP_SIZE; + if (mmap_size > file_size) { + mmap_size = file_size; + session->one_mmap = true; + } + + memset(mmaps, 0, sizeof(mmaps)); + + mmap_prot = PROT_READ; + mmap_flags = MAP_SHARED; + + if (session->header.needs_swap) { + mmap_prot |= PROT_WRITE; + mmap_flags = MAP_PRIVATE; + } +remap: + buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd, + file_offset); + if (buf == MAP_FAILED) { + pr_err("failed to mmap file\n"); + err = -errno; + goto out_err; + } + mmaps[map_idx] = buf; + map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1); + file_pos = file_offset + head; + if (session->one_mmap) { + session->one_mmap_addr = buf; + session->one_mmap_offset = file_offset; + } + +more: + event = fetch_mmaped_event(session, head, mmap_size, buf); + if (!event) { + if (mmaps[map_idx]) { + munmap(mmaps[map_idx], mmap_size); + mmaps[map_idx] = NULL; + } + + page_offset = page_size * (head / page_size); + file_offset += page_offset; + head -= page_offset; + goto remap; + } + + size = event->header.size; + + if (size < sizeof(struct perf_event_header) || + (skip = perf_session__process_event(session, event, file_pos)) < 0) { + pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", + file_offset + head, event->header.size, + 
event->header.type); + err = -EINVAL; + goto out_err; + } + + if (skip) + size += skip; + + head += size; + file_pos += size; + + ui_progress__update(&prog, size); + + if (session_done()) + goto out; + + if (file_pos < file_size) + goto more; + +out: + /* do the final flush for ordered samples */ + err = ordered_events__flush(oe, OE_FLUSH__FINAL); +out_err: + ui_progress__finish(); + perf_session__warn_about_errors(session); + ordered_events__free(&session->ordered_events); + session->one_mmap = false; + return err; +} + +int perf_session__process_events(struct perf_session *session) +{ + u64 size = perf_data_file__size(session->file); + int err; + + if (perf_session__register_idle_thread(session) == NULL) + return -ENOMEM; + + if (!perf_data_file__is_pipe(session->file)) + err = __perf_session__process_events(session, + session->header.data_offset, + session->header.data_size, size); + else + err = __perf_session__process_pipe_events(session); + + return err; +} + +bool perf_session__has_traces(struct perf_session *session, const char *msg) +{ + struct perf_evsel *evsel; + + evlist__for_each(session->evlist, evsel) { + if (evsel->attr.type == PERF_TYPE_TRACEPOINT) + return true; + } + + pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); + return false; +} + +int maps__set_kallsyms_ref_reloc_sym(struct map **maps, + const char *symbol_name, u64 addr) +{ + char *bracket; + enum map_type i; + struct ref_reloc_sym *ref; + + ref = zalloc(sizeof(struct ref_reloc_sym)); + if (ref == NULL) + return -ENOMEM; + + ref->name = strdup(symbol_name); + if (ref->name == NULL) { + free(ref); + return -ENOMEM; + } + + bracket = strchr(ref->name, ']'); + if (bracket) + *bracket = '\0'; + + ref->addr = addr; + + for (i = 0; i < MAP__NR_TYPES; ++i) { + struct kmap *kmap = map__kmap(maps[i]); + + if (!kmap) + continue; + kmap->ref_reloc_sym = ref; + } + + return 0; +} + +size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp) +{ + return machines__fprintf_dsos(&session->machines, fp); +} + +size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm) +{ + return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm); +} + +size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) +{ + size_t ret = fprintf(fp, "Aggregated stats:\n"); + + ret += events_stats__fprintf(&session->evlist->stats, fp); + return ret; +} + +size_t perf_session__fprintf(struct perf_session *session, FILE *fp) +{ + /* + * FIXME: Here we have to actually print all the machines in this + * session, not just the host... + */ + return machine__fprintf(&session->machines.host, fp); +} + +struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, + unsigned int type) +{ + struct perf_evsel *pos; + + evlist__for_each(session->evlist, pos) { + if (pos->attr.type == type) + return pos; + } + return NULL; +} + +void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample, + struct addr_location *al, + unsigned int print_opts, unsigned int stack_depth) +{ + struct callchain_cursor_node *node; + int print_ip = print_opts & PRINT_IP_OPT_IP; + int print_sym = print_opts & PRINT_IP_OPT_SYM; + int print_dso = print_opts & PRINT_IP_OPT_DSO; + int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET; + int print_oneline = print_opts & PRINT_IP_OPT_ONELINE; + int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE; + char s = print_oneline ? 
' ' : '\t'; + + if (symbol_conf.use_callchain && sample->callchain) { + struct addr_location node_al; + + if (thread__resolve_callchain(al->thread, evsel, + sample, NULL, NULL, + PERF_MAX_STACK_DEPTH) != 0) { + if (verbose) + error("Failed to resolve callchain. Skipping\n"); + return; + } + callchain_cursor_commit(&callchain_cursor); + + if (print_symoffset) + node_al = *al; + + while (stack_depth) { + u64 addr = 0; + + node = callchain_cursor_current(&callchain_cursor); + if (!node) + break; + + if (node->sym && node->sym->ignore) + goto next; + + if (print_ip) + printf("%c%16" PRIx64, s, node->ip); + + if (node->map) + addr = node->map->map_ip(node->map, node->ip); + + if (print_sym) { + printf(" "); + if (print_symoffset) { + node_al.addr = addr; + node_al.map = node->map; + symbol__fprintf_symname_offs(node->sym, &node_al, stdout); + } else + symbol__fprintf_symname(node->sym, stdout); + } + + if (print_dso) { + printf(" ("); + map__fprintf_dsoname(node->map, stdout); + printf(")"); + } + + if (print_srcline) + map__fprintf_srcline(node->map, addr, "\n ", + stdout); + + if (!print_oneline) + printf("\n"); + + stack_depth--; +next: + callchain_cursor_advance(&callchain_cursor); + } + + } else { + if (al->sym && al->sym->ignore) + return; + + if (print_ip) + printf("%16" PRIx64, sample->ip); + + if (print_sym) { + printf(" "); + if (print_symoffset) + symbol__fprintf_symname_offs(al->sym, al, + stdout); + else + symbol__fprintf_symname(al->sym, stdout); + } + + if (print_dso) { + printf(" ("); + map__fprintf_dsoname(al->map, stdout); + printf(")"); + } + + if (print_srcline) + map__fprintf_srcline(al->map, al->addr, "\n ", stdout); + } +} + +int perf_session__cpu_bitmap(struct perf_session *session, + const char *cpu_list, unsigned long *cpu_bitmap) +{ + int i, err = -1; + struct cpu_map *map; + + for (i = 0; i < PERF_TYPE_MAX; ++i) { + struct perf_evsel *evsel; + + evsel = perf_session__find_first_evtype(session, i); + if (!evsel) + continue; + + if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) { + pr_err("File does not contain CPU events. " + "Remove -c option to proceed.\n"); + return -1; + } + } + + map = cpu_map__new(cpu_list); + if (map == NULL) { + pr_err("Invalid cpu_list\n"); + return -1; + } + + for (i = 0; i < map->nr; i++) { + int cpu = map->map[i]; + + if (cpu >= MAX_NR_CPUS) { + pr_err("Requested CPU %d too large. " + "Consider raising MAX_NR_CPUS\n", cpu); + goto out_delete_map; + } + + set_bit(cpu, cpu_bitmap); + } + + err = 0; + +out_delete_map: + cpu_map__delete(map); + return err; +} + +void perf_session__fprintf_info(struct perf_session *session, FILE *fp, + bool full) +{ + struct stat st; + int fd, ret; + + if (session == NULL || fp == NULL) + return; + + fd = perf_data_file__fd(session->file); + + ret = fstat(fd, &st); + if (ret == -1) + return; + + fprintf(fp, "# ========\n"); + fprintf(fp, "# captured on: %s", ctime(&st.st_ctime)); + perf_header__fprintf_info(session, fp, full); + fprintf(fp, "# ========\n#\n"); +} + + +int __perf_session__set_tracepoints_handlers(struct perf_session *session, + const struct perf_evsel_str_handler *assocs, + size_t nr_assocs) +{ + struct perf_evsel *evsel; + size_t i; + int err; + + for (i = 0; i < nr_assocs; i++) { + /* + * Adding a handler for an event not in the session, + * just ignore it. 
+ */ + evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name); + if (evsel == NULL) + continue; + + err = -EEXIST; + if (evsel->handler != NULL) + goto out; + evsel->handler = assocs[i].handler; + } + + err = 0; +out: + return err; +} + +int perf_event__process_id_index(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_session *session) +{ + struct perf_evlist *evlist = session->evlist; + struct id_index_event *ie = &event->id_index; + size_t i, nr, max_nr; + + max_nr = (ie->header.size - sizeof(struct id_index_event)) / + sizeof(struct id_index_entry); + nr = ie->nr; + if (nr > max_nr) + return -EINVAL; + + if (dump_trace) + fprintf(stdout, " nr: %zu\n", nr); + + for (i = 0; i < nr; i++) { + struct id_index_entry *e = &ie->entries[i]; + struct perf_sample_id *sid; + + if (dump_trace) { + fprintf(stdout, " ... id: %"PRIu64, e->id); + fprintf(stdout, " idx: %"PRIu64, e->idx); + fprintf(stdout, " cpu: %"PRId64, e->cpu); + fprintf(stdout, " tid: %"PRId64"\n", e->tid); + } + + sid = perf_evlist__id2sid(evlist, e->id); + if (!sid) + return -ENOENT; + sid->idx = e->idx; + sid->cpu = e->cpu; + sid->tid = e->tid; + } + return 0; +} + +int perf_event__synthesize_id_index(struct perf_tool *tool, + perf_event__handler_t process, + struct perf_evlist *evlist, + struct machine *machine) +{ + union perf_event *ev; + struct perf_evsel *evsel; + size_t nr = 0, i = 0, sz, max_nr, n; + int err; + + pr_debug2("Synthesizing id index\n"); + + max_nr = (UINT16_MAX - sizeof(struct id_index_event)) / + sizeof(struct id_index_entry); + + evlist__for_each(evlist, evsel) + nr += evsel->ids; + + n = nr > max_nr ? max_nr : nr; + sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry); + ev = zalloc(sz); + if (!ev) + return -ENOMEM; + + ev->id_index.header.type = PERF_RECORD_ID_INDEX; + ev->id_index.header.size = sz; + ev->id_index.nr = n; + + evlist__for_each(evlist, evsel) { + u32 j; + + for (j = 0; j < evsel->ids; j++) { + struct id_index_entry *e; + struct perf_sample_id *sid; + + if (i >= n) { + err = process(tool, ev, NULL, machine); + if (err) + goto out_err; + nr -= n; + i = 0; + } + + e = &ev->id_index.entries[i++]; + + e->id = evsel->id[j]; + + sid = perf_evlist__id2sid(evlist, e->id); + if (!sid) { + free(ev); + return -ENOENT; + } + + e->idx = sid->idx; + e->cpu = sid->cpu; + e->tid = sid->tid; + } + } + + sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry); + ev->id_index.header.size = sz; + ev->id_index.nr = nr; + + err = process(tool, ev, NULL, machine); +out_err: + free(ev); + + return err; +} diff --git a/kernel/tools/perf/util/session.h b/kernel/tools/perf/util/session.h new file mode 100644 index 000000000..d5fa7b791 --- /dev/null +++ b/kernel/tools/perf/util/session.h @@ -0,0 +1,133 @@ +#ifndef __PERF_SESSION_H +#define __PERF_SESSION_H + +#include "trace-event.h" +#include "event.h" +#include "header.h" +#include "machine.h" +#include "symbol.h" +#include "thread.h" +#include "data.h" +#include "ordered-events.h" +#include +#include + +struct ip_callchain; +struct thread; + +struct perf_session { + struct perf_header header; + struct machines machines; + struct perf_evlist *evlist; + struct trace_event tevent; + bool repipe; + bool one_mmap; + void *one_mmap_addr; + u64 one_mmap_offset; + struct ordered_events ordered_events; + struct perf_data_file *file; + struct perf_tool *tool; +}; + +#define PRINT_IP_OPT_IP (1<<0) +#define PRINT_IP_OPT_SYM (1<<1) +#define PRINT_IP_OPT_DSO (1<<2) +#define 
PRINT_IP_OPT_SYMOFFSET (1<<3) +#define PRINT_IP_OPT_ONELINE (1<<4) +#define PRINT_IP_OPT_SRCLINE (1<<5) + +struct perf_tool; + +struct perf_session *perf_session__new(struct perf_data_file *file, + bool repipe, struct perf_tool *tool); +void perf_session__delete(struct perf_session *session); + +void perf_event_header__bswap(struct perf_event_header *hdr); + +int perf_session__peek_event(struct perf_session *session, off_t file_offset, + void *buf, size_t buf_sz, + union perf_event **event_ptr, + struct perf_sample *sample); + +int perf_session__process_events(struct perf_session *session); + +int perf_session__queue_event(struct perf_session *s, union perf_event *event, + struct perf_sample *sample, u64 file_offset); + +void perf_tool__fill_defaults(struct perf_tool *tool); + +int perf_session__resolve_callchain(struct perf_session *session, + struct perf_evsel *evsel, + struct thread *thread, + struct ip_callchain *chain, + struct symbol **parent); + +bool perf_session__has_traces(struct perf_session *session, const char *msg); + +void perf_event__attr_swap(struct perf_event_attr *attr); + +int perf_session__create_kernel_maps(struct perf_session *session); + +void perf_session__set_id_hdr_size(struct perf_session *session); + +static inline +struct machine *perf_session__find_machine(struct perf_session *session, pid_t pid) +{ + return machines__find(&session->machines, pid); +} + +static inline +struct machine *perf_session__findnew_machine(struct perf_session *session, pid_t pid) +{ + return machines__findnew(&session->machines, pid); +} + +struct thread *perf_session__findnew(struct perf_session *session, pid_t pid); +size_t perf_session__fprintf(struct perf_session *session, FILE *fp); + +size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp); + +size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp, + bool (fn)(struct dso *dso, int parm), int parm); + +size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp); + +struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, + unsigned int type); + +void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample, + struct addr_location *al, + unsigned int print_opts, unsigned int stack_depth); + +int perf_session__cpu_bitmap(struct perf_session *session, + const char *cpu_list, unsigned long *cpu_bitmap); + +void perf_session__fprintf_info(struct perf_session *s, FILE *fp, bool full); + +struct perf_evsel_str_handler; + +int __perf_session__set_tracepoints_handlers(struct perf_session *session, + const struct perf_evsel_str_handler *assocs, + size_t nr_assocs); + +#define perf_session__set_tracepoints_handlers(session, array) \ + __perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array)) + +extern volatile int session_done; + +#define session_done() ACCESS_ONCE(session_done) + +int perf_session__deliver_synth_event(struct perf_session *session, + union perf_event *event, + struct perf_sample *sample); + +int perf_event__process_id_index(struct perf_tool *tool, + union perf_event *event, + struct perf_session *session); + +int perf_event__synthesize_id_index(struct perf_tool *tool, + perf_event__handler_t process, + struct perf_evlist *evlist, + struct machine *machine); + +#endif /* __PERF_SESSION_H */ diff --git a/kernel/tools/perf/util/setup.py b/kernel/tools/perf/util/setup.py new file mode 100644 index 000000000..183310376 --- /dev/null +++ b/kernel/tools/perf/util/setup.py @@ -0,0 +1,48 @@ +#!/usr/bin/python2 + 
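+# None of the paths below are hardcoded: PYTHON_EXTBUILD_LIB/_TMP and the +# LIBTRACEEVENT/LIBAPI objects are read from environment variables, which +# are expected to be exported by the perf build.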
+from distutils.core import setup, Extension +from os import getenv + +from distutils.command.build_ext import build_ext as _build_ext +from distutils.command.install_lib import install_lib as _install_lib + +class build_ext(_build_ext): + def finalize_options(self): + _build_ext.finalize_options(self) + self.build_lib = build_lib + self.build_temp = build_tmp + +class install_lib(_install_lib): + def finalize_options(self): + _install_lib.finalize_options(self) + self.build_dir = build_lib + + +cflags = getenv('CFLAGS', '').split() +# switch off several checks (need to be at the end of cflags list) +cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ] + +build_lib = getenv('PYTHON_EXTBUILD_LIB') +build_tmp = getenv('PYTHON_EXTBUILD_TMP') +libtraceevent = getenv('LIBTRACEEVENT') +libapikfs = getenv('LIBAPI') + +ext_sources = [f.strip() for f in file('util/python-ext-sources') + if len(f.strip()) > 0 and f[0] != '#'] + +perf = Extension('perf', + sources = ext_sources, + include_dirs = ['util/include'], + extra_compile_args = cflags, + extra_objects = [libtraceevent, libapikfs], + ) + +setup(name='perf', + version='0.1', + description='Interface with the Linux profiling infrastructure', + author='Arnaldo Carvalho de Melo', + author_email='acme@redhat.com', + license='GPLv2', + url='http://perf.wiki.kernel.org', + ext_modules=[perf], + cmdclass={'build_ext': build_ext, 'install_lib': install_lib}) diff --git a/kernel/tools/perf/util/sigchain.c b/kernel/tools/perf/util/sigchain.c new file mode 100644 index 000000000..ba785e9b1 --- /dev/null +++ b/kernel/tools/perf/util/sigchain.c @@ -0,0 +1,52 @@ +#include "sigchain.h" +#include "cache.h" + +#define SIGCHAIN_MAX_SIGNALS 32 + +struct sigchain_signal { + sigchain_fun *old; + int n; + int alloc; +}; +static struct sigchain_signal signals[SIGCHAIN_MAX_SIGNALS]; + +static void check_signum(int sig) +{ + if (sig < 1 || sig >= SIGCHAIN_MAX_SIGNALS) + die("BUG: signal out of range: %d", sig); +} + +static int sigchain_push(int sig, sigchain_fun f) +{ + struct sigchain_signal *s = signals + sig; + check_signum(sig); + + ALLOC_GROW(s->old, s->n + 1, s->alloc); + s->old[s->n] = signal(sig, f); + if (s->old[s->n] == SIG_ERR) + return -1; + s->n++; + return 0; +} + +int sigchain_pop(int sig) +{ + struct sigchain_signal *s = signals + sig; + check_signum(sig); + if (s->n < 1) + return 0; + + if (signal(sig, s->old[s->n - 1]) == SIG_ERR) + return -1; + s->n--; + return 0; +} + +void sigchain_push_common(sigchain_fun f) +{ + sigchain_push(SIGINT, f); + sigchain_push(SIGHUP, f); + sigchain_push(SIGTERM, f); + sigchain_push(SIGQUIT, f); + sigchain_push(SIGPIPE, f); +} diff --git a/kernel/tools/perf/util/sigchain.h b/kernel/tools/perf/util/sigchain.h new file mode 100644 index 000000000..959d64eb5 --- /dev/null +++ b/kernel/tools/perf/util/sigchain.h @@ -0,0 +1,10 @@ +#ifndef __PERF_SIGCHAIN_H +#define __PERF_SIGCHAIN_H + +typedef void (*sigchain_fun)(int); + +int sigchain_pop(int sig); + +void sigchain_push_common(sigchain_fun f); + +#endif /* __PERF_SIGCHAIN_H */ diff --git a/kernel/tools/perf/util/sort.c b/kernel/tools/perf/util/sort.c new file mode 100644 index 000000000..4593f36ec --- /dev/null +++ b/kernel/tools/perf/util/sort.c @@ -0,0 +1,1861 @@ +#include +#include "sort.h" +#include "hist.h" +#include "comm.h" +#include "symbol.h" +#include "evsel.h" + +regex_t parent_regex; +const char default_parent_pattern[] = "^sys_|^do_page_fault"; +const char *parent_pattern = default_parent_pattern; +const char default_sort_order[] 
= "comm,dso,symbol"; +const char default_branch_sort_order[] = "comm,dso_from,symbol_from,dso_to,symbol_to"; +const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked"; +const char default_top_sort_order[] = "dso,symbol"; +const char default_diff_sort_order[] = "dso,symbol"; +const char *sort_order; +const char *field_order; +regex_t ignore_callees_regex; +int have_ignore_callees = 0; +int sort__need_collapse = 0; +int sort__has_parent = 0; +int sort__has_sym = 0; +int sort__has_dso = 0; +enum sort_mode sort__mode = SORT_MODE__NORMAL; + + +static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) +{ + int n; + va_list ap; + + va_start(ap, fmt); + n = vsnprintf(bf, size, fmt, ap); + if (symbol_conf.field_sep && n > 0) { + char *sep = bf; + + while (1) { + sep = strchr(sep, *symbol_conf.field_sep); + if (sep == NULL) + break; + *sep = '.'; + } + } + va_end(ap); + + if (n >= (int)size) + return size - 1; + return n; +} + +static int64_t cmp_null(const void *l, const void *r) +{ + if (!l && !r) + return 0; + else if (!l) + return -1; + else + return 1; +} + +/* --sort pid */ + +static int64_t +sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return right->thread->tid - left->thread->tid; +} + +static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + const char *comm = thread__comm_str(he->thread); + + width = max(7U, width) - 6; + return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid, + width, width, comm ?: ""); +} + +struct sort_entry sort_thread = { + .se_header = " Pid:Command", + .se_cmp = sort__thread_cmp, + .se_snprintf = hist_entry__thread_snprintf, + .se_width_idx = HISTC_THREAD, +}; + +/* --sort comm */ + +static int64_t +sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) +{ + /* Compare the addr that should be unique among comm */ + return comm__str(right->comm) - comm__str(left->comm); +} + +static int64_t +sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) +{ + /* Compare the addr that should be unique among comm */ + return comm__str(right->comm) - comm__str(left->comm); +} + +static int64_t +sort__comm_sort(struct hist_entry *left, struct hist_entry *right) +{ + return strcmp(comm__str(right->comm), comm__str(left->comm)); +} + +static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm)); +} + +struct sort_entry sort_comm = { + .se_header = "Command", + .se_cmp = sort__comm_cmp, + .se_collapse = sort__comm_collapse, + .se_sort = sort__comm_sort, + .se_snprintf = hist_entry__comm_snprintf, + .se_width_idx = HISTC_COMM, +}; + +/* --sort dso */ + +static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) +{ + struct dso *dso_l = map_l ? map_l->dso : NULL; + struct dso *dso_r = map_r ? 
map_r->dso : NULL; + const char *dso_name_l, *dso_name_r; + + if (!dso_l || !dso_r) + return cmp_null(dso_r, dso_l); + + if (verbose) { + dso_name_l = dso_l->long_name; + dso_name_r = dso_r->long_name; + } else { + dso_name_l = dso_l->short_name; + dso_name_r = dso_r->short_name; + } + + return strcmp(dso_name_l, dso_name_r); +} + +static int64_t +sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return _sort__dso_cmp(right->ms.map, left->ms.map); +} + +static int _hist_entry__dso_snprintf(struct map *map, char *bf, + size_t size, unsigned int width) +{ + if (map && map->dso) { + const char *dso_name = !verbose ? map->dso->short_name : + map->dso->long_name; + return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); + } + + return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]"); +} + +static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + return _hist_entry__dso_snprintf(he->ms.map, bf, size, width); +} + +struct sort_entry sort_dso = { + .se_header = "Shared Object", + .se_cmp = sort__dso_cmp, + .se_snprintf = hist_entry__dso_snprintf, + .se_width_idx = HISTC_DSO, +}; + +/* --sort symbol */ + +static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip) +{ + return (int64_t)(right_ip - left_ip); +} + +static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) +{ + u64 ip_l, ip_r; + + if (!sym_l || !sym_r) + return cmp_null(sym_l, sym_r); + + if (sym_l == sym_r) + return 0; + + ip_l = sym_l->start; + ip_r = sym_r->start; + + return (int64_t)(ip_r - ip_l); +} + +static int64_t +sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) +{ + int64_t ret; + + if (!left->ms.sym && !right->ms.sym) + return _sort__addr_cmp(left->ip, right->ip); + + /* + * comparing symbol address alone is not enough since it's a + * relative address within a dso. + */ + if (!sort__has_dso) { + ret = sort__dso_cmp(left, right); + if (ret != 0) + return ret; + } + + return _sort__sym_cmp(left->ms.sym, right->ms.sym); +} + +static int64_t +sort__sym_sort(struct hist_entry *left, struct hist_entry *right) +{ + if (!left->ms.sym || !right->ms.sym) + return cmp_null(left->ms.sym, right->ms.sym); + + return strcmp(right->ms.sym->name, left->ms.sym->name); +} + +static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, + u64 ip, char level, char *bf, size_t size, + unsigned int width) +{ + size_t ret = 0; + + if (verbose) { + char o = map ? 
dso__symtab_origin(map->dso) : '!'; + ret += repsep_snprintf(bf, size, "%-#*llx %c ", + BITS_PER_LONG / 4 + 2, ip, o); + } + + ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); + if (sym && map) { + if (map->type == MAP__VARIABLE) { + ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); + ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", + ip - map->unmap_ip(map, sym->start)); + ret += repsep_snprintf(bf + ret, size - ret, "%-*s", + width - ret, ""); + } else { + ret += repsep_snprintf(bf + ret, size - ret, "%-*s", + width - ret, + sym->name); + } + } else { + size_t len = BITS_PER_LONG / 4; + ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", + len, ip); + ret += repsep_snprintf(bf + ret, size - ret, "%-*s", + width - ret, ""); + } + + if (ret > width) + bf[width] = '\0'; + + return width; +} + +static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip, + he->level, bf, size, width); +} + +struct sort_entry sort_sym = { + .se_header = "Symbol", + .se_cmp = sort__sym_cmp, + .se_sort = sort__sym_sort, + .se_snprintf = hist_entry__sym_snprintf, + .se_width_idx = HISTC_SYMBOL, +}; + +/* --sort srcline */ + +static int64_t +sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) +{ + if (!left->srcline) { + if (!left->ms.map) + left->srcline = SRCLINE_UNKNOWN; + else { + struct map *map = left->ms.map; + left->srcline = get_srcline(map->dso, + map__rip_2objdump(map, left->ip), + left->ms.sym, true); + } + } + if (!right->srcline) { + if (!right->ms.map) + right->srcline = SRCLINE_UNKNOWN; + else { + struct map *map = right->ms.map; + right->srcline = get_srcline(map->dso, + map__rip_2objdump(map, right->ip), + right->ms.sym, true); + } + } + return strcmp(right->srcline, left->srcline); +} + +static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcline); +} + +struct sort_entry sort_srcline = { + .se_header = "Source:Line", + .se_cmp = sort__srcline_cmp, + .se_snprintf = hist_entry__srcline_snprintf, + .se_width_idx = HISTC_SRCLINE, +}; + +/* --sort parent */ + +static int64_t +sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct symbol *sym_l = left->parent; + struct symbol *sym_r = right->parent; + + if (!sym_l || !sym_r) + return cmp_null(sym_l, sym_r); + + return strcmp(sym_r->name, sym_l->name); +} + +static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + return repsep_snprintf(bf, size, "%-*.*s", width, width, + he->parent ? 
he->parent->name : "[other]"); +} + +struct sort_entry sort_parent = { + .se_header = "Parent symbol", + .se_cmp = sort__parent_cmp, + .se_snprintf = hist_entry__parent_snprintf, + .se_width_idx = HISTC_PARENT, +}; + +/* --sort cpu */ + +static int64_t +sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return right->cpu - left->cpu; +} + +static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); +} + +struct sort_entry sort_cpu = { + .se_header = "CPU", + .se_cmp = sort__cpu_cmp, + .se_snprintf = hist_entry__cpu_snprintf, + .se_width_idx = HISTC_CPU, +}; + +/* sort keys for branch stacks */ + +static int64_t +sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) +{ + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + + return _sort__dso_cmp(left->branch_info->from.map, + right->branch_info->from.map); +} + +static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + if (he->branch_info) + return _hist_entry__dso_snprintf(he->branch_info->from.map, + bf, size, width); + else + return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); +} + +static int64_t +sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) +{ + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + + return _sort__dso_cmp(left->branch_info->to.map, + right->branch_info->to.map); +} + +static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + if (he->branch_info) + return _hist_entry__dso_snprintf(he->branch_info->to.map, + bf, size, width); + else + return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); +} + +static int64_t +sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct addr_map_symbol *from_l, *from_r; + + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + + from_l = &left->branch_info->from; + from_r = &right->branch_info->from; + + if (!from_l->sym && !from_r->sym) + return _sort__addr_cmp(from_l->addr, from_r->addr); + + return _sort__sym_cmp(from_l->sym, from_r->sym); +} + +static int64_t +sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct addr_map_symbol *to_l, *to_r; + + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + + to_l = &left->branch_info->to; + to_r = &right->branch_info->to; + + if (!to_l->sym && !to_r->sym) + return _sort__addr_cmp(to_l->addr, to_r->addr); + + return _sort__sym_cmp(to_l->sym, to_r->sym); +} + +static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + if (he->branch_info) { + struct addr_map_symbol *from = &he->branch_info->from; + + return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, + he->level, bf, size, width); + } + + return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); +} + +static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + if (he->branch_info) { + struct addr_map_symbol *to = &he->branch_info->to; + + return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, + he->level, bf, size, width); + 
} + + return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); +} + +struct sort_entry sort_dso_from = { + .se_header = "Source Shared Object", + .se_cmp = sort__dso_from_cmp, + .se_snprintf = hist_entry__dso_from_snprintf, + .se_width_idx = HISTC_DSO_FROM, +}; + +struct sort_entry sort_dso_to = { + .se_header = "Target Shared Object", + .se_cmp = sort__dso_to_cmp, + .se_snprintf = hist_entry__dso_to_snprintf, + .se_width_idx = HISTC_DSO_TO, +}; + +struct sort_entry sort_sym_from = { + .se_header = "Source Symbol", + .se_cmp = sort__sym_from_cmp, + .se_snprintf = hist_entry__sym_from_snprintf, + .se_width_idx = HISTC_SYMBOL_FROM, +}; + +struct sort_entry sort_sym_to = { + .se_header = "Target Symbol", + .se_cmp = sort__sym_to_cmp, + .se_snprintf = hist_entry__sym_to_snprintf, + .se_width_idx = HISTC_SYMBOL_TO, +}; + +static int64_t +sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) +{ + unsigned char mp, p; + + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + + mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; + p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; + return mp || p; +} + +static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width){ + static const char *out = "N/A"; + + if (he->branch_info) { + if (he->branch_info->flags.predicted) + out = "N"; + else if (he->branch_info->flags.mispred) + out = "Y"; + } + + return repsep_snprintf(bf, size, "%-*.*s", width, width, out); +} + +/* --sort daddr_sym */ +static int64_t +sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) +{ + uint64_t l = 0, r = 0; + + if (left->mem_info) + l = left->mem_info->daddr.addr; + if (right->mem_info) + r = right->mem_info->daddr.addr; + + return (int64_t)(r - l); +} + +static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + uint64_t addr = 0; + struct map *map = NULL; + struct symbol *sym = NULL; + + if (he->mem_info) { + addr = he->mem_info->daddr.addr; + map = he->mem_info->daddr.map; + sym = he->mem_info->daddr.sym; + } + return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, + width); +} + +static int64_t +sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct map *map_l = NULL; + struct map *map_r = NULL; + + if (left->mem_info) + map_l = left->mem_info->daddr.map; + if (right->mem_info) + map_r = right->mem_info->daddr.map; + + return _sort__dso_cmp(map_l, map_r); +} + +static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + struct map *map = NULL; + + if (he->mem_info) + map = he->mem_info->daddr.map; + + return _hist_entry__dso_snprintf(map, bf, size, width); +} + +static int64_t +sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) +{ + union perf_mem_data_src data_src_l; + union perf_mem_data_src data_src_r; + + if (left->mem_info) + data_src_l = left->mem_info->data_src; + else + data_src_l.mem_lock = PERF_MEM_LOCK_NA; + + if (right->mem_info) + data_src_r = right->mem_info->data_src; + else + data_src_r.mem_lock = PERF_MEM_LOCK_NA; + + return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock); +} + +static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + const char *out; + u64 mask = PERF_MEM_LOCK_NA; + + if (he->mem_info) + mask = he->mem_info->data_src.mem_lock; 
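+ + /* Decode the mem_lock bits into the "Locked" column label. */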
+ + if (mask & PERF_MEM_LOCK_NA) + out = "N/A"; + else if (mask & PERF_MEM_LOCK_LOCKED) + out = "Yes"; + else + out = "No"; + + return repsep_snprintf(bf, size, "%-*s", width, out); +} + +static int64_t +sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) +{ + union perf_mem_data_src data_src_l; + union perf_mem_data_src data_src_r; + + if (left->mem_info) + data_src_l = left->mem_info->data_src; + else + data_src_l.mem_dtlb = PERF_MEM_TLB_NA; + + if (right->mem_info) + data_src_r = right->mem_info->data_src; + else + data_src_r.mem_dtlb = PERF_MEM_TLB_NA; + + return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); +} + +static const char * const tlb_access[] = { + "N/A", + "HIT", + "MISS", + "L1", + "L2", + "Walker", + "Fault", +}; +#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *)) + +static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + char out[64]; + size_t sz = sizeof(out) - 1; /* -1 for null termination */ + size_t l = 0, i; + u64 m = PERF_MEM_TLB_NA; + u64 hit, miss; + + out[0] = '\0'; + + if (he->mem_info) + m = he->mem_info->data_src.mem_dtlb; + + hit = m & PERF_MEM_TLB_HIT; + miss = m & PERF_MEM_TLB_MISS; + + /* already taken care of */ + m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS); + + for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) { + if (!(m & 0x1)) + continue; + if (l) { + strcat(out, " or "); + l += 4; + } + strncat(out, tlb_access[i], sz - l); + l += strlen(tlb_access[i]); + } + if (*out == '\0') + strcpy(out, "N/A"); + if (hit) + strncat(out, " hit", sz - l); + if (miss) + strncat(out, " miss", sz - l); + + return repsep_snprintf(bf, size, "%-*s", width, out); +} + +static int64_t +sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) +{ + union perf_mem_data_src data_src_l; + union perf_mem_data_src data_src_r; + + if (left->mem_info) + data_src_l = left->mem_info->data_src; + else + data_src_l.mem_lvl = PERF_MEM_LVL_NA; + + if (right->mem_info) + data_src_r = right->mem_info->data_src; + else + data_src_r.mem_lvl = PERF_MEM_LVL_NA; + + return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); +} + +static const char * const mem_lvl[] = { + "N/A", + "HIT", + "MISS", + "L1", + "LFB", + "L2", + "L3", + "Local RAM", + "Remote RAM (1 hop)", + "Remote RAM (2 hops)", + "Remote Cache (1 hop)", + "Remote Cache (2 hops)", + "I/O", + "Uncached", +}; +#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *)) + +static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + char out[64]; + size_t sz = sizeof(out) - 1; /* -1 for null termination */ + size_t i, l = 0; + u64 m = PERF_MEM_LVL_NA; + u64 hit, miss; + + if (he->mem_info) + m = he->mem_info->data_src.mem_lvl; + + out[0] = '\0'; + + hit = m & PERF_MEM_LVL_HIT; + miss = m & PERF_MEM_LVL_MISS; + + /* already taken care of */ + m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS); + + for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) { + if (!(m & 0x1)) + continue; + if (l) { + strcat(out, " or "); + l += 4; + } + strncat(out, mem_lvl[i], sz - l); + l += strlen(mem_lvl[i]); + } + if (*out == '\0') + strcpy(out, "N/A"); + if (hit) + strncat(out, " hit", sz - l); + if (miss) + strncat(out, " miss", sz - l); + + return repsep_snprintf(bf, size, "%-*s", width, out); +} + +static int64_t +sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right) +{ + union perf_mem_data_src data_src_l; + union perf_mem_data_src data_src_r; + + if (left->mem_info) + data_src_l = left->mem_info->data_src; + 
else + data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; + + if (right->mem_info) + data_src_r = right->mem_info->data_src; + else + data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; + + return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop); +} + +static const char * const snoop_access[] = { + "N/A", + "None", + "Miss", + "Hit", + "HitM", +}; +#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *)) + +static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + char out[64]; + size_t sz = sizeof(out) - 1; /* -1 for null termination */ + size_t i, l = 0; + u64 m = PERF_MEM_SNOOP_NA; + + out[0] = '\0'; + + if (he->mem_info) + m = he->mem_info->data_src.mem_snoop; + + for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) { + if (!(m & 0x1)) + continue; + if (l) { + strcat(out, " or "); + l += 4; + } + strncat(out, snoop_access[i], sz - l); + l += strlen(snoop_access[i]); + } + + if (*out == '\0') + strcpy(out, "N/A"); + + return repsep_snprintf(bf, size, "%-*s", width, out); +} + +static inline u64 cl_address(u64 address) +{ + /* return the cacheline of the address */ + return (address & ~(cacheline_size - 1)); +} + +static int64_t +sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) +{ + u64 l, r; + struct map *l_map, *r_map; + + if (!left->mem_info) return -1; + if (!right->mem_info) return 1; + + /* group event types together */ + if (left->cpumode > right->cpumode) return -1; + if (left->cpumode < right->cpumode) return 1; + + l_map = left->mem_info->daddr.map; + r_map = right->mem_info->daddr.map; + + /* if both are NULL, jump to sort on al_addr instead */ + if (!l_map && !r_map) + goto addr; + + if (!l_map) return -1; + if (!r_map) return 1; + + if (l_map->maj > r_map->maj) return -1; + if (l_map->maj < r_map->maj) return 1; + + if (l_map->min > r_map->min) return -1; + if (l_map->min < r_map->min) return 1; + + if (l_map->ino > r_map->ino) return -1; + if (l_map->ino < r_map->ino) return 1; + + if (l_map->ino_generation > r_map->ino_generation) return -1; + if (l_map->ino_generation < r_map->ino_generation) return 1; + + /* + * Addresses with no major/minor numbers are assumed to be + * anonymous in userspace. Sort those on pid then address. + * + * The kernel and non-zero major/minor mapped areas are + * assumed to be unity mapped. Sort those on address. 
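+	 *
+	 * For example, with 64-byte cachelines (cacheline_size == 64),
+	 * cl_address() masks off the low 6 bits, so data addresses
+	 * 0x1000 and 0x103f collapse into one cacheline entry while
+	 * 0x1040 sorts separately.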
+ */ + + if ((left->cpumode != PERF_RECORD_MISC_KERNEL) && + (!(l_map->flags & MAP_SHARED)) && + !l_map->maj && !l_map->min && !l_map->ino && + !l_map->ino_generation) { + /* userspace anonymous */ + + if (left->thread->pid_ > right->thread->pid_) return -1; + if (left->thread->pid_ < right->thread->pid_) return 1; + } + +addr: + /* al_addr does all the right addr - start + offset calculations */ + l = cl_address(left->mem_info->daddr.al_addr); + r = cl_address(right->mem_info->daddr.al_addr); + + if (l > r) return -1; + if (l < r) return 1; + + return 0; +} + +static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + + uint64_t addr = 0; + struct map *map = NULL; + struct symbol *sym = NULL; + char level = he->level; + + if (he->mem_info) { + addr = cl_address(he->mem_info->daddr.al_addr); + map = he->mem_info->daddr.map; + sym = he->mem_info->daddr.sym; + + /* print [s] for shared data mmaps */ + if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && + map && (map->type == MAP__VARIABLE) && + (map->flags & MAP_SHARED) && + (map->maj || map->min || map->ino || + map->ino_generation)) + level = 's'; + else if (!map) + level = 'X'; + } + return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size, + width); +} + +struct sort_entry sort_mispredict = { + .se_header = "Branch Mispredicted", + .se_cmp = sort__mispredict_cmp, + .se_snprintf = hist_entry__mispredict_snprintf, + .se_width_idx = HISTC_MISPREDICT, +}; + +static u64 he_weight(struct hist_entry *he) +{ + return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0; +} + +static int64_t +sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return he_weight(left) - he_weight(right); +} + +static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he)); +} + +struct sort_entry sort_local_weight = { + .se_header = "Local Weight", + .se_cmp = sort__local_weight_cmp, + .se_snprintf = hist_entry__local_weight_snprintf, + .se_width_idx = HISTC_LOCAL_WEIGHT, +}; + +static int64_t +sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return left->stat.weight - right->stat.weight; +} + +static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight); +} + +struct sort_entry sort_global_weight = { + .se_header = "Weight", + .se_cmp = sort__global_weight_cmp, + .se_snprintf = hist_entry__global_weight_snprintf, + .se_width_idx = HISTC_GLOBAL_WEIGHT, +}; + +struct sort_entry sort_mem_daddr_sym = { + .se_header = "Data Symbol", + .se_cmp = sort__daddr_cmp, + .se_snprintf = hist_entry__daddr_snprintf, + .se_width_idx = HISTC_MEM_DADDR_SYMBOL, +}; + +struct sort_entry sort_mem_daddr_dso = { + .se_header = "Data Object", + .se_cmp = sort__dso_daddr_cmp, + .se_snprintf = hist_entry__dso_daddr_snprintf, + .se_width_idx = HISTC_MEM_DADDR_SYMBOL, +}; + +struct sort_entry sort_mem_locked = { + .se_header = "Locked", + .se_cmp = sort__locked_cmp, + .se_snprintf = hist_entry__locked_snprintf, + .se_width_idx = HISTC_MEM_LOCKED, +}; + +struct sort_entry sort_mem_tlb = { + .se_header = "TLB access", + .se_cmp = sort__tlb_cmp, + .se_snprintf = hist_entry__tlb_snprintf, + .se_width_idx = HISTC_MEM_TLB, +}; + +struct sort_entry sort_mem_lvl = { + .se_header = "Memory access", + .se_cmp = sort__lvl_cmp, + 
.se_snprintf = hist_entry__lvl_snprintf, + .se_width_idx = HISTC_MEM_LVL, +}; + +struct sort_entry sort_mem_snoop = { + .se_header = "Snoop", + .se_cmp = sort__snoop_cmp, + .se_snprintf = hist_entry__snoop_snprintf, + .se_width_idx = HISTC_MEM_SNOOP, +}; + +struct sort_entry sort_mem_dcacheline = { + .se_header = "Data Cacheline", + .se_cmp = sort__dcacheline_cmp, + .se_snprintf = hist_entry__dcacheline_snprintf, + .se_width_idx = HISTC_MEM_DCACHELINE, +}; + +static int64_t +sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) +{ + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + + return left->branch_info->flags.abort != + right->branch_info->flags.abort; +} + +static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + static const char *out = "N/A"; + + if (he->branch_info) { + if (he->branch_info->flags.abort) + out = "A"; + else + out = "."; + } + + return repsep_snprintf(bf, size, "%-*s", width, out); +} + +struct sort_entry sort_abort = { + .se_header = "Transaction abort", + .se_cmp = sort__abort_cmp, + .se_snprintf = hist_entry__abort_snprintf, + .se_width_idx = HISTC_ABORT, +}; + +static int64_t +sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) +{ + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + + return left->branch_info->flags.in_tx != + right->branch_info->flags.in_tx; +} + +static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + static const char *out = "N/A"; + + if (he->branch_info) { + if (he->branch_info->flags.in_tx) + out = "T"; + else + out = "."; + } + + return repsep_snprintf(bf, size, "%-*s", width, out); +} + +struct sort_entry sort_in_tx = { + .se_header = "Branch in transaction", + .se_cmp = sort__in_tx_cmp, + .se_snprintf = hist_entry__in_tx_snprintf, + .se_width_idx = HISTC_IN_TX, +}; + +static int64_t +sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return left->transaction - right->transaction; +} + +static inline char *add_str(char *p, const char *str) +{ + strcpy(p, str); + return p + strlen(str); +} + +static struct txbit { + unsigned flag; + const char *name; + int skip_for_len; +} txbits[] = { + { PERF_TXN_ELISION, "EL ", 0 }, + { PERF_TXN_TRANSACTION, "TX ", 1 }, + { PERF_TXN_SYNC, "SYNC ", 1 }, + { PERF_TXN_ASYNC, "ASYNC ", 0 }, + { PERF_TXN_RETRY, "RETRY ", 0 }, + { PERF_TXN_CONFLICT, "CON ", 0 }, + { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, + { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, + { 0, NULL, 0 } +}; + +int hist_entry__transaction_len(void) +{ + int i; + int len = 0; + + for (i = 0; txbits[i].name; i++) { + if (!txbits[i].skip_for_len) + len += strlen(txbits[i].name); + } + len += 4; /* :XX */ + return len; +} + +static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + u64 t = he->transaction; + char buf[128]; + char *p = buf; + int i; + + buf[0] = 0; + for (i = 0; txbits[i].name; i++) + if (txbits[i].flag & t) + p = add_str(p, txbits[i].name); + if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) + p = add_str(p, "NEITHER "); + if (t & PERF_TXN_ABORT_MASK) { + sprintf(p, ":%" PRIx64, + (t & PERF_TXN_ABORT_MASK) >> + PERF_TXN_ABORT_SHIFT); + p += strlen(p); + } + + return repsep_snprintf(bf, size, "%-*s", width, buf); +} + +struct sort_entry sort_transaction = { + .se_header = "Transaction ", + .se_cmp = 
sort__transaction_cmp, + .se_snprintf = hist_entry__transaction_snprintf, + .se_width_idx = HISTC_TRANSACTION, +}; + +struct sort_dimension { + const char *name; + struct sort_entry *entry; + int taken; +}; + +#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } + +static struct sort_dimension common_sort_dimensions[] = { + DIM(SORT_PID, "pid", sort_thread), + DIM(SORT_COMM, "comm", sort_comm), + DIM(SORT_DSO, "dso", sort_dso), + DIM(SORT_SYM, "symbol", sort_sym), + DIM(SORT_PARENT, "parent", sort_parent), + DIM(SORT_CPU, "cpu", sort_cpu), + DIM(SORT_SRCLINE, "srcline", sort_srcline), + DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), + DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), + DIM(SORT_TRANSACTION, "transaction", sort_transaction), +}; + +#undef DIM + +#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } + +static struct sort_dimension bstack_sort_dimensions[] = { + DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), + DIM(SORT_DSO_TO, "dso_to", sort_dso_to), + DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), + DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), + DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), + DIM(SORT_IN_TX, "in_tx", sort_in_tx), + DIM(SORT_ABORT, "abort", sort_abort), +}; + +#undef DIM + +#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } + +static struct sort_dimension memory_sort_dimensions[] = { + DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), + DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), + DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), + DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), + DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), + DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), + DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline), +}; + +#undef DIM + +struct hpp_dimension { + const char *name; + struct perf_hpp_fmt *fmt; + int taken; +}; + +#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } + +static struct hpp_dimension hpp_sort_dimensions[] = { + DIM(PERF_HPP__OVERHEAD, "overhead"), + DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), + DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), + DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), + DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), + DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), + DIM(PERF_HPP__SAMPLES, "sample"), + DIM(PERF_HPP__PERIOD, "period"), +}; + +#undef DIM + +struct hpp_sort_entry { + struct perf_hpp_fmt hpp; + struct sort_entry *se; +}; + +bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) +{ + struct hpp_sort_entry *hse_a; + struct hpp_sort_entry *hse_b; + + if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) + return false; + + hse_a = container_of(a, struct hpp_sort_entry, hpp); + hse_b = container_of(b, struct hpp_sort_entry, hpp); + + return hse_a->se == hse_b->se; +} + +void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) +{ + struct hpp_sort_entry *hse; + + if (!perf_hpp__is_sort_entry(fmt)) + return; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); +} + +static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct perf_evsel *evsel) +{ + struct hpp_sort_entry *hse; + size_t len = fmt->user_len; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + + if (!len) + len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx); + + return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, 
len, fmt->name); +} + +static int __sort__hpp_width(struct perf_hpp_fmt *fmt, + struct perf_hpp *hpp __maybe_unused, + struct perf_evsel *evsel) +{ + struct hpp_sort_entry *hse; + size_t len = fmt->user_len; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + + if (!len) + len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx); + + return len; +} + +static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he) +{ + struct hpp_sort_entry *hse; + size_t len = fmt->user_len; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + + if (!len) + len = hists__col_len(he->hists, hse->se->se_width_idx); + + return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); +} + +static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, + struct hist_entry *a, struct hist_entry *b) +{ + struct hpp_sort_entry *hse; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + return hse->se->se_cmp(a, b); +} + +static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, + struct hist_entry *a, struct hist_entry *b) +{ + struct hpp_sort_entry *hse; + int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *); + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; + return collapse_fn(a, b); +} + +static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, + struct hist_entry *a, struct hist_entry *b) +{ + struct hpp_sort_entry *hse; + int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + sort_fn = hse->se->se_sort ?: hse->se->se_cmp; + return sort_fn(a, b); +} + +static struct hpp_sort_entry * +__sort_dimension__alloc_hpp(struct sort_dimension *sd) +{ + struct hpp_sort_entry *hse; + + hse = malloc(sizeof(*hse)); + if (hse == NULL) { + pr_err("Memory allocation failed\n"); + return NULL; + } + + hse->se = sd->entry; + hse->hpp.name = sd->entry->se_header; + hse->hpp.header = __sort__hpp_header; + hse->hpp.width = __sort__hpp_width; + hse->hpp.entry = __sort__hpp_entry; + hse->hpp.color = NULL; + + hse->hpp.cmp = __sort__hpp_cmp; + hse->hpp.collapse = __sort__hpp_collapse; + hse->hpp.sort = __sort__hpp_sort; + + INIT_LIST_HEAD(&hse->hpp.list); + INIT_LIST_HEAD(&hse->hpp.sort_list); + hse->hpp.elide = false; + hse->hpp.len = 0; + hse->hpp.user_len = 0; + + return hse; +} + +bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) +{ + return format->header == __sort__hpp_header; +} + +static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd) +{ + struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd); + + if (hse == NULL) + return -1; + + perf_hpp__register_sort_field(&hse->hpp); + return 0; +} + +static int __sort_dimension__add_hpp_output(struct sort_dimension *sd) +{ + struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd); + + if (hse == NULL) + return -1; + + perf_hpp__column_register(&hse->hpp); + return 0; +} + +static int __sort_dimension__add(struct sort_dimension *sd) +{ + if (sd->taken) + return 0; + + if (__sort_dimension__add_hpp_sort(sd) < 0) + return -1; + + if (sd->entry->se_collapse) + sort__need_collapse = 1; + + sd->taken = 1; + + return 0; +} + +static int __hpp_dimension__add(struct hpp_dimension *hd) +{ + if (!hd->taken) { + hd->taken = 1; + + perf_hpp__register_sort_field(hd->fmt); + } + return 0; +} + +static int __sort_dimension__add_output(struct sort_dimension *sd) +{ + if (sd->taken) + return 0; + + if (__sort_dimension__add_hpp_output(sd) < 0) + return -1; + + sd->taken = 1; 
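+
+	/*
+	 * Marking the dimension taken makes duplicate field tokens
+	 * harmless: e.g. a (hypothetical) "-F dso,dso" resolves here
+	 * twice, but the early return above adds the column only once.
+	 */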
+ return 0; +} + +static int __hpp_dimension__add_output(struct hpp_dimension *hd) +{ + if (!hd->taken) { + hd->taken = 1; + + perf_hpp__column_register(hd->fmt); + } + return 0; +} + +int sort_dimension__add(const char *tok) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { + struct sort_dimension *sd = &common_sort_dimensions[i]; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + if (sd->entry == &sort_parent) { + int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); + if (ret) { + char err[BUFSIZ]; + + regerror(ret, &parent_regex, err, sizeof(err)); + pr_err("Invalid regex: %s\n%s", parent_pattern, err); + return -EINVAL; + } + sort__has_parent = 1; + } else if (sd->entry == &sort_sym) { + sort__has_sym = 1; + /* + * perf diff displays the performance difference amongst + * two or more perf.data files. Those files could come + * from different binaries. So we should not compare + * their ips, but the name of symbol. + */ + if (sort__mode == SORT_MODE__DIFF) + sd->entry->se_collapse = sort__sym_sort; + + } else if (sd->entry == &sort_dso) { + sort__has_dso = 1; + } + + return __sort_dimension__add(sd); + } + + for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { + struct hpp_dimension *hd = &hpp_sort_dimensions[i]; + + if (strncasecmp(tok, hd->name, strlen(tok))) + continue; + + return __hpp_dimension__add(hd); + } + + for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { + struct sort_dimension *sd = &bstack_sort_dimensions[i]; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + if (sort__mode != SORT_MODE__BRANCH) + return -EINVAL; + + if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) + sort__has_sym = 1; + + __sort_dimension__add(sd); + return 0; + } + + for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { + struct sort_dimension *sd = &memory_sort_dimensions[i]; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + if (sort__mode != SORT_MODE__MEMORY) + return -EINVAL; + + if (sd->entry == &sort_mem_daddr_sym) + sort__has_sym = 1; + + __sort_dimension__add(sd); + return 0; + } + + return -ESRCH; +} + +static const char *get_default_sort_order(void) +{ + const char *default_sort_orders[] = { + default_sort_order, + default_branch_sort_order, + default_mem_sort_order, + default_top_sort_order, + default_diff_sort_order, + }; + + BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders)); + + return default_sort_orders[sort__mode]; +} + +static int setup_sort_order(void) +{ + char *new_sort_order; + + /* + * Append '+'-prefixed sort order to the default sort + * order string. + */ + if (!sort_order || is_strict_order(sort_order)) + return 0; + + if (sort_order[1] == '\0') { + error("Invalid --sort key: `+'"); + return -EINVAL; + } + + /* + * We allocate new sort_order string, but we never free it, + * because it's checked over the rest of the code. + */ + if (asprintf(&new_sort_order, "%s,%s", + get_default_sort_order(), sort_order + 1) < 0) { + error("Not enough memory to set up --sort"); + return -ENOMEM; + } + + sort_order = new_sort_order; + return 0; +} + +static int __setup_sorting(void) +{ + char *tmp, *tok, *str; + const char *sort_keys; + int ret = 0; + + ret = setup_sort_order(); + if (ret) + return ret; + + sort_keys = sort_order; + if (sort_keys == NULL) { + if (is_strict_order(field_order)) { + /* + * If user specified field order but no sort order, + * we'll honor it and not add default sort orders. 
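+			 *
+			 * For example, a plain `perf report -F overhead,sym'
+			 * (hypothetical invocation, no -s given) keeps just
+			 * those columns instead of adding the default keys.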
+ */ + return 0; + } + + sort_keys = get_default_sort_order(); + } + + str = strdup(sort_keys); + if (str == NULL) { + error("Not enough memory to setup sort keys"); + return -ENOMEM; + } + + for (tok = strtok_r(str, ", ", &tmp); + tok; tok = strtok_r(NULL, ", ", &tmp)) { + ret = sort_dimension__add(tok); + if (ret == -EINVAL) { + error("Invalid --sort key: `%s'", tok); + break; + } else if (ret == -ESRCH) { + error("Unknown --sort key: `%s'", tok); + break; + } + } + + free(str); + return ret; +} + +void perf_hpp__set_elide(int idx, bool elide) +{ + struct perf_hpp_fmt *fmt; + struct hpp_sort_entry *hse; + + perf_hpp__for_each_format(fmt) { + if (!perf_hpp__is_sort_entry(fmt)) + continue; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + if (hse->se->se_width_idx == idx) { + fmt->elide = elide; + break; + } + } +} + +static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp) +{ + if (list && strlist__nr_entries(list) == 1) { + if (fp != NULL) + fprintf(fp, "# %s: %s\n", list_name, + strlist__entry(list, 0)->s); + return true; + } + return false; +} + +static bool get_elide(int idx, FILE *output) +{ + switch (idx) { + case HISTC_SYMBOL: + return __get_elide(symbol_conf.sym_list, "symbol", output); + case HISTC_DSO: + return __get_elide(symbol_conf.dso_list, "dso", output); + case HISTC_COMM: + return __get_elide(symbol_conf.comm_list, "comm", output); + default: + break; + } + + if (sort__mode != SORT_MODE__BRANCH) + return false; + + switch (idx) { + case HISTC_SYMBOL_FROM: + return __get_elide(symbol_conf.sym_from_list, "sym_from", output); + case HISTC_SYMBOL_TO: + return __get_elide(symbol_conf.sym_to_list, "sym_to", output); + case HISTC_DSO_FROM: + return __get_elide(symbol_conf.dso_from_list, "dso_from", output); + case HISTC_DSO_TO: + return __get_elide(symbol_conf.dso_to_list, "dso_to", output); + default: + break; + } + + return false; +} + +void sort__setup_elide(FILE *output) +{ + struct perf_hpp_fmt *fmt; + struct hpp_sort_entry *hse; + + perf_hpp__for_each_format(fmt) { + if (!perf_hpp__is_sort_entry(fmt)) + continue; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + fmt->elide = get_elide(hse->se->se_width_idx, output); + } + + /* + * It makes no sense to elide all of sort entries. + * Just revert them to show up again. 
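+	 *
+	 * For example (hypothetical invocation): `perf report -s comm
+	 * --comms foo' elides the only sort column, so the pass below
+	 * detects that every column vanished and restores them all.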
+ */ + perf_hpp__for_each_format(fmt) { + if (!perf_hpp__is_sort_entry(fmt)) + continue; + + if (!fmt->elide) + return; + } + + perf_hpp__for_each_format(fmt) { + if (!perf_hpp__is_sort_entry(fmt)) + continue; + + fmt->elide = false; + } +} + +static int output_field_add(char *tok) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { + struct sort_dimension *sd = &common_sort_dimensions[i]; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + return __sort_dimension__add_output(sd); + } + + for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { + struct hpp_dimension *hd = &hpp_sort_dimensions[i]; + + if (strncasecmp(tok, hd->name, strlen(tok))) + continue; + + return __hpp_dimension__add_output(hd); + } + + for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { + struct sort_dimension *sd = &bstack_sort_dimensions[i]; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + return __sort_dimension__add_output(sd); + } + + for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { + struct sort_dimension *sd = &memory_sort_dimensions[i]; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + return __sort_dimension__add_output(sd); + } + + return -ESRCH; +} + +static void reset_dimensions(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) + common_sort_dimensions[i].taken = 0; + + for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) + hpp_sort_dimensions[i].taken = 0; + + for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) + bstack_sort_dimensions[i].taken = 0; + + for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) + memory_sort_dimensions[i].taken = 0; +} + +bool is_strict_order(const char *order) +{ + return order && (*order != '+'); +} + +static int __setup_output_field(void) +{ + char *tmp, *tok, *str, *strp; + int ret = -EINVAL; + + if (field_order == NULL) + return 0; + + reset_dimensions(); + + strp = str = strdup(field_order); + if (str == NULL) { + error("Not enough memory to setup output fields"); + return -ENOMEM; + } + + if (!is_strict_order(field_order)) + strp++; + + if (!strlen(strp)) { + error("Invalid --fields key: `+'"); + goto out; + } + + for (tok = strtok_r(strp, ", ", &tmp); + tok; tok = strtok_r(NULL, ", ", &tmp)) { + ret = output_field_add(tok); + if (ret == -EINVAL) { + error("Invalid --fields key: `%s'", tok); + break; + } else if (ret == -ESRCH) { + error("Unknown --fields key: `%s'", tok); + break; + } + } + +out: + free(str); + return ret; +} + +int setup_sorting(void) +{ + int err; + + err = __setup_sorting(); + if (err < 0) + return err; + + if (parent_pattern != default_parent_pattern) { + err = sort_dimension__add("parent"); + if (err < 0) + return err; + } + + reset_dimensions(); + + /* + * perf diff doesn't use default hpp output fields. 
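+	 * (It installs its own columns instead, e.g. baseline and
+	 * delta, so calling perf_hpp__init() here would add default
+	 * overhead/period columns perf diff does not show.)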
+	 */
+	if (sort__mode != SORT_MODE__DIFF)
+		perf_hpp__init();
+
+	err = __setup_output_field();
+	if (err < 0)
+		return err;
+
+	/* copy sort keys to output fields */
+	perf_hpp__setup_output_field();
+	/* and then copy output fields to sort keys */
+	perf_hpp__append_sort_keys();
+
+	return 0;
+}
+
+void reset_output_field(void)
+{
+	sort__need_collapse = 0;
+	sort__has_parent = 0;
+	sort__has_sym = 0;
+	sort__has_dso = 0;
+
+	field_order = NULL;
+	sort_order = NULL;
+
+	reset_dimensions();
+	perf_hpp__reset_output_field();
+}
diff --git a/kernel/tools/perf/util/sort.h b/kernel/tools/perf/util/sort.h
new file mode 100644
index 000000000..846036a92
--- /dev/null
+++ b/kernel/tools/perf/util/sort.h
@@ -0,0 +1,222 @@
+#ifndef __PERF_SORT_H
+#define __PERF_SORT_H
+#include "../builtin.h"
+
+#include "util.h"
+
+#include "color.h"
+#include <linux/list.h>
+#include "cache.h"
+#include <linux/rbtree.h>
+#include "symbol.h"
+#include "string.h"
+#include "callchain.h"
+#include "strlist.h"
+#include "values.h"
+
+#include "../perf.h"
+#include "debug.h"
+#include "header.h"
+
+#include "parse-options.h"
+#include "parse-events.h"
+#include "hist.h"
+#include "thread.h"
+
+extern regex_t parent_regex;
+extern const char *sort_order;
+extern const char *field_order;
+extern const char default_parent_pattern[];
+extern const char *parent_pattern;
+extern const char default_sort_order[];
+extern regex_t ignore_callees_regex;
+extern int have_ignore_callees;
+extern int sort__need_collapse;
+extern int sort__has_parent;
+extern int sort__has_sym;
+extern enum sort_mode sort__mode;
+extern struct sort_entry sort_comm;
+extern struct sort_entry sort_dso;
+extern struct sort_entry sort_sym;
+extern struct sort_entry sort_parent;
+extern struct sort_entry sort_dso_from;
+extern struct sort_entry sort_dso_to;
+extern struct sort_entry sort_sym_from;
+extern struct sort_entry sort_sym_to;
+extern enum sort_type sort__first_dimension;
+extern const char default_mem_sort_order[];
+
+struct he_stat {
+	u64	period;
+	u64	period_sys;
+	u64	period_us;
+	u64	period_guest_sys;
+	u64	period_guest_us;
+	u64	weight;
+	u32	nr_events;
+};
+
+struct hist_entry_diff {
+	bool	computed;
+
+	/* PERF_HPP__DELTA */
+	double	period_ratio_delta;
+
+	/* PERF_HPP__RATIO */
+	double	period_ratio;
+
+	/* HISTC_WEIGHTED_DIFF */
+	s64	wdiff;
+};
+
+/**
+ * struct hist_entry - histogram entry
+ *
+ * @row_offset - offset from the first callchain expanded to appear on screen
+ * @nr_rows - rows expanded in callchain, recalculated on folding/unfolding
+ */
+struct hist_entry {
+	struct rb_node		rb_node_in;
+	struct rb_node		rb_node;
+	union {
+		struct list_head node;
+		struct list_head head;
+	} pairs;
+	struct he_stat		stat;
+	struct he_stat		*stat_acc;
+	struct map_symbol	ms;
+	struct thread		*thread;
+	struct comm		*comm;
+	u64			ip;
+	u64			transaction;
+	s32			cpu;
+	u8			cpumode;
+
+	struct hist_entry_diff	diff;
+
+	/* We are added by hists__add_dummy_entry.
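+	 * A dummy entry carries no period of its own; perf diff adds
+	 * one when an entry exists in only one of the compared
+	 * perf.data files, so each entry still has a pair to compare
+	 * against.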
+	 */
+	bool			dummy;
+
+	/* XXX These two should move to some tree widget lib */
+	u16			row_offset;
+	u16			nr_rows;
+
+	bool			init_have_children;
+	char			level;
+	u8			filtered;
+	char			*srcline;
+	struct symbol		*parent;
+	unsigned long		position;
+	struct rb_root		sorted_chain;
+	struct branch_info	*branch_info;
+	struct hists		*hists;
+	struct mem_info		*mem_info;
+	struct callchain_root	callchain[0]; /* must be last member */
+};
+
+static inline bool hist_entry__has_pairs(struct hist_entry *he)
+{
+	return !list_empty(&he->pairs.node);
+}
+
+static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he)
+{
+	if (hist_entry__has_pairs(he))
+		return list_entry(he->pairs.node.next, struct hist_entry, pairs.node);
+	return NULL;
+}
+
+static inline void hist_entry__add_pair(struct hist_entry *pair,
+					struct hist_entry *he)
+{
+	list_add_tail(&pair->pairs.node, &he->pairs.head);
+}
+
+static inline float hist_entry__get_percent_limit(struct hist_entry *he)
+{
+	u64 period = he->stat.period;
+	u64 total_period = hists__total_period(he->hists);
+
+	if (unlikely(total_period == 0))
+		return 0;
+
+	if (symbol_conf.cumulate_callchain)
+		period = he->stat_acc->period;
+
+	return period * 100.0 / total_period;
+}
+
+
+enum sort_mode {
+	SORT_MODE__NORMAL,
+	SORT_MODE__BRANCH,
+	SORT_MODE__MEMORY,
+	SORT_MODE__TOP,
+	SORT_MODE__DIFF,
+};
+
+enum sort_type {
+	/* common sort keys */
+	SORT_PID,
+	SORT_COMM,
+	SORT_DSO,
+	SORT_SYM,
+	SORT_PARENT,
+	SORT_CPU,
+	SORT_SRCLINE,
+	SORT_LOCAL_WEIGHT,
+	SORT_GLOBAL_WEIGHT,
+	SORT_TRANSACTION,
+
+	/* branch stack specific sort keys */
+	__SORT_BRANCH_STACK,
+	SORT_DSO_FROM = __SORT_BRANCH_STACK,
+	SORT_DSO_TO,
+	SORT_SYM_FROM,
+	SORT_SYM_TO,
+	SORT_MISPREDICT,
+	SORT_ABORT,
+	SORT_IN_TX,
+
+	/* memory mode specific sort keys */
+	__SORT_MEMORY_MODE,
+	SORT_MEM_DADDR_SYMBOL = __SORT_MEMORY_MODE,
+	SORT_MEM_DADDR_DSO,
+	SORT_MEM_LOCKED,
+	SORT_MEM_TLB,
+	SORT_MEM_LVL,
+	SORT_MEM_SNOOP,
+	SORT_MEM_DCACHELINE,
+};
+
+/*
+ * configurable sorting bits
+ */
+
+struct sort_entry {
+	struct list_head list;
+
+	const char *se_header;
+
+	int64_t (*se_cmp)(struct hist_entry *, struct hist_entry *);
+	int64_t (*se_collapse)(struct hist_entry *, struct hist_entry *);
+	int64_t (*se_sort)(struct hist_entry *, struct hist_entry *);
+	int	(*se_snprintf)(struct hist_entry *he, char *bf, size_t size,
+			       unsigned int width);
+	u8	se_width_idx;
+};
+
+extern struct sort_entry sort_thread;
+extern struct list_head hist_entry__sort_list;
+
+int setup_sorting(void);
+int setup_output_field(void);
+void reset_output_field(void);
+extern int sort_dimension__add(const char *);
+void sort__setup_elide(FILE *fp);
+void perf_hpp__set_elide(int idx, bool elide);
+
+int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
+
+bool is_strict_order(const char *order);
+#endif	/* __PERF_SORT_H */
diff --git a/kernel/tools/perf/util/srcline.c b/kernel/tools/perf/util/srcline.c
new file mode 100644
index 000000000..c93fb0c5b
--- /dev/null
+++ b/kernel/tools/perf/util/srcline.c
@@ -0,0 +1,308 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <linux/kernel.h>
+
+#include "util/dso.h"
+#include "util/util.h"
+#include "util/debug.h"
+
+#include "symbol.h"
+
+#ifdef HAVE_LIBBFD_SUPPORT
+
+/*
+ * Implement addr2line using libbfd.
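+ *
+ * Rough flow: bfd_openr() the DSO once and cache the handle in
+ * dso->a2l, slurp its (possibly dynamic) symbol table, then resolve
+ * each address by walking the sections with bfd_map_over_sections()
+ * and letting bfd_find_nearest_line() turn the section-relative PC
+ * into file, function and line.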
+ */
+#define PACKAGE "perf"
+#include <bfd.h>
+
+struct a2l_data {
+	const char	*input;
+	u64		addr;
+
+	bool		found;
+	const char	*filename;
+	const char	*funcname;
+	unsigned	line;
+
+	bfd		*abfd;
+	asymbol		**syms;
+};
+
+static int bfd_error(const char *string)
+{
+	const char *errmsg;
+
+	errmsg = bfd_errmsg(bfd_get_error());
+	fflush(stdout);
+
+	if (string)
+		pr_debug("%s: %s\n", string, errmsg);
+	else
+		pr_debug("%s\n", errmsg);
+
+	return -1;
+}
+
+static int slurp_symtab(bfd *abfd, struct a2l_data *a2l)
+{
+	long storage;
+	long symcount;
+	asymbol **syms;
+	bfd_boolean dynamic = FALSE;
+
+	if ((bfd_get_file_flags(abfd) & HAS_SYMS) == 0)
+		return bfd_error(bfd_get_filename(abfd));
+
+	storage = bfd_get_symtab_upper_bound(abfd);
+	if (storage == 0L) {
+		storage = bfd_get_dynamic_symtab_upper_bound(abfd);
+		dynamic = TRUE;
+	}
+	if (storage < 0L)
+		return bfd_error(bfd_get_filename(abfd));
+
+	syms = malloc(storage);
+	if (dynamic)
+		symcount = bfd_canonicalize_dynamic_symtab(abfd, syms);
+	else
+		symcount = bfd_canonicalize_symtab(abfd, syms);
+
+	if (symcount < 0) {
+		free(syms);
+		return bfd_error(bfd_get_filename(abfd));
+	}
+
+	a2l->syms = syms;
+	return 0;
+}
+
+static void find_address_in_section(bfd *abfd, asection *section, void *data)
+{
+	bfd_vma pc, vma;
+	bfd_size_type size;
+	struct a2l_data *a2l = data;
+
+	if (a2l->found)
+		return;
+
+	if ((bfd_get_section_flags(abfd, section) & SEC_ALLOC) == 0)
+		return;
+
+	pc = a2l->addr;
+	vma = bfd_get_section_vma(abfd, section);
+	size = bfd_get_section_size(section);
+
+	if (pc < vma || pc >= vma + size)
+		return;
+
+	a2l->found = bfd_find_nearest_line(abfd, section, a2l->syms, pc - vma,
+					   &a2l->filename, &a2l->funcname,
+					   &a2l->line);
+}
+
+static struct a2l_data *addr2line_init(const char *path)
+{
+	bfd *abfd;
+	struct a2l_data *a2l = NULL;
+
+	abfd = bfd_openr(path, NULL);
+	if (abfd == NULL)
+		return NULL;
+
+	if (!bfd_check_format(abfd, bfd_object))
+		goto out;
+
+	a2l = zalloc(sizeof(*a2l));
+	if (a2l == NULL)
+		goto out;
+
+	a2l->abfd = abfd;
+	a2l->input = strdup(path);
+	if (a2l->input == NULL)
+		goto out;
+
+	if (slurp_symtab(abfd, a2l))
+		goto out;
+
+	return a2l;
+
+out:
+	if (a2l) {
+		zfree((char **)&a2l->input);
+		free(a2l);
+	}
+	bfd_close(abfd);
+	return NULL;
+}
+
+static void addr2line_cleanup(struct a2l_data *a2l)
+{
+	if (a2l->abfd)
+		bfd_close(a2l->abfd);
+	zfree((char **)&a2l->input);
+	zfree(&a2l->syms);
+	free(a2l);
+}
+
+static int addr2line(const char *dso_name, u64 addr,
+		     char **file, unsigned int *line, struct dso *dso)
+{
+	int ret = 0;
+	struct a2l_data *a2l = dso->a2l;
+
+	if (!a2l) {
+		dso->a2l = addr2line_init(dso_name);
+		a2l = dso->a2l;
+	}
+
+	if (a2l == NULL) {
+		pr_warning("addr2line_init failed for %s\n", dso_name);
+		return 0;
+	}
+
+	a2l->addr = addr;
+	a2l->found = false;
+
+	bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l);
+
+	if (a2l->found && a2l->filename) {
+		*file = strdup(a2l->filename);
+		*line = a2l->line;
+
+		if (*file)
+			ret = 1;
+	}
+
+	return ret;
+}
+
+void dso__free_a2l(struct dso *dso)
+{
+	struct a2l_data *a2l = dso->a2l;
+
+	if (!a2l)
+		return;
+
+	addr2line_cleanup(a2l);
+
+	dso->a2l = NULL;
+}
+
+#else /* HAVE_LIBBFD_SUPPORT */
+
+static int addr2line(const char *dso_name, u64 addr,
+		     char **file, unsigned int *line_nr,
+		     struct dso *dso __maybe_unused)
+{
+	FILE *fp;
+	char cmd[PATH_MAX];
+	char *filename = NULL;
+	size_t len;
+	char *sep;
+	int ret = 0;
+
+	scnprintf(cmd, sizeof(cmd), "addr2line -e %s %016"PRIx64,
+		  dso_name, addr);
+
+	fp = popen(cmd, "r");
"r"); + if (fp == NULL) { + pr_warning("popen failed for %s\n", dso_name); + return 0; + } + + if (getline(&filename, &len, fp) < 0 || !len) { + pr_warning("addr2line has no output for %s\n", dso_name); + goto out; + } + + sep = strchr(filename, '\n'); + if (sep) + *sep = '\0'; + + if (!strcmp(filename, "??:0")) { + pr_debug("no debugging info in %s\n", dso_name); + free(filename); + goto out; + } + + sep = strchr(filename, ':'); + if (sep) { + *sep++ = '\0'; + *file = filename; + *line_nr = strtoul(sep, NULL, 0); + ret = 1; + } +out: + pclose(fp); + return ret; +} + +void dso__free_a2l(struct dso *dso __maybe_unused) +{ +} + +#endif /* HAVE_LIBBFD_SUPPORT */ + +/* + * Number of addr2line failures (without success) before disabling it for that + * dso. + */ +#define A2L_FAIL_LIMIT 123 + +char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym, + bool show_sym) +{ + char *file = NULL; + unsigned line = 0; + char *srcline; + const char *dso_name; + + if (!dso->has_srcline) + goto out; + + if (dso->symsrc_filename) + dso_name = dso->symsrc_filename; + else + dso_name = dso->long_name; + + if (dso_name[0] == '[') + goto out; + + if (!strncmp(dso_name, "/tmp/perf-", 10)) + goto out; + + if (!addr2line(dso_name, addr, &file, &line, dso)) + goto out; + + if (asprintf(&srcline, "%s:%u", basename(file), line) < 0) { + free(file); + goto out; + } + + dso->a2l_fails = 0; + + free(file); + return srcline; + +out: + if (dso->a2l_fails && ++dso->a2l_fails > A2L_FAIL_LIMIT) { + dso->has_srcline = 0; + dso__free_a2l(dso); + } + if (sym) { + if (asprintf(&srcline, "%s+%" PRIu64, show_sym ? sym->name : "", + addr - sym->start) < 0) + return SRCLINE_UNKNOWN; + } else if (asprintf(&srcline, "%s[%" PRIx64 "]", dso->short_name, addr) < 0) + return SRCLINE_UNKNOWN; + return srcline; +} + +void free_srcline(char *srcline) +{ + if (srcline && strcmp(srcline, SRCLINE_UNKNOWN) != 0) + free(srcline); +} diff --git a/kernel/tools/perf/util/stat.c b/kernel/tools/perf/util/stat.c new file mode 100644 index 000000000..6506b3dfb --- /dev/null +++ b/kernel/tools/perf/util/stat.c @@ -0,0 +1,63 @@ +#include + +#include "stat.h" + +void update_stats(struct stats *stats, u64 val) +{ + double delta; + + stats->n++; + delta = val - stats->mean; + stats->mean += delta / stats->n; + stats->M2 += delta*(val - stats->mean); + + if (val > stats->max) + stats->max = val; + + if (val < stats->min) + stats->min = val; +} + +double avg_stats(struct stats *stats) +{ + return stats->mean; +} + +/* + * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + * + * (\Sum n_i^2) - ((\Sum n_i)^2)/n + * s^2 = ------------------------------- + * n - 1 + * + * http://en.wikipedia.org/wiki/Stddev + * + * The std dev of the mean is related to the std dev by: + * + * s + * s_mean = ------- + * sqrt(n) + * + */ +double stddev_stats(struct stats *stats) +{ + double variance, variance_mean; + + if (stats->n < 2) + return 0.0; + + variance = stats->M2 / (stats->n - 1); + variance_mean = variance / stats->n; + + return sqrt(variance_mean); +} + +double rel_stddev_stats(double stddev, double avg) +{ + double pct = 0.0; + + if (avg) + pct = 100.0 * stddev/avg; + + return pct; +} diff --git a/kernel/tools/perf/util/stat.h b/kernel/tools/perf/util/stat.h new file mode 100644 index 000000000..5667fc3e3 --- /dev/null +++ b/kernel/tools/perf/util/stat.h @@ -0,0 +1,25 @@ +#ifndef __PERF_STATS_H +#define __PERF_STATS_H + +#include + +struct stats +{ + double n, mean, M2; + u64 max, min; +}; + +void update_stats(struct stats *stats, u64 val); 
+double avg_stats(struct stats *stats);
+double stddev_stats(struct stats *stats);
+double rel_stddev_stats(double stddev, double avg);
+
+static inline void init_stats(struct stats *stats)
+{
+	stats->n    = 0.0;
+	stats->mean = 0.0;
+	stats->M2   = 0.0;
+	stats->min  = (u64) -1;
+	stats->max  = 0;
+}
+#endif
diff --git a/kernel/tools/perf/util/strbuf.c b/kernel/tools/perf/util/strbuf.c
new file mode 100644
index 000000000..4abe23550
--- /dev/null
+++ b/kernel/tools/perf/util/strbuf.c
@@ -0,0 +1,134 @@
+#include "cache.h"
+#include <linux/kernel.h>
+
+int prefixcmp(const char *str, const char *prefix)
+{
+	for (; ; str++, prefix++)
+		if (!*prefix)
+			return 0;
+		else if (*str != *prefix)
+			return (unsigned char)*prefix - (unsigned char)*str;
+}
+
+/*
+ * Used as the default ->buf value, so that people can always assume
+ * buf is non NULL and ->buf is NUL terminated even for a freshly
+ * initialized strbuf.
+ */
+char strbuf_slopbuf[1];
+
+void strbuf_init(struct strbuf *sb, ssize_t hint)
+{
+	sb->alloc = sb->len = 0;
+	sb->buf = strbuf_slopbuf;
+	if (hint)
+		strbuf_grow(sb, hint);
+}
+
+void strbuf_release(struct strbuf *sb)
+{
+	if (sb->alloc) {
+		zfree(&sb->buf);
+		strbuf_init(sb, 0);
+	}
+}
+
+char *strbuf_detach(struct strbuf *sb, size_t *sz)
+{
+	char *res = sb->alloc ? sb->buf : NULL;
+	if (sz)
+		*sz = sb->len;
+	strbuf_init(sb, 0);
+	return res;
+}
+
+void strbuf_grow(struct strbuf *sb, size_t extra)
+{
+	if (sb->len + extra + 1 <= sb->len)
+		die("you want to use way too much memory");
+	if (!sb->alloc)
+		sb->buf = NULL;
+	ALLOC_GROW(sb->buf, sb->len + extra + 1, sb->alloc);
+}
+
+static void strbuf_splice(struct strbuf *sb, size_t pos, size_t len,
+			  const void *data, size_t dlen)
+{
+	if (pos + len < pos)
+		die("you want to use way too much memory");
+	if (pos > sb->len)
+		die("`pos' is too far after the end of the buffer");
+	if (pos + len > sb->len)
+		die("`pos + len' is too far after the end of the buffer");
+
+	if (dlen >= len)
+		strbuf_grow(sb, dlen - len);
+	memmove(sb->buf + pos + dlen,
+		sb->buf + pos + len,
+		sb->len - pos - len);
+	memcpy(sb->buf + pos, data, dlen);
+	strbuf_setlen(sb, sb->len + dlen - len);
+}
+
+void strbuf_remove(struct strbuf *sb, size_t pos, size_t len)
+{
+	strbuf_splice(sb, pos, len, NULL, 0);
+}
+
+void strbuf_add(struct strbuf *sb, const void *data, size_t len)
+{
+	strbuf_grow(sb, len);
+	memcpy(sb->buf + sb->len, data, len);
+	strbuf_setlen(sb, sb->len + len);
+}
+
+void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
+{
+	int len;
+	va_list ap;
+
+	if (!strbuf_avail(sb))
+		strbuf_grow(sb, 64);
+	va_start(ap, fmt);
+	len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+	va_end(ap);
+	if (len < 0)
+		die("your vsnprintf is broken");
+	if (len > strbuf_avail(sb)) {
+		strbuf_grow(sb, len);
+		va_start(ap, fmt);
+		len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+		va_end(ap);
+		if (len > strbuf_avail(sb)) {
+			die("this should not happen, your vsnprintf is broken");
+		}
+	}
+	strbuf_setlen(sb, sb->len + len);
+}
+
+ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint)
+{
+	size_t oldlen = sb->len;
+	size_t oldalloc = sb->alloc;
+
+	strbuf_grow(sb, hint ? hint : 8192);
+	for (;;) {
+		ssize_t cnt;
+
+		cnt = read(fd, sb->buf + sb->len, sb->alloc - sb->len - 1);
+		if (cnt < 0) {
+			if (oldalloc == 0)
+				strbuf_release(sb);
+			else
+				strbuf_setlen(sb, oldlen);
+			return -1;
+		}
+		if (!cnt)
+			break;
+		sb->len += cnt;
+		strbuf_grow(sb, 8192);
+	}
+
+	sb->buf[sb->len] = '\0';
+	return sb->len - oldlen;
+}
diff --git a/kernel/tools/perf/util/strbuf.h b/kernel/tools/perf/util/strbuf.h
new file mode 100644
index 000000000..436ac319f
--- /dev/null
+++ b/kernel/tools/perf/util/strbuf.h
@@ -0,0 +1,92 @@
+#ifndef __PERF_STRBUF_H
+#define __PERF_STRBUF_H
+
+/*
+ * Strbufs can be used in many ways: as a byte array, or to store arbitrarily
+ * long, overflow safe strings.
+ *
+ * Strbufs have some invariants that are very important to keep in mind:
+ *
+ * 1. the ->buf member is always malloc-ed, hence strbufs can be used to
+ *    build complex strings/buffers whose final size isn't easily known.
+ *
+ *    It is NOT legal to copy the ->buf pointer away.
+ *    `strbuf_detach' is the operation that detaches a buffer from its shell
+ *    while keeping the shell valid wrt its invariants.
+ *
+ * 2. the ->buf member is a byte array that has at least ->len + 1 bytes
+ *    allocated. The extra byte is used to store a '\0', allowing the ->buf
+ *    member to be a valid C-string. Every strbuf function ensures this
+ *    invariant is preserved.
+ *
+ *    Note that it is OK to "play" with the buffer directly if you work it
+ *    that way:
+ *
+ *    strbuf_grow(sb, SOME_SIZE);
+ *       ... Here, the memory array starting at sb->buf, and of length
+ *       ... strbuf_avail(sb) is all yours, and you are sure that
+ *       ... strbuf_avail(sb) is at least SOME_SIZE.
+ *    strbuf_setlen(sb, sb->len + SOME_OTHER_SIZE);
+ *
+ *    Of course, SOME_OTHER_SIZE must be smaller than or equal to
+ *    strbuf_avail(sb).
+ *
+ *    Doing so is safe, though if it has to be done in many places, adding the
+ *    missing API to the strbuf module is the way to go.
+ *
+ *    XXX: do _not_ assume that the area that is yours is of size ->alloc - 1
+ *         even if it's true in the current implementation. Alloc is somehow a
+ *         "private" member that should not be messed with.
+ */
+
+#include <assert.h>
+
+extern char strbuf_slopbuf[];
+struct strbuf {
+	size_t alloc;
+	size_t len;
+	char *buf;
+};
+
+#define STRBUF_INIT  { 0, 0, strbuf_slopbuf }
+
+/*----- strbuf life cycle -----*/
+extern void strbuf_init(struct strbuf *buf, ssize_t hint);
+extern void strbuf_release(struct strbuf *);
+extern char *strbuf_detach(struct strbuf *, size_t *);
+
+/*----- strbuf size related -----*/
+static inline ssize_t strbuf_avail(const struct strbuf *sb) {
+	return sb->alloc ?
sb->alloc - sb->len - 1 : 0; +} + +extern void strbuf_grow(struct strbuf *, size_t); + +static inline void strbuf_setlen(struct strbuf *sb, size_t len) { + if (!sb->alloc) + strbuf_grow(sb, 0); + assert(len < sb->alloc); + sb->len = len; + sb->buf[len] = '\0'; +} + +/*----- add data in your buffer -----*/ +static inline void strbuf_addch(struct strbuf *sb, int c) { + strbuf_grow(sb, 1); + sb->buf[sb->len++] = c; + sb->buf[sb->len] = '\0'; +} + +extern void strbuf_remove(struct strbuf *, size_t pos, size_t len); + +extern void strbuf_add(struct strbuf *, const void *, size_t); +static inline void strbuf_addstr(struct strbuf *sb, const char *s) { + strbuf_add(sb, s, strlen(s)); +} + +__attribute__((format(printf,2,3))) +extern void strbuf_addf(struct strbuf *sb, const char *fmt, ...); + +/* XXX: if read fails, any partial read is undone */ +extern ssize_t strbuf_read(struct strbuf *, int fd, ssize_t hint); + +#endif /* __PERF_STRBUF_H */ diff --git a/kernel/tools/perf/util/strfilter.c b/kernel/tools/perf/util/strfilter.c new file mode 100644 index 000000000..79a757a2a --- /dev/null +++ b/kernel/tools/perf/util/strfilter.c @@ -0,0 +1,199 @@ +#include "util.h" +#include "string.h" +#include "strfilter.h" + +/* Operators */ +static const char *OP_and = "&"; /* Logical AND */ +static const char *OP_or = "|"; /* Logical OR */ +static const char *OP_not = "!"; /* Logical NOT */ + +#define is_operator(c) ((c) == '|' || (c) == '&' || (c) == '!') +#define is_separator(c) (is_operator(c) || (c) == '(' || (c) == ')') + +static void strfilter_node__delete(struct strfilter_node *node) +{ + if (node) { + if (node->p && !is_operator(*node->p)) + zfree((char **)&node->p); + strfilter_node__delete(node->l); + strfilter_node__delete(node->r); + free(node); + } +} + +void strfilter__delete(struct strfilter *filter) +{ + if (filter) { + strfilter_node__delete(filter->root); + free(filter); + } +} + +static const char *get_token(const char *s, const char **e) +{ + const char *p; + + while (isspace(*s)) /* Skip spaces */ + s++; + + if (*s == '\0') { + p = s; + goto end; + } + + p = s + 1; + if (!is_separator(*s)) { + /* End search */ +retry: + while (*p && !is_separator(*p) && !isspace(*p)) + p++; + /* Escape and special case: '!' is also used in glob pattern */ + if (*(p - 1) == '\\' || (*p == '!' 
&& *(p - 1) == '[')) { + p++; + goto retry; + } + } +end: + *e = p; + return s; +} + +static struct strfilter_node *strfilter_node__alloc(const char *op, + struct strfilter_node *l, + struct strfilter_node *r) +{ + struct strfilter_node *node = zalloc(sizeof(*node)); + + if (node) { + node->p = op; + node->l = l; + node->r = r; + } + + return node; +} + +static struct strfilter_node *strfilter_node__new(const char *s, + const char **ep) +{ + struct strfilter_node root, *cur, *last_op; + const char *e; + + if (!s) + return NULL; + + memset(&root, 0, sizeof(root)); + last_op = cur = &root; + + s = get_token(s, &e); + while (*s != '\0' && *s != ')') { + switch (*s) { + case '&': /* Exchg last OP->r with AND */ + if (!cur->r || !last_op->r) + goto error; + cur = strfilter_node__alloc(OP_and, last_op->r, NULL); + if (!cur) + goto nomem; + last_op->r = cur; + last_op = cur; + break; + case '|': /* Exchg the root with OR */ + if (!cur->r || !root.r) + goto error; + cur = strfilter_node__alloc(OP_or, root.r, NULL); + if (!cur) + goto nomem; + root.r = cur; + last_op = cur; + break; + case '!': /* Add NOT as a leaf node */ + if (cur->r) + goto error; + cur->r = strfilter_node__alloc(OP_not, NULL, NULL); + if (!cur->r) + goto nomem; + cur = cur->r; + break; + case '(': /* Recursively parses inside the parenthesis */ + if (cur->r) + goto error; + cur->r = strfilter_node__new(s + 1, &s); + if (!s) + goto nomem; + if (!cur->r || *s != ')') + goto error; + e = s + 1; + break; + default: + if (cur->r) + goto error; + cur->r = strfilter_node__alloc(NULL, NULL, NULL); + if (!cur->r) + goto nomem; + cur->r->p = strndup(s, e - s); + if (!cur->r->p) + goto nomem; + } + s = get_token(e, &e); + } + if (!cur->r) + goto error; + *ep = s; + return root.r; +nomem: + s = NULL; +error: + *ep = s; + strfilter_node__delete(root.r); + return NULL; +} + +/* + * Parse filter rule and return new strfilter. + * Return NULL if fail, and *ep == NULL if memory allocation failed. 
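+ *
+ * Usage sketch (illustrative):
+ *
+ *	const char *err;
+ *	struct strfilter *f = strfilter__new("sys_* & !sys_exit*", &err);
+ *
+ * which accepts "sys_open" but rejects "sys_exit_group", since both
+ * glob terms must hold under '&'.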
+ */
+struct strfilter *strfilter__new(const char *rules, const char **err)
+{
+	struct strfilter *filter = zalloc(sizeof(*filter));
+	const char *ep = NULL;
+
+	if (filter)
+		filter->root = strfilter_node__new(rules, &ep);
+
+	if (!filter || !filter->root || *ep != '\0') {
+		if (err)
+			*err = ep;
+		strfilter__delete(filter);
+		filter = NULL;
+	}
+
+	return filter;
+}
+
+static bool strfilter_node__compare(struct strfilter_node *node,
+				    const char *str)
+{
+	if (!node || !node->p)
+		return false;
+
+	switch (*node->p) {
+	case '|':	/* OR */
+		return strfilter_node__compare(node->l, str) ||
+			strfilter_node__compare(node->r, str);
+	case '&':	/* AND */
+		return strfilter_node__compare(node->l, str) &&
+			strfilter_node__compare(node->r, str);
+	case '!':	/* NOT */
+		return !strfilter_node__compare(node->r, str);
+	default:
+		return strglobmatch(str, node->p);
+	}
+}
+
+/* Return true if STR matches the filter rules */
+bool strfilter__compare(struct strfilter *filter, const char *str)
+{
+	if (!filter)
+		return false;
+	return strfilter_node__compare(filter->root, str);
+}
diff --git a/kernel/tools/perf/util/strfilter.h b/kernel/tools/perf/util/strfilter.h
new file mode 100644
index 000000000..fe611f3c9
--- /dev/null
+++ b/kernel/tools/perf/util/strfilter.h
@@ -0,0 +1,48 @@
+#ifndef __PERF_STRFILTER_H
+#define __PERF_STRFILTER_H
+/* General purpose glob matching filter */
+
+#include <linux/list.h>
+#include <stdbool.h>
+
+/* A node of string filter */
+struct strfilter_node {
+	struct strfilter_node *l;	/* Tree left branch (for &,|) */
+	struct strfilter_node *r;	/* Tree right branch (for !,&,|) */
+	const char *p;		/* Operator or rule */
+};
+
+/* String filter */
+struct strfilter {
+	struct strfilter_node *root;
+};
+
+/**
+ * strfilter__new - Create a new string filter
+ * @rules: Filter rule, which is a combination of glob expressions.
+ * @err: Pointer which points to an error detected in @rules
+ *
+ * Parse @rules and return a new strfilter. Return NULL if an error is
+ * detected. In that case, *@err will indicate where the error was
+ * detected, and *@err is NULL if a memory allocation failed.
+ */
+struct strfilter *strfilter__new(const char *rules, const char **err);
+
+/**
+ * strfilter__compare - compare given string and a string filter
+ * @filter: String filter
+ * @str: target string
+ *
+ * Compare @str and @filter. Return true if @str matches the rule
+ */
+bool strfilter__compare(struct strfilter *filter, const char *str);
+
+/**
+ * strfilter__delete - delete a string filter
+ * @filter: String filter to delete
+ *
+ * Delete @filter.
+ */
+void strfilter__delete(struct strfilter *filter);
+
+#endif
diff --git a/kernel/tools/perf/util/string.c b/kernel/tools/perf/util/string.c
new file mode 100644
index 000000000..6afd6106c
--- /dev/null
+++ b/kernel/tools/perf/util/string.c
@@ -0,0 +1,359 @@
+#include "util.h"
+#include "linux/string.h"
+
+#define K 1024LL
+/*
+ * perf_atoll()
+ * Parse (\d+)(b|B|kb|KB|mb|MB|gb|GB|tb|TB) (e.g.
"256MB") + * and return its numeric value + */ +s64 perf_atoll(const char *str) +{ + s64 length; + char *p; + char c; + + if (!isdigit(str[0])) + goto out_err; + + length = strtoll(str, &p, 10); + switch (c = *p++) { + case 'b': case 'B': + if (*p) + goto out_err; + case '\0': + return length; + default: + goto out_err; + /* two-letter suffices */ + case 'k': case 'K': + length <<= 10; + break; + case 'm': case 'M': + length <<= 20; + break; + case 'g': case 'G': + length <<= 30; + break; + case 't': case 'T': + length <<= 40; + break; + } + /* we want the cases to match */ + if (islower(c)) { + if (strcmp(p, "b") != 0) + goto out_err; + } else { + if (strcmp(p, "B") != 0) + goto out_err; + } + return length; + +out_err: + return -1; +} + +/* + * Helper function for splitting a string into an argv-like array. + * originally copied from lib/argv_split.c + */ +static const char *skip_sep(const char *cp) +{ + while (*cp && isspace(*cp)) + cp++; + + return cp; +} + +static const char *skip_arg(const char *cp) +{ + while (*cp && !isspace(*cp)) + cp++; + + return cp; +} + +static int count_argc(const char *str) +{ + int count = 0; + + while (*str) { + str = skip_sep(str); + if (*str) { + count++; + str = skip_arg(str); + } + } + + return count; +} + +/** + * argv_free - free an argv + * @argv - the argument vector to be freed + * + * Frees an argv and the strings it points to. + */ +void argv_free(char **argv) +{ + char **p; + for (p = argv; *p; p++) + zfree(p); + + free(argv); +} + +/** + * argv_split - split a string at whitespace, returning an argv + * @str: the string to be split + * @argcp: returned argument count + * + * Returns an array of pointers to strings which are split out from + * @str. This is performed by strictly splitting on white-space; no + * quote processing is performed. Multiple whitespace characters are + * considered to be a single argument separator. The returned array + * is always NULL-terminated. Returns NULL on memory allocation + * failure. + */ +char **argv_split(const char *str, int *argcp) +{ + int argc = count_argc(str); + char **argv = zalloc(sizeof(*argv) * (argc+1)); + char **argvp; + + if (argv == NULL) + goto out; + + if (argcp) + *argcp = argc; + + argvp = argv; + + while (*str) { + str = skip_sep(str); + + if (*str) { + const char *p = str; + char *t; + + str = skip_arg(str); + + t = strndup(p, str-p); + if (t == NULL) + goto fail; + *argvp++ = t; + } + } + *argvp = NULL; + +out: + return argv; + +fail: + argv_free(argv); + return NULL; +} + +/* Character class matching */ +static bool __match_charclass(const char *pat, char c, const char **npat) +{ + bool complement = false, ret = true; + + if (*pat == '!') { + complement = true; + pat++; + } + if (*pat++ == c) /* First character is special */ + goto end; + + while (*pat && *pat != ']') { /* Matching */ + if (*pat == '-' && *(pat + 1) != ']') { /* Range */ + if (*(pat - 1) <= c && c <= *(pat + 1)) + goto end; + if (*(pat - 1) > *(pat + 1)) + goto error; + pat += 2; + } else if (*pat++ == c) + goto end; + } + if (!*pat) + goto error; + ret = false; + +end: + while (*pat && *pat != ']') /* Searching closing */ + pat++; + if (!*pat) + goto error; + *npat = pat + 1; + return complement ? 
!ret : ret; + +error: + return false; +} + +/* Glob/lazy pattern matching */ +static bool __match_glob(const char *str, const char *pat, bool ignore_space) +{ + while (*str && *pat && *pat != '*') { + if (ignore_space) { + /* Ignore spaces for lazy matching */ + if (isspace(*str)) { + str++; + continue; + } + if (isspace(*pat)) { + pat++; + continue; + } + } + if (*pat == '?') { /* Matches any single character */ + str++; + pat++; + continue; + } else if (*pat == '[') /* Character classes/Ranges */ + if (__match_charclass(pat + 1, *str, &pat)) { + str++; + continue; + } else + return false; + else if (*pat == '\\') /* Escaped char match as normal char */ + pat++; + if (*str++ != *pat++) + return false; + } + /* Check wild card */ + if (*pat == '*') { + while (*pat == '*') + pat++; + if (!*pat) /* Tail wild card matches all */ + return true; + while (*str) + if (__match_glob(str++, pat, ignore_space)) + return true; + } + return !*str && !*pat; +} + +/** + * strglobmatch - glob expression pattern matching + * @str: the target string to match + * @pat: the pattern string to match + * + * This returns true if the @str matches @pat. @pat can includes wildcards + * ('*','?') and character classes ([CHARS], complementation and ranges are + * also supported). Also, this supports escape character ('\') to use special + * characters as normal character. + * + * Note: if @pat syntax is broken, this always returns false. + */ +bool strglobmatch(const char *str, const char *pat) +{ + return __match_glob(str, pat, false); +} + +/** + * strlazymatch - matching pattern strings lazily with glob pattern + * @str: the target string to match + * @pat: the pattern string to match + * + * This is similar to strglobmatch, except this ignores spaces in + * the target string. + */ +bool strlazymatch(const char *str, const char *pat) +{ + return __match_glob(str, pat, true); +} + +/** + * strtailcmp - Compare the tail of two strings + * @s1: 1st string to be compared + * @s2: 2nd string to be compared + * + * Return 0 if whole of either string is same as another's tail part. + */ +int strtailcmp(const char *s1, const char *s2) +{ + int i1 = strlen(s1); + int i2 = strlen(s2); + while (--i1 >= 0 && --i2 >= 0) { + if (s1[i1] != s2[i2]) + return s1[i1] - s2[i2]; + } + return 0; +} + +/** + * strxfrchar - Locate and replace character in @s + * @s: The string to be searched/changed. + * @from: Source character to be replaced. + * @to: Destination character. + * + * Return pointer to the changed string. + */ +char *strxfrchar(char *s, char from, char to) +{ + char *p = s; + + while ((p = strchr(p, from)) != NULL) + *p++ = to; + + return s; +} + +/** + * ltrim - Removes leading whitespace from @s. + * @s: The string to be stripped. + * + * Return pointer to the first non-whitespace character in @s. + */ +char *ltrim(char *s) +{ + int len = strlen(s); + + while (len && isspace(*s)) { + len--; + s++; + } + + return s; +} + +/** + * rtrim - Removes trailing whitespace from @s. + * @s: The string to be stripped. + * + * Note that the first trailing whitespace is replaced with a %NUL-terminator + * in the given string @s. Returns @s. 
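+ *
+ * For example, rtrim() turns "foo  " into "foo" by writing the
+ * terminator over the first trailing space; leading whitespace is
+ * ltrim()'s job.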
+ */
+char *rtrim(char *s)
+{
+	size_t size = strlen(s);
+	char *end;
+
+	if (!size)
+		return s;
+
+	end = s + size - 1;
+	while (end >= s && isspace(*end))
+		end--;
+	*(end + 1) = '\0';
+
+	return s;
+}
+
+/**
+ * memdup - duplicate region of memory
+ * @src: memory region to duplicate
+ * @len: memory region length
+ */
+void *memdup(const void *src, size_t len)
+{
+	void *p;
+
+	p = malloc(len);
+	if (p)
+		memcpy(p, src, len);
+
+	return p;
+}
diff --git a/kernel/tools/perf/util/strlist.c b/kernel/tools/perf/util/strlist.c
new file mode 100644
index 000000000..71f9d102b
--- /dev/null
+++ b/kernel/tools/perf/util/strlist.c
@@ -0,0 +1,173 @@
+/*
+ * (c) 2009 Arnaldo Carvalho de Melo
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include "strlist.h"
+#include "util.h"
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+static
+struct rb_node *strlist__node_new(struct rblist *rblist, const void *entry)
+{
+	const char *s = entry;
+	struct rb_node *rc = NULL;
+	struct strlist *strlist = container_of(rblist, struct strlist, rblist);
+	struct str_node *snode = malloc(sizeof(*snode));
+
+	if (snode != NULL) {
+		if (strlist->dupstr) {
+			s = strdup(s);
+			if (s == NULL)
+				goto out_delete;
+		}
+		snode->s = s;
+		rc = &snode->rb_node;
+	}
+
+	return rc;
+
+out_delete:
+	free(snode);
+	return NULL;
+}
+
+static void str_node__delete(struct str_node *snode, bool dupstr)
+{
+	if (dupstr)
+		zfree((char **)&snode->s);
+	free(snode);
+}
+
+static
+void strlist__node_delete(struct rblist *rblist, struct rb_node *rb_node)
+{
+	struct strlist *slist = container_of(rblist, struct strlist, rblist);
+	struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
+
+	str_node__delete(snode, slist->dupstr);
+}
+
+static int strlist__node_cmp(struct rb_node *rb_node, const void *entry)
+{
+	const char *str = entry;
+	struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
+
+	return strcmp(snode->s, str);
+}
+
+int strlist__add(struct strlist *slist, const char *new_entry)
+{
+	return rblist__add_node(&slist->rblist, new_entry);
+}
+
+int strlist__load(struct strlist *slist, const char *filename)
+{
+	char entry[1024];
+	int err;
+	FILE *fp = fopen(filename, "r");
+
+	if (fp == NULL)
+		return errno;
+
+	while (fgets(entry, sizeof(entry), fp) != NULL) {
+		const size_t len = strlen(entry);
+
+		if (len == 0)
+			continue;
+		entry[len - 1] = '\0';
+
+		err = strlist__add(slist, entry);
+		if (err != 0)
+			goto out;
+	}
+
+	err = 0;
+out:
+	fclose(fp);
+	return err;
+}
+
+void strlist__remove(struct strlist *slist, struct str_node *snode)
+{
+	rblist__remove_node(&slist->rblist, &snode->rb_node);
+}
+
+struct str_node *strlist__find(struct strlist *slist, const char *entry)
+{
+	struct str_node *snode = NULL;
+	struct rb_node *rb_node = rblist__find(&slist->rblist, entry);
+
+	if (rb_node)
+		snode = container_of(rb_node, struct str_node, rb_node);
+
+	return snode;
+}
+
+static int strlist__parse_list_entry(struct strlist *slist, const char *s)
+{
+	if (strncmp(s, "file://", 7) == 0)
+		return strlist__load(slist, s + 7);
+
+	return strlist__add(slist, s);
+}
+
+int strlist__parse_list(struct strlist *slist, const char *s)
+{
+	char *sep;
+	int err;
+
+	while ((sep = strchr(s, ',')) != NULL) {
+		*sep = '\0';
+		err = strlist__parse_list_entry(slist, s);
+		*sep = ',';
+		if (err != 0)
+			return err;
+		s = sep + 1;
+	}
+
+	return *s ? strlist__parse_list_entry(slist, s) : 0;
+}
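+
+/*
+ * Illustrative note (not in the original source): strlist__parse_list()
+ * takes a comma-separated list whose entries are either literal strings
+ * or "file://<path>" references, so parsing "foo,file:///tmp/syms" adds
+ * "foo" and then one entry per line of /tmp/syms.
+ */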
+
+struct strlist *strlist__new(bool dupstr, const char *list)
+{
+	struct strlist *slist = malloc(sizeof(*slist));
+
+	if (slist != NULL) {
+		rblist__init(&slist->rblist);
+		slist->rblist.node_cmp = strlist__node_cmp;
+		slist->rblist.node_new = strlist__node_new;
+		slist->rblist.node_delete = strlist__node_delete;
+
+		slist->dupstr = dupstr;
+		if (list && strlist__parse_list(slist, list) != 0)
+			goto out_error;
+	}
+
+	return slist;
+out_error:
+	free(slist);
+	return NULL;
+}
+
+void strlist__delete(struct strlist *slist)
+{
+	if (slist != NULL)
+		rblist__delete(&slist->rblist);
+}
+
+struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx)
+{
+	struct str_node *snode = NULL;
+	struct rb_node *rb_node;
+
+	rb_node = rblist__entry(&slist->rblist, idx);
+	if (rb_node)
+		snode = container_of(rb_node, struct str_node, rb_node);
+
+	return snode;
+}
diff --git a/kernel/tools/perf/util/strlist.h b/kernel/tools/perf/util/strlist.h
new file mode 100644
index 000000000..5c7f87069
--- /dev/null
+++ b/kernel/tools/perf/util/strlist.h
@@ -0,0 +1,79 @@
+#ifndef __PERF_STRLIST_H
+#define __PERF_STRLIST_H
+
+#include <linux/rbtree.h>
+#include <stdbool.h>
+
+#include "rblist.h"
+
+struct str_node {
+	struct rb_node rb_node;
+	const char *s;
+};
+
+struct strlist {
+	struct rblist rblist;
+	bool dupstr;
+};
+
+struct strlist *strlist__new(bool dupstr, const char *slist);
+void strlist__delete(struct strlist *slist);
+
+void strlist__remove(struct strlist *slist, struct str_node *sn);
+int strlist__load(struct strlist *slist, const char *filename);
+int strlist__add(struct strlist *slist, const char *str);
+
+struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx);
+struct str_node *strlist__find(struct strlist *slist, const char *entry);
+
+static inline bool strlist__has_entry(struct strlist *slist, const char *entry)
+{
+	return strlist__find(slist, entry) != NULL;
+}
+
+static inline bool strlist__empty(const struct strlist *slist)
+{
+	return rblist__empty(&slist->rblist);
+}
+
+static inline unsigned int strlist__nr_entries(const struct strlist *slist)
+{
+	return rblist__nr_entries(&slist->rblist);
+}
+
+/* For strlist iteration */
+static inline struct str_node *strlist__first(struct strlist *slist)
+{
+	struct rb_node *rn = rb_first(&slist->rblist.entries);
+	return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
+}
+static inline struct str_node *strlist__next(struct str_node *sn)
+{
+	struct rb_node *rn;
+	if (!sn)
+		return NULL;
+	rn = rb_next(&sn->rb_node);
+	return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
+}
+
+/**
+ * strlist_for_each - iterate over a strlist
+ * @pos: the &struct str_node to use as a loop cursor.
+ * @slist: the &struct strlist for loop.
+ */
+#define strlist__for_each(pos, slist)	\
+	for (pos = strlist__first(slist); pos; pos = strlist__next(pos))
+
+/**
+ * strlist_for_each_safe - iterate over a strlist safe against removal of
+ *                         str_node
+ * @pos: the &struct str_node to use as a loop cursor.
+ * @n: another &struct str_node to use as temporary storage.
+ * @slist: the &struct strlist for loop.
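+ *
+ * Illustrative usage (a sketch, not part of the original kernel-doc):
+ *
+ *	struct str_node *pos, *n;
+ *
+ *	strlist__for_each_safe(pos, n, slist)
+ *		if (!strcmp(pos->s, "unwanted"))
+ *			strlist__remove(slist, pos);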
+ */ +#define strlist__for_each_safe(pos, n, slist) \ + for (pos = strlist__first(slist), n = strlist__next(pos); pos;\ + pos = n, n = strlist__next(n)) + +int strlist__parse_list(struct strlist *slist, const char *s); +#endif /* __PERF_STRLIST_H */ diff --git a/kernel/tools/perf/util/svghelper.c b/kernel/tools/perf/util/svghelper.c new file mode 100644 index 000000000..283d3e73e --- /dev/null +++ b/kernel/tools/perf/util/svghelper.c @@ -0,0 +1,808 @@ +/* + * svghelper.c - helper functions for outputting svg + * + * (C) Copyright 2009 Intel Corporation + * + * Authors: + * Arjan van de Ven + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#include +#include +#include +#include +#include +#include + +#include "perf.h" +#include "svghelper.h" +#include "util.h" +#include "cpumap.h" + +static u64 first_time, last_time; +static u64 turbo_frequency, max_freq; + + +#define SLOT_MULT 30.0 +#define SLOT_HEIGHT 25.0 +#define SLOT_HALF (SLOT_HEIGHT / 2) + +int svg_page_width = 1000; +u64 svg_highlight; +const char *svg_highlight_name; + +#define MIN_TEXT_SIZE 0.01 + +static u64 total_height; +static FILE *svgfile; + +static double cpu2slot(int cpu) +{ + return 2 * cpu + 1; +} + +static int *topology_map; + +static double cpu2y(int cpu) +{ + if (topology_map) + return cpu2slot(topology_map[cpu]) * SLOT_MULT; + else + return cpu2slot(cpu) * SLOT_MULT; +} + +static double time2pixels(u64 __time) +{ + double X; + + X = 1.0 * svg_page_width * (__time - first_time) / (last_time - first_time); + return X; +} + +/* + * Round text sizes so that the svg viewer only needs a discrete + * number of renderings of the font + */ +static double round_text_size(double size) +{ + int loop = 100; + double target = 10.0; + + if (size >= 10.0) + return size; + while (loop--) { + if (size >= target) + return target; + target = target / 2.0; + } + return size; +} + +void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end) +{ + int new_width; + + svgfile = fopen(filename, "w"); + if (!svgfile) { + fprintf(stderr, "Cannot open %s for output\n", filename); + return; + } + first_time = start; + first_time = first_time / 100000000 * 100000000; + last_time = end; + + /* + * if the recording is short, we default to a width of 1000, but + * for longer recordings we want at least 200 units of width per second + */ + new_width = (last_time - first_time) / 5000000; + + if (new_width > svg_page_width) + svg_page_width = new_width; + + total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT; + fprintf(svgfile, " \n"); + fprintf(svgfile, "\n"); + fprintf(svgfile, "\n", svg_page_width, total_height); + + fprintf(svgfile, "\n \n\n"); +} + +static double normalize_height(double height) +{ + if (height < 0.25) + return 0.25; + else if (height < 0.50) + return 0.50; + else if (height < 0.75) + return 0.75; + else + return 0.100; +} + +void svg_ubox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges) +{ + double w = time2pixels(end) - time2pixels(start); + height = normalize_height(height); + + if (!svgfile) + return; + + fprintf(svgfile, "\n"); + fprintf(svgfile, "fd=%d error=%d merges=%d\n", fd, err, merges); + fprintf(svgfile, "\n", + time2pixels(start), + w, + Yslot * SLOT_MULT, + SLOT_HALF * height, + type); + fprintf(svgfile, "\n"); +} + +void svg_lbox(int Yslot, u64 start, u64 end, double height, const char 
*type, int fd, int err, int merges) +{ + double w = time2pixels(end) - time2pixels(start); + height = normalize_height(height); + + if (!svgfile) + return; + + fprintf(svgfile, "\n"); + fprintf(svgfile, "fd=%d error=%d merges=%d\n", fd, err, merges); + fprintf(svgfile, "\n", + time2pixels(start), + w, + Yslot * SLOT_MULT + SLOT_HEIGHT - SLOT_HALF * height, + SLOT_HALF * height, + type); + fprintf(svgfile, "\n"); +} + +void svg_fbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges) +{ + double w = time2pixels(end) - time2pixels(start); + height = normalize_height(height); + + if (!svgfile) + return; + + fprintf(svgfile, "\n"); + fprintf(svgfile, "fd=%d error=%d merges=%d\n", fd, err, merges); + fprintf(svgfile, "\n", + time2pixels(start), + w, + Yslot * SLOT_MULT + SLOT_HEIGHT - SLOT_HEIGHT * height, + SLOT_HEIGHT * height, + type); + fprintf(svgfile, "\n"); +} + +void svg_box(int Yslot, u64 start, u64 end, const char *type) +{ + if (!svgfile) + return; + + fprintf(svgfile, "\n", + time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT, type); +} + +static char *time_to_string(u64 duration); +void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace) +{ + if (!svgfile) + return; + + fprintf(svgfile, "\n"); + fprintf(svgfile, "#%d blocked %s\n", cpu, + time_to_string(end - start)); + if (backtrace) + fprintf(svgfile, "Blocked on:\n%s\n", backtrace); + svg_box(Yslot, start, end, "blocked"); + fprintf(svgfile, "\n"); +} + +void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace) +{ + double text_size; + const char *type; + + if (!svgfile) + return; + + if (svg_highlight && end - start > svg_highlight) + type = "sample_hi"; + else + type = "sample"; + fprintf(svgfile, "\n"); + + fprintf(svgfile, "#%d running %s\n", + cpu, time_to_string(end - start)); + if (backtrace) + fprintf(svgfile, "Switched because:\n%s\n", backtrace); + fprintf(svgfile, "\n", + time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT, + type); + + text_size = (time2pixels(end)-time2pixels(start)); + if (cpu > 9) + text_size = text_size/2; + if (text_size > 1.25) + text_size = 1.25; + text_size = round_text_size(text_size); + + if (text_size > MIN_TEXT_SIZE) + fprintf(svgfile, "%i\n", + time2pixels(start), Yslot * SLOT_MULT + SLOT_HEIGHT - 1, text_size, cpu + 1); + + fprintf(svgfile, "\n"); +} + +static char *time_to_string(u64 duration) +{ + static char text[80]; + + text[0] = 0; + + if (duration < 1000) /* less than 1 usec */ + return text; + + if (duration < 1000 * 1000) { /* less than 1 msec */ + sprintf(text, "%.1f us", duration / 1000.0); + return text; + } + sprintf(text, "%.1f ms", duration / 1000.0 / 1000); + + return text; +} + +void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace) +{ + char *text; + const char *style; + double font_size; + + if (!svgfile) + return; + + style = "waiting"; + + if (end-start > 10 * 1000000) /* 10 msec */ + style = "WAITING"; + + text = time_to_string(end-start); + + font_size = 1.0 * (time2pixels(end)-time2pixels(start)); + + if (font_size > 3) + font_size = 3; + + font_size = round_text_size(font_size); + + fprintf(svgfile, "\n", time2pixels(start), Yslot * SLOT_MULT); + fprintf(svgfile, "#%d waiting %s\n", cpu, time_to_string(end - start)); + if (backtrace) + fprintf(svgfile, "Waiting on:\n%s\n", backtrace); + fprintf(svgfile, "\n", + time2pixels(end)-time2pixels(start), SLOT_HEIGHT, style); + if 
(font_size > MIN_TEXT_SIZE) + fprintf(svgfile, " %s\n", + font_size, text); + fprintf(svgfile, "\n"); +} + +static char *cpu_model(void) +{ + static char cpu_m[255]; + char buf[256]; + FILE *file; + + cpu_m[0] = 0; + /* CPU type */ + file = fopen("/proc/cpuinfo", "r"); + if (file) { + while (fgets(buf, 255, file)) { + if (strstr(buf, "model name")) { + strncpy(cpu_m, &buf[13], 255); + break; + } + } + fclose(file); + } + + /* CPU type */ + file = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", "r"); + if (file) { + while (fgets(buf, 255, file)) { + unsigned int freq; + freq = strtoull(buf, NULL, 10); + if (freq > max_freq) + max_freq = freq; + } + fclose(file); + } + return cpu_m; +} + +void svg_cpu_box(int cpu, u64 __max_freq, u64 __turbo_freq) +{ + char cpu_string[80]; + if (!svgfile) + return; + + max_freq = __max_freq; + turbo_frequency = __turbo_freq; + + fprintf(svgfile, "\n"); + + fprintf(svgfile, "\n", + time2pixels(first_time), + time2pixels(last_time)-time2pixels(first_time), + cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT); + + sprintf(cpu_string, "CPU %i", (int)cpu); + fprintf(svgfile, "%s\n", + 10+time2pixels(first_time), cpu2y(cpu) + SLOT_HEIGHT/2, cpu_string); + + fprintf(svgfile, "%s\n", + 10+time2pixels(first_time), cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - 4, cpu_model()); + + fprintf(svgfile, "\n"); +} + +void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace) +{ + double width; + const char *type; + + if (!svgfile) + return; + + if (svg_highlight && end - start >= svg_highlight) + type = "sample_hi"; + else if (svg_highlight_name && strstr(name, svg_highlight_name)) + type = "sample_hi"; + else + type = "sample"; + + fprintf(svgfile, "\n", time2pixels(start), cpu2y(cpu)); + fprintf(svgfile, "%d %s running %s\n", pid, name, time_to_string(end - start)); + if (backtrace) + fprintf(svgfile, "Switched because:\n%s\n", backtrace); + fprintf(svgfile, "\n", + time2pixels(end)-time2pixels(start), SLOT_MULT+SLOT_HEIGHT, type); + width = time2pixels(end)-time2pixels(start); + if (width > 6) + width = 6; + + width = round_text_size(width); + + if (width > MIN_TEXT_SIZE) + fprintf(svgfile, "%s\n", + width, name); + + fprintf(svgfile, "\n"); +} + +void svg_cstate(int cpu, u64 start, u64 end, int type) +{ + double width; + char style[128]; + + if (!svgfile) + return; + + + fprintf(svgfile, "\n"); + + if (type > 6) + type = 6; + sprintf(style, "c%i", type); + + fprintf(svgfile, "\n", + style, + time2pixels(start), time2pixels(end)-time2pixels(start), + cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT); + + width = (time2pixels(end)-time2pixels(start))/2.0; + if (width > 6) + width = 6; + + width = round_text_size(width); + + if (width > MIN_TEXT_SIZE) + fprintf(svgfile, "C%i\n", + time2pixels(start), cpu2y(cpu)+width, width, type); + + fprintf(svgfile, "\n"); +} + +static char *HzToHuman(unsigned long hz) +{ + static char buffer[1024]; + unsigned long long Hz; + + memset(buffer, 0, 1024); + + Hz = hz; + + /* default: just put the Number in */ + sprintf(buffer, "%9lli", Hz); + + if (Hz > 1000) + sprintf(buffer, " %6lli Mhz", (Hz+500)/1000); + + if (Hz > 1500000) + sprintf(buffer, " %6.2f Ghz", (Hz+5000.0)/1000000); + + if (Hz == turbo_frequency) + sprintf(buffer, "Turbo"); + + return buffer; +} + +void svg_pstate(int cpu, u64 start, u64 end, u64 freq) +{ + double height = 0; + + if (!svgfile) + return; + + fprintf(svgfile, "\n"); + + if (max_freq) + height = freq * 1.0 / max_freq * (SLOT_HEIGHT + SLOT_MULT); + height = 1 + cpu2y(cpu) + SLOT_MULT + 
SLOT_HEIGHT - height; + fprintf(svgfile, "\n", + time2pixels(start), time2pixels(end), height, height); + fprintf(svgfile, "%s\n", + time2pixels(start), height+0.9, HzToHuman(freq)); + + fprintf(svgfile, "\n"); +} + + +void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2, const char *backtrace) +{ + double height; + + if (!svgfile) + return; + + + fprintf(svgfile, "\n"); + + fprintf(svgfile, "%s wakes up %s\n", + desc1 ? desc1 : "?", + desc2 ? desc2 : "?"); + + if (backtrace) + fprintf(svgfile, "%s\n", backtrace); + + if (row1 < row2) { + if (row1) { + fprintf(svgfile, "\n", + time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32); + if (desc2) + fprintf(svgfile, "%s >\n", + time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_HEIGHT/48, desc2); + } + if (row2) { + fprintf(svgfile, "\n", + time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32, time2pixels(start), row2 * SLOT_MULT); + if (desc1) + fprintf(svgfile, "%s >\n", + time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32, desc1); + } + } else { + if (row2) { + fprintf(svgfile, "\n", + time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32); + if (desc1) + fprintf(svgfile, "%s <\n", + time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/48, desc1); + } + if (row1) { + fprintf(svgfile, "\n", + time2pixels(start), row1 * SLOT_MULT - SLOT_MULT/32, time2pixels(start), row1 * SLOT_MULT); + if (desc2) + fprintf(svgfile, "%s <\n", + time2pixels(start), row1 * SLOT_MULT - SLOT_HEIGHT/32, desc2); + } + } + height = row1 * SLOT_MULT; + if (row2 > row1) + height += SLOT_HEIGHT; + if (row1) + fprintf(svgfile, "\n", + time2pixels(start), height); + + fprintf(svgfile, "\n"); +} + +void svg_wakeline(u64 start, int row1, int row2, const char *backtrace) +{ + double height; + + if (!svgfile) + return; + + + fprintf(svgfile, "\n"); + + if (backtrace) + fprintf(svgfile, "%s\n", backtrace); + + if (row1 < row2) + fprintf(svgfile, "\n", + time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT); + else + fprintf(svgfile, "\n", + time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row1 * SLOT_MULT); + + height = row1 * SLOT_MULT; + if (row2 > row1) + height += SLOT_HEIGHT; + fprintf(svgfile, "\n", + time2pixels(start), height); + + fprintf(svgfile, "\n"); +} + +void svg_interrupt(u64 start, int row, const char *backtrace) +{ + if (!svgfile) + return; + + fprintf(svgfile, "\n"); + + fprintf(svgfile, "Wakeup from interrupt\n"); + + if (backtrace) + fprintf(svgfile, "%s\n", backtrace); + + fprintf(svgfile, "\n", + time2pixels(start), row * SLOT_MULT); + fprintf(svgfile, "\n", + time2pixels(start), row * SLOT_MULT + SLOT_HEIGHT); + + fprintf(svgfile, "\n"); +} + +void svg_text(int Yslot, u64 start, const char *text) +{ + if (!svgfile) + return; + + fprintf(svgfile, "%s\n", + time2pixels(start), Yslot * SLOT_MULT+SLOT_HEIGHT/2, text); +} + +static void svg_legenda_box(int X, const char *text, const char *style) +{ + double boxsize; + boxsize = SLOT_HEIGHT / 2; + + fprintf(svgfile, "\n", + X, boxsize, boxsize, style); + fprintf(svgfile, "%s\n", + X + boxsize + 5, boxsize, 0.8 * boxsize, text); +} + +void svg_io_legenda(void) +{ + if (!svgfile) + return; + + fprintf(svgfile, "\n"); + svg_legenda_box(0, "Disk", "disk"); + svg_legenda_box(100, "Network", "net"); + svg_legenda_box(200, "Sync", "sync"); + svg_legenda_box(300, "Poll", "poll"); + 
svg_legenda_box(400, "Error", "error"); + fprintf(svgfile, "\n"); +} + +void svg_legenda(void) +{ + if (!svgfile) + return; + + fprintf(svgfile, "\n"); + svg_legenda_box(0, "Running", "sample"); + svg_legenda_box(100, "Idle","c1"); + svg_legenda_box(200, "Deeper Idle", "c3"); + svg_legenda_box(350, "Deepest Idle", "c6"); + svg_legenda_box(550, "Sleeping", "process2"); + svg_legenda_box(650, "Waiting for cpu", "waiting"); + svg_legenda_box(800, "Blocked on IO", "blocked"); + fprintf(svgfile, "\n"); +} + +void svg_time_grid(double min_thickness) +{ + u64 i; + + if (!svgfile) + return; + + i = first_time; + while (i < last_time) { + int color = 220; + double thickness = 0.075; + if ((i % 100000000) == 0) { + thickness = 0.5; + color = 192; + } + if ((i % 1000000000) == 0) { + thickness = 2.0; + color = 128; + } + + if (thickness >= min_thickness) + fprintf(svgfile, "\n", + time2pixels(i), SLOT_MULT/2, time2pixels(i), + total_height, color, color, color, thickness); + + i += 10000000; + } +} + +void svg_close(void) +{ + if (svgfile) { + fprintf(svgfile, "\n"); + fclose(svgfile); + svgfile = NULL; + } +} + +#define cpumask_bits(maskp) ((maskp)->bits) +typedef struct { DECLARE_BITMAP(bits, MAX_NR_CPUS); } cpumask_t; + +struct topology { + cpumask_t *sib_core; + int sib_core_nr; + cpumask_t *sib_thr; + int sib_thr_nr; +}; + +static void scan_thread_topology(int *map, struct topology *t, int cpu, int *pos) +{ + int i; + int thr; + + for (i = 0; i < t->sib_thr_nr; i++) { + if (!test_bit(cpu, cpumask_bits(&t->sib_thr[i]))) + continue; + + for_each_set_bit(thr, + cpumask_bits(&t->sib_thr[i]), + MAX_NR_CPUS) + if (map[thr] == -1) + map[thr] = (*pos)++; + } +} + +static void scan_core_topology(int *map, struct topology *t) +{ + int pos = 0; + int i; + int cpu; + + for (i = 0; i < t->sib_core_nr; i++) + for_each_set_bit(cpu, + cpumask_bits(&t->sib_core[i]), + MAX_NR_CPUS) + scan_thread_topology(map, t, cpu, &pos); +} + +static int str_to_bitmap(char *s, cpumask_t *b) +{ + int i; + int ret = 0; + struct cpu_map *m; + int c; + + m = cpu_map__new(s); + if (!m) + return -1; + + for (i = 0; i < m->nr; i++) { + c = m->map[i]; + if (c >= MAX_NR_CPUS) { + ret = -1; + break; + } + + set_bit(c, cpumask_bits(b)); + } + + cpu_map__delete(m); + + return ret; +} + +int svg_build_topology_map(char *sib_core, int sib_core_nr, + char *sib_thr, int sib_thr_nr) +{ + int i; + struct topology t; + + t.sib_core_nr = sib_core_nr; + t.sib_thr_nr = sib_thr_nr; + t.sib_core = calloc(sib_core_nr, sizeof(cpumask_t)); + t.sib_thr = calloc(sib_thr_nr, sizeof(cpumask_t)); + + if (!t.sib_core || !t.sib_thr) { + fprintf(stderr, "topology: no memory\n"); + goto exit; + } + + for (i = 0; i < sib_core_nr; i++) { + if (str_to_bitmap(sib_core, &t.sib_core[i])) { + fprintf(stderr, "topology: can't parse siblings map\n"); + goto exit; + } + + sib_core += strlen(sib_core) + 1; + } + + for (i = 0; i < sib_thr_nr; i++) { + if (str_to_bitmap(sib_thr, &t.sib_thr[i])) { + fprintf(stderr, "topology: can't parse siblings map\n"); + goto exit; + } + + sib_thr += strlen(sib_thr) + 1; + } + + topology_map = malloc(sizeof(int) * MAX_NR_CPUS); + if (!topology_map) { + fprintf(stderr, "topology: no memory\n"); + goto exit; + } + + for (i = 0; i < MAX_NR_CPUS; i++) + topology_map[i] = -1; + + scan_core_topology(topology_map, &t); + + return 0; + +exit: + zfree(&t.sib_core); + zfree(&t.sib_thr); + + return -1; +} diff --git a/kernel/tools/perf/util/svghelper.h b/kernel/tools/perf/util/svghelper.h new file mode 100644 index 000000000..9292a5291 --- /dev/null 
+++ b/kernel/tools/perf/util/svghelper.h
@@ -0,0 +1,37 @@
+#ifndef __PERF_SVGHELPER_H
+#define __PERF_SVGHELPER_H
+
+#include <linux/types.h>
+
+extern void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end);
+extern void svg_ubox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges);
+extern void svg_lbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges);
+extern void svg_fbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges);
+extern void svg_box(int Yslot, u64 start, u64 end, const char *type);
+extern void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
+extern void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
+extern void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
+extern void svg_cpu_box(int cpu, u64 max_frequency, u64 turbo_frequency);
+
+
+extern void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace);
+extern void svg_cstate(int cpu, u64 start, u64 end, int type);
+extern void svg_pstate(int cpu, u64 start, u64 end, u64 freq);
+
+
+extern void svg_time_grid(double min_thickness);
+extern void svg_io_legenda(void);
+extern void svg_legenda(void);
+extern void svg_wakeline(u64 start, int row1, int row2, const char *backtrace);
+extern void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2, const char *backtrace);
+extern void svg_interrupt(u64 start, int row, const char *backtrace);
+extern void svg_text(int Yslot, u64 start, const char *text);
+extern void svg_close(void);
+extern int svg_build_topology_map(char *sib_core, int sib_core_nr,
+				  char *sib_thr, int sib_thr_nr);
+
+extern int svg_page_width;
+extern u64 svg_highlight;
+extern const char *svg_highlight_name;
+
+#endif /* __PERF_SVGHELPER_H */
diff --git a/kernel/tools/perf/util/symbol-elf.c b/kernel/tools/perf/util/symbol-elf.c
new file mode 100644
index 000000000..a7ab6063e
--- /dev/null
+++ b/kernel/tools/perf/util/symbol-elf.c
@@ -0,0 +1,1774 @@
+#include <fcntl.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include "symbol.h"
+#include "machine.h"
+#include "vdso.h"
+#include <symbol/kallsyms.h>
+#include "debug.h"
+
+#ifndef EM_AARCH64
+#define EM_AARCH64	183  /* ARM 64 bit */
+#endif
+
+
+#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
+extern char *cplus_demangle(const char *, int);
+
+static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
+{
+	return cplus_demangle(c, i);
+}
+#else
+#ifdef NO_DEMANGLE
+static inline char *bfd_demangle(void __maybe_unused *v,
+				 const char __maybe_unused *c,
+				 int __maybe_unused i)
+{
+	return NULL;
+}
+#else
+#define PACKAGE 'perf'
+#include <bfd.h>
+#endif
+#endif
+
+#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
+static int elf_getphdrnum(Elf *elf, size_t *dst)
+{
+	GElf_Ehdr gehdr;
+	GElf_Ehdr *ehdr;
+
+	ehdr = gelf_getehdr(elf, &gehdr);
+	if (!ehdr)
+		return -1;
+
+	*dst = ehdr->e_phnum;
+
+	return 0;
+}
+#endif
+
+#ifndef NT_GNU_BUILD_ID
+#define NT_GNU_BUILD_ID 3
+#endif
+
+/**
+ * elf_symtab__for_each_symbol - iterate thru all the symbols
+ *
+ * @syms: struct elf_symtab instance to iterate
+ * @idx: uint32_t idx
+ * @sym: GElf_Sym iterator
+ */
+#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
+	for (idx = 0, gelf_getsym(syms, idx, &sym);\
+	     idx < nr_syms; \
+	     idx++, gelf_getsym(syms, idx, &sym))
+
+static inline uint8_t elf_sym__type(const GElf_Sym *sym)
+{
+	return GELF_ST_TYPE(sym->st_info);
+}
+
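+/*
+ * Illustrative note (not in the original source): st_info packs the
+ * symbol binding in its high nibble and the type in its low nibble, so
+ * for a global function symbol (STB_GLOBAL == 1, STT_FUNC == 2) st_info
+ * is (1 << 4) | 2 == 0x12, and GELF_ST_TYPE(0x12) == STT_FUNC while
+ * GELF_ST_BIND(0x12) == STB_GLOBAL.
+ */
+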
+#ifndef STT_GNU_IFUNC +#define STT_GNU_IFUNC 10 +#endif + +static inline int elf_sym__is_function(const GElf_Sym *sym) +{ + return (elf_sym__type(sym) == STT_FUNC || + elf_sym__type(sym) == STT_GNU_IFUNC) && + sym->st_name != 0 && + sym->st_shndx != SHN_UNDEF; +} + +static inline bool elf_sym__is_object(const GElf_Sym *sym) +{ + return elf_sym__type(sym) == STT_OBJECT && + sym->st_name != 0 && + sym->st_shndx != SHN_UNDEF; +} + +static inline int elf_sym__is_label(const GElf_Sym *sym) +{ + return elf_sym__type(sym) == STT_NOTYPE && + sym->st_name != 0 && + sym->st_shndx != SHN_UNDEF && + sym->st_shndx != SHN_ABS; +} + +static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type) +{ + switch (type) { + case MAP__FUNCTION: + return elf_sym__is_function(sym); + case MAP__VARIABLE: + return elf_sym__is_object(sym); + default: + return false; + } +} + +static inline const char *elf_sym__name(const GElf_Sym *sym, + const Elf_Data *symstrs) +{ + return symstrs->d_buf + sym->st_name; +} + +static inline const char *elf_sec__name(const GElf_Shdr *shdr, + const Elf_Data *secstrs) +{ + return secstrs->d_buf + shdr->sh_name; +} + +static inline int elf_sec__is_text(const GElf_Shdr *shdr, + const Elf_Data *secstrs) +{ + return strstr(elf_sec__name(shdr, secstrs), "text") != NULL; +} + +static inline bool elf_sec__is_data(const GElf_Shdr *shdr, + const Elf_Data *secstrs) +{ + return strstr(elf_sec__name(shdr, secstrs), "data") != NULL; +} + +static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs, + enum map_type type) +{ + switch (type) { + case MAP__FUNCTION: + return elf_sec__is_text(shdr, secstrs); + case MAP__VARIABLE: + return elf_sec__is_data(shdr, secstrs); + default: + return false; + } +} + +static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) +{ + Elf_Scn *sec = NULL; + GElf_Shdr shdr; + size_t cnt = 1; + + while ((sec = elf_nextscn(elf, sec)) != NULL) { + gelf_getshdr(sec, &shdr); + + if ((addr >= shdr.sh_addr) && + (addr < (shdr.sh_addr + shdr.sh_size))) + return cnt; + + ++cnt; + } + + return -1; +} + +Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, + GElf_Shdr *shp, const char *name, size_t *idx) +{ + Elf_Scn *sec = NULL; + size_t cnt = 1; + + /* Elf is corrupted/truncated, avoid calling elf_strptr. */ + if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) + return NULL; + + while ((sec = elf_nextscn(elf, sec)) != NULL) { + char *str; + + gelf_getshdr(sec, shp); + str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); + if (str && !strcmp(name, str)) { + if (idx) + *idx = cnt; + return sec; + } + ++cnt; + } + + return NULL; +} + +#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \ + for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \ + idx < nr_entries; \ + ++idx, pos = gelf_getrel(reldata, idx, &pos_mem)) + +#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \ + for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \ + idx < nr_entries; \ + ++idx, pos = gelf_getrela(reldata, idx, &pos_mem)) + +/* + * We need to check if we have a .dynsym, so that we can handle the + * .plt, synthesizing its symbols, that aren't on the symtabs (be it + * .dynsym or .symtab). + * And always look at the original dso, not at debuginfo packages, that + * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). 
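+ * For example, with these synthesized symbols a dynamic call to malloc
+ * is attributed to "malloc@plt" rather than to an anonymous .plt slot.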
+ */ +int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map, + symbol_filter_t filter) +{ + uint32_t nr_rel_entries, idx; + GElf_Sym sym; + u64 plt_offset; + GElf_Shdr shdr_plt; + struct symbol *f; + GElf_Shdr shdr_rel_plt, shdr_dynsym; + Elf_Data *reldata, *syms, *symstrs; + Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym; + size_t dynsym_idx; + GElf_Ehdr ehdr; + char sympltname[1024]; + Elf *elf; + int nr = 0, symidx, err = 0; + + if (!ss->dynsym) + return 0; + + elf = ss->elf; + ehdr = ss->ehdr; + + scn_dynsym = ss->dynsym; + shdr_dynsym = ss->dynshdr; + dynsym_idx = ss->dynsym_idx; + + if (scn_dynsym == NULL) + goto out_elf_end; + + scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt, + ".rela.plt", NULL); + if (scn_plt_rel == NULL) { + scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt, + ".rel.plt", NULL); + if (scn_plt_rel == NULL) + goto out_elf_end; + } + + err = -1; + + if (shdr_rel_plt.sh_link != dynsym_idx) + goto out_elf_end; + + if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL) + goto out_elf_end; + + /* + * Fetch the relocation section to find the idxes to the GOT + * and the symbols in the .dynsym they refer to. + */ + reldata = elf_getdata(scn_plt_rel, NULL); + if (reldata == NULL) + goto out_elf_end; + + syms = elf_getdata(scn_dynsym, NULL); + if (syms == NULL) + goto out_elf_end; + + scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link); + if (scn_symstrs == NULL) + goto out_elf_end; + + symstrs = elf_getdata(scn_symstrs, NULL); + if (symstrs == NULL) + goto out_elf_end; + + if (symstrs->d_size == 0) + goto out_elf_end; + + nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize; + plt_offset = shdr_plt.sh_offset; + + if (shdr_rel_plt.sh_type == SHT_RELA) { + GElf_Rela pos_mem, *pos; + + elf_section__for_each_rela(reldata, pos, pos_mem, idx, + nr_rel_entries) { + symidx = GELF_R_SYM(pos->r_info); + plt_offset += shdr_plt.sh_entsize; + gelf_getsym(syms, symidx, &sym); + snprintf(sympltname, sizeof(sympltname), + "%s@plt", elf_sym__name(&sym, symstrs)); + + f = symbol__new(plt_offset, shdr_plt.sh_entsize, + STB_GLOBAL, sympltname); + if (!f) + goto out_elf_end; + + if (filter && filter(map, f)) + symbol__delete(f); + else { + symbols__insert(&dso->symbols[map->type], f); + ++nr; + } + } + } else if (shdr_rel_plt.sh_type == SHT_REL) { + GElf_Rel pos_mem, *pos; + elf_section__for_each_rel(reldata, pos, pos_mem, idx, + nr_rel_entries) { + symidx = GELF_R_SYM(pos->r_info); + plt_offset += shdr_plt.sh_entsize; + gelf_getsym(syms, symidx, &sym); + snprintf(sympltname, sizeof(sympltname), + "%s@plt", elf_sym__name(&sym, symstrs)); + + f = symbol__new(plt_offset, shdr_plt.sh_entsize, + STB_GLOBAL, sympltname); + if (!f) + goto out_elf_end; + + if (filter && filter(map, f)) + symbol__delete(f); + else { + symbols__insert(&dso->symbols[map->type], f); + ++nr; + } + } + } + + err = 0; +out_elf_end: + if (err == 0) + return nr; + pr_debug("%s: problems reading %s PLT info.\n", + __func__, dso->long_name); + return 0; +} + +/* + * Align offset to 4 bytes as needed for note name and descriptor data. 
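+ * For example, NOTE_ALIGN(5) == 8 and NOTE_ALIGN(8) == 8: adding 3 and
+ * then masking with ~3 rounds a length up to the next multiple of 4.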
+ */ +#define NOTE_ALIGN(n) (((n) + 3) & -4U) + +static int elf_read_build_id(Elf *elf, void *bf, size_t size) +{ + int err = -1; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + Elf_Data *data; + Elf_Scn *sec; + Elf_Kind ek; + void *ptr; + + if (size < BUILD_ID_SIZE) + goto out; + + ek = elf_kind(elf); + if (ek != ELF_K_ELF) + goto out; + + if (gelf_getehdr(elf, &ehdr) == NULL) { + pr_err("%s: cannot get elf header.\n", __func__); + goto out; + } + + /* + * Check following sections for notes: + * '.note.gnu.build-id' + * '.notes' + * '.note' (VDSO specific) + */ + do { + sec = elf_section_by_name(elf, &ehdr, &shdr, + ".note.gnu.build-id", NULL); + if (sec) + break; + + sec = elf_section_by_name(elf, &ehdr, &shdr, + ".notes", NULL); + if (sec) + break; + + sec = elf_section_by_name(elf, &ehdr, &shdr, + ".note", NULL); + if (sec) + break; + + return err; + + } while (0); + + data = elf_getdata(sec, NULL); + if (data == NULL) + goto out; + + ptr = data->d_buf; + while (ptr < (data->d_buf + data->d_size)) { + GElf_Nhdr *nhdr = ptr; + size_t namesz = NOTE_ALIGN(nhdr->n_namesz), + descsz = NOTE_ALIGN(nhdr->n_descsz); + const char *name; + + ptr += sizeof(*nhdr); + name = ptr; + ptr += namesz; + if (nhdr->n_type == NT_GNU_BUILD_ID && + nhdr->n_namesz == sizeof("GNU")) { + if (memcmp(name, "GNU", sizeof("GNU")) == 0) { + size_t sz = min(size, descsz); + memcpy(bf, ptr, sz); + memset(bf + sz, 0, size - sz); + err = descsz; + break; + } + } + ptr += descsz; + } + +out: + return err; +} + +int filename__read_build_id(const char *filename, void *bf, size_t size) +{ + int fd, err = -1; + Elf *elf; + + if (size < BUILD_ID_SIZE) + goto out; + + fd = open(filename, O_RDONLY); + if (fd < 0) + goto out; + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) { + pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); + goto out_close; + } + + err = elf_read_build_id(elf, bf, size); + + elf_end(elf); +out_close: + close(fd); +out: + return err; +} + +int sysfs__read_build_id(const char *filename, void *build_id, size_t size) +{ + int fd, err = -1; + + if (size < BUILD_ID_SIZE) + goto out; + + fd = open(filename, O_RDONLY); + if (fd < 0) + goto out; + + while (1) { + char bf[BUFSIZ]; + GElf_Nhdr nhdr; + size_t namesz, descsz; + + if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) + break; + + namesz = NOTE_ALIGN(nhdr.n_namesz); + descsz = NOTE_ALIGN(nhdr.n_descsz); + if (nhdr.n_type == NT_GNU_BUILD_ID && + nhdr.n_namesz == sizeof("GNU")) { + if (read(fd, bf, namesz) != (ssize_t)namesz) + break; + if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { + size_t sz = min(descsz, size); + if (read(fd, build_id, sz) == (ssize_t)sz) { + memset(build_id + sz, 0, size - sz); + err = 0; + break; + } + } else if (read(fd, bf, descsz) != (ssize_t)descsz) + break; + } else { + int n = namesz + descsz; + if (read(fd, bf, n) != n) + break; + } + } + close(fd); +out: + return err; +} + +int filename__read_debuglink(const char *filename, char *debuglink, + size_t size) +{ + int fd, err = -1; + Elf *elf; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + Elf_Data *data; + Elf_Scn *sec; + Elf_Kind ek; + + fd = open(filename, O_RDONLY); + if (fd < 0) + goto out; + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) { + pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); + goto out_close; + } + + ek = elf_kind(elf); + if (ek != ELF_K_ELF) + goto out_elf_end; + + if (gelf_getehdr(elf, &ehdr) == NULL) { + pr_err("%s: cannot get elf header.\n", __func__); + goto out_elf_end; + } + + sec = 
elf_section_by_name(elf, &ehdr, &shdr, + ".gnu_debuglink", NULL); + if (sec == NULL) + goto out_elf_end; + + data = elf_getdata(sec, NULL); + if (data == NULL) + goto out_elf_end; + + /* the start of this section is a zero-terminated string */ + strncpy(debuglink, data->d_buf, size); + + err = 0; + +out_elf_end: + elf_end(elf); +out_close: + close(fd); +out: + return err; +} + +static int dso__swap_init(struct dso *dso, unsigned char eidata) +{ + static unsigned int const endian = 1; + + dso->needs_swap = DSO_SWAP__NO; + + switch (eidata) { + case ELFDATA2LSB: + /* We are big endian, DSO is little endian. */ + if (*(unsigned char const *)&endian != 1) + dso->needs_swap = DSO_SWAP__YES; + break; + + case ELFDATA2MSB: + /* We are little endian, DSO is big endian. */ + if (*(unsigned char const *)&endian != 0) + dso->needs_swap = DSO_SWAP__YES; + break; + + default: + pr_err("unrecognized DSO data encoding %d\n", eidata); + return -EINVAL; + } + + return 0; +} + +static int decompress_kmodule(struct dso *dso, const char *name, + enum dso_binary_type type) +{ + int fd = -1; + char tmpbuf[] = "/tmp/perf-kmod-XXXXXX"; + struct kmod_path m; + + if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP && + type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP && + type != DSO_BINARY_TYPE__BUILD_ID_CACHE) + return -1; + + if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE) + name = dso->long_name; + + if (kmod_path__parse_ext(&m, name) || !m.comp) + return -1; + + fd = mkstemp(tmpbuf); + if (fd < 0) { + dso->load_errno = errno; + goto out; + } + + if (!decompress_to_file(m.ext, name, fd)) { + dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE; + close(fd); + fd = -1; + } + + unlink(tmpbuf); + +out: + free(m.ext); + return fd; +} + +bool symsrc__possibly_runtime(struct symsrc *ss) +{ + return ss->dynsym || ss->opdsec; +} + +bool symsrc__has_symtab(struct symsrc *ss) +{ + return ss->symtab != NULL; +} + +void symsrc__destroy(struct symsrc *ss) +{ + zfree(&ss->name); + elf_end(ss->elf); + close(ss->fd); +} + +int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, + enum dso_binary_type type) +{ + int err = -1; + GElf_Ehdr ehdr; + Elf *elf; + int fd; + + if (dso__needs_decompress(dso)) { + fd = decompress_kmodule(dso, name, type); + if (fd < 0) + return -1; + } else { + fd = open(name, O_RDONLY); + if (fd < 0) { + dso->load_errno = errno; + return -1; + } + } + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) { + pr_debug("%s: cannot read %s ELF file.\n", __func__, name); + dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF; + goto out_close; + } + + if (gelf_getehdr(elf, &ehdr) == NULL) { + dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF; + pr_debug("%s: cannot get elf header.\n", __func__); + goto out_elf_end; + } + + if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) { + dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR; + goto out_elf_end; + } + + /* Always reject images with a mismatched build-id: */ + if (dso->has_build_id) { + u8 build_id[BUILD_ID_SIZE]; + + if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) { + dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID; + goto out_elf_end; + } + + if (!dso__build_id_equal(dso, build_id)) { + dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID; + goto out_elf_end; + } + } + + ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64); + + ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab", + NULL); + if (ss->symshdr.sh_type != SHT_SYMTAB) + ss->symtab = NULL; + + ss->dynsym_idx = 0; + ss->dynsym = elf_section_by_name(elf, 
&ehdr, &ss->dynshdr, ".dynsym", + &ss->dynsym_idx); + if (ss->dynshdr.sh_type != SHT_DYNSYM) + ss->dynsym = NULL; + + ss->opdidx = 0; + ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd", + &ss->opdidx); + if (ss->opdshdr.sh_type != SHT_PROGBITS) + ss->opdsec = NULL; + + if (dso->kernel == DSO_TYPE_USER) { + GElf_Shdr shdr; + ss->adjust_symbols = (ehdr.e_type == ET_EXEC || + ehdr.e_type == ET_REL || + dso__is_vdso(dso) || + elf_section_by_name(elf, &ehdr, &shdr, + ".gnu.prelink_undo", + NULL) != NULL); + } else { + ss->adjust_symbols = ehdr.e_type == ET_EXEC || + ehdr.e_type == ET_REL; + } + + ss->name = strdup(name); + if (!ss->name) { + dso->load_errno = errno; + goto out_elf_end; + } + + ss->elf = elf; + ss->fd = fd; + ss->ehdr = ehdr; + ss->type = type; + + return 0; + +out_elf_end: + elf_end(elf); +out_close: + close(fd); + return err; +} + +/** + * ref_reloc_sym_not_found - has kernel relocation symbol been found. + * @kmap: kernel maps and relocation reference symbol + * + * This function returns %true if we are dealing with the kernel maps and the + * relocation reference symbol has not yet been found. Otherwise %false is + * returned. + */ +static bool ref_reloc_sym_not_found(struct kmap *kmap) +{ + return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && + !kmap->ref_reloc_sym->unrelocated_addr; +} + +/** + * ref_reloc - kernel relocation offset. + * @kmap: kernel maps and relocation reference symbol + * + * This function returns the offset of kernel addresses as determined by using + * the relocation reference symbol i.e. if the kernel has not been relocated + * then the return value is zero. + */ +static u64 ref_reloc(struct kmap *kmap) +{ + if (kmap && kmap->ref_reloc_sym && + kmap->ref_reloc_sym->unrelocated_addr) + return kmap->ref_reloc_sym->addr - + kmap->ref_reloc_sym->unrelocated_addr; + return 0; +} + +static bool want_demangle(bool is_kernel_sym) +{ + return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle; +} + +int dso__load_sym(struct dso *dso, struct map *map, + struct symsrc *syms_ss, struct symsrc *runtime_ss, + symbol_filter_t filter, int kmodule) +{ + struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL; + struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL; + struct map *curr_map = map; + struct dso *curr_dso = dso; + Elf_Data *symstrs, *secstrs; + uint32_t nr_syms; + int err = -1; + uint32_t idx; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + Elf_Data *syms, *opddata = NULL; + GElf_Sym sym; + Elf_Scn *sec, *sec_strndx; + Elf *elf; + int nr = 0; + bool remap_kernel = false, adjust_kernel_syms = false; + + if (kmap && !kmaps) + return -1; + + dso->symtab_type = syms_ss->type; + dso->is_64_bit = syms_ss->is_64_bit; + dso->rel = syms_ss->ehdr.e_type == ET_REL; + + /* + * Modules may already have symbols from kallsyms, but those symbols + * have the wrong values for the dso maps, so remove them. + */ + if (kmodule && syms_ss->symtab) + symbols__delete(&dso->symbols[map->type]); + + if (!syms_ss->symtab) { + /* + * If the vmlinux is stripped, fail so we will fall back + * to using kallsyms. The vmlinux runtime symbols aren't + * of much use. 
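+ * For user-space DSOs, by contrast, .dynsym still carries the exported
+ * entry points, which is why the fallback below substitutes it for a
+ * missing .symtab.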
+ */ + if (dso->kernel) + goto out_elf_end; + + syms_ss->symtab = syms_ss->dynsym; + syms_ss->symshdr = syms_ss->dynshdr; + } + + elf = syms_ss->elf; + ehdr = syms_ss->ehdr; + sec = syms_ss->symtab; + shdr = syms_ss->symshdr; + + if (runtime_ss->opdsec) + opddata = elf_rawdata(runtime_ss->opdsec, NULL); + + syms = elf_getdata(sec, NULL); + if (syms == NULL) + goto out_elf_end; + + sec = elf_getscn(elf, shdr.sh_link); + if (sec == NULL) + goto out_elf_end; + + symstrs = elf_getdata(sec, NULL); + if (symstrs == NULL) + goto out_elf_end; + + sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx); + if (sec_strndx == NULL) + goto out_elf_end; + + secstrs = elf_getdata(sec_strndx, NULL); + if (secstrs == NULL) + goto out_elf_end; + + nr_syms = shdr.sh_size / shdr.sh_entsize; + + memset(&sym, 0, sizeof(sym)); + + /* + * The kernel relocation symbol is needed in advance in order to adjust + * kernel maps correctly. + */ + if (ref_reloc_sym_not_found(kmap)) { + elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { + const char *elf_name = elf_sym__name(&sym, symstrs); + + if (strcmp(elf_name, kmap->ref_reloc_sym->name)) + continue; + kmap->ref_reloc_sym->unrelocated_addr = sym.st_value; + map->reloc = kmap->ref_reloc_sym->addr - + kmap->ref_reloc_sym->unrelocated_addr; + break; + } + } + + dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap); + /* + * Initial kernel and module mappings do not map to the dso. For + * function mappings, flag the fixups. + */ + if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) { + remap_kernel = true; + adjust_kernel_syms = dso->adjust_symbols; + } + elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { + struct symbol *f; + const char *elf_name = elf_sym__name(&sym, symstrs); + char *demangled = NULL; + int is_label = elf_sym__is_label(&sym); + const char *section_name; + bool used_opd = false; + + if (!is_label && !elf_sym__is_a(&sym, map->type)) + continue; + + /* Reject ARM ELF "mapping symbols": these aren't unique and + * don't identify functions, so will confuse the profile + * output: */ + if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) { + if (elf_name[0] == '$' && strchr("adtx", elf_name[1]) + && (elf_name[2] == '\0' || elf_name[2] == '.')) + continue; + } + + if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) { + u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr; + u64 *opd = opddata->d_buf + offset; + sym.st_value = DSO__SWAP(dso, u64, *opd); + sym.st_shndx = elf_addr_to_index(runtime_ss->elf, + sym.st_value); + used_opd = true; + } + /* + * When loading symbols in a data mapping, ABS symbols (which + * has a value of SHN_ABS in its st_shndx) failed at + * elf_getscn(). And it marks the loading as a failure so + * already loaded symbols cannot be fixed up. + * + * I'm not sure what should be done. Just ignore them for now. 
+ * - Namhyung Kim + */ + if (sym.st_shndx == SHN_ABS) + continue; + + sec = elf_getscn(runtime_ss->elf, sym.st_shndx); + if (!sec) + goto out_elf_end; + + gelf_getshdr(sec, &shdr); + + if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type)) + continue; + + section_name = elf_sec__name(&shdr, secstrs); + + /* On ARM, symbols for thumb functions have 1 added to + * the symbol address as a flag - remove it */ + if ((ehdr.e_machine == EM_ARM) && + (map->type == MAP__FUNCTION) && + (sym.st_value & 1)) + --sym.st_value; + + if (dso->kernel || kmodule) { + char dso_name[PATH_MAX]; + + /* Adjust symbol to map to file offset */ + if (adjust_kernel_syms) + sym.st_value -= shdr.sh_addr - shdr.sh_offset; + + if (strcmp(section_name, + (curr_dso->short_name + + dso->short_name_len)) == 0) + goto new_symbol; + + if (strcmp(section_name, ".text") == 0) { + /* + * The initial kernel mapping is based on + * kallsyms and identity maps. Overwrite it to + * map to the kernel dso. + */ + if (remap_kernel && dso->kernel) { + remap_kernel = false; + map->start = shdr.sh_addr + + ref_reloc(kmap); + map->end = map->start + shdr.sh_size; + map->pgoff = shdr.sh_offset; + map->map_ip = map__map_ip; + map->unmap_ip = map__unmap_ip; + /* Ensure maps are correctly ordered */ + if (kmaps) { + map_groups__remove(kmaps, map); + map_groups__insert(kmaps, map); + } + } + + /* + * The initial module mapping is based on + * /proc/modules mapped to offset zero. + * Overwrite it to map to the module dso. + */ + if (remap_kernel && kmodule) { + remap_kernel = false; + map->pgoff = shdr.sh_offset; + } + + curr_map = map; + curr_dso = dso; + goto new_symbol; + } + + if (!kmap) + goto new_symbol; + + snprintf(dso_name, sizeof(dso_name), + "%s%s", dso->short_name, section_name); + + curr_map = map_groups__find_by_name(kmaps, map->type, dso_name); + if (curr_map == NULL) { + u64 start = sym.st_value; + + if (kmodule) + start += map->start + shdr.sh_offset; + + curr_dso = dso__new(dso_name); + if (curr_dso == NULL) + goto out_elf_end; + curr_dso->kernel = dso->kernel; + curr_dso->long_name = dso->long_name; + curr_dso->long_name_len = dso->long_name_len; + curr_map = map__new2(start, curr_dso, + map->type); + if (curr_map == NULL) { + dso__delete(curr_dso); + goto out_elf_end; + } + if (adjust_kernel_syms) { + curr_map->start = shdr.sh_addr + + ref_reloc(kmap); + curr_map->end = curr_map->start + + shdr.sh_size; + curr_map->pgoff = shdr.sh_offset; + } else { + curr_map->map_ip = identity__map_ip; + curr_map->unmap_ip = identity__map_ip; + } + curr_dso->symtab_type = dso->symtab_type; + map_groups__insert(kmaps, curr_map); + /* + * The new DSO should go to the kernel DSOS + */ + dsos__add(&map->groups->machine->kernel_dsos, + curr_dso); + dso__set_loaded(curr_dso, map->type); + } else + curr_dso = curr_map->dso; + + goto new_symbol; + } + + if ((used_opd && runtime_ss->adjust_symbols) + || (!used_opd && syms_ss->adjust_symbols)) { + pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " " + "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__, + (u64)sym.st_value, (u64)shdr.sh_addr, + (u64)shdr.sh_offset); + sym.st_value -= shdr.sh_addr - shdr.sh_offset; + } +new_symbol: + /* + * We need to figure out if the object was created from C++ sources + * DWARF DW_compile_unit has this, but we don't always have access + * to it... 
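+ * (Illustrative example, not part of the original comment: with
+ * demangling enabled, the C++ mangled name "_Z3foov" becomes "foo" or,
+ * with DMGL_PARAMS, "foo()".)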
+ */ + if (want_demangle(dso->kernel || kmodule)) { + int demangle_flags = DMGL_NO_OPTS; + if (verbose) + demangle_flags = DMGL_PARAMS | DMGL_ANSI; + + demangled = bfd_demangle(NULL, elf_name, demangle_flags); + if (demangled != NULL) + elf_name = demangled; + } + f = symbol__new(sym.st_value, sym.st_size, + GELF_ST_BIND(sym.st_info), elf_name); + free(demangled); + if (!f) + goto out_elf_end; + + if (filter && filter(curr_map, f)) + symbol__delete(f); + else { + symbols__insert(&curr_dso->symbols[curr_map->type], f); + nr++; + } + } + + /* + * For misannotated, zeroed, ASM function sizes. + */ + if (nr > 0) { + if (!symbol_conf.allow_aliases) + symbols__fixup_duplicate(&dso->symbols[map->type]); + symbols__fixup_end(&dso->symbols[map->type]); + if (kmap) { + /* + * We need to fixup this here too because we create new + * maps here, for things like vsyscall sections. + */ + __map_groups__fixup_end(kmaps, map->type); + } + } + err = nr; +out_elf_end: + return err; +} + +static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data) +{ + GElf_Phdr phdr; + size_t i, phdrnum; + int err; + u64 sz; + + if (elf_getphdrnum(elf, &phdrnum)) + return -1; + + for (i = 0; i < phdrnum; i++) { + if (gelf_getphdr(elf, i, &phdr) == NULL) + return -1; + if (phdr.p_type != PT_LOAD) + continue; + if (exe) { + if (!(phdr.p_flags & PF_X)) + continue; + } else { + if (!(phdr.p_flags & PF_R)) + continue; + } + sz = min(phdr.p_memsz, phdr.p_filesz); + if (!sz) + continue; + err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data); + if (err) + return err; + } + return 0; +} + +int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data, + bool *is_64_bit) +{ + int err; + Elf *elf; + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) + return -1; + + if (is_64_bit) + *is_64_bit = (gelf_getclass(elf) == ELFCLASS64); + + err = elf_read_maps(elf, exe, mapfn, data); + + elf_end(elf); + return err; +} + +enum dso_type dso__type_fd(int fd) +{ + enum dso_type dso_type = DSO__TYPE_UNKNOWN; + GElf_Ehdr ehdr; + Elf_Kind ek; + Elf *elf; + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) + goto out; + + ek = elf_kind(elf); + if (ek != ELF_K_ELF) + goto out_end; + + if (gelf_getclass(elf) == ELFCLASS64) { + dso_type = DSO__TYPE_64BIT; + goto out_end; + } + + if (gelf_getehdr(elf, &ehdr) == NULL) + goto out_end; + + if (ehdr.e_machine == EM_X86_64) + dso_type = DSO__TYPE_X32BIT; + else + dso_type = DSO__TYPE_32BIT; +out_end: + elf_end(elf); +out: + return dso_type; +} + +static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len) +{ + ssize_t r; + size_t n; + int err = -1; + char *buf = malloc(page_size); + + if (buf == NULL) + return -1; + + if (lseek(to, to_offs, SEEK_SET) != to_offs) + goto out; + + if (lseek(from, from_offs, SEEK_SET) != from_offs) + goto out; + + while (len) { + n = page_size; + if (len < n) + n = len; + /* Use read because mmap won't work on proc files */ + r = read(from, buf, n); + if (r < 0) + goto out; + if (!r) + break; + n = r; + r = write(to, buf, n); + if (r < 0) + goto out; + if ((size_t)r != n) + goto out; + len -= n; + } + + err = 0; +out: + free(buf); + return err; +} + +struct kcore { + int fd; + int elfclass; + Elf *elf; + GElf_Ehdr ehdr; +}; + +static int kcore__open(struct kcore *kcore, const char *filename) +{ + GElf_Ehdr *ehdr; + + kcore->fd = open(filename, O_RDONLY); + if (kcore->fd == -1) + return -1; + + kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL); + if (!kcore->elf) + goto out_close; + + kcore->elfclass = 
gelf_getclass(kcore->elf); + if (kcore->elfclass == ELFCLASSNONE) + goto out_end; + + ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr); + if (!ehdr) + goto out_end; + + return 0; + +out_end: + elf_end(kcore->elf); +out_close: + close(kcore->fd); + return -1; +} + +static int kcore__init(struct kcore *kcore, char *filename, int elfclass, + bool temp) +{ + GElf_Ehdr *ehdr; + + kcore->elfclass = elfclass; + + if (temp) + kcore->fd = mkstemp(filename); + else + kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400); + if (kcore->fd == -1) + return -1; + + kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL); + if (!kcore->elf) + goto out_close; + + if (!gelf_newehdr(kcore->elf, elfclass)) + goto out_end; + + ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr); + if (!ehdr) + goto out_end; + + return 0; + +out_end: + elf_end(kcore->elf); +out_close: + close(kcore->fd); + unlink(filename); + return -1; +} + +static void kcore__close(struct kcore *kcore) +{ + elf_end(kcore->elf); + close(kcore->fd); +} + +static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count) +{ + GElf_Ehdr *ehdr = &to->ehdr; + GElf_Ehdr *kehdr = &from->ehdr; + + memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT); + ehdr->e_type = kehdr->e_type; + ehdr->e_machine = kehdr->e_machine; + ehdr->e_version = kehdr->e_version; + ehdr->e_entry = 0; + ehdr->e_shoff = 0; + ehdr->e_flags = kehdr->e_flags; + ehdr->e_phnum = count; + ehdr->e_shentsize = 0; + ehdr->e_shnum = 0; + ehdr->e_shstrndx = 0; + + if (from->elfclass == ELFCLASS32) { + ehdr->e_phoff = sizeof(Elf32_Ehdr); + ehdr->e_ehsize = sizeof(Elf32_Ehdr); + ehdr->e_phentsize = sizeof(Elf32_Phdr); + } else { + ehdr->e_phoff = sizeof(Elf64_Ehdr); + ehdr->e_ehsize = sizeof(Elf64_Ehdr); + ehdr->e_phentsize = sizeof(Elf64_Phdr); + } + + if (!gelf_update_ehdr(to->elf, ehdr)) + return -1; + + if (!gelf_newphdr(to->elf, count)) + return -1; + + return 0; +} + +static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset, + u64 addr, u64 len) +{ + GElf_Phdr gphdr; + GElf_Phdr *phdr; + + phdr = gelf_getphdr(kcore->elf, idx, &gphdr); + if (!phdr) + return -1; + + phdr->p_type = PT_LOAD; + phdr->p_flags = PF_R | PF_W | PF_X; + phdr->p_offset = offset; + phdr->p_vaddr = addr; + phdr->p_paddr = 0; + phdr->p_filesz = len; + phdr->p_memsz = len; + phdr->p_align = page_size; + + if (!gelf_update_phdr(kcore->elf, idx, phdr)) + return -1; + + return 0; +} + +static off_t kcore__write(struct kcore *kcore) +{ + return elf_update(kcore->elf, ELF_C_WRITE); +} + +struct phdr_data { + off_t offset; + u64 addr; + u64 len; +}; + +struct kcore_copy_info { + u64 stext; + u64 etext; + u64 first_symbol; + u64 last_symbol; + u64 first_module; + u64 last_module_symbol; + struct phdr_data kernel_map; + struct phdr_data modules_map; +}; + +static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, + u64 start) +{ + struct kcore_copy_info *kci = arg; + + if (!symbol_type__is_a(type, MAP__FUNCTION)) + return 0; + + if (strchr(name, '[')) { + if (start > kci->last_module_symbol) + kci->last_module_symbol = start; + return 0; + } + + if (!kci->first_symbol || start < kci->first_symbol) + kci->first_symbol = start; + + if (!kci->last_symbol || start > kci->last_symbol) + kci->last_symbol = start; + + if (!strcmp(name, "_stext")) { + kci->stext = start; + return 0; + } + + if (!strcmp(name, "_etext")) { + kci->etext = start; + return 0; + } + + return 0; +} + +static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci, + const char *dir) +{ + char 
kallsyms_filename[PATH_MAX]; + + scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir); + + if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms")) + return -1; + + if (kallsyms__parse(kallsyms_filename, kci, + kcore_copy__process_kallsyms) < 0) + return -1; + + return 0; +} + +static int kcore_copy__process_modules(void *arg, + const char *name __maybe_unused, + u64 start) +{ + struct kcore_copy_info *kci = arg; + + if (!kci->first_module || start < kci->first_module) + kci->first_module = start; + + return 0; +} + +static int kcore_copy__parse_modules(struct kcore_copy_info *kci, + const char *dir) +{ + char modules_filename[PATH_MAX]; + + scnprintf(modules_filename, PATH_MAX, "%s/modules", dir); + + if (symbol__restricted_filename(modules_filename, "/proc/modules")) + return -1; + + if (modules__parse(modules_filename, kci, + kcore_copy__process_modules) < 0) + return -1; + + return 0; +} + +static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff, + u64 s, u64 e) +{ + if (p->addr || s < start || s >= end) + return; + + p->addr = s; + p->offset = (s - start) + pgoff; + p->len = e < end ? e - s : end - s; +} + +static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data) +{ + struct kcore_copy_info *kci = data; + u64 end = start + len; + + kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext, + kci->etext); + + kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module, + kci->last_module_symbol); + + return 0; +} + +static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf) +{ + if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0) + return -1; + + return 0; +} + +static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, + Elf *elf) +{ + if (kcore_copy__parse_kallsyms(kci, dir)) + return -1; + + if (kcore_copy__parse_modules(kci, dir)) + return -1; + + if (kci->stext) + kci->stext = round_down(kci->stext, page_size); + else + kci->stext = round_down(kci->first_symbol, page_size); + + if (kci->etext) { + kci->etext = round_up(kci->etext, page_size); + } else if (kci->last_symbol) { + kci->etext = round_up(kci->last_symbol, page_size); + kci->etext += page_size; + } + + kci->first_module = round_down(kci->first_module, page_size); + + if (kci->last_module_symbol) { + kci->last_module_symbol = round_up(kci->last_module_symbol, + page_size); + kci->last_module_symbol += page_size; + } + + if (!kci->stext || !kci->etext) + return -1; + + if (kci->first_module && !kci->last_module_symbol) + return -1; + + return kcore_copy__read_maps(kci, elf); +} + +static int kcore_copy__copy_file(const char *from_dir, const char *to_dir, + const char *name) +{ + char from_filename[PATH_MAX]; + char to_filename[PATH_MAX]; + + scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name); + scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name); + + return copyfile_mode(from_filename, to_filename, 0400); +} + +static int kcore_copy__unlink(const char *dir, const char *name) +{ + char filename[PATH_MAX]; + + scnprintf(filename, PATH_MAX, "%s/%s", dir, name); + + return unlink(filename); +} + +static int kcore_copy__compare_fds(int from, int to) +{ + char *buf_from; + char *buf_to; + ssize_t ret; + size_t len; + int err = -1; + + buf_from = malloc(page_size); + buf_to = malloc(page_size); + if (!buf_from || !buf_to) + goto out; + + while (1) { + /* Use read because mmap won't work on proc files */ + ret = read(from, buf_from, page_size); + if (ret < 0) + goto out; + + if (!ret) + break; + + 
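+		/* read the same number of bytes back from 'to' and require an exact match */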
len = ret; + + if (readn(to, buf_to, len) != (int)len) + goto out; + + if (memcmp(buf_from, buf_to, len)) + goto out; + } + + err = 0; +out: + free(buf_to); + free(buf_from); + return err; +} + +static int kcore_copy__compare_files(const char *from_filename, + const char *to_filename) +{ + int from, to, err = -1; + + from = open(from_filename, O_RDONLY); + if (from < 0) + return -1; + + to = open(to_filename, O_RDONLY); + if (to < 0) + goto out_close_from; + + err = kcore_copy__compare_fds(from, to); + + close(to); +out_close_from: + close(from); + return err; +} + +static int kcore_copy__compare_file(const char *from_dir, const char *to_dir, + const char *name) +{ + char from_filename[PATH_MAX]; + char to_filename[PATH_MAX]; + + scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name); + scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name); + + return kcore_copy__compare_files(from_filename, to_filename); +} + +/** + * kcore_copy - copy kallsyms, modules and kcore from one directory to another. + * @from_dir: from directory + * @to_dir: to directory + * + * This function copies kallsyms, modules and kcore files from one directory to + * another. kallsyms and modules are copied entirely. Only code segments are + * copied from kcore. It is assumed that two segments suffice: one for the + * kernel proper and one for all the modules. The code segments are determined + * from kallsyms and modules files. The kernel map starts at _stext or the + * lowest function symbol, and ends at _etext or the highest function symbol. + * The module map starts at the lowest module address and ends at the highest + * module symbol. Start addresses are rounded down to the nearest page. End + * addresses are rounded up to the nearest page. An extra page is added to the + * highest kernel symbol and highest module symbol to, hopefully, encompass that + * symbol too. Because it contains only code sections, the resulting kcore is + * unusual. One significant peculiarity is that the mapping (start -> pgoff) + * is not the same for the kernel map and the modules map. That happens because + * the data is copied adjacently whereas the original kcore has gaps. Finally, + * kallsyms and modules files are compared with their copies to check that + * modules have not been loaded or unloaded while the copies were taking place. + * + * Return: %0 on success, %-1 on failure. 
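+ *
+ * A hypothetical caller (both paths are illustrative, not part of this
+ * patch):
+ *
+ *	if (kcore_copy("/proc", "/tmp/kcore_dir") == 0)
+ *		pr_debug("kcore copied for offline analysis\n");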
+ */ +int kcore_copy(const char *from_dir, const char *to_dir) +{ + struct kcore kcore; + struct kcore extract; + size_t count = 2; + int idx = 0, err = -1; + off_t offset = page_size, sz, modules_offset = 0; + struct kcore_copy_info kci = { .stext = 0, }; + char kcore_filename[PATH_MAX]; + char extract_filename[PATH_MAX]; + + if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms")) + return -1; + + if (kcore_copy__copy_file(from_dir, to_dir, "modules")) + goto out_unlink_kallsyms; + + scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir); + scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir); + + if (kcore__open(&kcore, kcore_filename)) + goto out_unlink_modules; + + if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf)) + goto out_kcore_close; + + if (kcore__init(&extract, extract_filename, kcore.elfclass, false)) + goto out_kcore_close; + + if (!kci.modules_map.addr) + count -= 1; + + if (kcore__copy_hdr(&kcore, &extract, count)) + goto out_extract_close; + + if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr, + kci.kernel_map.len)) + goto out_extract_close; + + if (kci.modules_map.addr) { + modules_offset = offset + kci.kernel_map.len; + if (kcore__add_phdr(&extract, idx, modules_offset, + kci.modules_map.addr, kci.modules_map.len)) + goto out_extract_close; + } + + sz = kcore__write(&extract); + if (sz < 0 || sz > offset) + goto out_extract_close; + + if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset, + kci.kernel_map.len)) + goto out_extract_close; + + if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset, + extract.fd, modules_offset, + kci.modules_map.len)) + goto out_extract_close; + + if (kcore_copy__compare_file(from_dir, to_dir, "modules")) + goto out_extract_close; + + if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms")) + goto out_extract_close; + + err = 0; + +out_extract_close: + kcore__close(&extract); + if (err) + unlink(extract_filename); +out_kcore_close: + kcore__close(&kcore); +out_unlink_modules: + if (err) + kcore_copy__unlink(to_dir, "modules"); +out_unlink_kallsyms: + if (err) + kcore_copy__unlink(to_dir, "kallsyms"); + + return err; +} + +int kcore_extract__create(struct kcore_extract *kce) +{ + struct kcore kcore; + struct kcore extract; + size_t count = 1; + int idx = 0, err = -1; + off_t offset = page_size, sz; + + if (kcore__open(&kcore, kce->kcore_filename)) + return -1; + + strcpy(kce->extract_filename, PERF_KCORE_EXTRACT); + if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true)) + goto out_kcore_close; + + if (kcore__copy_hdr(&kcore, &extract, count)) + goto out_extract_close; + + if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len)) + goto out_extract_close; + + sz = kcore__write(&extract); + if (sz < 0 || sz > offset) + goto out_extract_close; + + if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len)) + goto out_extract_close; + + err = 0; + +out_extract_close: + kcore__close(&extract); + if (err) + unlink(kce->extract_filename); +out_kcore_close: + kcore__close(&kcore); + + return err; +} + +void kcore_extract__delete(struct kcore_extract *kce) +{ + unlink(kce->extract_filename); +} + +void symbol__elf_init(void) +{ + elf_version(EV_CURRENT); +} diff --git a/kernel/tools/perf/util/symbol-minimal.c b/kernel/tools/perf/util/symbol-minimal.c new file mode 100644 index 000000000..fd8477cac --- /dev/null +++ b/kernel/tools/perf/util/symbol-minimal.c @@ -0,0 +1,377 @@ +#include "symbol.h" +#include "util.h" + +#include +#include +#include +#include +#include + + 
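+/*
+ * This file is the no-libelf fallback (inferred from the stubs below,
+ * not stated in the patch): only raw PT_NOTE parsing for build-ids is
+ * implemented; symbol table loading, PLT synthesis and kcore handling
+ * simply report failure. A note record, per the ELF spec, is a 12-byte
+ * header (n_namesz, n_descsz, n_type) followed by the name and desc
+ * blobs, each padded to 4 bytes, which is what NOTE_ALIGN() and
+ * read_build_id() below walk by hand.
+ */
+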
+static bool check_need_swap(int file_endian) +{ + const int data = 1; + u8 *check = (u8 *)&data; + int host_endian; + + if (check[0] == 1) + host_endian = ELFDATA2LSB; + else + host_endian = ELFDATA2MSB; + + return host_endian != file_endian; +} + +#define NOTE_ALIGN(sz) (((sz) + 3) & ~3) + +#define NT_GNU_BUILD_ID 3 + +static int read_build_id(void *note_data, size_t note_len, void *bf, + size_t size, bool need_swap) +{ + struct { + u32 n_namesz; + u32 n_descsz; + u32 n_type; + } *nhdr; + void *ptr; + + ptr = note_data; + while (ptr < (note_data + note_len)) { + const char *name; + size_t namesz, descsz; + + nhdr = ptr; + if (need_swap) { + nhdr->n_namesz = bswap_32(nhdr->n_namesz); + nhdr->n_descsz = bswap_32(nhdr->n_descsz); + nhdr->n_type = bswap_32(nhdr->n_type); + } + + namesz = NOTE_ALIGN(nhdr->n_namesz); + descsz = NOTE_ALIGN(nhdr->n_descsz); + + ptr += sizeof(*nhdr); + name = ptr; + ptr += namesz; + if (nhdr->n_type == NT_GNU_BUILD_ID && + nhdr->n_namesz == sizeof("GNU")) { + if (memcmp(name, "GNU", sizeof("GNU")) == 0) { + size_t sz = min(size, descsz); + memcpy(bf, ptr, sz); + memset(bf + sz, 0, size - sz); + return 0; + } + } + ptr += descsz; + } + + return -1; +} + +int filename__read_debuglink(const char *filename __maybe_unused, + char *debuglink __maybe_unused, + size_t size __maybe_unused) +{ + return -1; +} + +/* + * Just try PT_NOTE header otherwise fails + */ +int filename__read_build_id(const char *filename, void *bf, size_t size) +{ + FILE *fp; + int ret = -1; + bool need_swap = false; + u8 e_ident[EI_NIDENT]; + size_t buf_size; + void *buf; + int i; + + fp = fopen(filename, "r"); + if (fp == NULL) + return -1; + + if (fread(e_ident, sizeof(e_ident), 1, fp) != 1) + goto out; + + if (memcmp(e_ident, ELFMAG, SELFMAG) || + e_ident[EI_VERSION] != EV_CURRENT) + goto out; + + need_swap = check_need_swap(e_ident[EI_DATA]); + + /* for simplicity */ + fseek(fp, 0, SEEK_SET); + + if (e_ident[EI_CLASS] == ELFCLASS32) { + Elf32_Ehdr ehdr; + Elf32_Phdr *phdr; + + if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) + goto out; + + if (need_swap) { + ehdr.e_phoff = bswap_32(ehdr.e_phoff); + ehdr.e_phentsize = bswap_16(ehdr.e_phentsize); + ehdr.e_phnum = bswap_16(ehdr.e_phnum); + } + + buf_size = ehdr.e_phentsize * ehdr.e_phnum; + buf = malloc(buf_size); + if (buf == NULL) + goto out; + + fseek(fp, ehdr.e_phoff, SEEK_SET); + if (fread(buf, buf_size, 1, fp) != 1) + goto out_free; + + for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) { + void *tmp; + long offset; + + if (need_swap) { + phdr->p_type = bswap_32(phdr->p_type); + phdr->p_offset = bswap_32(phdr->p_offset); + phdr->p_filesz = bswap_32(phdr->p_filesz); + } + + if (phdr->p_type != PT_NOTE) + continue; + + buf_size = phdr->p_filesz; + offset = phdr->p_offset; + tmp = realloc(buf, buf_size); + if (tmp == NULL) + goto out_free; + + buf = tmp; + fseek(fp, offset, SEEK_SET); + if (fread(buf, buf_size, 1, fp) != 1) + goto out_free; + + ret = read_build_id(buf, buf_size, bf, size, need_swap); + if (ret == 0) + ret = size; + break; + } + } else { + Elf64_Ehdr ehdr; + Elf64_Phdr *phdr; + + if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) + goto out; + + if (need_swap) { + ehdr.e_phoff = bswap_64(ehdr.e_phoff); + ehdr.e_phentsize = bswap_16(ehdr.e_phentsize); + ehdr.e_phnum = bswap_16(ehdr.e_phnum); + } + + buf_size = ehdr.e_phentsize * ehdr.e_phnum; + buf = malloc(buf_size); + if (buf == NULL) + goto out; + + fseek(fp, ehdr.e_phoff, SEEK_SET); + if (fread(buf, buf_size, 1, fp) != 1) + goto out_free; + + for (i = 0, phdr = buf; i < 
ehdr.e_phnum; i++, phdr++) { + void *tmp; + long offset; + + if (need_swap) { + phdr->p_type = bswap_32(phdr->p_type); + phdr->p_offset = bswap_64(phdr->p_offset); + phdr->p_filesz = bswap_64(phdr->p_filesz); + } + + if (phdr->p_type != PT_NOTE) + continue; + + buf_size = phdr->p_filesz; + offset = phdr->p_offset; + tmp = realloc(buf, buf_size); + if (tmp == NULL) + goto out_free; + + buf = tmp; + fseek(fp, offset, SEEK_SET); + if (fread(buf, buf_size, 1, fp) != 1) + goto out_free; + + ret = read_build_id(buf, buf_size, bf, size, need_swap); + if (ret == 0) + ret = size; + break; + } + } +out_free: + free(buf); +out: + fclose(fp); + return ret; +} + +int sysfs__read_build_id(const char *filename, void *build_id, size_t size) +{ + int fd; + int ret = -1; + struct stat stbuf; + size_t buf_size; + void *buf; + + fd = open(filename, O_RDONLY); + if (fd < 0) + return -1; + + if (fstat(fd, &stbuf) < 0) + goto out; + + buf_size = stbuf.st_size; + buf = malloc(buf_size); + if (buf == NULL) + goto out; + + if (read(fd, buf, buf_size) != (ssize_t) buf_size) + goto out_free; + + ret = read_build_id(buf, buf_size, build_id, size, false); +out_free: + free(buf); +out: + close(fd); + return ret; +} + +int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, + enum dso_binary_type type) +{ + int fd = open(name, O_RDONLY); + if (fd < 0) + goto out_errno; + + ss->name = strdup(name); + if (!ss->name) + goto out_close; + + ss->fd = fd; + ss->type = type; + + return 0; +out_close: + close(fd); +out_errno: + dso->load_errno = errno; + return -1; +} + +bool symsrc__possibly_runtime(struct symsrc *ss __maybe_unused) +{ + /* Assume all sym sources could be a runtime image. */ + return true; +} + +bool symsrc__has_symtab(struct symsrc *ss __maybe_unused) +{ + return false; +} + +void symsrc__destroy(struct symsrc *ss) +{ + zfree(&ss->name); + close(ss->fd); +} + +int dso__synthesize_plt_symbols(struct dso *dso __maybe_unused, + struct symsrc *ss __maybe_unused, + struct map *map __maybe_unused, + symbol_filter_t filter __maybe_unused) +{ + return 0; +} + +static int fd__is_64_bit(int fd) +{ + u8 e_ident[EI_NIDENT]; + + if (lseek(fd, 0, SEEK_SET)) + return -1; + + if (readn(fd, e_ident, sizeof(e_ident)) != sizeof(e_ident)) + return -1; + + if (memcmp(e_ident, ELFMAG, SELFMAG) || + e_ident[EI_VERSION] != EV_CURRENT) + return -1; + + return e_ident[EI_CLASS] == ELFCLASS64; +} + +enum dso_type dso__type_fd(int fd) +{ + Elf64_Ehdr ehdr; + int ret; + + ret = fd__is_64_bit(fd); + if (ret < 0) + return DSO__TYPE_UNKNOWN; + + if (ret) + return DSO__TYPE_64BIT; + + if (readn(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr)) + return DSO__TYPE_UNKNOWN; + + if (ehdr.e_machine == EM_X86_64) + return DSO__TYPE_X32BIT; + + return DSO__TYPE_32BIT; +} + +int dso__load_sym(struct dso *dso, struct map *map __maybe_unused, + struct symsrc *ss, + struct symsrc *runtime_ss __maybe_unused, + symbol_filter_t filter __maybe_unused, + int kmodule __maybe_unused) +{ + unsigned char *build_id[BUILD_ID_SIZE]; + int ret; + + ret = fd__is_64_bit(ss->fd); + if (ret >= 0) + dso->is_64_bit = ret; + + if (filename__read_build_id(ss->name, build_id, BUILD_ID_SIZE) > 0) { + dso__set_build_id(dso, build_id); + } + return 0; +} + +int file__read_maps(int fd __maybe_unused, bool exe __maybe_unused, + mapfn_t mapfn __maybe_unused, void *data __maybe_unused, + bool *is_64_bit __maybe_unused) +{ + return -1; +} + +int kcore_extract__create(struct kcore_extract *kce __maybe_unused) +{ + return -1; +} + +void kcore_extract__delete(struct kcore_extract 
*kce __maybe_unused) +{ +} + +int kcore_copy(const char *from_dir __maybe_unused, + const char *to_dir __maybe_unused) +{ + return -1; +} + +void symbol__elf_init(void) +{ +} diff --git a/kernel/tools/perf/util/symbol.c b/kernel/tools/perf/util/symbol.c new file mode 100644 index 000000000..201f6c4ca --- /dev/null +++ b/kernel/tools/perf/util/symbol.c @@ -0,0 +1,2001 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "build-id.h" +#include "util.h" +#include "debug.h" +#include "machine.h" +#include "symbol.h" +#include "strlist.h" +#include "intlist.h" +#include "header.h" + +#include +#include +#include +#include + +static int dso__load_kernel_sym(struct dso *dso, struct map *map, + symbol_filter_t filter); +static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, + symbol_filter_t filter); +int vmlinux_path__nr_entries; +char **vmlinux_path; + +struct symbol_conf symbol_conf = { + .use_modules = true, + .try_vmlinux_path = true, + .annotate_src = true, + .demangle = true, + .demangle_kernel = false, + .cumulate_callchain = true, + .show_hist_headers = true, + .symfs = "", +}; + +static enum dso_binary_type binary_type_symtab[] = { + DSO_BINARY_TYPE__KALLSYMS, + DSO_BINARY_TYPE__GUEST_KALLSYMS, + DSO_BINARY_TYPE__JAVA_JIT, + DSO_BINARY_TYPE__DEBUGLINK, + DSO_BINARY_TYPE__BUILD_ID_CACHE, + DSO_BINARY_TYPE__FEDORA_DEBUGINFO, + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__SYSTEM_PATH_DSO, + DSO_BINARY_TYPE__GUEST_KMODULE, + DSO_BINARY_TYPE__GUEST_KMODULE_COMP, + DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, + DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP, + DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, + DSO_BINARY_TYPE__NOT_FOUND, +}; + +#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab) + +bool symbol_type__is_a(char symbol_type, enum map_type map_type) +{ + symbol_type = toupper(symbol_type); + + switch (map_type) { + case MAP__FUNCTION: + return symbol_type == 'T' || symbol_type == 'W'; + case MAP__VARIABLE: + return symbol_type == 'D'; + default: + return false; + } +} + +static int prefix_underscores_count(const char *str) +{ + const char *tail = str; + + while (*tail == '_') + tail++; + + return tail - str; +} + +#define SYMBOL_A 0 +#define SYMBOL_B 1 + +static int choose_best_symbol(struct symbol *syma, struct symbol *symb) +{ + s64 a; + s64 b; + size_t na, nb; + + /* Prefer a symbol with non zero length */ + a = syma->end - syma->start; + b = symb->end - symb->start; + if ((b == 0) && (a > 0)) + return SYMBOL_A; + else if ((a == 0) && (b > 0)) + return SYMBOL_B; + + /* Prefer a non weak symbol over a weak one */ + a = syma->binding == STB_WEAK; + b = symb->binding == STB_WEAK; + if (b && !a) + return SYMBOL_A; + if (a && !b) + return SYMBOL_B; + + /* Prefer a global symbol over a non global one */ + a = syma->binding == STB_GLOBAL; + b = symb->binding == STB_GLOBAL; + if (a && !b) + return SYMBOL_A; + if (b && !a) + return SYMBOL_B; + + /* Prefer a symbol with less underscores */ + a = prefix_underscores_count(syma->name); + b = prefix_underscores_count(symb->name); + if (b > a) + return SYMBOL_A; + else if (a > b) + return SYMBOL_B; + + /* Choose the symbol with the longest name */ + na = strlen(syma->name); + nb = strlen(symb->name); + if (na > nb) + return SYMBOL_A; + else if (na < nb) + return SYMBOL_B; + + /* Avoid "SyS" kernel syscall aliases */ + if (na >= 3 && !strncmp(syma->name, "SyS", 3)) + return SYMBOL_B; + if (na >= 10 && 
!strncmp(syma->name, "compat_SyS", 10)) + return SYMBOL_B; + + return SYMBOL_A; +} + +void symbols__fixup_duplicate(struct rb_root *symbols) +{ + struct rb_node *nd; + struct symbol *curr, *next; + + nd = rb_first(symbols); + + while (nd) { + curr = rb_entry(nd, struct symbol, rb_node); +again: + nd = rb_next(&curr->rb_node); + next = rb_entry(nd, struct symbol, rb_node); + + if (!nd) + break; + + if (curr->start != next->start) + continue; + + if (choose_best_symbol(curr, next) == SYMBOL_A) { + rb_erase(&next->rb_node, symbols); + symbol__delete(next); + goto again; + } else { + nd = rb_next(&curr->rb_node); + rb_erase(&curr->rb_node, symbols); + symbol__delete(curr); + } + } +} + +void symbols__fixup_end(struct rb_root *symbols) +{ + struct rb_node *nd, *prevnd = rb_first(symbols); + struct symbol *curr, *prev; + + if (prevnd == NULL) + return; + + curr = rb_entry(prevnd, struct symbol, rb_node); + + for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { + prev = curr; + curr = rb_entry(nd, struct symbol, rb_node); + + if (prev->end == prev->start && prev->end != curr->start) + prev->end = curr->start; + } + + /* Last entry */ + if (curr->end == curr->start) + curr->end = roundup(curr->start, 4096); +} + +void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) +{ + struct map *prev, *curr; + struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]); + + if (prevnd == NULL) + return; + + curr = rb_entry(prevnd, struct map, rb_node); + + for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { + prev = curr; + curr = rb_entry(nd, struct map, rb_node); + prev->end = curr->start; + } + + /* + * We still haven't the actual symbols, so guess the + * last map final address. + */ + curr->end = ~0ULL; +} + +struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) +{ + size_t namelen = strlen(name) + 1; + struct symbol *sym = calloc(1, (symbol_conf.priv_size + + sizeof(*sym) + namelen)); + if (sym == NULL) + return NULL; + + if (symbol_conf.priv_size) + sym = ((void *)sym) + symbol_conf.priv_size; + + sym->start = start; + sym->end = len ? start + len : start; + sym->binding = binding; + sym->namelen = namelen - 1; + + pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", + __func__, name, start, sym->end); + memcpy(sym->name, name, namelen); + + return sym; +} + +void symbol__delete(struct symbol *sym) +{ + free(((void *)sym) - symbol_conf.priv_size); +} + +size_t symbol__fprintf(struct symbol *sym, FILE *fp) +{ + return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n", + sym->start, sym->end, + sym->binding == STB_GLOBAL ? 'g' : + sym->binding == STB_LOCAL ? 
'l' : 'w', + sym->name); +} + +size_t symbol__fprintf_symname_offs(const struct symbol *sym, + const struct addr_location *al, FILE *fp) +{ + unsigned long offset; + size_t length; + + if (sym && sym->name) { + length = fprintf(fp, "%s", sym->name); + if (al) { + if (al->addr < sym->end) + offset = al->addr - sym->start; + else + offset = al->addr - al->map->start - sym->start; + length += fprintf(fp, "+0x%lx", offset); + } + return length; + } else + return fprintf(fp, "[unknown]"); +} + +size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp) +{ + return symbol__fprintf_symname_offs(sym, NULL, fp); +} + +void symbols__delete(struct rb_root *symbols) +{ + struct symbol *pos; + struct rb_node *next = rb_first(symbols); + + while (next) { + pos = rb_entry(next, struct symbol, rb_node); + next = rb_next(&pos->rb_node); + rb_erase(&pos->rb_node, symbols); + symbol__delete(pos); + } +} + +void symbols__insert(struct rb_root *symbols, struct symbol *sym) +{ + struct rb_node **p = &symbols->rb_node; + struct rb_node *parent = NULL; + const u64 ip = sym->start; + struct symbol *s; + + while (*p != NULL) { + parent = *p; + s = rb_entry(parent, struct symbol, rb_node); + if (ip < s->start) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + rb_link_node(&sym->rb_node, parent, p); + rb_insert_color(&sym->rb_node, symbols); +} + +static struct symbol *symbols__find(struct rb_root *symbols, u64 ip) +{ + struct rb_node *n; + + if (symbols == NULL) + return NULL; + + n = symbols->rb_node; + + while (n) { + struct symbol *s = rb_entry(n, struct symbol, rb_node); + + if (ip < s->start) + n = n->rb_left; + else if (ip >= s->end) + n = n->rb_right; + else + return s; + } + + return NULL; +} + +static struct symbol *symbols__first(struct rb_root *symbols) +{ + struct rb_node *n = rb_first(symbols); + + if (n) + return rb_entry(n, struct symbol, rb_node); + + return NULL; +} + +static struct symbol *symbols__next(struct symbol *sym) +{ + struct rb_node *n = rb_next(&sym->rb_node); + + if (n) + return rb_entry(n, struct symbol, rb_node); + + return NULL; +} + +struct symbol_name_rb_node { + struct rb_node rb_node; + struct symbol sym; +}; + +static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym) +{ + struct rb_node **p = &symbols->rb_node; + struct rb_node *parent = NULL; + struct symbol_name_rb_node *symn, *s; + + symn = container_of(sym, struct symbol_name_rb_node, sym); + + while (*p != NULL) { + parent = *p; + s = rb_entry(parent, struct symbol_name_rb_node, rb_node); + if (strcmp(sym->name, s->sym.name) < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + rb_link_node(&symn->rb_node, parent, p); + rb_insert_color(&symn->rb_node, symbols); +} + +static void symbols__sort_by_name(struct rb_root *symbols, + struct rb_root *source) +{ + struct rb_node *nd; + + for (nd = rb_first(source); nd; nd = rb_next(nd)) { + struct symbol *pos = rb_entry(nd, struct symbol, rb_node); + symbols__insert_by_name(symbols, pos); + } +} + +static struct symbol *symbols__find_by_name(struct rb_root *symbols, + const char *name) +{ + struct rb_node *n; + struct symbol_name_rb_node *s; + + if (symbols == NULL) + return NULL; + + n = symbols->rb_node; + + while (n) { + int cmp; + + s = rb_entry(n, struct symbol_name_rb_node, rb_node); + cmp = strcmp(name, s->sym.name); + + if (cmp < 0) + n = n->rb_left; + else if (cmp > 0) + n = n->rb_right; + else + break; + } + + if (n == NULL) + return NULL; + + /* return first symbol that has same name (if any) */ + for (n = rb_prev(n); n; n 
= rb_prev(n)) { + struct symbol_name_rb_node *tmp; + + tmp = rb_entry(n, struct symbol_name_rb_node, rb_node); + if (strcmp(tmp->sym.name, s->sym.name)) + break; + + s = tmp; + } + + return &s->sym; +} + +struct symbol *dso__find_symbol(struct dso *dso, + enum map_type type, u64 addr) +{ + return symbols__find(&dso->symbols[type], addr); +} + +struct symbol *dso__first_symbol(struct dso *dso, enum map_type type) +{ + return symbols__first(&dso->symbols[type]); +} + +struct symbol *dso__next_symbol(struct symbol *sym) +{ + return symbols__next(sym); +} + +struct symbol *symbol__next_by_name(struct symbol *sym) +{ + struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym); + struct rb_node *n = rb_next(&s->rb_node); + + return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL; +} + + /* + * Returns the first symbol that matches @name. + */ +struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, + const char *name) +{ + return symbols__find_by_name(&dso->symbol_names[type], name); +} + +void dso__sort_by_name(struct dso *dso, enum map_type type) +{ + dso__set_sorted_by_name(dso, type); + return symbols__sort_by_name(&dso->symbol_names[type], + &dso->symbols[type]); +} + +size_t dso__fprintf_symbols_by_name(struct dso *dso, + enum map_type type, FILE *fp) +{ + size_t ret = 0; + struct rb_node *nd; + struct symbol_name_rb_node *pos; + + for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) { + pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); + ret += fprintf(fp, "%s\n", pos->sym.name); + } + + return ret; +} + +int modules__parse(const char *filename, void *arg, + int (*process_module)(void *arg, const char *name, + u64 start)) +{ + char *line = NULL; + size_t n; + FILE *file; + int err = 0; + + file = fopen(filename, "r"); + if (file == NULL) + return -1; + + while (1) { + char name[PATH_MAX]; + u64 start; + char *sep; + ssize_t line_len; + + line_len = getline(&line, &n, file); + if (line_len < 0) { + if (feof(file)) + break; + err = -1; + goto out; + } + + if (!line) { + err = -1; + goto out; + } + + line[--line_len] = '\0'; /* \n */ + + sep = strrchr(line, 'x'); + if (sep == NULL) + continue; + + hex2u64(sep + 1, &start); + + sep = strchr(line, ' '); + if (sep == NULL) + continue; + + *sep = '\0'; + + scnprintf(name, sizeof(name), "[%s]", line); + + err = process_module(arg, name, start); + if (err) + break; + } +out: + free(line); + fclose(file); + return err; +} + +struct process_kallsyms_args { + struct map *map; + struct dso *dso; +}; + +/* + * These are symbols in the kernel image, so make sure that + * sym is from a kernel DSO.
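+ *
+ * The names below are known idle-loop entry points; the intended use
+ * (as far as this patch shows) is letting callers drop samples taken
+ * while a CPU sat idle.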
+ */ +bool symbol__is_idle(struct symbol *sym) +{ + const char * const idle_symbols[] = { + "cpu_idle", + "cpu_startup_entry", + "intel_idle", + "default_idle", + "native_safe_halt", + "enter_idle", + "exit_idle", + "mwait_idle", + "mwait_idle_with_hints", + "poll_idle", + "ppc64_runlatch_off", + "pseries_dedicated_idle_sleep", + NULL + }; + + int i; + + if (!sym) + return false; + + for (i = 0; idle_symbols[i]; i++) { + if (!strcmp(idle_symbols[i], sym->name)) + return true; + } + + return false; +} + +static int map__process_kallsym_symbol(void *arg, const char *name, + char type, u64 start) +{ + struct symbol *sym; + struct process_kallsyms_args *a = arg; + struct rb_root *root = &a->dso->symbols[a->map->type]; + + if (!symbol_type__is_a(type, a->map->type)) + return 0; + + /* + * module symbols are not sorted so we add all + * symbols, setting length to 0, and rely on + * symbols__fixup_end() to fix it up. + */ + sym = symbol__new(start, 0, kallsyms2elf_type(type), name); + if (sym == NULL) + return -ENOMEM; + /* + * We will pass the symbols to the filter later, in + * map__split_kallsyms, when we have split the maps per module + */ + symbols__insert(root, sym); + + return 0; +} + +/* + * Loads the function entries in /proc/kallsyms into kernel_map->dso, + * so that we can in the next step set the symbol ->end address and then + * call kernel_maps__split_kallsyms. + */ +static int dso__load_all_kallsyms(struct dso *dso, const char *filename, + struct map *map) +{ + struct process_kallsyms_args args = { .map = map, .dso = dso, }; + return kallsyms__parse(filename, &args, map__process_kallsym_symbol); +} + +static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map, + symbol_filter_t filter) +{ + struct map_groups *kmaps = map__kmaps(map); + struct map *curr_map; + struct symbol *pos; + int count = 0, moved = 0; + struct rb_root *root = &dso->symbols[map->type]; + struct rb_node *next = rb_first(root); + + if (!kmaps) + return -1; + + while (next) { + char *module; + + pos = rb_entry(next, struct symbol, rb_node); + next = rb_next(&pos->rb_node); + + module = strchr(pos->name, '\t'); + if (module) + *module = '\0'; + + curr_map = map_groups__find(kmaps, map->type, pos->start); + + if (!curr_map || (filter && filter(curr_map, pos))) { + rb_erase(&pos->rb_node, root); + symbol__delete(pos); + } else { + pos->start -= curr_map->start - curr_map->pgoff; + if (pos->end) + pos->end -= curr_map->start - curr_map->pgoff; + if (curr_map != map) { + rb_erase(&pos->rb_node, root); + symbols__insert( + &curr_map->dso->symbols[curr_map->type], + pos); + ++moved; + } else { + ++count; + } + } + } + + /* Symbols have been adjusted */ + dso->adjust_symbols = 1; + + return count + moved; +} + +/* + * Split the symbols into maps, making sure there are no overlaps, i.e. the + * kernel range is broken in several maps, named [kernel].N, as we don't have + * the original ELF section names vmlinux have. 
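+ *
+ * e.g. a kallsyms entry "ffffffffa0000010 t foo\t[bar]" is moved to the
+ * map of module "[bar]", while kernel text outside the main map lands
+ * in "[kernel].0", "[kernel].1", and so on (address illustrative).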
+ */ +static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta, + symbol_filter_t filter) +{ + struct map_groups *kmaps = map__kmaps(map); + struct machine *machine; + struct map *curr_map = map; + struct symbol *pos; + int count = 0, moved = 0; + struct rb_root *root = &dso->symbols[map->type]; + struct rb_node *next = rb_first(root); + int kernel_range = 0; + + if (!kmaps) + return -1; + + machine = kmaps->machine; + + while (next) { + char *module; + + pos = rb_entry(next, struct symbol, rb_node); + next = rb_next(&pos->rb_node); + + module = strchr(pos->name, '\t'); + if (module) { + if (!symbol_conf.use_modules) + goto discard_symbol; + + *module++ = '\0'; + + if (strcmp(curr_map->dso->short_name, module)) { + if (curr_map != map && + dso->kernel == DSO_TYPE_GUEST_KERNEL && + machine__is_default_guest(machine)) { + /* + * We assume all symbols of a module are + * continuous in * kallsyms, so curr_map + * points to a module and all its + * symbols are in its kmap. Mark it as + * loaded. + */ + dso__set_loaded(curr_map->dso, + curr_map->type); + } + + curr_map = map_groups__find_by_name(kmaps, + map->type, module); + if (curr_map == NULL) { + pr_debug("%s/proc/{kallsyms,modules} " + "inconsistency while looking " + "for \"%s\" module!\n", + machine->root_dir, module); + curr_map = map; + goto discard_symbol; + } + + if (curr_map->dso->loaded && + !machine__is_default_guest(machine)) + goto discard_symbol; + } + /* + * So that we look just like we get from .ko files, + * i.e. not prelinked, relative to map->start. + */ + pos->start = curr_map->map_ip(curr_map, pos->start); + pos->end = curr_map->map_ip(curr_map, pos->end); + } else if (curr_map != map) { + char dso_name[PATH_MAX]; + struct dso *ndso; + + if (delta) { + /* Kernel was relocated at boot time */ + pos->start -= delta; + pos->end -= delta; + } + + if (count == 0) { + curr_map = map; + goto filter_symbol; + } + + if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + snprintf(dso_name, sizeof(dso_name), + "[guest.kernel].%d", + kernel_range++); + else + snprintf(dso_name, sizeof(dso_name), + "[kernel].%d", + kernel_range++); + + ndso = dso__new(dso_name); + if (ndso == NULL) + return -1; + + ndso->kernel = dso->kernel; + + curr_map = map__new2(pos->start, ndso, map->type); + if (curr_map == NULL) { + dso__delete(ndso); + return -1; + } + + curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; + map_groups__insert(kmaps, curr_map); + ++kernel_range; + } else if (delta) { + /* Kernel was relocated at boot time */ + pos->start -= delta; + pos->end -= delta; + } +filter_symbol: + if (filter && filter(curr_map, pos)) { +discard_symbol: rb_erase(&pos->rb_node, root); + symbol__delete(pos); + } else { + if (curr_map != map) { + rb_erase(&pos->rb_node, root); + symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); + ++moved; + } else + ++count; + } + } + + if (curr_map != map && + dso->kernel == DSO_TYPE_GUEST_KERNEL && + machine__is_default_guest(kmaps->machine)) { + dso__set_loaded(curr_map->dso, curr_map->type); + } + + return count + moved; +} + +bool symbol__restricted_filename(const char *filename, + const char *restricted_filename) +{ + bool restricted = false; + + if (symbol_conf.kptr_restrict) { + char *r = realpath(filename, NULL); + + if (r != NULL) { + restricted = strcmp(r, restricted_filename) == 0; + free(r); + return restricted; + } + } + + return restricted; +} + +struct module_info { + struct rb_node rb_node; + char *name; + u64 start; +}; + +static void add_module(struct module_info *mi, struct 
rb_root *modules) +{ + struct rb_node **p = &modules->rb_node; + struct rb_node *parent = NULL; + struct module_info *m; + + while (*p != NULL) { + parent = *p; + m = rb_entry(parent, struct module_info, rb_node); + if (strcmp(mi->name, m->name) < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + rb_link_node(&mi->rb_node, parent, p); + rb_insert_color(&mi->rb_node, modules); +} + +static void delete_modules(struct rb_root *modules) +{ + struct module_info *mi; + struct rb_node *next = rb_first(modules); + + while (next) { + mi = rb_entry(next, struct module_info, rb_node); + next = rb_next(&mi->rb_node); + rb_erase(&mi->rb_node, modules); + zfree(&mi->name); + free(mi); + } +} + +static struct module_info *find_module(const char *name, + struct rb_root *modules) +{ + struct rb_node *n = modules->rb_node; + + while (n) { + struct module_info *m; + int cmp; + + m = rb_entry(n, struct module_info, rb_node); + cmp = strcmp(name, m->name); + if (cmp < 0) + n = n->rb_left; + else if (cmp > 0) + n = n->rb_right; + else + return m; + } + + return NULL; +} + +static int __read_proc_modules(void *arg, const char *name, u64 start) +{ + struct rb_root *modules = arg; + struct module_info *mi; + + mi = zalloc(sizeof(struct module_info)); + if (!mi) + return -ENOMEM; + + mi->name = strdup(name); + mi->start = start; + + if (!mi->name) { + free(mi); + return -ENOMEM; + } + + add_module(mi, modules); + + return 0; +} + +static int read_proc_modules(const char *filename, struct rb_root *modules) +{ + if (symbol__restricted_filename(filename, "/proc/modules")) + return -1; + + if (modules__parse(filename, modules, __read_proc_modules)) { + delete_modules(modules); + return -1; + } + + return 0; +} + +int compare_proc_modules(const char *from, const char *to) +{ + struct rb_root from_modules = RB_ROOT; + struct rb_root to_modules = RB_ROOT; + struct rb_node *from_node, *to_node; + struct module_info *from_m, *to_m; + int ret = -1; + + if (read_proc_modules(from, &from_modules)) + return -1; + + if (read_proc_modules(to, &to_modules)) + goto out_delete_from; + + from_node = rb_first(&from_modules); + to_node = rb_first(&to_modules); + while (from_node) { + if (!to_node) + break; + + from_m = rb_entry(from_node, struct module_info, rb_node); + to_m = rb_entry(to_node, struct module_info, rb_node); + + if (from_m->start != to_m->start || + strcmp(from_m->name, to_m->name)) + break; + + from_node = rb_next(from_node); + to_node = rb_next(to_node); + } + + if (!from_node && !to_node) + ret = 0; + + delete_modules(&to_modules); +out_delete_from: + delete_modules(&from_modules); + + return ret; +} + +static int do_validate_kcore_modules(const char *filename, struct map *map, + struct map_groups *kmaps) +{ + struct rb_root modules = RB_ROOT; + struct map *old_map; + int err; + + err = read_proc_modules(filename, &modules); + if (err) + return err; + + old_map = map_groups__first(kmaps, map->type); + while (old_map) { + struct map *next = map_groups__next(old_map); + struct module_info *mi; + + if (old_map == map || old_map->start == map->start) { + /* The kernel map */ + old_map = next; + continue; + } + + /* Module must be in memory at the same address */ + mi = find_module(old_map->dso->short_name, &modules); + if (!mi || mi->start != old_map->start) { + err = -EINVAL; + goto out; + } + + old_map = next; + } +out: + delete_modules(&modules); + return err; +} + +/* + * If kallsyms is referenced by name then we look for filename in the same + * directory. 
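+ * e.g. base_name "modules" combined with "/tmp/dir/kallsyms" resolves
+ * to "/tmp/dir/modules" (paths illustrative).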
+ */ +static bool filename_from_kallsyms_filename(char *filename, + const char *base_name, + const char *kallsyms_filename) +{ + char *name; + + strcpy(filename, kallsyms_filename); + name = strrchr(filename, '/'); + if (!name) + return false; + + name += 1; + + if (!strcmp(name, "kallsyms")) { + strcpy(name, base_name); + return true; + } + + return false; +} + +static int validate_kcore_modules(const char *kallsyms_filename, + struct map *map) +{ + struct map_groups *kmaps = map__kmaps(map); + char modules_filename[PATH_MAX]; + + if (!kmaps) + return -EINVAL; + + if (!filename_from_kallsyms_filename(modules_filename, "modules", + kallsyms_filename)) + return -EINVAL; + + if (do_validate_kcore_modules(modules_filename, map, kmaps)) + return -EINVAL; + + return 0; +} + +static int validate_kcore_addresses(const char *kallsyms_filename, + struct map *map) +{ + struct kmap *kmap = map__kmap(map); + + if (!kmap) + return -EINVAL; + + if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) { + u64 start; + + start = kallsyms__get_function_start(kallsyms_filename, + kmap->ref_reloc_sym->name); + if (start != kmap->ref_reloc_sym->addr) + return -EINVAL; + } + + return validate_kcore_modules(kallsyms_filename, map); +} + +struct kcore_mapfn_data { + struct dso *dso; + enum map_type type; + struct list_head maps; +}; + +static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data) +{ + struct kcore_mapfn_data *md = data; + struct map *map; + + map = map__new2(start, md->dso, md->type); + if (map == NULL) + return -ENOMEM; + + map->end = map->start + len; + map->pgoff = pgoff; + + list_add(&map->node, &md->maps); + + return 0; +} + +static int dso__load_kcore(struct dso *dso, struct map *map, + const char *kallsyms_filename) +{ + struct map_groups *kmaps = map__kmaps(map); + struct machine *machine; + struct kcore_mapfn_data md; + struct map *old_map, *new_map, *replacement_map = NULL; + bool is_64_bit; + int err, fd; + char kcore_filename[PATH_MAX]; + struct symbol *sym; + + if (!kmaps) + return -EINVAL; + + machine = kmaps->machine; + + /* This function requires that the map is the kernel map */ + if (map != machine->vmlinux_maps[map->type]) + return -EINVAL; + + if (!filename_from_kallsyms_filename(kcore_filename, "kcore", + kallsyms_filename)) + return -EINVAL; + + /* Modules and kernel must be present at their original addresses */ + if (validate_kcore_addresses(kallsyms_filename, map)) + return -EINVAL; + + md.dso = dso; + md.type = map->type; + INIT_LIST_HEAD(&md.maps); + + fd = open(kcore_filename, O_RDONLY); + if (fd < 0) + return -EINVAL; + + /* Read new maps into temporary lists */ + err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md, + &is_64_bit); + if (err) + goto out_err; + dso->is_64_bit = is_64_bit; + + if (list_empty(&md.maps)) { + err = -EINVAL; + goto out_err; + } + + /* Remove old maps */ + old_map = map_groups__first(kmaps, map->type); + while (old_map) { + struct map *next = map_groups__next(old_map); + + if (old_map != map) + map_groups__remove(kmaps, old_map); + old_map = next; + } + + /* Find the kernel map using the first symbol */ + sym = dso__first_symbol(dso, map->type); + list_for_each_entry(new_map, &md.maps, node) { + if (sym && sym->start >= new_map->start && + sym->start < new_map->end) { + replacement_map = new_map; + break; + } + } + + if (!replacement_map) + replacement_map = list_entry(md.maps.next, struct map, node); + + /* Add new maps */ + while (!list_empty(&md.maps)) { + new_map = list_entry(md.maps.next, struct map, node); + 
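+		/* detach from the temporary list; the map is re-inserted into kmaps below */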
list_del(&new_map->node); + if (new_map == replacement_map) { + map->start = new_map->start; + map->end = new_map->end; + map->pgoff = new_map->pgoff; + map->map_ip = new_map->map_ip; + map->unmap_ip = new_map->unmap_ip; + map__delete(new_map); + /* Ensure maps are correctly ordered */ + map_groups__remove(kmaps, map); + map_groups__insert(kmaps, map); + } else { + map_groups__insert(kmaps, new_map); + } + } + + /* + * Set the data type and long name so that kcore can be read via + * dso__data_read_addr(). + */ + if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE; + else + dso->binary_type = DSO_BINARY_TYPE__KCORE; + dso__set_long_name(dso, strdup(kcore_filename), true); + + close(fd); + + if (map->type == MAP__FUNCTION) + pr_debug("Using %s for kernel object code\n", kcore_filename); + else + pr_debug("Using %s for kernel data\n", kcore_filename); + + return 0; + +out_err: + while (!list_empty(&md.maps)) { + map = list_entry(md.maps.next, struct map, node); + list_del(&map->node); + map__delete(map); + } + close(fd); + return -EINVAL; +} + +/* + * If the kernel is relocated at boot time, kallsyms won't match. Compute the + * delta based on the relocation reference symbol. + */ +static int kallsyms__delta(struct map *map, const char *filename, u64 *delta) +{ + struct kmap *kmap = map__kmap(map); + u64 addr; + + if (!kmap) + return -1; + + if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name) + return 0; + + addr = kallsyms__get_function_start(filename, + kmap->ref_reloc_sym->name); + if (!addr) + return -1; + + *delta = addr - kmap->ref_reloc_sym->addr; + return 0; +} + +int dso__load_kallsyms(struct dso *dso, const char *filename, + struct map *map, symbol_filter_t filter) +{ + u64 delta = 0; + + if (symbol__restricted_filename(filename, "/proc/kallsyms")) + return -1; + + if (dso__load_all_kallsyms(dso, filename, map) < 0) + return -1; + + if (kallsyms__delta(map, filename, &delta)) + return -1; + + symbols__fixup_duplicate(&dso->symbols[map->type]); + symbols__fixup_end(&dso->symbols[map->type]); + + if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; + else + dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS; + + if (!dso__load_kcore(dso, map, filename)) + return dso__split_kallsyms_for_kcore(dso, map, filter); + else + return dso__split_kallsyms(dso, map, delta, filter); +} + +static int dso__load_perf_map(struct dso *dso, struct map *map, + symbol_filter_t filter) +{ + char *line = NULL; + size_t n; + FILE *file; + int nr_syms = 0; + + file = fopen(dso->long_name, "r"); + if (file == NULL) + goto out_failure; + + while (!feof(file)) { + u64 start, size; + struct symbol *sym; + int line_len, len; + + line_len = getline(&line, &n, file); + if (line_len < 0) + break; + + if (!line) + goto out_failure; + + line[--line_len] = '\0'; /* \n */ + + len = hex2u64(line, &start); + + len++; + if (len + 2 >= line_len) + continue; + + len += hex2u64(line + len, &size); + + len++; + if (len + 2 >= line_len) + continue; + + sym = symbol__new(start, size, STB_GLOBAL, line + len); + + if (sym == NULL) + goto out_delete_line; + + if (filter && filter(map, sym)) + symbol__delete(sym); + else { + symbols__insert(&dso->symbols[map->type], sym); + nr_syms++; + } + } + + free(line); + fclose(file); + + return nr_syms; + +out_delete_line: + free(line); +out_failure: + return -1; +} + +static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, + enum dso_binary_type type) +{ + switch (type) { + case 
DSO_BINARY_TYPE__JAVA_JIT: + case DSO_BINARY_TYPE__DEBUGLINK: + case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: + case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: + case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: + case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: + case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: + return !kmod && dso->kernel == DSO_TYPE_USER; + + case DSO_BINARY_TYPE__KALLSYMS: + case DSO_BINARY_TYPE__VMLINUX: + case DSO_BINARY_TYPE__KCORE: + return dso->kernel == DSO_TYPE_KERNEL; + + case DSO_BINARY_TYPE__GUEST_KALLSYMS: + case DSO_BINARY_TYPE__GUEST_VMLINUX: + case DSO_BINARY_TYPE__GUEST_KCORE: + return dso->kernel == DSO_TYPE_GUEST_KERNEL; + + case DSO_BINARY_TYPE__GUEST_KMODULE: + case DSO_BINARY_TYPE__GUEST_KMODULE_COMP: + case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: + case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP: + /* + * kernel modules know their symtab type - it's set when + * creating a module dso in machine__new_module(). + */ + return kmod && dso->symtab_type == type; + + case DSO_BINARY_TYPE__BUILD_ID_CACHE: + return true; + + case DSO_BINARY_TYPE__NOT_FOUND: + default: + return false; + } +} + +int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) +{ + char *name; + int ret = -1; + u_int i; + struct machine *machine; + char *root_dir = (char *) ""; + int ss_pos = 0; + struct symsrc ss_[2]; + struct symsrc *syms_ss = NULL, *runtime_ss = NULL; + bool kmod; + + dso__set_loaded(dso, map->type); + + if (dso->kernel == DSO_TYPE_KERNEL) + return dso__load_kernel_sym(dso, map, filter); + else if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + return dso__load_guest_kernel_sym(dso, map, filter); + + if (map->groups && map->groups->machine) + machine = map->groups->machine; + else + machine = NULL; + + dso->adjust_symbols = 0; + + if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { + struct stat st; + + if (lstat(dso->name, &st) < 0) + return -1; + + if (st.st_uid && (st.st_uid != geteuid())) { + pr_warning("File %s not owned by current user or root, " + "ignoring it.\n", dso->name); + return -1; + } + + ret = dso__load_perf_map(dso, map, filter); + dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT : + DSO_BINARY_TYPE__NOT_FOUND; + return ret; + } + + if (machine) + root_dir = machine->root_dir; + + name = malloc(PATH_MAX); + if (!name) + return -1; + + kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || + dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || + dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE || + dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; + + /* + * Iterate over candidate debug images. + * Keep track of "interesting" ones (those which have a symtab, dynsym, + * and/or opd section) for processing. 
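+ * (.symtab is the full ELF symbol table, .dynsym the dynamic-linking
+ * subset, and .opd holds ppc64 function descriptors.)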
+ */ + for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) { + struct symsrc *ss = &ss_[ss_pos]; + bool next_slot = false; + + enum dso_binary_type symtab_type = binary_type_symtab[i]; + + if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type)) + continue; + + if (dso__read_binary_type_filename(dso, symtab_type, + root_dir, name, PATH_MAX)) + continue; + + /* Name is now the name of the next image to try */ + if (symsrc__init(ss, dso, name, symtab_type) < 0) + continue; + + if (!syms_ss && symsrc__has_symtab(ss)) { + syms_ss = ss; + next_slot = true; + if (!dso->symsrc_filename) + dso->symsrc_filename = strdup(name); + } + + if (!runtime_ss && symsrc__possibly_runtime(ss)) { + runtime_ss = ss; + next_slot = true; + } + + if (next_slot) { + ss_pos++; + + if (syms_ss && runtime_ss) + break; + } else { + symsrc__destroy(ss); + } + + } + + if (!runtime_ss && !syms_ss) + goto out_free; + + if (runtime_ss && !syms_ss) { + syms_ss = runtime_ss; + } + + /* We'll have to hope for the best */ + if (!runtime_ss && syms_ss) + runtime_ss = syms_ss; + + if (syms_ss) + ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, kmod); + else + ret = -1; + + if (ret > 0) { + int nr_plt; + + nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map, filter); + if (nr_plt > 0) + ret += nr_plt; + } + + for (; ss_pos > 0; ss_pos--) + symsrc__destroy(&ss_[ss_pos - 1]); +out_free: + free(name); + if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) + return 0; + return ret; +} + +struct map *map_groups__find_by_name(struct map_groups *mg, + enum map_type type, const char *name) +{ + struct rb_node *nd; + + for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { + struct map *map = rb_entry(nd, struct map, rb_node); + + if (map->dso && strcmp(map->dso->short_name, name) == 0) + return map; + } + + return NULL; +} + +int dso__load_vmlinux(struct dso *dso, struct map *map, + const char *vmlinux, bool vmlinux_allocated, + symbol_filter_t filter) +{ + int err = -1; + struct symsrc ss; + char symfs_vmlinux[PATH_MAX]; + enum dso_binary_type symtab_type; + + if (vmlinux[0] == '/') + snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux); + else + symbol__join_symfs(symfs_vmlinux, vmlinux); + + if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX; + else + symtab_type = DSO_BINARY_TYPE__VMLINUX; + + if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) + return -1; + + err = dso__load_sym(dso, map, &ss, &ss, filter, 0); + symsrc__destroy(&ss); + + if (err > 0) { + if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX; + else + dso->binary_type = DSO_BINARY_TYPE__VMLINUX; + dso__set_long_name(dso, vmlinux, vmlinux_allocated); + dso__set_loaded(dso, map->type); + pr_debug("Using %s for symbols\n", symfs_vmlinux); + } + + return err; +} + +int dso__load_vmlinux_path(struct dso *dso, struct map *map, + symbol_filter_t filter) +{ + int i, err = 0; + char *filename = NULL; + + if (!symbol_conf.ignore_vmlinux_buildid) + filename = dso__build_id_filename(dso, NULL, 0); + if (filename != NULL) { + err = dso__load_vmlinux(dso, map, filename, true, filter); + if (err > 0) + goto out; + free(filename); + } + + pr_debug("Looking at the vmlinux_path (%d entries long)\n", + vmlinux_path__nr_entries + 1); + + for (i = 0; i < vmlinux_path__nr_entries; ++i) { + err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter); + if (err > 0) + break; + } +out: + return err; +} + +static int find_matching_kcore(struct map *map, char *dir, size_t 
dir_sz) +{ + char kallsyms_filename[PATH_MAX]; + struct dirent *dent; + int ret = -1; + DIR *d; + + d = opendir(dir); + if (!d) + return -1; + + while (1) { + dent = readdir(d); + if (!dent) + break; + if (dent->d_type != DT_DIR) + continue; + scnprintf(kallsyms_filename, sizeof(kallsyms_filename), + "%s/%s/kallsyms", dir, dent->d_name); + if (!validate_kcore_addresses(kallsyms_filename, map)) { + strlcpy(dir, kallsyms_filename, dir_sz); + ret = 0; + break; + } + } + + closedir(d); + + return ret; +} + +static char *dso__find_kallsyms(struct dso *dso, struct map *map) +{ + u8 host_build_id[BUILD_ID_SIZE]; + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + bool is_host = false; + char path[PATH_MAX]; + + if (!dso->has_build_id) { + /* + * Last resort, if we don't have a build-id and couldn't find + * any vmlinux file, try the running kernel kallsyms table. + */ + goto proc_kallsyms; + } + + if (sysfs__read_build_id("/sys/kernel/notes", host_build_id, + sizeof(host_build_id)) == 0) + is_host = dso__build_id_equal(dso, host_build_id); + + build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); + + scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir, + sbuild_id); + + /* Use /proc/kallsyms if possible */ + if (is_host) { + DIR *d; + int fd; + + /* If no cached kcore go with /proc/kallsyms */ + d = opendir(path); + if (!d) + goto proc_kallsyms; + closedir(d); + + /* + * Do not check the build-id cache, until we know we cannot use + * /proc/kcore. + */ + fd = open("/proc/kcore", O_RDONLY); + if (fd != -1) { + close(fd); + /* If module maps match go with /proc/kallsyms */ + if (!validate_kcore_addresses("/proc/kallsyms", map)) + goto proc_kallsyms; + } + + /* Find kallsyms in build-id cache with kcore */ + if (!find_matching_kcore(map, path, sizeof(path))) + return strdup(path); + + goto proc_kallsyms; + } + + /* Find kallsyms in build-id cache with kcore */ + if (!find_matching_kcore(map, path, sizeof(path))) + return strdup(path); + + scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s", + buildid_dir, sbuild_id); + + if (access(path, F_OK)) { + pr_err("No kallsyms or vmlinux with build-id %s was found\n", + sbuild_id); + return NULL; + } + + return strdup(path); + +proc_kallsyms: + return strdup("/proc/kallsyms"); +} + +static int dso__load_kernel_sym(struct dso *dso, struct map *map, + symbol_filter_t filter) +{ + int err; + const char *kallsyms_filename = NULL; + char *kallsyms_allocated_filename = NULL; + /* + * Step 1: if the user specified a kallsyms or vmlinux filename, use + * it and only it, reporting errors to the user if it cannot be used. + * + * For instance, try to analyse an ARM perf.data file _without_ a + * build-id, or if the user specifies the wrong path to the right + * vmlinux file, obviously we can't fallback to another vmlinux (a + * x86_86 one, on the machine where analysis is being performed, say), + * or worse, /proc/kallsyms. + * + * If the specified file _has_ a build-id and there is a build-id + * section in the perf.data file, we will still do the expected + * validation in dso__load_vmlinux and will bail out if they don't + * match. 
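+ *
+ * Failing that, the code below tries the vmlinux path candidates, then
+ * a kcore/kallsyms pair cached in the build-id cache, and finally
+ * /proc/kallsyms.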
+ */ + if (symbol_conf.kallsyms_name != NULL) { + kallsyms_filename = symbol_conf.kallsyms_name; + goto do_kallsyms; + } + + if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) { + return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, + false, filter); + } + + if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) { + err = dso__load_vmlinux_path(dso, map, filter); + if (err > 0) + return err; + } + + /* do not try local files if a symfs was given */ + if (symbol_conf.symfs[0] != 0) + return -1; + + kallsyms_allocated_filename = dso__find_kallsyms(dso, map); + if (!kallsyms_allocated_filename) + return -1; + + kallsyms_filename = kallsyms_allocated_filename; + +do_kallsyms: + err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); + if (err > 0) + pr_debug("Using %s for symbols\n", kallsyms_filename); + free(kallsyms_allocated_filename); + + if (err > 0 && !dso__is_kcore(dso)) { + dso->binary_type = DSO_BINARY_TYPE__KALLSYMS; + dso__set_long_name(dso, "[kernel.kallsyms]", false); + map__fixup_start(map); + map__fixup_end(map); + } + + return err; +} + +static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, + symbol_filter_t filter) +{ + int err; + const char *kallsyms_filename = NULL; + struct machine *machine; + char path[PATH_MAX]; + + if (!map->groups) { + pr_debug("Guest kernel map has no groups pointer\n"); + return -1; + } + machine = map->groups->machine; + + if (machine__is_default_guest(machine)) { + /* + * If the user specified a vmlinux filename, use it and only + * it, reporting errors to the user if it cannot be used; + * otherwise use the guest_kallsyms file supplied on the + * command line. + */ + if (symbol_conf.default_guest_vmlinux_name != NULL) { + err = dso__load_vmlinux(dso, map, + symbol_conf.default_guest_vmlinux_name, + false, filter); + return err; + } + + kallsyms_filename = symbol_conf.default_guest_kallsyms; + if (!kallsyms_filename) + return -1; + } else { + sprintf(path, "%s/proc/kallsyms", machine->root_dir); + kallsyms_filename = path; + } + + err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); + if (err > 0) + pr_debug("Using %s for symbols\n", kallsyms_filename); + if (err > 0 && !dso__is_kcore(dso)) { + dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; + machine__mmap_name(machine, path, sizeof(path)); + dso__set_long_name(dso, strdup(path), true); + map__fixup_start(map); + map__fixup_end(map); + } + + return err; +} + +static void vmlinux_path__exit(void) +{ + while (--vmlinux_path__nr_entries >= 0) + zfree(&vmlinux_path[vmlinux_path__nr_entries]); + + zfree(&vmlinux_path); +} + +static int vmlinux_path__init(struct perf_session_env *env) +{ + struct utsname uts; + char bf[PATH_MAX]; + char *kernel_version; + + vmlinux_path = malloc(sizeof(char *) * 6); + if (vmlinux_path == NULL) + return -1; + + vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux"); + if (vmlinux_path[vmlinux_path__nr_entries] == NULL) + goto out_fail; + ++vmlinux_path__nr_entries; + vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux"); + if (vmlinux_path[vmlinux_path__nr_entries] == NULL) + goto out_fail; + ++vmlinux_path__nr_entries; + + /* only try kernel version if no symfs was given */ + if (symbol_conf.symfs[0] != 0) + return 0; + + if (env) { + kernel_version = env->os_release; + } else { + if (uname(&uts) < 0) + goto out_fail; + + kernel_version = uts.release; + } + + snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", kernel_version); + vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); + if
(vmlinux_path[vmlinux_path__nr_entries] == NULL) + goto out_fail; + ++vmlinux_path__nr_entries; + snprintf(bf, sizeof(bf), "/usr/lib/debug/boot/vmlinux-%s", + kernel_version); + vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); + if (vmlinux_path[vmlinux_path__nr_entries] == NULL) + goto out_fail; + ++vmlinux_path__nr_entries; + snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", kernel_version); + vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); + if (vmlinux_path[vmlinux_path__nr_entries] == NULL) + goto out_fail; + ++vmlinux_path__nr_entries; + snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux", + kernel_version); + vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); + if (vmlinux_path[vmlinux_path__nr_entries] == NULL) + goto out_fail; + ++vmlinux_path__nr_entries; + + return 0; + +out_fail: + vmlinux_path__exit(); + return -1; +} + +int setup_list(struct strlist **list, const char *list_str, + const char *list_name) +{ + if (list_str == NULL) + return 0; + + *list = strlist__new(true, list_str); + if (!*list) { + pr_err("problems parsing %s list\n", list_name); + return -1; + } + return 0; +} + +int setup_intlist(struct intlist **list, const char *list_str, + const char *list_name) +{ + if (list_str == NULL) + return 0; + + *list = intlist__new(list_str); + if (!*list) { + pr_err("problems parsing %s list\n", list_name); + return -1; + } + return 0; +} + +static bool symbol__read_kptr_restrict(void) +{ + bool value = false; + + if (geteuid() != 0) { + FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r"); + if (fp != NULL) { + char line[8]; + + if (fgets(line, sizeof(line), fp) != NULL) + value = atoi(line) != 0; + + fclose(fp); + } + } + + return value; +} + +int symbol__init(struct perf_session_env *env) +{ + const char *symfs; + + if (symbol_conf.initialized) + return 0; + + symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64)); + + symbol__elf_init(); + + if (symbol_conf.sort_by_name) + symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - + sizeof(struct symbol)); + + if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0) + return -1; + + if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') { + pr_err("'.' is the only non valid --field-separator argument\n"); + return -1; + } + + if (setup_list(&symbol_conf.dso_list, + symbol_conf.dso_list_str, "dso") < 0) + return -1; + + if (setup_list(&symbol_conf.comm_list, + symbol_conf.comm_list_str, "comm") < 0) + goto out_free_dso_list; + + if (setup_intlist(&symbol_conf.pid_list, + symbol_conf.pid_list_str, "pid") < 0) + goto out_free_comm_list; + + if (setup_intlist(&symbol_conf.tid_list, + symbol_conf.tid_list_str, "tid") < 0) + goto out_free_pid_list; + + if (setup_list(&symbol_conf.sym_list, + symbol_conf.sym_list_str, "symbol") < 0) + goto out_free_tid_list; + + /* + * A path to symbols of "/" is identical to "" + * reset here for simplicity. 
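symbol__read_kptr_restrict() above determines whether /proc/kallsyms will be usable at all: with a nonzero kptr_restrict, the kernel prints symbol addresses as zeros for unprivileged readers. The same check as a self-contained sketch; the procfs path is real, and like the function above it treats root as unrestricted:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>
    #include <unistd.h>

    /* True if kptr_restrict hides kernel pointers from this process. */
    static bool kptr_restricted(void)
    {
            bool restricted = false;
            char line[8];
            FILE *fp;

            if (geteuid() == 0)
                    return false;   /* mirror the code above: root is unrestricted */

            fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
            if (!fp)
                    return false;
            if (fgets(line, sizeof(line), fp))
                    restricted = atoi(line) != 0;
            fclose(fp);
            return restricted;
    }

    int main(void)
    {
            printf("kptr_restrict in effect: %s\n",
                   kptr_restricted() ? "yes" : "no");
            return 0;
    }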
+ */ + symfs = realpath(symbol_conf.symfs, NULL); + if (symfs == NULL) + symfs = symbol_conf.symfs; + if (strcmp(symfs, "/") == 0) + symbol_conf.symfs = ""; + if (symfs != symbol_conf.symfs) + free((void *)symfs); + + symbol_conf.kptr_restrict = symbol__read_kptr_restrict(); + + symbol_conf.initialized = true; + return 0; + +out_free_tid_list: + intlist__delete(symbol_conf.tid_list); +out_free_pid_list: + intlist__delete(symbol_conf.pid_list); +out_free_comm_list: + strlist__delete(symbol_conf.comm_list); +out_free_dso_list: + strlist__delete(symbol_conf.dso_list); + return -1; +} + +void symbol__exit(void) +{ + if (!symbol_conf.initialized) + return; + strlist__delete(symbol_conf.sym_list); + strlist__delete(symbol_conf.dso_list); + strlist__delete(symbol_conf.comm_list); + intlist__delete(symbol_conf.tid_list); + intlist__delete(symbol_conf.pid_list); + vmlinux_path__exit(); + symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; + symbol_conf.initialized = false; +} diff --git a/kernel/tools/perf/util/symbol.h b/kernel/tools/perf/util/symbol.h new file mode 100644 index 000000000..095615001 --- /dev/null +++ b/kernel/tools/perf/util/symbol.h @@ -0,0 +1,306 @@ +#ifndef __PERF_SYMBOL +#define __PERF_SYMBOL 1 + +#include +#include +#include +#include "map.h" +#include "../perf.h" +#include +#include +#include +#include +#include +#include "build-id.h" +#include "event.h" +#include "util.h" + +#ifdef HAVE_LIBELF_SUPPORT +#include +#include +#endif +#include + +#include "dso.h" + +/* + * libelf 0.8.x and earlier do not support ELF_C_READ_MMAP; + * for newer versions we can use mmap to reduce memory usage: + */ +#ifdef HAVE_LIBELF_MMAP_SUPPORT +# define PERF_ELF_C_READ_MMAP ELF_C_READ_MMAP +#else +# define PERF_ELF_C_READ_MMAP ELF_C_READ +#endif + +#ifdef HAVE_LIBELF_SUPPORT +extern Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, + GElf_Shdr *shp, const char *name, size_t *idx); +#endif + +#ifndef DMGL_PARAMS +#define DMGL_NO_OPTS 0 /* For readability... */ +#define DMGL_PARAMS (1 << 0) /* Include function args */ +#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */ +#endif + +/** struct symbol - symtab entry + * + * @ignore - resolvable but tools ignore it (e.g. 
idle routines) + */ +struct symbol { + struct rb_node rb_node; + u64 start; + u64 end; + u16 namelen; + u8 binding; + bool ignore; + char name[0]; +}; + +void symbol__delete(struct symbol *sym); +void symbols__delete(struct rb_root *symbols); + +/* symbols__for_each_entry - iterate over symbols (rb_root) + * + * @symbols: the rb_root of symbols + * @pos: the 'struct symbol *' to use as a loop cursor + * @nd: the 'struct rb_node *' to use as a temporary storage + */ +#define symbols__for_each_entry(symbols, pos, nd) \ + for (nd = rb_first(symbols); \ + nd && (pos = rb_entry(nd, struct symbol, rb_node)); \ + nd = rb_next(nd)) + +static inline size_t symbol__size(const struct symbol *sym) +{ + return sym->end - sym->start; +} + +struct strlist; +struct intlist; + +struct symbol_conf { + unsigned short priv_size; + unsigned short nr_events; + bool try_vmlinux_path, + ignore_vmlinux, + ignore_vmlinux_buildid, + show_kernel_path, + use_modules, + allow_aliases, + sort_by_name, + show_nr_samples, + show_total_period, + use_callchain, + cumulate_callchain, + exclude_other, + show_cpu_utilization, + initialized, + kptr_restrict, + annotate_asm_raw, + annotate_src, + event_group, + demangle, + demangle_kernel, + filter_relative, + show_hist_headers, + branch_callstack; + const char *vmlinux_name, + *kallsyms_name, + *source_prefix, + *field_sep; + const char *default_guest_vmlinux_name, + *default_guest_kallsyms, + *default_guest_modules; + const char *guestmount; + const char *dso_list_str, + *comm_list_str, + *pid_list_str, + *tid_list_str, + *sym_list_str, + *col_width_list_str; + struct strlist *dso_list, + *comm_list, + *sym_list, + *dso_from_list, + *dso_to_list, + *sym_from_list, + *sym_to_list; + struct intlist *pid_list, + *tid_list; + const char *symfs; +}; + +extern struct symbol_conf symbol_conf; + +static inline int __symbol__join_symfs(char *bf, size_t size, const char *path) +{ + return path__join(bf, size, symbol_conf.symfs, path); +} + +#define symbol__join_symfs(bf, path) __symbol__join_symfs(bf, sizeof(bf), path) + +extern int vmlinux_path__nr_entries; +extern char **vmlinux_path; + +static inline void *symbol__priv(struct symbol *sym) +{ + return ((void *)sym) - symbol_conf.priv_size; +} + +struct ref_reloc_sym { + const char *name; + u64 addr; + u64 unrelocated_addr; +}; + +struct map_symbol { + struct map *map; + struct symbol *sym; + bool unfolded; + bool has_children; +}; + +struct addr_map_symbol { + struct map *map; + struct symbol *sym; + u64 addr; + u64 al_addr; +}; + +struct branch_info { + struct addr_map_symbol from; + struct addr_map_symbol to; + struct branch_flags flags; +}; + +struct mem_info { + struct addr_map_symbol iaddr; + struct addr_map_symbol daddr; + union perf_mem_data_src data_src; +}; + +struct addr_location { + struct machine *machine; + struct thread *thread; + struct map *map; + struct symbol *sym; + u64 addr; + char level; + u8 filtered; + u8 cpumode; + s32 cpu; +}; + +struct symsrc { + char *name; + int fd; + enum dso_binary_type type; + +#ifdef HAVE_LIBELF_SUPPORT + Elf *elf; + GElf_Ehdr ehdr; + + Elf_Scn *opdsec; + size_t opdidx; + GElf_Shdr opdshdr; + + Elf_Scn *symtab; + GElf_Shdr symshdr; + + Elf_Scn *dynsym; + size_t dynsym_idx; + GElf_Shdr dynshdr; + + bool adjust_symbols; + bool is_64_bit; +#endif +}; + +void symsrc__destroy(struct symsrc *ss); +int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, + enum dso_binary_type type); +bool symsrc__has_symtab(struct symsrc *ss); +bool symsrc__possibly_runtime(struct symsrc *ss); + 
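symbols__for_each_entry() above is the intended way to walk a symbol table in address order. A short usage sketch; dump_symbols() is hypothetical, and it assumes compilation inside the perf tree so that symbol.h and the tools rbtree helpers are available:

    #include <stdio.h>
    #include "symbol.h"

    /* Print every symbol in a table, lowest address first. */
    static void dump_symbols(struct rb_root *symbols, FILE *fp)
    {
            struct symbol *pos;
            struct rb_node *nd;

            symbols__for_each_entry(symbols, pos, nd)
                    fprintf(fp, "%#llx-%#llx %s\n",
                            (unsigned long long)pos->start,
                            (unsigned long long)pos->end, pos->name);
    }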
+int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter); +int dso__load_vmlinux(struct dso *dso, struct map *map, + const char *vmlinux, bool vmlinux_allocated, + symbol_filter_t filter); +int dso__load_vmlinux_path(struct dso *dso, struct map *map, + symbol_filter_t filter); +int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map, + symbol_filter_t filter); + +struct symbol *dso__find_symbol(struct dso *dso, enum map_type type, + u64 addr); +struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, + const char *name); +struct symbol *symbol__next_by_name(struct symbol *sym); + +struct symbol *dso__first_symbol(struct dso *dso, enum map_type type); +struct symbol *dso__next_symbol(struct symbol *sym); + +enum dso_type dso__type_fd(int fd); + +int filename__read_build_id(const char *filename, void *bf, size_t size); +int sysfs__read_build_id(const char *filename, void *bf, size_t size); +int modules__parse(const char *filename, void *arg, + int (*process_module)(void *arg, const char *name, + u64 start)); +int filename__read_debuglink(const char *filename, char *debuglink, + size_t size); + +struct perf_session_env; +int symbol__init(struct perf_session_env *env); +void symbol__exit(void); +void symbol__elf_init(void); +struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name); +size_t symbol__fprintf_symname_offs(const struct symbol *sym, + const struct addr_location *al, FILE *fp); +size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); +size_t symbol__fprintf(struct symbol *sym, FILE *fp); +bool symbol_type__is_a(char symbol_type, enum map_type map_type); +bool symbol__restricted_filename(const char *filename, + const char *restricted_filename); +bool symbol__is_idle(struct symbol *sym); + +int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, + struct symsrc *runtime_ss, symbol_filter_t filter, + int kmodule); +int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, + struct map *map, symbol_filter_t filter); + +void symbols__insert(struct rb_root *symbols, struct symbol *sym); +void symbols__fixup_duplicate(struct rb_root *symbols); +void symbols__fixup_end(struct rb_root *symbols); +void __map_groups__fixup_end(struct map_groups *mg, enum map_type type); + +typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data); +int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data, + bool *is_64_bit); + +#define PERF_KCORE_EXTRACT "/tmp/perf-kcore-XXXXXX" + +struct kcore_extract { + char *kcore_filename; + u64 addr; + u64 offs; + u64 len; + char extract_filename[sizeof(PERF_KCORE_EXTRACT)]; + int fd; +}; + +int kcore_extract__create(struct kcore_extract *kce); +void kcore_extract__delete(struct kcore_extract *kce); + +int kcore_copy(const char *from_dir, const char *to_dir); +int compare_proc_modules(const char *from, const char *to); + +int setup_list(struct strlist **list, const char *list_str, + const char *list_name); +int setup_intlist(struct intlist **list, const char *list_str, + const char *list_name); + +#endif /* __PERF_SYMBOL */ diff --git a/kernel/tools/perf/util/target.c b/kernel/tools/perf/util/target.c new file mode 100644 index 000000000..a53603b27 --- /dev/null +++ b/kernel/tools/perf/util/target.c @@ -0,0 +1,155 @@ +/* + * Helper functions for handling target threads/cpus + * + * Copyright (C) 2012, LG Electronics, Namhyung Kim + * + * Released under the GPL v2. 
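target.c, introduced next, centralizes the precedence rules between --pid/--tid, --cpu, --uid and system-wide mode: the losing switch is dropped and the caller gets a warning code it can format with target__strerror(). A hedged usage sketch (the option values and the check() wrapper are invented; the API is the one declared in target.h later in this patch):

    #include <stdio.h>
    #include "target.h"

    static void check(struct target *target)
    {
            enum target_errno err = target__validate(target);

            if (err != TARGET_ERRNO__SUCCESS) {
                    char buf[128];

                    target__strerror(target, err, buf, sizeof(buf));
                    fprintf(stderr, "warning: %s\n", buf);
            }
    }

    int main(void)
    {
            struct target target = {
                    .pid      = "1234",
                    .cpu_list = "0-3",  /* dropped: PID overrides CPU */
            };

            check(&target);  /* prints "PID/TID switch overriding CPU" */
            return 0;
    }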
+ */ + +#include "target.h" +#include "debug.h" + +#include +#include + + +enum target_errno target__validate(struct target *target) +{ + enum target_errno ret = TARGET_ERRNO__SUCCESS; + + if (target->pid) + target->tid = target->pid; + + /* CPU and PID are mutually exclusive */ + if (target->tid && target->cpu_list) { + target->cpu_list = NULL; + if (ret == TARGET_ERRNO__SUCCESS) + ret = TARGET_ERRNO__PID_OVERRIDE_CPU; + } + + /* UID and PID are mutually exclusive */ + if (target->tid && target->uid_str) { + target->uid_str = NULL; + if (ret == TARGET_ERRNO__SUCCESS) + ret = TARGET_ERRNO__PID_OVERRIDE_UID; + } + + /* UID and CPU are mutually exclusive */ + if (target->uid_str && target->cpu_list) { + target->cpu_list = NULL; + if (ret == TARGET_ERRNO__SUCCESS) + ret = TARGET_ERRNO__UID_OVERRIDE_CPU; + } + + /* PID and SYSTEM are mutually exclusive */ + if (target->tid && target->system_wide) { + target->system_wide = false; + if (ret == TARGET_ERRNO__SUCCESS) + ret = TARGET_ERRNO__PID_OVERRIDE_SYSTEM; + } + + /* UID and SYSTEM are mutually exclusive */ + if (target->uid_str && target->system_wide) { + target->system_wide = false; + if (ret == TARGET_ERRNO__SUCCESS) + ret = TARGET_ERRNO__UID_OVERRIDE_SYSTEM; + } + + /* THREAD and SYSTEM/CPU are mutually exclusive */ + if (target->per_thread && (target->system_wide || target->cpu_list)) { + target->per_thread = false; + if (ret == TARGET_ERRNO__SUCCESS) + ret = TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD; + } + + return ret; +} + +enum target_errno target__parse_uid(struct target *target) +{ + struct passwd pwd, *result; + char buf[1024]; + const char *str = target->uid_str; + + target->uid = UINT_MAX; + if (str == NULL) + return TARGET_ERRNO__SUCCESS; + + /* Try user name first */ + getpwnam_r(str, &pwd, buf, sizeof(buf), &result); + + if (result == NULL) { + /* + * The user name not found. Maybe it's a UID number. + */ + char *endptr; + int uid = strtol(str, &endptr, 10); + + if (*endptr != '\0') + return TARGET_ERRNO__INVALID_UID; + + getpwuid_r(uid, &pwd, buf, sizeof(buf), &result); + + if (result == NULL) + return TARGET_ERRNO__USER_NOT_FOUND; + } + + target->uid = result->pw_uid; + return TARGET_ERRNO__SUCCESS; +} + +/* + * This must have a same ordering as the enum target_errno. + */ +static const char *target__error_str[] = { + "PID/TID switch overriding CPU", + "PID/TID switch overriding UID", + "UID switch overriding CPU", + "PID/TID switch overriding SYSTEM", + "UID switch overriding SYSTEM", + "SYSTEM/CPU switch overriding PER-THREAD", + "Invalid User: %s", + "Problems obtaining information for user %s", +}; + +int target__strerror(struct target *target, int errnum, + char *buf, size_t buflen) +{ + int idx; + const char *msg; + + BUG_ON(buflen == 0); + + if (errnum >= 0) { + const char *err = strerror_r(errnum, buf, buflen); + + if (err != buf) + scnprintf(buf, buflen, "%s", err); + + return 0; + } + + if (errnum < __TARGET_ERRNO__START || errnum >= __TARGET_ERRNO__END) + return -1; + + idx = errnum - __TARGET_ERRNO__START; + msg = target__error_str[idx]; + + switch (errnum) { + case TARGET_ERRNO__PID_OVERRIDE_CPU ... 
+ TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD: + snprintf(buf, buflen, "%s", msg); + break; + + case TARGET_ERRNO__INVALID_UID: + case TARGET_ERRNO__USER_NOT_FOUND: + snprintf(buf, buflen, msg, target->uid_str); + break; + + default: + /* cannot reach here */ + break; + } + + return 0; +} diff --git a/kernel/tools/perf/util/target.h b/kernel/tools/perf/util/target.h new file mode 100644 index 000000000..7381b1ca4 --- /dev/null +++ b/kernel/tools/perf/util/target.h @@ -0,0 +1,79 @@ +#ifndef _PERF_TARGET_H +#define _PERF_TARGET_H + +#include +#include + +struct target { + const char *pid; + const char *tid; + const char *cpu_list; + const char *uid_str; + uid_t uid; + bool system_wide; + bool uses_mmap; + bool default_per_cpu; + bool per_thread; +}; + +enum target_errno { + TARGET_ERRNO__SUCCESS = 0, + + /* + * Choose an arbitrary negative big number not to clash with standard + * errno since SUS requires the errno has distinct positive values. + * See 'Issue 6' in the link below. + * + * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html + */ + __TARGET_ERRNO__START = -10000, + + /* for target__validate() */ + TARGET_ERRNO__PID_OVERRIDE_CPU = __TARGET_ERRNO__START, + TARGET_ERRNO__PID_OVERRIDE_UID, + TARGET_ERRNO__UID_OVERRIDE_CPU, + TARGET_ERRNO__PID_OVERRIDE_SYSTEM, + TARGET_ERRNO__UID_OVERRIDE_SYSTEM, + TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD, + + /* for target__parse_uid() */ + TARGET_ERRNO__INVALID_UID, + TARGET_ERRNO__USER_NOT_FOUND, + + __TARGET_ERRNO__END, +}; + +enum target_errno target__validate(struct target *target); +enum target_errno target__parse_uid(struct target *target); + +int target__strerror(struct target *target, int errnum, char *buf, size_t buflen); + +static inline bool target__has_task(struct target *target) +{ + return target->tid || target->pid || target->uid_str; +} + +static inline bool target__has_cpu(struct target *target) +{ + return target->system_wide || target->cpu_list; +} + +static inline bool target__none(struct target *target) +{ + return !target__has_task(target) && !target__has_cpu(target); +} + +static inline bool target__uses_dummy_map(struct target *target) +{ + bool use_dummy = false; + + if (target->default_per_cpu) + use_dummy = target->per_thread ? true : false; + else if (target__has_task(target) || + (!target__has_cpu(target) && !target->uses_mmap)) + use_dummy = true; + + return use_dummy; +} + +#endif /* _PERF_TARGET_H */ diff --git a/kernel/tools/perf/util/thread-stack.c b/kernel/tools/perf/util/thread-stack.c new file mode 100644 index 000000000..9ed59a452 --- /dev/null +++ b/kernel/tools/perf/util/thread-stack.c @@ -0,0 +1,747 @@ +/* + * thread-stack.c: Synthesize a thread's stack using call / return events + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
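The inline helpers at the bottom of target.h above derive the interesting predicates from the few user-supplied fields. A small illustrative driver, assuming compilation within the perf tree (expected output shown in the comments):

    #include <stdio.h>
    #include "target.h"

    static void describe(const char *what, struct target *t)
    {
            printf("%s: task=%d cpu=%d none=%d dummy_map=%d\n", what,
                   target__has_task(t), target__has_cpu(t),
                   target__none(t), target__uses_dummy_map(t));
    }

    int main(void)
    {
            struct target sys_wide = { .system_wide = true };
            struct target one_tid  = { .tid = "4242", .uses_mmap = true };

            describe("system-wide", &sys_wide); /* task=0 cpu=1 none=0 dummy_map=0 */
            describe("single tid",  &one_tid);  /* task=1 cpu=0 none=0 dummy_map=1 */
            return 0;
    }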
+ * + */ + +#include +#include +#include "thread.h" +#include "event.h" +#include "machine.h" +#include "util.h" +#include "debug.h" +#include "symbol.h" +#include "comm.h" +#include "thread-stack.h" + +#define CALL_PATH_BLOCK_SHIFT 8 +#define CALL_PATH_BLOCK_SIZE (1 << CALL_PATH_BLOCK_SHIFT) +#define CALL_PATH_BLOCK_MASK (CALL_PATH_BLOCK_SIZE - 1) + +struct call_path_block { + struct call_path cp[CALL_PATH_BLOCK_SIZE]; + struct list_head node; +}; + +/** + * struct call_path_root - root of all call paths. + * @call_path: root call path + * @blocks: list of blocks to store call paths + * @next: next free space + * @sz: number of spaces + */ +struct call_path_root { + struct call_path call_path; + struct list_head blocks; + size_t next; + size_t sz; +}; + +/** + * struct call_return_processor - provides a call-back to consume call-return + * information. + * @cpr: call path root + * @process: call-back that accepts call/return information + * @data: anonymous data for call-back + */ +struct call_return_processor { + struct call_path_root *cpr; + int (*process)(struct call_return *cr, void *data); + void *data; +}; + +#define STACK_GROWTH 2048 + +/** + * struct thread_stack_entry - thread stack entry. + * @ret_addr: return address + * @timestamp: timestamp (if known) + * @ref: external reference (e.g. db_id of sample) + * @branch_count: the branch count when the entry was created + * @cp: call path + * @no_call: a 'call' was not seen + */ +struct thread_stack_entry { + u64 ret_addr; + u64 timestamp; + u64 ref; + u64 branch_count; + struct call_path *cp; + bool no_call; +}; + +/** + * struct thread_stack - thread stack constructed from 'call' and 'return' + * branch samples. + * @stack: array that holds the stack + * @cnt: number of entries in the stack + * @sz: current maximum stack size + * @trace_nr: current trace number + * @branch_count: running branch count + * @kernel_start: kernel start address + * @last_time: last timestamp + * @crp: call/return processor + * @comm: current comm + */ +struct thread_stack { + struct thread_stack_entry *stack; + size_t cnt; + size_t sz; + u64 trace_nr; + u64 branch_count; + u64 kernel_start; + u64 last_time; + struct call_return_processor *crp; + struct comm *comm; +}; + +static int thread_stack__grow(struct thread_stack *ts) +{ + struct thread_stack_entry *new_stack; + size_t sz, new_sz; + + new_sz = ts->sz + STACK_GROWTH; + sz = new_sz * sizeof(struct thread_stack_entry); + + new_stack = realloc(ts->stack, sz); + if (!new_stack) + return -ENOMEM; + + ts->stack = new_stack; + ts->sz = new_sz; + + return 0; +} + +static struct thread_stack *thread_stack__new(struct thread *thread, + struct call_return_processor *crp) +{ + struct thread_stack *ts; + + ts = zalloc(sizeof(struct thread_stack)); + if (!ts) + return NULL; + + if (thread_stack__grow(ts)) { + free(ts); + return NULL; + } + + if (thread->mg && thread->mg->machine) + ts->kernel_start = machine__kernel_start(thread->mg->machine); + else + ts->kernel_start = 1ULL << 63; + ts->crp = crp; + + return ts; +} + +static int thread_stack__push(struct thread_stack *ts, u64 ret_addr) +{ + int err = 0; + + if (ts->cnt == ts->sz) { + err = thread_stack__grow(ts); + if (err) { + pr_warning("Out of memory: discarding thread stack\n"); + ts->cnt = 0; + } + } + + ts->stack[ts->cnt++].ret_addr = ret_addr; + + return err; +} + +static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr) +{ + size_t i; + + /* + * In some cases there may be functions which are not seen to return. 
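thread_stack__grow() above extends the stack in flat STACK_GROWTH-sized increments rather than doubling, and thread_stack__push() responds to allocation failure by discarding the whole stack instead of silently losing the new entry. The grow-on-demand half of that pattern in isolation (generic standalone sketch, illustrative names):

    #include <stdlib.h>
    #include <errno.h>

    #define GROWTH 2048     /* mirrors STACK_GROWTH above */

    struct addr_stack {
            unsigned long long *slots;
            size_t cnt, sz;
    };

    static int addr_stack__push(struct addr_stack *s, unsigned long long v)
    {
            if (s->cnt == s->sz) {
                    size_t new_sz = s->sz + GROWTH;
                    void *p = realloc(s->slots, new_sz * sizeof(*s->slots));

                    if (!p)
                            return -ENOMEM; /* caller chooses the fallback */
                    s->slots = p;
                    s->sz = new_sz;
            }
            s->slots[s->cnt++] = v;
            return 0;
    }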
+ * For example when setjmp / longjmp has been used. Or the perf context + * switch in the kernel which doesn't stop and start tracing in exactly + * the same code path. When that happens the return address will be + * further down the stack. If the return address is not found at all, + * we assume the opposite (i.e. this is a return for a call that wasn't + * seen for some reason) and leave the stack alone. + */ + for (i = ts->cnt; i; ) { + if (ts->stack[--i].ret_addr == ret_addr) { + ts->cnt = i; + return; + } + } +} + +static bool thread_stack__in_kernel(struct thread_stack *ts) +{ + if (!ts->cnt) + return false; + + return ts->stack[ts->cnt - 1].cp->in_kernel; +} + +static int thread_stack__call_return(struct thread *thread, + struct thread_stack *ts, size_t idx, + u64 timestamp, u64 ref, bool no_return) +{ + struct call_return_processor *crp = ts->crp; + struct thread_stack_entry *tse; + struct call_return cr = { + .thread = thread, + .comm = ts->comm, + .db_id = 0, + }; + + tse = &ts->stack[idx]; + cr.cp = tse->cp; + cr.call_time = tse->timestamp; + cr.return_time = timestamp; + cr.branch_count = ts->branch_count - tse->branch_count; + cr.call_ref = tse->ref; + cr.return_ref = ref; + if (tse->no_call) + cr.flags |= CALL_RETURN_NO_CALL; + if (no_return) + cr.flags |= CALL_RETURN_NO_RETURN; + + return crp->process(&cr, crp->data); +} + +static int thread_stack__flush(struct thread *thread, struct thread_stack *ts) +{ + struct call_return_processor *crp = ts->crp; + int err; + + if (!crp) { + ts->cnt = 0; + return 0; + } + + while (ts->cnt) { + err = thread_stack__call_return(thread, ts, --ts->cnt, + ts->last_time, 0, true); + if (err) { + pr_err("Error flushing thread stack!\n"); + ts->cnt = 0; + return err; + } + } + + return 0; +} + +int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip, + u64 to_ip, u16 insn_len, u64 trace_nr) +{ + if (!thread) + return -EINVAL; + + if (!thread->ts) { + thread->ts = thread_stack__new(thread, NULL); + if (!thread->ts) { + pr_warning("Out of memory: no thread stack\n"); + return -ENOMEM; + } + thread->ts->trace_nr = trace_nr; + } + + /* + * When the trace is discontinuous, the trace_nr changes. In that case + * the stack might be completely invalid. Better to report nothing than + * to report something misleading, so flush the stack. 
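thread_stack__pop() above implements the tolerant policy this comment describes: scan down for a matching return address and truncate there, or leave the stack alone when nothing matches. Reduced to a standalone helper over plain addresses (the real code stores struct thread_stack_entry):

    #include <stddef.h>

    /*
     * Truncate the stack at the newest entry whose return address
     * matches; if nothing matches, treat it as a return without a
     * call and leave the stack untouched.
     */
    static void stack_pop_ret(const unsigned long long *stack, size_t *cnt,
                              unsigned long long ret_addr)
    {
            size_t i;

            for (i = *cnt; i; ) {
                    if (stack[--i] == ret_addr) {
                            *cnt = i;
                            return;
                    }
            }
    }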
+ */ + if (trace_nr != thread->ts->trace_nr) { + if (thread->ts->trace_nr) + thread_stack__flush(thread, thread->ts); + thread->ts->trace_nr = trace_nr; + } + + /* Stop here if thread_stack__process() is in use */ + if (thread->ts->crp) + return 0; + + if (flags & PERF_IP_FLAG_CALL) { + u64 ret_addr; + + if (!to_ip) + return 0; + ret_addr = from_ip + insn_len; + if (ret_addr == to_ip) + return 0; /* Zero-length calls are excluded */ + return thread_stack__push(thread->ts, ret_addr); + } else if (flags & PERF_IP_FLAG_RETURN) { + if (!from_ip) + return 0; + thread_stack__pop(thread->ts, to_ip); + } + + return 0; +} + +void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr) +{ + if (!thread || !thread->ts) + return; + + if (trace_nr != thread->ts->trace_nr) { + if (thread->ts->trace_nr) + thread_stack__flush(thread, thread->ts); + thread->ts->trace_nr = trace_nr; + } +} + +void thread_stack__free(struct thread *thread) +{ + if (thread->ts) { + thread_stack__flush(thread, thread->ts); + zfree(&thread->ts->stack); + zfree(&thread->ts); + } +} + +void thread_stack__sample(struct thread *thread, struct ip_callchain *chain, + size_t sz, u64 ip) +{ + size_t i; + + if (!thread || !thread->ts) + chain->nr = 1; + else + chain->nr = min(sz, thread->ts->cnt + 1); + + chain->ips[0] = ip; + + for (i = 1; i < chain->nr; i++) + chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr; +} + +static void call_path__init(struct call_path *cp, struct call_path *parent, + struct symbol *sym, u64 ip, bool in_kernel) +{ + cp->parent = parent; + cp->sym = sym; + cp->ip = sym ? 0 : ip; + cp->db_id = 0; + cp->in_kernel = in_kernel; + RB_CLEAR_NODE(&cp->rb_node); + cp->children = RB_ROOT; +} + +static struct call_path_root *call_path_root__new(void) +{ + struct call_path_root *cpr; + + cpr = zalloc(sizeof(struct call_path_root)); + if (!cpr) + return NULL; + call_path__init(&cpr->call_path, NULL, NULL, 0, false); + INIT_LIST_HEAD(&cpr->blocks); + return cpr; +} + +static void call_path_root__free(struct call_path_root *cpr) +{ + struct call_path_block *pos, *n; + + list_for_each_entry_safe(pos, n, &cpr->blocks, node) { + list_del(&pos->node); + free(pos); + } + free(cpr); +} + +static struct call_path *call_path__new(struct call_path_root *cpr, + struct call_path *parent, + struct symbol *sym, u64 ip, + bool in_kernel) +{ + struct call_path_block *cpb; + struct call_path *cp; + size_t n; + + if (cpr->next < cpr->sz) { + cpb = list_last_entry(&cpr->blocks, struct call_path_block, + node); + } else { + cpb = zalloc(sizeof(struct call_path_block)); + if (!cpb) + return NULL; + list_add_tail(&cpb->node, &cpr->blocks); + cpr->sz += CALL_PATH_BLOCK_SIZE; + } + + n = cpr->next++ & CALL_PATH_BLOCK_MASK; + cp = &cpb->cp[n]; + + call_path__init(cp, parent, sym, ip, in_kernel); + + return cp; +} + +static struct call_path *call_path__findnew(struct call_path_root *cpr, + struct call_path *parent, + struct symbol *sym, u64 ip, u64 ks) +{ + struct rb_node **p; + struct rb_node *node_parent = NULL; + struct call_path *cp; + bool in_kernel = ip >= ks; + + if (sym) + ip = 0; + + if (!parent) + return call_path__new(cpr, parent, sym, ip, in_kernel); + + p = &parent->children.rb_node; + while (*p != NULL) { + node_parent = *p; + cp = rb_entry(node_parent, struct call_path, rb_node); + + if (cp->sym == sym && cp->ip == ip) + return cp; + + if (sym < cp->sym || (sym == cp->sym && ip < cp->ip)) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + cp = call_path__new(cpr, parent, sym, ip, in_kernel); + if (!cp) + 
return NULL; + + rb_link_node(&cp->rb_node, node_parent, p); + rb_insert_color(&cp->rb_node, &parent->children); + + return cp; +} + +struct call_return_processor * +call_return_processor__new(int (*process)(struct call_return *cr, void *data), + void *data) +{ + struct call_return_processor *crp; + + crp = zalloc(sizeof(struct call_return_processor)); + if (!crp) + return NULL; + crp->cpr = call_path_root__new(); + if (!crp->cpr) + goto out_free; + crp->process = process; + crp->data = data; + return crp; + +out_free: + free(crp); + return NULL; +} + +void call_return_processor__free(struct call_return_processor *crp) +{ + if (crp) { + call_path_root__free(crp->cpr); + free(crp); + } +} + +static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr, + u64 timestamp, u64 ref, struct call_path *cp, + bool no_call) +{ + struct thread_stack_entry *tse; + int err; + + if (ts->cnt == ts->sz) { + err = thread_stack__grow(ts); + if (err) + return err; + } + + tse = &ts->stack[ts->cnt++]; + tse->ret_addr = ret_addr; + tse->timestamp = timestamp; + tse->ref = ref; + tse->branch_count = ts->branch_count; + tse->cp = cp; + tse->no_call = no_call; + + return 0; +} + +static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts, + u64 ret_addr, u64 timestamp, u64 ref, + struct symbol *sym) +{ + int err; + + if (!ts->cnt) + return 1; + + if (ts->cnt == 1) { + struct thread_stack_entry *tse = &ts->stack[0]; + + if (tse->cp->sym == sym) + return thread_stack__call_return(thread, ts, --ts->cnt, + timestamp, ref, false); + } + + if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) { + return thread_stack__call_return(thread, ts, --ts->cnt, + timestamp, ref, false); + } else { + size_t i = ts->cnt - 1; + + while (i--) { + if (ts->stack[i].ret_addr != ret_addr) + continue; + i += 1; + while (ts->cnt > i) { + err = thread_stack__call_return(thread, ts, + --ts->cnt, + timestamp, ref, + true); + if (err) + return err; + } + return thread_stack__call_return(thread, ts, --ts->cnt, + timestamp, ref, false); + } + } + + return 1; +} + +static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts, + struct perf_sample *sample, + struct addr_location *from_al, + struct addr_location *to_al, u64 ref) +{ + struct call_path_root *cpr = ts->crp->cpr; + struct call_path *cp; + struct symbol *sym; + u64 ip; + + if (sample->ip) { + ip = sample->ip; + sym = from_al->sym; + } else if (sample->addr) { + ip = sample->addr; + sym = to_al->sym; + } else { + return 0; + } + + cp = call_path__findnew(cpr, &cpr->call_path, sym, ip, + ts->kernel_start); + if (!cp) + return -ENOMEM; + + return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp, + true); +} + +static int thread_stack__no_call_return(struct thread *thread, + struct thread_stack *ts, + struct perf_sample *sample, + struct addr_location *from_al, + struct addr_location *to_al, u64 ref) +{ + struct call_path_root *cpr = ts->crp->cpr; + struct call_path *cp, *parent; + u64 ks = ts->kernel_start; + int err; + + if (sample->ip >= ks && sample->addr < ks) { + /* Return to userspace, so pop all kernel addresses */ + while (thread_stack__in_kernel(ts)) { + err = thread_stack__call_return(thread, ts, --ts->cnt, + sample->time, ref, + true); + if (err) + return err; + } + + /* If the stack is empty, push the userspace address */ + if (!ts->cnt) { + cp = call_path__findnew(cpr, &cpr->call_path, + to_al->sym, sample->addr, + ts->kernel_start); + if (!cp) + return -ENOMEM; + return thread_stack__push_cp(ts, 0, sample->time, ref, + 
cp, true); + } + } else if (thread_stack__in_kernel(ts) && sample->ip < ks) { + /* Return to userspace, so pop all kernel addresses */ + while (thread_stack__in_kernel(ts)) { + err = thread_stack__call_return(thread, ts, --ts->cnt, + sample->time, ref, + true); + if (err) + return err; + } + } + + if (ts->cnt) + parent = ts->stack[ts->cnt - 1].cp; + else + parent = &cpr->call_path; + + /* This 'return' had no 'call', so push and pop top of stack */ + cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip, + ts->kernel_start); + if (!cp) + return -ENOMEM; + + err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp, + true); + if (err) + return err; + + return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref, + to_al->sym); +} + +static int thread_stack__trace_begin(struct thread *thread, + struct thread_stack *ts, u64 timestamp, + u64 ref) +{ + struct thread_stack_entry *tse; + int err; + + if (!ts->cnt) + return 0; + + /* Pop trace end */ + tse = &ts->stack[ts->cnt - 1]; + if (tse->cp->sym == NULL && tse->cp->ip == 0) { + err = thread_stack__call_return(thread, ts, --ts->cnt, + timestamp, ref, false); + if (err) + return err; + } + + return 0; +} + +static int thread_stack__trace_end(struct thread_stack *ts, + struct perf_sample *sample, u64 ref) +{ + struct call_path_root *cpr = ts->crp->cpr; + struct call_path *cp; + u64 ret_addr; + + /* No point having 'trace end' on the bottom of the stack */ + if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref)) + return 0; + + cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0, + ts->kernel_start); + if (!cp) + return -ENOMEM; + + ret_addr = sample->ip + sample->insn_len; + + return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp, + false); +} + +int thread_stack__process(struct thread *thread, struct comm *comm, + struct perf_sample *sample, + struct addr_location *from_al, + struct addr_location *to_al, u64 ref, + struct call_return_processor *crp) +{ + struct thread_stack *ts = thread->ts; + int err = 0; + + if (ts) { + if (!ts->crp) { + /* Supersede thread_stack__event() */ + thread_stack__free(thread); + thread->ts = thread_stack__new(thread, crp); + if (!thread->ts) + return -ENOMEM; + ts = thread->ts; + ts->comm = comm; + } + } else { + thread->ts = thread_stack__new(thread, crp); + if (!thread->ts) + return -ENOMEM; + ts = thread->ts; + ts->comm = comm; + } + + /* Flush stack on exec */ + if (ts->comm != comm && thread->pid_ == thread->tid) { + err = thread_stack__flush(thread, ts); + if (err) + return err; + ts->comm = comm; + } + + /* If the stack is empty, put the current symbol on the stack */ + if (!ts->cnt) { + err = thread_stack__bottom(thread, ts, sample, from_al, to_al, + ref); + if (err) + return err; + } + + ts->branch_count += 1; + ts->last_time = sample->time; + + if (sample->flags & PERF_IP_FLAG_CALL) { + struct call_path_root *cpr = ts->crp->cpr; + struct call_path *cp; + u64 ret_addr; + + if (!sample->ip || !sample->addr) + return 0; + + ret_addr = sample->ip + sample->insn_len; + if (ret_addr == sample->addr) + return 0; /* Zero-length calls are excluded */ + + cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, + to_al->sym, sample->addr, + ts->kernel_start); + if (!cp) + return -ENOMEM; + err = thread_stack__push_cp(ts, ret_addr, sample->time, ref, + cp, false); + } else if (sample->flags & PERF_IP_FLAG_RETURN) { + if (!sample->ip || !sample->addr) + return 0; + + err = thread_stack__pop_cp(thread, ts, sample->addr, + sample->time, ref, from_al->sym); + if 
(err) { + if (err < 0) + return err; + err = thread_stack__no_call_return(thread, ts, sample, + from_al, to_al, ref); + } + } else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) { + err = thread_stack__trace_begin(thread, ts, sample->time, ref); + } else if (sample->flags & PERF_IP_FLAG_TRACE_END) { + err = thread_stack__trace_end(ts, sample, ref); + } + + return err; +} diff --git a/kernel/tools/perf/util/thread-stack.h b/kernel/tools/perf/util/thread-stack.h new file mode 100644 index 000000000..b843bbef8 --- /dev/null +++ b/kernel/tools/perf/util/thread-stack.h @@ -0,0 +1,111 @@ +/* + * thread-stack.h: Synthesize a thread's stack using call / return events + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef __PERF_THREAD_STACK_H +#define __PERF_THREAD_STACK_H + +#include + +#include +#include + +struct thread; +struct comm; +struct ip_callchain; +struct symbol; +struct dso; +struct call_return_processor; +struct perf_sample; +struct addr_location; + +/* + * Call/Return flags. + * + * CALL_RETURN_NO_CALL: 'return' but no matching 'call' + * CALL_RETURN_NO_RETURN: 'call' but no matching 'return' + */ +enum { + CALL_RETURN_NO_CALL = 1 << 0, + CALL_RETURN_NO_RETURN = 1 << 1, +}; + +/** + * struct call_return - paired call/return information. + * @thread: thread in which call/return occurred + * @comm: comm in which call/return occurred + * @cp: call path + * @call_time: timestamp of call (if known) + * @return_time: timestamp of return (if known) + * @branch_count: number of branches seen between call and return + * @call_ref: external reference to 'call' sample (e.g. db_id) + * @return_ref: external reference to 'return' sample (e.g. db_id) + * @db_id: id used for db-export + * @flags: Call/Return flags + */ +struct call_return { + struct thread *thread; + struct comm *comm; + struct call_path *cp; + u64 call_time; + u64 return_time; + u64 branch_count; + u64 call_ref; + u64 return_ref; + u64 db_id; + u32 flags; +}; + +/** + * struct call_path - node in list of calls leading to a function call. + * @parent: call path to the parent function call + * @sym: symbol of function called + * @ip: only if sym is null, the ip of the function + * @db_id: id used for db-export + * @in_kernel: whether the function is in the kernel + * @rb_node: node in parent's tree of called functions + * @children: tree of call paths of functions called + * + * In combination with the call_return structure, the call_path structure + * defines a context-sensitive call-graph.
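Since every call_path node (struct just below) carries a parent pointer, a leaf can be expanded into a complete call chain without touching the children rb-trees. A hedged sketch; print_call_chain() is hypothetical and assumes the perf-tree definitions of struct call_path and struct symbol:

    #include <stdio.h>
    #include "symbol.h"
    #include "thread-stack.h"

    /* Innermost frame first; the root node has sym == NULL and ip == 0. */
    static void print_call_chain(struct call_path *cp, FILE *fp)
    {
            for (; cp && cp->parent; cp = cp->parent) {
                    if (cp->sym)
                            fprintf(fp, "  %s\n", cp->sym->name);
                    else
                            fprintf(fp, "  %#llx\n",
                                    (unsigned long long)cp->ip);
            }
    }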
+ */ +struct call_path { + struct call_path *parent; + struct symbol *sym; + u64 ip; + u64 db_id; + bool in_kernel; + struct rb_node rb_node; + struct rb_root children; +}; + +int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip, + u64 to_ip, u16 insn_len, u64 trace_nr); +void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr); +void thread_stack__sample(struct thread *thread, struct ip_callchain *chain, + size_t sz, u64 ip); +void thread_stack__free(struct thread *thread); + +struct call_return_processor * +call_return_processor__new(int (*process)(struct call_return *cr, void *data), + void *data); +void call_return_processor__free(struct call_return_processor *crp); +int thread_stack__process(struct thread *thread, struct comm *comm, + struct perf_sample *sample, + struct addr_location *from_al, + struct addr_location *to_al, u64 ref, + struct call_return_processor *crp); + +#endif diff --git a/kernel/tools/perf/util/thread.c b/kernel/tools/perf/util/thread.c new file mode 100644 index 000000000..1c8fbc958 --- /dev/null +++ b/kernel/tools/perf/util/thread.c @@ -0,0 +1,232 @@ +#include "../perf.h" +#include +#include +#include +#include "session.h" +#include "thread.h" +#include "thread-stack.h" +#include "util.h" +#include "debug.h" +#include "comm.h" +#include "unwind.h" + +int thread__init_map_groups(struct thread *thread, struct machine *machine) +{ + struct thread *leader; + pid_t pid = thread->pid_; + + if (pid == thread->tid || pid == -1) { + thread->mg = map_groups__new(machine); + } else { + leader = machine__findnew_thread(machine, pid, pid); + if (leader) + thread->mg = map_groups__get(leader->mg); + } + + return thread->mg ? 0 : -1; +} + +struct thread *thread__new(pid_t pid, pid_t tid) +{ + char *comm_str; + struct comm *comm; + struct thread *thread = zalloc(sizeof(*thread)); + + if (thread != NULL) { + thread->pid_ = pid; + thread->tid = tid; + thread->ppid = -1; + thread->cpu = -1; + INIT_LIST_HEAD(&thread->comm_list); + + if (unwind__prepare_access(thread) < 0) + goto err_thread; + + comm_str = malloc(32); + if (!comm_str) + goto err_thread; + + snprintf(comm_str, 32, ":%d", tid); + comm = comm__new(comm_str, 0, false); + free(comm_str); + if (!comm) + goto err_thread; + + list_add(&comm->list, &thread->comm_list); + + } + + return thread; + +err_thread: + free(thread); + return NULL; +} + +void thread__delete(struct thread *thread) +{ + struct comm *comm, *tmp; + + thread_stack__free(thread); + + if (thread->mg) { + map_groups__put(thread->mg); + thread->mg = NULL; + } + list_for_each_entry_safe(comm, tmp, &thread->comm_list, list) { + list_del(&comm->list); + comm__free(comm); + } + unwind__finish_access(thread); + + free(thread); +} + +struct thread *thread__get(struct thread *thread) +{ + ++thread->refcnt; + return thread; +} + +void thread__put(struct thread *thread) +{ + if (thread && --thread->refcnt == 0) { + list_del_init(&thread->node); + thread__delete(thread); + } +} + +struct comm *thread__comm(const struct thread *thread) +{ + if (list_empty(&thread->comm_list)) + return NULL; + + return list_first_entry(&thread->comm_list, struct comm, list); +} + +struct comm *thread__exec_comm(const struct thread *thread) +{ + struct comm *comm, *last = NULL; + + list_for_each_entry(comm, &thread->comm_list, list) { + if (comm->exec) + return comm; + last = comm; + } + + return last; +} + +int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp, + bool exec) +{ + struct comm *new, *curr = thread__comm(thread); + int err; 
+ + /* Override the default :tid entry */ + if (!thread->comm_set) { + err = comm__override(curr, str, timestamp, exec); + if (err) + return err; + } else { + new = comm__new(str, timestamp, exec); + if (!new) + return -ENOMEM; + list_add(&new->list, &thread->comm_list); + + if (exec) + unwind__flush_access(thread); + } + + thread->comm_set = true; + + return 0; +} + +const char *thread__comm_str(const struct thread *thread) +{ + const struct comm *comm = thread__comm(thread); + + if (!comm) + return NULL; + + return comm__str(comm); +} + +/* CHECKME: it would probably be better to return the max comm len from its comm list */ +int thread__comm_len(struct thread *thread) +{ + if (!thread->comm_len) { + const char *comm = thread__comm_str(thread); + if (!comm) + return 0; + thread->comm_len = strlen(comm); + } + + return thread->comm_len; +} + +size_t thread__fprintf(struct thread *thread, FILE *fp) +{ + return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) + + map_groups__fprintf(thread->mg, fp); +} + +void thread__insert_map(struct thread *thread, struct map *map) +{ + map_groups__fixup_overlappings(thread->mg, map, stderr); + map_groups__insert(thread->mg, map); +} + +static int thread__clone_map_groups(struct thread *thread, + struct thread *parent) +{ + int i; + + /* This is a new thread, so share the process's map groups. */ + if (thread->pid_ == parent->pid_) + return 0; + + /* But this one is a new process, so copy the maps. */ + for (i = 0; i < MAP__NR_TYPES; ++i) + if (map_groups__clone(thread->mg, parent->mg, i) < 0) + return -ENOMEM; + + return 0; +} + +int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp) +{ + int err; + + if (parent->comm_set) { + const char *comm = thread__comm_str(parent); + if (!comm) + return -ENOMEM; + err = thread__set_comm(thread, comm, timestamp); + if (err) + return err; + } + + thread->ppid = parent->tid; + return thread__clone_map_groups(thread, parent); +} + +void thread__find_cpumode_addr_location(struct thread *thread, + enum map_type type, u64 addr, + struct addr_location *al) +{ + size_t i; + const u8 cpumodes[] = { + PERF_RECORD_MISC_USER, + PERF_RECORD_MISC_KERNEL, + PERF_RECORD_MISC_GUEST_USER, + PERF_RECORD_MISC_GUEST_KERNEL + }; + + for (i = 0; i < ARRAY_SIZE(cpumodes); i++) { + thread__find_addr_location(thread, cpumodes[i], type, addr, al); + if (al->map) + break; + } +} diff --git a/kernel/tools/perf/util/thread.h b/kernel/tools/perf/util/thread.h new file mode 100644 index 000000000..9b8a54dc3 --- /dev/null +++ b/kernel/tools/perf/util/thread.h @@ -0,0 +1,117 @@ +#ifndef __PERF_THREAD_H +#define __PERF_THREAD_H + +#include +#include +#include +#include +#include "symbol.h" +#include +#include + +struct thread_stack; + +struct thread { + union { + struct rb_node rb_node; + struct list_head node; + }; + struct map_groups *mg; + pid_t pid_; /* Not all tools update this */ + pid_t tid; + pid_t ppid; + int cpu; + int refcnt; + char shortname[3]; + bool comm_set; + bool dead; /* if set thread has exited */ + struct list_head comm_list; + int comm_len; + u64 db_id; + + void *priv; + struct thread_stack *ts; +}; + +struct machine; +struct comm; + +struct thread *thread__new(pid_t pid, pid_t tid); +int thread__init_map_groups(struct thread *thread, struct machine *machine); +void thread__delete(struct thread *thread); + +struct thread *thread__get(struct thread *thread); +void thread__put(struct thread *thread); + +static inline void __thread__zput(struct thread **thread) +{ + thread__put(*thread); + *thread =
NULL; +} + +#define thread__zput(thread) __thread__zput(&thread) + +static inline void thread__exited(struct thread *thread) +{ + thread->dead = true; +} + +int __thread__set_comm(struct thread *thread, const char *comm, u64 timestamp, + bool exec); +static inline int thread__set_comm(struct thread *thread, const char *comm, + u64 timestamp) +{ + return __thread__set_comm(thread, comm, timestamp, false); +} + +int thread__comm_len(struct thread *thread); +struct comm *thread__comm(const struct thread *thread); +struct comm *thread__exec_comm(const struct thread *thread); +const char *thread__comm_str(const struct thread *thread); +void thread__insert_map(struct thread *thread, struct map *map); +int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp); +size_t thread__fprintf(struct thread *thread, FILE *fp); + +void thread__find_addr_map(struct thread *thread, + u8 cpumode, enum map_type type, u64 addr, + struct addr_location *al); + +void thread__find_addr_location(struct thread *thread, + u8 cpumode, enum map_type type, u64 addr, + struct addr_location *al); + +void thread__find_cpumode_addr_location(struct thread *thread, + enum map_type type, u64 addr, + struct addr_location *al); + +static inline void *thread__priv(struct thread *thread) +{ + return thread->priv; +} + +static inline void thread__set_priv(struct thread *thread, void *p) +{ + thread->priv = p; +} + +static inline bool thread__is_filtered(struct thread *thread) +{ + if (symbol_conf.comm_list && + !strlist__has_entry(symbol_conf.comm_list, thread__comm_str(thread))) { + return true; + } + + if (symbol_conf.pid_list && + !intlist__has_entry(symbol_conf.pid_list, thread->pid_)) { + return true; + } + + if (symbol_conf.tid_list && + !intlist__has_entry(symbol_conf.tid_list, thread->tid)) { + return true; + } + + return false; +} + +#endif /* __PERF_THREAD_H */ diff --git a/kernel/tools/perf/util/thread_map.c b/kernel/tools/perf/util/thread_map.c new file mode 100644 index 000000000..f93b97347 --- /dev/null +++ b/kernel/tools/perf/util/thread_map.c @@ -0,0 +1,299 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include "strlist.h" +#include +#include "thread_map.h" +#include "util.h" + +/* Skip "." and ".." 
directories */ +static int filter(const struct dirent *dir) +{ + if (dir->d_name[0] == '.') + return 0; + else + return 1; +} + +struct thread_map *thread_map__new_by_pid(pid_t pid) +{ + struct thread_map *threads; + char name[256]; + int items; + struct dirent **namelist = NULL; + int i; + + sprintf(name, "/proc/%d/task", pid); + items = scandir(name, &namelist, filter, NULL); + if (items <= 0) + return NULL; + + threads = malloc(sizeof(*threads) + sizeof(pid_t) * items); + if (threads != NULL) { + for (i = 0; i < items; i++) + threads->map[i] = atoi(namelist[i]->d_name); + threads->nr = items; + } + + for (i = 0; i < items; i++) + zfree(&namelist[i]); + free(namelist); + + return threads; +} + +struct thread_map *thread_map__new_by_tid(pid_t tid) +{ + struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t)); + + if (threads != NULL) { + threads->map[0] = tid; + threads->nr = 1; + } + + return threads; +} + +struct thread_map *thread_map__new_by_uid(uid_t uid) +{ + DIR *proc; + int max_threads = 32, items, i; + char path[256]; + struct dirent dirent, *next, **namelist = NULL; + struct thread_map *threads = malloc(sizeof(*threads) + + max_threads * sizeof(pid_t)); + if (threads == NULL) + goto out; + + proc = opendir("/proc"); + if (proc == NULL) + goto out_free_threads; + + threads->nr = 0; + + while (!readdir_r(proc, &dirent, &next) && next) { + char *end; + bool grow = false; + struct stat st; + pid_t pid = strtol(dirent.d_name, &end, 10); + + if (*end) /* only interested in proper numerical dirents */ + continue; + + snprintf(path, sizeof(path), "/proc/%s", dirent.d_name); + + if (stat(path, &st) != 0) + continue; + + if (st.st_uid != uid) + continue; + + snprintf(path, sizeof(path), "/proc/%d/task", pid); + items = scandir(path, &namelist, filter, NULL); + if (items <= 0) + goto out_free_closedir; + + while (threads->nr + items >= max_threads) { + max_threads *= 2; + grow = true; + } + + if (grow) { + struct thread_map *tmp; + + tmp = realloc(threads, (sizeof(*threads) + + max_threads * sizeof(pid_t))); + if (tmp == NULL) + goto out_free_namelist; + + threads = tmp; + } + + for (i = 0; i < items; i++) + threads->map[threads->nr + i] = atoi(namelist[i]->d_name); + + for (i = 0; i < items; i++) + zfree(&namelist[i]); + free(namelist); + + threads->nr += items; + } + +out_closedir: + closedir(proc); +out: + return threads; + +out_free_threads: + free(threads); + return NULL; + +out_free_namelist: + for (i = 0; i < items; i++) + zfree(&namelist[i]); + free(namelist); + +out_free_closedir: + zfree(&threads); + goto out_closedir; +} + +struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid) +{ + if (pid != -1) + return thread_map__new_by_pid(pid); + + if (tid == -1 && uid != UINT_MAX) + return thread_map__new_by_uid(uid); + + return thread_map__new_by_tid(tid); +} + +static struct thread_map *thread_map__new_by_pid_str(const char *pid_str) +{ + struct thread_map *threads = NULL, *nt; + char name[256]; + int items, total_tasks = 0; + struct dirent **namelist = NULL; + int i, j = 0; + pid_t pid, prev_pid = INT_MAX; + char *end_ptr; + struct str_node *pos; + struct strlist *slist = strlist__new(false, pid_str); + + if (!slist) + return NULL; + + strlist__for_each(pos, slist) { + pid = strtol(pos->s, &end_ptr, 10); + + if (pid == INT_MIN || pid == INT_MAX || + (*end_ptr != '\0' && *end_ptr != ',')) + goto out_free_threads; + + if (pid == prev_pid) + continue; + + sprintf(name, "/proc/%d/task", pid); + items = scandir(name, &namelist, filter, NULL); + if (items <= 0) + goto out_free_threads; + + total_tasks += items; + nt = realloc(threads, (sizeof(*threads) + + sizeof(pid_t) * total_tasks)); + if (nt == NULL) + goto out_free_namelist; + + threads = nt; + + for (i = 0; i < items; i++) { + threads->map[j++] =
atoi(namelist[i]->d_name); + zfree(&namelist[i]); + } + threads->nr = total_tasks; + free(namelist); + } + +out: + strlist__delete(slist); + return threads; + +out_free_namelist: + for (i = 0; i < items; i++) + zfree(&namelist[i]); + free(namelist); + +out_free_threads: + zfree(&threads); + goto out; +} + +struct thread_map *thread_map__new_dummy(void) +{ + struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t)); + + if (threads != NULL) { + threads->map[0] = -1; + threads->nr = 1; + } + return threads; +} + +static struct thread_map *thread_map__new_by_tid_str(const char *tid_str) +{ + struct thread_map *threads = NULL, *nt; + int ntasks = 0; + pid_t tid, prev_tid = INT_MAX; + char *end_ptr; + struct str_node *pos; + struct strlist *slist; + + /* perf-stat expects threads to be generated even if tid not given */ + if (!tid_str) + return thread_map__new_dummy(); + + slist = strlist__new(false, tid_str); + if (!slist) + return NULL; + + strlist__for_each(pos, slist) { + tid = strtol(pos->s, &end_ptr, 10); + + if (tid == INT_MIN || tid == INT_MAX || + (*end_ptr != '\0' && *end_ptr != ',')) + goto out_free_threads; + + if (tid == prev_tid) + continue; + + ntasks++; + nt = realloc(threads, sizeof(*threads) + sizeof(pid_t) * ntasks); + + if (nt == NULL) + goto out_free_threads; + + threads = nt; + threads->map[ntasks - 1] = tid; + threads->nr = ntasks; + } +out: + return threads; + +out_free_threads: + zfree(&threads); + goto out; +} + +struct thread_map *thread_map__new_str(const char *pid, const char *tid, + uid_t uid) +{ + if (pid) + return thread_map__new_by_pid_str(pid); + + if (!tid && uid != UINT_MAX) + return thread_map__new_by_uid(uid); + + return thread_map__new_by_tid_str(tid); +} + +void thread_map__delete(struct thread_map *threads) +{ + free(threads); +} + +size_t thread_map__fprintf(struct thread_map *threads, FILE *fp) +{ + int i; + size_t printed = fprintf(fp, "%d thread%s: ", + threads->nr, threads->nr > 1 ? "s" : ""); + for (i = 0; i < threads->nr; ++i) + printed += fprintf(fp, "%s%d", i ? ", " : "", threads->map[i]); + + return printed + fprintf(fp, "\n"); +} diff --git a/kernel/tools/perf/util/thread_map.h b/kernel/tools/perf/util/thread_map.h new file mode 100644 index 000000000..95313f43c --- /dev/null +++ b/kernel/tools/perf/util/thread_map.h @@ -0,0 +1,30 @@ +#ifndef __PERF_THREAD_MAP_H +#define __PERF_THREAD_MAP_H + +#include +#include + +struct thread_map { + int nr; + pid_t map[]; +}; + +struct thread_map *thread_map__new_dummy(void); +struct thread_map *thread_map__new_by_pid(pid_t pid); +struct thread_map *thread_map__new_by_tid(pid_t tid); +struct thread_map *thread_map__new_by_uid(uid_t uid); +struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid); + +struct thread_map *thread_map__new_str(const char *pid, + const char *tid, uid_t uid); + +void thread_map__delete(struct thread_map *threads); + +size_t thread_map__fprintf(struct thread_map *threads, FILE *fp); + +static inline int thread_map__nr(struct thread_map *threads) +{ + return threads ? 
threads->nr : 1; +} + +#endif /* __PERF_THREAD_MAP_H */ diff --git a/kernel/tools/perf/util/tool.h b/kernel/tools/perf/util/tool.h new file mode 100644 index 000000000..51d9e56c0 --- /dev/null +++ b/kernel/tools/perf/util/tool.h @@ -0,0 +1,52 @@ +#ifndef __PERF_TOOL_H +#define __PERF_TOOL_H + +#include + +struct perf_session; +union perf_event; +struct perf_evlist; +struct perf_evsel; +struct perf_sample; +struct perf_tool; +struct machine; +struct ordered_events; + +typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel, struct machine *machine); + +typedef int (*event_op)(struct perf_tool *tool, union perf_event *event, + struct perf_sample *sample, struct machine *machine); + +typedef int (*event_attr_op)(struct perf_tool *tool, + union perf_event *event, + struct perf_evlist **pevlist); + +typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event, + struct perf_session *session); + +typedef int (*event_oe)(struct perf_tool *tool, union perf_event *event, + struct ordered_events *oe); + +struct perf_tool { + event_sample sample, + read; + event_op mmap, + mmap2, + comm, + fork, + exit, + lost, + throttle, + unthrottle; + event_attr_op attr; + event_op2 tracing_data; + event_oe finished_round; + event_op2 build_id, + id_index; + bool ordered_events; + bool ordering_requires_timestamps; +}; + +#endif /* __PERF_TOOL_H */ diff --git a/kernel/tools/perf/util/top.c b/kernel/tools/perf/util/top.c new file mode 100644 index 000000000..8e517def9 --- /dev/null +++ b/kernel/tools/perf/util/top.c @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo + * + * Refactored from builtin-top.c, see that files for further copyright notes. + * + * Released under the GPL v2. (and only v2, not any later version) + */ + +#include "cpumap.h" +#include "event.h" +#include "evlist.h" +#include "evsel.h" +#include "parse-events.h" +#include "symbol.h" +#include "top.h" +#include + +#define SNPRINTF(buf, size, fmt, args...) \ +({ \ + size_t r = snprintf(buf, size, fmt, ## args); \ + r > size ? 
size : r; \ +}) + +size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) +{ + float samples_per_sec; + float ksamples_per_sec; + float esamples_percent; + struct record_opts *opts = &top->record_opts; + struct target *target = &opts->target; + size_t ret = 0; + + if (top->samples) { + samples_per_sec = top->samples / top->delay_secs; + ksamples_per_sec = top->kernel_samples / top->delay_secs; + esamples_percent = (100.0 * top->exact_samples) / top->samples; + } else { + samples_per_sec = ksamples_per_sec = esamples_percent = 0.0; + } + + if (!perf_guest) { + float ksamples_percent = 0.0; + + if (samples_per_sec) + ksamples_percent = (100.0 * ksamples_per_sec) / + samples_per_sec; + ret = SNPRINTF(bf, size, + " PerfTop:%8.0f irqs/sec kernel:%4.1f%%" + " exact: %4.1f%% [", samples_per_sec, + ksamples_percent, esamples_percent); + } else { + float us_samples_per_sec = top->us_samples / top->delay_secs; + float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs; + float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs; + + ret = SNPRINTF(bf, size, + " PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%" + " guest kernel:%4.1f%% guest us:%4.1f%%" + " exact: %4.1f%% [", samples_per_sec, + 100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) / + samples_per_sec)), + 100.0 - (100.0 * ((samples_per_sec - us_samples_per_sec) / + samples_per_sec)), + 100.0 - (100.0 * ((samples_per_sec - + guest_kernel_samples_per_sec) / + samples_per_sec)), + 100.0 - (100.0 * ((samples_per_sec - + guest_us_samples_per_sec) / + samples_per_sec)), + esamples_percent); + } + + if (top->evlist->nr_entries == 1) { + struct perf_evsel *first = perf_evlist__first(top->evlist); + ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ", + (uint64_t)first->attr.sample_period, + opts->freq ? "Hz" : ""); + } + + ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel)); + + ret += SNPRINTF(bf + ret, size - ret, "], "); + + if (target->pid) + ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s", + target->pid); + else if (target->tid) + ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s", + target->tid); + else if (target->uid_str != NULL) + ret += SNPRINTF(bf + ret, size - ret, " (uid: %s", + target->uid_str); + else + ret += SNPRINTF(bf + ret, size - ret, " (all"); + + if (target->cpu_list) + ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)", + top->evlist->cpus->nr > 1 ? "s" : "", + target->cpu_list); + else { + if (target->tid) + ret += SNPRINTF(bf + ret, size - ret, ")"); + else + ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)", + top->evlist->cpus->nr, + top->evlist->cpus->nr > 1 ? "s" : ""); + } + + return ret; +} + +void perf_top__reset_sample_counters(struct perf_top *top) +{ + top->samples = top->us_samples = top->kernel_samples = + top->exact_samples = top->guest_kernel_samples = + top->guest_us_samples = 0; +} diff --git a/kernel/tools/perf/util/top.h b/kernel/tools/perf/util/top.h new file mode 100644 index 000000000..f92c37abb --- /dev/null +++ b/kernel/tools/perf/util/top.h @@ -0,0 +1,47 @@ +#ifndef __PERF_TOP_H +#define __PERF_TOP_H 1 + +#include "tool.h" +#include +#include +#include +#include + +struct perf_evlist; +struct perf_evsel; +struct perf_session; + +struct perf_top { + struct perf_tool tool; + struct perf_evlist *evlist; + struct record_opts record_opts; + /* + * Symbols will be added here in perf_event__process_sample and will + * get out after decayed. 
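The SNPRINTF wrapper defined above clamps snprintf()'s would-be output length to the buffer size, which is what keeps the repeated ret += SNPRINTF(bf + ret, size - ret, ...) accumulation in perf_top__header_snprintf() from indexing past the end of bf once output truncates. The pattern in isolation, using the same GNU statement-expression form as the original:

    #include <stdio.h>

    #define SNPRINTF(buf, size, fmt, args...)                \
    ({                                                       \
            size_t r = snprintf(buf, size, fmt, ## args);    \
            r > size ? size : r;                             \
    })

    int main(void)
    {
            char bf[16];
            size_t ret = 0;

            ret += SNPRINTF(bf + ret, sizeof(bf) - ret, "%s", "PerfTop:");
            ret += SNPRINTF(bf + ret, sizeof(bf) - ret, "%s", " 1234 irqs/sec");
            /* Second call truncated, but ret never exceeds sizeof(bf). */
            printf("%zu '%s'\n", ret, bf);   /* 16 'PerfTop: 1234 i' */
            return 0;
    }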
+ */ + u64 samples; + u64 kernel_samples, us_samples; + u64 exact_samples; + u64 guest_us_samples, guest_kernel_samples; + int print_entries, count_filter, delay_secs; + int max_stack; + bool hide_kernel_symbols, hide_user_symbols, zero; + bool use_tui, use_stdio; + bool kptr_restrict_warned; + bool vmlinux_warned; + bool dump_symtab; + struct hist_entry *sym_filter_entry; + struct perf_evsel *sym_evsel; + struct perf_session *session; + struct winsize winsize; + int realtime_prio; + int sym_pcnt_filter; + const char *sym_filter; + float min_percent; +}; + +#define CONSOLE_CLEAR "" + +size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size); +void perf_top__reset_sample_counters(struct perf_top *top); +#endif /* __PERF_TOP_H */ diff --git a/kernel/tools/perf/util/trace-event-info.c b/kernel/tools/perf/util/trace-event-info.c new file mode 100644 index 000000000..eb7271601 --- /dev/null +++ b/kernel/tools/perf/util/trace-event-info.c @@ -0,0 +1,595 @@ +/* + * Copyright (C) 2008,2009, Steven Rostedt + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include "util.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../perf.h" +#include "trace-event.h" +#include +#include "evsel.h" +#include "debug.h" + +#define VERSION "0.5" + +static int output_fd; + + +int bigendian(void) +{ + unsigned char str[] = { 0x1, 0x2, 0x3, 0x4, 0x0, 0x0, 0x0, 0x0}; + unsigned int *ptr; + + ptr = (unsigned int *)(void *)str; + return *ptr == 0x01020304; +} + +/* unfortunately, you can not stat debugfs or proc files for size */ +static int record_file(const char *file, ssize_t hdr_sz) +{ + unsigned long long size = 0; + char buf[BUFSIZ], *sizep; + off_t hdr_pos = lseek(output_fd, 0, SEEK_CUR); + int r, fd; + int err = -EIO; + + fd = open(file, O_RDONLY); + if (fd < 0) { + pr_debug("Can't read '%s'", file); + return -errno; + } + + /* put in zeros for file size, then fill true size later */ + if (hdr_sz) { + if (write(output_fd, &size, hdr_sz) != hdr_sz) + goto out; + } + + do { + r = read(fd, buf, BUFSIZ); + if (r > 0) { + size += r; + if (write(output_fd, buf, r) != r) + goto out; + } + } while (r > 0); + + /* ugh, handle big-endian hdr_size == 4 */ + sizep = (char*)&size; + if (bigendian()) + sizep += sizeof(u64) - hdr_sz; + + if (hdr_sz && pwrite(output_fd, sizep, hdr_sz, hdr_pos) < 0) { + pr_debug("writing file size failed\n"); + goto out; + } + + err = 0; +out: + close(fd); + return err; +} + +static int record_header_files(void) +{ + char *path; + struct stat st; + int err = -EIO; + + path = get_tracing_file("events/header_page"); + if (!path) { + pr_debug("can't get 
tracing/events/header_page"); + return -ENOMEM; + } + + if (stat(path, &st) < 0) { + pr_debug("can't read '%s'", path); + goto out; + } + + if (write(output_fd, "header_page", 12) != 12) { + pr_debug("can't write header_page\n"); + goto out; + } + + if (record_file(path, 8) < 0) { + pr_debug("can't record header_page file\n"); + goto out; + } + + put_tracing_file(path); + + path = get_tracing_file("events/header_event"); + if (!path) { + pr_debug("can't get tracing/events/header_event"); + err = -ENOMEM; + goto out; + } + + if (stat(path, &st) < 0) { + pr_debug("can't read '%s'", path); + goto out; + } + + if (write(output_fd, "header_event", 13) != 13) { + pr_debug("can't write header_event\n"); + goto out; + } + + if (record_file(path, 8) < 0) { + pr_debug("can't record header_event file\n"); + goto out; + } + + err = 0; +out: + put_tracing_file(path); + return err; +} + +static bool name_in_tp_list(char *sys, struct tracepoint_path *tps) +{ + while (tps) { + if (!strcmp(sys, tps->name)) + return true; + tps = tps->next; + } + + return false; +} + +static int copy_event_system(const char *sys, struct tracepoint_path *tps) +{ + struct dirent *dent; + struct stat st; + char *format; + DIR *dir; + int count = 0; + int ret; + int err; + + dir = opendir(sys); + if (!dir) { + pr_debug("can't read directory '%s'", sys); + return -errno; + } + + while ((dent = readdir(dir))) { + if (dent->d_type != DT_DIR || + strcmp(dent->d_name, ".") == 0 || + strcmp(dent->d_name, "..") == 0 || + !name_in_tp_list(dent->d_name, tps)) + continue; + if (asprintf(&format, "%s/%s/format", sys, dent->d_name) < 0) { + err = -ENOMEM; + goto out; + } + ret = stat(format, &st); + free(format); + if (ret < 0) + continue; + count++; + } + + if (write(output_fd, &count, 4) != 4) { + err = -EIO; + pr_debug("can't write count\n"); + goto out; + } + + rewinddir(dir); + while ((dent = readdir(dir))) { + if (dent->d_type != DT_DIR || + strcmp(dent->d_name, ".") == 0 || + strcmp(dent->d_name, "..") == 0 || + !name_in_tp_list(dent->d_name, tps)) + continue; + if (asprintf(&format, "%s/%s/format", sys, dent->d_name) < 0) { + err = -ENOMEM; + goto out; + } + ret = stat(format, &st); + + if (ret >= 0) { + err = record_file(format, 8); + if (err) { + free(format); + goto out; + } + } + free(format); + } + err = 0; +out: + closedir(dir); + return err; +} + +static int record_ftrace_files(struct tracepoint_path *tps) +{ + char *path; + int ret; + + path = get_tracing_file("events/ftrace"); + if (!path) { + pr_debug("can't get tracing/events/ftrace"); + return -ENOMEM; + } + + ret = copy_event_system(path, tps); + + put_tracing_file(path); + + return ret; +} + +static bool system_in_tp_list(char *sys, struct tracepoint_path *tps) +{ + while (tps) { + if (!strcmp(sys, tps->system)) + return true; + tps = tps->next; + } + + return false; +} + +static int record_event_files(struct tracepoint_path *tps) +{ + struct dirent *dent; + struct stat st; + char *path; + char *sys; + DIR *dir; + int count = 0; + int ret; + int err; + + path = get_tracing_file("events"); + if (!path) { + pr_debug("can't get tracing/events"); + return -ENOMEM; + } + + dir = opendir(path); + if (!dir) { + err = -errno; + pr_debug("can't read directory '%s'", path); + goto out; + } + + while ((dent = readdir(dir))) { + if (dent->d_type != DT_DIR || + strcmp(dent->d_name, ".") == 0 || + strcmp(dent->d_name, "..") == 0 || + strcmp(dent->d_name, "ftrace") == 0 || + !system_in_tp_list(dent->d_name, tps)) + continue; + count++; + } + + if (write(output_fd, &count, 4) != 4) { 
+ err = -EIO; + pr_debug("can't write count\n"); + goto out; + } + + rewinddir(dir); + while ((dent = readdir(dir))) { + if (dent->d_type != DT_DIR || + strcmp(dent->d_name, ".") == 0 || + strcmp(dent->d_name, "..") == 0 || + strcmp(dent->d_name, "ftrace") == 0 || + !system_in_tp_list(dent->d_name, tps)) + continue; + if (asprintf(&sys, "%s/%s", path, dent->d_name) < 0) { + err = -ENOMEM; + goto out; + } + ret = stat(sys, &st); + if (ret >= 0) { + ssize_t size = strlen(dent->d_name) + 1; + + if (write(output_fd, dent->d_name, size) != size || + copy_event_system(sys, tps) < 0) { + err = -EIO; + free(sys); + goto out; + } + } + free(sys); + } + err = 0; +out: + closedir(dir); + put_tracing_file(path); + + return err; +} + +static int record_proc_kallsyms(void) +{ + unsigned int size; + const char *path = "/proc/kallsyms"; + struct stat st; + int ret, err = 0; + + ret = stat(path, &st); + if (ret < 0) { + /* not found */ + size = 0; + if (write(output_fd, &size, 4) != 4) + err = -EIO; + return err; + } + return record_file(path, 4); +} + +static int record_ftrace_printk(void) +{ + unsigned int size; + char *path; + struct stat st; + int ret, err = 0; + + path = get_tracing_file("printk_formats"); + if (!path) { + pr_debug("can't get tracing/printk_formats"); + return -ENOMEM; + } + + ret = stat(path, &st); + if (ret < 0) { + /* not found */ + size = 0; + if (write(output_fd, &size, 4) != 4) + err = -EIO; + goto out; + } + err = record_file(path, 4); + +out: + put_tracing_file(path); + return err; +} + +static void +put_tracepoints_path(struct tracepoint_path *tps) +{ + while (tps) { + struct tracepoint_path *t = tps; + + tps = tps->next; + zfree(&t->name); + zfree(&t->system); + free(t); + } +} + +static struct tracepoint_path * +get_tracepoints_path(struct list_head *pattrs) +{ + struct tracepoint_path path, *ppath = &path; + struct perf_evsel *pos; + int nr_tracepoints = 0; + + list_for_each_entry(pos, pattrs, node) { + if (pos->attr.type != PERF_TYPE_TRACEPOINT) + continue; + ++nr_tracepoints; + + if (pos->name) { + ppath->next = tracepoint_name_to_path(pos->name); + if (ppath->next) + goto next; + + if (strchr(pos->name, ':') == NULL) + goto try_id; + + goto error; + } + +try_id: + ppath->next = tracepoint_id_to_path(pos->attr.config); + if (!ppath->next) { +error: + pr_debug("No memory to alloc tracepoints list\n"); + put_tracepoints_path(&path); + return NULL; + } +next: + ppath = ppath->next; + } + + return nr_tracepoints > 0 ? path.next : NULL; +} + +bool have_tracepoints(struct list_head *pattrs) +{ + struct perf_evsel *pos; + + list_for_each_entry(pos, pattrs, node) + if (pos->attr.type == PERF_TYPE_TRACEPOINT) + return true; + + return false; +} + +static int tracing_data_header(void) +{ + char buf[20]; + ssize_t size; + + /* just guessing this is someone's birthday.. 
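tracing_data_header() below opens the stream with the three magic bytes {23, 8, 68} followed by the literal "tracing" and a NUL-terminated version string; trace_report() in trace-event-read.c checks the same preamble when reading. A small runnable sketch of the reader-side validation:

#include <stdio.h>
#include <string.h>

/* Validate the 10-byte preamble written by tracing_data_header(). */
static int check_tracing_magic(const unsigned char *buf, size_t len)
{
        static const unsigned char magic[] = { 23, 8, 68 };

        if (len < 10)
                return -1;
        if (memcmp(buf, magic, 3) != 0)
                return -1;              /* no trace data */
        if (memcmp(buf + 3, "tracing", 7) != 0)
                return -1;              /* missing "tracing" tag */
        return 0;
}

int main(void)
{
        unsigned char hdr[10] = { 23, 8, 68,
                                  't', 'r', 'a', 'c', 'i', 'n', 'g' };

        printf("%s\n", check_tracing_magic(hdr, sizeof(hdr)) ? "bad" : "ok");
        return 0;
}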
;) */ + buf[0] = 23; + buf[1] = 8; + buf[2] = 68; + memcpy(buf + 3, "tracing", 7); + + if (write(output_fd, buf, 10) != 10) + return -1; + + size = strlen(VERSION) + 1; + if (write(output_fd, VERSION, size) != size) + return -1; + + /* save endian */ + if (bigendian()) + buf[0] = 1; + else + buf[0] = 0; + + if (write(output_fd, buf, 1) != 1) + return -1; + + /* save size of long */ + buf[0] = sizeof(long); + if (write(output_fd, buf, 1) != 1) + return -1; + + /* save page_size */ + if (write(output_fd, &page_size, 4) != 4) + return -1; + + return 0; +} + +struct tracing_data *tracing_data_get(struct list_head *pattrs, + int fd, bool temp) +{ + struct tracepoint_path *tps; + struct tracing_data *tdata; + int err; + + output_fd = fd; + + tps = get_tracepoints_path(pattrs); + if (!tps) + return NULL; + + tdata = malloc(sizeof(*tdata)); + if (!tdata) + return NULL; + + tdata->temp = temp; + tdata->size = 0; + + if (temp) { + int temp_fd; + + snprintf(tdata->temp_file, sizeof(tdata->temp_file), + "/tmp/perf-XXXXXX"); + if (!mkstemp(tdata->temp_file)) { + pr_debug("Can't make temp file"); + return NULL; + } + + temp_fd = open(tdata->temp_file, O_RDWR); + if (temp_fd < 0) { + pr_debug("Can't read '%s'", tdata->temp_file); + return NULL; + } + + /* + * Set the temp file the default output, so all the + * tracing data are stored into it. + */ + output_fd = temp_fd; + } + + err = tracing_data_header(); + if (err) + goto out; + err = record_header_files(); + if (err) + goto out; + err = record_ftrace_files(tps); + if (err) + goto out; + err = record_event_files(tps); + if (err) + goto out; + err = record_proc_kallsyms(); + if (err) + goto out; + err = record_ftrace_printk(); + +out: + /* + * All tracing data are stored by now, we can restore + * the default output file in case we used temp file. + */ + if (temp) { + tdata->size = lseek(output_fd, 0, SEEK_CUR); + close(output_fd); + output_fd = fd; + } + + if (err) + zfree(&tdata); + + put_tracepoints_path(tps); + return tdata; +} + +int tracing_data_put(struct tracing_data *tdata) +{ + int err = 0; + + if (tdata->temp) { + err = record_file(tdata->temp_file, 0); + unlink(tdata->temp_file); + } + + free(tdata); + return err; +} + +int read_tracing_data(int fd, struct list_head *pattrs) +{ + int err; + struct tracing_data *tdata; + + /* + * We work over the real file, so we can write data + * directly, no temp file is needed. + */ + tdata = tracing_data_get(pattrs, fd, false); + if (!tdata) + return -ENOMEM; + + err = tracing_data_put(tdata); + return err; +} diff --git a/kernel/tools/perf/util/trace-event-parse.c b/kernel/tools/perf/util/trace-event-parse.c new file mode 100644 index 000000000..25d6c737b --- /dev/null +++ b/kernel/tools/perf/util/trace-event-parse.c @@ -0,0 +1,269 @@ +/* + * Copyright (C) 2009, Steven Rostedt + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
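When called with temp == true, tracing_data_get() above stages everything in a temp file and records its size for the caller. Note that mkstemp() returns a file descriptor (or -1 on failure), so the !mkstemp() test above does not actually catch the error case, and the descriptor it returns could be used directly instead of reopening the path. A standalone sketch of the same staging flow with both points addressed:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        char temp_file[] = "/tmp/perf-XXXXXX";
        int temp_fd = mkstemp(temp_file);  /* fd or -1, never "boolean" */
        const char data[] = "staged tracing data";
        off_t size;

        if (temp_fd < 0) {
                perror("mkstemp");
                return 1;
        }

        if (write(temp_fd, data, sizeof(data)) != (ssize_t)sizeof(data)) {
                close(temp_fd);
                unlink(temp_file);
                return 1;
        }

        /* like tdata->size = lseek(output_fd, 0, SEEK_CUR) above */
        size = lseek(temp_fd, 0, SEEK_CUR);
        printf("staged %lld bytes in %s\n", (long long)size, temp_file);

        close(temp_fd);
        unlink(temp_file);                 /* as tracing_data_put() does */
        return 0;
}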
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include +#include +#include +#include +#include + +#include "../perf.h" +#include "util.h" +#include "trace-event.h" + +static int get_common_field(struct scripting_context *context, + int *offset, int *size, const char *type) +{ + struct pevent *pevent = context->pevent; + struct event_format *event; + struct format_field *field; + + if (!*size) { + if (!pevent->events) + return 0; + + event = pevent->events[0]; + field = pevent_find_common_field(event, type); + if (!field) + return 0; + *offset = field->offset; + *size = field->size; + } + + return pevent_read_number(pevent, context->event_data + *offset, *size); +} + +int common_lock_depth(struct scripting_context *context) +{ + static int offset; + static int size; + int ret; + + ret = get_common_field(context, &size, &offset, + "common_lock_depth"); + if (ret < 0) + return -1; + + return ret; +} + +int common_flags(struct scripting_context *context) +{ + static int offset; + static int size; + int ret; + + ret = get_common_field(context, &size, &offset, + "common_flags"); + if (ret < 0) + return -1; + + return ret; +} + +int common_pc(struct scripting_context *context) +{ + static int offset; + static int size; + int ret; + + ret = get_common_field(context, &size, &offset, + "common_preempt_count"); + if (ret < 0) + return -1; + + return ret; +} + +unsigned long long +raw_field_value(struct event_format *event, const char *name, void *data) +{ + struct format_field *field; + unsigned long long val; + + field = pevent_find_any_field(event, name); + if (!field) + return 0ULL; + + pevent_read_number_field(field, data, &val); + + return val; +} + +unsigned long long read_size(struct event_format *event, void *ptr, int size) +{ + return pevent_read_number(event->pevent, ptr, size); +} + +void event_format__fprintf(struct event_format *event, + int cpu, void *data, int size, FILE *fp) +{ + struct pevent_record record; + struct trace_seq s; + + memset(&record, 0, sizeof(record)); + record.cpu = cpu; + record.size = size; + record.data = data; + + trace_seq_init(&s); + pevent_event_info(&s, event, &record); + trace_seq_do_fprintf(&s, fp); + trace_seq_destroy(&s); +} + +void event_format__print(struct event_format *event, + int cpu, void *data, int size) +{ + return event_format__fprintf(event, cpu, data, size, stdout); +} + +void parse_proc_kallsyms(struct pevent *pevent, + char *file, unsigned int size __maybe_unused) +{ + unsigned long long addr; + char *func; + char *line; + char *next = NULL; + char *addr_str; + char *mod; + char *fmt = NULL; + + line = strtok_r(file, "\n", &next); + while (line) { + mod = NULL; + addr_str = strtok_r(line, " ", &fmt); + addr = strtoull(addr_str, NULL, 16); + /* skip character */ + strtok_r(NULL, " ", &fmt); + func = strtok_r(NULL, "\t", &fmt); + mod = strtok_r(NULL, "]", &fmt); + /* truncate the extra '[' */ + if (mod) + mod = mod + 1; + + pevent_register_function(pevent, func, addr, mod); + + line = strtok_r(NULL, "\n", &next); + } +} + +void parse_ftrace_printk(struct pevent *pevent, + char *file, unsigned int size __maybe_unused) +{ + unsigned long long addr; + char *printk; + char *line; + char *next = NULL; + char *addr_str; + char *fmt; + + line = strtok_r(file, "\n", &next); + while (line) { + 
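parse_proc_kallsyms() above walks /proc/kallsyms with nested strtok_r() passes: address, type character, function name, then an optional "[module" trailer whose leading '[' is skipped. The same walk on a local buffer, runnable as-is:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        /* kallsyms format: "%lx %c %s" with an optional "\t[%s]" trailer */
        char file[] = "ffffffff81000000 T startup_64\n"
                      "ffffffffa0000000 t helper\t[dummy_mod]\n";
        char *next = NULL;
        char *line = strtok_r(file, "\n", &next);

        while (line) {
                char *fmt = NULL;
                char *addr_str = strtok_r(line, " ", &fmt);
                unsigned long long addr = strtoull(addr_str, NULL, 16);
                char *func, *mod;

                strtok_r(NULL, " ", &fmt);             /* skip type char */
                func = strtok_r(NULL, "\t", &fmt);
                mod = strtok_r(NULL, "]", &fmt);       /* "[module" or NULL */
                if (mod)
                        mod++;                         /* drop the '[' */

                printf("%#llx %s %s\n", addr, func, mod ? mod : "(core)");
                line = strtok_r(NULL, "\n", &next);
        }
        return 0;
}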
addr_str = strtok_r(line, ":", &fmt); + if (!addr_str) { + warning("printk format with empty entry"); + break; + } + addr = strtoull(addr_str, NULL, 16); + /* fmt still has a space, skip it */ + printk = strdup(fmt+1); + line = strtok_r(NULL, "\n", &next); + pevent_register_print_string(pevent, printk, addr); + } +} + +int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size) +{ + return pevent_parse_event(pevent, buf, size, "ftrace"); +} + +int parse_event_file(struct pevent *pevent, + char *buf, unsigned long size, char *sys) +{ + return pevent_parse_event(pevent, buf, size, sys); +} + +struct event_format *trace_find_next_event(struct pevent *pevent, + struct event_format *event) +{ + static int idx; + + if (!pevent || !pevent->events) + return NULL; + + if (!event) { + idx = 0; + return pevent->events[0]; + } + + if (idx < pevent->nr_events && event == pevent->events[idx]) { + idx++; + if (idx == pevent->nr_events) + return NULL; + return pevent->events[idx]; + } + + for (idx = 1; idx < pevent->nr_events; idx++) { + if (event == pevent->events[idx - 1]) + return pevent->events[idx]; + } + return NULL; +} + +struct flag { + const char *name; + unsigned long long value; +}; + +static const struct flag flags[] = { + { "HI_SOFTIRQ", 0 }, + { "TIMER_SOFTIRQ", 1 }, + { "NET_TX_SOFTIRQ", 2 }, + { "NET_RX_SOFTIRQ", 3 }, + { "BLOCK_SOFTIRQ", 4 }, + { "BLOCK_IOPOLL_SOFTIRQ", 5 }, + { "TASKLET_SOFTIRQ", 6 }, + { "SCHED_SOFTIRQ", 7 }, + { "HRTIMER_SOFTIRQ", 8 }, + { "RCU_SOFTIRQ", 9 }, + + { "HRTIMER_NORESTART", 0 }, + { "HRTIMER_RESTART", 1 }, +}; + +unsigned long long eval_flag(const char *flag) +{ + int i; + + /* + * Some flags in the format files do not get converted. + * If the flag is not numeric, see if it is something that + * we already know about. + */ + if (isdigit(flag[0])) + return strtoull(flag, NULL, 0); + + for (i = 0; i < (int)(sizeof(flags)/sizeof(flags[0])); i++) + if (strcmp(flags[i].name, flag) == 0) + return flags[i].value; + + return 0; +} diff --git a/kernel/tools/perf/util/trace-event-read.c b/kernel/tools/perf/util/trace-event-read.c new file mode 100644 index 000000000..54d9e9b54 --- /dev/null +++ b/kernel/tools/perf/util/trace-event-read.c @@ -0,0 +1,444 @@ +/* + * Copyright (C) 2009, Steven Rostedt + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
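eval_flag() above accepts either a numeric literal — handed to strtoull() with base 0, so hex and octal work — or a symbolic name looked up in the flags[] table, returning 0 for anything unknown. A condensed, runnable version showing both paths (table shortened for brevity):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct flag {
        const char *name;
        unsigned long long value;
};

static const struct flag flags[] = {
        { "HI_SOFTIRQ", 0 },
        { "TIMER_SOFTIRQ", 1 },
        { "NET_TX_SOFTIRQ", 2 },
};

/* Numeric strings pass straight through; known names map to values. */
static unsigned long long eval_flag(const char *flag)
{
        size_t i;

        if (isdigit((unsigned char)flag[0]))
                return strtoull(flag, NULL, 0);

        for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++)
                if (strcmp(flags[i].name, flag) == 0)
                        return flags[i].value;

        return 0;
}

int main(void)
{
        printf("%llu %llu %llu\n", eval_flag("TIMER_SOFTIRQ"),
               eval_flag("0x10"), eval_flag("UNKNOWN"));
        return 0;
}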
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../perf.h" +#include "util.h" +#include "trace-event.h" +#include "debug.h" + +static int input_fd; + +static ssize_t trace_data_size; +static bool repipe; + +static int __do_read(int fd, void *buf, int size) +{ + int rsize = size; + + while (size) { + int ret = read(fd, buf, size); + + if (ret <= 0) + return -1; + + if (repipe) { + int retw = write(STDOUT_FILENO, buf, ret); + + if (retw <= 0 || retw != ret) { + pr_debug("repiping input file"); + return -1; + } + } + + size -= ret; + buf += ret; + } + + return rsize; +} + +static int do_read(void *data, int size) +{ + int r; + + r = __do_read(input_fd, data, size); + if (r <= 0) { + pr_debug("reading input file (size expected=%d received=%d)", + size, r); + return -1; + } + + trace_data_size += r; + + return r; +} + +/* If it fails, the next read will report it */ +static void skip(int size) +{ + char buf[BUFSIZ]; + int r; + + while (size) { + r = size > BUFSIZ ? BUFSIZ : size; + do_read(buf, r); + size -= r; + }; +} + +static unsigned int read4(struct pevent *pevent) +{ + unsigned int data; + + if (do_read(&data, 4) < 0) + return 0; + return __data2host4(pevent, data); +} + +static unsigned long long read8(struct pevent *pevent) +{ + unsigned long long data; + + if (do_read(&data, 8) < 0) + return 0; + return __data2host8(pevent, data); +} + +static char *read_string(void) +{ + char buf[BUFSIZ]; + char *str = NULL; + int size = 0; + off_t r; + char c; + + for (;;) { + r = read(input_fd, &c, 1); + if (r < 0) { + pr_debug("reading input file"); + goto out; + } + + if (!r) { + pr_debug("no data"); + goto out; + } + + if (repipe) { + int retw = write(STDOUT_FILENO, &c, 1); + + if (retw <= 0 || retw != r) { + pr_debug("repiping input file string"); + goto out; + } + } + + buf[size++] = c; + + if (!c) + break; + } + + trace_data_size += size; + + str = malloc(size); + if (str) + memcpy(str, buf, size); +out: + return str; +} + +static int read_proc_kallsyms(struct pevent *pevent) +{ + unsigned int size; + char *buf; + + size = read4(pevent); + if (!size) + return 0; + + buf = malloc(size + 1); + if (buf == NULL) + return -1; + + if (do_read(buf, size) < 0) { + free(buf); + return -1; + } + buf[size] = '\0'; + + parse_proc_kallsyms(pevent, buf, size); + + free(buf); + return 0; +} + +static int read_ftrace_printk(struct pevent *pevent) +{ + unsigned int size; + char *buf; + + /* it can have 0 size */ + size = read4(pevent); + if (!size) + return 0; + + buf = malloc(size); + if (buf == NULL) + return -1; + + if (do_read(buf, size) < 0) { + free(buf); + return -1; + } + + parse_ftrace_printk(pevent, buf, size); + + free(buf); + return 0; +} + +static int read_header_files(struct pevent *pevent) +{ + unsigned long long size; + char *header_page; + char buf[BUFSIZ]; + int ret = 0; + + if (do_read(buf, 12) < 0) + return -1; + + if (memcmp(buf, "header_page", 12) != 0) { + pr_debug("did not read header page"); + return -1; + } + + size = read8(pevent); + + header_page = malloc(size); + if (header_page == NULL) + return -1; + + if (do_read(header_page, size) < 0) { + pr_debug("did not read header page"); + 
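read_proc_kallsyms() and read_ftrace_printk() above both consume a section stored as a 4-byte length followed by the payload, where a zero length marks an absent section. A runnable sketch of that record shape — it assumes host byte order and single full read()s, whereas read4()/__do_read() also handle endian conversion and short reads:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Read one length-prefixed section and NUL-terminate the payload. */
static char *read_section(int fd)
{
        unsigned int size;
        char *buf;

        if (read(fd, &size, 4) != 4)
                return NULL;
        if (!size)                      /* optional section, absent */
                return NULL;

        buf = malloc(size + 1);
        if (!buf)
                return NULL;

        if (read(fd, buf, size) != (ssize_t)size) {
                free(buf);
                return NULL;
        }
        buf[size] = '\0';
        return buf;
}

int main(void)
{
        int fds[2];
        unsigned int size = 6;
        char *s;

        if (pipe(fds))
                return 1;
        (void)!write(fds[1], &size, 4);
        (void)!write(fds[1], "kernel", 6);
        close(fds[1]);

        s = read_section(fds[0]);
        printf("%s\n", s ? s : "(none)");
        free(s);
        return 0;
}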
free(header_page); + return -1; + } + + if (!pevent_parse_header_page(pevent, header_page, size, + pevent_get_long_size(pevent))) { + /* + * The commit field in the page is of type long, + * use that instead, since it represents the kernel. + */ + pevent_set_long_size(pevent, pevent->header_page_size_size); + } + free(header_page); + + if (do_read(buf, 13) < 0) + return -1; + + if (memcmp(buf, "header_event", 13) != 0) { + pr_debug("did not read header event"); + return -1; + } + + size = read8(pevent); + skip(size); + + return ret; +} + +static int read_ftrace_file(struct pevent *pevent, unsigned long long size) +{ + char *buf; + + buf = malloc(size); + if (buf == NULL) + return -1; + + if (do_read(buf, size) < 0) { + free(buf); + return -1; + } + + parse_ftrace_file(pevent, buf, size); + free(buf); + return 0; +} + +static int read_event_file(struct pevent *pevent, char *sys, + unsigned long long size) +{ + char *buf; + + buf = malloc(size); + if (buf == NULL) + return -1; + + if (do_read(buf, size) < 0) { + free(buf); + return -1; + } + + parse_event_file(pevent, buf, size, sys); + free(buf); + return 0; +} + +static int read_ftrace_files(struct pevent *pevent) +{ + unsigned long long size; + int count; + int i; + int ret; + + count = read4(pevent); + + for (i = 0; i < count; i++) { + size = read8(pevent); + ret = read_ftrace_file(pevent, size); + if (ret) + return ret; + } + return 0; +} + +static int read_event_files(struct pevent *pevent) +{ + unsigned long long size; + char *sys; + int systems; + int count; + int i,x; + int ret; + + systems = read4(pevent); + + for (i = 0; i < systems; i++) { + sys = read_string(); + if (sys == NULL) + return -1; + + count = read4(pevent); + + for (x=0; x < count; x++) { + size = read8(pevent); + ret = read_event_file(pevent, sys, size); + if (ret) + return ret; + } + } + return 0; +} + +ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe) +{ + char buf[BUFSIZ]; + char test[] = { 23, 8, 68 }; + char *version; + int show_version = 0; + int show_funcs = 0; + int show_printk = 0; + ssize_t size = -1; + int file_bigendian; + int host_bigendian; + int file_long_size; + int file_page_size; + struct pevent *pevent = NULL; + int err; + + repipe = __repipe; + input_fd = fd; + + if (do_read(buf, 3) < 0) + return -1; + if (memcmp(buf, test, 3) != 0) { + pr_debug("no trace data in the file"); + return -1; + } + + if (do_read(buf, 7) < 0) + return -1; + if (memcmp(buf, "tracing", 7) != 0) { + pr_debug("not a trace file (missing 'tracing' tag)"); + return -1; + } + + version = read_string(); + if (version == NULL) + return -1; + if (show_version) + printf("version = %s\n", version); + free(version); + + if (do_read(buf, 1) < 0) + return -1; + file_bigendian = buf[0]; + host_bigendian = bigendian(); + + if (trace_event__init(tevent)) { + pr_debug("trace_event__init failed"); + goto out; + } + + pevent = tevent->pevent; + + pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT); + pevent_set_file_bigendian(pevent, file_bigendian); + pevent_set_host_bigendian(pevent, host_bigendian); + + if (do_read(buf, 1) < 0) + goto out; + file_long_size = buf[0]; + + file_page_size = read4(pevent); + if (!file_page_size) + goto out; + + pevent_set_long_size(pevent, file_long_size); + pevent_set_page_size(pevent, file_page_size); + + err = read_header_files(pevent); + if (err) + goto out; + err = read_ftrace_files(pevent); + if (err) + goto out; + err = read_event_files(pevent); + if (err) + goto out; + err = read_proc_kallsyms(pevent); + if (err) + goto out; + err = 
read_ftrace_printk(pevent); + if (err) + goto out; + + size = trace_data_size; + repipe = false; + + if (show_funcs) { + pevent_print_funcs(pevent); + } else if (show_printk) { + pevent_print_printk(pevent); + } + + pevent = NULL; + +out: + if (pevent) + trace_event__cleanup(tevent); + return size; +} diff --git a/kernel/tools/perf/util/trace-event-scripting.c b/kernel/tools/perf/util/trace-event-scripting.c new file mode 100644 index 000000000..9df61059a --- /dev/null +++ b/kernel/tools/perf/util/trace-event-scripting.c @@ -0,0 +1,177 @@ +/* + * trace-event-scripting. Scripting engine common and initialization code. + * + * Copyright (C) 2009-2010 Tom Zanussi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include + +#include "../perf.h" +#include "util.h" +#include "trace-event.h" + +struct scripting_context *scripting_context; + +static int flush_script_unsupported(void) +{ + return 0; +} + +static int stop_script_unsupported(void) +{ + return 0; +} + +static void process_event_unsupported(union perf_event *event __maybe_unused, + struct perf_sample *sample __maybe_unused, + struct perf_evsel *evsel __maybe_unused, + struct addr_location *al __maybe_unused) +{ +} + +static void print_python_unsupported_msg(void) +{ + fprintf(stderr, "Python scripting not supported." 
+ " Install libpython and rebuild perf to enable it.\n" + "For example:\n # apt-get install python-dev (ubuntu)" + "\n # yum install python-devel (Fedora)" + "\n etc.\n"); +} + +static int python_start_script_unsupported(const char *script __maybe_unused, + int argc __maybe_unused, + const char **argv __maybe_unused) +{ + print_python_unsupported_msg(); + + return -1; +} + +static int python_generate_script_unsupported(struct pevent *pevent + __maybe_unused, + const char *outfile + __maybe_unused) +{ + print_python_unsupported_msg(); + + return -1; +} + +struct scripting_ops python_scripting_unsupported_ops = { + .name = "Python", + .start_script = python_start_script_unsupported, + .flush_script = flush_script_unsupported, + .stop_script = stop_script_unsupported, + .process_event = process_event_unsupported, + .generate_script = python_generate_script_unsupported, +}; + +static void register_python_scripting(struct scripting_ops *scripting_ops) +{ + int err; + err = script_spec_register("Python", scripting_ops); + if (err) + die("error registering Python script extension"); + + err = script_spec_register("py", scripting_ops); + if (err) + die("error registering py script extension"); + + scripting_context = malloc(sizeof(struct scripting_context)); +} + +#ifdef NO_LIBPYTHON +void setup_python_scripting(void) +{ + register_python_scripting(&python_scripting_unsupported_ops); +} +#else +extern struct scripting_ops python_scripting_ops; + +void setup_python_scripting(void) +{ + register_python_scripting(&python_scripting_ops); +} +#endif + +static void print_perl_unsupported_msg(void) +{ + fprintf(stderr, "Perl scripting not supported." + " Install libperl and rebuild perf to enable it.\n" + "For example:\n # apt-get install libperl-dev (ubuntu)" + "\n # yum install 'perl(ExtUtils::Embed)' (Fedora)" + "\n etc.\n"); +} + +static int perl_start_script_unsupported(const char *script __maybe_unused, + int argc __maybe_unused, + const char **argv __maybe_unused) +{ + print_perl_unsupported_msg(); + + return -1; +} + +static int perl_generate_script_unsupported(struct pevent *pevent + __maybe_unused, + const char *outfile __maybe_unused) +{ + print_perl_unsupported_msg(); + + return -1; +} + +struct scripting_ops perl_scripting_unsupported_ops = { + .name = "Perl", + .start_script = perl_start_script_unsupported, + .flush_script = flush_script_unsupported, + .stop_script = stop_script_unsupported, + .process_event = process_event_unsupported, + .generate_script = perl_generate_script_unsupported, +}; + +static void register_perl_scripting(struct scripting_ops *scripting_ops) +{ + int err; + err = script_spec_register("Perl", scripting_ops); + if (err) + die("error registering Perl script extension"); + + err = script_spec_register("pl", scripting_ops); + if (err) + die("error registering pl script extension"); + + scripting_context = malloc(sizeof(struct scripting_context)); +} + +#ifdef NO_LIBPERL +void setup_perl_scripting(void) +{ + register_perl_scripting(&perl_scripting_unsupported_ops); +} +#else +extern struct scripting_ops perl_scripting_ops; + +void setup_perl_scripting(void) +{ + register_perl_scripting(&perl_scripting_ops); +} +#endif diff --git a/kernel/tools/perf/util/trace-event.c b/kernel/tools/perf/util/trace-event.c new file mode 100644 index 000000000..6322d3716 --- /dev/null +++ b/kernel/tools/perf/util/trace-event.c @@ -0,0 +1,82 @@ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "trace-event.h" +#include "util.h" + +/* + * 
global trace_event object used by trace_event__tp_format + * + * TODO There's no cleanup call for this. Add some sort of + * __exit function support and call trace_event__cleanup + * there. + */ +static struct trace_event tevent; + +int trace_event__init(struct trace_event *t) +{ + struct pevent *pevent = pevent_alloc(); + + if (pevent) { + t->plugin_list = traceevent_load_plugins(pevent); + t->pevent = pevent; + } + + return pevent ? 0 : -1; +} + +void trace_event__cleanup(struct trace_event *t) +{ + traceevent_unload_plugins(t->plugin_list, t->pevent); + pevent_free(t->pevent); +} + +static struct event_format* +tp_format(const char *sys, const char *name) +{ + struct pevent *pevent = tevent.pevent; + struct event_format *event = NULL; + char path[PATH_MAX]; + size_t size; + char *data; + + scnprintf(path, PATH_MAX, "%s/%s/%s/format", + tracing_events_path, sys, name); + + if (filename__read_str(path, &data, &size)) + return NULL; + + pevent_parse_format(pevent, &event, data, size, sys); + + free(data); + return event; +} + +struct event_format* +trace_event__tp_format(const char *sys, const char *name) +{ + static bool initialized; + + if (!initialized) { + int be = traceevent_host_bigendian(); + struct pevent *pevent; + + if (trace_event__init(&tevent)) + return NULL; + + pevent = tevent.pevent; + pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT); + pevent_set_file_bigendian(pevent, be); + pevent_set_host_bigendian(pevent, be); + initialized = true; + } + + return tp_format(sys, name); +} diff --git a/kernel/tools/perf/util/trace-event.h b/kernel/tools/perf/util/trace-event.h new file mode 100644 index 000000000..d5168f0be --- /dev/null +++ b/kernel/tools/perf/util/trace-event.h @@ -0,0 +1,93 @@ +#ifndef _PERF_UTIL_TRACE_EVENT_H +#define _PERF_UTIL_TRACE_EVENT_H + +#include +#include "parse-events.h" + +struct machine; +struct perf_sample; +union perf_event; +struct perf_tool; +struct thread; +struct plugin_list; + +struct trace_event { + struct pevent *pevent; + struct plugin_list *plugin_list; +}; + +int trace_event__init(struct trace_event *t); +void trace_event__cleanup(struct trace_event *t); +struct event_format* +trace_event__tp_format(const char *sys, const char *name); + +int bigendian(void); + +void event_format__fprintf(struct event_format *event, + int cpu, void *data, int size, FILE *fp); + +void event_format__print(struct event_format *event, + int cpu, void *data, int size); + +int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size); +int parse_event_file(struct pevent *pevent, + char *buf, unsigned long size, char *sys); + +unsigned long long +raw_field_value(struct event_format *event, const char *name, void *data); + +void parse_proc_kallsyms(struct pevent *pevent, char *file, unsigned int size); +void parse_ftrace_printk(struct pevent *pevent, char *file, unsigned int size); + +ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe); + +struct event_format *trace_find_next_event(struct pevent *pevent, + struct event_format *event); +unsigned long long read_size(struct event_format *event, void *ptr, int size); +unsigned long long eval_flag(const char *flag); + +int read_tracing_data(int fd, struct list_head *pattrs); + +struct tracing_data { + /* size is only valid if temp is 'true' */ + ssize_t size; + bool temp; + char temp_file[50]; +}; + +struct tracing_data *tracing_data_get(struct list_head *pattrs, + int fd, bool temp); +int tracing_data_put(struct tracing_data *tdata); + + +struct addr_location; + +struct perf_session; + +struct 
scripting_ops { + const char *name; + int (*start_script) (const char *script, int argc, const char **argv); + int (*flush_script) (void); + int (*stop_script) (void); + void (*process_event) (union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct addr_location *al); + int (*generate_script) (struct pevent *pevent, const char *outfile); +}; + +int script_spec_register(const char *spec, struct scripting_ops *ops); + +void setup_perl_scripting(void); +void setup_python_scripting(void); + +struct scripting_context { + struct pevent *pevent; + void *event_data; +}; + +int common_pc(struct scripting_context *context); +int common_flags(struct scripting_context *context); +int common_lock_depth(struct scripting_context *context); + +#endif /* _PERF_UTIL_TRACE_EVENT_H */ diff --git a/kernel/tools/perf/util/tsc.c b/kernel/tools/perf/util/tsc.c new file mode 100644 index 000000000..4d4210d4e --- /dev/null +++ b/kernel/tools/perf/util/tsc.c @@ -0,0 +1,30 @@ +#include +#include + +#include "tsc.h" + +u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc) +{ + u64 t, quot, rem; + + t = ns - tc->time_zero; + quot = t / tc->time_mult; + rem = t % tc->time_mult; + return (quot << tc->time_shift) + + (rem << tc->time_shift) / tc->time_mult; +} + +u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc) +{ + u64 quot, rem; + + quot = cyc >> tc->time_shift; + rem = cyc & ((1 << tc->time_shift) - 1); + return tc->time_zero + quot * tc->time_mult + + ((rem * tc->time_mult) >> tc->time_shift); +} + +u64 __weak rdtsc(void) +{ + return 0; +} diff --git a/kernel/tools/perf/util/tsc.h b/kernel/tools/perf/util/tsc.h new file mode 100644 index 000000000..a8b78f1b3 --- /dev/null +++ b/kernel/tools/perf/util/tsc.h @@ -0,0 +1,12 @@ +#ifndef __PERF_TSC_H +#define __PERF_TSC_H + +#include + +#include "../arch/x86/util/tsc.h" + +u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc); +u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc); +u64 rdtsc(void); + +#endif diff --git a/kernel/tools/perf/util/unwind-libdw.c b/kernel/tools/perf/util/unwind-libdw.c new file mode 100644 index 000000000..2dcfe9a7c --- /dev/null +++ b/kernel/tools/perf/util/unwind-libdw.c @@ -0,0 +1,211 @@ +#include +#include +#include +#include +#include +#include "debug.h" +#include "unwind.h" +#include "unwind-libdw.h" +#include "machine.h" +#include "thread.h" +#include +#include "event.h" +#include "perf_regs.h" + +static char *debuginfo_path; + +static const Dwfl_Callbacks offline_callbacks = { + .find_debuginfo = dwfl_standard_find_debuginfo, + .debuginfo_path = &debuginfo_path, + .section_address = dwfl_offline_section_address, +}; + +static int __report_module(struct addr_location *al, u64 ip, + struct unwind_info *ui) +{ + Dwfl_Module *mod; + struct dso *dso = NULL; + + thread__find_addr_location(ui->thread, + PERF_RECORD_MISC_USER, + MAP__FUNCTION, ip, al); + + if (al->map) + dso = al->map->dso; + + if (!dso) + return 0; + + mod = dwfl_addrmodule(ui->dwfl, ip); + if (!mod) + mod = dwfl_report_elf(ui->dwfl, dso->short_name, + dso->long_name, -1, al->map->start, + false); + + return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 
0 : -1; +} + +static int report_module(u64 ip, struct unwind_info *ui) +{ + struct addr_location al; + + return __report_module(&al, ip, ui); +} + +static int entry(u64 ip, struct unwind_info *ui) + +{ + struct unwind_entry e; + struct addr_location al; + + if (__report_module(&al, ip, ui)) + return -1; + + e.ip = ip; + e.map = al.map; + e.sym = al.sym; + + pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n", + al.sym ? al.sym->name : "''", + ip, + al.map ? al.map->map_ip(al.map, ip) : (u64) 0); + + return ui->cb(&e, ui->arg); +} + +static pid_t next_thread(Dwfl *dwfl, void *arg, void **thread_argp) +{ + /* We want only single thread to be processed. */ + if (*thread_argp != NULL) + return 0; + + *thread_argp = arg; + return dwfl_pid(dwfl); +} + +static int access_dso_mem(struct unwind_info *ui, Dwarf_Addr addr, + Dwarf_Word *data) +{ + struct addr_location al; + ssize_t size; + + thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER, + MAP__FUNCTION, addr, &al); + if (!al.map) { + pr_debug("unwind: no map for %lx\n", (unsigned long)addr); + return -1; + } + + if (!al.map->dso) + return -1; + + size = dso__data_read_addr(al.map->dso, al.map, ui->machine, + addr, (u8 *) data, sizeof(*data)); + + return !(size == sizeof(*data)); +} + +static bool memory_read(Dwfl *dwfl __maybe_unused, Dwarf_Addr addr, Dwarf_Word *result, + void *arg) +{ + struct unwind_info *ui = arg; + struct stack_dump *stack = &ui->sample->user_stack; + u64 start, end; + int offset; + int ret; + + ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP); + if (ret) + return false; + + end = start + stack->size; + + /* Check overflow. */ + if (addr + sizeof(Dwarf_Word) < addr) + return false; + + if (addr < start || addr + sizeof(Dwarf_Word) > end) { + ret = access_dso_mem(ui, addr, result); + if (ret) { + pr_debug("unwind: access_mem 0x%" PRIx64 " not inside range" + " 0x%" PRIx64 "-0x%" PRIx64 "\n", + addr, start, end); + return false; + } + return true; + } + + offset = addr - start; + *result = *(Dwarf_Word *)&stack->data[offset]; + pr_debug("unwind: access_mem addr 0x%" PRIx64 ", val %lx, offset %d\n", + addr, (unsigned long)*result, offset); + return true; +} + +static const Dwfl_Thread_Callbacks callbacks = { + .next_thread = next_thread, + .memory_read = memory_read, + .set_initial_registers = libdw__arch_set_initial_registers, +}; + +static int +frame_callback(Dwfl_Frame *state, void *arg) +{ + struct unwind_info *ui = arg; + Dwarf_Addr pc; + + if (!dwfl_frame_pc(state, &pc, NULL)) { + pr_err("%s", dwfl_errmsg(-1)); + return DWARF_CB_ABORT; + } + + return entry(pc, ui) || !(--ui->max_stack) ? 
+ DWARF_CB_ABORT : DWARF_CB_OK; +} + +int unwind__get_entries(unwind_entry_cb_t cb, void *arg, + struct thread *thread, + struct perf_sample *data, + int max_stack) +{ + struct unwind_info ui = { + .sample = data, + .thread = thread, + .machine = thread->mg->machine, + .cb = cb, + .arg = arg, + .max_stack = max_stack, + }; + Dwarf_Word ip; + int err = -EINVAL; + + if (!data->user_regs.regs) + return -EINVAL; + + ui.dwfl = dwfl_begin(&offline_callbacks); + if (!ui.dwfl) + goto out; + + err = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP); + if (err) + goto out; + + err = report_module(ip, &ui); + if (err) + goto out; + + if (!dwfl_attach_state(ui.dwfl, EM_NONE, thread->tid, &callbacks, &ui)) + goto out; + + err = dwfl_getthread_frames(ui.dwfl, thread->tid, frame_callback, &ui); + + if (err && !ui.max_stack) + err = 0; + + out: + if (err) + pr_debug("unwind: failed with '%s'\n", dwfl_errmsg(-1)); + + dwfl_end(ui.dwfl); + return 0; +} diff --git a/kernel/tools/perf/util/unwind-libdw.h b/kernel/tools/perf/util/unwind-libdw.h new file mode 100644 index 000000000..417a1426f --- /dev/null +++ b/kernel/tools/perf/util/unwind-libdw.h @@ -0,0 +1,21 @@ +#ifndef __PERF_UNWIND_LIBDW_H +#define __PERF_UNWIND_LIBDW_H + +#include +#include "event.h" +#include "thread.h" +#include "unwind.h" + +bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg); + +struct unwind_info { + Dwfl *dwfl; + struct perf_sample *sample; + struct machine *machine; + struct thread *thread; + unwind_entry_cb_t cb; + void *arg; + int max_stack; +}; + +#endif /* __PERF_UNWIND_LIBDW_H */ diff --git a/kernel/tools/perf/util/unwind-libunwind.c b/kernel/tools/perf/util/unwind-libunwind.c new file mode 100644 index 000000000..7b09a443a --- /dev/null +++ b/kernel/tools/perf/util/unwind-libunwind.c @@ -0,0 +1,653 @@ +/* + * Post mortem Dwarf CFI based unwinding on top of regs and stack dumps. 
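memory_read() in unwind-libdw.c above — and access_mem() later in this file — answer the unwinder's memory reads from the sampled user-stack dump when the address falls inside [sp, sp + dump size), falling back to reading the DSO otherwise. The window test in isolation, including the wrap-around guard, as a runnable sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int read_from_stack_dump(const uint8_t *stack, uint64_t sp,
                                uint64_t dump_size, uint64_t addr,
                                uint64_t *result)
{
        uint64_t end = sp + dump_size;

        /* reject addresses that wrap when the access size is added */
        if (addr + sizeof(*result) < addr)
                return -1;

        if (addr < sp || addr + sizeof(*result) > end)
                return -1;  /* caller falls back to dso__data_read_addr() */

        /* memcpy() instead of a cast keeps the access alignment-safe */
        memcpy(result, stack + (addr - sp), sizeof(*result));
        return 0;
}

int main(void)
{
        uint8_t dump[64] = { 0 };
        uint64_t sp = 0x7ffd0000, val = 0;

        dump[8] = 0x2a;         /* little-endian 42 stored at sp + 8 */

        if (!read_from_stack_dump(dump, sp, sizeof(dump), sp + 8, &val))
                printf("0x%llx\n", (unsigned long long)val);
        return 0;
}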
+ * + * Lots of this code have been borrowed or heavily inspired from parts of + * the libunwind 0.99 code which are (amongst other contributors I may have + * forgotten): + * + * Copyright (C) 2002-2007 Hewlett-Packard Co + * Contributed by David Mosberger-Tang + * + * And the bugs have been added by: + * + * Copyright (C) 2010, Frederic Weisbecker + * Copyright (C) 2012, Jiri Olsa + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "callchain.h" +#include "thread.h" +#include "session.h" +#include "perf_regs.h" +#include "unwind.h" +#include "symbol.h" +#include "util.h" +#include "debug.h" + +extern int +UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as, + unw_word_t ip, + unw_dyn_info_t *di, + unw_proc_info_t *pi, + int need_unwind_info, void *arg); + +#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table) + +extern int +UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug, + unw_word_t ip, + unw_word_t segbase, + const char *obj_name, unw_word_t start, + unw_word_t end); + +#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame) + +#define DW_EH_PE_FORMAT_MASK 0x0f /* format of the encoded value */ +#define DW_EH_PE_APPL_MASK 0x70 /* how the value is to be applied */ + +/* Pointer-encoding formats: */ +#define DW_EH_PE_omit 0xff +#define DW_EH_PE_ptr 0x00 /* pointer-sized unsigned value */ +#define DW_EH_PE_udata4 0x03 /* unsigned 32-bit value */ +#define DW_EH_PE_udata8 0x04 /* unsigned 64-bit value */ +#define DW_EH_PE_sdata4 0x0b /* signed 32-bit value */ +#define DW_EH_PE_sdata8 0x0c /* signed 64-bit value */ + +/* Pointer-encoding application: */ +#define DW_EH_PE_absptr 0x00 /* absolute value */ +#define DW_EH_PE_pcrel 0x10 /* rel. to addr. 
of encoded value */ + +/* + * The following are not documented by LSB v1.3, yet they are used by + * GCC, presumably they aren't documented by LSB since they aren't + * used on Linux: + */ +#define DW_EH_PE_funcrel 0x40 /* start-of-procedure-relative */ +#define DW_EH_PE_aligned 0x50 /* aligned pointer */ + +/* Flags intentionaly not handled, since they're not needed: + * #define DW_EH_PE_indirect 0x80 + * #define DW_EH_PE_uleb128 0x01 + * #define DW_EH_PE_udata2 0x02 + * #define DW_EH_PE_sleb128 0x09 + * #define DW_EH_PE_sdata2 0x0a + * #define DW_EH_PE_textrel 0x20 + * #define DW_EH_PE_datarel 0x30 + */ + +struct unwind_info { + struct perf_sample *sample; + struct machine *machine; + struct thread *thread; +}; + +#define dw_read(ptr, type, end) ({ \ + type *__p = (type *) ptr; \ + type __v; \ + if ((__p + 1) > (type *) end) \ + return -EINVAL; \ + __v = *__p++; \ + ptr = (typeof(ptr)) __p; \ + __v; \ + }) + +static int __dw_read_encoded_value(u8 **p, u8 *end, u64 *val, + u8 encoding) +{ + u8 *cur = *p; + *val = 0; + + switch (encoding) { + case DW_EH_PE_omit: + *val = 0; + goto out; + case DW_EH_PE_ptr: + *val = dw_read(cur, unsigned long, end); + goto out; + default: + break; + } + + switch (encoding & DW_EH_PE_APPL_MASK) { + case DW_EH_PE_absptr: + break; + case DW_EH_PE_pcrel: + *val = (unsigned long) cur; + break; + default: + return -EINVAL; + } + + if ((encoding & 0x07) == 0x00) + encoding |= DW_EH_PE_udata4; + + switch (encoding & DW_EH_PE_FORMAT_MASK) { + case DW_EH_PE_sdata4: + *val += dw_read(cur, s32, end); + break; + case DW_EH_PE_udata4: + *val += dw_read(cur, u32, end); + break; + case DW_EH_PE_sdata8: + *val += dw_read(cur, s64, end); + break; + case DW_EH_PE_udata8: + *val += dw_read(cur, u64, end); + break; + default: + return -EINVAL; + } + + out: + *p = cur; + return 0; +} + +#define dw_read_encoded_value(ptr, end, enc) ({ \ + u64 __v; \ + if (__dw_read_encoded_value(&ptr, end, &__v, enc)) { \ + return -EINVAL; \ + } \ + __v; \ + }) + +static u64 elf_section_offset(int fd, const char *name) +{ + Elf *elf; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + u64 offset = 0; + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) + return 0; + + do { + if (gelf_getehdr(elf, &ehdr) == NULL) + break; + + if (!elf_section_by_name(elf, &ehdr, &shdr, name, NULL)) + break; + + offset = shdr.sh_offset; + } while (0); + + elf_end(elf); + return offset; +} + +#ifndef NO_LIBUNWIND_DEBUG_FRAME +static int elf_is_exec(int fd, const char *name) +{ + Elf *elf; + GElf_Ehdr ehdr; + int retval = 0; + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) + return 0; + if (gelf_getehdr(elf, &ehdr) == NULL) + goto out; + + retval = (ehdr.e_type == ET_EXEC); + +out: + elf_end(elf); + pr_debug("unwind: elf_is_exec(%s): %d\n", name, retval); + return retval; +} +#endif + +struct table_entry { + u32 start_ip_offset; + u32 fde_offset; +}; + +struct eh_frame_hdr { + unsigned char version; + unsigned char eh_frame_ptr_enc; + unsigned char fde_count_enc; + unsigned char table_enc; + + /* + * The rest of the header is variable-length and consists of the + * following members: + * + * encoded_t eh_frame_ptr; + * encoded_t fde_count; + */ + + /* A single encoded pointer should not be more than 8 bytes. 
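__dw_read_encoded_value() above decodes these DW_EH_PE pointers: the low nibble selects the integer format and the 0x70 bits select the base it is applied to (absolute, or relative to the address of the encoded field itself). A runnable sketch limited to the 4-byte formats; the real routine covers the 8-byte and pointer-sized cases too:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DW_EH_PE_FORMAT_MASK    0x0f
#define DW_EH_PE_APPL_MASK      0x70
#define DW_EH_PE_udata4         0x03
#define DW_EH_PE_sdata4         0x0b
#define DW_EH_PE_pcrel          0x10

static int decode_pointer(const uint8_t **p, uint8_t encoding, uint64_t *val)
{
        const uint8_t *cur = *p;

        *val = 0;
        if ((encoding & DW_EH_PE_APPL_MASK) == DW_EH_PE_pcrel)
                *val = (uintptr_t)cur;  /* base = address of the field */

        switch (encoding & DW_EH_PE_FORMAT_MASK) {
        case DW_EH_PE_udata4: {
                uint32_t v;
                memcpy(&v, cur, 4);
                *val += v;
                break;
        }
        case DW_EH_PE_sdata4: {
                int32_t v;
                memcpy(&v, cur, 4);
                *val += (int64_t)v;
                break;
        }
        default:
                return -1;
        }

        *p = cur + 4;
        return 0;
}

int main(void)
{
        uint8_t buf[4];
        uint32_t raw = 0x1000;
        const uint8_t *p = buf;
        uint64_t val;

        memcpy(buf, &raw, 4);
        if (!decode_pointer(&p, DW_EH_PE_pcrel | DW_EH_PE_udata4, &val))
                printf("decoded %#llx (field at %p)\n",
                       (unsigned long long)val, (void *)buf);
        return 0;
}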
*/ + u64 enc[2]; + + /* + * struct { + * encoded_t start_ip; + * encoded_t fde_addr; + * } binary_search_table[fde_count]; + */ + char data[0]; +} __packed; + +static int unwind_spec_ehframe(struct dso *dso, struct machine *machine, + u64 offset, u64 *table_data, u64 *segbase, + u64 *fde_count) +{ + struct eh_frame_hdr hdr; + u8 *enc = (u8 *) &hdr.enc; + u8 *end = (u8 *) &hdr.data; + ssize_t r; + + r = dso__data_read_offset(dso, machine, offset, + (u8 *) &hdr, sizeof(hdr)); + if (r != sizeof(hdr)) + return -EINVAL; + + /* We dont need eh_frame_ptr, just skip it. */ + dw_read_encoded_value(enc, end, hdr.eh_frame_ptr_enc); + + *fde_count = dw_read_encoded_value(enc, end, hdr.fde_count_enc); + *segbase = offset; + *table_data = (enc - (u8 *) &hdr) + offset; + return 0; +} + +static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine, + u64 *table_data, u64 *segbase, + u64 *fde_count) +{ + int ret = -EINVAL, fd; + u64 offset = dso->data.eh_frame_hdr_offset; + + if (offset == 0) { + fd = dso__data_fd(dso, machine); + if (fd < 0) + return -EINVAL; + + /* Check the .eh_frame section for unwinding info */ + offset = elf_section_offset(fd, ".eh_frame_hdr"); + dso->data.eh_frame_hdr_offset = offset; + } + + if (offset) + ret = unwind_spec_ehframe(dso, machine, offset, + table_data, segbase, + fde_count); + + return ret; +} + +#ifndef NO_LIBUNWIND_DEBUG_FRAME +static int read_unwind_spec_debug_frame(struct dso *dso, + struct machine *machine, u64 *offset) +{ + int fd; + u64 ofs = dso->data.debug_frame_offset; + + if (ofs == 0) { + fd = dso__data_fd(dso, machine); + if (fd < 0) + return -EINVAL; + + /* Check the .debug_frame section for unwinding info */ + ofs = elf_section_offset(fd, ".debug_frame"); + dso->data.debug_frame_offset = ofs; + } + + *offset = ofs; + if (*offset) + return 0; + + return -EINVAL; +} +#endif + +static struct map *find_map(unw_word_t ip, struct unwind_info *ui) +{ + struct addr_location al; + + thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER, + MAP__FUNCTION, ip, &al); + return al.map; +} + +static int +find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, + int need_unwind_info, void *arg) +{ + struct unwind_info *ui = arg; + struct map *map; + unw_dyn_info_t di; + u64 table_data, segbase, fde_count; + + map = find_map(ip, ui); + if (!map || !map->dso) + return -EINVAL; + + pr_debug("unwind: find_proc_info dso %s\n", map->dso->name); + + /* Check the .eh_frame section for unwinding info */ + if (!read_unwind_spec_eh_frame(map->dso, ui->machine, + &table_data, &segbase, &fde_count)) { + memset(&di, 0, sizeof(di)); + di.format = UNW_INFO_FORMAT_REMOTE_TABLE; + di.start_ip = map->start; + di.end_ip = map->end; + di.u.rti.segbase = map->start + segbase; + di.u.rti.table_data = map->start + table_data; + di.u.rti.table_len = fde_count * sizeof(struct table_entry) + / sizeof(unw_word_t); + return dwarf_search_unwind_table(as, ip, &di, pi, + need_unwind_info, arg); + } + +#ifndef NO_LIBUNWIND_DEBUG_FRAME + /* Check the .debug_frame section for unwinding info */ + if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) { + int fd = dso__data_fd(map->dso, ui->machine); + int is_exec = elf_is_exec(fd, map->dso->name); + unw_word_t base = is_exec ? 
0 : map->start; + + memset(&di, 0, sizeof(di)); + if (dwarf_find_debug_frame(0, &di, ip, base, map->dso->name, + map->start, map->end)) + return dwarf_search_unwind_table(as, ip, &di, pi, + need_unwind_info, arg); + } +#endif + + return -EINVAL; +} + +static int access_fpreg(unw_addr_space_t __maybe_unused as, + unw_regnum_t __maybe_unused num, + unw_fpreg_t __maybe_unused *val, + int __maybe_unused __write, + void __maybe_unused *arg) +{ + pr_err("unwind: access_fpreg unsupported\n"); + return -UNW_EINVAL; +} + +static int get_dyn_info_list_addr(unw_addr_space_t __maybe_unused as, + unw_word_t __maybe_unused *dil_addr, + void __maybe_unused *arg) +{ + return -UNW_ENOINFO; +} + +static int resume(unw_addr_space_t __maybe_unused as, + unw_cursor_t __maybe_unused *cu, + void __maybe_unused *arg) +{ + pr_err("unwind: resume unsupported\n"); + return -UNW_EINVAL; +} + +static int +get_proc_name(unw_addr_space_t __maybe_unused as, + unw_word_t __maybe_unused addr, + char __maybe_unused *bufp, size_t __maybe_unused buf_len, + unw_word_t __maybe_unused *offp, void __maybe_unused *arg) +{ + pr_err("unwind: get_proc_name unsupported\n"); + return -UNW_EINVAL; +} + +static int access_dso_mem(struct unwind_info *ui, unw_word_t addr, + unw_word_t *data) +{ + struct addr_location al; + ssize_t size; + + thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER, + MAP__FUNCTION, addr, &al); + if (!al.map) { + pr_debug("unwind: no map for %lx\n", (unsigned long)addr); + return -1; + } + + if (!al.map->dso) + return -1; + + size = dso__data_read_addr(al.map->dso, al.map, ui->machine, + addr, (u8 *) data, sizeof(*data)); + + return !(size == sizeof(*data)); +} + +static int access_mem(unw_addr_space_t __maybe_unused as, + unw_word_t addr, unw_word_t *valp, + int __write, void *arg) +{ + struct unwind_info *ui = arg; + struct stack_dump *stack = &ui->sample->user_stack; + u64 start, end; + int offset; + int ret; + + /* Don't support write, probably not needed. */ + if (__write || !stack || !ui->sample->user_regs.regs) { + *valp = 0; + return 0; + } + + ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP); + if (ret) + return ret; + + end = start + stack->size; + + /* Check overflow. */ + if (addr + sizeof(unw_word_t) < addr) + return -EINVAL; + + if (addr < start || addr + sizeof(unw_word_t) >= end) { + ret = access_dso_mem(ui, addr, valp); + if (ret) { + pr_debug("unwind: access_mem %p not inside range" + " 0x%" PRIx64 "-0x%" PRIx64 "\n", + (void *) addr, start, end); + *valp = 0; + return ret; + } + return 0; + } + + offset = addr - start; + *valp = *(unw_word_t *)&stack->data[offset]; + pr_debug("unwind: access_mem addr %p val %lx, offset %d\n", + (void *) addr, (unsigned long)*valp, offset); + return 0; +} + +static int access_reg(unw_addr_space_t __maybe_unused as, + unw_regnum_t regnum, unw_word_t *valp, + int __write, void *arg) +{ + struct unwind_info *ui = arg; + int id, ret; + u64 val; + + /* Don't support write, I suspect we don't need it. 
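get_entries() further down drives libunwind through unw_init_remote() and unw_step(), with every register and memory access routed through the accessors above. The same cursor loop in its simplest, local form — unwinding the running process itself rather than a sample (link with -lunwind):

#define UNW_LOCAL_ONLY
#include <libunwind.h>
#include <stdio.h>

int main(void)
{
        unw_context_t uc;
        unw_cursor_t c;

        unw_getcontext(&uc);
        if (unw_init_local(&c, &uc))
                return 1;

        /* step outward one frame at a time, reading IP at each stop */
        while (unw_step(&c) > 0) {
                unw_word_t ip;

                unw_get_reg(&c, UNW_REG_IP, &ip);
                printf("ip = 0x%lx\n", (unsigned long)ip);
        }
        return 0;
}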
*/ + if (__write) { + pr_err("unwind: access_reg w %d\n", regnum); + return 0; + } + + if (!ui->sample->user_regs.regs) { + *valp = 0; + return 0; + } + + id = libunwind__arch_reg_id(regnum); + if (id < 0) + return -EINVAL; + + ret = perf_reg_value(&val, &ui->sample->user_regs, id); + if (ret) { + pr_err("unwind: can't read reg %d\n", regnum); + return ret; + } + + *valp = (unw_word_t) val; + pr_debug("unwind: reg %d, val %lx\n", regnum, (unsigned long)*valp); + return 0; +} + +static void put_unwind_info(unw_addr_space_t __maybe_unused as, + unw_proc_info_t *pi __maybe_unused, + void *arg __maybe_unused) +{ + pr_debug("unwind: put_unwind_info called\n"); +} + +static int entry(u64 ip, struct thread *thread, + unwind_entry_cb_t cb, void *arg) +{ + struct unwind_entry e; + struct addr_location al; + + thread__find_addr_location(thread, PERF_RECORD_MISC_USER, + MAP__FUNCTION, ip, &al); + + e.ip = ip; + e.map = al.map; + e.sym = al.sym; + + pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n", + al.sym ? al.sym->name : "''", + ip, + al.map ? al.map->map_ip(al.map, ip) : (u64) 0); + + return cb(&e, arg); +} + +static void display_error(int err) +{ + switch (err) { + case UNW_EINVAL: + pr_err("unwind: Only supports local.\n"); + break; + case UNW_EUNSPEC: + pr_err("unwind: Unspecified error.\n"); + break; + case UNW_EBADREG: + pr_err("unwind: Register unavailable.\n"); + break; + default: + break; + } +} + +static unw_accessors_t accessors = { + .find_proc_info = find_proc_info, + .put_unwind_info = put_unwind_info, + .get_dyn_info_list_addr = get_dyn_info_list_addr, + .access_mem = access_mem, + .access_reg = access_reg, + .access_fpreg = access_fpreg, + .resume = resume, + .get_proc_name = get_proc_name, +}; + +int unwind__prepare_access(struct thread *thread) +{ + unw_addr_space_t addr_space; + + if (callchain_param.record_mode != CALLCHAIN_DWARF) + return 0; + + addr_space = unw_create_addr_space(&accessors, 0); + if (!addr_space) { + pr_err("unwind: Can't create unwind address space.\n"); + return -ENOMEM; + } + + unw_set_caching_policy(addr_space, UNW_CACHE_GLOBAL); + thread__set_priv(thread, addr_space); + + return 0; +} + +void unwind__flush_access(struct thread *thread) +{ + unw_addr_space_t addr_space; + + if (callchain_param.record_mode != CALLCHAIN_DWARF) + return; + + addr_space = thread__priv(thread); + unw_flush_cache(addr_space, 0, 0); +} + +void unwind__finish_access(struct thread *thread) +{ + unw_addr_space_t addr_space; + + if (callchain_param.record_mode != CALLCHAIN_DWARF) + return; + + addr_space = thread__priv(thread); + unw_destroy_addr_space(addr_space); +} + +static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb, + void *arg, int max_stack) +{ + unw_addr_space_t addr_space; + unw_cursor_t c; + int ret; + + addr_space = thread__priv(ui->thread); + if (addr_space == NULL) + return -1; + + ret = unw_init_remote(&c, addr_space, ui); + if (ret) + display_error(ret); + + while (!ret && (unw_step(&c) > 0) && max_stack--) { + unw_word_t ip; + + unw_get_reg(&c, UNW_REG_IP, &ip); + ret = ip ? 
entry(ip, ui->thread, cb, arg) : 0;
+ }
+
+ return ret;
+}
+
+int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ struct thread *thread,
+ struct perf_sample *data, int max_stack)
+{
+ u64 ip;
+ struct unwind_info ui = {
+ .sample = data,
+ .thread = thread,
+ .machine = thread->mg->machine,
+ };
+ int ret;
+
+ if (!data->user_regs.regs)
+ return -EINVAL;
+
+ ret = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP);
+ if (ret)
+ return ret;
+
+ ret = entry(ip, thread, cb, arg);
+ if (ret)
+ return -ENOMEM;
+
+ return --max_stack > 0 ? get_entries(&ui, cb, arg, max_stack) : 0;
+}
diff --git a/kernel/tools/perf/util/unwind.h b/kernel/tools/perf/util/unwind.h
new file mode 100644
index 000000000..12790cf94
--- /dev/null
+++ b/kernel/tools/perf/util/unwind.h
@@ -0,0 +1,55 @@
+#ifndef __UNWIND_H
+#define __UNWIND_H
+
+#include <linux/types.h>
+#include "event.h"
+#include "symbol.h"
+#include "thread.h"
+
+struct unwind_entry {
+ struct map *map;
+ struct symbol *sym;
+ u64 ip;
+};
+
+typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
+
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ struct thread *thread,
+ struct perf_sample *data, int max_stack);
+/* libunwind specific */
+#ifdef HAVE_LIBUNWIND_SUPPORT
+int libunwind__arch_reg_id(int regnum);
+int unwind__prepare_access(struct thread *thread);
+void unwind__flush_access(struct thread *thread);
+void unwind__finish_access(struct thread *thread);
+#else
+static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
+{
+ return 0;
+}
+
+static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
+static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
+#endif
+#else
+static inline int
+unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
+ void *arg __maybe_unused,
+ struct thread *thread __maybe_unused,
+ struct perf_sample *data __maybe_unused,
+ int max_stack __maybe_unused)
+{
+ return 0;
+}
+
+static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
+{
+ return 0;
+}
+
+static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
+static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
+#endif /* HAVE_DWARF_UNWIND_SUPPORT */
+#endif /* __UNWIND_H */
diff --git a/kernel/tools/perf/util/usage.c b/kernel/tools/perf/util/usage.c
new file mode 100644
index 000000000..4007aca8e
--- /dev/null
+++ b/kernel/tools/perf/util/usage.c
@@ -0,0 +1,84 @@
+/*
+ * usage.c
+ *
+ * Various reporting routines.
+ * Originally copied from GIT source.
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ */
+#include "util.h"
+#include "debug.h"
+
+static void report(const char *prefix, const char *err, va_list params)
+{
+ char msg[1024];
+ vsnprintf(msg, sizeof(msg), err, params);
+ fprintf(stderr, " %s%s\n", prefix, msg);
+}
+
+static NORETURN void usage_builtin(const char *err)
+{
+ fprintf(stderr, "\n Usage: %s\n", err);
+ exit(129);
+}
+
+static NORETURN void die_builtin(const char *err, va_list params)
+{
+ report(" Fatal: ", err, params);
+ exit(128);
+}
+
+static void error_builtin(const char *err, va_list params)
+{
+ report(" Error: ", err, params);
+}
+
+static void warn_builtin(const char *warn, va_list params)
+{
+ report(" Warning: ", warn, params);
+}
+
+/* If we are in a dlopen()ed .so, writing to a global variable would segfault
+ * (ugh), so keep things static.
*/ +static void (*usage_routine)(const char *err) NORETURN = usage_builtin; +static void (*die_routine)(const char *err, va_list params) NORETURN = die_builtin; +static void (*error_routine)(const char *err, va_list params) = error_builtin; +static void (*warn_routine)(const char *err, va_list params) = warn_builtin; + +void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN) +{ + die_routine = routine; +} + +void usage(const char *err) +{ + usage_routine(err); +} + +void die(const char *err, ...) +{ + va_list params; + + va_start(params, err); + die_routine(err, params); + va_end(params); +} + +int error(const char *err, ...) +{ + va_list params; + + va_start(params, err); + error_routine(err, params); + va_end(params); + return -1; +} + +void warning(const char *warn, ...) +{ + va_list params; + + va_start(params, warn); + warn_routine(warn, params); + va_end(params); +} diff --git a/kernel/tools/perf/util/util.c b/kernel/tools/perf/util/util.c new file mode 100644 index 000000000..4ee6d0d4c --- /dev/null +++ b/kernel/tools/perf/util/util.c @@ -0,0 +1,617 @@ +#include "../perf.h" +#include "util.h" +#include "debug.h" +#include +#include +#ifdef HAVE_BACKTRACE_SUPPORT +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include "callchain.h" + +struct callchain_param callchain_param = { + .mode = CHAIN_GRAPH_REL, + .min_percent = 0.5, + .order = ORDER_CALLEE, + .key = CCKEY_FUNCTION +}; + +/* + * XXX We need to find a better place for these things... + */ +unsigned int page_size; +int cacheline_size; + +bool test_attr__enabled; + +bool perf_host = true; +bool perf_guest = false; + +char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events"; + +void event_attr_init(struct perf_event_attr *attr) +{ + if (!perf_host) + attr->exclude_host = 1; + if (!perf_guest) + attr->exclude_guest = 1; + /* to capture ABI version */ + attr->size = sizeof(*attr); +} + +int mkdir_p(char *path, mode_t mode) +{ + struct stat st; + int err; + char *d = path; + + if (*d != '/') + return -1; + + if (stat(path, &st) == 0) + return 0; + + while (*++d == '/'); + + while ((d = strchr(d, '/'))) { + *d = '\0'; + err = stat(path, &st) && mkdir(path, mode); + *d++ = '/'; + if (err) + return -1; + while (*d == '/') + ++d; + } + return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0; +} + +static int slow_copyfile(const char *from, const char *to, mode_t mode) +{ + int err = -1; + char *line = NULL; + size_t n; + FILE *from_fp = fopen(from, "r"), *to_fp; + mode_t old_umask; + + if (from_fp == NULL) + goto out; + + old_umask = umask(mode ^ 0777); + to_fp = fopen(to, "w"); + umask(old_umask); + if (to_fp == NULL) + goto out_fclose_from; + + while (getline(&line, &n, from_fp) > 0) + if (fputs(line, to_fp) == EOF) + goto out_fclose_to; + err = 0; +out_fclose_to: + fclose(to_fp); + free(line); +out_fclose_from: + fclose(from_fp); +out: + return err; +} + +int copyfile_mode(const char *from, const char *to, mode_t mode) +{ + int fromfd, tofd; + struct stat st; + void *addr; + int err = -1; + + if (stat(from, &st)) + goto out; + + if (st.st_size == 0) /* /proc? do it slowly... 
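+ * (procfs files stat() as zero-size even when non-empty, so the mmap()-based fast path below would copy nothing)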
*/ + return slow_copyfile(from, to, mode); + + fromfd = open(from, O_RDONLY); + if (fromfd < 0) + goto out; + + tofd = creat(to, mode); + if (tofd < 0) + goto out_close_from; + + addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fromfd, 0); + if (addr == MAP_FAILED) + goto out_close_to; + + if (write(tofd, addr, st.st_size) == st.st_size) + err = 0; + + munmap(addr, st.st_size); +out_close_to: + close(tofd); + if (err) + unlink(to); +out_close_from: + close(fromfd); +out: + return err; +} + +int copyfile(const char *from, const char *to) +{ + return copyfile_mode(from, to, 0755); +} + +unsigned long convert_unit(unsigned long value, char *unit) +{ + *unit = ' '; + + if (value > 1000) { + value /= 1000; + *unit = 'K'; + } + + if (value > 1000) { + value /= 1000; + *unit = 'M'; + } + + if (value > 1000) { + value /= 1000; + *unit = 'G'; + } + + return value; +} + +static ssize_t ion(bool is_read, int fd, void *buf, size_t n) +{ + void *buf_start = buf; + size_t left = n; + + while (left) { + ssize_t ret = is_read ? read(fd, buf, left) : + write(fd, buf, left); + + if (ret < 0 && errno == EINTR) + continue; + if (ret <= 0) + return ret; + + left -= ret; + buf += ret; + } + + BUG_ON((size_t)(buf - buf_start) != n); + return n; +} + +/* + * Read exactly 'n' bytes or return an error. + */ +ssize_t readn(int fd, void *buf, size_t n) +{ + return ion(true, fd, buf, n); +} + +/* + * Write exactly 'n' bytes or return an error. + */ +ssize_t writen(int fd, void *buf, size_t n) +{ + return ion(false, fd, buf, n); +} + +size_t hex_width(u64 v) +{ + size_t n = 1; + + while ((v >>= 4)) + ++n; + + return n; +} + +static int hex(char ch) +{ + if ((ch >= '0') && (ch <= '9')) + return ch - '0'; + if ((ch >= 'a') && (ch <= 'f')) + return ch - 'a' + 10; + if ((ch >= 'A') && (ch <= 'F')) + return ch - 'A' + 10; + return -1; +} + +/* + * While we find nice hex chars, build a long_val. + * Return number of chars processed. + */ +int hex2u64(const char *ptr, u64 *long_val) +{ + const char *p = ptr; + *long_val = 0; + + while (*p) { + const int hex_val = hex(*p); + + if (hex_val < 0) + break; + + *long_val = (*long_val << 4) | hex_val; + p++; + } + + return p - ptr; +} + +/* Obtain a backtrace and print it to stdout. 
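+ * Relies on glibc's backtrace(3)/backtrace_symbols(3), hence the HAVE_BACKTRACE_SUPPORT guard and the no-op fallback below.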
*/ +#ifdef HAVE_BACKTRACE_SUPPORT +void dump_stack(void) +{ + void *array[16]; + size_t size = backtrace(array, ARRAY_SIZE(array)); + char **strings = backtrace_symbols(array, size); + size_t i; + + printf("Obtained %zd stack frames.\n", size); + + for (i = 0; i < size; i++) + printf("%s\n", strings[i]); + + free(strings); +} +#else +void dump_stack(void) {} +#endif + +void sighandler_dump_stack(int sig) +{ + psignal(sig, "perf"); + dump_stack(); + exit(sig); +} + +void get_term_dimensions(struct winsize *ws) +{ + char *s = getenv("LINES"); + + if (s != NULL) { + ws->ws_row = atoi(s); + s = getenv("COLUMNS"); + if (s != NULL) { + ws->ws_col = atoi(s); + if (ws->ws_row && ws->ws_col) + return; + } + } +#ifdef TIOCGWINSZ + if (ioctl(1, TIOCGWINSZ, ws) == 0 && + ws->ws_row && ws->ws_col) + return; +#endif + ws->ws_row = 25; + ws->ws_col = 80; +} + +void set_term_quiet_input(struct termios *old) +{ + struct termios tc; + + tcgetattr(0, old); + tc = *old; + tc.c_lflag &= ~(ICANON | ECHO); + tc.c_cc[VMIN] = 0; + tc.c_cc[VTIME] = 0; + tcsetattr(0, TCSANOW, &tc); +} + +static void set_tracing_events_path(const char *tracing, const char *mountpoint) +{ + snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s%s", + mountpoint, tracing, "events"); +} + +static const char *__perf_tracefs_mount(const char *mountpoint) +{ + const char *mnt; + + mnt = tracefs_mount(mountpoint); + if (!mnt) + return NULL; + + set_tracing_events_path("", mnt); + + return mnt; +} + +static const char *__perf_debugfs_mount(const char *mountpoint) +{ + const char *mnt; + + mnt = debugfs_mount(mountpoint); + if (!mnt) + return NULL; + + set_tracing_events_path("tracing/", mnt); + + return mnt; +} + +const char *perf_debugfs_mount(const char *mountpoint) +{ + const char *mnt; + + mnt = __perf_tracefs_mount(mountpoint); + if (mnt) + return mnt; + + mnt = __perf_debugfs_mount(mountpoint); + + return mnt; +} + +void perf_debugfs_set_path(const char *mntpt) +{ + snprintf(debugfs_mountpoint, strlen(debugfs_mountpoint), "%s", mntpt); + set_tracing_events_path("tracing/", mntpt); +} + +static const char *find_tracefs(void) +{ + const char *path = __perf_tracefs_mount(NULL); + + return path; +} + +static const char *find_debugfs(void) +{ + const char *path = __perf_debugfs_mount(NULL); + + if (!path) + fprintf(stderr, "Your kernel does not support the debugfs filesystem"); + + return path; +} + +/* + * Finds the path to the debugfs/tracing + * Allocates the string and stores it. + */ +const char *find_tracing_dir(void) +{ + const char *tracing_dir = ""; + static char *tracing; + static int tracing_found; + const char *debugfs; + + if (tracing_found) + return tracing; + + debugfs = find_tracefs(); + if (!debugfs) { + tracing_dir = "/tracing"; + debugfs = find_debugfs(); + if (!debugfs) + return NULL; + } + + if (asprintf(&tracing, "%s%s", debugfs, tracing_dir) < 0) + return NULL; + + tracing_found = 1; + return tracing; +} + +char *get_tracing_file(const char *name) +{ + const char *tracing; + char *file; + + tracing = find_tracing_dir(); + if (!tracing) + return NULL; + + if (asprintf(&file, "%s/%s", tracing, name) < 0) + return NULL; + + return file; +} + +void put_tracing_file(char *file) +{ + free(file); +} + +int parse_nsec_time(const char *str, u64 *ptime) +{ + u64 time_sec, time_nsec; + char *end; + + time_sec = strtoul(str, &end, 10); + if (*end != '.' 
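+ /* accept "<sec>" or "<sec>.<nsec fraction>" forms only */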
&& *end != '\0') + return -1; + + if (*end == '.') { + int i; + char nsec_buf[10]; + + if (strlen(++end) > 9) + return -1; + + strncpy(nsec_buf, end, 9); + nsec_buf[9] = '\0'; + + /* make it nsec precision */ + for (i = strlen(nsec_buf); i < 9; i++) + nsec_buf[i] = '0'; + + time_nsec = strtoul(nsec_buf, &end, 10); + if (*end != '\0') + return -1; + } else + time_nsec = 0; + + *ptime = time_sec * NSEC_PER_SEC + time_nsec; + return 0; +} + +unsigned long parse_tag_value(const char *str, struct parse_tag *tags) +{ + struct parse_tag *i = tags; + + while (i->tag) { + char *s; + + s = strchr(str, i->tag); + if (s) { + unsigned long int value; + char *endptr; + + value = strtoul(str, &endptr, 10); + if (s != endptr) + break; + + if (value > ULONG_MAX / i->mult) + break; + value *= i->mult; + return value; + } + i++; + } + + return (unsigned long) -1; +} + +int filename__read_str(const char *filename, char **buf, size_t *sizep) +{ + size_t size = 0, alloc_size = 0; + void *bf = NULL, *nbf; + int fd, n, err = 0; + char sbuf[STRERR_BUFSIZE]; + + fd = open(filename, O_RDONLY); + if (fd < 0) + return -errno; + + do { + if (size == alloc_size) { + alloc_size += BUFSIZ; + nbf = realloc(bf, alloc_size); + if (!nbf) { + err = -ENOMEM; + break; + } + + bf = nbf; + } + + n = read(fd, bf + size, alloc_size - size); + if (n < 0) { + if (size) { + pr_warning("read failed %d: %s\n", errno, + strerror_r(errno, sbuf, sizeof(sbuf))); + err = 0; + } else + err = -errno; + + break; + } + + size += n; + } while (n > 0); + + if (!err) { + *sizep = size; + *buf = bf; + } else + free(bf); + + close(fd); + return err; +} + +const char *get_filename_for_perf_kvm(void) +{ + const char *filename; + + if (perf_host && !perf_guest) + filename = strdup("perf.data.host"); + else if (!perf_host && perf_guest) + filename = strdup("perf.data.guest"); + else + filename = strdup("perf.data.kvm"); + + return filename; +} + +int perf_event_paranoid(void) +{ + int value; + + if (sysctl__read_int("kernel/perf_event_paranoid", &value)) + return INT_MAX; + + return value; +} + +void mem_bswap_32(void *src, int byte_size) +{ + u32 *m = src; + while (byte_size > 0) { + *m = bswap_32(*m); + byte_size -= sizeof(u32); + ++m; + } +} + +void mem_bswap_64(void *src, int byte_size) +{ + u64 *m = src; + + while (byte_size > 0) { + *m = bswap_64(*m); + byte_size -= sizeof(u64); + ++m; + } +} + +bool find_process(const char *name) +{ + size_t len = strlen(name); + DIR *dir; + struct dirent *d; + int ret = -1; + + dir = opendir(procfs__mountpoint()); + if (!dir) + return -1; + + /* Walk through the directory. */ + while (ret && (d = readdir(dir)) != NULL) { + char path[PATH_MAX]; + char *data; + size_t size; + + if ((d->d_type != DT_DIR) || + !strcmp(".", d->d_name) || + !strcmp("..", d->d_name)) + continue; + + scnprintf(path, sizeof(path), "%s/%s/comm", + procfs__mountpoint(), d->d_name); + + if (filename__read_str(path, &data, &size)) + continue; + + ret = strncmp(name, data, len); + free(data); + } + + closedir(dir); + return ret ? false : true; +} diff --git a/kernel/tools/perf/util/util.h b/kernel/tools/perf/util/util.h new file mode 100644 index 000000000..1ff23e04a --- /dev/null +++ b/kernel/tools/perf/util/util.h @@ -0,0 +1,336 @@ +#ifndef GIT_COMPAT_UTIL_H +#define GIT_COMPAT_UTIL_H + +#ifndef FLEX_ARRAY +/* + * See if our compiler is known to support flexible array members. 
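+ * (C99 and GCC >= 3 accept a bare [], older GCC needs the [0] extension, and anything else falls back to a one-element array)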
+ */ +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEX_ARRAY /* empty */ +#elif defined(__GNUC__) +# if (__GNUC__ >= 3) +# define FLEX_ARRAY /* empty */ +# else +# define FLEX_ARRAY 0 /* older GNU extension */ +# endif +#endif + +/* + * Otherwise, default to safer but a bit wasteful traditional style + */ +#ifndef FLEX_ARRAY +# define FLEX_ARRAY 1 +#endif +#endif + +#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) + +#ifdef __GNUC__ +#define TYPEOF(x) (__typeof__(x)) +#else +#define TYPEOF(x) +#endif + +#define MSB(x, bits) ((x) & TYPEOF(x)(~0ULL << (sizeof(x) * 8 - (bits)))) +#define HAS_MULTI_BITS(i) ((i) & ((i) - 1)) /* checks if an integer has more than 1 bit set */ + +/* Approximation of the length of the decimal representation of this type. */ +#define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1) + +#define _ALL_SOURCE 1 +#define _BSD_SOURCE 1 +/* glibc 2.20 deprecates _BSD_SOURCE in favour of _DEFAULT_SOURCE */ +#define _DEFAULT_SOURCE 1 +#define HAS_BOOL + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern const char *graph_line; +extern const char *graph_dotted_line; +extern char buildid_dir[]; +extern char tracing_events_path[]; +extern void perf_debugfs_set_path(const char *mountpoint); +const char *perf_debugfs_mount(const char *mountpoint); +const char *find_tracing_dir(void); +char *get_tracing_file(const char *name); +void put_tracing_file(char *file); + +/* On most systems would have given us this, but + * not on some systems (e.g. GNU/Hurd). + */ +#ifndef PATH_MAX +#define PATH_MAX 4096 +#endif + +#ifndef PRIuMAX +#define PRIuMAX "llu" +#endif + +#ifndef PRIu32 +#define PRIu32 "u" +#endif + +#ifndef PRIx32 +#define PRIx32 "x" +#endif + +#ifndef PATH_SEP +#define PATH_SEP ':' +#endif + +#ifndef STRIP_EXTENSION +#define STRIP_EXTENSION "" +#endif + +#ifndef has_dos_drive_prefix +#define has_dos_drive_prefix(path) 0 +#endif + +#ifndef is_dir_sep +#define is_dir_sep(c) ((c) == '/') +#endif + +#ifdef __GNUC__ +#define NORETURN __attribute__((__noreturn__)) +#else +#define NORETURN +#ifndef __attribute__ +#define __attribute__(x) +#endif +#endif + +#define PERF_GTK_DSO "libperf-gtk.so" + +/* General helper functions */ +extern void usage(const char *err) NORETURN; +extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2))); +extern int error(const char *err, ...) __attribute__((format (printf, 1, 2))); +extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2))); + +#include "../../../include/linux/stringify.h" + +#define DIE_IF(cnd) \ + do { if (cnd) \ + die(" at (" __FILE__ ":" __stringify(__LINE__) "): " \ + __stringify(cnd) "\n"); \ + } while (0) + + +extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN); + +extern int prefixcmp(const char *str, const char *prefix); +extern void set_buildid_dir(const char *dir); + +static inline const char *skip_prefix(const char *str, const char *prefix) +{ + size_t len = strlen(prefix); + return strncmp(str, prefix, len) ? 
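+ /* on a match return the remainder of str past the prefix, else NULL */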
NULL : str + len; +} + +#ifdef __GLIBC_PREREQ +#if __GLIBC_PREREQ(2, 1) +#define HAVE_STRCHRNUL +#endif +#endif + +#ifndef HAVE_STRCHRNUL +#define strchrnul gitstrchrnul +static inline char *gitstrchrnul(const char *s, int c) +{ + while (*s && *s != c) + s++; + return (char *)s; +} +#endif + +/* + * Wrappers: + */ +extern char *xstrdup(const char *str); +extern void *xrealloc(void *ptr, size_t size) __attribute__((weak)); + + +static inline void *zalloc(size_t size) +{ + return calloc(1, size); +} + +#define zfree(ptr) ({ free(*ptr); *ptr = NULL; }) + +static inline int has_extension(const char *filename, const char *ext) +{ + size_t len = strlen(filename); + size_t extlen = strlen(ext); + + return len > extlen && !memcmp(filename + len - extlen, ext, extlen); +} + +/* Sane ctype - no locale, and works with signed chars */ +#undef isascii +#undef isspace +#undef isdigit +#undef isxdigit +#undef isalpha +#undef isprint +#undef isalnum +#undef islower +#undef isupper +#undef tolower +#undef toupper + +#ifndef NSEC_PER_MSEC +#define NSEC_PER_MSEC 1000000L +#endif + +int parse_nsec_time(const char *str, u64 *ptime); + +extern unsigned char sane_ctype[256]; +#define GIT_SPACE 0x01 +#define GIT_DIGIT 0x02 +#define GIT_ALPHA 0x04 +#define GIT_GLOB_SPECIAL 0x08 +#define GIT_REGEX_SPECIAL 0x10 +#define GIT_PRINT_EXTRA 0x20 +#define GIT_PRINT 0x3E +#define sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0) +#define isascii(x) (((x) & ~0x7f) == 0) +#define isspace(x) sane_istest(x,GIT_SPACE) +#define isdigit(x) sane_istest(x,GIT_DIGIT) +#define isxdigit(x) \ + (sane_istest(toupper(x), GIT_ALPHA | GIT_DIGIT) && toupper(x) < 'G') +#define isalpha(x) sane_istest(x,GIT_ALPHA) +#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT) +#define isprint(x) sane_istest(x,GIT_PRINT) +#define islower(x) (sane_istest(x,GIT_ALPHA) && (x & 0x20)) +#define isupper(x) (sane_istest(x,GIT_ALPHA) && !(x & 0x20)) +#define tolower(x) sane_case((unsigned char)(x), 0x20) +#define toupper(x) sane_case((unsigned char)(x), 0) + +static inline int sane_case(int x, int high) +{ + if (sane_istest(x, GIT_ALPHA)) + x = (x & ~0x20) | high; + return x; +} + +int mkdir_p(char *path, mode_t mode); +int copyfile(const char *from, const char *to); +int copyfile_mode(const char *from, const char *to, mode_t mode); + +s64 perf_atoll(const char *str); +char **argv_split(const char *str, int *argcp); +void argv_free(char **argv); +bool strglobmatch(const char *str, const char *pat); +bool strlazymatch(const char *str, const char *pat); +int strtailcmp(const char *s1, const char *s2); +char *strxfrchar(char *s, char from, char to); +unsigned long convert_unit(unsigned long value, char *unit); +ssize_t readn(int fd, void *buf, size_t n); +ssize_t writen(int fd, void *buf, size_t n); + +struct perf_event_attr; + +void event_attr_init(struct perf_event_attr *attr); + +#define _STR(x) #x +#define STR(x) _STR(x) + +size_t hex_width(u64 v); +int hex2u64(const char *ptr, u64 *val); + +char *ltrim(char *s); +char *rtrim(char *s); + +void dump_stack(void); +void sighandler_dump_stack(int sig); + +extern unsigned int page_size; +extern int cacheline_size; + +void get_term_dimensions(struct winsize *ws); +void set_term_quiet_input(struct termios *old); + +struct parse_tag { + char tag; + int mult; +}; + +unsigned long parse_tag_value(const char *str, struct parse_tag *tags); + +#define SRCLINE_UNKNOWN ((char *) "??:0") + +static inline int path__join(char *bf, size_t size, + const char *path1, const char *path2) +{ + return 
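+ /* omit the '/' separator when path1 is empty */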
scnprintf(bf, size, "%s%s%s", path1, path1[0] ? "/" : "", path2); +} + +static inline int path__join3(char *bf, size_t size, + const char *path1, const char *path2, + const char *path3) +{ + return scnprintf(bf, size, "%s%s%s%s%s", + path1, path1[0] ? "/" : "", + path2, path2[0] ? "/" : "", path3); +} + +struct dso; +struct symbol; + +char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym, + bool show_sym); +void free_srcline(char *srcline); + +int filename__read_str(const char *filename, char **buf, size_t *sizep); +int perf_event_paranoid(void); + +void mem_bswap_64(void *src, int byte_size); +void mem_bswap_32(void *src, int byte_size); + +const char *get_filename_for_perf_kvm(void); +bool find_process(const char *name); + +#ifdef HAVE_ZLIB_SUPPORT +int gzip_decompress_to_file(const char *input, int output_fd); +#endif + +#ifdef HAVE_LZMA_SUPPORT +int lzma_decompress_to_file(const char *input, int output_fd); +#endif + +#endif /* GIT_COMPAT_UTIL_H */ diff --git a/kernel/tools/perf/util/values.c b/kernel/tools/perf/util/values.c new file mode 100644 index 000000000..0fb3c1fcd --- /dev/null +++ b/kernel/tools/perf/util/values.c @@ -0,0 +1,232 @@ +#include + +#include "util.h" +#include "values.h" + +void perf_read_values_init(struct perf_read_values *values) +{ + values->threads_max = 16; + values->pid = malloc(values->threads_max * sizeof(*values->pid)); + values->tid = malloc(values->threads_max * sizeof(*values->tid)); + values->value = malloc(values->threads_max * sizeof(*values->value)); + if (!values->pid || !values->tid || !values->value) + die("failed to allocate read_values threads arrays"); + values->threads = 0; + + values->counters_max = 16; + values->counterrawid = malloc(values->counters_max + * sizeof(*values->counterrawid)); + values->countername = malloc(values->counters_max + * sizeof(*values->countername)); + if (!values->counterrawid || !values->countername) + die("failed to allocate read_values counters arrays"); + values->counters = 0; +} + +void perf_read_values_destroy(struct perf_read_values *values) +{ + int i; + + if (!values->threads_max || !values->counters_max) + return; + + for (i = 0; i < values->threads; i++) + zfree(&values->value[i]); + zfree(&values->value); + zfree(&values->pid); + zfree(&values->tid); + zfree(&values->counterrawid); + for (i = 0; i < values->counters; i++) + zfree(&values->countername[i]); + zfree(&values->countername); +} + +static void perf_read_values__enlarge_threads(struct perf_read_values *values) +{ + values->threads_max *= 2; + values->pid = realloc(values->pid, + values->threads_max * sizeof(*values->pid)); + values->tid = realloc(values->tid, + values->threads_max * sizeof(*values->tid)); + values->value = realloc(values->value, + values->threads_max * sizeof(*values->value)); + if (!values->pid || !values->tid || !values->value) + die("failed to enlarge read_values threads arrays"); +} + +static int perf_read_values__findnew_thread(struct perf_read_values *values, + u32 pid, u32 tid) +{ + int i; + + for (i = 0; i < values->threads; i++) + if (values->pid[i] == pid && values->tid[i] == tid) + return i; + + if (values->threads == values->threads_max) + perf_read_values__enlarge_threads(values); + + i = values->threads++; + values->pid[i] = pid; + values->tid[i] = tid; + values->value[i] = malloc(values->counters_max * sizeof(**values->value)); + if (!values->value[i]) + die("failed to allocate read_values counters array"); + + return i; +} + +static void perf_read_values__enlarge_counters(struct perf_read_values 
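+ /* doubles counters_max and grows each per-thread value row in step */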
*values) +{ + int i; + + values->counters_max *= 2; + values->counterrawid = realloc(values->counterrawid, + values->counters_max * sizeof(*values->counterrawid)); + values->countername = realloc(values->countername, + values->counters_max * sizeof(*values->countername)); + if (!values->counterrawid || !values->countername) + die("failed to enlarge read_values counters arrays"); + + for (i = 0; i < values->threads; i++) { + values->value[i] = realloc(values->value[i], + values->counters_max * sizeof(**values->value)); + if (!values->value[i]) + die("failed to enlarge read_values counters arrays"); + } +} + +static int perf_read_values__findnew_counter(struct perf_read_values *values, + u64 rawid, const char *name) +{ + int i; + + for (i = 0; i < values->counters; i++) + if (values->counterrawid[i] == rawid) + return i; + + if (values->counters == values->counters_max) + perf_read_values__enlarge_counters(values); + + i = values->counters++; + values->counterrawid[i] = rawid; + values->countername[i] = strdup(name); + + return i; +} + +void perf_read_values_add_value(struct perf_read_values *values, + u32 pid, u32 tid, + u64 rawid, const char *name, u64 value) +{ + int tindex, cindex; + + tindex = perf_read_values__findnew_thread(values, pid, tid); + cindex = perf_read_values__findnew_counter(values, rawid, name); + + values->value[tindex][cindex] = value; +} + +static void perf_read_values__display_pretty(FILE *fp, + struct perf_read_values *values) +{ + int i, j; + int pidwidth, tidwidth; + int *counterwidth; + + counterwidth = malloc(values->counters * sizeof(*counterwidth)); + if (!counterwidth) + die("failed to allocate counterwidth array"); + tidwidth = 3; + pidwidth = 3; + for (j = 0; j < values->counters; j++) + counterwidth[j] = strlen(values->countername[j]); + for (i = 0; i < values->threads; i++) { + int width; + + width = snprintf(NULL, 0, "%d", values->pid[i]); + if (width > pidwidth) + pidwidth = width; + width = snprintf(NULL, 0, "%d", values->tid[i]); + if (width > tidwidth) + tidwidth = width; + for (j = 0; j < values->counters; j++) { + width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]); + if (width > counterwidth[j]) + counterwidth[j] = width; + } + } + + fprintf(fp, "# %*s %*s", pidwidth, "PID", tidwidth, "TID"); + for (j = 0; j < values->counters; j++) + fprintf(fp, " %*s", counterwidth[j], values->countername[j]); + fprintf(fp, "\n"); + + for (i = 0; i < values->threads; i++) { + fprintf(fp, " %*d %*d", pidwidth, values->pid[i], + tidwidth, values->tid[i]); + for (j = 0; j < values->counters; j++) + fprintf(fp, " %*" PRIu64, + counterwidth[j], values->value[i][j]); + fprintf(fp, "\n"); + } + free(counterwidth); +} + +static void perf_read_values__display_raw(FILE *fp, + struct perf_read_values *values) +{ + int width, pidwidth, tidwidth, namewidth, rawwidth, countwidth; + int i, j; + + tidwidth = 3; /* TID */ + pidwidth = 3; /* PID */ + namewidth = 4; /* "Name" */ + rawwidth = 3; /* "Raw" */ + countwidth = 5; /* "Count" */ + + for (i = 0; i < values->threads; i++) { + width = snprintf(NULL, 0, "%d", values->pid[i]); + if (width > pidwidth) + pidwidth = width; + width = snprintf(NULL, 0, "%d", values->tid[i]); + if (width > tidwidth) + tidwidth = width; + } + for (j = 0; j < values->counters; j++) { + width = strlen(values->countername[j]); + if (width > namewidth) + namewidth = width; + width = snprintf(NULL, 0, "%" PRIx64, values->counterrawid[j]); + if (width > rawwidth) + rawwidth = width; + } + for (i = 0; i < values->threads; i++) { + for (j = 0; j < 
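+ /* widen the Count column to the widest value actually printed */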
values->counters; j++) { + width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]); + if (width > countwidth) + countwidth = width; + } + } + + fprintf(fp, "# %*s %*s %*s %*s %*s\n", + pidwidth, "PID", tidwidth, "TID", + namewidth, "Name", rawwidth, "Raw", + countwidth, "Count"); + for (i = 0; i < values->threads; i++) + for (j = 0; j < values->counters; j++) + fprintf(fp, " %*d %*d %*s %*" PRIx64 " %*" PRIu64, + pidwidth, values->pid[i], + tidwidth, values->tid[i], + namewidth, values->countername[j], + rawwidth, values->counterrawid[j], + countwidth, values->value[i][j]); +} + +void perf_read_values_display(FILE *fp, struct perf_read_values *values, int raw) +{ + if (raw) + perf_read_values__display_raw(fp, values); + else + perf_read_values__display_pretty(fp, values); +} diff --git a/kernel/tools/perf/util/values.h b/kernel/tools/perf/util/values.h new file mode 100644 index 000000000..b21a80c6c --- /dev/null +++ b/kernel/tools/perf/util/values.h @@ -0,0 +1,27 @@ +#ifndef __PERF_VALUES_H +#define __PERF_VALUES_H + +#include + +struct perf_read_values { + int threads; + int threads_max; + u32 *pid, *tid; + int counters; + int counters_max; + u64 *counterrawid; + char **countername; + u64 **value; +}; + +void perf_read_values_init(struct perf_read_values *values); +void perf_read_values_destroy(struct perf_read_values *values); + +void perf_read_values_add_value(struct perf_read_values *values, + u32 pid, u32 tid, + u64 rawid, const char *name, u64 value); + +void perf_read_values_display(FILE *fp, struct perf_read_values *values, + int raw); + +#endif /* __PERF_VALUES_H */ diff --git a/kernel/tools/perf/util/vdso.c b/kernel/tools/perf/util/vdso.c new file mode 100644 index 000000000..5c7dd7969 --- /dev/null +++ b/kernel/tools/perf/util/vdso.c @@ -0,0 +1,321 @@ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vdso.h" +#include "util.h" +#include "symbol.h" +#include "machine.h" +#include "thread.h" +#include "linux/string.h" +#include "debug.h" + +/* + * Include definition of find_vdso_map() also used in perf-read-vdso.c for + * building perf-read-vdso32 and perf-read-vdsox32. 
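+ * (a .c include keeps a single copy of the map-scanning logic shared with the standalone compat readers)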
+ */ +#include "find-vdso-map.c" + +#define VDSO__TEMP_FILE_NAME "/tmp/perf-vdso.so-XXXXXX" + +struct vdso_file { + bool found; + bool error; + char temp_file_name[sizeof(VDSO__TEMP_FILE_NAME)]; + const char *dso_name; + const char *read_prog; +}; + +struct vdso_info { + struct vdso_file vdso; +#if BITS_PER_LONG == 64 + struct vdso_file vdso32; + struct vdso_file vdsox32; +#endif +}; + +static struct vdso_info *vdso_info__new(void) +{ + static const struct vdso_info vdso_info_init = { + .vdso = { + .temp_file_name = VDSO__TEMP_FILE_NAME, + .dso_name = DSO__NAME_VDSO, + }, +#if BITS_PER_LONG == 64 + .vdso32 = { + .temp_file_name = VDSO__TEMP_FILE_NAME, + .dso_name = DSO__NAME_VDSO32, + .read_prog = "perf-read-vdso32", + }, + .vdsox32 = { + .temp_file_name = VDSO__TEMP_FILE_NAME, + .dso_name = DSO__NAME_VDSOX32, + .read_prog = "perf-read-vdsox32", + }, +#endif + }; + + return memdup(&vdso_info_init, sizeof(vdso_info_init)); +} + +static char *get_file(struct vdso_file *vdso_file) +{ + char *vdso = NULL; + char *buf = NULL; + void *start, *end; + size_t size; + int fd; + + if (vdso_file->found) + return vdso_file->temp_file_name; + + if (vdso_file->error || find_vdso_map(&start, &end)) + return NULL; + + size = end - start; + + buf = memdup(start, size); + if (!buf) + return NULL; + + fd = mkstemp(vdso_file->temp_file_name); + if (fd < 0) + goto out; + + if (size == (size_t) write(fd, buf, size)) + vdso = vdso_file->temp_file_name; + + close(fd); + + out: + free(buf); + + vdso_file->found = (vdso != NULL); + vdso_file->error = !vdso_file->found; + return vdso; +} + +void vdso__exit(struct machine *machine) +{ + struct vdso_info *vdso_info = machine->vdso_info; + + if (!vdso_info) + return; + + if (vdso_info->vdso.found) + unlink(vdso_info->vdso.temp_file_name); +#if BITS_PER_LONG == 64 + if (vdso_info->vdso32.found) + unlink(vdso_info->vdso32.temp_file_name); + if (vdso_info->vdsox32.found) + unlink(vdso_info->vdsox32.temp_file_name); +#endif + + zfree(&machine->vdso_info); +} + +static struct dso *vdso__new(struct machine *machine, const char *short_name, + const char *long_name) +{ + struct dso *dso; + + dso = dso__new(short_name); + if (dso != NULL) { + dsos__add(&machine->user_dsos, dso); + dso__set_long_name(dso, long_name, false); + } + + return dso; +} + +#if BITS_PER_LONG == 64 + +static enum dso_type machine__thread_dso_type(struct machine *machine, + struct thread *thread) +{ + enum dso_type dso_type = DSO__TYPE_UNKNOWN; + struct map *map; + struct dso *dso; + + map = map_groups__first(thread->mg, MAP__FUNCTION); + for (; map ; map = map_groups__next(map)) { + dso = map->dso; + if (!dso || dso->long_name[0] != '/') + continue; + dso_type = dso__type(dso, machine); + if (dso_type != DSO__TYPE_UNKNOWN) + break; + } + + return dso_type; +} + +static int vdso__do_copy_compat(FILE *f, int fd) +{ + char buf[4096]; + size_t count; + + while (1) { + count = fread(buf, 1, sizeof(buf), f); + if (ferror(f)) + return -errno; + if (feof(f)) + break; + if (count && writen(fd, buf, count) != (ssize_t)count) + return -errno; + } + + return 0; +} + +static int vdso__copy_compat(const char *prog, int fd) +{ + FILE *f; + int err; + + f = popen(prog, "r"); + if (!f) + return -errno; + + err = vdso__do_copy_compat(f, fd); + + if (pclose(f) == -1) + return -errno; + + return err; +} + +static int vdso__create_compat_file(const char *prog, char *temp_name) +{ + int fd, err; + + fd = mkstemp(temp_name); + if (fd < 0) + return -errno; + + err = vdso__copy_compat(prog, fd); + + if (close(fd) == -1) + return 
-errno; + + return err; +} + +static const char *vdso__get_compat_file(struct vdso_file *vdso_file) +{ + int err; + + if (vdso_file->found) + return vdso_file->temp_file_name; + + if (vdso_file->error) + return NULL; + + err = vdso__create_compat_file(vdso_file->read_prog, + vdso_file->temp_file_name); + if (err) { + pr_err("%s failed, error %d\n", vdso_file->read_prog, err); + vdso_file->error = true; + return NULL; + } + + vdso_file->found = true; + + return vdso_file->temp_file_name; +} + +static struct dso *vdso__findnew_compat(struct machine *machine, + struct vdso_file *vdso_file) +{ + const char *file_name; + struct dso *dso; + + dso = dsos__find(&machine->user_dsos, vdso_file->dso_name, true); + if (dso) + return dso; + + file_name = vdso__get_compat_file(vdso_file); + if (!file_name) + return NULL; + + return vdso__new(machine, vdso_file->dso_name, file_name); +} + +static int vdso__dso_findnew_compat(struct machine *machine, + struct thread *thread, + struct vdso_info *vdso_info, + struct dso **dso) +{ + enum dso_type dso_type; + + dso_type = machine__thread_dso_type(machine, thread); + +#ifndef HAVE_PERF_READ_VDSO32 + if (dso_type == DSO__TYPE_32BIT) + return 0; +#endif +#ifndef HAVE_PERF_READ_VDSOX32 + if (dso_type == DSO__TYPE_X32BIT) + return 0; +#endif + + switch (dso_type) { + case DSO__TYPE_32BIT: + *dso = vdso__findnew_compat(machine, &vdso_info->vdso32); + return 1; + case DSO__TYPE_X32BIT: + *dso = vdso__findnew_compat(machine, &vdso_info->vdsox32); + return 1; + case DSO__TYPE_UNKNOWN: + case DSO__TYPE_64BIT: + default: + return 0; + } +} + +#endif + +struct dso *vdso__dso_findnew(struct machine *machine, + struct thread *thread __maybe_unused) +{ + struct vdso_info *vdso_info; + struct dso *dso; + + if (!machine->vdso_info) + machine->vdso_info = vdso_info__new(); + + vdso_info = machine->vdso_info; + if (!vdso_info) + return NULL; + +#if BITS_PER_LONG == 64 + if (vdso__dso_findnew_compat(machine, thread, vdso_info, &dso)) + return dso; +#endif + + dso = dsos__find(&machine->user_dsos, DSO__NAME_VDSO, true); + if (!dso) { + char *file; + + file = get_file(&vdso_info->vdso); + if (!file) + return NULL; + + dso = vdso__new(machine, DSO__NAME_VDSO, file); + } + + return dso; +} + +bool dso__is_vdso(struct dso *dso) +{ + return !strcmp(dso->short_name, DSO__NAME_VDSO) || + !strcmp(dso->short_name, DSO__NAME_VDSO32) || + !strcmp(dso->short_name, DSO__NAME_VDSOX32); +} diff --git a/kernel/tools/perf/util/vdso.h b/kernel/tools/perf/util/vdso.h new file mode 100644 index 000000000..d97da1616 --- /dev/null +++ b/kernel/tools/perf/util/vdso.h @@ -0,0 +1,29 @@ +#ifndef __PERF_VDSO__ +#define __PERF_VDSO__ + +#include +#include +#include + +#define VDSO__MAP_NAME "[vdso]" + +#define DSO__NAME_VDSO "[vdso]" +#define DSO__NAME_VDSO32 "[vdso32]" +#define DSO__NAME_VDSOX32 "[vdsox32]" + +static inline bool is_vdso_map(const char *filename) +{ + return !strcmp(filename, VDSO__MAP_NAME); +} + +struct dso; + +bool dso__is_vdso(struct dso *dso); + +struct machine; +struct thread; + +struct dso *vdso__dso_findnew(struct machine *machine, struct thread *thread); +void vdso__exit(struct machine *machine); + +#endif /* __PERF_VDSO__ */ diff --git a/kernel/tools/perf/util/wrapper.c b/kernel/tools/perf/util/wrapper.c new file mode 100644 index 000000000..19f15b650 --- /dev/null +++ b/kernel/tools/perf/util/wrapper.c @@ -0,0 +1,41 @@ +/* + * Various trivial helper wrappers around standard functions + */ +#include "cache.h" + +/* + * There's no pack memory to release - but stay close to the Git 
+ version so wrap this away:
+ */
+static inline void release_pack_memory(size_t size __maybe_unused,
+ int flag __maybe_unused)
+{
+}
+
+char *xstrdup(const char *str)
+{
+ char *ret = strdup(str);
+ if (!ret) {
+ release_pack_memory(strlen(str) + 1, -1);
+ ret = strdup(str);
+ if (!ret)
+ die("Out of memory, strdup failed");
+ }
+ return ret;
+}
+
+void *xrealloc(void *ptr, size_t size)
+{
+ void *ret = realloc(ptr, size);
+ if (!ret && !size)
+ ret = realloc(ptr, 1);
+ if (!ret) {
+ release_pack_memory(size, -1);
+ ret = realloc(ptr, size);
+ if (!ret && !size)
+ ret = realloc(ptr, 1);
+ if (!ret)
+ die("Out of memory, realloc failed");
+ }
+ return ret;
+}
diff --git a/kernel/tools/perf/util/xyarray.c b/kernel/tools/perf/util/xyarray.c
new file mode 100644
index 000000000..22afbf6c5
--- /dev/null
+++ b/kernel/tools/perf/util/xyarray.c
@@ -0,0 +1,20 @@
+#include "xyarray.h"
+#include "util.h"
+
+struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size)
+{
+ size_t row_size = ylen * entry_size;
+ struct xyarray *xy = zalloc(sizeof(*xy) + xlen * row_size);
+
+ if (xy != NULL) {
+ xy->entry_size = entry_size;
+ xy->row_size = row_size;
+ }
+
+ return xy;
+}
+
+void xyarray__delete(struct xyarray *xy)
+{
+ free(xy);
+}
diff --git a/kernel/tools/perf/util/xyarray.h b/kernel/tools/perf/util/xyarray.h
new file mode 100644
index 000000000..c488a0727
--- /dev/null
+++ b/kernel/tools/perf/util/xyarray.h
@@ -0,0 +1,20 @@
+#ifndef _PERF_XYARRAY_H_
+#define _PERF_XYARRAY_H_ 1
+
+#include <sys/types.h>
+
+struct xyarray {
+ size_t row_size;
+ size_t entry_size;
+ char contents[];
+};
+
+struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
+void xyarray__delete(struct xyarray *xy);
+
+static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
+{
+ return &xy->contents[x * xy->row_size + y * xy->entry_size];
+}
+
+#endif /* _PERF_XYARRAY_H_ */
diff --git a/kernel/tools/perf/util/zlib.c b/kernel/tools/perf/util/zlib.c
new file mode 100644
index 000000000..495a449fc
--- /dev/null
+++ b/kernel/tools/perf/util/zlib.c
@@ -0,0 +1,78 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <zlib.h>
+
+#include "util/util.h"
+#include "util/debug.h"
+
+
+#define CHUNK_SIZE 16384
+
+int gzip_decompress_to_file(const char *input, int output_fd)
+{
+ int ret = Z_STREAM_ERROR;
+ int input_fd;
+ void *ptr;
+ int len;
+ struct stat stbuf;
+ unsigned char buf[CHUNK_SIZE];
+ z_stream zs = {
+ .zalloc = Z_NULL,
+ .zfree = Z_NULL,
+ .opaque = Z_NULL,
+ .avail_in = 0,
+ .next_in = Z_NULL,
+ };
+
+ input_fd = open(input, O_RDONLY);
+ if (input_fd < 0)
+ return -1;
+
+ if (fstat(input_fd, &stbuf) < 0)
+ goto out_close;
+
+ ptr = mmap(NULL, stbuf.st_size, PROT_READ, MAP_PRIVATE, input_fd, 0);
+ if (ptr == MAP_FAILED)
+ goto out_close;
+
+ if (inflateInit2(&zs, 16 + MAX_WBITS) != Z_OK)
+ goto out_unmap;
+
+ zs.next_in = ptr;
+ zs.avail_in = stbuf.st_size;
+
+ do {
+ zs.next_out = buf;
+ zs.avail_out = CHUNK_SIZE;
+
+ ret = inflate(&zs, Z_NO_FLUSH);
+ switch (ret) {
+ case Z_NEED_DICT:
+ ret = Z_DATA_ERROR;
+ /* fall through */
+ case Z_DATA_ERROR:
+ case Z_MEM_ERROR:
+ goto out;
+ default:
+ break;
+ }
+
+ len = CHUNK_SIZE - zs.avail_out;
+ if (writen(output_fd, buf, len) != len) {
+ ret = Z_DATA_ERROR;
+ goto out;
+ }
+
+ } while (ret != Z_STREAM_END);
+
+out:
+ inflateEnd(&zs);
+out_unmap:
+ munmap(ptr, stbuf.st_size);
+out_close:
+ close(input_fd);
+
+ return ret == Z_STREAM_END ? 0 : -1;
+}
-- cgit 1.2.3-korg