Diffstat (limited to 'kernel/tools/perf/arch/x86')
-rw-r--r--  kernel/tools/perf/arch/x86/Build  2
-rw-r--r--  kernel/tools/perf/arch/x86/Makefile  1
-rw-r--r--  kernel/tools/perf/arch/x86/include/arch-tests.h  19
-rw-r--r--  kernel/tools/perf/arch/x86/tests/Build  10
-rw-r--r--  kernel/tools/perf/arch/x86/tests/arch-tests.c  34
-rw-r--r--  kernel/tools/perf/arch/x86/tests/dwarf-unwind.c  1
-rw-r--r--  kernel/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk  75
-rwxr-xr-x  kernel/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh  43
-rw-r--r--  kernel/tools/perf/arch/x86/tests/insn-x86-dat-32.c  658
-rw-r--r--  kernel/tools/perf/arch/x86/tests/insn-x86-dat-64.c  768
-rw-r--r--  kernel/tools/perf/arch/x86/tests/insn-x86-dat-src.c  877
-rw-r--r--  kernel/tools/perf/arch/x86/tests/insn-x86.c  185
-rw-r--r--  kernel/tools/perf/arch/x86/tests/intel-cqm.c  124
-rw-r--r--  kernel/tools/perf/arch/x86/tests/perf-time-to-tsc.c  164
-rw-r--r--  kernel/tools/perf/arch/x86/tests/rdpmc.c  174
-rw-r--r--  kernel/tools/perf/arch/x86/util/Build  6
-rw-r--r--  kernel/tools/perf/arch/x86/util/auxtrace.c  83
-rw-r--r--  kernel/tools/perf/arch/x86/util/dwarf-regs.c  122
-rw-r--r--  kernel/tools/perf/arch/x86/util/intel-bts.c  458
-rw-r--r--  kernel/tools/perf/arch/x86/util/intel-pt.c  1046
-rw-r--r--  kernel/tools/perf/arch/x86/util/perf_regs.c  28
-rw-r--r--  kernel/tools/perf/arch/x86/util/pmu.c  18
22 files changed, 4859 insertions, 37 deletions
diff --git a/kernel/tools/perf/arch/x86/Build b/kernel/tools/perf/arch/x86/Build
index 41bf61da4..db52fa22d 100644
--- a/kernel/tools/perf/arch/x86/Build
+++ b/kernel/tools/perf/arch/x86/Build
@@ -1,2 +1,2 @@
libperf-y += util/
-libperf-$(CONFIG_DWARF_UNWIND) += tests/
+libperf-y += tests/
diff --git a/kernel/tools/perf/arch/x86/Makefile b/kernel/tools/perf/arch/x86/Makefile
index 21322e038..09ba923de 100644
--- a/kernel/tools/perf/arch/x86/Makefile
+++ b/kernel/tools/perf/arch/x86/Makefile
@@ -2,3 +2,4 @@ ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
endif
HAVE_KVM_STAT_SUPPORT := 1
+PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
diff --git a/kernel/tools/perf/arch/x86/include/arch-tests.h b/kernel/tools/perf/arch/x86/include/arch-tests.h
new file mode 100644
index 000000000..7ed00f4b0
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/include/arch-tests.h
@@ -0,0 +1,19 @@
+#ifndef ARCH_TESTS_H
+#define ARCH_TESTS_H
+
+/* Tests */
+int test__rdpmc(void);
+int test__perf_time_to_tsc(void);
+int test__insn_x86(void);
+int test__intel_cqm_count_nmi_context(void);
+
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+struct thread;
+struct perf_sample;
+int test__arch_unwind_sample(struct perf_sample *sample,
+ struct thread *thread);
+#endif
+
+extern struct test arch_tests[];
+
+#endif
diff --git a/kernel/tools/perf/arch/x86/tests/Build b/kernel/tools/perf/arch/x86/tests/Build
index b30eff9bc..cbb7e9781 100644
--- a/kernel/tools/perf/arch/x86/tests/Build
+++ b/kernel/tools/perf/arch/x86/tests/Build
@@ -1,2 +1,8 @@
-libperf-y += regs_load.o
-libperf-y += dwarf-unwind.o
+libperf-$(CONFIG_DWARF_UNWIND) += regs_load.o
+libperf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+
+libperf-y += arch-tests.o
+libperf-y += rdpmc.o
+libperf-y += perf-time-to-tsc.o
+libperf-$(CONFIG_AUXTRACE) += insn-x86.o
+libperf-y += intel-cqm.o
diff --git a/kernel/tools/perf/arch/x86/tests/arch-tests.c b/kernel/tools/perf/arch/x86/tests/arch-tests.c
new file mode 100644
index 000000000..2218cb64f
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/tests/arch-tests.c
@@ -0,0 +1,34 @@
+#include <string.h>
+#include "tests/tests.h"
+#include "arch-tests.h"
+
+struct test arch_tests[] = {
+ {
+ .desc = "x86 rdpmc test",
+ .func = test__rdpmc,
+ },
+ {
+ .desc = "Test converting perf time to TSC",
+ .func = test__perf_time_to_tsc,
+ },
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+ {
+ .desc = "Test dwarf unwind",
+ .func = test__dwarf_unwind,
+ },
+#endif
+#ifdef HAVE_AUXTRACE_SUPPORT
+ {
+ .desc = "Test x86 instruction decoder - new instructions",
+ .func = test__insn_x86,
+ },
+#endif
+ {
+ .desc = "Test intel cqm nmi context read",
+ .func = test__intel_cqm_count_nmi_context,
+ },
+ {
+ .func = NULL,
+ },
+
+};
diff --git a/kernel/tools/perf/arch/x86/tests/dwarf-unwind.c b/kernel/tools/perf/arch/x86/tests/dwarf-unwind.c
index d8bbf7ad1..7f209ce82 100644
--- a/kernel/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/kernel/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -5,6 +5,7 @@
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
+#include "arch-tests.h"
#define STACK_SIZE 8192
diff --git a/kernel/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk b/kernel/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk
new file mode 100644
index 000000000..a21454835
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk
@@ -0,0 +1,75 @@
+#!/bin/awk -f
+# gen-insn-x86-dat.awk: script to convert data for the insn-x86 test
+# Copyright (c) 2015, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+
+BEGIN {
+ print "/*"
+ print " * Generated by gen-insn-x86-dat.sh and gen-insn-x86-dat.awk"
+ print " * from insn-x86-dat-src.c for inclusion by insn-x86.c"
+ print " * Do not change this code."
+ print "*/\n"
+ op = ""
+ branch = ""
+ rel = 0
+ going = 0
+}
+
+/ Start here / {
+ going = 1
+}
+
+/ Stop here / {
+ going = 0
+}
+
+/^\s*[0-9a-fA-F]+\:/ {
+ if (going) {
+ colon_pos = index($0, ":")
+ useful_line = substr($0, colon_pos + 1)
+ first_pos = match(useful_line, "[0-9a-fA-F]")
+ useful_line = substr(useful_line, first_pos)
+ gsub("\t", "\\t", useful_line)
+ printf "{{"
+ len = 0
+ for (i = 2; i <= NF; i++) {
+ if (match($i, "^[0-9a-fA-F][0-9a-fA-F]$")) {
+ printf "0x%s, ", $i
+ len += 1
+ } else {
+ break
+ }
+ }
+ printf "}, %d, %s, \"%s\", \"%s\",", len, rel, op, branch
+ printf "\n\"%s\",},\n", useful_line
+ op = ""
+ branch = ""
+ rel = 0
+ }
+}
+
+/ Expecting: / {
+ expecting_str = " Expecting: "
+ expecting_len = length(expecting_str)
+ expecting_pos = index($0, expecting_str)
+ useful_line = substr($0, expecting_pos + expecting_len)
+ for (i = 1; i <= NF; i++) {
+ if ($i == "Expecting:") {
+ i++
+ op = $i
+ i++
+ branch = $i
+ i++
+ rel = $i
+ break
+ }
+ }
+}
diff --git a/kernel/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh b/kernel/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh
new file mode 100755
index 000000000..2d4ef94cf
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+# gen-insn-x86-dat: generate data for the insn-x86 test
+# Copyright (c) 2015, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+
+set -e
+
+if [ "$(uname -m)" != "x86_64" ]; then
+ echo "ERROR: This script only works on x86_64"
+ exit 1
+fi
+
+cd $(dirname $0)
+
+trap 'echo "Might need a more recent version of binutils"' EXIT
+
+echo "Compiling insn-x86-dat-src.c to 64-bit object"
+
+gcc -g -c insn-x86-dat-src.c
+
+objdump -dSw insn-x86-dat-src.o | awk -f gen-insn-x86-dat.awk > insn-x86-dat-64.c
+
+rm -f insn-x86-dat-src.o
+
+echo "Compiling insn-x86-dat-src.c to 32-bit object"
+
+gcc -g -c -m32 insn-x86-dat-src.c
+
+objdump -dSw insn-x86-dat-src.o | awk -f gen-insn-x86-dat.awk > insn-x86-dat-32.c
+
+rm -f insn-x86-dat-src.o
+
+trap - EXIT
+
+echo "Done (use git diff to see the changes)"
diff --git a/kernel/tools/perf/arch/x86/tests/insn-x86-dat-32.c b/kernel/tools/perf/arch/x86/tests/insn-x86-dat-32.c
new file mode 100644
index 000000000..3b491cfe2
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/tests/insn-x86-dat-32.c
@@ -0,0 +1,658 @@
+/*
+ * Generated by gen-insn-x86-dat.sh and gen-insn-x86-dat.awk
+ * from insn-x86-dat-src.c for inclusion by insn-x86.c
+ * Do not change this code.
+*/
+
+{{0x0f, 0x31, }, 2, 0, "", "",
+"0f 31 \trdtsc ",},
+{{0xf3, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
+"f3 0f 1b 00 \tbndmk (%eax),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1b 05 78 56 34 12 \tbndmk 0x12345678,%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
+"f3 0f 1b 18 \tbndmk (%eax),%bnd3",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
+"f3 0f 1b 04 01 \tbndmk (%ecx,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 04 05 78 56 34 12 \tbndmk 0x12345678(,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
+"f3 0f 1b 04 08 \tbndmk (%eax,%ecx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
+"f3 0f 1b 04 c8 \tbndmk (%eax,%ecx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
+"f3 0f 1b 40 12 \tbndmk 0x12(%eax),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
+"f3 0f 1b 45 12 \tbndmk 0x12(%ebp),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 01 12 \tbndmk 0x12(%ecx,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 05 12 \tbndmk 0x12(%ebp,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 08 12 \tbndmk 0x12(%eax,%ecx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 c8 12 \tbndmk 0x12(%eax,%ecx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1b 80 78 56 34 12 \tbndmk 0x12345678(%eax),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1b 85 78 56 34 12 \tbndmk 0x12345678(%ebp),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 01 78 56 34 12 \tbndmk 0x12345678(%ecx,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 05 78 56 34 12 \tbndmk 0x12345678(%ebp,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 08 78 56 34 12 \tbndmk 0x12345678(%eax,%ecx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 c8 78 56 34 12 \tbndmk 0x12345678(%eax,%ecx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
+"f3 0f 1a 00 \tbndcl (%eax),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1a 05 78 56 34 12 \tbndcl 0x12345678,%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
+"f3 0f 1a 18 \tbndcl (%eax),%bnd3",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
+"f3 0f 1a 04 01 \tbndcl (%ecx,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 04 05 78 56 34 12 \tbndcl 0x12345678(,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
+"f3 0f 1a 04 08 \tbndcl (%eax,%ecx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
+"f3 0f 1a 04 c8 \tbndcl (%eax,%ecx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
+"f3 0f 1a 40 12 \tbndcl 0x12(%eax),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
+"f3 0f 1a 45 12 \tbndcl 0x12(%ebp),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 01 12 \tbndcl 0x12(%ecx,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 05 12 \tbndcl 0x12(%ebp,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 08 12 \tbndcl 0x12(%eax,%ecx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 c8 12 \tbndcl 0x12(%eax,%ecx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1a 80 78 56 34 12 \tbndcl 0x12345678(%eax),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1a 85 78 56 34 12 \tbndcl 0x12345678(%ebp),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 01 78 56 34 12 \tbndcl 0x12345678(%ecx,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 05 78 56 34 12 \tbndcl 0x12345678(%ebp,%eax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 08 78 56 34 12 \tbndcl 0x12345678(%eax,%ecx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 c8 78 56 34 12 \tbndcl 0x12345678(%eax,%ecx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0xc0, }, 4, 0, "", "",
+"f3 0f 1a c0 \tbndcl %eax,%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
+"f2 0f 1a 00 \tbndcu (%eax),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1a 05 78 56 34 12 \tbndcu 0x12345678,%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
+"f2 0f 1a 18 \tbndcu (%eax),%bnd3",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
+"f2 0f 1a 04 01 \tbndcu (%ecx,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 04 05 78 56 34 12 \tbndcu 0x12345678(,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
+"f2 0f 1a 04 08 \tbndcu (%eax,%ecx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
+"f2 0f 1a 04 c8 \tbndcu (%eax,%ecx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
+"f2 0f 1a 40 12 \tbndcu 0x12(%eax),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
+"f2 0f 1a 45 12 \tbndcu 0x12(%ebp),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 01 12 \tbndcu 0x12(%ecx,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 05 12 \tbndcu 0x12(%ebp,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 08 12 \tbndcu 0x12(%eax,%ecx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 c8 12 \tbndcu 0x12(%eax,%ecx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1a 80 78 56 34 12 \tbndcu 0x12345678(%eax),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1a 85 78 56 34 12 \tbndcu 0x12345678(%ebp),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 01 78 56 34 12 \tbndcu 0x12345678(%ecx,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 05 78 56 34 12 \tbndcu 0x12345678(%ebp,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 08 78 56 34 12 \tbndcu 0x12345678(%eax,%ecx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 c8 78 56 34 12 \tbndcu 0x12345678(%eax,%ecx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0xc0, }, 4, 0, "", "",
+"f2 0f 1a c0 \tbndcu %eax,%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
+"f2 0f 1b 00 \tbndcn (%eax),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1b 05 78 56 34 12 \tbndcn 0x12345678,%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
+"f2 0f 1b 18 \tbndcn (%eax),%bnd3",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
+"f2 0f 1b 04 01 \tbndcn (%ecx,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 04 05 78 56 34 12 \tbndcn 0x12345678(,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
+"f2 0f 1b 04 08 \tbndcn (%eax,%ecx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
+"f2 0f 1b 04 c8 \tbndcn (%eax,%ecx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
+"f2 0f 1b 40 12 \tbndcn 0x12(%eax),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
+"f2 0f 1b 45 12 \tbndcn 0x12(%ebp),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 01 12 \tbndcn 0x12(%ecx,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 05 12 \tbndcn 0x12(%ebp,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 08 12 \tbndcn 0x12(%eax,%ecx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 c8 12 \tbndcn 0x12(%eax,%ecx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1b 80 78 56 34 12 \tbndcn 0x12345678(%eax),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1b 85 78 56 34 12 \tbndcn 0x12345678(%ebp),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 01 78 56 34 12 \tbndcn 0x12345678(%ecx,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 05 78 56 34 12 \tbndcn 0x12345678(%ebp,%eax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 08 78 56 34 12 \tbndcn 0x12345678(%eax,%ecx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 c8 78 56 34 12 \tbndcn 0x12345678(%eax,%ecx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0xc0, }, 4, 0, "", "",
+"f2 0f 1b c0 \tbndcn %eax,%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
+"66 0f 1a 00 \tbndmov (%eax),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1a 05 78 56 34 12 \tbndmov 0x12345678,%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
+"66 0f 1a 18 \tbndmov (%eax),%bnd3",},
+{{0x66, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
+"66 0f 1a 04 01 \tbndmov (%ecx,%eax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 04 05 78 56 34 12 \tbndmov 0x12345678(,%eax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
+"66 0f 1a 04 08 \tbndmov (%eax,%ecx,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
+"66 0f 1a 04 c8 \tbndmov (%eax,%ecx,8),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
+"66 0f 1a 40 12 \tbndmov 0x12(%eax),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
+"66 0f 1a 45 12 \tbndmov 0x12(%ebp),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 01 12 \tbndmov 0x12(%ecx,%eax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 05 12 \tbndmov 0x12(%ebp,%eax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 08 12 \tbndmov 0x12(%eax,%ecx,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 c8 12 \tbndmov 0x12(%eax,%ecx,8),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1a 80 78 56 34 12 \tbndmov 0x12345678(%eax),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1a 85 78 56 34 12 \tbndmov 0x12345678(%ebp),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 01 78 56 34 12 \tbndmov 0x12345678(%ecx,%eax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 05 78 56 34 12 \tbndmov 0x12345678(%ebp,%eax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 08 78 56 34 12 \tbndmov 0x12345678(%eax,%ecx,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 c8 78 56 34 12 \tbndmov 0x12345678(%eax,%ecx,8),%bnd0",},
+{{0x66, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
+"66 0f 1b 00 \tbndmov %bnd0,(%eax)",},
+{{0x66, 0x0f, 0x1b, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1b 05 78 56 34 12 \tbndmov %bnd0,0x12345678",},
+{{0x66, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
+"66 0f 1b 18 \tbndmov %bnd3,(%eax)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
+"66 0f 1b 04 01 \tbndmov %bnd0,(%ecx,%eax,1)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 04 05 78 56 34 12 \tbndmov %bnd0,0x12345678(,%eax,1)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
+"66 0f 1b 04 08 \tbndmov %bnd0,(%eax,%ecx,1)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
+"66 0f 1b 04 c8 \tbndmov %bnd0,(%eax,%ecx,8)",},
+{{0x66, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
+"66 0f 1b 40 12 \tbndmov %bnd0,0x12(%eax)",},
+{{0x66, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
+"66 0f 1b 45 12 \tbndmov %bnd0,0x12(%ebp)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 01 12 \tbndmov %bnd0,0x12(%ecx,%eax,1)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 05 12 \tbndmov %bnd0,0x12(%ebp,%eax,1)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 08 12 \tbndmov %bnd0,0x12(%eax,%ecx,1)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 c8 12 \tbndmov %bnd0,0x12(%eax,%ecx,8)",},
+{{0x66, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1b 80 78 56 34 12 \tbndmov %bnd0,0x12345678(%eax)",},
+{{0x66, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1b 85 78 56 34 12 \tbndmov %bnd0,0x12345678(%ebp)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 01 78 56 34 12 \tbndmov %bnd0,0x12345678(%ecx,%eax,1)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 05 78 56 34 12 \tbndmov %bnd0,0x12345678(%ebp,%eax,1)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 08 78 56 34 12 \tbndmov %bnd0,0x12345678(%eax,%ecx,1)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 c8 78 56 34 12 \tbndmov %bnd0,0x12345678(%eax,%ecx,8)",},
+{{0x66, 0x0f, 0x1a, 0xc8, }, 4, 0, "", "",
+"66 0f 1a c8 \tbndmov %bnd0,%bnd1",},
+{{0x66, 0x0f, 0x1a, 0xc1, }, 4, 0, "", "",
+"66 0f 1a c1 \tbndmov %bnd1,%bnd0",},
+{{0x0f, 0x1a, 0x00, }, 3, 0, "", "",
+"0f 1a 00 \tbndldx (%eax),%bnd0",},
+{{0x0f, 0x1a, 0x05, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1a 05 78 56 34 12 \tbndldx 0x12345678,%bnd0",},
+{{0x0f, 0x1a, 0x18, }, 3, 0, "", "",
+"0f 1a 18 \tbndldx (%eax),%bnd3",},
+{{0x0f, 0x1a, 0x04, 0x01, }, 4, 0, "", "",
+"0f 1a 04 01 \tbndldx (%ecx,%eax,1),%bnd0",},
+{{0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 04 05 78 56 34 12 \tbndldx 0x12345678(,%eax,1),%bnd0",},
+{{0x0f, 0x1a, 0x04, 0x08, }, 4, 0, "", "",
+"0f 1a 04 08 \tbndldx (%eax,%ecx,1),%bnd0",},
+{{0x0f, 0x1a, 0x40, 0x12, }, 4, 0, "", "",
+"0f 1a 40 12 \tbndldx 0x12(%eax),%bnd0",},
+{{0x0f, 0x1a, 0x45, 0x12, }, 4, 0, "", "",
+"0f 1a 45 12 \tbndldx 0x12(%ebp),%bnd0",},
+{{0x0f, 0x1a, 0x44, 0x01, 0x12, }, 5, 0, "", "",
+"0f 1a 44 01 12 \tbndldx 0x12(%ecx,%eax,1),%bnd0",},
+{{0x0f, 0x1a, 0x44, 0x05, 0x12, }, 5, 0, "", "",
+"0f 1a 44 05 12 \tbndldx 0x12(%ebp,%eax,1),%bnd0",},
+{{0x0f, 0x1a, 0x44, 0x08, 0x12, }, 5, 0, "", "",
+"0f 1a 44 08 12 \tbndldx 0x12(%eax,%ecx,1),%bnd0",},
+{{0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1a 80 78 56 34 12 \tbndldx 0x12345678(%eax),%bnd0",},
+{{0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1a 85 78 56 34 12 \tbndldx 0x12345678(%ebp),%bnd0",},
+{{0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 84 01 78 56 34 12 \tbndldx 0x12345678(%ecx,%eax,1),%bnd0",},
+{{0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 84 05 78 56 34 12 \tbndldx 0x12345678(%ebp,%eax,1),%bnd0",},
+{{0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 84 08 78 56 34 12 \tbndldx 0x12345678(%eax,%ecx,1),%bnd0",},
+{{0x0f, 0x1b, 0x00, }, 3, 0, "", "",
+"0f 1b 00 \tbndstx %bnd0,(%eax)",},
+{{0x0f, 0x1b, 0x05, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1b 05 78 56 34 12 \tbndstx %bnd0,0x12345678",},
+{{0x0f, 0x1b, 0x18, }, 3, 0, "", "",
+"0f 1b 18 \tbndstx %bnd3,(%eax)",},
+{{0x0f, 0x1b, 0x04, 0x01, }, 4, 0, "", "",
+"0f 1b 04 01 \tbndstx %bnd0,(%ecx,%eax,1)",},
+{{0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 04 05 78 56 34 12 \tbndstx %bnd0,0x12345678(,%eax,1)",},
+{{0x0f, 0x1b, 0x04, 0x08, }, 4, 0, "", "",
+"0f 1b 04 08 \tbndstx %bnd0,(%eax,%ecx,1)",},
+{{0x0f, 0x1b, 0x40, 0x12, }, 4, 0, "", "",
+"0f 1b 40 12 \tbndstx %bnd0,0x12(%eax)",},
+{{0x0f, 0x1b, 0x45, 0x12, }, 4, 0, "", "",
+"0f 1b 45 12 \tbndstx %bnd0,0x12(%ebp)",},
+{{0x0f, 0x1b, 0x44, 0x01, 0x12, }, 5, 0, "", "",
+"0f 1b 44 01 12 \tbndstx %bnd0,0x12(%ecx,%eax,1)",},
+{{0x0f, 0x1b, 0x44, 0x05, 0x12, }, 5, 0, "", "",
+"0f 1b 44 05 12 \tbndstx %bnd0,0x12(%ebp,%eax,1)",},
+{{0x0f, 0x1b, 0x44, 0x08, 0x12, }, 5, 0, "", "",
+"0f 1b 44 08 12 \tbndstx %bnd0,0x12(%eax,%ecx,1)",},
+{{0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1b 80 78 56 34 12 \tbndstx %bnd0,0x12345678(%eax)",},
+{{0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1b 85 78 56 34 12 \tbndstx %bnd0,0x12345678(%ebp)",},
+{{0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 84 01 78 56 34 12 \tbndstx %bnd0,0x12345678(%ecx,%eax,1)",},
+{{0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 84 05 78 56 34 12 \tbndstx %bnd0,0x12345678(%ebp,%eax,1)",},
+{{0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 84 08 78 56 34 12 \tbndstx %bnd0,0x12345678(%eax,%ecx,1)",},
+{{0xf2, 0xe8, 0xfc, 0xff, 0xff, 0xff, }, 6, 0xfffffffc, "call", "unconditional",
+"f2 e8 fc ff ff ff \tbnd call 3c3 <main+0x3c3>",},
+{{0xf2, 0xff, 0x10, }, 3, 0, "call", "indirect",
+"f2 ff 10 \tbnd call *(%eax)",},
+{{0xf2, 0xc3, }, 2, 0, "ret", "indirect",
+"f2 c3 \tbnd ret ",},
+{{0xf2, 0xe9, 0xfc, 0xff, 0xff, 0xff, }, 6, 0xfffffffc, "jmp", "unconditional",
+"f2 e9 fc ff ff ff \tbnd jmp 3ce <main+0x3ce>",},
+{{0xf2, 0xe9, 0xfc, 0xff, 0xff, 0xff, }, 6, 0xfffffffc, "jmp", "unconditional",
+"f2 e9 fc ff ff ff \tbnd jmp 3d4 <main+0x3d4>",},
+{{0xf2, 0xff, 0x21, }, 3, 0, "jmp", "indirect",
+"f2 ff 21 \tbnd jmp *(%ecx)",},
+{{0xf2, 0x0f, 0x85, 0xfc, 0xff, 0xff, 0xff, }, 7, 0xfffffffc, "jcc", "conditional",
+"f2 0f 85 fc ff ff ff \tbnd jne 3de <main+0x3de>",},
+{{0x0f, 0x3a, 0xcc, 0xc1, 0x00, }, 5, 0, "", "",
+"0f 3a cc c1 00 \tsha1rnds4 $0x0,%xmm1,%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0xd7, 0x91, }, 5, 0, "", "",
+"0f 3a cc d7 91 \tsha1rnds4 $0x91,%xmm7,%xmm2",},
+{{0x0f, 0x3a, 0xcc, 0x00, 0x91, }, 5, 0, "", "",
+"0f 3a cc 00 91 \tsha1rnds4 $0x91,(%eax),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
+"0f 3a cc 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678,%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x18, 0x91, }, 5, 0, "", "",
+"0f 3a cc 18 91 \tsha1rnds4 $0x91,(%eax),%xmm3",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0x01, 0x91, }, 6, 0, "", "",
+"0f 3a cc 04 01 91 \tsha1rnds4 $0x91,(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 04 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(,%eax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0x08, 0x91, }, 6, 0, "", "",
+"0f 3a cc 04 08 91 \tsha1rnds4 $0x91,(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0xc8, 0x91, }, 6, 0, "", "",
+"0f 3a cc 04 c8 91 \tsha1rnds4 $0x91,(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x40, 0x12, 0x91, }, 6, 0, "", "",
+"0f 3a cc 40 12 91 \tsha1rnds4 $0x91,0x12(%eax),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x45, 0x12, 0x91, }, 6, 0, "", "",
+"0f 3a cc 45 12 91 \tsha1rnds4 $0x91,0x12(%ebp),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0x01, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 01 12 91 \tsha1rnds4 $0x91,0x12(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0x05, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 05 12 91 \tsha1rnds4 $0x91,0x12(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0x08, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 08 12 91 \tsha1rnds4 $0x91,0x12(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0xc8, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 c8 12 91 \tsha1rnds4 $0x91,0x12(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x80, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
+"0f 3a cc 80 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%eax),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x85, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
+"0f 3a cc 85 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%ebp),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 01 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 08 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 c8 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0xc1, }, 4, 0, "", "",
+"0f 38 c8 c1 \tsha1nexte %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xc8, 0xd7, }, 4, 0, "", "",
+"0f 38 c8 d7 \tsha1nexte %xmm7,%xmm2",},
+{{0x0f, 0x38, 0xc8, 0x00, }, 4, 0, "", "",
+"0f 38 c8 00 \tsha1nexte (%eax),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c8 05 78 56 34 12 \tsha1nexte 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x18, }, 4, 0, "", "",
+"0f 38 c8 18 \tsha1nexte (%eax),%xmm3",},
+{{0x0f, 0x38, 0xc8, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 c8 04 01 \tsha1nexte (%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 04 05 78 56 34 12 \tsha1nexte 0x12345678(,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 c8 04 08 \tsha1nexte (%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 c8 04 c8 \tsha1nexte (%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 c8 40 12 \tsha1nexte 0x12(%eax),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 c8 45 12 \tsha1nexte 0x12(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 01 12 \tsha1nexte 0x12(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 05 12 \tsha1nexte 0x12(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 08 12 \tsha1nexte 0x12(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 c8 12 \tsha1nexte 0x12(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c8 80 78 56 34 12 \tsha1nexte 0x12345678(%eax),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c8 85 78 56 34 12 \tsha1nexte 0x12345678(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 01 78 56 34 12 \tsha1nexte 0x12345678(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 05 78 56 34 12 \tsha1nexte 0x12345678(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 08 78 56 34 12 \tsha1nexte 0x12345678(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 c8 78 56 34 12 \tsha1nexte 0x12345678(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0xc1, }, 4, 0, "", "",
+"0f 38 c9 c1 \tsha1msg1 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xc9, 0xd7, }, 4, 0, "", "",
+"0f 38 c9 d7 \tsha1msg1 %xmm7,%xmm2",},
+{{0x0f, 0x38, 0xc9, 0x00, }, 4, 0, "", "",
+"0f 38 c9 00 \tsha1msg1 (%eax),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c9 05 78 56 34 12 \tsha1msg1 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x18, }, 4, 0, "", "",
+"0f 38 c9 18 \tsha1msg1 (%eax),%xmm3",},
+{{0x0f, 0x38, 0xc9, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 c9 04 01 \tsha1msg1 (%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 04 05 78 56 34 12 \tsha1msg1 0x12345678(,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 c9 04 08 \tsha1msg1 (%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 c9 04 c8 \tsha1msg1 (%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 c9 40 12 \tsha1msg1 0x12(%eax),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 c9 45 12 \tsha1msg1 0x12(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 01 12 \tsha1msg1 0x12(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 05 12 \tsha1msg1 0x12(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 08 12 \tsha1msg1 0x12(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 c8 12 \tsha1msg1 0x12(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c9 80 78 56 34 12 \tsha1msg1 0x12345678(%eax),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c9 85 78 56 34 12 \tsha1msg1 0x12345678(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 01 78 56 34 12 \tsha1msg1 0x12345678(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 05 78 56 34 12 \tsha1msg1 0x12345678(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 08 78 56 34 12 \tsha1msg1 0x12345678(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 c8 78 56 34 12 \tsha1msg1 0x12345678(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xca, 0xc1, }, 4, 0, "", "",
+"0f 38 ca c1 \tsha1msg2 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xca, 0xd7, }, 4, 0, "", "",
+"0f 38 ca d7 \tsha1msg2 %xmm7,%xmm2",},
+{{0x0f, 0x38, 0xca, 0x00, }, 4, 0, "", "",
+"0f 38 ca 00 \tsha1msg2 (%eax),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 ca 05 78 56 34 12 \tsha1msg2 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xca, 0x18, }, 4, 0, "", "",
+"0f 38 ca 18 \tsha1msg2 (%eax),%xmm3",},
+{{0x0f, 0x38, 0xca, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 ca 04 01 \tsha1msg2 (%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 04 05 78 56 34 12 \tsha1msg2 0x12345678(,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 ca 04 08 \tsha1msg2 (%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 ca 04 c8 \tsha1msg2 (%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 ca 40 12 \tsha1msg2 0x12(%eax),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 ca 45 12 \tsha1msg2 0x12(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 01 12 \tsha1msg2 0x12(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 05 12 \tsha1msg2 0x12(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 08 12 \tsha1msg2 0x12(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 c8 12 \tsha1msg2 0x12(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 ca 80 78 56 34 12 \tsha1msg2 0x12345678(%eax),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 ca 85 78 56 34 12 \tsha1msg2 0x12345678(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 01 78 56 34 12 \tsha1msg2 0x12345678(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 05 78 56 34 12 \tsha1msg2 0x12345678(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 08 78 56 34 12 \tsha1msg2 0x12345678(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 c8 78 56 34 12 \tsha1msg2 0x12345678(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xcb, 0xcc, }, 4, 0, "", "",
+"0f 38 cb cc \tsha256rnds2 %xmm0,%xmm4,%xmm1",},
+{{0x0f, 0x38, 0xcb, 0xd7, }, 4, 0, "", "",
+"0f 38 cb d7 \tsha256rnds2 %xmm0,%xmm7,%xmm2",},
+{{0x0f, 0x38, 0xcb, 0x08, }, 4, 0, "", "",
+"0f 38 cb 08 \tsha256rnds2 %xmm0,(%eax),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0d, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cb 0d 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678,%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x18, }, 4, 0, "", "",
+"0f 38 cb 18 \tsha256rnds2 %xmm0,(%eax),%xmm3",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0x01, }, 5, 0, "", "",
+"0f 38 cb 0c 01 \tsha256rnds2 %xmm0,(%ecx,%eax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 0c 05 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(,%eax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0x08, }, 5, 0, "", "",
+"0f 38 cb 0c 08 \tsha256rnds2 %xmm0,(%eax,%ecx,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0xc8, }, 5, 0, "", "",
+"0f 38 cb 0c c8 \tsha256rnds2 %xmm0,(%eax,%ecx,8),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x48, 0x12, }, 5, 0, "", "",
+"0f 38 cb 48 12 \tsha256rnds2 %xmm0,0x12(%eax),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4d, 0x12, }, 5, 0, "", "",
+"0f 38 cb 4d 12 \tsha256rnds2 %xmm0,0x12(%ebp),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c 01 12 \tsha256rnds2 %xmm0,0x12(%ecx,%eax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c 05 12 \tsha256rnds2 %xmm0,0x12(%ebp,%eax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c 08 12 \tsha256rnds2 %xmm0,0x12(%eax,%ecx,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c c8 12 \tsha256rnds2 %xmm0,0x12(%eax,%ecx,8),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x88, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cb 88 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%eax),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8d, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cb 8d 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%ebp),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c 01 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%ecx,%eax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c 05 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%ebp,%eax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c 08 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%eax,%ecx,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c c8 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x0f, 0x38, 0xcc, 0xc1, }, 4, 0, "", "",
+"0f 38 cc c1 \tsha256msg1 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xcc, 0xd7, }, 4, 0, "", "",
+"0f 38 cc d7 \tsha256msg1 %xmm7,%xmm2",},
+{{0x0f, 0x38, 0xcc, 0x00, }, 4, 0, "", "",
+"0f 38 cc 00 \tsha256msg1 (%eax),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cc 05 78 56 34 12 \tsha256msg1 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x18, }, 4, 0, "", "",
+"0f 38 cc 18 \tsha256msg1 (%eax),%xmm3",},
+{{0x0f, 0x38, 0xcc, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 cc 04 01 \tsha256msg1 (%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 04 05 78 56 34 12 \tsha256msg1 0x12345678(,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 cc 04 08 \tsha256msg1 (%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 cc 04 c8 \tsha256msg1 (%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 cc 40 12 \tsha256msg1 0x12(%eax),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 cc 45 12 \tsha256msg1 0x12(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 01 12 \tsha256msg1 0x12(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 05 12 \tsha256msg1 0x12(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 08 12 \tsha256msg1 0x12(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 c8 12 \tsha256msg1 0x12(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cc 80 78 56 34 12 \tsha256msg1 0x12345678(%eax),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cc 85 78 56 34 12 \tsha256msg1 0x12345678(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 01 78 56 34 12 \tsha256msg1 0x12345678(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 05 78 56 34 12 \tsha256msg1 0x12345678(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 08 78 56 34 12 \tsha256msg1 0x12345678(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 c8 78 56 34 12 \tsha256msg1 0x12345678(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0xc1, }, 4, 0, "", "",
+"0f 38 cd c1 \tsha256msg2 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xcd, 0xd7, }, 4, 0, "", "",
+"0f 38 cd d7 \tsha256msg2 %xmm7,%xmm2",},
+{{0x0f, 0x38, 0xcd, 0x00, }, 4, 0, "", "",
+"0f 38 cd 00 \tsha256msg2 (%eax),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cd 05 78 56 34 12 \tsha256msg2 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x18, }, 4, 0, "", "",
+"0f 38 cd 18 \tsha256msg2 (%eax),%xmm3",},
+{{0x0f, 0x38, 0xcd, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 cd 04 01 \tsha256msg2 (%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 04 05 78 56 34 12 \tsha256msg2 0x12345678(,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 cd 04 08 \tsha256msg2 (%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 cd 04 c8 \tsha256msg2 (%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 cd 40 12 \tsha256msg2 0x12(%eax),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 cd 45 12 \tsha256msg2 0x12(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 01 12 \tsha256msg2 0x12(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 05 12 \tsha256msg2 0x12(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 08 12 \tsha256msg2 0x12(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 c8 12 \tsha256msg2 0x12(%eax,%ecx,8),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cd 80 78 56 34 12 \tsha256msg2 0x12345678(%eax),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cd 85 78 56 34 12 \tsha256msg2 0x12345678(%ebp),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 01 78 56 34 12 \tsha256msg2 0x12345678(%ecx,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 05 78 56 34 12 \tsha256msg2 0x12345678(%ebp,%eax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 08 78 56 34 12 \tsha256msg2 0x12345678(%eax,%ecx,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 c8 78 56 34 12 \tsha256msg2 0x12345678(%eax,%ecx,8),%xmm0",},
+{{0x66, 0x0f, 0xae, 0x38, }, 4, 0, "", "",
+"66 0f ae 38 \tclflushopt (%eax)",},
+{{0x66, 0x0f, 0xae, 0x3d, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f ae 3d 78 56 34 12 \tclflushopt 0x12345678",},
+{{0x66, 0x0f, 0xae, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f ae bc c8 78 56 34 12 \tclflushopt 0x12345678(%eax,%ecx,8)",},
+{{0x0f, 0xae, 0x38, }, 3, 0, "", "",
+"0f ae 38 \tclflush (%eax)",},
+{{0x0f, 0xae, 0xf8, }, 3, 0, "", "",
+"0f ae f8 \tsfence ",},
+{{0x66, 0x0f, 0xae, 0x30, }, 4, 0, "", "",
+"66 0f ae 30 \tclwb (%eax)",},
+{{0x66, 0x0f, 0xae, 0x35, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f ae 35 78 56 34 12 \tclwb 0x12345678",},
+{{0x66, 0x0f, 0xae, 0xb4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f ae b4 c8 78 56 34 12 \tclwb 0x12345678(%eax,%ecx,8)",},
+{{0x0f, 0xae, 0x30, }, 3, 0, "", "",
+"0f ae 30 \txsaveopt (%eax)",},
+{{0x0f, 0xae, 0xf0, }, 3, 0, "", "",
+"0f ae f0 \tmfence ",},
+{{0x0f, 0xc7, 0x20, }, 3, 0, "", "",
+"0f c7 20 \txsavec (%eax)",},
+{{0x0f, 0xc7, 0x25, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f c7 25 78 56 34 12 \txsavec 0x12345678",},
+{{0x0f, 0xc7, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 a4 c8 78 56 34 12 \txsavec 0x12345678(%eax,%ecx,8)",},
+{{0x0f, 0xc7, 0x28, }, 3, 0, "", "",
+"0f c7 28 \txsaves (%eax)",},
+{{0x0f, 0xc7, 0x2d, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f c7 2d 78 56 34 12 \txsaves 0x12345678",},
+{{0x0f, 0xc7, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 ac c8 78 56 34 12 \txsaves 0x12345678(%eax,%ecx,8)",},
+{{0x0f, 0xc7, 0x18, }, 3, 0, "", "",
+"0f c7 18 \txrstors (%eax)",},
+{{0x0f, 0xc7, 0x1d, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f c7 1d 78 56 34 12 \txrstors 0x12345678",},
+{{0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%eax,%ecx,8)",},
+{{0x66, 0x0f, 0xae, 0xf8, }, 4, 0, "", "",
+"66 0f ae f8 \tpcommit ",},
diff --git a/kernel/tools/perf/arch/x86/tests/insn-x86-dat-64.c b/kernel/tools/perf/arch/x86/tests/insn-x86-dat-64.c
new file mode 100644
index 000000000..4fe7cce17
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/tests/insn-x86-dat-64.c
@@ -0,0 +1,768 @@
+/*
+ * Generated by gen-insn-x86-dat.sh and gen-insn-x86-dat.awk
+ * from insn-x86-dat-src.c for inclusion by insn-x86.c
+ * Do not change this code.
+*/
+
+{{0x0f, 0x31, }, 2, 0, "", "",
+"0f 31 \trdtsc ",},
+{{0xf3, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
+"f3 0f 1b 00 \tbndmk (%rax),%bnd0",},
+{{0xf3, 0x41, 0x0f, 0x1b, 0x00, }, 5, 0, "", "",
+"f3 41 0f 1b 00 \tbndmk (%r8),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 04 25 78 56 34 12 \tbndmk 0x12345678,%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
+"f3 0f 1b 18 \tbndmk (%rax),%bnd3",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
+"f3 0f 1b 04 01 \tbndmk (%rcx,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 04 05 78 56 34 12 \tbndmk 0x12345678(,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
+"f3 0f 1b 04 08 \tbndmk (%rax,%rcx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
+"f3 0f 1b 04 c8 \tbndmk (%rax,%rcx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
+"f3 0f 1b 40 12 \tbndmk 0x12(%rax),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
+"f3 0f 1b 45 12 \tbndmk 0x12(%rbp),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 01 12 \tbndmk 0x12(%rcx,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 05 12 \tbndmk 0x12(%rbp,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 08 12 \tbndmk 0x12(%rax,%rcx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f3 0f 1b 44 c8 12 \tbndmk 0x12(%rax,%rcx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1b 80 78 56 34 12 \tbndmk 0x12345678(%rax),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1b 85 78 56 34 12 \tbndmk 0x12345678(%rbp),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 01 78 56 34 12 \tbndmk 0x12345678(%rcx,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 05 78 56 34 12 \tbndmk 0x12345678(%rbp,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 08 78 56 34 12 \tbndmk 0x12345678(%rax,%rcx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1b 84 c8 78 56 34 12 \tbndmk 0x12345678(%rax,%rcx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
+"f3 0f 1a 00 \tbndcl (%rax),%bnd0",},
+{{0xf3, 0x41, 0x0f, 0x1a, 0x00, }, 5, 0, "", "",
+"f3 41 0f 1a 00 \tbndcl (%r8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 04 25 78 56 34 12 \tbndcl 0x12345678,%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
+"f3 0f 1a 18 \tbndcl (%rax),%bnd3",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
+"f3 0f 1a 04 01 \tbndcl (%rcx,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 04 05 78 56 34 12 \tbndcl 0x12345678(,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
+"f3 0f 1a 04 08 \tbndcl (%rax,%rcx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
+"f3 0f 1a 04 c8 \tbndcl (%rax,%rcx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
+"f3 0f 1a 40 12 \tbndcl 0x12(%rax),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
+"f3 0f 1a 45 12 \tbndcl 0x12(%rbp),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 01 12 \tbndcl 0x12(%rcx,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 05 12 \tbndcl 0x12(%rbp,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 08 12 \tbndcl 0x12(%rax,%rcx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f3 0f 1a 44 c8 12 \tbndcl 0x12(%rax,%rcx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1a 80 78 56 34 12 \tbndcl 0x12345678(%rax),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f 1a 85 78 56 34 12 \tbndcl 0x12345678(%rbp),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 01 78 56 34 12 \tbndcl 0x12345678(%rcx,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 05 78 56 34 12 \tbndcl 0x12345678(%rbp,%rax,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 08 78 56 34 12 \tbndcl 0x12345678(%rax,%rcx,1),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f 1a 84 c8 78 56 34 12 \tbndcl 0x12345678(%rax,%rcx,8),%bnd0",},
+{{0xf3, 0x0f, 0x1a, 0xc0, }, 4, 0, "", "",
+"f3 0f 1a c0 \tbndcl %rax,%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
+"f2 0f 1a 00 \tbndcu (%rax),%bnd0",},
+{{0xf2, 0x41, 0x0f, 0x1a, 0x00, }, 5, 0, "", "",
+"f2 41 0f 1a 00 \tbndcu (%r8),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 04 25 78 56 34 12 \tbndcu 0x12345678,%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
+"f2 0f 1a 18 \tbndcu (%rax),%bnd3",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
+"f2 0f 1a 04 01 \tbndcu (%rcx,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 04 05 78 56 34 12 \tbndcu 0x12345678(,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
+"f2 0f 1a 04 08 \tbndcu (%rax,%rcx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
+"f2 0f 1a 04 c8 \tbndcu (%rax,%rcx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
+"f2 0f 1a 40 12 \tbndcu 0x12(%rax),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
+"f2 0f 1a 45 12 \tbndcu 0x12(%rbp),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 01 12 \tbndcu 0x12(%rcx,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 05 12 \tbndcu 0x12(%rbp,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 08 12 \tbndcu 0x12(%rax,%rcx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f2 0f 1a 44 c8 12 \tbndcu 0x12(%rax,%rcx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1a 80 78 56 34 12 \tbndcu 0x12345678(%rax),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1a 85 78 56 34 12 \tbndcu 0x12345678(%rbp),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 01 78 56 34 12 \tbndcu 0x12345678(%rcx,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 05 78 56 34 12 \tbndcu 0x12345678(%rbp,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 08 78 56 34 12 \tbndcu 0x12345678(%rax,%rcx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1a 84 c8 78 56 34 12 \tbndcu 0x12345678(%rax,%rcx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1a, 0xc0, }, 4, 0, "", "",
+"f2 0f 1a c0 \tbndcu %rax,%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
+"f2 0f 1b 00 \tbndcn (%rax),%bnd0",},
+{{0xf2, 0x41, 0x0f, 0x1b, 0x00, }, 5, 0, "", "",
+"f2 41 0f 1b 00 \tbndcn (%r8),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 04 25 78 56 34 12 \tbndcn 0x12345678,%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
+"f2 0f 1b 18 \tbndcn (%rax),%bnd3",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
+"f2 0f 1b 04 01 \tbndcn (%rcx,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 04 05 78 56 34 12 \tbndcn 0x12345678(,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
+"f2 0f 1b 04 08 \tbndcn (%rax,%rcx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
+"f2 0f 1b 04 c8 \tbndcn (%rax,%rcx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
+"f2 0f 1b 40 12 \tbndcn 0x12(%rax),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
+"f2 0f 1b 45 12 \tbndcn 0x12(%rbp),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 01 12 \tbndcn 0x12(%rcx,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 05 12 \tbndcn 0x12(%rbp,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 08 12 \tbndcn 0x12(%rax,%rcx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"f2 0f 1b 44 c8 12 \tbndcn 0x12(%rax,%rcx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1b 80 78 56 34 12 \tbndcn 0x12345678(%rax),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f2 0f 1b 85 78 56 34 12 \tbndcn 0x12345678(%rbp),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 01 78 56 34 12 \tbndcn 0x12345678(%rcx,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 05 78 56 34 12 \tbndcn 0x12345678(%rbp,%rax,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 08 78 56 34 12 \tbndcn 0x12345678(%rax,%rcx,1),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f2 0f 1b 84 c8 78 56 34 12 \tbndcn 0x12345678(%rax,%rcx,8),%bnd0",},
+{{0xf2, 0x0f, 0x1b, 0xc0, }, 4, 0, "", "",
+"f2 0f 1b c0 \tbndcn %rax,%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
+"66 0f 1a 00 \tbndmov (%rax),%bnd0",},
+{{0x66, 0x41, 0x0f, 0x1a, 0x00, }, 5, 0, "", "",
+"66 41 0f 1a 00 \tbndmov (%r8),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 04 25 78 56 34 12 \tbndmov 0x12345678,%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x18, }, 4, 0, "", "",
+"66 0f 1a 18 \tbndmov (%rax),%bnd3",},
+{{0x66, 0x0f, 0x1a, 0x04, 0x01, }, 5, 0, "", "",
+"66 0f 1a 04 01 \tbndmov (%rcx,%rax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 04 05 78 56 34 12 \tbndmov 0x12345678(,%rax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x04, 0x08, }, 5, 0, "", "",
+"66 0f 1a 04 08 \tbndmov (%rax,%rcx,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x04, 0xc8, }, 5, 0, "", "",
+"66 0f 1a 04 c8 \tbndmov (%rax,%rcx,8),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x40, 0x12, }, 5, 0, "", "",
+"66 0f 1a 40 12 \tbndmov 0x12(%rax),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x45, 0x12, }, 5, 0, "", "",
+"66 0f 1a 45 12 \tbndmov 0x12(%rbp),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 01 12 \tbndmov 0x12(%rcx,%rax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 05 12 \tbndmov 0x12(%rbp,%rax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 08 12 \tbndmov 0x12(%rax,%rcx,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"66 0f 1a 44 c8 12 \tbndmov 0x12(%rax,%rcx,8),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1a 80 78 56 34 12 \tbndmov 0x12345678(%rax),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1a 85 78 56 34 12 \tbndmov 0x12345678(%rbp),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 01 78 56 34 12 \tbndmov 0x12345678(%rcx,%rax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 05 78 56 34 12 \tbndmov 0x12345678(%rbp,%rax,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 08 78 56 34 12 \tbndmov 0x12345678(%rax,%rcx,1),%bnd0",},
+{{0x66, 0x0f, 0x1a, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1a 84 c8 78 56 34 12 \tbndmov 0x12345678(%rax,%rcx,8),%bnd0",},
+{{0x66, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
+"66 0f 1b 00 \tbndmov %bnd0,(%rax)",},
+{{0x66, 0x41, 0x0f, 0x1b, 0x00, }, 5, 0, "", "",
+"66 41 0f 1b 00 \tbndmov %bnd0,(%r8)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 04 25 78 56 34 12 \tbndmov %bnd0,0x12345678",},
+{{0x66, 0x0f, 0x1b, 0x18, }, 4, 0, "", "",
+"66 0f 1b 18 \tbndmov %bnd3,(%rax)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0x01, }, 5, 0, "", "",
+"66 0f 1b 04 01 \tbndmov %bnd0,(%rcx,%rax,1)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 04 05 78 56 34 12 \tbndmov %bnd0,0x12345678(,%rax,1)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0x08, }, 5, 0, "", "",
+"66 0f 1b 04 08 \tbndmov %bnd0,(%rax,%rcx,1)",},
+{{0x66, 0x0f, 0x1b, 0x04, 0xc8, }, 5, 0, "", "",
+"66 0f 1b 04 c8 \tbndmov %bnd0,(%rax,%rcx,8)",},
+{{0x66, 0x0f, 0x1b, 0x40, 0x12, }, 5, 0, "", "",
+"66 0f 1b 40 12 \tbndmov %bnd0,0x12(%rax)",},
+{{0x66, 0x0f, 0x1b, 0x45, 0x12, }, 5, 0, "", "",
+"66 0f 1b 45 12 \tbndmov %bnd0,0x12(%rbp)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 01 12 \tbndmov %bnd0,0x12(%rcx,%rax,1)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 05 12 \tbndmov %bnd0,0x12(%rbp,%rax,1)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 08 12 \tbndmov %bnd0,0x12(%rax,%rcx,1)",},
+{{0x66, 0x0f, 0x1b, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"66 0f 1b 44 c8 12 \tbndmov %bnd0,0x12(%rax,%rcx,8)",},
+{{0x66, 0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1b 80 78 56 34 12 \tbndmov %bnd0,0x12345678(%rax)",},
+{{0x66, 0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"66 0f 1b 85 78 56 34 12 \tbndmov %bnd0,0x12345678(%rbp)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 01 78 56 34 12 \tbndmov %bnd0,0x12345678(%rcx,%rax,1)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 05 78 56 34 12 \tbndmov %bnd0,0x12345678(%rbp,%rax,1)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 08 78 56 34 12 \tbndmov %bnd0,0x12345678(%rax,%rcx,1)",},
+{{0x66, 0x0f, 0x1b, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f 1b 84 c8 78 56 34 12 \tbndmov %bnd0,0x12345678(%rax,%rcx,8)",},
+{{0x66, 0x0f, 0x1a, 0xc8, }, 4, 0, "", "",
+"66 0f 1a c8 \tbndmov %bnd0,%bnd1",},
+{{0x66, 0x0f, 0x1a, 0xc1, }, 4, 0, "", "",
+"66 0f 1a c1 \tbndmov %bnd1,%bnd0",},
+{{0x0f, 0x1a, 0x00, }, 3, 0, "", "",
+"0f 1a 00 \tbndldx (%rax),%bnd0",},
+{{0x41, 0x0f, 0x1a, 0x00, }, 4, 0, "", "",
+"41 0f 1a 00 \tbndldx (%r8),%bnd0",},
+{{0x0f, 0x1a, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 04 25 78 56 34 12 \tbndldx 0x12345678,%bnd0",},
+{{0x0f, 0x1a, 0x18, }, 3, 0, "", "",
+"0f 1a 18 \tbndldx (%rax),%bnd3",},
+{{0x0f, 0x1a, 0x04, 0x01, }, 4, 0, "", "",
+"0f 1a 04 01 \tbndldx (%rcx,%rax,1),%bnd0",},
+{{0x0f, 0x1a, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 04 05 78 56 34 12 \tbndldx 0x12345678(,%rax,1),%bnd0",},
+{{0x0f, 0x1a, 0x04, 0x08, }, 4, 0, "", "",
+"0f 1a 04 08 \tbndldx (%rax,%rcx,1),%bnd0",},
+{{0x0f, 0x1a, 0x40, 0x12, }, 4, 0, "", "",
+"0f 1a 40 12 \tbndldx 0x12(%rax),%bnd0",},
+{{0x0f, 0x1a, 0x45, 0x12, }, 4, 0, "", "",
+"0f 1a 45 12 \tbndldx 0x12(%rbp),%bnd0",},
+{{0x0f, 0x1a, 0x44, 0x01, 0x12, }, 5, 0, "", "",
+"0f 1a 44 01 12 \tbndldx 0x12(%rcx,%rax,1),%bnd0",},
+{{0x0f, 0x1a, 0x44, 0x05, 0x12, }, 5, 0, "", "",
+"0f 1a 44 05 12 \tbndldx 0x12(%rbp,%rax,1),%bnd0",},
+{{0x0f, 0x1a, 0x44, 0x08, 0x12, }, 5, 0, "", "",
+"0f 1a 44 08 12 \tbndldx 0x12(%rax,%rcx,1),%bnd0",},
+{{0x0f, 0x1a, 0x80, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1a 80 78 56 34 12 \tbndldx 0x12345678(%rax),%bnd0",},
+{{0x0f, 0x1a, 0x85, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1a 85 78 56 34 12 \tbndldx 0x12345678(%rbp),%bnd0",},
+{{0x0f, 0x1a, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 84 01 78 56 34 12 \tbndldx 0x12345678(%rcx,%rax,1),%bnd0",},
+{{0x0f, 0x1a, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 84 05 78 56 34 12 \tbndldx 0x12345678(%rbp,%rax,1),%bnd0",},
+{{0x0f, 0x1a, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1a 84 08 78 56 34 12 \tbndldx 0x12345678(%rax,%rcx,1),%bnd0",},
+{{0x0f, 0x1b, 0x00, }, 3, 0, "", "",
+"0f 1b 00 \tbndstx %bnd0,(%rax)",},
+{{0x41, 0x0f, 0x1b, 0x00, }, 4, 0, "", "",
+"41 0f 1b 00 \tbndstx %bnd0,(%r8)",},
+{{0x0f, 0x1b, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 04 25 78 56 34 12 \tbndstx %bnd0,0x12345678",},
+{{0x0f, 0x1b, 0x18, }, 3, 0, "", "",
+"0f 1b 18 \tbndstx %bnd3,(%rax)",},
+{{0x0f, 0x1b, 0x04, 0x01, }, 4, 0, "", "",
+"0f 1b 04 01 \tbndstx %bnd0,(%rcx,%rax,1)",},
+{{0x0f, 0x1b, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 04 05 78 56 34 12 \tbndstx %bnd0,0x12345678(,%rax,1)",},
+{{0x0f, 0x1b, 0x04, 0x08, }, 4, 0, "", "",
+"0f 1b 04 08 \tbndstx %bnd0,(%rax,%rcx,1)",},
+{{0x0f, 0x1b, 0x40, 0x12, }, 4, 0, "", "",
+"0f 1b 40 12 \tbndstx %bnd0,0x12(%rax)",},
+{{0x0f, 0x1b, 0x45, 0x12, }, 4, 0, "", "",
+"0f 1b 45 12 \tbndstx %bnd0,0x12(%rbp)",},
+{{0x0f, 0x1b, 0x44, 0x01, 0x12, }, 5, 0, "", "",
+"0f 1b 44 01 12 \tbndstx %bnd0,0x12(%rcx,%rax,1)",},
+{{0x0f, 0x1b, 0x44, 0x05, 0x12, }, 5, 0, "", "",
+"0f 1b 44 05 12 \tbndstx %bnd0,0x12(%rbp,%rax,1)",},
+{{0x0f, 0x1b, 0x44, 0x08, 0x12, }, 5, 0, "", "",
+"0f 1b 44 08 12 \tbndstx %bnd0,0x12(%rax,%rcx,1)",},
+{{0x0f, 0x1b, 0x80, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1b 80 78 56 34 12 \tbndstx %bnd0,0x12345678(%rax)",},
+{{0x0f, 0x1b, 0x85, 0x78, 0x56, 0x34, 0x12, }, 7, 0, "", "",
+"0f 1b 85 78 56 34 12 \tbndstx %bnd0,0x12345678(%rbp)",},
+{{0x0f, 0x1b, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 84 01 78 56 34 12 \tbndstx %bnd0,0x12345678(%rcx,%rax,1)",},
+{{0x0f, 0x1b, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 84 05 78 56 34 12 \tbndstx %bnd0,0x12345678(%rbp,%rax,1)",},
+{{0x0f, 0x1b, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 1b 84 08 78 56 34 12 \tbndstx %bnd0,0x12345678(%rax,%rcx,1)",},
+{{0xf2, 0xe8, 0x00, 0x00, 0x00, 0x00, }, 6, 0, "call", "unconditional",
+"f2 e8 00 00 00 00 \tbnd callq 3f6 <main+0x3f6>",},
+{{0x67, 0xf2, 0xff, 0x10, }, 4, 0, "call", "indirect",
+"67 f2 ff 10 \tbnd callq *(%eax)",},
+{{0xf2, 0xc3, }, 2, 0, "ret", "indirect",
+"f2 c3 \tbnd retq ",},
+{{0xf2, 0xe9, 0x00, 0x00, 0x00, 0x00, }, 6, 0, "jmp", "unconditional",
+"f2 e9 00 00 00 00 \tbnd jmpq 402 <main+0x402>",},
+{{0xf2, 0xe9, 0x00, 0x00, 0x00, 0x00, }, 6, 0, "jmp", "unconditional",
+"f2 e9 00 00 00 00 \tbnd jmpq 408 <main+0x408>",},
+{{0x67, 0xf2, 0xff, 0x21, }, 4, 0, "jmp", "indirect",
+"67 f2 ff 21 \tbnd jmpq *(%ecx)",},
+{{0xf2, 0x0f, 0x85, 0x00, 0x00, 0x00, 0x00, }, 7, 0, "jcc", "conditional",
+"f2 0f 85 00 00 00 00 \tbnd jne 413 <main+0x413>",},
+{{0x0f, 0x3a, 0xcc, 0xc1, 0x00, }, 5, 0, "", "",
+"0f 3a cc c1 00 \tsha1rnds4 $0x0,%xmm1,%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0xd7, 0x91, }, 5, 0, "", "",
+"0f 3a cc d7 91 \tsha1rnds4 $0x91,%xmm7,%xmm2",},
+{{0x41, 0x0f, 0x3a, 0xcc, 0xc0, 0x91, }, 6, 0, "", "",
+"41 0f 3a cc c0 91 \tsha1rnds4 $0x91,%xmm8,%xmm0",},
+{{0x44, 0x0f, 0x3a, 0xcc, 0xc7, 0x91, }, 6, 0, "", "",
+"44 0f 3a cc c7 91 \tsha1rnds4 $0x91,%xmm7,%xmm8",},
+{{0x45, 0x0f, 0x3a, 0xcc, 0xc7, 0x91, }, 6, 0, "", "",
+"45 0f 3a cc c7 91 \tsha1rnds4 $0x91,%xmm15,%xmm8",},
+{{0x0f, 0x3a, 0xcc, 0x00, 0x91, }, 5, 0, "", "",
+"0f 3a cc 00 91 \tsha1rnds4 $0x91,(%rax),%xmm0",},
+{{0x41, 0x0f, 0x3a, 0xcc, 0x00, 0x91, }, 6, 0, "", "",
+"41 0f 3a cc 00 91 \tsha1rnds4 $0x91,(%r8),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 04 25 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678,%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x18, 0x91, }, 5, 0, "", "",
+"0f 3a cc 18 91 \tsha1rnds4 $0x91,(%rax),%xmm3",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0x01, 0x91, }, 6, 0, "", "",
+"0f 3a cc 04 01 91 \tsha1rnds4 $0x91,(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 04 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(,%rax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0x08, 0x91, }, 6, 0, "", "",
+"0f 3a cc 04 08 91 \tsha1rnds4 $0x91,(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x04, 0xc8, 0x91, }, 6, 0, "", "",
+"0f 3a cc 04 c8 91 \tsha1rnds4 $0x91,(%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x40, 0x12, 0x91, }, 6, 0, "", "",
+"0f 3a cc 40 12 91 \tsha1rnds4 $0x91,0x12(%rax),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x45, 0x12, 0x91, }, 6, 0, "", "",
+"0f 3a cc 45 12 91 \tsha1rnds4 $0x91,0x12(%rbp),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0x01, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 01 12 91 \tsha1rnds4 $0x91,0x12(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0x05, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 05 12 91 \tsha1rnds4 $0x91,0x12(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0x08, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 08 12 91 \tsha1rnds4 $0x91,0x12(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x44, 0xc8, 0x12, 0x91, }, 7, 0, "", "",
+"0f 3a cc 44 c8 12 91 \tsha1rnds4 $0x91,0x12(%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x80, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
+"0f 3a cc 80 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rax),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x85, 0x78, 0x56, 0x34, 0x12, 0x91, }, 9, 0, "", "",
+"0f 3a cc 85 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rbp),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 01 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 05 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 08 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x3a, 0xcc, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x91, }, 10, 0, "", "",
+"0f 3a cc 84 c8 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rax,%rcx,8),%xmm0",},
+{{0x44, 0x0f, 0x3a, 0xcc, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x91, }, 11, 0, "", "",
+"44 0f 3a cc bc c8 78 56 34 12 91 \tsha1rnds4 $0x91,0x12345678(%rax,%rcx,8),%xmm15",},
+{{0x0f, 0x38, 0xc8, 0xc1, }, 4, 0, "", "",
+"0f 38 c8 c1 \tsha1nexte %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xc8, 0xd7, }, 4, 0, "", "",
+"0f 38 c8 d7 \tsha1nexte %xmm7,%xmm2",},
+{{0x41, 0x0f, 0x38, 0xc8, 0xc0, }, 5, 0, "", "",
+"41 0f 38 c8 c0 \tsha1nexte %xmm8,%xmm0",},
+{{0x44, 0x0f, 0x38, 0xc8, 0xc7, }, 5, 0, "", "",
+"44 0f 38 c8 c7 \tsha1nexte %xmm7,%xmm8",},
+{{0x45, 0x0f, 0x38, 0xc8, 0xc7, }, 5, 0, "", "",
+"45 0f 38 c8 c7 \tsha1nexte %xmm15,%xmm8",},
+{{0x0f, 0x38, 0xc8, 0x00, }, 4, 0, "", "",
+"0f 38 c8 00 \tsha1nexte (%rax),%xmm0",},
+{{0x41, 0x0f, 0x38, 0xc8, 0x00, }, 5, 0, "", "",
+"41 0f 38 c8 00 \tsha1nexte (%r8),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 04 25 78 56 34 12 \tsha1nexte 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x18, }, 4, 0, "", "",
+"0f 38 c8 18 \tsha1nexte (%rax),%xmm3",},
+{{0x0f, 0x38, 0xc8, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 c8 04 01 \tsha1nexte (%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 04 05 78 56 34 12 \tsha1nexte 0x12345678(,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 c8 04 08 \tsha1nexte (%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 c8 04 c8 \tsha1nexte (%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 c8 40 12 \tsha1nexte 0x12(%rax),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 c8 45 12 \tsha1nexte 0x12(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 01 12 \tsha1nexte 0x12(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 05 12 \tsha1nexte 0x12(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 08 12 \tsha1nexte 0x12(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 c8 44 c8 12 \tsha1nexte 0x12(%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c8 80 78 56 34 12 \tsha1nexte 0x12345678(%rax),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c8 85 78 56 34 12 \tsha1nexte 0x12345678(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 01 78 56 34 12 \tsha1nexte 0x12345678(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 05 78 56 34 12 \tsha1nexte 0x12345678(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 08 78 56 34 12 \tsha1nexte 0x12345678(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xc8, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c8 84 c8 78 56 34 12 \tsha1nexte 0x12345678(%rax,%rcx,8),%xmm0",},
+{{0x44, 0x0f, 0x38, 0xc8, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"44 0f 38 c8 bc c8 78 56 34 12 \tsha1nexte 0x12345678(%rax,%rcx,8),%xmm15",},
+{{0x0f, 0x38, 0xc9, 0xc1, }, 4, 0, "", "",
+"0f 38 c9 c1 \tsha1msg1 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xc9, 0xd7, }, 4, 0, "", "",
+"0f 38 c9 d7 \tsha1msg1 %xmm7,%xmm2",},
+{{0x41, 0x0f, 0x38, 0xc9, 0xc0, }, 5, 0, "", "",
+"41 0f 38 c9 c0 \tsha1msg1 %xmm8,%xmm0",},
+{{0x44, 0x0f, 0x38, 0xc9, 0xc7, }, 5, 0, "", "",
+"44 0f 38 c9 c7 \tsha1msg1 %xmm7,%xmm8",},
+{{0x45, 0x0f, 0x38, 0xc9, 0xc7, }, 5, 0, "", "",
+"45 0f 38 c9 c7 \tsha1msg1 %xmm15,%xmm8",},
+{{0x0f, 0x38, 0xc9, 0x00, }, 4, 0, "", "",
+"0f 38 c9 00 \tsha1msg1 (%rax),%xmm0",},
+{{0x41, 0x0f, 0x38, 0xc9, 0x00, }, 5, 0, "", "",
+"41 0f 38 c9 00 \tsha1msg1 (%r8),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 04 25 78 56 34 12 \tsha1msg1 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x18, }, 4, 0, "", "",
+"0f 38 c9 18 \tsha1msg1 (%rax),%xmm3",},
+{{0x0f, 0x38, 0xc9, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 c9 04 01 \tsha1msg1 (%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 04 05 78 56 34 12 \tsha1msg1 0x12345678(,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 c9 04 08 \tsha1msg1 (%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 c9 04 c8 \tsha1msg1 (%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 c9 40 12 \tsha1msg1 0x12(%rax),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 c9 45 12 \tsha1msg1 0x12(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 01 12 \tsha1msg1 0x12(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 05 12 \tsha1msg1 0x12(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 08 12 \tsha1msg1 0x12(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 c9 44 c8 12 \tsha1msg1 0x12(%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c9 80 78 56 34 12 \tsha1msg1 0x12345678(%rax),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 c9 85 78 56 34 12 \tsha1msg1 0x12345678(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 01 78 56 34 12 \tsha1msg1 0x12345678(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 05 78 56 34 12 \tsha1msg1 0x12345678(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 08 78 56 34 12 \tsha1msg1 0x12345678(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xc9, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 c9 84 c8 78 56 34 12 \tsha1msg1 0x12345678(%rax,%rcx,8),%xmm0",},
+{{0x44, 0x0f, 0x38, 0xc9, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"44 0f 38 c9 bc c8 78 56 34 12 \tsha1msg1 0x12345678(%rax,%rcx,8),%xmm15",},
+{{0x0f, 0x38, 0xca, 0xc1, }, 4, 0, "", "",
+"0f 38 ca c1 \tsha1msg2 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xca, 0xd7, }, 4, 0, "", "",
+"0f 38 ca d7 \tsha1msg2 %xmm7,%xmm2",},
+{{0x41, 0x0f, 0x38, 0xca, 0xc0, }, 5, 0, "", "",
+"41 0f 38 ca c0 \tsha1msg2 %xmm8,%xmm0",},
+{{0x44, 0x0f, 0x38, 0xca, 0xc7, }, 5, 0, "", "",
+"44 0f 38 ca c7 \tsha1msg2 %xmm7,%xmm8",},
+{{0x45, 0x0f, 0x38, 0xca, 0xc7, }, 5, 0, "", "",
+"45 0f 38 ca c7 \tsha1msg2 %xmm15,%xmm8",},
+{{0x0f, 0x38, 0xca, 0x00, }, 4, 0, "", "",
+"0f 38 ca 00 \tsha1msg2 (%rax),%xmm0",},
+{{0x41, 0x0f, 0x38, 0xca, 0x00, }, 5, 0, "", "",
+"41 0f 38 ca 00 \tsha1msg2 (%r8),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 04 25 78 56 34 12 \tsha1msg2 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xca, 0x18, }, 4, 0, "", "",
+"0f 38 ca 18 \tsha1msg2 (%rax),%xmm3",},
+{{0x0f, 0x38, 0xca, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 ca 04 01 \tsha1msg2 (%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 04 05 78 56 34 12 \tsha1msg2 0x12345678(,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 ca 04 08 \tsha1msg2 (%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 ca 04 c8 \tsha1msg2 (%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 ca 40 12 \tsha1msg2 0x12(%rax),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 ca 45 12 \tsha1msg2 0x12(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 01 12 \tsha1msg2 0x12(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 05 12 \tsha1msg2 0x12(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 08 12 \tsha1msg2 0x12(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 ca 44 c8 12 \tsha1msg2 0x12(%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 ca 80 78 56 34 12 \tsha1msg2 0x12345678(%rax),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 ca 85 78 56 34 12 \tsha1msg2 0x12345678(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 01 78 56 34 12 \tsha1msg2 0x12345678(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 05 78 56 34 12 \tsha1msg2 0x12345678(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 08 78 56 34 12 \tsha1msg2 0x12345678(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xca, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 ca 84 c8 78 56 34 12 \tsha1msg2 0x12345678(%rax,%rcx,8),%xmm0",},
+{{0x44, 0x0f, 0x38, 0xca, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"44 0f 38 ca bc c8 78 56 34 12 \tsha1msg2 0x12345678(%rax,%rcx,8),%xmm15",},
+{{0x0f, 0x38, 0xcb, 0xcc, }, 4, 0, "", "",
+"0f 38 cb cc \tsha256rnds2 %xmm0,%xmm4,%xmm1",},
+{{0x0f, 0x38, 0xcb, 0xd7, }, 4, 0, "", "",
+"0f 38 cb d7 \tsha256rnds2 %xmm0,%xmm7,%xmm2",},
+{{0x41, 0x0f, 0x38, 0xcb, 0xc8, }, 5, 0, "", "",
+"41 0f 38 cb c8 \tsha256rnds2 %xmm0,%xmm8,%xmm1",},
+{{0x44, 0x0f, 0x38, 0xcb, 0xc7, }, 5, 0, "", "",
+"44 0f 38 cb c7 \tsha256rnds2 %xmm0,%xmm7,%xmm8",},
+{{0x45, 0x0f, 0x38, 0xcb, 0xc7, }, 5, 0, "", "",
+"45 0f 38 cb c7 \tsha256rnds2 %xmm0,%xmm15,%xmm8",},
+{{0x0f, 0x38, 0xcb, 0x08, }, 4, 0, "", "",
+"0f 38 cb 08 \tsha256rnds2 %xmm0,(%rax),%xmm1",},
+{{0x41, 0x0f, 0x38, 0xcb, 0x08, }, 5, 0, "", "",
+"41 0f 38 cb 08 \tsha256rnds2 %xmm0,(%r8),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 0c 25 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678,%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x18, }, 4, 0, "", "",
+"0f 38 cb 18 \tsha256rnds2 %xmm0,(%rax),%xmm3",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0x01, }, 5, 0, "", "",
+"0f 38 cb 0c 01 \tsha256rnds2 %xmm0,(%rcx,%rax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 0c 05 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(,%rax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0x08, }, 5, 0, "", "",
+"0f 38 cb 0c 08 \tsha256rnds2 %xmm0,(%rax,%rcx,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x0c, 0xc8, }, 5, 0, "", "",
+"0f 38 cb 0c c8 \tsha256rnds2 %xmm0,(%rax,%rcx,8),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x48, 0x12, }, 5, 0, "", "",
+"0f 38 cb 48 12 \tsha256rnds2 %xmm0,0x12(%rax),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4d, 0x12, }, 5, 0, "", "",
+"0f 38 cb 4d 12 \tsha256rnds2 %xmm0,0x12(%rbp),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c 01 12 \tsha256rnds2 %xmm0,0x12(%rcx,%rax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c 05 12 \tsha256rnds2 %xmm0,0x12(%rbp,%rax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c 08 12 \tsha256rnds2 %xmm0,0x12(%rax,%rcx,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x4c, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 cb 4c c8 12 \tsha256rnds2 %xmm0,0x12(%rax,%rcx,8),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x88, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cb 88 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rax),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8d, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cb 8d 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rbp),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c 01 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rcx,%rax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c 05 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rbp,%rax,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c 08 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rax,%rcx,1),%xmm1",},
+{{0x0f, 0x38, 0xcb, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cb 8c c8 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x44, 0x0f, 0x38, 0xcb, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"44 0f 38 cb bc c8 78 56 34 12 \tsha256rnds2 %xmm0,0x12345678(%rax,%rcx,8),%xmm15",},
+{{0x0f, 0x38, 0xcc, 0xc1, }, 4, 0, "", "",
+"0f 38 cc c1 \tsha256msg1 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xcc, 0xd7, }, 4, 0, "", "",
+"0f 38 cc d7 \tsha256msg1 %xmm7,%xmm2",},
+{{0x41, 0x0f, 0x38, 0xcc, 0xc0, }, 5, 0, "", "",
+"41 0f 38 cc c0 \tsha256msg1 %xmm8,%xmm0",},
+{{0x44, 0x0f, 0x38, 0xcc, 0xc7, }, 5, 0, "", "",
+"44 0f 38 cc c7 \tsha256msg1 %xmm7,%xmm8",},
+{{0x45, 0x0f, 0x38, 0xcc, 0xc7, }, 5, 0, "", "",
+"45 0f 38 cc c7 \tsha256msg1 %xmm15,%xmm8",},
+{{0x0f, 0x38, 0xcc, 0x00, }, 4, 0, "", "",
+"0f 38 cc 00 \tsha256msg1 (%rax),%xmm0",},
+{{0x41, 0x0f, 0x38, 0xcc, 0x00, }, 5, 0, "", "",
+"41 0f 38 cc 00 \tsha256msg1 (%r8),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 04 25 78 56 34 12 \tsha256msg1 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x18, }, 4, 0, "", "",
+"0f 38 cc 18 \tsha256msg1 (%rax),%xmm3",},
+{{0x0f, 0x38, 0xcc, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 cc 04 01 \tsha256msg1 (%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 04 05 78 56 34 12 \tsha256msg1 0x12345678(,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 cc 04 08 \tsha256msg1 (%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 cc 04 c8 \tsha256msg1 (%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 cc 40 12 \tsha256msg1 0x12(%rax),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 cc 45 12 \tsha256msg1 0x12(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 01 12 \tsha256msg1 0x12(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 05 12 \tsha256msg1 0x12(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 08 12 \tsha256msg1 0x12(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 cc 44 c8 12 \tsha256msg1 0x12(%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cc 80 78 56 34 12 \tsha256msg1 0x12345678(%rax),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cc 85 78 56 34 12 \tsha256msg1 0x12345678(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 01 78 56 34 12 \tsha256msg1 0x12345678(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 05 78 56 34 12 \tsha256msg1 0x12345678(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 08 78 56 34 12 \tsha256msg1 0x12345678(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xcc, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cc 84 c8 78 56 34 12 \tsha256msg1 0x12345678(%rax,%rcx,8),%xmm0",},
+{{0x44, 0x0f, 0x38, 0xcc, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"44 0f 38 cc bc c8 78 56 34 12 \tsha256msg1 0x12345678(%rax,%rcx,8),%xmm15",},
+{{0x0f, 0x38, 0xcd, 0xc1, }, 4, 0, "", "",
+"0f 38 cd c1 \tsha256msg2 %xmm1,%xmm0",},
+{{0x0f, 0x38, 0xcd, 0xd7, }, 4, 0, "", "",
+"0f 38 cd d7 \tsha256msg2 %xmm7,%xmm2",},
+{{0x41, 0x0f, 0x38, 0xcd, 0xc0, }, 5, 0, "", "",
+"41 0f 38 cd c0 \tsha256msg2 %xmm8,%xmm0",},
+{{0x44, 0x0f, 0x38, 0xcd, 0xc7, }, 5, 0, "", "",
+"44 0f 38 cd c7 \tsha256msg2 %xmm7,%xmm8",},
+{{0x45, 0x0f, 0x38, 0xcd, 0xc7, }, 5, 0, "", "",
+"45 0f 38 cd c7 \tsha256msg2 %xmm15,%xmm8",},
+{{0x0f, 0x38, 0xcd, 0x00, }, 4, 0, "", "",
+"0f 38 cd 00 \tsha256msg2 (%rax),%xmm0",},
+{{0x41, 0x0f, 0x38, 0xcd, 0x00, }, 5, 0, "", "",
+"41 0f 38 cd 00 \tsha256msg2 (%r8),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x04, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 04 25 78 56 34 12 \tsha256msg2 0x12345678,%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x18, }, 4, 0, "", "",
+"0f 38 cd 18 \tsha256msg2 (%rax),%xmm3",},
+{{0x0f, 0x38, 0xcd, 0x04, 0x01, }, 5, 0, "", "",
+"0f 38 cd 04 01 \tsha256msg2 (%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x04, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 04 05 78 56 34 12 \tsha256msg2 0x12345678(,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x04, 0x08, }, 5, 0, "", "",
+"0f 38 cd 04 08 \tsha256msg2 (%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x04, 0xc8, }, 5, 0, "", "",
+"0f 38 cd 04 c8 \tsha256msg2 (%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x40, 0x12, }, 5, 0, "", "",
+"0f 38 cd 40 12 \tsha256msg2 0x12(%rax),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x45, 0x12, }, 5, 0, "", "",
+"0f 38 cd 45 12 \tsha256msg2 0x12(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0x01, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 01 12 \tsha256msg2 0x12(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0x05, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 05 12 \tsha256msg2 0x12(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0x08, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 08 12 \tsha256msg2 0x12(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x44, 0xc8, 0x12, }, 6, 0, "", "",
+"0f 38 cd 44 c8 12 \tsha256msg2 0x12(%rax,%rcx,8),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x80, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cd 80 78 56 34 12 \tsha256msg2 0x12345678(%rax),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x85, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f 38 cd 85 78 56 34 12 \tsha256msg2 0x12345678(%rbp),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0x01, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 01 78 56 34 12 \tsha256msg2 0x12345678(%rcx,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0x05, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 05 78 56 34 12 \tsha256msg2 0x12345678(%rbp,%rax,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0x08, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 08 78 56 34 12 \tsha256msg2 0x12345678(%rax,%rcx,1),%xmm0",},
+{{0x0f, 0x38, 0xcd, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"0f 38 cd 84 c8 78 56 34 12 \tsha256msg2 0x12345678(%rax,%rcx,8),%xmm0",},
+{{0x44, 0x0f, 0x38, 0xcd, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"44 0f 38 cd bc c8 78 56 34 12 \tsha256msg2 0x12345678(%rax,%rcx,8),%xmm15",},
+{{0x66, 0x0f, 0xae, 0x38, }, 4, 0, "", "",
+"66 0f ae 38 \tclflushopt (%rax)",},
+{{0x66, 0x41, 0x0f, 0xae, 0x38, }, 5, 0, "", "",
+"66 41 0f ae 38 \tclflushopt (%r8)",},
+{{0x66, 0x0f, 0xae, 0x3c, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f ae 3c 25 78 56 34 12 \tclflushopt 0x12345678",},
+{{0x66, 0x0f, 0xae, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f ae bc c8 78 56 34 12 \tclflushopt 0x12345678(%rax,%rcx,8)",},
+{{0x66, 0x41, 0x0f, 0xae, 0xbc, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"66 41 0f ae bc c8 78 56 34 12 \tclflushopt 0x12345678(%r8,%rcx,8)",},
+{{0x0f, 0xae, 0x38, }, 3, 0, "", "",
+"0f ae 38 \tclflush (%rax)",},
+{{0x41, 0x0f, 0xae, 0x38, }, 4, 0, "", "",
+"41 0f ae 38 \tclflush (%r8)",},
+{{0x0f, 0xae, 0xf8, }, 3, 0, "", "",
+"0f ae f8 \tsfence ",},
+{{0x66, 0x0f, 0xae, 0x30, }, 4, 0, "", "",
+"66 0f ae 30 \tclwb (%rax)",},
+{{0x66, 0x41, 0x0f, 0xae, 0x30, }, 5, 0, "", "",
+"66 41 0f ae 30 \tclwb (%r8)",},
+{{0x66, 0x0f, 0xae, 0x34, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f ae 34 25 78 56 34 12 \tclwb 0x12345678",},
+{{0x66, 0x0f, 0xae, 0xb4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"66 0f ae b4 c8 78 56 34 12 \tclwb 0x12345678(%rax,%rcx,8)",},
+{{0x66, 0x41, 0x0f, 0xae, 0xb4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"66 41 0f ae b4 c8 78 56 34 12 \tclwb 0x12345678(%r8,%rcx,8)",},
+{{0x0f, 0xae, 0x30, }, 3, 0, "", "",
+"0f ae 30 \txsaveopt (%rax)",},
+{{0x41, 0x0f, 0xae, 0x30, }, 4, 0, "", "",
+"41 0f ae 30 \txsaveopt (%r8)",},
+{{0x0f, 0xae, 0xf0, }, 3, 0, "", "",
+"0f ae f0 \tmfence ",},
+{{0x0f, 0xc7, 0x20, }, 3, 0, "", "",
+"0f c7 20 \txsavec (%rax)",},
+{{0x41, 0x0f, 0xc7, 0x20, }, 4, 0, "", "",
+"41 0f c7 20 \txsavec (%r8)",},
+{{0x0f, 0xc7, 0x24, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 24 25 78 56 34 12 \txsavec 0x12345678",},
+{{0x0f, 0xc7, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 a4 c8 78 56 34 12 \txsavec 0x12345678(%rax,%rcx,8)",},
+{{0x41, 0x0f, 0xc7, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"41 0f c7 a4 c8 78 56 34 12 \txsavec 0x12345678(%r8,%rcx,8)",},
+{{0x0f, 0xc7, 0x28, }, 3, 0, "", "",
+"0f c7 28 \txsaves (%rax)",},
+{{0x41, 0x0f, 0xc7, 0x28, }, 4, 0, "", "",
+"41 0f c7 28 \txsaves (%r8)",},
+{{0x0f, 0xc7, 0x2c, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 2c 25 78 56 34 12 \txsaves 0x12345678",},
+{{0x0f, 0xc7, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 ac c8 78 56 34 12 \txsaves 0x12345678(%rax,%rcx,8)",},
+{{0x41, 0x0f, 0xc7, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"41 0f c7 ac c8 78 56 34 12 \txsaves 0x12345678(%r8,%rcx,8)",},
+{{0x0f, 0xc7, 0x18, }, 3, 0, "", "",
+"0f c7 18 \txrstors (%rax)",},
+{{0x41, 0x0f, 0xc7, 0x18, }, 4, 0, "", "",
+"41 0f c7 18 \txrstors (%r8)",},
+{{0x0f, 0xc7, 0x1c, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 1c 25 78 56 34 12 \txrstors 0x12345678",},
+{{0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%rax,%rcx,8)",},
+{{0x41, 0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"41 0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%r8,%rcx,8)",},
+{{0x66, 0x0f, 0xae, 0xf8, }, 4, 0, "", "",
+"66 0f ae f8 \tpcommit ",},
diff --git a/kernel/tools/perf/arch/x86/tests/insn-x86-dat-src.c b/kernel/tools/perf/arch/x86/tests/insn-x86-dat-src.c
new file mode 100644
index 000000000..41b1b1c62
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/tests/insn-x86-dat-src.c
@@ -0,0 +1,877 @@
+/*
+ * This file contains the instructions used by the test titled:
+ *
+ * "Test x86 instruction decoder - new instructions"
+ *
+ * Note that the 'Expecting' comment lines are consumed by the
+ * gen-insn-x86-dat.awk script and have the format:
+ *
+ * Expecting: <op> <branch> <rel>
+ *
+ * If this file is changed, remember to run the gen-insn-x86-dat.sh
+ * script and commit the result.
+ *
+ * Refer to insn-x86.c for more details.
+ */
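+
+/*
+ * For example (matching the bnd-prefixed branch tests further down in this
+ * file), a test line of the form
+ *
+ *	asm volatile("bnd jmp label1");		Expecting: jmp unconditional 0
+ *
+ * (with the 'Expecting' text placed in a trailing comment) records the
+ * expected <op> ("jmp"), <branch> ("unconditional") and <rel> (0) values
+ * that end up in the generated insn-x86-dat-*.c tables. This paragraph is
+ * an illustrative note only; refer to gen-insn-x86-dat.awk for the exact
+ * parsing rules.
+ */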
+
+int main(void)
+{
+	/* The following line is a marker for the awk script - do not change */
+ asm volatile("rdtsc"); /* Start here */
+
+#ifdef __x86_64__
+
+ /* bndmk m64, bnd */
+
+ asm volatile("bndmk (%rax), %bnd0");
+ asm volatile("bndmk (%r8), %bnd0");
+ asm volatile("bndmk (0x12345678), %bnd0");
+ asm volatile("bndmk (%rax), %bnd3");
+ asm volatile("bndmk (%rcx,%rax,1), %bnd0");
+ asm volatile("bndmk 0x12345678(,%rax,1), %bnd0");
+ asm volatile("bndmk (%rax,%rcx,1), %bnd0");
+ asm volatile("bndmk (%rax,%rcx,8), %bnd0");
+ asm volatile("bndmk 0x12(%rax), %bnd0");
+ asm volatile("bndmk 0x12(%rbp), %bnd0");
+ asm volatile("bndmk 0x12(%rcx,%rax,1), %bnd0");
+ asm volatile("bndmk 0x12(%rbp,%rax,1), %bnd0");
+ asm volatile("bndmk 0x12(%rax,%rcx,1), %bnd0");
+ asm volatile("bndmk 0x12(%rax,%rcx,8), %bnd0");
+ asm volatile("bndmk 0x12345678(%rax), %bnd0");
+ asm volatile("bndmk 0x12345678(%rbp), %bnd0");
+ asm volatile("bndmk 0x12345678(%rcx,%rax,1), %bnd0");
+ asm volatile("bndmk 0x12345678(%rbp,%rax,1), %bnd0");
+ asm volatile("bndmk 0x12345678(%rax,%rcx,1), %bnd0");
+ asm volatile("bndmk 0x12345678(%rax,%rcx,8), %bnd0");
+
+ /* bndcl r/m64, bnd */
+
+ asm volatile("bndcl (%rax), %bnd0");
+ asm volatile("bndcl (%r8), %bnd0");
+ asm volatile("bndcl (0x12345678), %bnd0");
+ asm volatile("bndcl (%rax), %bnd3");
+ asm volatile("bndcl (%rcx,%rax,1), %bnd0");
+ asm volatile("bndcl 0x12345678(,%rax,1), %bnd0");
+ asm volatile("bndcl (%rax,%rcx,1), %bnd0");
+ asm volatile("bndcl (%rax,%rcx,8), %bnd0");
+ asm volatile("bndcl 0x12(%rax), %bnd0");
+ asm volatile("bndcl 0x12(%rbp), %bnd0");
+ asm volatile("bndcl 0x12(%rcx,%rax,1), %bnd0");
+ asm volatile("bndcl 0x12(%rbp,%rax,1), %bnd0");
+ asm volatile("bndcl 0x12(%rax,%rcx,1), %bnd0");
+ asm volatile("bndcl 0x12(%rax,%rcx,8), %bnd0");
+ asm volatile("bndcl 0x12345678(%rax), %bnd0");
+ asm volatile("bndcl 0x12345678(%rbp), %bnd0");
+ asm volatile("bndcl 0x12345678(%rcx,%rax,1), %bnd0");
+ asm volatile("bndcl 0x12345678(%rbp,%rax,1), %bnd0");
+ asm volatile("bndcl 0x12345678(%rax,%rcx,1), %bnd0");
+ asm volatile("bndcl 0x12345678(%rax,%rcx,8), %bnd0");
+ asm volatile("bndcl %rax, %bnd0");
+
+ /* bndcu r/m64, bnd */
+
+ asm volatile("bndcu (%rax), %bnd0");
+ asm volatile("bndcu (%r8), %bnd0");
+ asm volatile("bndcu (0x12345678), %bnd0");
+ asm volatile("bndcu (%rax), %bnd3");
+ asm volatile("bndcu (%rcx,%rax,1), %bnd0");
+ asm volatile("bndcu 0x12345678(,%rax,1), %bnd0");
+ asm volatile("bndcu (%rax,%rcx,1), %bnd0");
+ asm volatile("bndcu (%rax,%rcx,8), %bnd0");
+ asm volatile("bndcu 0x12(%rax), %bnd0");
+ asm volatile("bndcu 0x12(%rbp), %bnd0");
+ asm volatile("bndcu 0x12(%rcx,%rax,1), %bnd0");
+ asm volatile("bndcu 0x12(%rbp,%rax,1), %bnd0");
+ asm volatile("bndcu 0x12(%rax,%rcx,1), %bnd0");
+ asm volatile("bndcu 0x12(%rax,%rcx,8), %bnd0");
+ asm volatile("bndcu 0x12345678(%rax), %bnd0");
+ asm volatile("bndcu 0x12345678(%rbp), %bnd0");
+ asm volatile("bndcu 0x12345678(%rcx,%rax,1), %bnd0");
+ asm volatile("bndcu 0x12345678(%rbp,%rax,1), %bnd0");
+ asm volatile("bndcu 0x12345678(%rax,%rcx,1), %bnd0");
+ asm volatile("bndcu 0x12345678(%rax,%rcx,8), %bnd0");
+ asm volatile("bndcu %rax, %bnd0");
+
+ /* bndcn r/m64, bnd */
+
+ asm volatile("bndcn (%rax), %bnd0");
+ asm volatile("bndcn (%r8), %bnd0");
+ asm volatile("bndcn (0x12345678), %bnd0");
+ asm volatile("bndcn (%rax), %bnd3");
+ asm volatile("bndcn (%rcx,%rax,1), %bnd0");
+ asm volatile("bndcn 0x12345678(,%rax,1), %bnd0");
+ asm volatile("bndcn (%rax,%rcx,1), %bnd0");
+ asm volatile("bndcn (%rax,%rcx,8), %bnd0");
+ asm volatile("bndcn 0x12(%rax), %bnd0");
+ asm volatile("bndcn 0x12(%rbp), %bnd0");
+ asm volatile("bndcn 0x12(%rcx,%rax,1), %bnd0");
+ asm volatile("bndcn 0x12(%rbp,%rax,1), %bnd0");
+ asm volatile("bndcn 0x12(%rax,%rcx,1), %bnd0");
+ asm volatile("bndcn 0x12(%rax,%rcx,8), %bnd0");
+ asm volatile("bndcn 0x12345678(%rax), %bnd0");
+ asm volatile("bndcn 0x12345678(%rbp), %bnd0");
+ asm volatile("bndcn 0x12345678(%rcx,%rax,1), %bnd0");
+ asm volatile("bndcn 0x12345678(%rbp,%rax,1), %bnd0");
+ asm volatile("bndcn 0x12345678(%rax,%rcx,1), %bnd0");
+ asm volatile("bndcn 0x12345678(%rax,%rcx,8), %bnd0");
+ asm volatile("bndcn %rax, %bnd0");
+
+ /* bndmov m128, bnd */
+
+ asm volatile("bndmov (%rax), %bnd0");
+ asm volatile("bndmov (%r8), %bnd0");
+ asm volatile("bndmov (0x12345678), %bnd0");
+ asm volatile("bndmov (%rax), %bnd3");
+ asm volatile("bndmov (%rcx,%rax,1), %bnd0");
+ asm volatile("bndmov 0x12345678(,%rax,1), %bnd0");
+ asm volatile("bndmov (%rax,%rcx,1), %bnd0");
+ asm volatile("bndmov (%rax,%rcx,8), %bnd0");
+ asm volatile("bndmov 0x12(%rax), %bnd0");
+ asm volatile("bndmov 0x12(%rbp), %bnd0");
+ asm volatile("bndmov 0x12(%rcx,%rax,1), %bnd0");
+ asm volatile("bndmov 0x12(%rbp,%rax,1), %bnd0");
+ asm volatile("bndmov 0x12(%rax,%rcx,1), %bnd0");
+ asm volatile("bndmov 0x12(%rax,%rcx,8), %bnd0");
+ asm volatile("bndmov 0x12345678(%rax), %bnd0");
+ asm volatile("bndmov 0x12345678(%rbp), %bnd0");
+ asm volatile("bndmov 0x12345678(%rcx,%rax,1), %bnd0");
+ asm volatile("bndmov 0x12345678(%rbp,%rax,1), %bnd0");
+ asm volatile("bndmov 0x12345678(%rax,%rcx,1), %bnd0");
+ asm volatile("bndmov 0x12345678(%rax,%rcx,8), %bnd0");
+
+ /* bndmov bnd, m128 */
+
+ asm volatile("bndmov %bnd0, (%rax)");
+ asm volatile("bndmov %bnd0, (%r8)");
+ asm volatile("bndmov %bnd0, (0x12345678)");
+ asm volatile("bndmov %bnd3, (%rax)");
+ asm volatile("bndmov %bnd0, (%rcx,%rax,1)");
+ asm volatile("bndmov %bnd0, 0x12345678(,%rax,1)");
+ asm volatile("bndmov %bnd0, (%rax,%rcx,1)");
+ asm volatile("bndmov %bnd0, (%rax,%rcx,8)");
+ asm volatile("bndmov %bnd0, 0x12(%rax)");
+ asm volatile("bndmov %bnd0, 0x12(%rbp)");
+ asm volatile("bndmov %bnd0, 0x12(%rcx,%rax,1)");
+ asm volatile("bndmov %bnd0, 0x12(%rbp,%rax,1)");
+ asm volatile("bndmov %bnd0, 0x12(%rax,%rcx,1)");
+ asm volatile("bndmov %bnd0, 0x12(%rax,%rcx,8)");
+ asm volatile("bndmov %bnd0, 0x12345678(%rax)");
+ asm volatile("bndmov %bnd0, 0x12345678(%rbp)");
+ asm volatile("bndmov %bnd0, 0x12345678(%rcx,%rax,1)");
+ asm volatile("bndmov %bnd0, 0x12345678(%rbp,%rax,1)");
+ asm volatile("bndmov %bnd0, 0x12345678(%rax,%rcx,1)");
+ asm volatile("bndmov %bnd0, 0x12345678(%rax,%rcx,8)");
+
+ /* bndmov bnd2, bnd1 */
+
+ asm volatile("bndmov %bnd0, %bnd1");
+ asm volatile("bndmov %bnd1, %bnd0");
+
+ /* bndldx mib, bnd */
+
+ asm volatile("bndldx (%rax), %bnd0");
+ asm volatile("bndldx (%r8), %bnd0");
+ asm volatile("bndldx (0x12345678), %bnd0");
+ asm volatile("bndldx (%rax), %bnd3");
+ asm volatile("bndldx (%rcx,%rax,1), %bnd0");
+ asm volatile("bndldx 0x12345678(,%rax,1), %bnd0");
+ asm volatile("bndldx (%rax,%rcx,1), %bnd0");
+ asm volatile("bndldx 0x12(%rax), %bnd0");
+ asm volatile("bndldx 0x12(%rbp), %bnd0");
+ asm volatile("bndldx 0x12(%rcx,%rax,1), %bnd0");
+ asm volatile("bndldx 0x12(%rbp,%rax,1), %bnd0");
+ asm volatile("bndldx 0x12(%rax,%rcx,1), %bnd0");
+ asm volatile("bndldx 0x12345678(%rax), %bnd0");
+ asm volatile("bndldx 0x12345678(%rbp), %bnd0");
+ asm volatile("bndldx 0x12345678(%rcx,%rax,1), %bnd0");
+ asm volatile("bndldx 0x12345678(%rbp,%rax,1), %bnd0");
+ asm volatile("bndldx 0x12345678(%rax,%rcx,1), %bnd0");
+
+ /* bndstx bnd, mib */
+
+ asm volatile("bndstx %bnd0, (%rax)");
+ asm volatile("bndstx %bnd0, (%r8)");
+ asm volatile("bndstx %bnd0, (0x12345678)");
+ asm volatile("bndstx %bnd3, (%rax)");
+ asm volatile("bndstx %bnd0, (%rcx,%rax,1)");
+ asm volatile("bndstx %bnd0, 0x12345678(,%rax,1)");
+ asm volatile("bndstx %bnd0, (%rax,%rcx,1)");
+ asm volatile("bndstx %bnd0, 0x12(%rax)");
+ asm volatile("bndstx %bnd0, 0x12(%rbp)");
+ asm volatile("bndstx %bnd0, 0x12(%rcx,%rax,1)");
+ asm volatile("bndstx %bnd0, 0x12(%rbp,%rax,1)");
+ asm volatile("bndstx %bnd0, 0x12(%rax,%rcx,1)");
+ asm volatile("bndstx %bnd0, 0x12345678(%rax)");
+ asm volatile("bndstx %bnd0, 0x12345678(%rbp)");
+ asm volatile("bndstx %bnd0, 0x12345678(%rcx,%rax,1)");
+ asm volatile("bndstx %bnd0, 0x12345678(%rbp,%rax,1)");
+ asm volatile("bndstx %bnd0, 0x12345678(%rax,%rcx,1)");
+
+ /* bnd prefix on call, ret, jmp and all jcc */
+
+ asm volatile("bnd call label1"); /* Expecting: call unconditional 0 */
+ asm volatile("bnd call *(%eax)"); /* Expecting: call indirect 0 */
+ asm volatile("bnd ret"); /* Expecting: ret indirect 0 */
+ asm volatile("bnd jmp label1"); /* Expecting: jmp unconditional 0 */
+ asm volatile("bnd jmp label1"); /* Expecting: jmp unconditional 0 */
+ asm volatile("bnd jmp *(%ecx)"); /* Expecting: jmp indirect 0 */
+ asm volatile("bnd jne label1"); /* Expecting: jcc conditional 0 */
+
+ /* sha1rnds4 imm8, xmm2/m128, xmm1 */
+
+ asm volatile("sha1rnds4 $0x0, %xmm1, %xmm0");
+ asm volatile("sha1rnds4 $0x91, %xmm7, %xmm2");
+ asm volatile("sha1rnds4 $0x91, %xmm8, %xmm0");
+ asm volatile("sha1rnds4 $0x91, %xmm7, %xmm8");
+ asm volatile("sha1rnds4 $0x91, %xmm15, %xmm8");
+ asm volatile("sha1rnds4 $0x91, (%rax), %xmm0");
+ asm volatile("sha1rnds4 $0x91, (%r8), %xmm0");
+ asm volatile("sha1rnds4 $0x91, (0x12345678), %xmm0");
+ asm volatile("sha1rnds4 $0x91, (%rax), %xmm3");
+ asm volatile("sha1rnds4 $0x91, (%rcx,%rax,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(,%rax,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, (%rax,%rcx,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, (%rax,%rcx,8), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12(%rax), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12(%rbp), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12(%rcx,%rax,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12(%rbp,%rax,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12(%rax,%rcx,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12(%rax,%rcx,8), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(%rax), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(%rbp), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(%rcx,%rax,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(%rbp,%rax,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(%rax,%rcx,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(%rax,%rcx,8), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(%rax,%rcx,8), %xmm15");
+
+ /* sha1nexte xmm2/m128, xmm1 */
+
+ asm volatile("sha1nexte %xmm1, %xmm0");
+ asm volatile("sha1nexte %xmm7, %xmm2");
+ asm volatile("sha1nexte %xmm8, %xmm0");
+ asm volatile("sha1nexte %xmm7, %xmm8");
+ asm volatile("sha1nexte %xmm15, %xmm8");
+ asm volatile("sha1nexte (%rax), %xmm0");
+ asm volatile("sha1nexte (%r8), %xmm0");
+ asm volatile("sha1nexte (0x12345678), %xmm0");
+ asm volatile("sha1nexte (%rax), %xmm3");
+ asm volatile("sha1nexte (%rcx,%rax,1), %xmm0");
+ asm volatile("sha1nexte 0x12345678(,%rax,1), %xmm0");
+ asm volatile("sha1nexte (%rax,%rcx,1), %xmm0");
+ asm volatile("sha1nexte (%rax,%rcx,8), %xmm0");
+ asm volatile("sha1nexte 0x12(%rax), %xmm0");
+ asm volatile("sha1nexte 0x12(%rbp), %xmm0");
+ asm volatile("sha1nexte 0x12(%rcx,%rax,1), %xmm0");
+ asm volatile("sha1nexte 0x12(%rbp,%rax,1), %xmm0");
+ asm volatile("sha1nexte 0x12(%rax,%rcx,1), %xmm0");
+ asm volatile("sha1nexte 0x12(%rax,%rcx,8), %xmm0");
+ asm volatile("sha1nexte 0x12345678(%rax), %xmm0");
+ asm volatile("sha1nexte 0x12345678(%rbp), %xmm0");
+ asm volatile("sha1nexte 0x12345678(%rcx,%rax,1), %xmm0");
+ asm volatile("sha1nexte 0x12345678(%rbp,%rax,1), %xmm0");
+ asm volatile("sha1nexte 0x12345678(%rax,%rcx,1), %xmm0");
+ asm volatile("sha1nexte 0x12345678(%rax,%rcx,8), %xmm0");
+ asm volatile("sha1nexte 0x12345678(%rax,%rcx,8), %xmm15");
+
+ /* sha1msg1 xmm2/m128, xmm1 */
+
+ asm volatile("sha1msg1 %xmm1, %xmm0");
+ asm volatile("sha1msg1 %xmm7, %xmm2");
+ asm volatile("sha1msg1 %xmm8, %xmm0");
+ asm volatile("sha1msg1 %xmm7, %xmm8");
+ asm volatile("sha1msg1 %xmm15, %xmm8");
+ asm volatile("sha1msg1 (%rax), %xmm0");
+ asm volatile("sha1msg1 (%r8), %xmm0");
+ asm volatile("sha1msg1 (0x12345678), %xmm0");
+ asm volatile("sha1msg1 (%rax), %xmm3");
+ asm volatile("sha1msg1 (%rcx,%rax,1), %xmm0");
+ asm volatile("sha1msg1 0x12345678(,%rax,1), %xmm0");
+ asm volatile("sha1msg1 (%rax,%rcx,1), %xmm0");
+ asm volatile("sha1msg1 (%rax,%rcx,8), %xmm0");
+ asm volatile("sha1msg1 0x12(%rax), %xmm0");
+ asm volatile("sha1msg1 0x12(%rbp), %xmm0");
+ asm volatile("sha1msg1 0x12(%rcx,%rax,1), %xmm0");
+ asm volatile("sha1msg1 0x12(%rbp,%rax,1), %xmm0");
+ asm volatile("sha1msg1 0x12(%rax,%rcx,1), %xmm0");
+ asm volatile("sha1msg1 0x12(%rax,%rcx,8), %xmm0");
+ asm volatile("sha1msg1 0x12345678(%rax), %xmm0");
+ asm volatile("sha1msg1 0x12345678(%rbp), %xmm0");
+ asm volatile("sha1msg1 0x12345678(%rcx,%rax,1), %xmm0");
+ asm volatile("sha1msg1 0x12345678(%rbp,%rax,1), %xmm0");
+ asm volatile("sha1msg1 0x12345678(%rax,%rcx,1), %xmm0");
+ asm volatile("sha1msg1 0x12345678(%rax,%rcx,8), %xmm0");
+ asm volatile("sha1msg1 0x12345678(%rax,%rcx,8), %xmm15");
+
+ /* sha1msg2 xmm2/m128, xmm1 */
+
+ asm volatile("sha1msg2 %xmm1, %xmm0");
+ asm volatile("sha1msg2 %xmm7, %xmm2");
+ asm volatile("sha1msg2 %xmm8, %xmm0");
+ asm volatile("sha1msg2 %xmm7, %xmm8");
+ asm volatile("sha1msg2 %xmm15, %xmm8");
+ asm volatile("sha1msg2 (%rax), %xmm0");
+ asm volatile("sha1msg2 (%r8), %xmm0");
+ asm volatile("sha1msg2 (0x12345678), %xmm0");
+ asm volatile("sha1msg2 (%rax), %xmm3");
+ asm volatile("sha1msg2 (%rcx,%rax,1), %xmm0");
+ asm volatile("sha1msg2 0x12345678(,%rax,1), %xmm0");
+ asm volatile("sha1msg2 (%rax,%rcx,1), %xmm0");
+ asm volatile("sha1msg2 (%rax,%rcx,8), %xmm0");
+ asm volatile("sha1msg2 0x12(%rax), %xmm0");
+ asm volatile("sha1msg2 0x12(%rbp), %xmm0");
+ asm volatile("sha1msg2 0x12(%rcx,%rax,1), %xmm0");
+ asm volatile("sha1msg2 0x12(%rbp,%rax,1), %xmm0");
+ asm volatile("sha1msg2 0x12(%rax,%rcx,1), %xmm0");
+ asm volatile("sha1msg2 0x12(%rax,%rcx,8), %xmm0");
+ asm volatile("sha1msg2 0x12345678(%rax), %xmm0");
+ asm volatile("sha1msg2 0x12345678(%rbp), %xmm0");
+ asm volatile("sha1msg2 0x12345678(%rcx,%rax,1), %xmm0");
+ asm volatile("sha1msg2 0x12345678(%rbp,%rax,1), %xmm0");
+ asm volatile("sha1msg2 0x12345678(%rax,%rcx,1), %xmm0");
+ asm volatile("sha1msg2 0x12345678(%rax,%rcx,8), %xmm0");
+ asm volatile("sha1msg2 0x12345678(%rax,%rcx,8), %xmm15");
+
+ /* sha256rnds2 <XMM0>, xmm2/m128, xmm1 */
+ /* Note sha256rnds2 has an implicit operand 'xmm0' */
+
+ asm volatile("sha256rnds2 %xmm4, %xmm1");
+ asm volatile("sha256rnds2 %xmm7, %xmm2");
+ asm volatile("sha256rnds2 %xmm8, %xmm1");
+ asm volatile("sha256rnds2 %xmm7, %xmm8");
+ asm volatile("sha256rnds2 %xmm15, %xmm8");
+ asm volatile("sha256rnds2 (%rax), %xmm1");
+ asm volatile("sha256rnds2 (%r8), %xmm1");
+ asm volatile("sha256rnds2 (0x12345678), %xmm1");
+ asm volatile("sha256rnds2 (%rax), %xmm3");
+ asm volatile("sha256rnds2 (%rcx,%rax,1), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(,%rax,1), %xmm1");
+ asm volatile("sha256rnds2 (%rax,%rcx,1), %xmm1");
+ asm volatile("sha256rnds2 (%rax,%rcx,8), %xmm1");
+ asm volatile("sha256rnds2 0x12(%rax), %xmm1");
+ asm volatile("sha256rnds2 0x12(%rbp), %xmm1");
+ asm volatile("sha256rnds2 0x12(%rcx,%rax,1), %xmm1");
+ asm volatile("sha256rnds2 0x12(%rbp,%rax,1), %xmm1");
+ asm volatile("sha256rnds2 0x12(%rax,%rcx,1), %xmm1");
+ asm volatile("sha256rnds2 0x12(%rax,%rcx,8), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(%rax), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(%rbp), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(%rcx,%rax,1), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(%rbp,%rax,1), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(%rax,%rcx,1), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(%rax,%rcx,8), %xmm15");
+
+ /* sha256msg1 xmm2/m128, xmm1 */
+
+ asm volatile("sha256msg1 %xmm1, %xmm0");
+ asm volatile("sha256msg1 %xmm7, %xmm2");
+ asm volatile("sha256msg1 %xmm8, %xmm0");
+ asm volatile("sha256msg1 %xmm7, %xmm8");
+ asm volatile("sha256msg1 %xmm15, %xmm8");
+ asm volatile("sha256msg1 (%rax), %xmm0");
+ asm volatile("sha256msg1 (%r8), %xmm0");
+ asm volatile("sha256msg1 (0x12345678), %xmm0");
+ asm volatile("sha256msg1 (%rax), %xmm3");
+ asm volatile("sha256msg1 (%rcx,%rax,1), %xmm0");
+ asm volatile("sha256msg1 0x12345678(,%rax,1), %xmm0");
+ asm volatile("sha256msg1 (%rax,%rcx,1), %xmm0");
+ asm volatile("sha256msg1 (%rax,%rcx,8), %xmm0");
+ asm volatile("sha256msg1 0x12(%rax), %xmm0");
+ asm volatile("sha256msg1 0x12(%rbp), %xmm0");
+ asm volatile("sha256msg1 0x12(%rcx,%rax,1), %xmm0");
+ asm volatile("sha256msg1 0x12(%rbp,%rax,1), %xmm0");
+ asm volatile("sha256msg1 0x12(%rax,%rcx,1), %xmm0");
+ asm volatile("sha256msg1 0x12(%rax,%rcx,8), %xmm0");
+ asm volatile("sha256msg1 0x12345678(%rax), %xmm0");
+ asm volatile("sha256msg1 0x12345678(%rbp), %xmm0");
+ asm volatile("sha256msg1 0x12345678(%rcx,%rax,1), %xmm0");
+ asm volatile("sha256msg1 0x12345678(%rbp,%rax,1), %xmm0");
+ asm volatile("sha256msg1 0x12345678(%rax,%rcx,1), %xmm0");
+ asm volatile("sha256msg1 0x12345678(%rax,%rcx,8), %xmm0");
+ asm volatile("sha256msg1 0x12345678(%rax,%rcx,8), %xmm15");
+
+ /* sha256msg2 xmm2/m128, xmm1 */
+
+ asm volatile("sha256msg2 %xmm1, %xmm0");
+ asm volatile("sha256msg2 %xmm7, %xmm2");
+ asm volatile("sha256msg2 %xmm8, %xmm0");
+ asm volatile("sha256msg2 %xmm7, %xmm8");
+ asm volatile("sha256msg2 %xmm15, %xmm8");
+ asm volatile("sha256msg2 (%rax), %xmm0");
+ asm volatile("sha256msg2 (%r8), %xmm0");
+ asm volatile("sha256msg2 (0x12345678), %xmm0");
+ asm volatile("sha256msg2 (%rax), %xmm3");
+ asm volatile("sha256msg2 (%rcx,%rax,1), %xmm0");
+ asm volatile("sha256msg2 0x12345678(,%rax,1), %xmm0");
+ asm volatile("sha256msg2 (%rax,%rcx,1), %xmm0");
+ asm volatile("sha256msg2 (%rax,%rcx,8), %xmm0");
+ asm volatile("sha256msg2 0x12(%rax), %xmm0");
+ asm volatile("sha256msg2 0x12(%rbp), %xmm0");
+ asm volatile("sha256msg2 0x12(%rcx,%rax,1), %xmm0");
+ asm volatile("sha256msg2 0x12(%rbp,%rax,1), %xmm0");
+ asm volatile("sha256msg2 0x12(%rax,%rcx,1), %xmm0");
+ asm volatile("sha256msg2 0x12(%rax,%rcx,8), %xmm0");
+ asm volatile("sha256msg2 0x12345678(%rax), %xmm0");
+ asm volatile("sha256msg2 0x12345678(%rbp), %xmm0");
+ asm volatile("sha256msg2 0x12345678(%rcx,%rax,1), %xmm0");
+ asm volatile("sha256msg2 0x12345678(%rbp,%rax,1), %xmm0");
+ asm volatile("sha256msg2 0x12345678(%rax,%rcx,1), %xmm0");
+ asm volatile("sha256msg2 0x12345678(%rax,%rcx,8), %xmm0");
+ asm volatile("sha256msg2 0x12345678(%rax,%rcx,8), %xmm15");
+
+ /* clflushopt m8 */
+
+ asm volatile("clflushopt (%rax)");
+ asm volatile("clflushopt (%r8)");
+ asm volatile("clflushopt (0x12345678)");
+ asm volatile("clflushopt 0x12345678(%rax,%rcx,8)");
+ asm volatile("clflushopt 0x12345678(%r8,%rcx,8)");
+ /* Also check instructions in the same group encoding as clflushopt */
+ asm volatile("clflush (%rax)");
+ asm volatile("clflush (%r8)");
+ asm volatile("sfence");
+
+ /* clwb m8 */
+
+ asm volatile("clwb (%rax)");
+ asm volatile("clwb (%r8)");
+ asm volatile("clwb (0x12345678)");
+ asm volatile("clwb 0x12345678(%rax,%rcx,8)");
+ asm volatile("clwb 0x12345678(%r8,%rcx,8)");
+ /* Also check instructions in the same group encoding as clwb */
+ asm volatile("xsaveopt (%rax)");
+ asm volatile("xsaveopt (%r8)");
+ asm volatile("mfence");
+
+ /* xsavec mem */
+
+ asm volatile("xsavec (%rax)");
+ asm volatile("xsavec (%r8)");
+ asm volatile("xsavec (0x12345678)");
+ asm volatile("xsavec 0x12345678(%rax,%rcx,8)");
+ asm volatile("xsavec 0x12345678(%r8,%rcx,8)");
+
+ /* xsaves mem */
+
+ asm volatile("xsaves (%rax)");
+ asm volatile("xsaves (%r8)");
+ asm volatile("xsaves (0x12345678)");
+ asm volatile("xsaves 0x12345678(%rax,%rcx,8)");
+ asm volatile("xsaves 0x12345678(%r8,%rcx,8)");
+
+ /* xrstors mem */
+
+ asm volatile("xrstors (%rax)");
+ asm volatile("xrstors (%r8)");
+ asm volatile("xrstors (0x12345678)");
+ asm volatile("xrstors 0x12345678(%rax,%rcx,8)");
+ asm volatile("xrstors 0x12345678(%r8,%rcx,8)");
+
+#else /* #ifdef __x86_64__ */
+
+ /* bndmk m32, bnd */
+
+ asm volatile("bndmk (%eax), %bnd0");
+ asm volatile("bndmk (0x12345678), %bnd0");
+ asm volatile("bndmk (%eax), %bnd3");
+ asm volatile("bndmk (%ecx,%eax,1), %bnd0");
+ asm volatile("bndmk 0x12345678(,%eax,1), %bnd0");
+ asm volatile("bndmk (%eax,%ecx,1), %bnd0");
+ asm volatile("bndmk (%eax,%ecx,8), %bnd0");
+ asm volatile("bndmk 0x12(%eax), %bnd0");
+ asm volatile("bndmk 0x12(%ebp), %bnd0");
+ asm volatile("bndmk 0x12(%ecx,%eax,1), %bnd0");
+ asm volatile("bndmk 0x12(%ebp,%eax,1), %bnd0");
+ asm volatile("bndmk 0x12(%eax,%ecx,1), %bnd0");
+ asm volatile("bndmk 0x12(%eax,%ecx,8), %bnd0");
+ asm volatile("bndmk 0x12345678(%eax), %bnd0");
+ asm volatile("bndmk 0x12345678(%ebp), %bnd0");
+ asm volatile("bndmk 0x12345678(%ecx,%eax,1), %bnd0");
+ asm volatile("bndmk 0x12345678(%ebp,%eax,1), %bnd0");
+ asm volatile("bndmk 0x12345678(%eax,%ecx,1), %bnd0");
+ asm volatile("bndmk 0x12345678(%eax,%ecx,8), %bnd0");
+
+ /* bndcl r/m32, bnd */
+
+ asm volatile("bndcl (%eax), %bnd0");
+ asm volatile("bndcl (0x12345678), %bnd0");
+ asm volatile("bndcl (%eax), %bnd3");
+ asm volatile("bndcl (%ecx,%eax,1), %bnd0");
+ asm volatile("bndcl 0x12345678(,%eax,1), %bnd0");
+ asm volatile("bndcl (%eax,%ecx,1), %bnd0");
+ asm volatile("bndcl (%eax,%ecx,8), %bnd0");
+ asm volatile("bndcl 0x12(%eax), %bnd0");
+ asm volatile("bndcl 0x12(%ebp), %bnd0");
+ asm volatile("bndcl 0x12(%ecx,%eax,1), %bnd0");
+ asm volatile("bndcl 0x12(%ebp,%eax,1), %bnd0");
+ asm volatile("bndcl 0x12(%eax,%ecx,1), %bnd0");
+ asm volatile("bndcl 0x12(%eax,%ecx,8), %bnd0");
+ asm volatile("bndcl 0x12345678(%eax), %bnd0");
+ asm volatile("bndcl 0x12345678(%ebp), %bnd0");
+ asm volatile("bndcl 0x12345678(%ecx,%eax,1), %bnd0");
+ asm volatile("bndcl 0x12345678(%ebp,%eax,1), %bnd0");
+ asm volatile("bndcl 0x12345678(%eax,%ecx,1), %bnd0");
+ asm volatile("bndcl 0x12345678(%eax,%ecx,8), %bnd0");
+ asm volatile("bndcl %eax, %bnd0");
+
+ /* bndcu r/m32, bnd */
+
+ asm volatile("bndcu (%eax), %bnd0");
+ asm volatile("bndcu (0x12345678), %bnd0");
+ asm volatile("bndcu (%eax), %bnd3");
+ asm volatile("bndcu (%ecx,%eax,1), %bnd0");
+ asm volatile("bndcu 0x12345678(,%eax,1), %bnd0");
+ asm volatile("bndcu (%eax,%ecx,1), %bnd0");
+ asm volatile("bndcu (%eax,%ecx,8), %bnd0");
+ asm volatile("bndcu 0x12(%eax), %bnd0");
+ asm volatile("bndcu 0x12(%ebp), %bnd0");
+ asm volatile("bndcu 0x12(%ecx,%eax,1), %bnd0");
+ asm volatile("bndcu 0x12(%ebp,%eax,1), %bnd0");
+ asm volatile("bndcu 0x12(%eax,%ecx,1), %bnd0");
+ asm volatile("bndcu 0x12(%eax,%ecx,8), %bnd0");
+ asm volatile("bndcu 0x12345678(%eax), %bnd0");
+ asm volatile("bndcu 0x12345678(%ebp), %bnd0");
+ asm volatile("bndcu 0x12345678(%ecx,%eax,1), %bnd0");
+ asm volatile("bndcu 0x12345678(%ebp,%eax,1), %bnd0");
+ asm volatile("bndcu 0x12345678(%eax,%ecx,1), %bnd0");
+ asm volatile("bndcu 0x12345678(%eax,%ecx,8), %bnd0");
+ asm volatile("bndcu %eax, %bnd0");
+
+ /* bndcn r/m32, bnd */
+
+ asm volatile("bndcn (%eax), %bnd0");
+ asm volatile("bndcn (0x12345678), %bnd0");
+ asm volatile("bndcn (%eax), %bnd3");
+ asm volatile("bndcn (%ecx,%eax,1), %bnd0");
+ asm volatile("bndcn 0x12345678(,%eax,1), %bnd0");
+ asm volatile("bndcn (%eax,%ecx,1), %bnd0");
+ asm volatile("bndcn (%eax,%ecx,8), %bnd0");
+ asm volatile("bndcn 0x12(%eax), %bnd0");
+ asm volatile("bndcn 0x12(%ebp), %bnd0");
+ asm volatile("bndcn 0x12(%ecx,%eax,1), %bnd0");
+ asm volatile("bndcn 0x12(%ebp,%eax,1), %bnd0");
+ asm volatile("bndcn 0x12(%eax,%ecx,1), %bnd0");
+ asm volatile("bndcn 0x12(%eax,%ecx,8), %bnd0");
+ asm volatile("bndcn 0x12345678(%eax), %bnd0");
+ asm volatile("bndcn 0x12345678(%ebp), %bnd0");
+ asm volatile("bndcn 0x12345678(%ecx,%eax,1), %bnd0");
+ asm volatile("bndcn 0x12345678(%ebp,%eax,1), %bnd0");
+ asm volatile("bndcn 0x12345678(%eax,%ecx,1), %bnd0");
+ asm volatile("bndcn 0x12345678(%eax,%ecx,8), %bnd0");
+ asm volatile("bndcn %eax, %bnd0");
+
+ /* bndmov m64, bnd */
+
+ asm volatile("bndmov (%eax), %bnd0");
+ asm volatile("bndmov (0x12345678), %bnd0");
+ asm volatile("bndmov (%eax), %bnd3");
+ asm volatile("bndmov (%ecx,%eax,1), %bnd0");
+ asm volatile("bndmov 0x12345678(,%eax,1), %bnd0");
+ asm volatile("bndmov (%eax,%ecx,1), %bnd0");
+ asm volatile("bndmov (%eax,%ecx,8), %bnd0");
+ asm volatile("bndmov 0x12(%eax), %bnd0");
+ asm volatile("bndmov 0x12(%ebp), %bnd0");
+ asm volatile("bndmov 0x12(%ecx,%eax,1), %bnd0");
+ asm volatile("bndmov 0x12(%ebp,%eax,1), %bnd0");
+ asm volatile("bndmov 0x12(%eax,%ecx,1), %bnd0");
+ asm volatile("bndmov 0x12(%eax,%ecx,8), %bnd0");
+ asm volatile("bndmov 0x12345678(%eax), %bnd0");
+ asm volatile("bndmov 0x12345678(%ebp), %bnd0");
+ asm volatile("bndmov 0x12345678(%ecx,%eax,1), %bnd0");
+ asm volatile("bndmov 0x12345678(%ebp,%eax,1), %bnd0");
+ asm volatile("bndmov 0x12345678(%eax,%ecx,1), %bnd0");
+ asm volatile("bndmov 0x12345678(%eax,%ecx,8), %bnd0");
+
+ /* bndmov bnd, m64 */
+
+ asm volatile("bndmov %bnd0, (%eax)");
+ asm volatile("bndmov %bnd0, (0x12345678)");
+ asm volatile("bndmov %bnd3, (%eax)");
+ asm volatile("bndmov %bnd0, (%ecx,%eax,1)");
+ asm volatile("bndmov %bnd0, 0x12345678(,%eax,1)");
+ asm volatile("bndmov %bnd0, (%eax,%ecx,1)");
+ asm volatile("bndmov %bnd0, (%eax,%ecx,8)");
+ asm volatile("bndmov %bnd0, 0x12(%eax)");
+ asm volatile("bndmov %bnd0, 0x12(%ebp)");
+ asm volatile("bndmov %bnd0, 0x12(%ecx,%eax,1)");
+ asm volatile("bndmov %bnd0, 0x12(%ebp,%eax,1)");
+ asm volatile("bndmov %bnd0, 0x12(%eax,%ecx,1)");
+ asm volatile("bndmov %bnd0, 0x12(%eax,%ecx,8)");
+ asm volatile("bndmov %bnd0, 0x12345678(%eax)");
+ asm volatile("bndmov %bnd0, 0x12345678(%ebp)");
+ asm volatile("bndmov %bnd0, 0x12345678(%ecx,%eax,1)");
+ asm volatile("bndmov %bnd0, 0x12345678(%ebp,%eax,1)");
+ asm volatile("bndmov %bnd0, 0x12345678(%eax,%ecx,1)");
+ asm volatile("bndmov %bnd0, 0x12345678(%eax,%ecx,8)");
+
+ /* bndmov bnd2, bnd1 */
+
+ asm volatile("bndmov %bnd0, %bnd1");
+ asm volatile("bndmov %bnd1, %bnd0");
+
+ /* bndldx mib, bnd */
+
+ asm volatile("bndldx (%eax), %bnd0");
+ asm volatile("bndldx (0x12345678), %bnd0");
+ asm volatile("bndldx (%eax), %bnd3");
+ asm volatile("bndldx (%ecx,%eax,1), %bnd0");
+ asm volatile("bndldx 0x12345678(,%eax,1), %bnd0");
+ asm volatile("bndldx (%eax,%ecx,1), %bnd0");
+ asm volatile("bndldx 0x12(%eax), %bnd0");
+ asm volatile("bndldx 0x12(%ebp), %bnd0");
+ asm volatile("bndldx 0x12(%ecx,%eax,1), %bnd0");
+ asm volatile("bndldx 0x12(%ebp,%eax,1), %bnd0");
+ asm volatile("bndldx 0x12(%eax,%ecx,1), %bnd0");
+ asm volatile("bndldx 0x12345678(%eax), %bnd0");
+ asm volatile("bndldx 0x12345678(%ebp), %bnd0");
+ asm volatile("bndldx 0x12345678(%ecx,%eax,1), %bnd0");
+ asm volatile("bndldx 0x12345678(%ebp,%eax,1), %bnd0");
+ asm volatile("bndldx 0x12345678(%eax,%ecx,1), %bnd0");
+
+ /* bndstx bnd, mib */
+
+ asm volatile("bndstx %bnd0, (%eax)");
+ asm volatile("bndstx %bnd0, (0x12345678)");
+ asm volatile("bndstx %bnd3, (%eax)");
+ asm volatile("bndstx %bnd0, (%ecx,%eax,1)");
+ asm volatile("bndstx %bnd0, 0x12345678(,%eax,1)");
+ asm volatile("bndstx %bnd0, (%eax,%ecx,1)");
+ asm volatile("bndstx %bnd0, 0x12(%eax)");
+ asm volatile("bndstx %bnd0, 0x12(%ebp)");
+ asm volatile("bndstx %bnd0, 0x12(%ecx,%eax,1)");
+ asm volatile("bndstx %bnd0, 0x12(%ebp,%eax,1)");
+ asm volatile("bndstx %bnd0, 0x12(%eax,%ecx,1)");
+ asm volatile("bndstx %bnd0, 0x12345678(%eax)");
+ asm volatile("bndstx %bnd0, 0x12345678(%ebp)");
+ asm volatile("bndstx %bnd0, 0x12345678(%ecx,%eax,1)");
+ asm volatile("bndstx %bnd0, 0x12345678(%ebp,%eax,1)");
+ asm volatile("bndstx %bnd0, 0x12345678(%eax,%ecx,1)");
+
+ /* bnd prefix on call, ret, jmp and all jcc */
+
+ asm volatile("bnd call label1"); /* Expecting: call unconditional 0xfffffffc */
+ asm volatile("bnd call *(%eax)"); /* Expecting: call indirect 0 */
+ asm volatile("bnd ret"); /* Expecting: ret indirect 0 */
+ asm volatile("bnd jmp label1"); /* Expecting: jmp unconditional 0xfffffffc */
+ asm volatile("bnd jmp label1"); /* Expecting: jmp unconditional 0xfffffffc */
+ asm volatile("bnd jmp *(%ecx)"); /* Expecting: jmp indirect 0 */
+ asm volatile("bnd jne label1"); /* Expecting: jcc conditional 0xfffffffc */
+
+ /* sha1rnds4 imm8, xmm2/m128, xmm1 */
+
+ asm volatile("sha1rnds4 $0x0, %xmm1, %xmm0");
+ asm volatile("sha1rnds4 $0x91, %xmm7, %xmm2");
+ asm volatile("sha1rnds4 $0x91, (%eax), %xmm0");
+ asm volatile("sha1rnds4 $0x91, (0x12345678), %xmm0");
+ asm volatile("sha1rnds4 $0x91, (%eax), %xmm3");
+ asm volatile("sha1rnds4 $0x91, (%ecx,%eax,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(,%eax,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, (%eax,%ecx,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, (%eax,%ecx,8), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12(%eax), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12(%ebp), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12(%ecx,%eax,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12(%ebp,%eax,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12(%eax,%ecx,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12(%eax,%ecx,8), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(%eax), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(%ebp), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(%ecx,%eax,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(%ebp,%eax,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(%eax,%ecx,1), %xmm0");
+ asm volatile("sha1rnds4 $0x91, 0x12345678(%eax,%ecx,8), %xmm0");
+
+ /* sha1nexte xmm2/m128, xmm1 */
+
+ asm volatile("sha1nexte %xmm1, %xmm0");
+ asm volatile("sha1nexte %xmm7, %xmm2");
+ asm volatile("sha1nexte (%eax), %xmm0");
+ asm volatile("sha1nexte (0x12345678), %xmm0");
+ asm volatile("sha1nexte (%eax), %xmm3");
+ asm volatile("sha1nexte (%ecx,%eax,1), %xmm0");
+ asm volatile("sha1nexte 0x12345678(,%eax,1), %xmm0");
+ asm volatile("sha1nexte (%eax,%ecx,1), %xmm0");
+ asm volatile("sha1nexte (%eax,%ecx,8), %xmm0");
+ asm volatile("sha1nexte 0x12(%eax), %xmm0");
+ asm volatile("sha1nexte 0x12(%ebp), %xmm0");
+ asm volatile("sha1nexte 0x12(%ecx,%eax,1), %xmm0");
+ asm volatile("sha1nexte 0x12(%ebp,%eax,1), %xmm0");
+ asm volatile("sha1nexte 0x12(%eax,%ecx,1), %xmm0");
+ asm volatile("sha1nexte 0x12(%eax,%ecx,8), %xmm0");
+ asm volatile("sha1nexte 0x12345678(%eax), %xmm0");
+ asm volatile("sha1nexte 0x12345678(%ebp), %xmm0");
+ asm volatile("sha1nexte 0x12345678(%ecx,%eax,1), %xmm0");
+ asm volatile("sha1nexte 0x12345678(%ebp,%eax,1), %xmm0");
+ asm volatile("sha1nexte 0x12345678(%eax,%ecx,1), %xmm0");
+ asm volatile("sha1nexte 0x12345678(%eax,%ecx,8), %xmm0");
+
+ /* sha1msg1 xmm2/m128, xmm1 */
+
+ asm volatile("sha1msg1 %xmm1, %xmm0");
+ asm volatile("sha1msg1 %xmm7, %xmm2");
+ asm volatile("sha1msg1 (%eax), %xmm0");
+ asm volatile("sha1msg1 (0x12345678), %xmm0");
+ asm volatile("sha1msg1 (%eax), %xmm3");
+ asm volatile("sha1msg1 (%ecx,%eax,1), %xmm0");
+ asm volatile("sha1msg1 0x12345678(,%eax,1), %xmm0");
+ asm volatile("sha1msg1 (%eax,%ecx,1), %xmm0");
+ asm volatile("sha1msg1 (%eax,%ecx,8), %xmm0");
+ asm volatile("sha1msg1 0x12(%eax), %xmm0");
+ asm volatile("sha1msg1 0x12(%ebp), %xmm0");
+ asm volatile("sha1msg1 0x12(%ecx,%eax,1), %xmm0");
+ asm volatile("sha1msg1 0x12(%ebp,%eax,1), %xmm0");
+ asm volatile("sha1msg1 0x12(%eax,%ecx,1), %xmm0");
+ asm volatile("sha1msg1 0x12(%eax,%ecx,8), %xmm0");
+ asm volatile("sha1msg1 0x12345678(%eax), %xmm0");
+ asm volatile("sha1msg1 0x12345678(%ebp), %xmm0");
+ asm volatile("sha1msg1 0x12345678(%ecx,%eax,1), %xmm0");
+ asm volatile("sha1msg1 0x12345678(%ebp,%eax,1), %xmm0");
+ asm volatile("sha1msg1 0x12345678(%eax,%ecx,1), %xmm0");
+ asm volatile("sha1msg1 0x12345678(%eax,%ecx,8), %xmm0");
+
+ /* sha1msg2 xmm2/m128, xmm1 */
+
+ asm volatile("sha1msg2 %xmm1, %xmm0");
+ asm volatile("sha1msg2 %xmm7, %xmm2");
+ asm volatile("sha1msg2 (%eax), %xmm0");
+ asm volatile("sha1msg2 (0x12345678), %xmm0");
+ asm volatile("sha1msg2 (%eax), %xmm3");
+ asm volatile("sha1msg2 (%ecx,%eax,1), %xmm0");
+ asm volatile("sha1msg2 0x12345678(,%eax,1), %xmm0");
+ asm volatile("sha1msg2 (%eax,%ecx,1), %xmm0");
+ asm volatile("sha1msg2 (%eax,%ecx,8), %xmm0");
+ asm volatile("sha1msg2 0x12(%eax), %xmm0");
+ asm volatile("sha1msg2 0x12(%ebp), %xmm0");
+ asm volatile("sha1msg2 0x12(%ecx,%eax,1), %xmm0");
+ asm volatile("sha1msg2 0x12(%ebp,%eax,1), %xmm0");
+ asm volatile("sha1msg2 0x12(%eax,%ecx,1), %xmm0");
+ asm volatile("sha1msg2 0x12(%eax,%ecx,8), %xmm0");
+ asm volatile("sha1msg2 0x12345678(%eax), %xmm0");
+ asm volatile("sha1msg2 0x12345678(%ebp), %xmm0");
+ asm volatile("sha1msg2 0x12345678(%ecx,%eax,1), %xmm0");
+ asm volatile("sha1msg2 0x12345678(%ebp,%eax,1), %xmm0");
+ asm volatile("sha1msg2 0x12345678(%eax,%ecx,1), %xmm0");
+ asm volatile("sha1msg2 0x12345678(%eax,%ecx,8), %xmm0");
+
+ /* sha256rnds2 <XMM0>, xmm2/m128, xmm1 */
+ /* Note sha256rnds2 has an implicit operand 'xmm0' */
+
+ asm volatile("sha256rnds2 %xmm4, %xmm1");
+ asm volatile("sha256rnds2 %xmm7, %xmm2");
+ asm volatile("sha256rnds2 (%eax), %xmm1");
+ asm volatile("sha256rnds2 (0x12345678), %xmm1");
+ asm volatile("sha256rnds2 (%eax), %xmm3");
+ asm volatile("sha256rnds2 (%ecx,%eax,1), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(,%eax,1), %xmm1");
+ asm volatile("sha256rnds2 (%eax,%ecx,1), %xmm1");
+ asm volatile("sha256rnds2 (%eax,%ecx,8), %xmm1");
+ asm volatile("sha256rnds2 0x12(%eax), %xmm1");
+ asm volatile("sha256rnds2 0x12(%ebp), %xmm1");
+ asm volatile("sha256rnds2 0x12(%ecx,%eax,1), %xmm1");
+ asm volatile("sha256rnds2 0x12(%ebp,%eax,1), %xmm1");
+ asm volatile("sha256rnds2 0x12(%eax,%ecx,1), %xmm1");
+ asm volatile("sha256rnds2 0x12(%eax,%ecx,8), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(%eax), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(%ebp), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(%ecx,%eax,1), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(%ebp,%eax,1), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(%eax,%ecx,1), %xmm1");
+ asm volatile("sha256rnds2 0x12345678(%eax,%ecx,8), %xmm1");
+
+ /* sha256msg1 xmm2/m128, xmm1 */
+
+ asm volatile("sha256msg1 %xmm1, %xmm0");
+ asm volatile("sha256msg1 %xmm7, %xmm2");
+ asm volatile("sha256msg1 (%eax), %xmm0");
+ asm volatile("sha256msg1 (0x12345678), %xmm0");
+ asm volatile("sha256msg1 (%eax), %xmm3");
+ asm volatile("sha256msg1 (%ecx,%eax,1), %xmm0");
+ asm volatile("sha256msg1 0x12345678(,%eax,1), %xmm0");
+ asm volatile("sha256msg1 (%eax,%ecx,1), %xmm0");
+ asm volatile("sha256msg1 (%eax,%ecx,8), %xmm0");
+ asm volatile("sha256msg1 0x12(%eax), %xmm0");
+ asm volatile("sha256msg1 0x12(%ebp), %xmm0");
+ asm volatile("sha256msg1 0x12(%ecx,%eax,1), %xmm0");
+ asm volatile("sha256msg1 0x12(%ebp,%eax,1), %xmm0");
+ asm volatile("sha256msg1 0x12(%eax,%ecx,1), %xmm0");
+ asm volatile("sha256msg1 0x12(%eax,%ecx,8), %xmm0");
+ asm volatile("sha256msg1 0x12345678(%eax), %xmm0");
+ asm volatile("sha256msg1 0x12345678(%ebp), %xmm0");
+ asm volatile("sha256msg1 0x12345678(%ecx,%eax,1), %xmm0");
+ asm volatile("sha256msg1 0x12345678(%ebp,%eax,1), %xmm0");
+ asm volatile("sha256msg1 0x12345678(%eax,%ecx,1), %xmm0");
+ asm volatile("sha256msg1 0x12345678(%eax,%ecx,8), %xmm0");
+
+ /* sha256msg2 xmm2/m128, xmm1 */
+
+ asm volatile("sha256msg2 %xmm1, %xmm0");
+ asm volatile("sha256msg2 %xmm7, %xmm2");
+ asm volatile("sha256msg2 (%eax), %xmm0");
+ asm volatile("sha256msg2 (0x12345678), %xmm0");
+ asm volatile("sha256msg2 (%eax), %xmm3");
+ asm volatile("sha256msg2 (%ecx,%eax,1), %xmm0");
+ asm volatile("sha256msg2 0x12345678(,%eax,1), %xmm0");
+ asm volatile("sha256msg2 (%eax,%ecx,1), %xmm0");
+ asm volatile("sha256msg2 (%eax,%ecx,8), %xmm0");
+ asm volatile("sha256msg2 0x12(%eax), %xmm0");
+ asm volatile("sha256msg2 0x12(%ebp), %xmm0");
+ asm volatile("sha256msg2 0x12(%ecx,%eax,1), %xmm0");
+ asm volatile("sha256msg2 0x12(%ebp,%eax,1), %xmm0");
+ asm volatile("sha256msg2 0x12(%eax,%ecx,1), %xmm0");
+ asm volatile("sha256msg2 0x12(%eax,%ecx,8), %xmm0");
+ asm volatile("sha256msg2 0x12345678(%eax), %xmm0");
+ asm volatile("sha256msg2 0x12345678(%ebp), %xmm0");
+ asm volatile("sha256msg2 0x12345678(%ecx,%eax,1), %xmm0");
+ asm volatile("sha256msg2 0x12345678(%ebp,%eax,1), %xmm0");
+ asm volatile("sha256msg2 0x12345678(%eax,%ecx,1), %xmm0");
+ asm volatile("sha256msg2 0x12345678(%eax,%ecx,8), %xmm0");
+
+ /* clflushopt m8 */
+
+ asm volatile("clflushopt (%eax)");
+ asm volatile("clflushopt (0x12345678)");
+ asm volatile("clflushopt 0x12345678(%eax,%ecx,8)");
+ /* Also check instructions in the same group encoding as clflushopt */
+ asm volatile("clflush (%eax)");
+ asm volatile("sfence");
+
+ /* clwb m8 */
+
+ asm volatile("clwb (%eax)");
+ asm volatile("clwb (0x12345678)");
+ asm volatile("clwb 0x12345678(%eax,%ecx,8)");
+ /* Also check instructions in the same group encoding as clwb */
+ asm volatile("xsaveopt (%eax)");
+ asm volatile("mfence");
+
+ /* xsavec mem */
+
+ asm volatile("xsavec (%eax)");
+ asm volatile("xsavec (0x12345678)");
+ asm volatile("xsavec 0x12345678(%eax,%ecx,8)");
+
+ /* xsaves mem */
+
+ asm volatile("xsaves (%eax)");
+ asm volatile("xsaves (0x12345678)");
+ asm volatile("xsaves 0x12345678(%eax,%ecx,8)");
+
+ /* xrstors mem */
+
+ asm volatile("xrstors (%eax)");
+ asm volatile("xrstors (0x12345678)");
+ asm volatile("xrstors 0x12345678(%eax,%ecx,8)");
+
+#endif /* #ifndef __x86_64__ */
+
+ /* pcommit */
+
+ asm volatile("pcommit");
+
+ /* Following line is a marker for the awk script - do not change */
+ asm volatile("rdtsc"); /* Stop here */
+
+ return 0;
+}
diff --git a/kernel/tools/perf/arch/x86/tests/insn-x86.c b/kernel/tools/perf/arch/x86/tests/insn-x86.c
new file mode 100644
index 000000000..b6115dfd2
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/tests/insn-x86.c
@@ -0,0 +1,185 @@
+#include <linux/types.h>
+
+#include "debug.h"
+#include "tests/tests.h"
+#include "arch-tests.h"
+
+#include "intel-pt-decoder/insn.h"
+#include "intel-pt-decoder/intel-pt-insn-decoder.h"
+
+struct test_data {
+ u8 data[MAX_INSN_SIZE];
+ int expected_length;
+ int expected_rel;
+ const char *expected_op_str;
+ const char *expected_branch_str;
+ const char *asm_rep;
+};
+
+struct test_data test_data_32[] = {
+#include "insn-x86-dat-32.c"
+ {{0x0f, 0x01, 0xee}, 3, 0, NULL, NULL, "0f 01 ee \trdpkru"},
+ {{0x0f, 0x01, 0xef}, 3, 0, NULL, NULL, "0f 01 ef \twrpkru"},
+ {{0}, 0, 0, NULL, NULL, NULL},
+};
+
+struct test_data test_data_64[] = {
+#include "insn-x86-dat-64.c"
+ {{0x0f, 0x01, 0xee}, 3, 0, NULL, NULL, "0f 01 ee \trdpkru"},
+ {{0x0f, 0x01, 0xef}, 3, 0, NULL, NULL, "0f 01 ef \twrpkru"},
+ {{0}, 0, 0, NULL, NULL, NULL},
+};
+
+static int get_op(const char *op_str)
+{
+ struct val_data {
+ const char *name;
+ int val;
+ } vals[] = {
+ {"other", INTEL_PT_OP_OTHER},
+ {"call", INTEL_PT_OP_CALL},
+ {"ret", INTEL_PT_OP_RET},
+ {"jcc", INTEL_PT_OP_JCC},
+ {"jmp", INTEL_PT_OP_JMP},
+ {"loop", INTEL_PT_OP_LOOP},
+ {"iret", INTEL_PT_OP_IRET},
+ {"int", INTEL_PT_OP_INT},
+ {"syscall", INTEL_PT_OP_SYSCALL},
+ {"sysret", INTEL_PT_OP_SYSRET},
+ {NULL, 0},
+ };
+ struct val_data *val;
+
+ if (!op_str || !strlen(op_str))
+ return 0;
+
+ for (val = vals; val->name; val++) {
+ if (!strcmp(val->name, op_str))
+ return val->val;
+ }
+
+ pr_debug("Failed to get op\n");
+
+ return -1;
+}
+
+static int get_branch(const char *branch_str)
+{
+ struct val_data {
+ const char *name;
+ int val;
+ } vals[] = {
+ {"no_branch", INTEL_PT_BR_NO_BRANCH},
+ {"indirect", INTEL_PT_BR_INDIRECT},
+ {"conditional", INTEL_PT_BR_CONDITIONAL},
+ {"unconditional", INTEL_PT_BR_UNCONDITIONAL},
+ {NULL, 0},
+ };
+ struct val_data *val;
+
+ if (!branch_str || !strlen(branch_str))
+ return 0;
+
+ for (val = vals; val->name; val++) {
+ if (!strcmp(val->name, branch_str))
+ return val->val;
+ }
+
+ pr_debug("Failed to get branch\n");
+
+ return -1;
+}
+
+static int test_data_item(struct test_data *dat, int x86_64)
+{
+ struct intel_pt_insn intel_pt_insn;
+ struct insn insn;
+ int op, branch;
+
+ insn_init(&insn, dat->data, MAX_INSN_SIZE, x86_64);
+ insn_get_length(&insn);
+
+ if (!insn_complete(&insn)) {
+ pr_debug("Failed to decode: %s\n", dat->asm_rep);
+ return -1;
+ }
+
+ if (insn.length != dat->expected_length) {
+ pr_debug("Failed to decode length (%d vs expected %d): %s\n",
+ insn.length, dat->expected_length, dat->asm_rep);
+ return -1;
+ }
+
+ op = get_op(dat->expected_op_str);
+ branch = get_branch(dat->expected_branch_str);
+
+ if (intel_pt_get_insn(dat->data, MAX_INSN_SIZE, x86_64, &intel_pt_insn)) {
+ pr_debug("Intel PT failed to decode: %s\n", dat->asm_rep);
+ return -1;
+ }
+
+ if ((int)intel_pt_insn.op != op) {
+ pr_debug("Failed to decode 'op' value (%d vs expected %d): %s\n",
+ intel_pt_insn.op, op, dat->asm_rep);
+ return -1;
+ }
+
+ if ((int)intel_pt_insn.branch != branch) {
+ pr_debug("Failed to decode 'branch' value (%d vs expected %d): %s\n",
+ intel_pt_insn.branch, branch, dat->asm_rep);
+ return -1;
+ }
+
+ if (intel_pt_insn.rel != dat->expected_rel) {
+ pr_debug("Failed to decode 'rel' value (%#x vs expected %#x): %s\n",
+ intel_pt_insn.rel, dat->expected_rel, dat->asm_rep);
+ return -1;
+ }
+
+ pr_debug("Decoded ok: %s\n", dat->asm_rep);
+
+ return 0;
+}
+
+static int test_data_set(struct test_data *dat_set, int x86_64)
+{
+ struct test_data *dat;
+ int ret = 0;
+
+ for (dat = dat_set; dat->expected_length; dat++) {
+ if (test_data_item(dat, x86_64))
+ ret = -1;
+ }
+
+ return ret;
+}
+
+/**
+ * test__insn_x86 - test x86 instruction decoder - new instructions.
+ *
+ * This function implements a test that decodes a selection of instructions and
+ * checks the results. The Intel PT function that further categorizes
+ * instructions (i.e. intel_pt_get_insn()) is also checked.
+ *
+ * The instructions are originally in insn-x86-dat-src.c which has been
+ * processed by scripts gen-insn-x86-dat.sh and gen-insn-x86-dat.awk to produce
+ * insn-x86-dat-32.c and insn-x86-dat-64.c which are included into this program.
+ * i.e. to add new instructions to the test, edit insn-x86-dat-src.c, run the
+ * gen-insn-x86-dat.sh script, make perf, and then run the test.
+ *
+ * If the test passes %0 is returned, otherwise %-1 is returned. Use the
+ * verbose (-v) option to see all the instructions and whether or not they
+ * decoded successfully.
+ */
+int test__insn_x86(void)
+{
+ int ret = 0;
+
+ if (test_data_set(test_data_32, 0))
+ ret = -1;
+
+ if (test_data_set(test_data_64, 1))
+ ret = -1;
+
+ return ret;
+}
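
Each entry that gen-insn-x86-dat.awk emits into insn-x86-dat-32.c and insn-x86-dat-64.c is a struct test_data initializer of the same shape as the hand-written rdpkru/wrpkru entries above. A minimal annotated sketch, reusing the rdpkru bytes from the table above (designated initializers and comments are added here for illustration only; the generated files use positional initializers):

	struct test_data example = {
		.data			= {0x0f, 0x01, 0xee},	/* raw bytes, up to MAX_INSN_SIZE */
		.expected_length	= 3,			/* length the decoder must report */
		.expected_rel		= 0,			/* relative displacement for branches */
		.expected_op_str	= NULL,			/* e.g. "call", "ret", "jcc" */
		.expected_branch_str	= NULL,			/* e.g. "indirect", "conditional" */
		.asm_rep		= "0f 01 ee \trdpkru",	/* objdump-style text for messages */
	};

test_data_set() walks each array until it reaches the terminating entry whose expected_length is 0.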
diff --git a/kernel/tools/perf/arch/x86/tests/intel-cqm.c b/kernel/tools/perf/arch/x86/tests/intel-cqm.c
new file mode 100644
index 000000000..d28c1b6a3
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/tests/intel-cqm.c
@@ -0,0 +1,124 @@
+#include "tests/tests.h"
+#include "perf.h"
+#include "cloexec.h"
+#include "debug.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "arch-tests.h"
+
+#include <sys/mman.h>
+#include <string.h>
+
+static pid_t spawn(void)
+{
+ pid_t pid;
+
+ pid = fork();
+ if (pid)
+ return pid;
+
+ while(1);
+ sleep(5);
+ return 0;
+}
+
+/*
+ * Create an event group that contains both a sampled hardware
+ * (cpu-cycles) and software (intel_cqm/llc_occupancy/) event. We then
+ * wait for the hardware perf counter to overflow and generate a PMI,
+ * which triggers an event read for both of the events in the group.
+ *
+ * Since reading Intel CQM event counters requires sending SMP IPIs, the
+ * CQM pmu needs to handle the above situation gracefully, and return
+ * the last read counter value to avoid triggering a WARN_ON_ONCE() in
+ * smp_call_function_many() caused by sending IPIs from NMI context.
+ */
+int test__intel_cqm_count_nmi_context(void)
+{
+ struct perf_evlist *evlist = NULL;
+ struct perf_evsel *evsel = NULL;
+ struct perf_event_attr pe;
+ int i, fd[2], flag, ret;
+ size_t mmap_len;
+ void *event;
+ pid_t pid;
+ int err = TEST_FAIL;
+
+ flag = perf_event_open_cloexec_flag();
+
+ evlist = perf_evlist__new();
+ if (!evlist) {
+ pr_debug("perf_evlist__new failed\n");
+ return TEST_FAIL;
+ }
+
+ ret = parse_events(evlist, "intel_cqm/llc_occupancy/", NULL);
+ if (ret) {
+ pr_debug("parse_events failed\n");
+ err = TEST_SKIP;
+ goto out;
+ }
+
+ evsel = perf_evlist__first(evlist);
+ if (!evsel) {
+ pr_debug("perf_evlist__first failed\n");
+ goto out;
+ }
+
+ memset(&pe, 0, sizeof(pe));
+ pe.size = sizeof(pe);
+
+ pe.type = PERF_TYPE_HARDWARE;
+ pe.config = PERF_COUNT_HW_CPU_CYCLES;
+ pe.read_format = PERF_FORMAT_GROUP;
+
+ pe.sample_period = 128;
+ pe.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;
+
+ pid = spawn();
+
+ fd[0] = sys_perf_event_open(&pe, pid, -1, -1, flag);
+ if (fd[0] < 0) {
+ pr_debug("failed to open event\n");
+ goto out;
+ }
+
+ memset(&pe, 0, sizeof(pe));
+ pe.size = sizeof(pe);
+
+ pe.type = evsel->attr.type;
+ pe.config = evsel->attr.config;
+
+ fd[1] = sys_perf_event_open(&pe, pid, -1, fd[0], flag);
+ if (fd[1] < 0) {
+ pr_debug("failed to open event\n");
+ goto out;
+ }
+
+ /*
+ * Pick a power-of-two number of pages + 1 for the meta-data
+ * page (struct perf_event_mmap_page). See tools/perf/design.txt.
+ */
+ mmap_len = page_size * 65;
+
+ event = mmap(NULL, mmap_len, PROT_READ, MAP_SHARED, fd[0], 0);
+ if (event == (void *)(-1)) {
+ pr_debug("failed to mmap %d\n", errno);
+ goto out;
+ }
+
+ sleep(1);
+
+ err = TEST_OK;
+
+ munmap(event, mmap_len);
+
+ for (i = 0; i < 2; i++)
+ close(fd[i]);
+
+ kill(pid, SIGKILL);
+ wait(NULL);
+out:
+ perf_evlist__delete(evlist);
+ return err;
+}
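
Because the group leader above sets PERF_FORMAT_GROUP and samples with PERF_SAMPLE_READ, every overflow sample carries a group read covering both events, and it is that read, performed from NMI context when the cycles counter overflows, that the CQM PMU must service without sending IPIs. A minimal sketch of the read layout, as documented for perf_event_open(2) when only PERF_FORMAT_GROUP is set (the struct name is illustrative):

	struct group_read {
		__u64 nr;		/* number of events in the group: 2 here */
		struct {
			__u64 value;	/* per-event counter value */
		} cnt[];		/* cnt[0] = cycles, cnt[1] = llc_occupancy */
	};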
diff --git a/kernel/tools/perf/arch/x86/tests/perf-time-to-tsc.c b/kernel/tools/perf/arch/x86/tests/perf-time-to-tsc.c
new file mode 100644
index 000000000..658cd200a
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/tests/perf-time-to-tsc.c
@@ -0,0 +1,164 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <linux/types.h>
+#include <sys/prctl.h>
+
+#include "parse-events.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "thread_map.h"
+#include "cpumap.h"
+#include "tsc.h"
+#include "tests/tests.h"
+
+#include "arch-tests.h"
+
+#define CHECK__(x) { \
+ while ((x) < 0) { \
+ pr_debug(#x " failed!\n"); \
+ goto out_err; \
+ } \
+}
+
+#define CHECK_NOT_NULL__(x) { \
+ while ((x) == NULL) { \
+ pr_debug(#x " failed!\n"); \
+ goto out_err; \
+ } \
+}
+
+/**
+ * test__perf_time_to_tsc - test converting perf time to TSC.
+ *
+ * This function implements a test that checks that the conversion of perf time
+ * to and from TSC is consistent with the order of events. If the test passes
+ * %0 is returned, otherwise %-1 is returned. If TSC conversion is not
+ * supported then the test passes but " (not supported)" is printed.
+ */
+int test__perf_time_to_tsc(void)
+{
+ struct record_opts opts = {
+ .mmap_pages = UINT_MAX,
+ .user_freq = UINT_MAX,
+ .user_interval = ULLONG_MAX,
+ .freq = 4000,
+ .target = {
+ .uses_mmap = true,
+ },
+ .sample_time = true,
+ };
+ struct thread_map *threads = NULL;
+ struct cpu_map *cpus = NULL;
+ struct perf_evlist *evlist = NULL;
+ struct perf_evsel *evsel = NULL;
+ int err = -1, ret, i;
+ const char *comm1, *comm2;
+ struct perf_tsc_conversion tc;
+ struct perf_event_mmap_page *pc;
+ union perf_event *event;
+ u64 test_tsc, comm1_tsc, comm2_tsc;
+ u64 test_time, comm1_time = 0, comm2_time = 0;
+
+ threads = thread_map__new(-1, getpid(), UINT_MAX);
+ CHECK_NOT_NULL__(threads);
+
+ cpus = cpu_map__new(NULL);
+ CHECK_NOT_NULL__(cpus);
+
+ evlist = perf_evlist__new();
+ CHECK_NOT_NULL__(evlist);
+
+ perf_evlist__set_maps(evlist, cpus, threads);
+
+ CHECK__(parse_events(evlist, "cycles:u", NULL));
+
+ perf_evlist__config(evlist, &opts);
+
+ evsel = perf_evlist__first(evlist);
+
+ evsel->attr.comm = 1;
+ evsel->attr.disabled = 1;
+ evsel->attr.enable_on_exec = 0;
+
+ CHECK__(perf_evlist__open(evlist));
+
+ CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));
+
+ pc = evlist->mmap[0].base;
+ ret = perf_read_tsc_conversion(pc, &tc);
+ if (ret) {
+ if (ret == -EOPNOTSUPP) {
+ fprintf(stderr, " (not supported)");
+ return 0;
+ }
+ goto out_err;
+ }
+
+ perf_evlist__enable(evlist);
+
+ comm1 = "Test COMM 1";
+ CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));
+
+ test_tsc = rdtsc();
+
+ comm2 = "Test COMM 2";
+ CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));
+
+ perf_evlist__disable(evlist);
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+ struct perf_sample sample;
+
+ if (event->header.type != PERF_RECORD_COMM ||
+ (pid_t)event->comm.pid != getpid() ||
+ (pid_t)event->comm.tid != getpid())
+ goto next_event;
+
+ if (strcmp(event->comm.comm, comm1) == 0) {
+ CHECK__(perf_evsel__parse_sample(evsel, event,
+ &sample));
+ comm1_time = sample.time;
+ }
+ if (strcmp(event->comm.comm, comm2) == 0) {
+ CHECK__(perf_evsel__parse_sample(evsel, event,
+ &sample));
+ comm2_time = sample.time;
+ }
+next_event:
+ perf_evlist__mmap_consume(evlist, i);
+ }
+ }
+
+ if (!comm1_time || !comm2_time)
+ goto out_err;
+
+ test_time = tsc_to_perf_time(test_tsc, &tc);
+ comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
+ comm2_tsc = perf_time_to_tsc(comm2_time, &tc);
+
+ pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n",
+ comm1_time, comm1_tsc);
+ pr_debug("rdtsc time %"PRIu64" tsc %"PRIu64"\n",
+ test_time, test_tsc);
+ pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n",
+ comm2_time, comm2_tsc);
+
+ if (test_time <= comm1_time ||
+ test_time >= comm2_time)
+ goto out_err;
+
+ if (test_tsc <= comm1_tsc ||
+ test_tsc >= comm2_tsc)
+ goto out_err;
+
+ err = 0;
+
+out_err:
+ if (evlist) {
+ perf_evlist__disable(evlist);
+ perf_evlist__delete(evlist);
+ }
+
+ return err;
+}
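
The conversion itself comes from the time_shift/time_mult/time_zero fields that perf_read_tsc_conversion() reads out of the mmapped perf_event_mmap_page. A sketch of the TSC-to-perf-time direction, following the formula documented for cap_user_time_zero in the perf_event_mmap_page header comments (the helper name is illustrative):

	static u64 example_tsc_to_perf_time(u64 tsc, struct perf_tsc_conversion *tc)
	{
		u64 quot = tsc >> tc->time_shift;
		u64 rem  = tsc & (((u64)1 << tc->time_shift) - 1);

		return tc->time_zero + quot * tc->time_mult +
		       ((rem * tc->time_mult) >> tc->time_shift);
	}

The test only depends on this mapping being order-preserving: the rdtsc() value taken between the two prctl() calls must convert to a time that falls between the two COMM sample times, and vice versa.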
diff --git a/kernel/tools/perf/arch/x86/tests/rdpmc.c b/kernel/tools/perf/arch/x86/tests/rdpmc.c
new file mode 100644
index 000000000..e7688214c
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/tests/rdpmc.c
@@ -0,0 +1,174 @@
+#include <unistd.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <linux/types.h>
+#include "perf.h"
+#include "debug.h"
+#include "tests/tests.h"
+#include "cloexec.h"
+#include "arch-tests.h"
+
+static u64 rdpmc(unsigned int counter)
+{
+ unsigned int low, high;
+
+ asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
+
+ return low | ((u64)high) << 32;
+}
+
+static u64 rdtsc(void)
+{
+ unsigned int low, high;
+
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
+
+ return low | ((u64)high) << 32;
+}
+
+static u64 mmap_read_self(void *addr)
+{
+ struct perf_event_mmap_page *pc = addr;
+ u32 seq, idx, time_mult = 0, time_shift = 0;
+ u64 count, cyc = 0, time_offset = 0, enabled, running, delta;
+
+ do {
+ seq = pc->lock;
+ barrier();
+
+ enabled = pc->time_enabled;
+ running = pc->time_running;
+
+ if (enabled != running) {
+ cyc = rdtsc();
+ time_mult = pc->time_mult;
+ time_shift = pc->time_shift;
+ time_offset = pc->time_offset;
+ }
+
+ idx = pc->index;
+ count = pc->offset;
+ if (idx)
+ count += rdpmc(idx - 1);
+
+ barrier();
+ } while (pc->lock != seq);
+
+ if (enabled != running) {
+ u64 quot, rem;
+
+ quot = (cyc >> time_shift);
+ rem = cyc & ((1 << time_shift) - 1);
+ delta = time_offset + quot * time_mult +
+ ((rem * time_mult) >> time_shift);
+
+ enabled += delta;
+ if (idx)
+ running += delta;
+
+ quot = count / running;
+ rem = count % running;
+ count = quot * enabled + (rem * enabled) / running;
+ }
+
+ return count;
+}
+
+/*
+ * If the RDPMC instruction faults then signal this back to the test parent task:
+ */
+static void segfault_handler(int sig __maybe_unused,
+ siginfo_t *info __maybe_unused,
+ void *uc __maybe_unused)
+{
+ exit(-1);
+}
+
+static int __test__rdpmc(void)
+{
+ volatile int tmp = 0;
+ u64 i, loops = 1000;
+ int n;
+ int fd;
+ void *addr;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_INSTRUCTIONS,
+ .exclude_kernel = 1,
+ };
+ u64 delta_sum = 0;
+ struct sigaction sa;
+ char sbuf[STRERR_BUFSIZE];
+
+ sigfillset(&sa.sa_mask);
+ sa.sa_sigaction = segfault_handler;
+ sigaction(SIGSEGV, &sa, NULL);
+
+ fd = sys_perf_event_open(&attr, 0, -1, -1,
+ perf_event_open_cloexec_flag());
+ if (fd < 0) {
+ pr_err("Error: sys_perf_event_open() syscall returned "
+ "with %d (%s)\n", fd,
+ strerror_r(errno, sbuf, sizeof(sbuf)));
+ return -1;
+ }
+
+ addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
+ if (addr == (void *)(-1)) {
+ pr_err("Error: mmap() syscall returned with (%s)\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
+ goto out_close;
+ }
+
+ for (n = 0; n < 6; n++) {
+ u64 stamp, now, delta;
+
+ stamp = mmap_read_self(addr);
+
+ for (i = 0; i < loops; i++)
+ tmp++;
+
+ now = mmap_read_self(addr);
+ loops *= 10;
+
+ delta = now - stamp;
+ pr_debug("%14d: %14Lu\n", n, (long long)delta);
+
+ delta_sum += delta;
+ }
+
+ munmap(addr, page_size);
+ pr_debug(" ");
+out_close:
+ close(fd);
+
+ if (!delta_sum)
+ return -1;
+
+ return 0;
+}
+
+int test__rdpmc(void)
+{
+ int status = 0;
+ int wret = 0;
+ int ret;
+ int pid;
+
+ pid = fork();
+ if (pid < 0)
+ return -1;
+
+ if (!pid) {
+ ret = __test__rdpmc();
+
+ exit(ret);
+ }
+
+ wret = waitpid(pid, &status, 0);
+ if (wret < 0 || status)
+ return -1;
+
+ return 0;
+}
diff --git a/kernel/tools/perf/arch/x86/util/Build b/kernel/tools/perf/arch/x86/util/Build
index cfbccc4e3..ff63649fa 100644
--- a/kernel/tools/perf/arch/x86/util/Build
+++ b/kernel/tools/perf/arch/x86/util/Build
@@ -1,8 +1,14 @@
libperf-y += header.o
libperf-y += tsc.o
+libperf-y += pmu.o
libperf-y += kvm-stat.o
+libperf-y += perf_regs.o
libperf-$(CONFIG_DWARF) += dwarf-regs.o
libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+
+libperf-$(CONFIG_AUXTRACE) += auxtrace.o
+libperf-$(CONFIG_AUXTRACE) += intel-pt.o
+libperf-$(CONFIG_AUXTRACE) += intel-bts.o
diff --git a/kernel/tools/perf/arch/x86/util/auxtrace.c b/kernel/tools/perf/arch/x86/util/auxtrace.c
new file mode 100644
index 000000000..7a7805583
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/util/auxtrace.c
@@ -0,0 +1,83 @@
+/*
+ * auxtrace.c: AUX area tracing support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <stdbool.h>
+
+#include "../../util/header.h"
+#include "../../util/debug.h"
+#include "../../util/pmu.h"
+#include "../../util/auxtrace.h"
+#include "../../util/intel-pt.h"
+#include "../../util/intel-bts.h"
+#include "../../util/evlist.h"
+
+static
+struct auxtrace_record *auxtrace_record__init_intel(struct perf_evlist *evlist,
+ int *err)
+{
+ struct perf_pmu *intel_pt_pmu;
+ struct perf_pmu *intel_bts_pmu;
+ struct perf_evsel *evsel;
+ bool found_pt = false;
+ bool found_bts = false;
+
+ intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
+ intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
+
+ if (evlist) {
+ evlist__for_each(evlist, evsel) {
+ if (intel_pt_pmu &&
+ evsel->attr.type == intel_pt_pmu->type)
+ found_pt = true;
+ if (intel_bts_pmu &&
+ evsel->attr.type == intel_bts_pmu->type)
+ found_bts = true;
+ }
+ }
+
+ if (found_pt && found_bts) {
+ pr_err("intel_pt and intel_bts may not be used together\n");
+ *err = -EINVAL;
+ return NULL;
+ }
+
+ if (found_pt)
+ return intel_pt_recording_init(err);
+
+ if (found_bts)
+ return intel_bts_recording_init(err);
+
+ return NULL;
+}
+
+struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist,
+ int *err)
+{
+ char buffer[64];
+ int ret;
+
+ *err = 0;
+
+ ret = get_cpuid(buffer, sizeof(buffer));
+ if (ret) {
+ *err = ret;
+ return NULL;
+ }
+
+ if (!strncmp(buffer, "GenuineIntel,", 13))
+ return auxtrace_record__init_intel(evlist, err);
+
+ return NULL;
+}
diff --git a/kernel/tools/perf/arch/x86/util/dwarf-regs.c b/kernel/tools/perf/arch/x86/util/dwarf-regs.c
index be22dd463..9223c164e 100644
--- a/kernel/tools/perf/arch/x86/util/dwarf-regs.c
+++ b/kernel/tools/perf/arch/x86/util/dwarf-regs.c
@@ -21,55 +21,109 @@
*/
#include <stddef.h>
+#include <errno.h> /* for EINVAL */
+#include <string.h> /* for strcmp */
+#include <linux/ptrace.h> /* for struct pt_regs */
+#include <linux/kernel.h> /* for offsetof */
#include <dwarf-regs.h>
/*
- * Generic dwarf analysis helpers
+ * See arch/x86/kernel/ptrace.c.
+ * Different from it:
+ *
+ * - Since struct pt_regs is defined differently for user and kernel,
+ * but we want to use 'ax, bx' instead of 'rax, rbx' (which is struct
+ * field name of user's pt_regs), we make REG_OFFSET_NAME to accept
+ * both string name and reg field name.
+ *
+ * - Since accessing x86_32's pt_regs from an x86_64 build is difficult
+ * and vice versa, we simply fill the offset with -1, so
+ * get_arch_regstr() still works but regs_query_register_offset()
+ * returns an error.
+ * The only inconvenience this causes for now is that we are not allowed
+ * to generate a BPF prologue for an x86_64 kernel if perf is built for
+ * x86_32. This is really a rare use case.
+ *
+ * - Order is different from kernel's ptrace.c for get_arch_regstr(). Use
+ * the order defined by DWARF.
*/
-#define X86_32_MAX_REGS 8
-const char *x86_32_regs_table[X86_32_MAX_REGS] = {
- "%ax",
- "%cx",
- "%dx",
- "%bx",
- "$stack", /* Stack address instead of %sp */
- "%bp",
- "%si",
- "%di",
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+#ifdef __x86_64__
+# define REG_OFFSET_NAME_64(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)}
+# define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = -1}
+#else
+# define REG_OFFSET_NAME_64(n, r) {.name = n, .offset = -1}
+# define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)}
+#endif
+
+static const struct pt_regs_offset x86_32_regoffset_table[] = {
+ REG_OFFSET_NAME_32("%ax", eax),
+ REG_OFFSET_NAME_32("%cx", ecx),
+ REG_OFFSET_NAME_32("%dx", edx),
+ REG_OFFSET_NAME_32("%bx", ebx),
+ REG_OFFSET_NAME_32("$stack", esp), /* Stack address instead of %sp */
+ REG_OFFSET_NAME_32("%bp", ebp),
+ REG_OFFSET_NAME_32("%si", esi),
+ REG_OFFSET_NAME_32("%di", edi),
+ REG_OFFSET_END,
};
-#define X86_64_MAX_REGS 16
-const char *x86_64_regs_table[X86_64_MAX_REGS] = {
- "%ax",
- "%dx",
- "%cx",
- "%bx",
- "%si",
- "%di",
- "%bp",
- "%sp",
- "%r8",
- "%r9",
- "%r10",
- "%r11",
- "%r12",
- "%r13",
- "%r14",
- "%r15",
+static const struct pt_regs_offset x86_64_regoffset_table[] = {
+ REG_OFFSET_NAME_64("%ax", rax),
+ REG_OFFSET_NAME_64("%dx", rdx),
+ REG_OFFSET_NAME_64("%cx", rcx),
+ REG_OFFSET_NAME_64("%bx", rbx),
+ REG_OFFSET_NAME_64("%si", rsi),
+ REG_OFFSET_NAME_64("%di", rdi),
+ REG_OFFSET_NAME_64("%bp", rbp),
+ REG_OFFSET_NAME_64("%sp", rsp),
+ REG_OFFSET_NAME_64("%r8", r8),
+ REG_OFFSET_NAME_64("%r9", r9),
+ REG_OFFSET_NAME_64("%r10", r10),
+ REG_OFFSET_NAME_64("%r11", r11),
+ REG_OFFSET_NAME_64("%r12", r12),
+ REG_OFFSET_NAME_64("%r13", r13),
+ REG_OFFSET_NAME_64("%r14", r14),
+ REG_OFFSET_NAME_64("%r15", r15),
+ REG_OFFSET_END,
};
/* TODO: switching by dwarf address size */
#ifdef __x86_64__
-#define ARCH_MAX_REGS X86_64_MAX_REGS
-#define arch_regs_table x86_64_regs_table
+#define regoffset_table x86_64_regoffset_table
#else
-#define ARCH_MAX_REGS X86_32_MAX_REGS
-#define arch_regs_table x86_32_regs_table
+#define regoffset_table x86_32_regoffset_table
#endif
+/* Minus 1 for the ending REG_OFFSET_END */
+#define ARCH_MAX_REGS ((sizeof(regoffset_table) / sizeof(regoffset_table[0])) - 1)
+
/* Return architecture dependent register string (for kprobe-tracer) */
const char *get_arch_regstr(unsigned int n)
{
- return (n <= ARCH_MAX_REGS) ? arch_regs_table[n] : NULL;
+ return (n < ARCH_MAX_REGS) ? regoffset_table[n].name : NULL;
+}
+
+/* Reuse code from arch/x86/kernel/ptrace.c */
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
}
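
A hypothetical caller (for example, probe argument resolution) would combine the two lookups roughly as follows; the snippet is illustrative and not part of the patch:

	/* DWARF register number -> name usable by kprobe-tracer */
	const char *name = get_arch_regstr(0);		/* "%ax" */

	/* register name -> offset into struct pt_regs */
	int offset = regs_query_register_offset("%ax");	/* offsetof(struct pt_regs, rax) on x86_64 */
	if (offset < 0)
		return -EINVAL;	/* unknown name, or table built for the other word size (-1 entries) */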
diff --git a/kernel/tools/perf/arch/x86/util/intel-bts.c b/kernel/tools/perf/arch/x86/util/intel-bts.c
new file mode 100644
index 000000000..9b94ce520
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/util/intel-bts.c
@@ -0,0 +1,458 @@
+/*
+ * intel-bts.c: Intel Branch Trace Store support
+ * Copyright (c) 2013-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+
+#include "../../util/cpumap.h"
+#include "../../util/evsel.h"
+#include "../../util/evlist.h"
+#include "../../util/session.h"
+#include "../../util/util.h"
+#include "../../util/pmu.h"
+#include "../../util/debug.h"
+#include "../../util/tsc.h"
+#include "../../util/auxtrace.h"
+#include "../../util/intel-bts.h"
+
+#define KiB(x) ((x) * 1024)
+#define MiB(x) ((x) * 1024 * 1024)
+#define KiB_MASK(x) (KiB(x) - 1)
+#define MiB_MASK(x) (MiB(x) - 1)
+
+#define INTEL_BTS_DFLT_SAMPLE_SIZE KiB(4)
+
+#define INTEL_BTS_MAX_SAMPLE_SIZE KiB(60)
+
+struct intel_bts_snapshot_ref {
+ void *ref_buf;
+ size_t ref_offset;
+ bool wrapped;
+};
+
+struct intel_bts_recording {
+ struct auxtrace_record itr;
+ struct perf_pmu *intel_bts_pmu;
+ struct perf_evlist *evlist;
+ bool snapshot_mode;
+ size_t snapshot_size;
+ int snapshot_ref_cnt;
+ struct intel_bts_snapshot_ref *snapshot_refs;
+};
+
+struct branch {
+ u64 from;
+ u64 to;
+ u64 misc;
+};
+
+static size_t intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused)
+{
+ return INTEL_BTS_AUXTRACE_PRIV_SIZE;
+}
+
+static int intel_bts_info_fill(struct auxtrace_record *itr,
+ struct perf_session *session,
+ struct auxtrace_info_event *auxtrace_info,
+ size_t priv_size)
+{
+ struct intel_bts_recording *btsr =
+ container_of(itr, struct intel_bts_recording, itr);
+ struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
+ struct perf_event_mmap_page *pc;
+ struct perf_tsc_conversion tc = { .time_mult = 0, };
+ bool cap_user_time_zero = false;
+ int err;
+
+ if (priv_size != INTEL_BTS_AUXTRACE_PRIV_SIZE)
+ return -EINVAL;
+
+ if (!session->evlist->nr_mmaps)
+ return -EINVAL;
+
+ pc = session->evlist->mmap[0].base;
+ if (pc) {
+ err = perf_read_tsc_conversion(pc, &tc);
+ if (err) {
+ if (err != -EOPNOTSUPP)
+ return err;
+ } else {
+ cap_user_time_zero = tc.time_mult != 0;
+ }
+ if (!cap_user_time_zero)
+ ui__warning("Intel BTS: TSC not available\n");
+ }
+
+ auxtrace_info->type = PERF_AUXTRACE_INTEL_BTS;
+ auxtrace_info->priv[INTEL_BTS_PMU_TYPE] = intel_bts_pmu->type;
+ auxtrace_info->priv[INTEL_BTS_TIME_SHIFT] = tc.time_shift;
+ auxtrace_info->priv[INTEL_BTS_TIME_MULT] = tc.time_mult;
+ auxtrace_info->priv[INTEL_BTS_TIME_ZERO] = tc.time_zero;
+ auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO] = cap_user_time_zero;
+ auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE] = btsr->snapshot_mode;
+
+ return 0;
+}
+
+static int intel_bts_recording_options(struct auxtrace_record *itr,
+ struct perf_evlist *evlist,
+ struct record_opts *opts)
+{
+ struct intel_bts_recording *btsr =
+ container_of(itr, struct intel_bts_recording, itr);
+ struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
+ struct perf_evsel *evsel, *intel_bts_evsel = NULL;
+ const struct cpu_map *cpus = evlist->cpus;
+ bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
+
+ btsr->evlist = evlist;
+ btsr->snapshot_mode = opts->auxtrace_snapshot_mode;
+
+ evlist__for_each(evlist, evsel) {
+ if (evsel->attr.type == intel_bts_pmu->type) {
+ if (intel_bts_evsel) {
+ pr_err("There may be only one " INTEL_BTS_PMU_NAME " event\n");
+ return -EINVAL;
+ }
+ evsel->attr.freq = 0;
+ evsel->attr.sample_period = 1;
+ intel_bts_evsel = evsel;
+ opts->full_auxtrace = true;
+ }
+ }
+
+ if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
+ pr_err("Snapshot mode (-S option) requires " INTEL_BTS_PMU_NAME " PMU event (-e " INTEL_BTS_PMU_NAME ")\n");
+ return -EINVAL;
+ }
+
+ if (!opts->full_auxtrace)
+ return 0;
+
+ if (opts->full_auxtrace && !cpu_map__empty(cpus)) {
+ pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n");
+ return -EINVAL;
+ }
+
+ /* Set default sizes for snapshot mode */
+ if (opts->auxtrace_snapshot_mode) {
+ if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
+ if (privileged) {
+ opts->auxtrace_mmap_pages = MiB(4) / page_size;
+ } else {
+ opts->auxtrace_mmap_pages = KiB(128) / page_size;
+ if (opts->mmap_pages == UINT_MAX)
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+ } else if (!opts->auxtrace_mmap_pages && !privileged &&
+ opts->mmap_pages == UINT_MAX) {
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+ if (!opts->auxtrace_snapshot_size)
+ opts->auxtrace_snapshot_size =
+ opts->auxtrace_mmap_pages * (size_t)page_size;
+ if (!opts->auxtrace_mmap_pages) {
+ size_t sz = opts->auxtrace_snapshot_size;
+
+ sz = round_up(sz, page_size) / page_size;
+ opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
+ }
+ if (opts->auxtrace_snapshot_size >
+ opts->auxtrace_mmap_pages * (size_t)page_size) {
+ pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
+ opts->auxtrace_snapshot_size,
+ opts->auxtrace_mmap_pages * (size_t)page_size);
+ return -EINVAL;
+ }
+ if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
+ pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
+ return -EINVAL;
+ }
+ pr_debug2("Intel BTS snapshot size: %zu\n",
+ opts->auxtrace_snapshot_size);
+ }
+
+ /* Set default sizes for full trace mode */
+ if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
+ if (privileged) {
+ opts->auxtrace_mmap_pages = MiB(4) / page_size;
+ } else {
+ opts->auxtrace_mmap_pages = KiB(128) / page_size;
+ if (opts->mmap_pages == UINT_MAX)
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+ }
+
+ /* Validate auxtrace_mmap_pages */
+ if (opts->auxtrace_mmap_pages) {
+ size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
+ size_t min_sz;
+
+ if (opts->auxtrace_snapshot_mode)
+ min_sz = KiB(4);
+ else
+ min_sz = KiB(8);
+
+ if (sz < min_sz || !is_power_of_2(sz)) {
+ pr_err("Invalid mmap size for Intel BTS: must be at least %zuKiB and a power of 2\n",
+ min_sz / 1024);
+ return -EINVAL;
+ }
+ }
+
+ if (intel_bts_evsel) {
+ /*
+ * To obtain the auxtrace buffer file descriptor, the auxtrace event
+ * must come first.
+ */
+ perf_evlist__to_front(evlist, intel_bts_evsel);
+ /*
+ * In the case of per-cpu mmaps, we need the CPU on the
+ * AUX event.
+ */
+ if (!cpu_map__empty(cpus))
+ perf_evsel__set_sample_bit(intel_bts_evsel, CPU);
+ }
+
+ /* Add dummy event to keep tracking */
+ if (opts->full_auxtrace) {
+ struct perf_evsel *tracking_evsel;
+ int err;
+
+ err = parse_events(evlist, "dummy:u", NULL);
+ if (err)
+ return err;
+
+ tracking_evsel = perf_evlist__last(evlist);
+
+ perf_evlist__set_tracking_event(evlist, tracking_evsel);
+
+ tracking_evsel->attr.freq = 0;
+ tracking_evsel->attr.sample_period = 1;
+ }
+
+ return 0;
+}
+
+static int intel_bts_parse_snapshot_options(struct auxtrace_record *itr,
+ struct record_opts *opts,
+ const char *str)
+{
+ struct intel_bts_recording *btsr =
+ container_of(itr, struct intel_bts_recording, itr);
+ unsigned long long snapshot_size = 0;
+ char *endptr;
+
+ if (str) {
+ snapshot_size = strtoull(str, &endptr, 0);
+ if (*endptr || snapshot_size > SIZE_MAX)
+ return -1;
+ }
+
+ opts->auxtrace_snapshot_mode = true;
+ opts->auxtrace_snapshot_size = snapshot_size;
+
+ btsr->snapshot_size = snapshot_size;
+
+ return 0;
+}
+
+static u64 intel_bts_reference(struct auxtrace_record *itr __maybe_unused)
+{
+ return rdtsc();
+}
+
+static int intel_bts_alloc_snapshot_refs(struct intel_bts_recording *btsr,
+ int idx)
+{
+ const size_t sz = sizeof(struct intel_bts_snapshot_ref);
+ int cnt = btsr->snapshot_ref_cnt, new_cnt = cnt * 2;
+ struct intel_bts_snapshot_ref *refs;
+
+ if (!new_cnt)
+ new_cnt = 16;
+
+ while (new_cnt <= idx)
+ new_cnt *= 2;
+
+ refs = calloc(new_cnt, sz);
+ if (!refs)
+ return -ENOMEM;
+
+ memcpy(refs, btsr->snapshot_refs, cnt * sz);
+
+ btsr->snapshot_refs = refs;
+ btsr->snapshot_ref_cnt = new_cnt;
+
+ return 0;
+}
+
+static void intel_bts_free_snapshot_refs(struct intel_bts_recording *btsr)
+{
+ int i;
+
+ for (i = 0; i < btsr->snapshot_ref_cnt; i++)
+ zfree(&btsr->snapshot_refs[i].ref_buf);
+ zfree(&btsr->snapshot_refs);
+}
+
+static void intel_bts_recording_free(struct auxtrace_record *itr)
+{
+ struct intel_bts_recording *btsr =
+ container_of(itr, struct intel_bts_recording, itr);
+
+ intel_bts_free_snapshot_refs(btsr);
+ free(btsr);
+}
+
+static int intel_bts_snapshot_start(struct auxtrace_record *itr)
+{
+ struct intel_bts_recording *btsr =
+ container_of(itr, struct intel_bts_recording, itr);
+ struct perf_evsel *evsel;
+
+ evlist__for_each(btsr->evlist, evsel) {
+ if (evsel->attr.type == btsr->intel_bts_pmu->type)
+ return perf_evlist__disable_event(btsr->evlist, evsel);
+ }
+ return -EINVAL;
+}
+
+static int intel_bts_snapshot_finish(struct auxtrace_record *itr)
+{
+ struct intel_bts_recording *btsr =
+ container_of(itr, struct intel_bts_recording, itr);
+ struct perf_evsel *evsel;
+
+ evlist__for_each(btsr->evlist, evsel) {
+ if (evsel->attr.type == btsr->intel_bts_pmu->type)
+ return perf_evlist__enable_event(btsr->evlist, evsel);
+ }
+ return -EINVAL;
+}
+
+static bool intel_bts_first_wrap(u64 *data, size_t buf_size)
+{
+ int i, a, b;
+
+ b = buf_size >> 3;
+ a = b - 512;
+ if (a < 0)
+ a = 0;
+
+ for (i = a; i < b; i++) {
+ if (data[i])
+ return true;
+ }
+
+ return false;
+}
+
+static int intel_bts_find_snapshot(struct auxtrace_record *itr, int idx,
+ struct auxtrace_mmap *mm, unsigned char *data,
+ u64 *head, u64 *old)
+{
+ struct intel_bts_recording *btsr =
+ container_of(itr, struct intel_bts_recording, itr);
+ bool wrapped;
+ int err;
+
+ pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
+ __func__, idx, (size_t)*old, (size_t)*head);
+
+ if (idx >= btsr->snapshot_ref_cnt) {
+ err = intel_bts_alloc_snapshot_refs(btsr, idx);
+ if (err)
+ goto out_err;
+ }
+
+ wrapped = btsr->snapshot_refs[idx].wrapped;
+ if (!wrapped && intel_bts_first_wrap((u64 *)data, mm->len)) {
+ btsr->snapshot_refs[idx].wrapped = true;
+ wrapped = true;
+ }
+
+ /*
+ * In full trace mode 'head' continually increases. However in snapshot
+ * mode 'head' is an offset within the buffer. Here 'old' and 'head'
+ * are adjusted to match the full trace case which expects that 'old' is
+ * always less than 'head'.
+ */
+ if (wrapped) {
+ *old = *head;
+ *head += mm->len;
+ } else {
+ if (mm->mask)
+ *old &= mm->mask;
+ else
+ *old %= mm->len;
+ if (*old > *head)
+ *head += mm->len;
+ }
+
+ pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
+ __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);
+
+ return 0;
+
+out_err:
+ pr_err("%s: failed, error %d\n", __func__, err);
+ return err;
+}
+
+static int intel_bts_read_finish(struct auxtrace_record *itr, int idx)
+{
+ struct intel_bts_recording *btsr =
+ container_of(itr, struct intel_bts_recording, itr);
+ struct perf_evsel *evsel;
+
+ evlist__for_each(btsr->evlist, evsel) {
+ if (evsel->attr.type == btsr->intel_bts_pmu->type)
+ return perf_evlist__enable_event_idx(btsr->evlist,
+ evsel, idx);
+ }
+ return -EINVAL;
+}
+
+struct auxtrace_record *intel_bts_recording_init(int *err)
+{
+ struct perf_pmu *intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
+ struct intel_bts_recording *btsr;
+
+ if (!intel_bts_pmu)
+ return NULL;
+
+ btsr = zalloc(sizeof(struct intel_bts_recording));
+ if (!btsr) {
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ btsr->intel_bts_pmu = intel_bts_pmu;
+ btsr->itr.recording_options = intel_bts_recording_options;
+ btsr->itr.info_priv_size = intel_bts_info_priv_size;
+ btsr->itr.info_fill = intel_bts_info_fill;
+ btsr->itr.free = intel_bts_recording_free;
+ btsr->itr.snapshot_start = intel_bts_snapshot_start;
+ btsr->itr.snapshot_finish = intel_bts_snapshot_finish;
+ btsr->itr.find_snapshot = intel_bts_find_snapshot;
+ btsr->itr.parse_snapshot_options = intel_bts_parse_snapshot_options;
+ btsr->itr.reference = intel_bts_reference;
+ btsr->itr.read_finish = intel_bts_read_finish;
+ btsr->itr.alignment = sizeof(struct branch);
+ return &btsr->itr;
+}
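
A small worked example of the snapshot head/old adjustment in intel_bts_find_snapshot(), with assumed numbers (mm->len = 0x10000):

	/*
	 * wrapped:      *head = 0x4000 (any *old)   ->  old = 0x4000, head = 0x14000
	 * not wrapped:  *old = 0xf000, *head = 0x1000  ->  old = 0xf000, head = 0x11000
	 *
	 * In both cases old < head afterwards, which is what the generic
	 * AUX-area consumer expects in full trace mode.
	 */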
diff --git a/kernel/tools/perf/arch/x86/util/intel-pt.c b/kernel/tools/perf/arch/x86/util/intel-pt.c
new file mode 100644
index 000000000..b02af064f
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/util/intel-pt.c
@@ -0,0 +1,1046 @@
+/*
+ * intel-pt.c: Intel Processor Trace support
+ * Copyright (c) 2013-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <stdbool.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <cpuid.h>
+
+#include "../../perf.h"
+#include "../../util/session.h"
+#include "../../util/event.h"
+#include "../../util/evlist.h"
+#include "../../util/evsel.h"
+#include "../../util/cpumap.h"
+#include "../../util/parse-options.h"
+#include "../../util/parse-events.h"
+#include "../../util/pmu.h"
+#include "../../util/debug.h"
+#include "../../util/auxtrace.h"
+#include "../../util/tsc.h"
+#include "../../util/intel-pt.h"
+
+#define KiB(x) ((x) * 1024)
+#define MiB(x) ((x) * 1024 * 1024)
+#define KiB_MASK(x) (KiB(x) - 1)
+#define MiB_MASK(x) (MiB(x) - 1)
+
+#define INTEL_PT_DEFAULT_SAMPLE_SIZE KiB(4)
+
+#define INTEL_PT_MAX_SAMPLE_SIZE KiB(60)
+
+#define INTEL_PT_PSB_PERIOD_NEAR 256
+
+struct intel_pt_snapshot_ref {
+ void *ref_buf;
+ size_t ref_offset;
+ bool wrapped;
+};
+
+struct intel_pt_recording {
+ struct auxtrace_record itr;
+ struct perf_pmu *intel_pt_pmu;
+ int have_sched_switch;
+ struct perf_evlist *evlist;
+ bool snapshot_mode;
+ bool snapshot_init_done;
+ size_t snapshot_size;
+ size_t snapshot_ref_buf_size;
+ int snapshot_ref_cnt;
+ struct intel_pt_snapshot_ref *snapshot_refs;
+};
+
+static int intel_pt_parse_terms_with_default(struct list_head *formats,
+ const char *str,
+ u64 *config)
+{
+ struct list_head *terms;
+ struct perf_event_attr attr = { .size = 0, };
+ int err;
+
+ terms = malloc(sizeof(struct list_head));
+ if (!terms)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(terms);
+
+ err = parse_events_terms(terms, str);
+ if (err)
+ goto out_free;
+
+ attr.config = *config;
+ err = perf_pmu__config_terms(formats, &attr, terms, true, NULL);
+ if (err)
+ goto out_free;
+
+ *config = attr.config;
+out_free:
+ parse_events__free_terms(terms);
+ return err;
+}
+
+static int intel_pt_parse_terms(struct list_head *formats, const char *str,
+ u64 *config)
+{
+ *config = 0;
+ return intel_pt_parse_terms_with_default(formats, str, config);
+}
+
+static u64 intel_pt_masked_bits(u64 mask, u64 bits)
+{
+ const u64 top_bit = 1ULL << 63;
+ u64 res = 0;
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if (mask & top_bit) {
+ res <<= 1;
+ if (bits & top_bit)
+ res |= 1;
+ }
+ mask <<= 1;
+ bits <<= 1;
+ }
+
+ return res;
+}
+
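/*
 * Worked example with illustrative values: for mask 0xf00 and bits 0x350,
 * intel_pt_masked_bits() returns 0x3 - the config bits selected by the
 * mask (bits 8-11 of 0x350), right-justified.  intel_pt_read_config()
 * below uses this to recover a named format field (e.g. "psb_period")
 * from an event's attr.config.
 */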
+static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
+ struct perf_evlist *evlist, u64 *res)
+{
+ struct perf_evsel *evsel;
+ u64 mask;
+
+ *res = 0;
+
+ mask = perf_pmu__format_bits(&intel_pt_pmu->format, str);
+ if (!mask)
+ return -EINVAL;
+
+ evlist__for_each(evlist, evsel) {
+ if (evsel->attr.type == intel_pt_pmu->type) {
+ *res = intel_pt_masked_bits(mask, evsel->attr.config);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
+ struct perf_evlist *evlist)
+{
+ u64 val;
+ int err, topa_multiple_entries;
+ size_t psb_period;
+
+ if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries",
+ "%d", &topa_multiple_entries) != 1)
+ topa_multiple_entries = 0;
+
+ /*
+ * Use caps/topa_multiple_entries to indicate early hardware that had
+ * extra frequent PSBs.
+ */
+ if (!topa_multiple_entries) {
+ psb_period = 256;
+ goto out;
+ }
+
+ err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val);
+ if (err)
+ val = 0;
+
+ psb_period = 1 << (val + 11);
+out:
+ pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period);
+ return psb_period;
+}
+
+static int intel_pt_pick_bit(int bits, int target)
+{
+ int pos, pick = -1;
+
+ for (pos = 0; bits; bits >>= 1, pos++) {
+ if (bits & 1) {
+ if (pos <= target || pick < 0)
+ pick = pos;
+ if (pos >= target)
+ break;
+ }
+ }
+
+ return pick;
+}
+
+static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
+{
+ char buf[256];
+ int mtc, mtc_periods = 0, mtc_period;
+ int psb_cyc, psb_periods, psb_period;
+ int pos = 0;
+ u64 config;
+
+ pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");
+
+ if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc", "%d",
+ &mtc) != 1)
+ mtc = 1;
+
+ if (mtc) {
+ if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc_periods", "%x",
+ &mtc_periods) != 1)
+ mtc_periods = 0;
+ if (mtc_periods) {
+ mtc_period = intel_pt_pick_bit(mtc_periods, 3);
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ ",mtc,mtc_period=%d", mtc_period);
+ }
+ }
+
+ if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_cyc", "%d",
+ &psb_cyc) != 1)
+ psb_cyc = 1;
+
+ if (psb_cyc && mtc_periods) {
+ if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_periods", "%x",
+ &psb_periods) != 1)
+ psb_periods = 0;
+ if (psb_periods) {
+ psb_period = intel_pt_pick_bit(psb_periods, 3);
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ ",psb_period=%d", psb_period);
+ }
+ }
+
+ pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);
+
+ intel_pt_parse_terms(&intel_pt_pmu->format, buf, &config);
+
+ return config;
+}
+
+static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
+ struct record_opts *opts,
+ const char *str)
+{
+ struct intel_pt_recording *ptr =
+ container_of(itr, struct intel_pt_recording, itr);
+ unsigned long long snapshot_size = 0;
+ char *endptr;
+
+ if (str) {
+ snapshot_size = strtoull(str, &endptr, 0);
+ if (*endptr || snapshot_size > SIZE_MAX)
+ return -1;
+ }
+
+ opts->auxtrace_snapshot_mode = true;
+ opts->auxtrace_snapshot_size = snapshot_size;
+
+ ptr->snapshot_size = snapshot_size;
+
+ return 0;
+}
+
+struct perf_event_attr *
+intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
+{
+ struct perf_event_attr *attr;
+
+ attr = zalloc(sizeof(struct perf_event_attr));
+ if (!attr)
+ return NULL;
+
+ attr->config = intel_pt_default_config(intel_pt_pmu);
+
+ intel_pt_pmu->selectable = true;
+
+ return attr;
+}
+
+static size_t intel_pt_info_priv_size(struct auxtrace_record *itr __maybe_unused)
+{
+ return INTEL_PT_AUXTRACE_PRIV_SIZE;
+}
+
+static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
+{
+ unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
+
+ __get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
+ *n = ebx;
+ *d = eax;
+}
+
+static int intel_pt_info_fill(struct auxtrace_record *itr,
+ struct perf_session *session,
+ struct auxtrace_info_event *auxtrace_info,
+ size_t priv_size)
+{
+ struct intel_pt_recording *ptr =
+ container_of(itr, struct intel_pt_recording, itr);
+ struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
+ struct perf_event_mmap_page *pc;
+ struct perf_tsc_conversion tc = { .time_mult = 0, };
+ bool cap_user_time_zero = false, per_cpu_mmaps;
+ u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit;
+ u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d;
+ int err;
+
+ if (priv_size != INTEL_PT_AUXTRACE_PRIV_SIZE)
+ return -EINVAL;
+
+ intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
+ intel_pt_parse_terms(&intel_pt_pmu->format, "noretcomp",
+ &noretcomp_bit);
+ intel_pt_parse_terms(&intel_pt_pmu->format, "mtc", &mtc_bit);
+ mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format,
+ "mtc_period");
+ intel_pt_parse_terms(&intel_pt_pmu->format, "cyc", &cyc_bit);
+
+ intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);
+
+ if (!session->evlist->nr_mmaps)
+ return -EINVAL;
+
+ pc = session->evlist->mmap[0].base;
+ if (pc) {
+ err = perf_read_tsc_conversion(pc, &tc);
+ if (err) {
+ if (err != -EOPNOTSUPP)
+ return err;
+ } else {
+ cap_user_time_zero = tc.time_mult != 0;
+ }
+ if (!cap_user_time_zero)
+ ui__warning("Intel Processor Trace: TSC not available\n");
+ }
+
+ per_cpu_mmaps = !cpu_map__empty(session->evlist->cpus);
+
+ auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
+ auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
+ auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
+ auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
+ auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
+ auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero;
+ auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit;
+ auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit;
+ auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch;
+ auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode;
+ auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps;
+ auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit;
+ auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits;
+ auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n;
+ auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d;
+ auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit;
+
+ return 0;
+}
+
+static int intel_pt_track_switches(struct perf_evlist *evlist)
+{
+ const char *sched_switch = "sched:sched_switch";
+ struct perf_evsel *evsel;
+ int err;
+
+ if (!perf_evlist__can_select_event(evlist, sched_switch))
+ return -EPERM;
+
+ err = parse_events(evlist, sched_switch, NULL);
+ if (err) {
+ pr_debug2("%s: failed to parse %s, error %d\n",
+ __func__, sched_switch, err);
+ return err;
+ }
+
+ evsel = perf_evlist__last(evlist);
+
+ perf_evsel__set_sample_bit(evsel, CPU);
+ perf_evsel__set_sample_bit(evsel, TIME);
+
+ evsel->system_wide = true;
+ evsel->no_aux_samples = true;
+ evsel->immediate = true;
+
+ return 0;
+}
+
+static void intel_pt_valid_str(char *str, size_t len, u64 valid)
+{
+ unsigned int val, last = 0, state = 1;
+ int p = 0;
+
+ str[0] = '\0';
+
+ for (val = 0; val <= 64; val++, valid >>= 1) {
+ if (valid & 1) {
+ last = val;
+ switch (state) {
+ case 0:
+ p += scnprintf(str + p, len - p, ",");
+ /* Fall through */
+ case 1:
+ p += scnprintf(str + p, len - p, "%u", val);
+ state = 2;
+ break;
+ case 2:
+ state = 3;
+ break;
+ case 3:
+ state = 4;
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (state) {
+ case 3:
+ p += scnprintf(str + p, len - p, ",%u", last);
+ state = 0;
+ break;
+ case 4:
+ p += scnprintf(str + p, len - p, "-%u", last);
+ state = 0;
+ break;
+ default:
+ break;
+ }
+ if (state != 1)
+ state = 0;
+ }
+ }
+}
+
+static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
+ const char *caps, const char *name,
+ const char *supported, u64 config)
+{
+ char valid_str[256];
+ unsigned int shift;
+ unsigned long long valid;
+ u64 bits;
+ int ok;
+
+ if (perf_pmu__scan_file(intel_pt_pmu, caps, "%llx", &valid) != 1)
+ valid = 0;
+
+ if (supported &&
+ perf_pmu__scan_file(intel_pt_pmu, supported, "%d", &ok) == 1 && !ok)
+ valid = 0;
+
+ valid |= 1;
+
+ bits = perf_pmu__format_bits(&intel_pt_pmu->format, name);
+
+ config &= bits;
+
+ for (shift = 0; bits && !(bits & 1); shift++)
+ bits >>= 1;
+
+ config >>= shift;
+
+ if (config > 63)
+ goto out_err;
+
+	if (valid & ((u64)1 << config))
+ return 0;
+out_err:
+ intel_pt_valid_str(valid_str, sizeof(valid_str), valid);
+ pr_err("Invalid %s for %s. Valid values are: %s\n",
+ name, INTEL_PT_PMU_NAME, valid_str);
+ return -EINVAL;
+}
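/*
 * Standalone sketch (not part of this patch) of the mask/shift dance
 * above: given the format mask of a config term (the assumption here is
 * psb_period occupying config bits 24-27) extract its value and test it
 * against a caps bitmap such as caps/psb_periods.  Values below are
 * examples only.
 */
#include <stdio.h>
#include <stdint.h>

static int validate_term(uint64_t config, uint64_t bits, uint64_t valid)
{
	unsigned int shift;

	valid |= 1;			/* value 0 is always allowed */
	config &= bits;
	for (shift = 0; bits && !(bits & 1); shift++)
		bits >>= 1;
	config >>= shift;
	if (config > 63)
		return -1;
	return (valid & ((uint64_t)1 << config)) ? 0 : -1;
}

int main(void)
{
	uint64_t psb_bits = 0xfULL << 24;	/* assumed format: config:24-27 */
	uint64_t psb_valid = 0x3f;		/* e.g. caps/psb_periods = 0x3f */

	printf("%d\n", validate_term(3ULL << 24, psb_bits, psb_valid));	/* 0: allowed */
	printf("%d\n", validate_term(9ULL << 24, psb_bits, psb_valid));	/* -1: rejected */
	return 0;
}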
+
+static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
+ struct perf_evsel *evsel)
+{
+ int err;
+
+ if (!evsel)
+ return 0;
+
+ err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
+ "cyc_thresh", "caps/psb_cyc",
+ evsel->attr.config);
+ if (err)
+ return err;
+
+ err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods",
+ "mtc_period", "caps/mtc",
+ evsel->attr.config);
+ if (err)
+ return err;
+
+ return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods",
+ "psb_period", "caps/psb_cyc",
+ evsel->attr.config);
+}
+
+static int intel_pt_recording_options(struct auxtrace_record *itr,
+ struct perf_evlist *evlist,
+ struct record_opts *opts)
+{
+ struct intel_pt_recording *ptr =
+ container_of(itr, struct intel_pt_recording, itr);
+ struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
+ bool have_timing_info;
+ struct perf_evsel *evsel, *intel_pt_evsel = NULL;
+ const struct cpu_map *cpus = evlist->cpus;
+ bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
+ u64 tsc_bit;
+ int err;
+
+ ptr->evlist = evlist;
+ ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
+
+ evlist__for_each(evlist, evsel) {
+ if (evsel->attr.type == intel_pt_pmu->type) {
+ if (intel_pt_evsel) {
+ pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
+ return -EINVAL;
+ }
+ evsel->attr.freq = 0;
+ evsel->attr.sample_period = 1;
+ intel_pt_evsel = evsel;
+ opts->full_auxtrace = true;
+ }
+ }
+
+ if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
+ pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n");
+ return -EINVAL;
+ }
+
+ if (opts->use_clockid) {
+ pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
+ return -EINVAL;
+ }
+
+ if (!opts->full_auxtrace)
+ return 0;
+
+ err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
+ if (err)
+ return err;
+
+ /* Set default sizes for snapshot mode */
+ if (opts->auxtrace_snapshot_mode) {
+ size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);
+
+ if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
+ if (privileged) {
+ opts->auxtrace_mmap_pages = MiB(4) / page_size;
+ } else {
+ opts->auxtrace_mmap_pages = KiB(128) / page_size;
+ if (opts->mmap_pages == UINT_MAX)
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+ } else if (!opts->auxtrace_mmap_pages && !privileged &&
+ opts->mmap_pages == UINT_MAX) {
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+ if (!opts->auxtrace_snapshot_size)
+ opts->auxtrace_snapshot_size =
+ opts->auxtrace_mmap_pages * (size_t)page_size;
+ if (!opts->auxtrace_mmap_pages) {
+ size_t sz = opts->auxtrace_snapshot_size;
+
+ sz = round_up(sz, page_size) / page_size;
+ opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
+ }
+ if (opts->auxtrace_snapshot_size >
+ opts->auxtrace_mmap_pages * (size_t)page_size) {
+ pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
+ opts->auxtrace_snapshot_size,
+ opts->auxtrace_mmap_pages * (size_t)page_size);
+ return -EINVAL;
+ }
+ if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
+ pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
+ return -EINVAL;
+ }
+ pr_debug2("Intel PT snapshot size: %zu\n",
+ opts->auxtrace_snapshot_size);
+ if (psb_period &&
+ opts->auxtrace_snapshot_size <= psb_period +
+ INTEL_PT_PSB_PERIOD_NEAR)
+ ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
+ opts->auxtrace_snapshot_size, psb_period);
+ }
+
+ /* Set default sizes for full trace mode */
+ if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
+ if (privileged) {
+ opts->auxtrace_mmap_pages = MiB(4) / page_size;
+ } else {
+ opts->auxtrace_mmap_pages = KiB(128) / page_size;
+ if (opts->mmap_pages == UINT_MAX)
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+ }
+
+ /* Validate auxtrace_mmap_pages */
+ if (opts->auxtrace_mmap_pages) {
+ size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
+ size_t min_sz;
+
+ if (opts->auxtrace_snapshot_mode)
+ min_sz = KiB(4);
+ else
+ min_sz = KiB(8);
+
+ if (sz < min_sz || !is_power_of_2(sz)) {
+ pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
+ min_sz / 1024);
+ return -EINVAL;
+ }
+ }
+
+ intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
+
+ if (opts->full_auxtrace && (intel_pt_evsel->attr.config & tsc_bit))
+ have_timing_info = true;
+ else
+ have_timing_info = false;
+
+ /*
+ * Per-cpu recording needs sched_switch events to distinguish different
+ * threads.
+ */
+ if (have_timing_info && !cpu_map__empty(cpus)) {
+ if (perf_can_record_switch_events()) {
+ bool cpu_wide = !target__none(&opts->target) &&
+ !target__has_task(&opts->target);
+
+ if (!cpu_wide && perf_can_record_cpu_wide()) {
+ struct perf_evsel *switch_evsel;
+
+ err = parse_events(evlist, "dummy:u", NULL);
+ if (err)
+ return err;
+
+ switch_evsel = perf_evlist__last(evlist);
+
+ switch_evsel->attr.freq = 0;
+ switch_evsel->attr.sample_period = 1;
+ switch_evsel->attr.context_switch = 1;
+
+ switch_evsel->system_wide = true;
+ switch_evsel->no_aux_samples = true;
+ switch_evsel->immediate = true;
+
+ perf_evsel__set_sample_bit(switch_evsel, TID);
+ perf_evsel__set_sample_bit(switch_evsel, TIME);
+ perf_evsel__set_sample_bit(switch_evsel, CPU);
+
+ opts->record_switch_events = false;
+ ptr->have_sched_switch = 3;
+ } else {
+ opts->record_switch_events = true;
+ if (cpu_wide)
+ ptr->have_sched_switch = 3;
+ else
+ ptr->have_sched_switch = 2;
+ }
+ } else {
+ err = intel_pt_track_switches(evlist);
+ if (err == -EPERM)
+ pr_debug2("Unable to select sched:sched_switch\n");
+ else if (err)
+ return err;
+ else
+ ptr->have_sched_switch = 1;
+ }
+ }
+
+ if (intel_pt_evsel) {
+ /*
+ * To obtain the auxtrace buffer file descriptor, the auxtrace
+ * event must come first.
+ */
+ perf_evlist__to_front(evlist, intel_pt_evsel);
+ /*
+ * In the case of per-cpu mmaps, we need the CPU on the
+ * AUX event.
+ */
+ if (!cpu_map__empty(cpus))
+ perf_evsel__set_sample_bit(intel_pt_evsel, CPU);
+ }
+
+ /* Add dummy event to keep tracking */
+ if (opts->full_auxtrace) {
+ struct perf_evsel *tracking_evsel;
+
+ err = parse_events(evlist, "dummy:u", NULL);
+ if (err)
+ return err;
+
+ tracking_evsel = perf_evlist__last(evlist);
+
+ perf_evlist__set_tracking_event(evlist, tracking_evsel);
+
+ tracking_evsel->attr.freq = 0;
+ tracking_evsel->attr.sample_period = 1;
+
+ /* In per-cpu case, always need the time of mmap events etc */
+ if (!cpu_map__empty(cpus)) {
+ perf_evsel__set_sample_bit(tracking_evsel, TIME);
+ /* And the CPU for switch events */
+ perf_evsel__set_sample_bit(tracking_evsel, CPU);
+ }
+ }
+
+ /*
+	 * Warn the user when we do not have enough information to decode, i.e.
+ * per-cpu with no sched_switch (except workload-only).
+ */
+ if (!ptr->have_sched_switch && !cpu_map__empty(cpus) &&
+ !target__none(&opts->target))
+ ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
+
+ return 0;
+}
+
+static int intel_pt_snapshot_start(struct auxtrace_record *itr)
+{
+ struct intel_pt_recording *ptr =
+ container_of(itr, struct intel_pt_recording, itr);
+ struct perf_evsel *evsel;
+
+ evlist__for_each(ptr->evlist, evsel) {
+ if (evsel->attr.type == ptr->intel_pt_pmu->type)
+ return perf_evlist__disable_event(ptr->evlist, evsel);
+ }
+ return -EINVAL;
+}
+
+static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
+{
+ struct intel_pt_recording *ptr =
+ container_of(itr, struct intel_pt_recording, itr);
+ struct perf_evsel *evsel;
+
+ evlist__for_each(ptr->evlist, evsel) {
+ if (evsel->attr.type == ptr->intel_pt_pmu->type)
+ return perf_evlist__enable_event(ptr->evlist, evsel);
+ }
+ return -EINVAL;
+}
+
+static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx)
+{
+ const size_t sz = sizeof(struct intel_pt_snapshot_ref);
+ int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2;
+ struct intel_pt_snapshot_ref *refs;
+
+ if (!new_cnt)
+ new_cnt = 16;
+
+ while (new_cnt <= idx)
+ new_cnt *= 2;
+
+ refs = calloc(new_cnt, sz);
+ if (!refs)
+ return -ENOMEM;
+
+ memcpy(refs, ptr->snapshot_refs, cnt * sz);
+
+ ptr->snapshot_refs = refs;
+ ptr->snapshot_ref_cnt = new_cnt;
+
+ return 0;
+}
+
+static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr)
+{
+ int i;
+
+ for (i = 0; i < ptr->snapshot_ref_cnt; i++)
+ zfree(&ptr->snapshot_refs[i].ref_buf);
+ zfree(&ptr->snapshot_refs);
+}
+
+static void intel_pt_recording_free(struct auxtrace_record *itr)
+{
+ struct intel_pt_recording *ptr =
+ container_of(itr, struct intel_pt_recording, itr);
+
+ intel_pt_free_snapshot_refs(ptr);
+ free(ptr);
+}
+
+static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx,
+ size_t snapshot_buf_size)
+{
+ size_t ref_buf_size = ptr->snapshot_ref_buf_size;
+ void *ref_buf;
+
+ ref_buf = zalloc(ref_buf_size);
+ if (!ref_buf)
+ return -ENOMEM;
+
+ ptr->snapshot_refs[idx].ref_buf = ref_buf;
+ ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size;
+
+ return 0;
+}
+
+static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr,
+ size_t snapshot_buf_size)
+{
+ const size_t max_size = 256 * 1024;
+ size_t buf_size = 0, psb_period;
+
+ if (ptr->snapshot_size <= 64 * 1024)
+ return 0;
+
+ psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist);
+ if (psb_period)
+ buf_size = psb_period * 2;
+
+ if (!buf_size || buf_size > max_size)
+ buf_size = max_size;
+
+ if (buf_size >= snapshot_buf_size)
+ return 0;
+
+ if (buf_size >= ptr->snapshot_size / 2)
+ return 0;
+
+ return buf_size;
+}
+
+static int intel_pt_snapshot_init(struct intel_pt_recording *ptr,
+ size_t snapshot_buf_size)
+{
+ if (ptr->snapshot_init_done)
+ return 0;
+
+ ptr->snapshot_init_done = true;
+
+ ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr,
+ snapshot_buf_size);
+
+ return 0;
+}
+
+/**
+ * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
+ * @buf1: first buffer
+ * @compare_size: number of bytes to compare
+ * @buf2: second buffer (a circular buffer)
+ * @offs2: offset in second buffer
+ * @buf2_size: size of second buffer
+ *
+ * The comparison allows for the possibility that the bytes to compare in the
+ * circular buffer are not contiguous. It is assumed that @compare_size <=
+ * @buf2_size. This function returns %false if the bytes are identical, %true
+ * otherwise.
+ */
+static bool intel_pt_compare_buffers(void *buf1, size_t compare_size,
+ void *buf2, size_t offs2, size_t buf2_size)
+{
+ size_t end2 = offs2 + compare_size, part_size;
+
+ if (end2 <= buf2_size)
+ return memcmp(buf1, buf2 + offs2, compare_size);
+
+ part_size = end2 - buf2_size;
+ if (memcmp(buf1, buf2 + offs2, part_size))
+ return true;
+
+ compare_size -= part_size;
+
+ return memcmp(buf1 + part_size, buf2, compare_size);
+}
+
+static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset,
+ size_t ref_size, size_t buf_size,
+ void *data, size_t head)
+{
+ size_t ref_end = ref_offset + ref_size;
+
+ if (ref_end > buf_size) {
+ if (head > ref_offset || head < ref_end - buf_size)
+ return true;
+ } else if (head > ref_offset && head < ref_end) {
+ return true;
+ }
+
+ return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset,
+ buf_size);
+}
+
+static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size,
+ void *data, size_t head)
+{
+ if (head >= ref_size) {
+ memcpy(ref_buf, data + head - ref_size, ref_size);
+ } else {
+ memcpy(ref_buf, data, head);
+ ref_size -= head;
+ memcpy(ref_buf + head, data + buf_size - ref_size, ref_size);
+ }
+}
+
+static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx,
+ struct auxtrace_mmap *mm, unsigned char *data,
+ u64 head)
+{
+ struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx];
+ bool wrapped;
+
+ wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset,
+ ptr->snapshot_ref_buf_size, mm->len,
+ data, head);
+
+ intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
+ data, head);
+
+ return wrapped;
+}
+
+static bool intel_pt_first_wrap(u64 *data, size_t buf_size)
+{
+ int i, a, b;
+
+ b = buf_size >> 3;
+ a = b - 512;
+ if (a < 0)
+ a = 0;
+
+ for (i = a; i < b; i++) {
+ if (data[i])
+ return true;
+ }
+
+ return false;
+}
+
+static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx,
+ struct auxtrace_mmap *mm, unsigned char *data,
+ u64 *head, u64 *old)
+{
+ struct intel_pt_recording *ptr =
+ container_of(itr, struct intel_pt_recording, itr);
+ bool wrapped;
+ int err;
+
+ pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
+ __func__, idx, (size_t)*old, (size_t)*head);
+
+ err = intel_pt_snapshot_init(ptr, mm->len);
+ if (err)
+ goto out_err;
+
+ if (idx >= ptr->snapshot_ref_cnt) {
+ err = intel_pt_alloc_snapshot_refs(ptr, idx);
+ if (err)
+ goto out_err;
+ }
+
+ if (ptr->snapshot_ref_buf_size) {
+ if (!ptr->snapshot_refs[idx].ref_buf) {
+ err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
+ if (err)
+ goto out_err;
+ }
+ wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
+ } else {
+ wrapped = ptr->snapshot_refs[idx].wrapped;
+ if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
+ ptr->snapshot_refs[idx].wrapped = true;
+ wrapped = true;
+ }
+ }
+
+ /*
+ * In full trace mode 'head' continually increases. However in snapshot
+ * mode 'head' is an offset within the buffer. Here 'old' and 'head'
+ * are adjusted to match the full trace case which expects that 'old' is
+ * always less than 'head'.
+ */
+ if (wrapped) {
+ *old = *head;
+ *head += mm->len;
+ } else {
+ if (mm->mask)
+ *old &= mm->mask;
+ else
+ *old %= mm->len;
+ if (*old > *head)
+ *head += mm->len;
+ }
+
+ pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
+ __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);
+
+ return 0;
+
+out_err:
+ pr_err("%s: failed, error %d\n", __func__, err);
+ return err;
+}
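/*
 * Sketch (not part of this patch) of the head/old adjustment described in
 * the comment above, for a hypothetical 64 KiB AUX buffer; mm->mask is
 * ignored here for brevity.
 */
#include <stdio.h>
#include <stdint.h>

static void adjust(int wrapped, uint64_t len, uint64_t *old, uint64_t *head)
{
	if (wrapped) {
		*old = *head;
		*head += len;
	} else {
		*old %= len;
		if (*old > *head)
			*head += len;
	}
}

int main(void)
{
	uint64_t len = 0x10000, old, head;

	/* Wrapped: the snapshot covers the whole buffer, ending at head */
	old = 0; head = 0x1234;
	adjust(1, len, &old, &head);
	printf("old=%#llx head=%#llx\n",
	       (unsigned long long)old, (unsigned long long)head);
	/* -> old=0x1234 head=0x11234: head - old == len */

	/* Not wrapped, but head is below the previous offset */
	old = 0xf000; head = 0x0200;
	adjust(0, len, &old, &head);
	printf("old=%#llx head=%#llx\n",
	       (unsigned long long)old, (unsigned long long)head);
	/* -> old=0xf000 head=0x10200: 'old' stays below 'head' as required */
	return 0;
}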
+
+static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
+{
+ return rdtsc();
+}
+
+static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
+{
+ struct intel_pt_recording *ptr =
+ container_of(itr, struct intel_pt_recording, itr);
+ struct perf_evsel *evsel;
+
+ evlist__for_each(ptr->evlist, evsel) {
+ if (evsel->attr.type == ptr->intel_pt_pmu->type)
+ return perf_evlist__enable_event_idx(ptr->evlist, evsel,
+ idx);
+ }
+ return -EINVAL;
+}
+
+struct auxtrace_record *intel_pt_recording_init(int *err)
+{
+ struct perf_pmu *intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
+ struct intel_pt_recording *ptr;
+
+ if (!intel_pt_pmu)
+ return NULL;
+
+ ptr = zalloc(sizeof(struct intel_pt_recording));
+ if (!ptr) {
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ ptr->intel_pt_pmu = intel_pt_pmu;
+ ptr->itr.recording_options = intel_pt_recording_options;
+ ptr->itr.info_priv_size = intel_pt_info_priv_size;
+ ptr->itr.info_fill = intel_pt_info_fill;
+ ptr->itr.free = intel_pt_recording_free;
+ ptr->itr.snapshot_start = intel_pt_snapshot_start;
+ ptr->itr.snapshot_finish = intel_pt_snapshot_finish;
+ ptr->itr.find_snapshot = intel_pt_find_snapshot;
+ ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
+ ptr->itr.reference = intel_pt_reference;
+ ptr->itr.read_finish = intel_pt_read_finish;
+ return &ptr->itr;
+}
diff --git a/kernel/tools/perf/arch/x86/util/perf_regs.c b/kernel/tools/perf/arch/x86/util/perf_regs.c
new file mode 100644
index 000000000..c5db14f36
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/util/perf_regs.c
@@ -0,0 +1,28 @@
+#include "../../perf.h"
+#include "../../util/perf_regs.h"
+
+const struct sample_reg sample_reg_masks[] = {
+ SMPL_REG(AX, PERF_REG_X86_AX),
+ SMPL_REG(BX, PERF_REG_X86_BX),
+ SMPL_REG(CX, PERF_REG_X86_CX),
+ SMPL_REG(DX, PERF_REG_X86_DX),
+ SMPL_REG(SI, PERF_REG_X86_SI),
+ SMPL_REG(DI, PERF_REG_X86_DI),
+ SMPL_REG(BP, PERF_REG_X86_BP),
+ SMPL_REG(SP, PERF_REG_X86_SP),
+ SMPL_REG(IP, PERF_REG_X86_IP),
+ SMPL_REG(FLAGS, PERF_REG_X86_FLAGS),
+ SMPL_REG(CS, PERF_REG_X86_CS),
+ SMPL_REG(SS, PERF_REG_X86_SS),
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+ SMPL_REG(R8, PERF_REG_X86_R8),
+ SMPL_REG(R9, PERF_REG_X86_R9),
+ SMPL_REG(R10, PERF_REG_X86_R10),
+ SMPL_REG(R11, PERF_REG_X86_R11),
+ SMPL_REG(R12, PERF_REG_X86_R12),
+ SMPL_REG(R13, PERF_REG_X86_R13),
+ SMPL_REG(R14, PERF_REG_X86_R14),
+ SMPL_REG(R15, PERF_REG_X86_R15),
+#endif
+ SMPL_REG_END
+};
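/*
 * Standalone sketch (not part of this patch): how a table like
 * sample_reg_masks is typically consumed - each entry pairs a register
 * name with a single bit, and selected entries are OR-ed into the
 * sample_regs_intr/sample_regs_user bitmask of perf_event_attr.  The
 * cut-down table and bit numbers below are for illustration only.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct sample_reg {
	const char *name;
	uint64_t mask;
};

#define SMPL_REG(n, b) { .name = #n, .mask = 1ULL << (b) }
#define SMPL_REG_END { .name = NULL }

static const struct sample_reg regs[] = {
	SMPL_REG(AX, 0),	/* PERF_REG_X86_AX */
	SMPL_REG(BX, 1),	/* PERF_REG_X86_BX */
	SMPL_REG(CX, 2),	/* PERF_REG_X86_CX */
	SMPL_REG(DX, 3),	/* PERF_REG_X86_DX */
	SMPL_REG_END
};

int main(void)
{
	uint64_t mask = 0;

	/* e.g. the user asked for registers "AX" and "DX" */
	for (const struct sample_reg *r = regs; r->name; r++)
		if (!strcmp(r->name, "AX") || !strcmp(r->name, "DX"))
			mask |= r->mask;

	printf("sample_regs mask = %#llx\n", (unsigned long long)mask);	/* 0x9 */
	return 0;
}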
diff --git a/kernel/tools/perf/arch/x86/util/pmu.c b/kernel/tools/perf/arch/x86/util/pmu.c
new file mode 100644
index 000000000..79fe07158
--- /dev/null
+++ b/kernel/tools/perf/arch/x86/util/pmu.c
@@ -0,0 +1,18 @@
+#include <string.h>
+
+#include <linux/perf_event.h>
+
+#include "../../util/intel-pt.h"
+#include "../../util/intel-bts.h"
+#include "../../util/pmu.h"
+
+struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
+{
+#ifdef HAVE_AUXTRACE_SUPPORT
+ if (!strcmp(pmu->name, INTEL_PT_PMU_NAME))
+ return intel_pt_pmu_default_config(pmu);
+ if (!strcmp(pmu->name, INTEL_BTS_PMU_NAME))
+ pmu->selectable = true;
+#endif
+ return NULL;
+}