From e44e3482bdb4d0ebde2d8b41830ac2cdb07948fb Mon Sep 17 00:00:00 2001
From: Yang Zhang
Date: Fri, 28 Aug 2015 09:58:54 +0800
Subject: Add qemu 2.4.0

Change-Id: Ic99cbad4b61f8b127b7dc74d04576c0bcbaaf4f5
Signed-off-by: Yang Zhang
---
 qemu/disas/libvixl/LICENCE                 |   30 +
 qemu/disas/libvixl/Makefile.objs           |    8 +
 qemu/disas/libvixl/README                  |   12 +
 qemu/disas/libvixl/a64/assembler-a64.h     | 2353 ++++++++++++++++++++++++++++
 qemu/disas/libvixl/a64/constants-a64.h     | 1213 ++++++++++++++
 qemu/disas/libvixl/a64/cpu-a64.h           |   83 +
 qemu/disas/libvixl/a64/decoder-a64.cc      |  707 +++++++++
 qemu/disas/libvixl/a64/decoder-a64.h       |  239 +++
 qemu/disas/libvixl/a64/disasm-a64.cc       | 1954 +++++++++++++++++++++++
 qemu/disas/libvixl/a64/disasm-a64.h        |  176 +++
 qemu/disas/libvixl/a64/instructions-a64.cc |  314 ++++
 qemu/disas/libvixl/a64/instructions-a64.h  |  384 +++++
 qemu/disas/libvixl/code-buffer.h           |  113 ++
 qemu/disas/libvixl/globals.h               |   85 +
 qemu/disas/libvixl/platform.h              |   37 +
 qemu/disas/libvixl/utils.cc                |  151 ++
 qemu/disas/libvixl/utils.h                 |  211 +++
 17 files changed, 8070 insertions(+)
 create mode 100644 qemu/disas/libvixl/LICENCE
 create mode 100644 qemu/disas/libvixl/Makefile.objs
 create mode 100644 qemu/disas/libvixl/README
 create mode 100644 qemu/disas/libvixl/a64/assembler-a64.h
 create mode 100644 qemu/disas/libvixl/a64/constants-a64.h
 create mode 100644 qemu/disas/libvixl/a64/cpu-a64.h
 create mode 100644 qemu/disas/libvixl/a64/decoder-a64.cc
 create mode 100644 qemu/disas/libvixl/a64/decoder-a64.h
 create mode 100644 qemu/disas/libvixl/a64/disasm-a64.cc
 create mode 100644 qemu/disas/libvixl/a64/disasm-a64.h
 create mode 100644 qemu/disas/libvixl/a64/instructions-a64.cc
 create mode 100644 qemu/disas/libvixl/a64/instructions-a64.h
 create mode 100644 qemu/disas/libvixl/code-buffer.h
 create mode 100644 qemu/disas/libvixl/globals.h
 create mode 100644 qemu/disas/libvixl/platform.h
 create mode 100644 qemu/disas/libvixl/utils.cc
 create mode 100644 qemu/disas/libvixl/utils.h

diff --git a/qemu/disas/libvixl/LICENCE b/qemu/disas/libvixl/LICENCE
new file mode 100644
index 000000000..b7e160a3f
--- /dev/null
+++ b/qemu/disas/libvixl/LICENCE
@@ -0,0 +1,30 @@
+LICENCE
+=======
+
+The software in this repository is covered by the following licence.
+
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/qemu/disas/libvixl/Makefile.objs b/qemu/disas/libvixl/Makefile.objs
new file mode 100644
index 000000000..17e6565d1
--- /dev/null
+++ b/qemu/disas/libvixl/Makefile.objs
@@ -0,0 +1,8 @@
+libvixl_OBJS = utils.o \
+               a64/instructions-a64.o \
+               a64/decoder-a64.o \
+               a64/disasm-a64.o
+
+$(addprefix $(obj)/,$(libvixl_OBJS)): QEMU_CFLAGS := -I$(SRC_PATH)/disas/libvixl $(QEMU_CFLAGS)
+
+common-obj-$(CONFIG_ARM_A64_DIS) += $(libvixl_OBJS)
diff --git a/qemu/disas/libvixl/README b/qemu/disas/libvixl/README
new file mode 100644
index 000000000..58db41c67
--- /dev/null
+++ b/qemu/disas/libvixl/README
@@ -0,0 +1,12 @@
+
+The code in this directory is a subset of libvixl:
+ https://github.com/armvixl/vixl
+(specifically, it is the set of files needed for disassembly only,
+taken from libvixl 1.7).
+Bugfixes should preferably be sent upstream initially.
+
+The disassembler does not currently support the entire A64 instruction
+set. Notably:
+ * No Advanced SIMD support.
+ * Limited support for system instructions.
+ * A few miscellaneous integer and floating point instructions are missing.
diff --git a/qemu/disas/libvixl/a64/assembler-a64.h b/qemu/disas/libvixl/a64/assembler-a64.h
new file mode 100644
index 000000000..35aaf20f7
--- /dev/null
+++ b/qemu/disas/libvixl/a64/assembler-a64.h
@@ -0,0 +1,2353 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +#ifndef VIXL_A64_ASSEMBLER_A64_H_ +#define VIXL_A64_ASSEMBLER_A64_H_ + +#include +#include + +#include "globals.h" +#include "utils.h" +#include "code-buffer.h" +#include "a64/instructions-a64.h" + +namespace vixl { + +typedef uint64_t RegList; +static const int kRegListSizeInBits = sizeof(RegList) * 8; + + +// Registers. + +// Some CPURegister methods can return Register and FPRegister types, so we +// need to declare them in advance. +class Register; +class FPRegister; + + +class CPURegister { + public: + enum RegisterType { + // The kInvalid value is used to detect uninitialized static instances, + // which are always zero-initialized before any constructors are called. + kInvalid = 0, + kRegister, + kFPRegister, + kNoRegister + }; + + CPURegister() : code_(0), size_(0), type_(kNoRegister) { + VIXL_ASSERT(!IsValid()); + VIXL_ASSERT(IsNone()); + } + + CPURegister(unsigned code, unsigned size, RegisterType type) + : code_(code), size_(size), type_(type) { + VIXL_ASSERT(IsValidOrNone()); + } + + unsigned code() const { + VIXL_ASSERT(IsValid()); + return code_; + } + + RegisterType type() const { + VIXL_ASSERT(IsValidOrNone()); + return type_; + } + + RegList Bit() const { + VIXL_ASSERT(code_ < (sizeof(RegList) * 8)); + return IsValid() ? (static_cast(1) << code_) : 0; + } + + unsigned size() const { + VIXL_ASSERT(IsValid()); + return size_; + } + + int SizeInBytes() const { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(size() % 8 == 0); + return size_ / 8; + } + + int SizeInBits() const { + VIXL_ASSERT(IsValid()); + return size_; + } + + bool Is32Bits() const { + VIXL_ASSERT(IsValid()); + return size_ == 32; + } + + bool Is64Bits() const { + VIXL_ASSERT(IsValid()); + return size_ == 64; + } + + bool IsValid() const { + if (IsValidRegister() || IsValidFPRegister()) { + VIXL_ASSERT(!IsNone()); + return true; + } else { + VIXL_ASSERT(IsNone()); + return false; + } + } + + bool IsValidRegister() const { + return IsRegister() && + ((size_ == kWRegSize) || (size_ == kXRegSize)) && + ((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode)); + } + + bool IsValidFPRegister() const { + return IsFPRegister() && + ((size_ == kSRegSize) || (size_ == kDRegSize)) && + (code_ < kNumberOfFPRegisters); + } + + bool IsNone() const { + // kNoRegister types should always have size 0 and code 0. 
+ VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0)); + VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0)); + + return type_ == kNoRegister; + } + + bool Aliases(const CPURegister& other) const { + VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone()); + return (code_ == other.code_) && (type_ == other.type_); + } + + bool Is(const CPURegister& other) const { + VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone()); + return Aliases(other) && (size_ == other.size_); + } + + bool IsZero() const { + VIXL_ASSERT(IsValid()); + return IsRegister() && (code_ == kZeroRegCode); + } + + bool IsSP() const { + VIXL_ASSERT(IsValid()); + return IsRegister() && (code_ == kSPRegInternalCode); + } + + bool IsRegister() const { + return type_ == kRegister; + } + + bool IsFPRegister() const { + return type_ == kFPRegister; + } + + bool IsW() const { return IsValidRegister() && Is32Bits(); } + bool IsX() const { return IsValidRegister() && Is64Bits(); } + bool IsS() const { return IsValidFPRegister() && Is32Bits(); } + bool IsD() const { return IsValidFPRegister() && Is64Bits(); } + + const Register& W() const; + const Register& X() const; + const FPRegister& S() const; + const FPRegister& D() const; + + bool IsSameSizeAndType(const CPURegister& other) const { + return (size_ == other.size_) && (type_ == other.type_); + } + + protected: + unsigned code_; + unsigned size_; + RegisterType type_; + + private: + bool IsValidOrNone() const { + return IsValid() || IsNone(); + } +}; + + +class Register : public CPURegister { + public: + Register() : CPURegister() {} + explicit Register(const CPURegister& other) + : CPURegister(other.code(), other.size(), other.type()) { + VIXL_ASSERT(IsValidRegister()); + } + Register(unsigned code, unsigned size) + : CPURegister(code, size, kRegister) {} + + bool IsValid() const { + VIXL_ASSERT(IsRegister() || IsNone()); + return IsValidRegister(); + } + + static const Register& WRegFromCode(unsigned code); + static const Register& XRegFromCode(unsigned code); + + private: + static const Register wregisters[]; + static const Register xregisters[]; +}; + + +class FPRegister : public CPURegister { + public: + FPRegister() : CPURegister() {} + explicit FPRegister(const CPURegister& other) + : CPURegister(other.code(), other.size(), other.type()) { + VIXL_ASSERT(IsValidFPRegister()); + } + FPRegister(unsigned code, unsigned size) + : CPURegister(code, size, kFPRegister) {} + + bool IsValid() const { + VIXL_ASSERT(IsFPRegister() || IsNone()); + return IsValidFPRegister(); + } + + static const FPRegister& SRegFromCode(unsigned code); + static const FPRegister& DRegFromCode(unsigned code); + + private: + static const FPRegister sregisters[]; + static const FPRegister dregisters[]; +}; + + +// No*Reg is used to indicate an unused argument, or an error case. Note that +// these all compare equal (using the Is() method). The Register and FPRegister +// variants are provided for convenience. +const Register NoReg; +const FPRegister NoFPReg; +const CPURegister NoCPUReg; + + +#define DEFINE_REGISTERS(N) \ +const Register w##N(N, kWRegSize); \ +const Register x##N(N, kXRegSize); +REGISTER_CODE_LIST(DEFINE_REGISTERS) +#undef DEFINE_REGISTERS +const Register wsp(kSPRegInternalCode, kWRegSize); +const Register sp(kSPRegInternalCode, kXRegSize); + + +#define DEFINE_FPREGISTERS(N) \ +const FPRegister s##N(N, kSRegSize); \ +const FPRegister d##N(N, kDRegSize); +REGISTER_CODE_LIST(DEFINE_FPREGISTERS) +#undef DEFINE_FPREGISTERS + + +// Registers aliases. 
+const Register ip0 = x16; +const Register ip1 = x17; +const Register lr = x30; +const Register xzr = x31; +const Register wzr = w31; + + +// AreAliased returns true if any of the named registers overlap. Arguments +// set to NoReg are ignored. The system stack pointer may be specified. +bool AreAliased(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoReg, + const CPURegister& reg4 = NoReg, + const CPURegister& reg5 = NoReg, + const CPURegister& reg6 = NoReg, + const CPURegister& reg7 = NoReg, + const CPURegister& reg8 = NoReg); + + +// AreSameSizeAndType returns true if all of the specified registers have the +// same size, and are of the same type. The system stack pointer may be +// specified. Arguments set to NoReg are ignored, as are any subsequent +// arguments. At least one argument (reg1) must be valid (not NoCPUReg). +bool AreSameSizeAndType(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoCPUReg, + const CPURegister& reg4 = NoCPUReg, + const CPURegister& reg5 = NoCPUReg, + const CPURegister& reg6 = NoCPUReg, + const CPURegister& reg7 = NoCPUReg, + const CPURegister& reg8 = NoCPUReg); + + +// Lists of registers. +class CPURegList { + public: + explicit CPURegList(CPURegister reg1, + CPURegister reg2 = NoCPUReg, + CPURegister reg3 = NoCPUReg, + CPURegister reg4 = NoCPUReg) + : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()), + size_(reg1.size()), type_(reg1.type()) { + VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4)); + VIXL_ASSERT(IsValid()); + } + + CPURegList(CPURegister::RegisterType type, unsigned size, RegList list) + : list_(list), size_(size), type_(type) { + VIXL_ASSERT(IsValid()); + } + + CPURegList(CPURegister::RegisterType type, unsigned size, + unsigned first_reg, unsigned last_reg) + : size_(size), type_(type) { + VIXL_ASSERT(((type == CPURegister::kRegister) && + (last_reg < kNumberOfRegisters)) || + ((type == CPURegister::kFPRegister) && + (last_reg < kNumberOfFPRegisters))); + VIXL_ASSERT(last_reg >= first_reg); + list_ = (UINT64_C(1) << (last_reg + 1)) - 1; + list_ &= ~((UINT64_C(1) << first_reg) - 1); + VIXL_ASSERT(IsValid()); + } + + CPURegister::RegisterType type() const { + VIXL_ASSERT(IsValid()); + return type_; + } + + // Combine another CPURegList into this one. Registers that already exist in + // this list are left unchanged. The type and size of the registers in the + // 'other' list must match those in this list. + void Combine(const CPURegList& other) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(other.type() == type_); + VIXL_ASSERT(other.RegisterSizeInBits() == size_); + list_ |= other.list(); + } + + // Remove every register in the other CPURegList from this one. Registers that + // do not exist in this list are ignored. The type and size of the registers + // in the 'other' list must match those in this list. + void Remove(const CPURegList& other) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(other.type() == type_); + VIXL_ASSERT(other.RegisterSizeInBits() == size_); + list_ &= ~other.list(); + } + + // Variants of Combine and Remove which take a single register. 
+ void Combine(const CPURegister& other) { + VIXL_ASSERT(other.type() == type_); + VIXL_ASSERT(other.size() == size_); + Combine(other.code()); + } + + void Remove(const CPURegister& other) { + VIXL_ASSERT(other.type() == type_); + VIXL_ASSERT(other.size() == size_); + Remove(other.code()); + } + + // Variants of Combine and Remove which take a single register by its code; + // the type and size of the register is inferred from this list. + void Combine(int code) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(CPURegister(code, size_, type_).IsValid()); + list_ |= (UINT64_C(1) << code); + } + + void Remove(int code) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(CPURegister(code, size_, type_).IsValid()); + list_ &= ~(UINT64_C(1) << code); + } + + static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) { + VIXL_ASSERT(list_1.type_ == list_2.type_); + VIXL_ASSERT(list_1.size_ == list_2.size_); + return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_); + } + static CPURegList Union(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3); + static CPURegList Union(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3, + const CPURegList& list_4); + + static CPURegList Intersection(const CPURegList& list_1, + const CPURegList& list_2) { + VIXL_ASSERT(list_1.type_ == list_2.type_); + VIXL_ASSERT(list_1.size_ == list_2.size_); + return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_); + } + static CPURegList Intersection(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3); + static CPURegList Intersection(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3, + const CPURegList& list_4); + + RegList list() const { + VIXL_ASSERT(IsValid()); + return list_; + } + + void set_list(RegList new_list) { + VIXL_ASSERT(IsValid()); + list_ = new_list; + } + + // Remove all callee-saved registers from the list. This can be useful when + // preparing registers for an AAPCS64 function call, for example. + void RemoveCalleeSaved(); + + CPURegister PopLowestIndex(); + CPURegister PopHighestIndex(); + + // AAPCS64 callee-saved registers. + static CPURegList GetCalleeSaved(unsigned size = kXRegSize); + static CPURegList GetCalleeSavedFP(unsigned size = kDRegSize); + + // AAPCS64 caller-saved registers. Note that this includes lr. + static CPURegList GetCallerSaved(unsigned size = kXRegSize); + static CPURegList GetCallerSavedFP(unsigned size = kDRegSize); + + bool IsEmpty() const { + VIXL_ASSERT(IsValid()); + return list_ == 0; + } + + bool IncludesAliasOf(const CPURegister& other) const { + VIXL_ASSERT(IsValid()); + return (type_ == other.type()) && ((other.Bit() & list_) != 0); + } + + bool IncludesAliasOf(int code) const { + VIXL_ASSERT(IsValid()); + return ((code & list_) != 0); + } + + int Count() const { + VIXL_ASSERT(IsValid()); + return CountSetBits(list_, kRegListSizeInBits); + } + + unsigned RegisterSizeInBits() const { + VIXL_ASSERT(IsValid()); + return size_; + } + + unsigned RegisterSizeInBytes() const { + int size_in_bits = RegisterSizeInBits(); + VIXL_ASSERT((size_in_bits % 8) == 0); + return size_in_bits / 8; + } + + unsigned TotalSizeInBytes() const { + VIXL_ASSERT(IsValid()); + return RegisterSizeInBytes() * Count(); + } + + private: + RegList list_; + unsigned size_; + CPURegister::RegisterType type_; + + bool IsValid() const; +}; + + +// AAPCS64 callee-saved registers. 
+extern const CPURegList kCalleeSaved; +extern const CPURegList kCalleeSavedFP; + + +// AAPCS64 caller-saved registers. Note that this includes lr. +extern const CPURegList kCallerSaved; +extern const CPURegList kCallerSavedFP; + + +// Operand. +class Operand { + public: + // # + // where is int64_t. + // This is allowed to be an implicit constructor because Operand is + // a wrapper class that doesn't normally perform any type conversion. + Operand(int64_t immediate); // NOLINT(runtime/explicit) + + // rm, { #} + // where is one of {LSL, LSR, ASR, ROR}. + // is uint6_t. + // This is allowed to be an implicit constructor because Operand is + // a wrapper class that doesn't normally perform any type conversion. + Operand(Register reg, + Shift shift = LSL, + unsigned shift_amount = 0); // NOLINT(runtime/explicit) + + // rm, { {#}} + // where is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}. + // is uint2_t. + explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0); + + bool IsImmediate() const; + bool IsShiftedRegister() const; + bool IsExtendedRegister() const; + bool IsZero() const; + + // This returns an LSL shift (<= 4) operand as an equivalent extend operand, + // which helps in the encoding of instructions that use the stack pointer. + Operand ToExtendedRegister() const; + + int64_t immediate() const { + VIXL_ASSERT(IsImmediate()); + return immediate_; + } + + Register reg() const { + VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister()); + return reg_; + } + + Shift shift() const { + VIXL_ASSERT(IsShiftedRegister()); + return shift_; + } + + Extend extend() const { + VIXL_ASSERT(IsExtendedRegister()); + return extend_; + } + + unsigned shift_amount() const { + VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister()); + return shift_amount_; + } + + private: + int64_t immediate_; + Register reg_; + Shift shift_; + Extend extend_; + unsigned shift_amount_; +}; + + +// MemOperand represents the addressing mode of a load or store instruction. +class MemOperand { + public: + explicit MemOperand(Register base, + int64_t offset = 0, + AddrMode addrmode = Offset); + explicit MemOperand(Register base, + Register regoffset, + Shift shift = LSL, + unsigned shift_amount = 0); + explicit MemOperand(Register base, + Register regoffset, + Extend extend, + unsigned shift_amount = 0); + explicit MemOperand(Register base, + const Operand& offset, + AddrMode addrmode = Offset); + + const Register& base() const { return base_; } + const Register& regoffset() const { return regoffset_; } + int64_t offset() const { return offset_; } + AddrMode addrmode() const { return addrmode_; } + Shift shift() const { return shift_; } + Extend extend() const { return extend_; } + unsigned shift_amount() const { return shift_amount_; } + bool IsImmediateOffset() const; + bool IsRegisterOffset() const; + bool IsPreIndex() const; + bool IsPostIndex() const; + + private: + Register base_; + Register regoffset_; + int64_t offset_; + AddrMode addrmode_; + Shift shift_; + Extend extend_; + unsigned shift_amount_; +}; + + +class Label { + public: + Label() : location_(kLocationUnbound) {} + ~Label() { + // If the label has been linked to, it needs to be bound to a target. + VIXL_ASSERT(!IsLinked() || IsBound()); + } + + bool IsBound() const { return location_ >= 0; } + bool IsLinked() const { return !links_.empty(); } + + ptrdiff_t location() const { return location_; } + + private: + // The list of linked instructions is stored in a stack-like structure. 
We + // don't use std::stack directly because it's slow for the common case where + // only one or two instructions refer to a label, and labels themselves are + // short-lived. This class behaves like std::stack, but the first few links + // are preallocated (configured by kPreallocatedLinks). + // + // If more than N links are required, this falls back to std::stack. + class LinksStack { + public: + LinksStack() : size_(0), links_extended_(NULL) {} + ~LinksStack() { + delete links_extended_; + } + + size_t size() const { + return size_; + } + + bool empty() const { + return size_ == 0; + } + + void push(ptrdiff_t value) { + if (size_ < kPreallocatedLinks) { + links_[size_] = value; + } else { + if (links_extended_ == NULL) { + links_extended_ = new std::stack(); + } + VIXL_ASSERT(size_ == (links_extended_->size() + kPreallocatedLinks)); + links_extended_->push(value); + } + size_++; + } + + ptrdiff_t top() const { + return (size_ <= kPreallocatedLinks) ? links_[size_ - 1] + : links_extended_->top(); + } + + void pop() { + size_--; + if (size_ >= kPreallocatedLinks) { + links_extended_->pop(); + VIXL_ASSERT(size_ == (links_extended_->size() + kPreallocatedLinks)); + } + } + + private: + static const size_t kPreallocatedLinks = 4; + + size_t size_; + ptrdiff_t links_[kPreallocatedLinks]; + std::stack * links_extended_; + }; + + void Bind(ptrdiff_t location) { + // Labels can only be bound once. + VIXL_ASSERT(!IsBound()); + location_ = location; + } + + void AddLink(ptrdiff_t instruction) { + // If a label is bound, the assembler already has the information it needs + // to write the instruction, so there is no need to add it to links_. + VIXL_ASSERT(!IsBound()); + links_.push(instruction); + } + + ptrdiff_t GetAndRemoveNextLink() { + VIXL_ASSERT(IsLinked()); + ptrdiff_t link = links_.top(); + links_.pop(); + return link; + } + + // The offsets of the instructions that have linked to this label. + LinksStack links_; + // The label location. + ptrdiff_t location_; + + static const ptrdiff_t kLocationUnbound = -1; + + // It is not safe to copy labels, so disable the copy constructor by declaring + // it private (without an implementation). + Label(const Label&); + + // The Assembler class is responsible for binding and linking labels, since + // the stored offsets need to be consistent with the Assembler's buffer. + friend class Assembler; +}; + + +// A literal is a 32-bit or 64-bit piece of data stored in the instruction +// stream and loaded through a pc relative load. The same literal can be +// referred to by multiple instructions but a literal can only reside at one +// place in memory. A literal can be used by a load before or after being +// placed in memory. +// +// Internally an offset of 0 is associated with a literal which has been +// neither used nor placed. Then two possibilities arise: +// 1) the label is placed, the offset (stored as offset + 1) is used to +// resolve any subsequent load using the label. +// 2) the label is not placed and offset is the offset of the last load using +// the literal (stored as -offset -1). If multiple loads refer to this +// literal then the last load holds the offset of the preceding load and +// all loads form a chain. Once the offset is placed all the loads in the +// chain are resolved and future loads fall back to possibility 1. 
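+// Illustrative usage sketch (not part of the upstream VIXL sources): a rough
+// example of how the Assembler, Label and Literal interfaces declared in this
+// header fit together. The buffer capacity and register choices are arbitrary
+// assumptions made for the example only.
+//
+//   Assembler masm(4096);            // code buffer with an assumed capacity
+//   Label loop;
+//   Literal<uint64_t> forty_two(42);
+//
+//   masm.Reset();
+//   masm.ldr(x0, &forty_two);        // pc-relative load; literal not yet placed
+//   masm.bind(&loop);                // bind the label to the current cursor
+//   masm.sub(x0, x0, 1);
+//   masm.cbnz(x0, &loop);            // loop back while x0 is non-zero
+//   masm.ret();
+//   masm.place(&forty_two);          // emit the literal data into the stream
+//   masm.FinalizeCode();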
+class RawLiteral { + public: + RawLiteral() : size_(0), offset_(0), raw_value_(0) {} + + size_t size() { + VIXL_STATIC_ASSERT(kDRegSizeInBytes == kXRegSizeInBytes); + VIXL_STATIC_ASSERT(kSRegSizeInBytes == kWRegSizeInBytes); + VIXL_ASSERT((size_ == kXRegSizeInBytes) || (size_ == kWRegSizeInBytes)); + return size_; + } + uint64_t raw_value64() { + VIXL_ASSERT(size_ == kXRegSizeInBytes); + return raw_value_; + } + uint32_t raw_value32() { + VIXL_ASSERT(size_ == kWRegSizeInBytes); + VIXL_ASSERT(is_uint32(raw_value_) || is_int32(raw_value_)); + return static_cast(raw_value_); + } + bool IsUsed() { return offset_ < 0; } + bool IsPlaced() { return offset_ > 0; } + + protected: + ptrdiff_t offset() { + VIXL_ASSERT(IsPlaced()); + return offset_ - 1; + } + void set_offset(ptrdiff_t offset) { + VIXL_ASSERT(offset >= 0); + VIXL_ASSERT(IsWordAligned(offset)); + VIXL_ASSERT(!IsPlaced()); + offset_ = offset + 1; + } + ptrdiff_t last_use() { + VIXL_ASSERT(IsUsed()); + return -offset_ - 1; + } + void set_last_use(ptrdiff_t offset) { + VIXL_ASSERT(offset >= 0); + VIXL_ASSERT(IsWordAligned(offset)); + VIXL_ASSERT(!IsPlaced()); + offset_ = -offset - 1; + } + + size_t size_; + ptrdiff_t offset_; + uint64_t raw_value_; + + friend class Assembler; +}; + + +template +class Literal : public RawLiteral { + public: + explicit Literal(T value) { + size_ = sizeof(value); + memcpy(&raw_value_, &value, sizeof(value)); + } +}; + + +// Control whether or not position-independent code should be emitted. +enum PositionIndependentCodeOption { + // All code generated will be position-independent; all branches and + // references to labels generated with the Label class will use PC-relative + // addressing. + PositionIndependentCode, + + // Allow VIXL to generate code that refers to absolute addresses. With this + // option, it will not be possible to copy the code buffer and run it from a + // different address; code must be generated in its final location. + PositionDependentCode, + + // Allow VIXL to assume that the bottom 12 bits of the address will be + // constant, but that the top 48 bits may change. This allows `adrp` to + // function in systems which copy code between pages, but otherwise maintain + // 4KB page alignment. + PageOffsetDependentCode +}; + + +// Control how scaled- and unscaled-offset loads and stores are generated. +enum LoadStoreScalingOption { + // Prefer scaled-immediate-offset instructions, but emit unscaled-offset, + // register-offset, pre-index or post-index instructions if necessary. + PreferScaledOffset, + + // Prefer unscaled-immediate-offset instructions, but emit scaled-offset, + // register-offset, pre-index or post-index instructions if necessary. + PreferUnscaledOffset, + + // Require scaled-immediate-offset instructions. + RequireScaledOffset, + + // Require unscaled-immediate-offset instructions. + RequireUnscaledOffset +}; + + +// Assembler. +class Assembler { + public: + Assembler(size_t capacity, + PositionIndependentCodeOption pic = PositionIndependentCode); + Assembler(byte* buffer, size_t capacity, + PositionIndependentCodeOption pic = PositionIndependentCode); + + // The destructor asserts that one of the following is true: + // * The Assembler object has not been used. + // * Nothing has been emitted since the last Reset() call. + // * Nothing has been emitted since the last FinalizeCode() call. + ~Assembler(); + + // System functions. + + // Start generating code from the beginning of the buffer, discarding any code + // and data that has already been emitted into the buffer. 
+ void Reset(); + + // Finalize a code buffer of generated instructions. This function must be + // called before executing or copying code from the buffer. + void FinalizeCode(); + + // Label. + // Bind a label to the current PC. + void bind(Label* label); + + // Bind a label to a specified offset from the start of the buffer. + void BindToOffset(Label* label, ptrdiff_t offset); + + // Place a literal at the current PC. + void place(RawLiteral* literal); + + ptrdiff_t CursorOffset() const { + return buffer_->CursorOffset(); + } + + ptrdiff_t BufferEndOffset() const { + return static_cast(buffer_->capacity()); + } + + // Return the address of an offset in the buffer. + template + T GetOffsetAddress(ptrdiff_t offset) { + VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); + return buffer_->GetOffsetAddress(offset); + } + + // Return the address of a bound label. + template + T GetLabelAddress(const Label * label) { + VIXL_ASSERT(label->IsBound()); + VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); + return GetOffsetAddress(label->location()); + } + + // Return the address of the cursor. + template + T GetCursorAddress() { + VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); + return GetOffsetAddress(CursorOffset()); + } + + // Return the address of the start of the buffer. + template + T GetStartAddress() { + VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); + return GetOffsetAddress(0); + } + + // Instruction set functions. + + // Branch / Jump instructions. + // Branch to register. + void br(const Register& xn); + + // Branch with link to register. + void blr(const Register& xn); + + // Branch to register with return hint. + void ret(const Register& xn = lr); + + // Unconditional branch to label. + void b(Label* label); + + // Conditional branch to label. + void b(Label* label, Condition cond); + + // Unconditional branch to PC offset. + void b(int imm26); + + // Conditional branch to PC offset. + void b(int imm19, Condition cond); + + // Branch with link to label. + void bl(Label* label); + + // Branch with link to PC offset. + void bl(int imm26); + + // Compare and branch to label if zero. + void cbz(const Register& rt, Label* label); + + // Compare and branch to PC offset if zero. + void cbz(const Register& rt, int imm19); + + // Compare and branch to label if not zero. + void cbnz(const Register& rt, Label* label); + + // Compare and branch to PC offset if not zero. + void cbnz(const Register& rt, int imm19); + + // Test bit and branch to label if zero. + void tbz(const Register& rt, unsigned bit_pos, Label* label); + + // Test bit and branch to PC offset if zero. + void tbz(const Register& rt, unsigned bit_pos, int imm14); + + // Test bit and branch to label if not zero. + void tbnz(const Register& rt, unsigned bit_pos, Label* label); + + // Test bit and branch to PC offset if not zero. + void tbnz(const Register& rt, unsigned bit_pos, int imm14); + + // Address calculation instructions. + // Calculate a PC-relative address. Unlike for branches the offset in adr is + // unscaled (i.e. the result can be unaligned). + + // Calculate the address of a label. + void adr(const Register& rd, Label* label); + + // Calculate the address of a PC offset. + void adr(const Register& rd, int imm21); + + // Calculate the page address of a label. + void adrp(const Register& rd, Label* label); + + // Calculate the page address of a PC offset. + void adrp(const Register& rd, int imm21); + + // Data Processing instructions. + // Add. 
+ void add(const Register& rd, + const Register& rn, + const Operand& operand); + + // Add and update status flags. + void adds(const Register& rd, + const Register& rn, + const Operand& operand); + + // Compare negative. + void cmn(const Register& rn, const Operand& operand); + + // Subtract. + void sub(const Register& rd, + const Register& rn, + const Operand& operand); + + // Subtract and update status flags. + void subs(const Register& rd, + const Register& rn, + const Operand& operand); + + // Compare. + void cmp(const Register& rn, const Operand& operand); + + // Negate. + void neg(const Register& rd, + const Operand& operand); + + // Negate and update status flags. + void negs(const Register& rd, + const Operand& operand); + + // Add with carry bit. + void adc(const Register& rd, + const Register& rn, + const Operand& operand); + + // Add with carry bit and update status flags. + void adcs(const Register& rd, + const Register& rn, + const Operand& operand); + + // Subtract with carry bit. + void sbc(const Register& rd, + const Register& rn, + const Operand& operand); + + // Subtract with carry bit and update status flags. + void sbcs(const Register& rd, + const Register& rn, + const Operand& operand); + + // Negate with carry bit. + void ngc(const Register& rd, + const Operand& operand); + + // Negate with carry bit and update status flags. + void ngcs(const Register& rd, + const Operand& operand); + + // Logical instructions. + // Bitwise and (A & B). + void and_(const Register& rd, + const Register& rn, + const Operand& operand); + + // Bitwise and (A & B) and update status flags. + void ands(const Register& rd, + const Register& rn, + const Operand& operand); + + // Bit test and set flags. + void tst(const Register& rn, const Operand& operand); + + // Bit clear (A & ~B). + void bic(const Register& rd, + const Register& rn, + const Operand& operand); + + // Bit clear (A & ~B) and update status flags. + void bics(const Register& rd, + const Register& rn, + const Operand& operand); + + // Bitwise or (A | B). + void orr(const Register& rd, const Register& rn, const Operand& operand); + + // Bitwise nor (A | ~B). + void orn(const Register& rd, const Register& rn, const Operand& operand); + + // Bitwise eor/xor (A ^ B). + void eor(const Register& rd, const Register& rn, const Operand& operand); + + // Bitwise enor/xnor (A ^ ~B). + void eon(const Register& rd, const Register& rn, const Operand& operand); + + // Logical shift left by variable. + void lslv(const Register& rd, const Register& rn, const Register& rm); + + // Logical shift right by variable. + void lsrv(const Register& rd, const Register& rn, const Register& rm); + + // Arithmetic shift right by variable. + void asrv(const Register& rd, const Register& rn, const Register& rm); + + // Rotate right by variable. + void rorv(const Register& rd, const Register& rn, const Register& rm); + + // Bitfield instructions. + // Bitfield move. + void bfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms); + + // Signed bitfield move. + void sbfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms); + + // Unsigned bitfield move. + void ubfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms); + + // Bfm aliases. + // Bitfield insert. 
+ void bfi(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(width >= 1); + VIXL_ASSERT(lsb + width <= rn.size()); + bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1); + } + + // Bitfield extract and insert low. + void bfxil(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(width >= 1); + VIXL_ASSERT(lsb + width <= rn.size()); + bfm(rd, rn, lsb, lsb + width - 1); + } + + // Sbfm aliases. + // Arithmetic shift right. + void asr(const Register& rd, const Register& rn, unsigned shift) { + VIXL_ASSERT(shift < rd.size()); + sbfm(rd, rn, shift, rd.size() - 1); + } + + // Signed bitfield insert with zero at right. + void sbfiz(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(width >= 1); + VIXL_ASSERT(lsb + width <= rn.size()); + sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1); + } + + // Signed bitfield extract. + void sbfx(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(width >= 1); + VIXL_ASSERT(lsb + width <= rn.size()); + sbfm(rd, rn, lsb, lsb + width - 1); + } + + // Signed extend byte. + void sxtb(const Register& rd, const Register& rn) { + sbfm(rd, rn, 0, 7); + } + + // Signed extend halfword. + void sxth(const Register& rd, const Register& rn) { + sbfm(rd, rn, 0, 15); + } + + // Signed extend word. + void sxtw(const Register& rd, const Register& rn) { + sbfm(rd, rn, 0, 31); + } + + // Ubfm aliases. + // Logical shift left. + void lsl(const Register& rd, const Register& rn, unsigned shift) { + unsigned reg_size = rd.size(); + VIXL_ASSERT(shift < reg_size); + ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1); + } + + // Logical shift right. + void lsr(const Register& rd, const Register& rn, unsigned shift) { + VIXL_ASSERT(shift < rd.size()); + ubfm(rd, rn, shift, rd.size() - 1); + } + + // Unsigned bitfield insert with zero at right. + void ubfiz(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(width >= 1); + VIXL_ASSERT(lsb + width <= rn.size()); + ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1); + } + + // Unsigned bitfield extract. + void ubfx(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(width >= 1); + VIXL_ASSERT(lsb + width <= rn.size()); + ubfm(rd, rn, lsb, lsb + width - 1); + } + + // Unsigned extend byte. + void uxtb(const Register& rd, const Register& rn) { + ubfm(rd, rn, 0, 7); + } + + // Unsigned extend halfword. + void uxth(const Register& rd, const Register& rn) { + ubfm(rd, rn, 0, 15); + } + + // Unsigned extend word. + void uxtw(const Register& rd, const Register& rn) { + ubfm(rd, rn, 0, 31); + } + + // Extract. + void extr(const Register& rd, + const Register& rn, + const Register& rm, + unsigned lsb); + + // Conditional select: rd = cond ? rn : rm. + void csel(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond); + + // Conditional select increment: rd = cond ? rn : rm + 1. + void csinc(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond); + + // Conditional select inversion: rd = cond ? rn : ~rm. + void csinv(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond); + + // Conditional select negation: rd = cond ? rn : -rm. + void csneg(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond); + + // Conditional set: rd = cond ? 1 : 0. 
+ void cset(const Register& rd, Condition cond); + + // Conditional set mask: rd = cond ? -1 : 0. + void csetm(const Register& rd, Condition cond); + + // Conditional increment: rd = cond ? rn + 1 : rn. + void cinc(const Register& rd, const Register& rn, Condition cond); + + // Conditional invert: rd = cond ? ~rn : rn. + void cinv(const Register& rd, const Register& rn, Condition cond); + + // Conditional negate: rd = cond ? -rn : rn. + void cneg(const Register& rd, const Register& rn, Condition cond); + + // Rotate right. + void ror(const Register& rd, const Register& rs, unsigned shift) { + extr(rd, rs, rs, shift); + } + + // Conditional comparison. + // Conditional compare negative. + void ccmn(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond); + + // Conditional compare. + void ccmp(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond); + + // Multiply. + void mul(const Register& rd, const Register& rn, const Register& rm); + + // Negated multiply. + void mneg(const Register& rd, const Register& rn, const Register& rm); + + // Signed long multiply: 32 x 32 -> 64-bit. + void smull(const Register& rd, const Register& rn, const Register& rm); + + // Signed multiply high: 64 x 64 -> 64-bit <127:64>. + void smulh(const Register& xd, const Register& xn, const Register& xm); + + // Multiply and accumulate. + void madd(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra); + + // Multiply and subtract. + void msub(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra); + + // Signed long multiply and accumulate: 32 x 32 + 64 -> 64-bit. + void smaddl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra); + + // Unsigned long multiply and accumulate: 32 x 32 + 64 -> 64-bit. + void umaddl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra); + + // Signed long multiply and subtract: 64 - (32 x 32) -> 64-bit. + void smsubl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra); + + // Unsigned long multiply and subtract: 64 - (32 x 32) -> 64-bit. + void umsubl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra); + + // Signed integer divide. + void sdiv(const Register& rd, const Register& rn, const Register& rm); + + // Unsigned integer divide. + void udiv(const Register& rd, const Register& rn, const Register& rm); + + // Bit reverse. + void rbit(const Register& rd, const Register& rn); + + // Reverse bytes in 16-bit half words. + void rev16(const Register& rd, const Register& rn); + + // Reverse bytes in 32-bit words. + void rev32(const Register& rd, const Register& rn); + + // Reverse bytes. + void rev(const Register& rd, const Register& rn); + + // Count leading zeroes. + void clz(const Register& rd, const Register& rn); + + // Count leading sign bits. + void cls(const Register& rd, const Register& rn); + + // Memory instructions. + // Load integer or FP register. + void ldr(const CPURegister& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Store integer or FP register. + void str(const CPURegister& rt, const MemOperand& dst, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load word with sign extension. + void ldrsw(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load byte. 
+ void ldrb(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Store byte. + void strb(const Register& rt, const MemOperand& dst, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load byte with sign extension. + void ldrsb(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load half-word. + void ldrh(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Store half-word. + void strh(const Register& rt, const MemOperand& dst, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load half-word with sign extension. + void ldrsh(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load integer or FP register (with unscaled offset). + void ldur(const CPURegister& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Store integer or FP register (with unscaled offset). + void stur(const CPURegister& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load word with sign extension. + void ldursw(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load byte (with unscaled offset). + void ldurb(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Store byte (with unscaled offset). + void sturb(const Register& rt, const MemOperand& dst, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load byte with sign extension (and unscaled offset). + void ldursb(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load half-word (with unscaled offset). + void ldurh(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Store half-word (with unscaled offset). + void sturh(const Register& rt, const MemOperand& dst, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load half-word with sign extension (and unscaled offset). + void ldursh(const Register& rt, const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load integer or FP register pair. + void ldp(const CPURegister& rt, const CPURegister& rt2, + const MemOperand& src); + + // Store integer or FP register pair. + void stp(const CPURegister& rt, const CPURegister& rt2, + const MemOperand& dst); + + // Load word pair with sign extension. + void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src); + + // Load integer or FP register pair, non-temporal. + void ldnp(const CPURegister& rt, const CPURegister& rt2, + const MemOperand& src); + + // Store integer or FP register pair, non-temporal. + void stnp(const CPURegister& rt, const CPURegister& rt2, + const MemOperand& dst); + + // Load integer or FP register from literal pool. + void ldr(const CPURegister& rt, RawLiteral* literal); + + // Load word with sign extension from literal pool. + void ldrsw(const Register& rt, RawLiteral* literal); + + // Load integer or FP register from pc + imm19 << 2. + void ldr(const CPURegister& rt, int imm19); + + // Load word with sign extension from pc + imm19 << 2. + void ldrsw(const Register& rt, int imm19); + + // Store exclusive byte. + void stxrb(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store exclusive half-word. 
+ void stxrh(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store exclusive register. + void stxr(const Register& rs, const Register& rt, const MemOperand& dst); + + // Load exclusive byte. + void ldxrb(const Register& rt, const MemOperand& src); + + // Load exclusive half-word. + void ldxrh(const Register& rt, const MemOperand& src); + + // Load exclusive register. + void ldxr(const Register& rt, const MemOperand& src); + + // Store exclusive register pair. + void stxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst); + + // Load exclusive register pair. + void ldxp(const Register& rt, const Register& rt2, const MemOperand& src); + + // Store-release exclusive byte. + void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store-release exclusive half-word. + void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store-release exclusive register. + void stlxr(const Register& rs, const Register& rt, const MemOperand& dst); + + // Load-acquire exclusive byte. + void ldaxrb(const Register& rt, const MemOperand& src); + + // Load-acquire exclusive half-word. + void ldaxrh(const Register& rt, const MemOperand& src); + + // Load-acquire exclusive register. + void ldaxr(const Register& rt, const MemOperand& src); + + // Store-release exclusive register pair. + void stlxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst); + + // Load-acquire exclusive register pair. + void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src); + + // Store-release byte. + void stlrb(const Register& rt, const MemOperand& dst); + + // Store-release half-word. + void stlrh(const Register& rt, const MemOperand& dst); + + // Store-release register. + void stlr(const Register& rt, const MemOperand& dst); + + // Load-acquire byte. + void ldarb(const Register& rt, const MemOperand& src); + + // Load-acquire half-word. + void ldarh(const Register& rt, const MemOperand& src); + + // Load-acquire register. + void ldar(const Register& rt, const MemOperand& src); + + // Prefetch memory. + void prfm(PrefetchOperation op, const MemOperand& addr, + LoadStoreScalingOption option = PreferScaledOffset); + + // Prefetch memory (with unscaled offset). + void prfum(PrefetchOperation op, const MemOperand& addr, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Prefetch memory in the literal pool. + void prfm(PrefetchOperation op, RawLiteral* literal); + + // Prefetch from pc + imm19 << 2. + void prfm(PrefetchOperation op, int imm19); + + // Move instructions. The default shift of -1 indicates that the move + // instruction will calculate an appropriate 16-bit immediate and left shift + // that is equal to the 64-bit immediate argument. If an explicit left shift + // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value. + // + // For movk, an explicit shift can be used to indicate which half word should + // be overwritten, eg. movk(x0, 0, 0) will overwrite the least-significant + // half word with zero, whereas movk(x0, 0, 48) will overwrite the + // most-significant. + + // Move immediate and keep. + void movk(const Register& rd, uint64_t imm, int shift = -1) { + MoveWide(rd, imm, shift, MOVK); + } + + // Move inverted immediate. + void movn(const Register& rd, uint64_t imm, int shift = -1) { + MoveWide(rd, imm, shift, MOVN); + } + + // Move immediate. 
+ void movz(const Register& rd, uint64_t imm, int shift = -1) { + MoveWide(rd, imm, shift, MOVZ); + } + + // Misc instructions. + // Monitor debug-mode breakpoint. + void brk(int code); + + // Halting debug-mode breakpoint. + void hlt(int code); + + // Move register to register. + void mov(const Register& rd, const Register& rn); + + // Move inverted operand to register. + void mvn(const Register& rd, const Operand& operand); + + // System instructions. + // Move to register from system register. + void mrs(const Register& rt, SystemRegister sysreg); + + // Move from register to system register. + void msr(SystemRegister sysreg, const Register& rt); + + // System hint. + void hint(SystemHint code); + + // Clear exclusive monitor. + void clrex(int imm4 = 0xf); + + // Data memory barrier. + void dmb(BarrierDomain domain, BarrierType type); + + // Data synchronization barrier. + void dsb(BarrierDomain domain, BarrierType type); + + // Instruction synchronization barrier. + void isb(); + + // Alias for system instructions. + // No-op. + void nop() { + hint(NOP); + } + + // FP instructions. + // Move double precision immediate to FP register. + void fmov(const FPRegister& fd, double imm); + + // Move single precision immediate to FP register. + void fmov(const FPRegister& fd, float imm); + + // Move FP register to register. + void fmov(const Register& rd, const FPRegister& fn); + + // Move register to FP register. + void fmov(const FPRegister& fd, const Register& rn); + + // Move FP register to FP register. + void fmov(const FPRegister& fd, const FPRegister& fn); + + // FP add. + void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + + // FP subtract. + void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + + // FP multiply. + void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + + // FP fused multiply and add. + void fmadd(const FPRegister& fd, + const FPRegister& fn, + const FPRegister& fm, + const FPRegister& fa); + + // FP fused multiply and subtract. + void fmsub(const FPRegister& fd, + const FPRegister& fn, + const FPRegister& fm, + const FPRegister& fa); + + // FP fused multiply, add and negate. + void fnmadd(const FPRegister& fd, + const FPRegister& fn, + const FPRegister& fm, + const FPRegister& fa); + + // FP fused multiply, subtract and negate. + void fnmsub(const FPRegister& fd, + const FPRegister& fn, + const FPRegister& fm, + const FPRegister& fa); + + // FP divide. + void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + + // FP maximum. + void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + + // FP minimum. + void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + + // FP maximum number. + void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + + // FP minimum number. + void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + + // FP absolute. + void fabs(const FPRegister& fd, const FPRegister& fn); + + // FP negate. + void fneg(const FPRegister& fd, const FPRegister& fn); + + // FP square root. + void fsqrt(const FPRegister& fd, const FPRegister& fn); + + // FP round to integer (nearest with ties to away). + void frinta(const FPRegister& fd, const FPRegister& fn); + + // FP round to integer (implicit rounding). + void frinti(const FPRegister& fd, const FPRegister& fn); + + // FP round to integer (toward minus infinity). 
+ void frintm(const FPRegister& fd, const FPRegister& fn); + + // FP round to integer (nearest with ties to even). + void frintn(const FPRegister& fd, const FPRegister& fn); + + // FP round to integer (toward plus infinity). + void frintp(const FPRegister& fd, const FPRegister& fn); + + // FP round to integer (exact, implicit rounding). + void frintx(const FPRegister& fd, const FPRegister& fn); + + // FP round to integer (towards zero). + void frintz(const FPRegister& fd, const FPRegister& fn); + + // FP compare registers. + void fcmp(const FPRegister& fn, const FPRegister& fm); + + // FP compare immediate. + void fcmp(const FPRegister& fn, double value); + + // FP conditional compare. + void fccmp(const FPRegister& fn, + const FPRegister& fm, + StatusFlags nzcv, + Condition cond); + + // FP conditional select. + void fcsel(const FPRegister& fd, + const FPRegister& fn, + const FPRegister& fm, + Condition cond); + + // Common FP Convert function. + void FPConvertToInt(const Register& rd, + const FPRegister& fn, + FPIntegerConvertOp op); + + // FP convert between single and double precision. + void fcvt(const FPRegister& fd, const FPRegister& fn); + + // Convert FP to signed integer (nearest with ties to away). + void fcvtas(const Register& rd, const FPRegister& fn); + + // Convert FP to unsigned integer (nearest with ties to away). + void fcvtau(const Register& rd, const FPRegister& fn); + + // Convert FP to signed integer (round towards -infinity). + void fcvtms(const Register& rd, const FPRegister& fn); + + // Convert FP to unsigned integer (round towards -infinity). + void fcvtmu(const Register& rd, const FPRegister& fn); + + // Convert FP to signed integer (nearest with ties to even). + void fcvtns(const Register& rd, const FPRegister& fn); + + // Convert FP to unsigned integer (nearest with ties to even). + void fcvtnu(const Register& rd, const FPRegister& fn); + + // Convert FP to signed integer (round towards zero). + void fcvtzs(const Register& rd, const FPRegister& fn); + + // Convert FP to unsigned integer (round towards zero). + void fcvtzu(const Register& rd, const FPRegister& fn); + + // Convert signed integer or fixed point to FP. + void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0); + + // Convert unsigned integer or fixed point to FP. + void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0); + + // Emit generic instructions. + // Emit raw instructions into the instruction stream. + void dci(Instr raw_inst) { Emit(raw_inst); } + + // Emit 32 bits of data into the instruction stream. + void dc32(uint32_t data) { + VIXL_ASSERT(buffer_monitor_ > 0); + buffer_->Emit32(data); + } + + // Emit 64 bits of data into the instruction stream. + void dc64(uint64_t data) { + VIXL_ASSERT(buffer_monitor_ > 0); + buffer_->Emit64(data); + } + + // Copy a string into the instruction stream, including the terminating NULL + // character. The instruction pointer is then aligned correctly for + // subsequent instructions. + void EmitString(const char * string) { + VIXL_ASSERT(string != NULL); + VIXL_ASSERT(buffer_monitor_ > 0); + + buffer_->EmitString(string); + buffer_->Align(); + } + + // Code generation helpers. + + // Register encoding. 
+ static Instr Rd(CPURegister rd) { + VIXL_ASSERT(rd.code() != kSPRegInternalCode); + return rd.code() << Rd_offset; + } + + static Instr Rn(CPURegister rn) { + VIXL_ASSERT(rn.code() != kSPRegInternalCode); + return rn.code() << Rn_offset; + } + + static Instr Rm(CPURegister rm) { + VIXL_ASSERT(rm.code() != kSPRegInternalCode); + return rm.code() << Rm_offset; + } + + static Instr Ra(CPURegister ra) { + VIXL_ASSERT(ra.code() != kSPRegInternalCode); + return ra.code() << Ra_offset; + } + + static Instr Rt(CPURegister rt) { + VIXL_ASSERT(rt.code() != kSPRegInternalCode); + return rt.code() << Rt_offset; + } + + static Instr Rt2(CPURegister rt2) { + VIXL_ASSERT(rt2.code() != kSPRegInternalCode); + return rt2.code() << Rt2_offset; + } + + static Instr Rs(CPURegister rs) { + VIXL_ASSERT(rs.code() != kSPRegInternalCode); + return rs.code() << Rs_offset; + } + + // These encoding functions allow the stack pointer to be encoded, and + // disallow the zero register. + static Instr RdSP(Register rd) { + VIXL_ASSERT(!rd.IsZero()); + return (rd.code() & kRegCodeMask) << Rd_offset; + } + + static Instr RnSP(Register rn) { + VIXL_ASSERT(!rn.IsZero()); + return (rn.code() & kRegCodeMask) << Rn_offset; + } + + // Flags encoding. + static Instr Flags(FlagsUpdate S) { + if (S == SetFlags) { + return 1 << FlagsUpdate_offset; + } else if (S == LeaveFlags) { + return 0 << FlagsUpdate_offset; + } + VIXL_UNREACHABLE(); + return 0; + } + + static Instr Cond(Condition cond) { + return cond << Condition_offset; + } + + // PC-relative address encoding. + static Instr ImmPCRelAddress(int imm21) { + VIXL_ASSERT(is_int21(imm21)); + Instr imm = static_cast(truncate_to_int21(imm21)); + Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset; + Instr immlo = imm << ImmPCRelLo_offset; + return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask); + } + + // Branch encoding. + static Instr ImmUncondBranch(int imm26) { + VIXL_ASSERT(is_int26(imm26)); + return truncate_to_int26(imm26) << ImmUncondBranch_offset; + } + + static Instr ImmCondBranch(int imm19) { + VIXL_ASSERT(is_int19(imm19)); + return truncate_to_int19(imm19) << ImmCondBranch_offset; + } + + static Instr ImmCmpBranch(int imm19) { + VIXL_ASSERT(is_int19(imm19)); + return truncate_to_int19(imm19) << ImmCmpBranch_offset; + } + + static Instr ImmTestBranch(int imm14) { + VIXL_ASSERT(is_int14(imm14)); + return truncate_to_int14(imm14) << ImmTestBranch_offset; + } + + static Instr ImmTestBranchBit(unsigned bit_pos) { + VIXL_ASSERT(is_uint6(bit_pos)); + // Subtract five from the shift offset, as we need bit 5 from bit_pos. + unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5); + unsigned b40 = bit_pos << ImmTestBranchBit40_offset; + b5 &= ImmTestBranchBit5_mask; + b40 &= ImmTestBranchBit40_mask; + return b5 | b40; + } + + // Data Processing encoding. + static Instr SF(Register rd) { + return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits; + } + + static Instr ImmAddSub(int64_t imm) { + VIXL_ASSERT(IsImmAddSub(imm)); + if (is_uint12(imm)) { // No shift required. 
+ return imm << ImmAddSub_offset; + } else { + return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset); + } + } + + static Instr ImmS(unsigned imms, unsigned reg_size) { + VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) || + ((reg_size == kWRegSize) && is_uint5(imms))); + USE(reg_size); + return imms << ImmS_offset; + } + + static Instr ImmR(unsigned immr, unsigned reg_size) { + VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) || + ((reg_size == kWRegSize) && is_uint5(immr))); + USE(reg_size); + VIXL_ASSERT(is_uint6(immr)); + return immr << ImmR_offset; + } + + static Instr ImmSetBits(unsigned imms, unsigned reg_size) { + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + VIXL_ASSERT(is_uint6(imms)); + VIXL_ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3)); + USE(reg_size); + return imms << ImmSetBits_offset; + } + + static Instr ImmRotate(unsigned immr, unsigned reg_size) { + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) || + ((reg_size == kWRegSize) && is_uint5(immr))); + USE(reg_size); + return immr << ImmRotate_offset; + } + + static Instr ImmLLiteral(int imm19) { + VIXL_ASSERT(is_int19(imm19)); + return truncate_to_int19(imm19) << ImmLLiteral_offset; + } + + static Instr BitN(unsigned bitn, unsigned reg_size) { + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0)); + USE(reg_size); + return bitn << BitN_offset; + } + + static Instr ShiftDP(Shift shift) { + VIXL_ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR); + return shift << ShiftDP_offset; + } + + static Instr ImmDPShift(unsigned amount) { + VIXL_ASSERT(is_uint6(amount)); + return amount << ImmDPShift_offset; + } + + static Instr ExtendMode(Extend extend) { + return extend << ExtendMode_offset; + } + + static Instr ImmExtendShift(unsigned left_shift) { + VIXL_ASSERT(left_shift <= 4); + return left_shift << ImmExtendShift_offset; + } + + static Instr ImmCondCmp(unsigned imm) { + VIXL_ASSERT(is_uint5(imm)); + return imm << ImmCondCmp_offset; + } + + static Instr Nzcv(StatusFlags nzcv) { + return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset; + } + + // MemOperand offset encoding. 
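+  // Illustrative example (not from the upstream libvixl sources): the helpers
+  // below cover the three load/store offset forms. Assuming LSDataSize holds
+  // the log2 of the access size in bytes (3 for an X register):
+  //
+  //   ImmLSUnsigned(imm12) places an already-scaled unsigned 12-bit offset;
+  //   ImmLS(imm9) places a signed 9-bit byte offset (unscaled/pre/post);
+  //   ImmLSPair(32, 3) scales the byte offset to 32 >> 3 == 4 before placing
+  //   it in the 7-bit field used by LDP/STP of X registers.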
+ static Instr ImmLSUnsigned(int imm12) { + VIXL_ASSERT(is_uint12(imm12)); + return imm12 << ImmLSUnsigned_offset; + } + + static Instr ImmLS(int imm9) { + VIXL_ASSERT(is_int9(imm9)); + return truncate_to_int9(imm9) << ImmLS_offset; + } + + static Instr ImmLSPair(int imm7, LSDataSize size) { + VIXL_ASSERT(((imm7 >> size) << size) == imm7); + int scaled_imm7 = imm7 >> size; + VIXL_ASSERT(is_int7(scaled_imm7)); + return truncate_to_int7(scaled_imm7) << ImmLSPair_offset; + } + + static Instr ImmShiftLS(unsigned shift_amount) { + VIXL_ASSERT(is_uint1(shift_amount)); + return shift_amount << ImmShiftLS_offset; + } + + static Instr ImmPrefetchOperation(int imm5) { + VIXL_ASSERT(is_uint5(imm5)); + return imm5 << ImmPrefetchOperation_offset; + } + + static Instr ImmException(int imm16) { + VIXL_ASSERT(is_uint16(imm16)); + return imm16 << ImmException_offset; + } + + static Instr ImmSystemRegister(int imm15) { + VIXL_ASSERT(is_uint15(imm15)); + return imm15 << ImmSystemRegister_offset; + } + + static Instr ImmHint(int imm7) { + VIXL_ASSERT(is_uint7(imm7)); + return imm7 << ImmHint_offset; + } + + static Instr CRm(int imm4) { + VIXL_ASSERT(is_uint4(imm4)); + return imm4 << CRm_offset; + } + + static Instr ImmBarrierDomain(int imm2) { + VIXL_ASSERT(is_uint2(imm2)); + return imm2 << ImmBarrierDomain_offset; + } + + static Instr ImmBarrierType(int imm2) { + VIXL_ASSERT(is_uint2(imm2)); + return imm2 << ImmBarrierType_offset; + } + + static LSDataSize CalcLSDataSize(LoadStoreOp op) { + VIXL_ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8)); + return static_cast(op >> SizeLS_offset); + } + + // Move immediates encoding. + static Instr ImmMoveWide(uint64_t imm) { + VIXL_ASSERT(is_uint16(imm)); + return imm << ImmMoveWide_offset; + } + + static Instr ShiftMoveWide(int64_t shift) { + VIXL_ASSERT(is_uint2(shift)); + return shift << ShiftMoveWide_offset; + } + + // FP Immediates. + static Instr ImmFP32(float imm); + static Instr ImmFP64(double imm); + + // FP register type. + static Instr FPType(FPRegister fd) { + return fd.Is64Bits() ? FP64 : FP32; + } + + static Instr FPScale(unsigned scale) { + VIXL_ASSERT(is_uint6(scale)); + return scale << FPScale_offset; + } + + // Immediate field checking helpers. + static bool IsImmAddSub(int64_t immediate); + static bool IsImmConditionalCompare(int64_t immediate); + static bool IsImmFP32(float imm); + static bool IsImmFP64(double imm); + static bool IsImmLogical(uint64_t value, + unsigned width, + unsigned* n = NULL, + unsigned* imm_s = NULL, + unsigned* imm_r = NULL); + static bool IsImmLSPair(int64_t offset, LSDataSize size); + static bool IsImmLSScaled(int64_t offset, LSDataSize size); + static bool IsImmLSUnscaled(int64_t offset); + static bool IsImmMovn(uint64_t imm, unsigned reg_size); + static bool IsImmMovz(uint64_t imm, unsigned reg_size); + + // Size of the code generated since label to the current position. + size_t SizeOfCodeGeneratedSince(Label* label) const { + VIXL_ASSERT(label->IsBound()); + return buffer_->OffsetFrom(label->location()); + } + + size_t SizeOfCodeGenerated() const { + return buffer_->CursorOffset(); + } + + size_t BufferCapacity() const { return buffer_->capacity(); } + + size_t RemainingBufferSpace() const { return buffer_->RemainingBytes(); } + + void EnsureSpaceFor(size_t amount) { + if (buffer_->RemainingBytes() < amount) { + size_t capacity = buffer_->capacity(); + size_t size = buffer_->CursorOffset(); + do { + // TODO(all): refine. 
+ capacity *= 2; + } while ((capacity - size) < amount); + buffer_->Grow(capacity); + } + } + +#ifdef VIXL_DEBUG + void AcquireBuffer() { + VIXL_ASSERT(buffer_monitor_ >= 0); + buffer_monitor_++; + } + + void ReleaseBuffer() { + buffer_monitor_--; + VIXL_ASSERT(buffer_monitor_ >= 0); + } +#endif + + PositionIndependentCodeOption pic() const { + return pic_; + } + + bool AllowPageOffsetDependentCode() const { + return (pic() == PageOffsetDependentCode) || + (pic() == PositionDependentCode); + } + + static const Register& AppropriateZeroRegFor(const CPURegister& reg) { + return reg.Is64Bits() ? xzr : wzr; + } + + + protected: + void LoadStore(const CPURegister& rt, + const MemOperand& addr, + LoadStoreOp op, + LoadStoreScalingOption option = PreferScaledOffset); + + void LoadStorePair(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairOp op); + + void Prefetch(PrefetchOperation op, + const MemOperand& addr, + LoadStoreScalingOption option = PreferScaledOffset); + + // TODO(all): The third parameter should be passed by reference but gcc 4.8.2 + // reports a bogus uninitialised warning then. + void Logical(const Register& rd, + const Register& rn, + const Operand operand, + LogicalOp op); + void LogicalImmediate(const Register& rd, + const Register& rn, + unsigned n, + unsigned imm_s, + unsigned imm_r, + LogicalOp op); + + void ConditionalCompare(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond, + ConditionalCompareOp op); + + void AddSubWithCarry(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubWithCarryOp op); + + + // Functions for emulating operands not directly supported by the instruction + // set. + void EmitShift(const Register& rd, + const Register& rn, + Shift shift, + unsigned amount); + void EmitExtendShift(const Register& rd, + const Register& rn, + Extend extend, + unsigned left_shift); + + void AddSub(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubOp op); + + // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified + // registers. Only simple loads are supported; sign- and zero-extension (such + // as in LDPSW_x or LDRB_w) are not supported. + static LoadStoreOp LoadOpFor(const CPURegister& rt); + static LoadStorePairOp LoadPairOpFor(const CPURegister& rt, + const CPURegister& rt2); + static LoadStoreOp StoreOpFor(const CPURegister& rt); + static LoadStorePairOp StorePairOpFor(const CPURegister& rt, + const CPURegister& rt2); + static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor( + const CPURegister& rt, const CPURegister& rt2); + static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor( + const CPURegister& rt, const CPURegister& rt2); + static LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt); + + + private: + // Instruction helpers. 
+ void MoveWide(const Register& rd, + uint64_t imm, + int shift, + MoveWideImmediateOp mov_op); + void DataProcShiftedRegister(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + Instr op); + void DataProcExtendedRegister(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + Instr op); + void LoadStorePairNonTemporal(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairNonTemporalOp op); + void LoadLiteral(const CPURegister& rt, uint64_t imm, LoadLiteralOp op); + void ConditionalSelect(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond, + ConditionalSelectOp op); + void DataProcessing1Source(const Register& rd, + const Register& rn, + DataProcessing1SourceOp op); + void DataProcessing3Source(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra, + DataProcessing3SourceOp op); + void FPDataProcessing1Source(const FPRegister& fd, + const FPRegister& fn, + FPDataProcessing1SourceOp op); + void FPDataProcessing2Source(const FPRegister& fd, + const FPRegister& fn, + const FPRegister& fm, + FPDataProcessing2SourceOp op); + void FPDataProcessing3Source(const FPRegister& fd, + const FPRegister& fn, + const FPRegister& fm, + const FPRegister& fa, + FPDataProcessing3SourceOp op); + + // Encode the specified MemOperand for the specified access size and scaling + // preference. + Instr LoadStoreMemOperand(const MemOperand& addr, + LSDataSize size, + LoadStoreScalingOption option); + + // Link the current (not-yet-emitted) instruction to the specified label, then + // return an offset to be encoded in the instruction. If the label is not yet + // bound, an offset of 0 is returned. + ptrdiff_t LinkAndGetByteOffsetTo(Label * label); + ptrdiff_t LinkAndGetInstructionOffsetTo(Label * label); + ptrdiff_t LinkAndGetPageOffsetTo(Label * label); + + // A common implementation for the LinkAndGetOffsetTo helpers. + template + ptrdiff_t LinkAndGetOffsetTo(Label* label); + + // Literal load offset are in words (32-bit). + ptrdiff_t LinkAndGetWordOffsetTo(RawLiteral* literal); + + // Emit the instruction in buffer_. + void Emit(Instr instruction) { + VIXL_STATIC_ASSERT(sizeof(instruction) == kInstructionSize); + VIXL_ASSERT(buffer_monitor_ > 0); + buffer_->Emit32(instruction); + } + + // Buffer where the code is emitted. + CodeBuffer* buffer_; + PositionIndependentCodeOption pic_; + +#ifdef VIXL_DEBUG + int64_t buffer_monitor_; +#endif +}; + + +// All Assembler emits MUST acquire/release the underlying code buffer. The +// helper scope below will do so and optionally ensure the buffer is big enough +// to receive the emit. It is possible to request the scope not to perform any +// checks (kNoCheck) if for example it is known in advance the buffer size is +// adequate or there is some other size checking mechanism in place. +class CodeBufferCheckScope { + public: + // Tell whether or not the scope needs to ensure the associated CodeBuffer + // has enough space for the requested size. + enum CheckPolicy { + kNoCheck, + kCheck + }; + + // Tell whether or not the scope should assert the amount of code emitted + // within the scope is consistent with the requested amount. + enum AssertPolicy { + kNoAssert, // No assert required. + kExactSize, // The code emitted must be exactly size bytes. + kMaximumSize // The code emitted must be at most size bytes. 
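+    // Illustrative example (not from the upstream libvixl sources): a typical
+    // use of this scope, assuming an Assembler `masm` and the usual add/ret
+    // emitters declared earlier in this header, would be:
+    //
+    //   {
+    //     CodeBufferCheckScope scope(&masm, 2 * kInstructionSize,
+    //                                CodeBufferCheckScope::kCheck,
+    //                                CodeBufferCheckScope::kExactSize);
+    //     masm.add(x0, x0, Operand(1));
+    //     masm.ret();
+    //   }  // The destructor asserts exactly 8 bytes were emitted.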
+ }; + + CodeBufferCheckScope(Assembler* assm, + size_t size, + CheckPolicy check_policy = kCheck, + AssertPolicy assert_policy = kMaximumSize) + : assm_(assm) { + if (check_policy == kCheck) assm->EnsureSpaceFor(size); +#ifdef VIXL_DEBUG + assm->bind(&start_); + size_ = size; + assert_policy_ = assert_policy; + assm->AcquireBuffer(); +#else + USE(assert_policy); +#endif + } + + // This is a shortcut for CodeBufferCheckScope(assm, 0, kNoCheck, kNoAssert). + explicit CodeBufferCheckScope(Assembler* assm) : assm_(assm) { +#ifdef VIXL_DEBUG + size_ = 0; + assert_policy_ = kNoAssert; + assm->AcquireBuffer(); +#endif + } + + ~CodeBufferCheckScope() { +#ifdef VIXL_DEBUG + assm_->ReleaseBuffer(); + switch (assert_policy_) { + case kNoAssert: break; + case kExactSize: + VIXL_ASSERT(assm_->SizeOfCodeGeneratedSince(&start_) == size_); + break; + case kMaximumSize: + VIXL_ASSERT(assm_->SizeOfCodeGeneratedSince(&start_) <= size_); + break; + default: + VIXL_UNREACHABLE(); + } +#endif + } + + protected: + Assembler* assm_; +#ifdef VIXL_DEBUG + Label start_; + size_t size_; + AssertPolicy assert_policy_; +#endif +}; + +} // namespace vixl + +#endif // VIXL_A64_ASSEMBLER_A64_H_ diff --git a/qemu/disas/libvixl/a64/constants-a64.h b/qemu/disas/libvixl/a64/constants-a64.h new file mode 100644 index 000000000..bc1a2c4b9 --- /dev/null +++ b/qemu/disas/libvixl/a64/constants-a64.h @@ -0,0 +1,1213 @@ +// Copyright 2013, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_A64_CONSTANTS_A64_H_ +#define VIXL_A64_CONSTANTS_A64_H_ + +namespace vixl { + +const unsigned kNumberOfRegisters = 32; +const unsigned kNumberOfFPRegisters = 32; + +#define REGISTER_CODE_LIST(R) \ +R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ +R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \ +R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \ +R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31) + +#define INSTRUCTION_FIELDS_LIST(V_) \ +/* Register fields */ \ +V_(Rd, 4, 0, Bits) /* Destination register. */ \ +V_(Rn, 9, 5, Bits) /* First source register. 
*/ \ +V_(Rm, 20, 16, Bits) /* Second source register. */ \ +V_(Ra, 14, 10, Bits) /* Third source register. */ \ +V_(Rt, 4, 0, Bits) /* Load/store register. */ \ +V_(Rt2, 14, 10, Bits) /* Load/store second register. */ \ +V_(Rs, 20, 16, Bits) /* Exclusive access status. */ \ + \ +/* Common bits */ \ +V_(SixtyFourBits, 31, 31, Bits) \ +V_(FlagsUpdate, 29, 29, Bits) \ + \ +/* PC relative addressing */ \ +V_(ImmPCRelHi, 23, 5, SignedBits) \ +V_(ImmPCRelLo, 30, 29, Bits) \ + \ +/* Add/subtract/logical shift register */ \ +V_(ShiftDP, 23, 22, Bits) \ +V_(ImmDPShift, 15, 10, Bits) \ + \ +/* Add/subtract immediate */ \ +V_(ImmAddSub, 21, 10, Bits) \ +V_(ShiftAddSub, 23, 22, Bits) \ + \ +/* Add/substract extend */ \ +V_(ImmExtendShift, 12, 10, Bits) \ +V_(ExtendMode, 15, 13, Bits) \ + \ +/* Move wide */ \ +V_(ImmMoveWide, 20, 5, Bits) \ +V_(ShiftMoveWide, 22, 21, Bits) \ + \ +/* Logical immediate, bitfield and extract */ \ +V_(BitN, 22, 22, Bits) \ +V_(ImmRotate, 21, 16, Bits) \ +V_(ImmSetBits, 15, 10, Bits) \ +V_(ImmR, 21, 16, Bits) \ +V_(ImmS, 15, 10, Bits) \ + \ +/* Test and branch immediate */ \ +V_(ImmTestBranch, 18, 5, SignedBits) \ +V_(ImmTestBranchBit40, 23, 19, Bits) \ +V_(ImmTestBranchBit5, 31, 31, Bits) \ + \ +/* Conditionals */ \ +V_(Condition, 15, 12, Bits) \ +V_(ConditionBranch, 3, 0, Bits) \ +V_(Nzcv, 3, 0, Bits) \ +V_(ImmCondCmp, 20, 16, Bits) \ +V_(ImmCondBranch, 23, 5, SignedBits) \ + \ +/* Floating point */ \ +V_(FPType, 23, 22, Bits) \ +V_(ImmFP, 20, 13, Bits) \ +V_(FPScale, 15, 10, Bits) \ + \ +/* Load Store */ \ +V_(ImmLS, 20, 12, SignedBits) \ +V_(ImmLSUnsigned, 21, 10, Bits) \ +V_(ImmLSPair, 21, 15, SignedBits) \ +V_(SizeLS, 31, 30, Bits) \ +V_(ImmShiftLS, 12, 12, Bits) \ +V_(ImmPrefetchOperation, 4, 0, Bits) \ +V_(PrefetchHint, 4, 3, Bits) \ +V_(PrefetchTarget, 2, 1, Bits) \ +V_(PrefetchStream, 0, 0, Bits) \ + \ +/* Other immediates */ \ +V_(ImmUncondBranch, 25, 0, SignedBits) \ +V_(ImmCmpBranch, 23, 5, SignedBits) \ +V_(ImmLLiteral, 23, 5, SignedBits) \ +V_(ImmException, 20, 5, Bits) \ +V_(ImmHint, 11, 5, Bits) \ +V_(ImmBarrierDomain, 11, 10, Bits) \ +V_(ImmBarrierType, 9, 8, Bits) \ + \ +/* System (MRS, MSR) */ \ +V_(ImmSystemRegister, 19, 5, Bits) \ +V_(SysO0, 19, 19, Bits) \ +V_(SysOp1, 18, 16, Bits) \ +V_(SysOp2, 7, 5, Bits) \ +V_(CRn, 15, 12, Bits) \ +V_(CRm, 11, 8, Bits) \ + \ +/* Load-/store-exclusive */ \ +V_(LdStXLoad, 22, 22, Bits) \ +V_(LdStXNotExclusive, 23, 23, Bits) \ +V_(LdStXAcquireRelease, 15, 15, Bits) \ +V_(LdStXSizeLog2, 31, 30, Bits) \ +V_(LdStXPair, 21, 21, Bits) \ + + +#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \ +/* NZCV */ \ +V_(Flags, 31, 28, Bits) \ +V_(N, 31, 31, Bits) \ +V_(Z, 30, 30, Bits) \ +V_(C, 29, 29, Bits) \ +V_(V, 28, 28, Bits) \ +M_(NZCV, Flags_mask) \ + \ +/* FPCR */ \ +V_(AHP, 26, 26, Bits) \ +V_(DN, 25, 25, Bits) \ +V_(FZ, 24, 24, Bits) \ +V_(RMode, 23, 22, Bits) \ +M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask) + + +// Fields offsets. +#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, X) \ +const int Name##_offset = LowBit; \ +const int Name##_width = HighBit - LowBit + 1; \ +const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit; +#define NOTHING(A, B) +INSTRUCTION_FIELDS_LIST(DECLARE_FIELDS_OFFSETS) +SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING) +#undef NOTHING +#undef DECLARE_FIELDS_BITS + +// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), formed +// from ImmPCRelLo and ImmPCRelHi. +const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask; + +// Condition codes. 
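+// Illustrative example (not from the upstream libvixl sources): the
+// DECLARE_FIELDS_OFFSETS macro above turns every V_(Name, HighBit, LowBit, ...)
+// entry into an offset, a width and a mask. For V_(Rd, 4, 0, Bits) it yields
+//
+//   const int Rd_offset = 0;
+//   const int Rd_width = 5;
+//   const uint32_t Rd_mask = 0x0000001F;
+//
+// and for V_(ImmMoveWide, 20, 5, Bits) it yields ImmMoveWide_offset = 5,
+// ImmMoveWide_width = 16 and ImmMoveWide_mask = 0x001FFFE0.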
+enum Condition { + eq = 0, + ne = 1, + hs = 2, + lo = 3, + mi = 4, + pl = 5, + vs = 6, + vc = 7, + hi = 8, + ls = 9, + ge = 10, + lt = 11, + gt = 12, + le = 13, + al = 14, + nv = 15 // Behaves as always/al. +}; + +inline Condition InvertCondition(Condition cond) { + // Conditions al and nv behave identically, as "always true". They can't be + // inverted, because there is no "always false" condition. + VIXL_ASSERT((cond != al) && (cond != nv)); + return static_cast(cond ^ 1); +} + +enum FlagsUpdate { + SetFlags = 1, + LeaveFlags = 0 +}; + +enum StatusFlags { + NoFlag = 0, + + // Derive the flag combinations from the system register bit descriptions. + NFlag = N_mask, + ZFlag = Z_mask, + CFlag = C_mask, + VFlag = V_mask, + NZFlag = NFlag | ZFlag, + NCFlag = NFlag | CFlag, + NVFlag = NFlag | VFlag, + ZCFlag = ZFlag | CFlag, + ZVFlag = ZFlag | VFlag, + CVFlag = CFlag | VFlag, + NZCFlag = NFlag | ZFlag | CFlag, + NZVFlag = NFlag | ZFlag | VFlag, + NCVFlag = NFlag | CFlag | VFlag, + ZCVFlag = ZFlag | CFlag | VFlag, + NZCVFlag = NFlag | ZFlag | CFlag | VFlag, + + // Floating-point comparison results. + FPEqualFlag = ZCFlag, + FPLessThanFlag = NFlag, + FPGreaterThanFlag = CFlag, + FPUnorderedFlag = CVFlag +}; + +enum Shift { + NO_SHIFT = -1, + LSL = 0x0, + LSR = 0x1, + ASR = 0x2, + ROR = 0x3 +}; + +enum Extend { + NO_EXTEND = -1, + UXTB = 0, + UXTH = 1, + UXTW = 2, + UXTX = 3, + SXTB = 4, + SXTH = 5, + SXTW = 6, + SXTX = 7 +}; + +enum SystemHint { + NOP = 0, + YIELD = 1, + WFE = 2, + WFI = 3, + SEV = 4, + SEVL = 5 +}; + +enum BarrierDomain { + OuterShareable = 0, + NonShareable = 1, + InnerShareable = 2, + FullSystem = 3 +}; + +enum BarrierType { + BarrierOther = 0, + BarrierReads = 1, + BarrierWrites = 2, + BarrierAll = 3 +}; + +enum PrefetchOperation { + PLDL1KEEP = 0x00, + PLDL1STRM = 0x01, + PLDL2KEEP = 0x02, + PLDL2STRM = 0x03, + PLDL3KEEP = 0x04, + PLDL3STRM = 0x05, + + PLIL1KEEP = 0x08, + PLIL1STRM = 0x09, + PLIL2KEEP = 0x0a, + PLIL2STRM = 0x0b, + PLIL3KEEP = 0x0c, + PLIL3STRM = 0x0d, + + PSTL1KEEP = 0x10, + PSTL1STRM = 0x11, + PSTL2KEEP = 0x12, + PSTL2STRM = 0x13, + PSTL3KEEP = 0x14, + PSTL3STRM = 0x15 +}; + +// System/special register names. +// This information is not encoded as one field but as the concatenation of +// multiple fields (Op0<0>, Op1, Crn, Crm, Op2). +enum SystemRegister { + NZCV = ((0x1 << SysO0_offset) | + (0x3 << SysOp1_offset) | + (0x4 << CRn_offset) | + (0x2 << CRm_offset) | + (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset, + FPCR = ((0x1 << SysO0_offset) | + (0x3 << SysOp1_offset) | + (0x4 << CRn_offset) | + (0x4 << CRm_offset) | + (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset +}; + +// Instruction enumerations. +// +// These are the masks that define a class of instructions, and the list of +// instructions within each class. Each enumeration has a Fixed, FMask and +// Mask value. +// +// Fixed: The fixed bits in this instruction class. +// FMask: The mask used to extract the fixed bits in the class. +// Mask: The mask used to identify the instructions within a class. +// +// The enumerations can be used like this: +// +// VIXL_ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed); +// switch(instr->Mask(PCRelAddressingMask)) { +// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break; +// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break; +// default: printf("Unknown instruction\n"); +// } + + +// Generic fields. 
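+// Illustrative example (not from the upstream libvixl sources): as a concrete
+// instance of the Fixed/FMask/Mask scheme described above, take the
+// PC-relative addressing class defined below and the instruction word
+// 0x90000001 (an ADRP writing x1):
+//
+//   (0x90000001 & PCRelAddressingFMask) == 0x10000000 == PCRelAddressingFixed
+//   (0x90000001 & PCRelAddressingMask)  == 0x90000000 == ADRP
+//
+// The decoder first matches the class with FMask/Fixed, then dispatches on
+// Mask to identify the individual instruction.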
+enum GenericInstrField { + SixtyFourBits = 0x80000000, + ThirtyTwoBits = 0x00000000, + FP32 = 0x00000000, + FP64 = 0x00400000 +}; + +// PC relative addressing. +enum PCRelAddressingOp { + PCRelAddressingFixed = 0x10000000, + PCRelAddressingFMask = 0x1F000000, + PCRelAddressingMask = 0x9F000000, + ADR = PCRelAddressingFixed | 0x00000000, + ADRP = PCRelAddressingFixed | 0x80000000 +}; + +// Add/sub (immediate, shifted and extended.) +const int kSFOffset = 31; +enum AddSubOp { + AddSubOpMask = 0x60000000, + AddSubSetFlagsBit = 0x20000000, + ADD = 0x00000000, + ADDS = ADD | AddSubSetFlagsBit, + SUB = 0x40000000, + SUBS = SUB | AddSubSetFlagsBit +}; + +#define ADD_SUB_OP_LIST(V) \ + V(ADD), \ + V(ADDS), \ + V(SUB), \ + V(SUBS) + +enum AddSubImmediateOp { + AddSubImmediateFixed = 0x11000000, + AddSubImmediateFMask = 0x1F000000, + AddSubImmediateMask = 0xFF000000, + #define ADD_SUB_IMMEDIATE(A) \ + A##_w_imm = AddSubImmediateFixed | A, \ + A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits + ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE) + #undef ADD_SUB_IMMEDIATE +}; + +enum AddSubShiftedOp { + AddSubShiftedFixed = 0x0B000000, + AddSubShiftedFMask = 0x1F200000, + AddSubShiftedMask = 0xFF200000, + #define ADD_SUB_SHIFTED(A) \ + A##_w_shift = AddSubShiftedFixed | A, \ + A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits + ADD_SUB_OP_LIST(ADD_SUB_SHIFTED) + #undef ADD_SUB_SHIFTED +}; + +enum AddSubExtendedOp { + AddSubExtendedFixed = 0x0B200000, + AddSubExtendedFMask = 0x1F200000, + AddSubExtendedMask = 0xFFE00000, + #define ADD_SUB_EXTENDED(A) \ + A##_w_ext = AddSubExtendedFixed | A, \ + A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits + ADD_SUB_OP_LIST(ADD_SUB_EXTENDED) + #undef ADD_SUB_EXTENDED +}; + +// Add/sub with carry. +enum AddSubWithCarryOp { + AddSubWithCarryFixed = 0x1A000000, + AddSubWithCarryFMask = 0x1FE00000, + AddSubWithCarryMask = 0xFFE0FC00, + ADC_w = AddSubWithCarryFixed | ADD, + ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits, + ADC = ADC_w, + ADCS_w = AddSubWithCarryFixed | ADDS, + ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits, + SBC_w = AddSubWithCarryFixed | SUB, + SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits, + SBC = SBC_w, + SBCS_w = AddSubWithCarryFixed | SUBS, + SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits +}; + + +// Logical (immediate and shifted register). +enum LogicalOp { + LogicalOpMask = 0x60200000, + NOT = 0x00200000, + AND = 0x00000000, + BIC = AND | NOT, + ORR = 0x20000000, + ORN = ORR | NOT, + EOR = 0x40000000, + EON = EOR | NOT, + ANDS = 0x60000000, + BICS = ANDS | NOT +}; + +// Logical immediate. +enum LogicalImmediateOp { + LogicalImmediateFixed = 0x12000000, + LogicalImmediateFMask = 0x1F800000, + LogicalImmediateMask = 0xFF800000, + AND_w_imm = LogicalImmediateFixed | AND, + AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits, + ORR_w_imm = LogicalImmediateFixed | ORR, + ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits, + EOR_w_imm = LogicalImmediateFixed | EOR, + EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits, + ANDS_w_imm = LogicalImmediateFixed | ANDS, + ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits +}; + +// Logical shifted register. 
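+// Illustrative example (not from the upstream libvixl sources): the
+// ADD_SUB_OP_LIST X-macro above stamps out one W and one X variant of each
+// operation. For the immediate form, ADD_SUB_IMMEDIATE(ADD) expands to
+//
+//   ADD_w_imm = 0x11000000   // AddSubImmediateFixed | ADD
+//   ADD_x_imm = 0x91000000   // AddSubImmediateFixed | ADD | SixtyFourBits
+//
+// and ADD_SUB_IMMEDIATE(SUBS) gives SUBS_x_imm = 0xF1000000, the encoding
+// class that a "cmp xn, #imm" instruction falls into.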
+enum LogicalShiftedOp { + LogicalShiftedFixed = 0x0A000000, + LogicalShiftedFMask = 0x1F000000, + LogicalShiftedMask = 0xFF200000, + AND_w = LogicalShiftedFixed | AND, + AND_x = LogicalShiftedFixed | AND | SixtyFourBits, + AND_shift = AND_w, + BIC_w = LogicalShiftedFixed | BIC, + BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits, + BIC_shift = BIC_w, + ORR_w = LogicalShiftedFixed | ORR, + ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits, + ORR_shift = ORR_w, + ORN_w = LogicalShiftedFixed | ORN, + ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits, + ORN_shift = ORN_w, + EOR_w = LogicalShiftedFixed | EOR, + EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits, + EOR_shift = EOR_w, + EON_w = LogicalShiftedFixed | EON, + EON_x = LogicalShiftedFixed | EON | SixtyFourBits, + EON_shift = EON_w, + ANDS_w = LogicalShiftedFixed | ANDS, + ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits, + ANDS_shift = ANDS_w, + BICS_w = LogicalShiftedFixed | BICS, + BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits, + BICS_shift = BICS_w +}; + +// Move wide immediate. +enum MoveWideImmediateOp { + MoveWideImmediateFixed = 0x12800000, + MoveWideImmediateFMask = 0x1F800000, + MoveWideImmediateMask = 0xFF800000, + MOVN = 0x00000000, + MOVZ = 0x40000000, + MOVK = 0x60000000, + MOVN_w = MoveWideImmediateFixed | MOVN, + MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits, + MOVZ_w = MoveWideImmediateFixed | MOVZ, + MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits, + MOVK_w = MoveWideImmediateFixed | MOVK, + MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits +}; + +// Bitfield. +const int kBitfieldNOffset = 22; +enum BitfieldOp { + BitfieldFixed = 0x13000000, + BitfieldFMask = 0x1F800000, + BitfieldMask = 0xFF800000, + SBFM_w = BitfieldFixed | 0x00000000, + SBFM_x = BitfieldFixed | 0x80000000, + SBFM = SBFM_w, + BFM_w = BitfieldFixed | 0x20000000, + BFM_x = BitfieldFixed | 0xA0000000, + BFM = BFM_w, + UBFM_w = BitfieldFixed | 0x40000000, + UBFM_x = BitfieldFixed | 0xC0000000, + UBFM = UBFM_w + // Bitfield N field. +}; + +// Extract. +enum ExtractOp { + ExtractFixed = 0x13800000, + ExtractFMask = 0x1F800000, + ExtractMask = 0xFFA00000, + EXTR_w = ExtractFixed | 0x00000000, + EXTR_x = ExtractFixed | 0x80000000, + EXTR = EXTR_w +}; + +// Unconditional branch. +enum UnconditionalBranchOp { + UnconditionalBranchFixed = 0x14000000, + UnconditionalBranchFMask = 0x7C000000, + UnconditionalBranchMask = 0xFC000000, + B = UnconditionalBranchFixed | 0x00000000, + BL = UnconditionalBranchFixed | 0x80000000 +}; + +// Unconditional branch to register. +enum UnconditionalBranchToRegisterOp { + UnconditionalBranchToRegisterFixed = 0xD6000000, + UnconditionalBranchToRegisterFMask = 0xFE000000, + UnconditionalBranchToRegisterMask = 0xFFFFFC1F, + BR = UnconditionalBranchToRegisterFixed | 0x001F0000, + BLR = UnconditionalBranchToRegisterFixed | 0x003F0000, + RET = UnconditionalBranchToRegisterFixed | 0x005F0000 +}; + +// Compare and branch. +enum CompareBranchOp { + CompareBranchFixed = 0x34000000, + CompareBranchFMask = 0x7E000000, + CompareBranchMask = 0xFF000000, + CBZ_w = CompareBranchFixed | 0x00000000, + CBZ_x = CompareBranchFixed | 0x80000000, + CBZ = CBZ_w, + CBNZ_w = CompareBranchFixed | 0x01000000, + CBNZ_x = CompareBranchFixed | 0x81000000, + CBNZ = CBNZ_w +}; + +// Test and branch. +enum TestBranchOp { + TestBranchFixed = 0x36000000, + TestBranchFMask = 0x7E000000, + TestBranchMask = 0x7F000000, + TBZ = TestBranchFixed | 0x00000000, + TBNZ = TestBranchFixed | 0x01000000 +}; + +// Conditional branch. 
+enum ConditionalBranchOp { + ConditionalBranchFixed = 0x54000000, + ConditionalBranchFMask = 0xFE000000, + ConditionalBranchMask = 0xFF000010, + B_cond = ConditionalBranchFixed | 0x00000000 +}; + +// System. +// System instruction encoding is complicated because some instructions use op +// and CR fields to encode parameters. To handle this cleanly, the system +// instructions are split into more than one enum. + +enum SystemOp { + SystemFixed = 0xD5000000, + SystemFMask = 0xFFC00000 +}; + +enum SystemSysRegOp { + SystemSysRegFixed = 0xD5100000, + SystemSysRegFMask = 0xFFD00000, + SystemSysRegMask = 0xFFF00000, + MRS = SystemSysRegFixed | 0x00200000, + MSR = SystemSysRegFixed | 0x00000000 +}; + +enum SystemHintOp { + SystemHintFixed = 0xD503201F, + SystemHintFMask = 0xFFFFF01F, + SystemHintMask = 0xFFFFF01F, + HINT = SystemHintFixed | 0x00000000 +}; + +// Exception. +enum ExceptionOp { + ExceptionFixed = 0xD4000000, + ExceptionFMask = 0xFF000000, + ExceptionMask = 0xFFE0001F, + HLT = ExceptionFixed | 0x00400000, + BRK = ExceptionFixed | 0x00200000, + SVC = ExceptionFixed | 0x00000001, + HVC = ExceptionFixed | 0x00000002, + SMC = ExceptionFixed | 0x00000003, + DCPS1 = ExceptionFixed | 0x00A00001, + DCPS2 = ExceptionFixed | 0x00A00002, + DCPS3 = ExceptionFixed | 0x00A00003 +}; + +enum MemBarrierOp { + MemBarrierFixed = 0xD503309F, + MemBarrierFMask = 0xFFFFF09F, + MemBarrierMask = 0xFFFFF0FF, + DSB = MemBarrierFixed | 0x00000000, + DMB = MemBarrierFixed | 0x00000020, + ISB = MemBarrierFixed | 0x00000040 +}; + +enum SystemExclusiveMonitorOp { + SystemExclusiveMonitorFixed = 0xD503305F, + SystemExclusiveMonitorFMask = 0xFFFFF0FF, + SystemExclusiveMonitorMask = 0xFFFFF0FF, + CLREX = SystemExclusiveMonitorFixed +}; + +// Any load or store. +enum LoadStoreAnyOp { + LoadStoreAnyFMask = 0x0a000000, + LoadStoreAnyFixed = 0x08000000 +}; + +// Any load pair or store pair. +enum LoadStorePairAnyOp { + LoadStorePairAnyFMask = 0x3a000000, + LoadStorePairAnyFixed = 0x28000000 +}; + +#define LOAD_STORE_PAIR_OP_LIST(V) \ + V(STP, w, 0x00000000), \ + V(LDP, w, 0x00400000), \ + V(LDPSW, x, 0x40400000), \ + V(STP, x, 0x80000000), \ + V(LDP, x, 0x80400000), \ + V(STP, s, 0x04000000), \ + V(LDP, s, 0x04400000), \ + V(STP, d, 0x44000000), \ + V(LDP, d, 0x44400000) + +// Load/store pair (post, pre and offset.) 
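+// Illustrative example (not from the upstream libvixl sources):
+// LOAD_STORE_PAIR_OP_LIST above supplies the base encodings, and each
+// addressing-mode enum below ORs in its own fixed bits. For example:
+//
+//   STP_x      = 0x80000000
+//   STP_x_post = LoadStorePairPostIndexFixed | STP_x = 0xA8800000
+//
+// which is the encoding class that "stp x0, x1, [sp], #16" falls into.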
+enum LoadStorePairOp { + LoadStorePairMask = 0xC4400000, + LoadStorePairLBit = 1 << 22, + #define LOAD_STORE_PAIR(A, B, C) \ + A##_##B = C + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR) + #undef LOAD_STORE_PAIR +}; + +enum LoadStorePairPostIndexOp { + LoadStorePairPostIndexFixed = 0x28800000, + LoadStorePairPostIndexFMask = 0x3B800000, + LoadStorePairPostIndexMask = 0xFFC00000, + #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \ + A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX) + #undef LOAD_STORE_PAIR_POST_INDEX +}; + +enum LoadStorePairPreIndexOp { + LoadStorePairPreIndexFixed = 0x29800000, + LoadStorePairPreIndexFMask = 0x3B800000, + LoadStorePairPreIndexMask = 0xFFC00000, + #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \ + A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX) + #undef LOAD_STORE_PAIR_PRE_INDEX +}; + +enum LoadStorePairOffsetOp { + LoadStorePairOffsetFixed = 0x29000000, + LoadStorePairOffsetFMask = 0x3B800000, + LoadStorePairOffsetMask = 0xFFC00000, + #define LOAD_STORE_PAIR_OFFSET(A, B, C) \ + A##_##B##_off = LoadStorePairOffsetFixed | A##_##B + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET) + #undef LOAD_STORE_PAIR_OFFSET +}; + +enum LoadStorePairNonTemporalOp { + LoadStorePairNonTemporalFixed = 0x28000000, + LoadStorePairNonTemporalFMask = 0x3B800000, + LoadStorePairNonTemporalMask = 0xFFC00000, + STNP_w = LoadStorePairNonTemporalFixed | STP_w, + LDNP_w = LoadStorePairNonTemporalFixed | LDP_w, + STNP_x = LoadStorePairNonTemporalFixed | STP_x, + LDNP_x = LoadStorePairNonTemporalFixed | LDP_x, + STNP_s = LoadStorePairNonTemporalFixed | STP_s, + LDNP_s = LoadStorePairNonTemporalFixed | LDP_s, + STNP_d = LoadStorePairNonTemporalFixed | STP_d, + LDNP_d = LoadStorePairNonTemporalFixed | LDP_d +}; + +// Load literal. +enum LoadLiteralOp { + LoadLiteralFixed = 0x18000000, + LoadLiteralFMask = 0x3B000000, + LoadLiteralMask = 0xFF000000, + LDR_w_lit = LoadLiteralFixed | 0x00000000, + LDR_x_lit = LoadLiteralFixed | 0x40000000, + LDRSW_x_lit = LoadLiteralFixed | 0x80000000, + PRFM_lit = LoadLiteralFixed | 0xC0000000, + LDR_s_lit = LoadLiteralFixed | 0x04000000, + LDR_d_lit = LoadLiteralFixed | 0x44000000 +}; + +#define LOAD_STORE_OP_LIST(V) \ + V(ST, RB, w, 0x00000000), \ + V(ST, RH, w, 0x40000000), \ + V(ST, R, w, 0x80000000), \ + V(ST, R, x, 0xC0000000), \ + V(LD, RB, w, 0x00400000), \ + V(LD, RH, w, 0x40400000), \ + V(LD, R, w, 0x80400000), \ + V(LD, R, x, 0xC0400000), \ + V(LD, RSB, x, 0x00800000), \ + V(LD, RSH, x, 0x40800000), \ + V(LD, RSW, x, 0x80800000), \ + V(LD, RSB, w, 0x00C00000), \ + V(LD, RSH, w, 0x40C00000), \ + V(ST, R, s, 0x84000000), \ + V(ST, R, d, 0xC4000000), \ + V(LD, R, s, 0x84400000), \ + V(LD, R, d, 0xC4400000) + + +// Load/store (post, pre, offset and unsigned.) +enum LoadStoreOp { + LoadStoreOpMask = 0xC4C00000, + #define LOAD_STORE(A, B, C, D) \ + A##B##_##C = D + LOAD_STORE_OP_LIST(LOAD_STORE), + #undef LOAD_STORE + PRFM = 0xC0800000 +}; + +// Load/store unscaled offset. +enum LoadStoreUnscaledOffsetOp { + LoadStoreUnscaledOffsetFixed = 0x38000000, + LoadStoreUnscaledOffsetFMask = 0x3B200C00, + LoadStoreUnscaledOffsetMask = 0xFFE00C00, + PRFUM = LoadStoreUnscaledOffsetFixed | PRFM, + #define LOAD_STORE_UNSCALED(A, B, C, D) \ + A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED) + #undef LOAD_STORE_UNSCALED +}; + +// Load/store post index. 
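+// Illustrative example (not from the upstream libvixl sources): the
+// single-register LOAD_STORE_OP_LIST above works the same way as the pair
+// list. LDR of an X register has the base encoding 0xC0400000, so the
+// unsigned-offset form defined below becomes
+//
+//   LDR_x_unsigned = LoadStoreUnsignedOffsetFixed | 0xC0400000 = 0xF9400000
+//
+// which is why "ldr x0, [x1, #8]" words start with 0xF94.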
+enum LoadStorePostIndex { + LoadStorePostIndexFixed = 0x38000400, + LoadStorePostIndexFMask = 0x3B200C00, + LoadStorePostIndexMask = 0xFFE00C00, + #define LOAD_STORE_POST_INDEX(A, B, C, D) \ + A##B##_##C##_post = LoadStorePostIndexFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX) + #undef LOAD_STORE_POST_INDEX +}; + +// Load/store pre index. +enum LoadStorePreIndex { + LoadStorePreIndexFixed = 0x38000C00, + LoadStorePreIndexFMask = 0x3B200C00, + LoadStorePreIndexMask = 0xFFE00C00, + #define LOAD_STORE_PRE_INDEX(A, B, C, D) \ + A##B##_##C##_pre = LoadStorePreIndexFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX) + #undef LOAD_STORE_PRE_INDEX +}; + +// Load/store unsigned offset. +enum LoadStoreUnsignedOffset { + LoadStoreUnsignedOffsetFixed = 0x39000000, + LoadStoreUnsignedOffsetFMask = 0x3B000000, + LoadStoreUnsignedOffsetMask = 0xFFC00000, + PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM, + #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \ + A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET) + #undef LOAD_STORE_UNSIGNED_OFFSET +}; + +// Load/store register offset. +enum LoadStoreRegisterOffset { + LoadStoreRegisterOffsetFixed = 0x38200800, + LoadStoreRegisterOffsetFMask = 0x3B200C00, + LoadStoreRegisterOffsetMask = 0xFFE00C00, + PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM, + #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \ + A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET) + #undef LOAD_STORE_REGISTER_OFFSET +}; + +enum LoadStoreExclusive { + LoadStoreExclusiveFixed = 0x08000000, + LoadStoreExclusiveFMask = 0x3F000000, + LoadStoreExclusiveMask = 0xFFE08000, + STXRB_w = LoadStoreExclusiveFixed | 0x00000000, + STXRH_w = LoadStoreExclusiveFixed | 0x40000000, + STXR_w = LoadStoreExclusiveFixed | 0x80000000, + STXR_x = LoadStoreExclusiveFixed | 0xC0000000, + LDXRB_w = LoadStoreExclusiveFixed | 0x00400000, + LDXRH_w = LoadStoreExclusiveFixed | 0x40400000, + LDXR_w = LoadStoreExclusiveFixed | 0x80400000, + LDXR_x = LoadStoreExclusiveFixed | 0xC0400000, + STXP_w = LoadStoreExclusiveFixed | 0x80200000, + STXP_x = LoadStoreExclusiveFixed | 0xC0200000, + LDXP_w = LoadStoreExclusiveFixed | 0x80600000, + LDXP_x = LoadStoreExclusiveFixed | 0xC0600000, + STLXRB_w = LoadStoreExclusiveFixed | 0x00008000, + STLXRH_w = LoadStoreExclusiveFixed | 0x40008000, + STLXR_w = LoadStoreExclusiveFixed | 0x80008000, + STLXR_x = LoadStoreExclusiveFixed | 0xC0008000, + LDAXRB_w = LoadStoreExclusiveFixed | 0x00408000, + LDAXRH_w = LoadStoreExclusiveFixed | 0x40408000, + LDAXR_w = LoadStoreExclusiveFixed | 0x80408000, + LDAXR_x = LoadStoreExclusiveFixed | 0xC0408000, + STLXP_w = LoadStoreExclusiveFixed | 0x80208000, + STLXP_x = LoadStoreExclusiveFixed | 0xC0208000, + LDAXP_w = LoadStoreExclusiveFixed | 0x80608000, + LDAXP_x = LoadStoreExclusiveFixed | 0xC0608000, + STLRB_w = LoadStoreExclusiveFixed | 0x00808000, + STLRH_w = LoadStoreExclusiveFixed | 0x40808000, + STLR_w = LoadStoreExclusiveFixed | 0x80808000, + STLR_x = LoadStoreExclusiveFixed | 0xC0808000, + LDARB_w = LoadStoreExclusiveFixed | 0x00C08000, + LDARH_w = LoadStoreExclusiveFixed | 0x40C08000, + LDAR_w = LoadStoreExclusiveFixed | 0x80C08000, + LDAR_x = LoadStoreExclusiveFixed | 0xC0C08000 +}; + +// Conditional compare. +enum ConditionalCompareOp { + ConditionalCompareMask = 0x60000000, + CCMN = 0x20000000, + CCMP = 0x60000000 +}; + +// Conditional compare register. 
+enum ConditionalCompareRegisterOp { + ConditionalCompareRegisterFixed = 0x1A400000, + ConditionalCompareRegisterFMask = 0x1FE00800, + ConditionalCompareRegisterMask = 0xFFE00C10, + CCMN_w = ConditionalCompareRegisterFixed | CCMN, + CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN, + CCMP_w = ConditionalCompareRegisterFixed | CCMP, + CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP +}; + +// Conditional compare immediate. +enum ConditionalCompareImmediateOp { + ConditionalCompareImmediateFixed = 0x1A400800, + ConditionalCompareImmediateFMask = 0x1FE00800, + ConditionalCompareImmediateMask = 0xFFE00C10, + CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN, + CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN, + CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP, + CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP +}; + +// Conditional select. +enum ConditionalSelectOp { + ConditionalSelectFixed = 0x1A800000, + ConditionalSelectFMask = 0x1FE00000, + ConditionalSelectMask = 0xFFE00C00, + CSEL_w = ConditionalSelectFixed | 0x00000000, + CSEL_x = ConditionalSelectFixed | 0x80000000, + CSEL = CSEL_w, + CSINC_w = ConditionalSelectFixed | 0x00000400, + CSINC_x = ConditionalSelectFixed | 0x80000400, + CSINC = CSINC_w, + CSINV_w = ConditionalSelectFixed | 0x40000000, + CSINV_x = ConditionalSelectFixed | 0xC0000000, + CSINV = CSINV_w, + CSNEG_w = ConditionalSelectFixed | 0x40000400, + CSNEG_x = ConditionalSelectFixed | 0xC0000400, + CSNEG = CSNEG_w +}; + +// Data processing 1 source. +enum DataProcessing1SourceOp { + DataProcessing1SourceFixed = 0x5AC00000, + DataProcessing1SourceFMask = 0x5FE00000, + DataProcessing1SourceMask = 0xFFFFFC00, + RBIT = DataProcessing1SourceFixed | 0x00000000, + RBIT_w = RBIT, + RBIT_x = RBIT | SixtyFourBits, + REV16 = DataProcessing1SourceFixed | 0x00000400, + REV16_w = REV16, + REV16_x = REV16 | SixtyFourBits, + REV = DataProcessing1SourceFixed | 0x00000800, + REV_w = REV, + REV32_x = REV | SixtyFourBits, + REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00, + CLZ = DataProcessing1SourceFixed | 0x00001000, + CLZ_w = CLZ, + CLZ_x = CLZ | SixtyFourBits, + CLS = DataProcessing1SourceFixed | 0x00001400, + CLS_w = CLS, + CLS_x = CLS | SixtyFourBits +}; + +// Data processing 2 source. 
+enum DataProcessing2SourceOp { + DataProcessing2SourceFixed = 0x1AC00000, + DataProcessing2SourceFMask = 0x5FE00000, + DataProcessing2SourceMask = 0xFFE0FC00, + UDIV_w = DataProcessing2SourceFixed | 0x00000800, + UDIV_x = DataProcessing2SourceFixed | 0x80000800, + UDIV = UDIV_w, + SDIV_w = DataProcessing2SourceFixed | 0x00000C00, + SDIV_x = DataProcessing2SourceFixed | 0x80000C00, + SDIV = SDIV_w, + LSLV_w = DataProcessing2SourceFixed | 0x00002000, + LSLV_x = DataProcessing2SourceFixed | 0x80002000, + LSLV = LSLV_w, + LSRV_w = DataProcessing2SourceFixed | 0x00002400, + LSRV_x = DataProcessing2SourceFixed | 0x80002400, + LSRV = LSRV_w, + ASRV_w = DataProcessing2SourceFixed | 0x00002800, + ASRV_x = DataProcessing2SourceFixed | 0x80002800, + ASRV = ASRV_w, + RORV_w = DataProcessing2SourceFixed | 0x00002C00, + RORV_x = DataProcessing2SourceFixed | 0x80002C00, + RORV = RORV_w, + CRC32B = DataProcessing2SourceFixed | 0x00004000, + CRC32H = DataProcessing2SourceFixed | 0x00004400, + CRC32W = DataProcessing2SourceFixed | 0x00004800, + CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00, + CRC32CB = DataProcessing2SourceFixed | 0x00005000, + CRC32CH = DataProcessing2SourceFixed | 0x00005400, + CRC32CW = DataProcessing2SourceFixed | 0x00005800, + CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00 +}; + +// Data processing 3 source. +enum DataProcessing3SourceOp { + DataProcessing3SourceFixed = 0x1B000000, + DataProcessing3SourceFMask = 0x1F000000, + DataProcessing3SourceMask = 0xFFE08000, + MADD_w = DataProcessing3SourceFixed | 0x00000000, + MADD_x = DataProcessing3SourceFixed | 0x80000000, + MADD = MADD_w, + MSUB_w = DataProcessing3SourceFixed | 0x00008000, + MSUB_x = DataProcessing3SourceFixed | 0x80008000, + MSUB = MSUB_w, + SMADDL_x = DataProcessing3SourceFixed | 0x80200000, + SMSUBL_x = DataProcessing3SourceFixed | 0x80208000, + SMULH_x = DataProcessing3SourceFixed | 0x80400000, + UMADDL_x = DataProcessing3SourceFixed | 0x80A00000, + UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000, + UMULH_x = DataProcessing3SourceFixed | 0x80C00000 +}; + +// Floating point compare. +enum FPCompareOp { + FPCompareFixed = 0x1E202000, + FPCompareFMask = 0x5F203C00, + FPCompareMask = 0xFFE0FC1F, + FCMP_s = FPCompareFixed | 0x00000000, + FCMP_d = FPCompareFixed | FP64 | 0x00000000, + FCMP = FCMP_s, + FCMP_s_zero = FPCompareFixed | 0x00000008, + FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008, + FCMP_zero = FCMP_s_zero, + FCMPE_s = FPCompareFixed | 0x00000010, + FCMPE_d = FPCompareFixed | FP64 | 0x00000010, + FCMPE_s_zero = FPCompareFixed | 0x00000018, + FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018 +}; + +// Floating point conditional compare. +enum FPConditionalCompareOp { + FPConditionalCompareFixed = 0x1E200400, + FPConditionalCompareFMask = 0x5F200C00, + FPConditionalCompareMask = 0xFFE00C10, + FCCMP_s = FPConditionalCompareFixed | 0x00000000, + FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000, + FCCMP = FCCMP_s, + FCCMPE_s = FPConditionalCompareFixed | 0x00000010, + FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010, + FCCMPE = FCCMPE_s +}; + +// Floating point conditional select. +enum FPConditionalSelectOp { + FPConditionalSelectFixed = 0x1E200C00, + FPConditionalSelectFMask = 0x5F200C00, + FPConditionalSelectMask = 0xFFE00C00, + FCSEL_s = FPConditionalSelectFixed | 0x00000000, + FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000, + FCSEL = FCSEL_s +}; + +// Floating point immediate. 
+enum FPImmediateOp { + FPImmediateFixed = 0x1E201000, + FPImmediateFMask = 0x5F201C00, + FPImmediateMask = 0xFFE01C00, + FMOV_s_imm = FPImmediateFixed | 0x00000000, + FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000 +}; + +// Floating point data processing 1 source. +enum FPDataProcessing1SourceOp { + FPDataProcessing1SourceFixed = 0x1E204000, + FPDataProcessing1SourceFMask = 0x5F207C00, + FPDataProcessing1SourceMask = 0xFFFFFC00, + FMOV_s = FPDataProcessing1SourceFixed | 0x00000000, + FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000, + FMOV = FMOV_s, + FABS_s = FPDataProcessing1SourceFixed | 0x00008000, + FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000, + FABS = FABS_s, + FNEG_s = FPDataProcessing1SourceFixed | 0x00010000, + FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000, + FNEG = FNEG_s, + FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000, + FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000, + FSQRT = FSQRT_s, + FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000, + FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000, + FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000, + FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000, + FRINTN = FRINTN_s, + FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000, + FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000, + FRINTP = FRINTP_s, + FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000, + FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000, + FRINTM = FRINTM_s, + FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000, + FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000, + FRINTZ = FRINTZ_s, + FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000, + FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000, + FRINTA = FRINTA_s, + FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000, + FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000, + FRINTX = FRINTX_s, + FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000, + FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000, + FRINTI = FRINTI_s +}; + +// Floating point data processing 2 source. +enum FPDataProcessing2SourceOp { + FPDataProcessing2SourceFixed = 0x1E200800, + FPDataProcessing2SourceFMask = 0x5F200C00, + FPDataProcessing2SourceMask = 0xFFE0FC00, + FMUL = FPDataProcessing2SourceFixed | 0x00000000, + FMUL_s = FMUL, + FMUL_d = FMUL | FP64, + FDIV = FPDataProcessing2SourceFixed | 0x00001000, + FDIV_s = FDIV, + FDIV_d = FDIV | FP64, + FADD = FPDataProcessing2SourceFixed | 0x00002000, + FADD_s = FADD, + FADD_d = FADD | FP64, + FSUB = FPDataProcessing2SourceFixed | 0x00003000, + FSUB_s = FSUB, + FSUB_d = FSUB | FP64, + FMAX = FPDataProcessing2SourceFixed | 0x00004000, + FMAX_s = FMAX, + FMAX_d = FMAX | FP64, + FMIN = FPDataProcessing2SourceFixed | 0x00005000, + FMIN_s = FMIN, + FMIN_d = FMIN | FP64, + FMAXNM = FPDataProcessing2SourceFixed | 0x00006000, + FMAXNM_s = FMAXNM, + FMAXNM_d = FMAXNM | FP64, + FMINNM = FPDataProcessing2SourceFixed | 0x00007000, + FMINNM_s = FMINNM, + FMINNM_d = FMINNM | FP64, + FNMUL = FPDataProcessing2SourceFixed | 0x00008000, + FNMUL_s = FNMUL, + FNMUL_d = FNMUL | FP64 +}; + +// Floating point data processing 3 source. 
+enum FPDataProcessing3SourceOp { + FPDataProcessing3SourceFixed = 0x1F000000, + FPDataProcessing3SourceFMask = 0x5F000000, + FPDataProcessing3SourceMask = 0xFFE08000, + FMADD_s = FPDataProcessing3SourceFixed | 0x00000000, + FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000, + FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000, + FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000, + FMADD_d = FPDataProcessing3SourceFixed | 0x00400000, + FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000, + FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000, + FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000 +}; + +// Conversion between floating point and integer. +enum FPIntegerConvertOp { + FPIntegerConvertFixed = 0x1E200000, + FPIntegerConvertFMask = 0x5F20FC00, + FPIntegerConvertMask = 0xFFFFFC00, + FCVTNS = FPIntegerConvertFixed | 0x00000000, + FCVTNS_ws = FCVTNS, + FCVTNS_xs = FCVTNS | SixtyFourBits, + FCVTNS_wd = FCVTNS | FP64, + FCVTNS_xd = FCVTNS | SixtyFourBits | FP64, + FCVTNU = FPIntegerConvertFixed | 0x00010000, + FCVTNU_ws = FCVTNU, + FCVTNU_xs = FCVTNU | SixtyFourBits, + FCVTNU_wd = FCVTNU | FP64, + FCVTNU_xd = FCVTNU | SixtyFourBits | FP64, + FCVTPS = FPIntegerConvertFixed | 0x00080000, + FCVTPS_ws = FCVTPS, + FCVTPS_xs = FCVTPS | SixtyFourBits, + FCVTPS_wd = FCVTPS | FP64, + FCVTPS_xd = FCVTPS | SixtyFourBits | FP64, + FCVTPU = FPIntegerConvertFixed | 0x00090000, + FCVTPU_ws = FCVTPU, + FCVTPU_xs = FCVTPU | SixtyFourBits, + FCVTPU_wd = FCVTPU | FP64, + FCVTPU_xd = FCVTPU | SixtyFourBits | FP64, + FCVTMS = FPIntegerConvertFixed | 0x00100000, + FCVTMS_ws = FCVTMS, + FCVTMS_xs = FCVTMS | SixtyFourBits, + FCVTMS_wd = FCVTMS | FP64, + FCVTMS_xd = FCVTMS | SixtyFourBits | FP64, + FCVTMU = FPIntegerConvertFixed | 0x00110000, + FCVTMU_ws = FCVTMU, + FCVTMU_xs = FCVTMU | SixtyFourBits, + FCVTMU_wd = FCVTMU | FP64, + FCVTMU_xd = FCVTMU | SixtyFourBits | FP64, + FCVTZS = FPIntegerConvertFixed | 0x00180000, + FCVTZS_ws = FCVTZS, + FCVTZS_xs = FCVTZS | SixtyFourBits, + FCVTZS_wd = FCVTZS | FP64, + FCVTZS_xd = FCVTZS | SixtyFourBits | FP64, + FCVTZU = FPIntegerConvertFixed | 0x00190000, + FCVTZU_ws = FCVTZU, + FCVTZU_xs = FCVTZU | SixtyFourBits, + FCVTZU_wd = FCVTZU | FP64, + FCVTZU_xd = FCVTZU | SixtyFourBits | FP64, + SCVTF = FPIntegerConvertFixed | 0x00020000, + SCVTF_sw = SCVTF, + SCVTF_sx = SCVTF | SixtyFourBits, + SCVTF_dw = SCVTF | FP64, + SCVTF_dx = SCVTF | SixtyFourBits | FP64, + UCVTF = FPIntegerConvertFixed | 0x00030000, + UCVTF_sw = UCVTF, + UCVTF_sx = UCVTF | SixtyFourBits, + UCVTF_dw = UCVTF | FP64, + UCVTF_dx = UCVTF | SixtyFourBits | FP64, + FCVTAS = FPIntegerConvertFixed | 0x00040000, + FCVTAS_ws = FCVTAS, + FCVTAS_xs = FCVTAS | SixtyFourBits, + FCVTAS_wd = FCVTAS | FP64, + FCVTAS_xd = FCVTAS | SixtyFourBits | FP64, + FCVTAU = FPIntegerConvertFixed | 0x00050000, + FCVTAU_ws = FCVTAU, + FCVTAU_xs = FCVTAU | SixtyFourBits, + FCVTAU_wd = FCVTAU | FP64, + FCVTAU_xd = FCVTAU | SixtyFourBits | FP64, + FMOV_ws = FPIntegerConvertFixed | 0x00060000, + FMOV_sw = FPIntegerConvertFixed | 0x00070000, + FMOV_xd = FMOV_ws | SixtyFourBits | FP64, + FMOV_dx = FMOV_sw | SixtyFourBits | FP64 +}; + +// Conversion between fixed point and floating point. 
+enum FPFixedPointConvertOp { + FPFixedPointConvertFixed = 0x1E000000, + FPFixedPointConvertFMask = 0x5F200000, + FPFixedPointConvertMask = 0xFFFF0000, + FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000, + FCVTZS_ws_fixed = FCVTZS_fixed, + FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits, + FCVTZS_wd_fixed = FCVTZS_fixed | FP64, + FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64, + FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000, + FCVTZU_ws_fixed = FCVTZU_fixed, + FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits, + FCVTZU_wd_fixed = FCVTZU_fixed | FP64, + FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64, + SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000, + SCVTF_sw_fixed = SCVTF_fixed, + SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits, + SCVTF_dw_fixed = SCVTF_fixed | FP64, + SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64, + UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000, + UCVTF_sw_fixed = UCVTF_fixed, + UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits, + UCVTF_dw_fixed = UCVTF_fixed | FP64, + UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64 +}; + +// Unimplemented and unallocated instructions. These are defined to make fixed +// bit assertion easier. +enum UnimplementedOp { + UnimplementedFixed = 0x00000000, + UnimplementedFMask = 0x00000000 +}; + +enum UnallocatedOp { + UnallocatedFixed = 0x00000000, + UnallocatedFMask = 0x00000000 +}; + +} // namespace vixl + +#endif // VIXL_A64_CONSTANTS_A64_H_ diff --git a/qemu/disas/libvixl/a64/cpu-a64.h b/qemu/disas/libvixl/a64/cpu-a64.h new file mode 100644 index 000000000..59b7974a1 --- /dev/null +++ b/qemu/disas/libvixl/a64/cpu-a64.h @@ -0,0 +1,83 @@ +// Copyright 2013, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_CPU_A64_H +#define VIXL_CPU_A64_H + +#include "globals.h" +#include "instructions-a64.h" + +namespace vixl { + +class CPU { + public: + // Initialise CPU support. + static void SetUp(); + + // Ensures the data at a given address and with a given size is the same for + // the I and D caches. 
I and D caches are not automatically coherent on ARM
+  // so this operation is required before any dynamically generated code can
+  // safely run.
+  static void EnsureIAndDCacheCoherency(void *address, size_t length);
+
+  // Handle tagged pointers.
+  template <typename T>
+  static T SetPointerTag(T pointer, uint64_t tag) {
+    VIXL_ASSERT(is_uintn(kAddressTagWidth, tag));
+
+    // Use C-style casts to get static_cast behaviour for integral types (T),
+    // and reinterpret_cast behaviour for other types.
+
+    uint64_t raw = (uint64_t)pointer;
+    VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
+
+    raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
+    return (T)raw;
+  }
+
+  template <typename T>
+  static uint64_t GetPointerTag(T pointer) {
+    // Use C-style casts to get static_cast behaviour for integral types (T),
+    // and reinterpret_cast behaviour for other types.
+
+    uint64_t raw = (uint64_t)pointer;
+    VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
+
+    return (raw & kAddressTagMask) >> kAddressTagOffset;
+  }
+
+ private:
+  // Return the content of the cache type register.
+  static uint32_t GetCacheType();
+
+  // I and D cache line size in bytes.
+  static unsigned icache_line_size_;
+  static unsigned dcache_line_size_;
+};
+
+}  // namespace vixl
+
+#endif  // VIXL_CPU_A64_H
diff --git a/qemu/disas/libvixl/a64/decoder-a64.cc b/qemu/disas/libvixl/a64/decoder-a64.cc
new file mode 100644
index 000000000..82591ca30
--- /dev/null
+++ b/qemu/disas/libvixl/a64/decoder-a64.cc
@@ -0,0 +1,707 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "globals.h"
+#include "utils.h"
+#include "a64/decoder-a64.h"
+
+namespace vixl {
+
+void Decoder::DecodeInstruction(const Instruction *instr) {
+  if (instr->Bits(28, 27) == 0) {
+    VisitUnallocated(instr);
+  } else {
+    switch (instr->Bits(27, 24)) {
+      // 0:   PC relative addressing.
+      case 0x0: DecodePCRelAddressing(instr); break;
+
+      // 1:   Add/sub immediate.
+      case 0x1: DecodeAddSubImmediate(instr); break;
+
+      // A:   Logical shifted register.
+      //      Add/sub with carry.
+      //      Conditional compare register.
+      //      Conditional compare immediate.
+      //      Conditional select.
+      //      Data processing 1 source.
+      //      Data processing 2 source.
+      // B:   Add/sub shifted register.
+      //      Add/sub extended register.
+      //      Data processing 3 source.
+      case 0xA:
+      case 0xB: DecodeDataProcessing(instr); break;
+
+      // 2:   Logical immediate.
+      //      Move wide immediate.
+      case 0x2: DecodeLogical(instr); break;
+
+      // 3:   Bitfield.
+      //      Extract.
+      case 0x3: DecodeBitfieldExtract(instr); break;
+
+      // 4:   Unconditional branch immediate.
+      //      Exception generation.
+      //      Compare and branch immediate.
+      // 5:   Compare and branch immediate.
+      //      Conditional branch.
+      //      System.
+      // 6,7: Unconditional branch.
+      //      Test and branch immediate.
+      case 0x4:
+      case 0x5:
+      case 0x6:
+      case 0x7: DecodeBranchSystemException(instr); break;
+
+      // 8,9: Load/store register pair post-index.
+      //      Load register literal.
+      //      Load/store register unscaled immediate.
+      //      Load/store register immediate post-index.
+      //      Load/store register immediate pre-index.
+      //      Load/store register offset.
+      //      Load/store exclusive.
+      // C,D: Load/store register pair offset.
+      //      Load/store register pair pre-index.
+      //      Load/store register unsigned immediate.
+      //      Advanced SIMD.
+      case 0x8:
+      case 0x9:
+      case 0xC:
+      case 0xD: DecodeLoadStore(instr); break;
+
+      // E:   FP fixed point conversion.
+      //      FP integer conversion.
+      //      FP data processing 1 source.
+      //      FP compare.
+      //      FP immediate.
+      //      FP data processing 2 source.
+      //      FP conditional compare.
+      //      FP conditional select.
+      //      Advanced SIMD.
+      // F:   FP data processing 3 source.
+      //      Advanced SIMD.
+      case 0xE:
+      case 0xF: DecodeFP(instr); break;
+    }
+  }
+}
+
+void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
+  visitors_.push_back(new_visitor);
+}
+
+
+void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
+  visitors_.push_front(new_visitor);
+}
+
+
+void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
+                                  DecoderVisitor* registered_visitor) {
+  std::list<DecoderVisitor*>::iterator it;
+  for (it = visitors_.begin(); it != visitors_.end(); it++) {
+    if (*it == registered_visitor) {
+      visitors_.insert(it, new_visitor);
+      return;
+    }
+  }
+  // We reached the end of the list. The last element must be
+  // registered_visitor.
+  VIXL_ASSERT(*it == registered_visitor);
+  visitors_.insert(it, new_visitor);
+}
+
+
+void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
+                                 DecoderVisitor* registered_visitor) {
+  std::list<DecoderVisitor*>::iterator it;
+  for (it = visitors_.begin(); it != visitors_.end(); it++) {
+    if (*it == registered_visitor) {
+      it++;
+      visitors_.insert(it, new_visitor);
+      return;
+    }
+  }
+  // We reached the end of the list. The last element must be
+  // registered_visitor.
+  VIXL_ASSERT(*it == registered_visitor);
+  visitors_.push_back(new_visitor);
+}
+
+
+void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
+  visitors_.remove(visitor);
+}
+
+
+void Decoder::DecodePCRelAddressing(const Instruction* instr) {
+  VIXL_ASSERT(instr->Bits(27, 24) == 0x0);
+  // We know bit 28 is set, as <28:27> = 0 is filtered out at the top level
+  // decode.
+ VIXL_ASSERT(instr->Bit(28) == 0x1); + VisitPCRelAddressing(instr); +} + + +void Decoder::DecodeBranchSystemException(const Instruction* instr) { + VIXL_ASSERT((instr->Bits(27, 24) == 0x4) || + (instr->Bits(27, 24) == 0x5) || + (instr->Bits(27, 24) == 0x6) || + (instr->Bits(27, 24) == 0x7) ); + + switch (instr->Bits(31, 29)) { + case 0: + case 4: { + VisitUnconditionalBranch(instr); + break; + } + case 1: + case 5: { + if (instr->Bit(25) == 0) { + VisitCompareBranch(instr); + } else { + VisitTestBranch(instr); + } + break; + } + case 2: { + if (instr->Bit(25) == 0) { + if ((instr->Bit(24) == 0x1) || + (instr->Mask(0x01000010) == 0x00000010)) { + VisitUnallocated(instr); + } else { + VisitConditionalBranch(instr); + } + } else { + VisitUnallocated(instr); + } + break; + } + case 6: { + if (instr->Bit(25) == 0) { + if (instr->Bit(24) == 0) { + if ((instr->Bits(4, 2) != 0) || + (instr->Mask(0x00E0001D) == 0x00200001) || + (instr->Mask(0x00E0001D) == 0x00400001) || + (instr->Mask(0x00E0001E) == 0x00200002) || + (instr->Mask(0x00E0001E) == 0x00400002) || + (instr->Mask(0x00E0001C) == 0x00600000) || + (instr->Mask(0x00E0001C) == 0x00800000) || + (instr->Mask(0x00E0001F) == 0x00A00000) || + (instr->Mask(0x00C0001C) == 0x00C00000)) { + VisitUnallocated(instr); + } else { + VisitException(instr); + } + } else { + if (instr->Bits(23, 22) == 0) { + const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0); + if ((instr->Bits(21, 19) == 0x4) || + (masked_003FF0E0 == 0x00033000) || + (masked_003FF0E0 == 0x003FF020) || + (masked_003FF0E0 == 0x003FF060) || + (masked_003FF0E0 == 0x003FF0E0) || + (instr->Mask(0x00388000) == 0x00008000) || + (instr->Mask(0x0038E000) == 0x00000000) || + (instr->Mask(0x0039E000) == 0x00002000) || + (instr->Mask(0x003AE000) == 0x00002000) || + (instr->Mask(0x003CE000) == 0x00042000) || + (instr->Mask(0x003FFFC0) == 0x000320C0) || + (instr->Mask(0x003FF100) == 0x00032100) || + (instr->Mask(0x003FF200) == 0x00032200) || + (instr->Mask(0x003FF400) == 0x00032400) || + (instr->Mask(0x003FF800) == 0x00032800) || + (instr->Mask(0x0038F000) == 0x00005000) || + (instr->Mask(0x0038E000) == 0x00006000)) { + VisitUnallocated(instr); + } else { + VisitSystem(instr); + } + } else { + VisitUnallocated(instr); + } + } + } else { + if ((instr->Bit(24) == 0x1) || + (instr->Bits(20, 16) != 0x1F) || + (instr->Bits(15, 10) != 0) || + (instr->Bits(4, 0) != 0) || + (instr->Bits(24, 21) == 0x3) || + (instr->Bits(24, 22) == 0x3)) { + VisitUnallocated(instr); + } else { + VisitUnconditionalBranchToRegister(instr); + } + } + break; + } + case 3: + case 7: { + VisitUnallocated(instr); + break; + } + } +} + + +void Decoder::DecodeLoadStore(const Instruction* instr) { + VIXL_ASSERT((instr->Bits(27, 24) == 0x8) || + (instr->Bits(27, 24) == 0x9) || + (instr->Bits(27, 24) == 0xC) || + (instr->Bits(27, 24) == 0xD) ); + + if (instr->Bit(24) == 0) { + if (instr->Bit(28) == 0) { + if (instr->Bit(29) == 0) { + if (instr->Bit(26) == 0) { + VisitLoadStoreExclusive(instr); + } else { + DecodeAdvSIMDLoadStore(instr); + } + } else { + if ((instr->Bits(31, 30) == 0x3) || + (instr->Mask(0xC4400000) == 0x40000000)) { + VisitUnallocated(instr); + } else { + if (instr->Bit(23) == 0) { + if (instr->Mask(0xC4400000) == 0xC0400000) { + VisitUnallocated(instr); + } else { + VisitLoadStorePairNonTemporal(instr); + } + } else { + VisitLoadStorePairPostIndex(instr); + } + } + } + } else { + if (instr->Bit(29) == 0) { + if (instr->Mask(0xC4000000) == 0xC4000000) { + VisitUnallocated(instr); + } else { + VisitLoadLiteral(instr); + } + 
} else { + if ((instr->Mask(0x84C00000) == 0x80C00000) || + (instr->Mask(0x44800000) == 0x44800000) || + (instr->Mask(0x84800000) == 0x84800000)) { + VisitUnallocated(instr); + } else { + if (instr->Bit(21) == 0) { + switch (instr->Bits(11, 10)) { + case 0: { + VisitLoadStoreUnscaledOffset(instr); + break; + } + case 1: { + if (instr->Mask(0xC4C00000) == 0xC0800000) { + VisitUnallocated(instr); + } else { + VisitLoadStorePostIndex(instr); + } + break; + } + case 2: { + // TODO: VisitLoadStoreRegisterOffsetUnpriv. + VisitUnimplemented(instr); + break; + } + case 3: { + if (instr->Mask(0xC4C00000) == 0xC0800000) { + VisitUnallocated(instr); + } else { + VisitLoadStorePreIndex(instr); + } + break; + } + } + } else { + if (instr->Bits(11, 10) == 0x2) { + if (instr->Bit(14) == 0) { + VisitUnallocated(instr); + } else { + VisitLoadStoreRegisterOffset(instr); + } + } else { + VisitUnallocated(instr); + } + } + } + } + } + } else { + if (instr->Bit(28) == 0) { + if (instr->Bit(29) == 0) { + VisitUnallocated(instr); + } else { + if ((instr->Bits(31, 30) == 0x3) || + (instr->Mask(0xC4400000) == 0x40000000)) { + VisitUnallocated(instr); + } else { + if (instr->Bit(23) == 0) { + VisitLoadStorePairOffset(instr); + } else { + VisitLoadStorePairPreIndex(instr); + } + } + } + } else { + if (instr->Bit(29) == 0) { + VisitUnallocated(instr); + } else { + if ((instr->Mask(0x84C00000) == 0x80C00000) || + (instr->Mask(0x44800000) == 0x44800000) || + (instr->Mask(0x84800000) == 0x84800000)) { + VisitUnallocated(instr); + } else { + VisitLoadStoreUnsignedOffset(instr); + } + } + } + } +} + + +void Decoder::DecodeLogical(const Instruction* instr) { + VIXL_ASSERT(instr->Bits(27, 24) == 0x2); + + if (instr->Mask(0x80400000) == 0x00400000) { + VisitUnallocated(instr); + } else { + if (instr->Bit(23) == 0) { + VisitLogicalImmediate(instr); + } else { + if (instr->Bits(30, 29) == 0x1) { + VisitUnallocated(instr); + } else { + VisitMoveWideImmediate(instr); + } + } + } +} + + +void Decoder::DecodeBitfieldExtract(const Instruction* instr) { + VIXL_ASSERT(instr->Bits(27, 24) == 0x3); + + if ((instr->Mask(0x80400000) == 0x80000000) || + (instr->Mask(0x80400000) == 0x00400000) || + (instr->Mask(0x80008000) == 0x00008000)) { + VisitUnallocated(instr); + } else if (instr->Bit(23) == 0) { + if ((instr->Mask(0x80200000) == 0x00200000) || + (instr->Mask(0x60000000) == 0x60000000)) { + VisitUnallocated(instr); + } else { + VisitBitfield(instr); + } + } else { + if ((instr->Mask(0x60200000) == 0x00200000) || + (instr->Mask(0x60000000) != 0x00000000)) { + VisitUnallocated(instr); + } else { + VisitExtract(instr); + } + } +} + + +void Decoder::DecodeAddSubImmediate(const Instruction* instr) { + VIXL_ASSERT(instr->Bits(27, 24) == 0x1); + if (instr->Bit(23) == 1) { + VisitUnallocated(instr); + } else { + VisitAddSubImmediate(instr); + } +} + + +void Decoder::DecodeDataProcessing(const Instruction* instr) { + VIXL_ASSERT((instr->Bits(27, 24) == 0xA) || + (instr->Bits(27, 24) == 0xB)); + + if (instr->Bit(24) == 0) { + if (instr->Bit(28) == 0) { + if (instr->Mask(0x80008000) == 0x00008000) { + VisitUnallocated(instr); + } else { + VisitLogicalShifted(instr); + } + } else { + switch (instr->Bits(23, 21)) { + case 0: { + if (instr->Mask(0x0000FC00) != 0) { + VisitUnallocated(instr); + } else { + VisitAddSubWithCarry(instr); + } + break; + } + case 2: { + if ((instr->Bit(29) == 0) || + (instr->Mask(0x00000410) != 0)) { + VisitUnallocated(instr); + } else { + if (instr->Bit(11) == 0) { + VisitConditionalCompareRegister(instr); + } else { + 
VisitConditionalCompareImmediate(instr); + } + } + break; + } + case 4: { + if (instr->Mask(0x20000800) != 0x00000000) { + VisitUnallocated(instr); + } else { + VisitConditionalSelect(instr); + } + break; + } + case 6: { + if (instr->Bit(29) == 0x1) { + VisitUnallocated(instr); + } else { + if (instr->Bit(30) == 0) { + if ((instr->Bit(15) == 0x1) || + (instr->Bits(15, 11) == 0) || + (instr->Bits(15, 12) == 0x1) || + (instr->Bits(15, 12) == 0x3) || + (instr->Bits(15, 13) == 0x3) || + (instr->Mask(0x8000EC00) == 0x00004C00) || + (instr->Mask(0x8000E800) == 0x80004000) || + (instr->Mask(0x8000E400) == 0x80004000)) { + VisitUnallocated(instr); + } else { + VisitDataProcessing2Source(instr); + } + } else { + if ((instr->Bit(13) == 1) || + (instr->Bits(20, 16) != 0) || + (instr->Bits(15, 14) != 0) || + (instr->Mask(0xA01FFC00) == 0x00000C00) || + (instr->Mask(0x201FF800) == 0x00001800)) { + VisitUnallocated(instr); + } else { + VisitDataProcessing1Source(instr); + } + } + break; + } + } + case 1: + case 3: + case 5: + case 7: VisitUnallocated(instr); break; + } + } + } else { + if (instr->Bit(28) == 0) { + if (instr->Bit(21) == 0) { + if ((instr->Bits(23, 22) == 0x3) || + (instr->Mask(0x80008000) == 0x00008000)) { + VisitUnallocated(instr); + } else { + VisitAddSubShifted(instr); + } + } else { + if ((instr->Mask(0x00C00000) != 0x00000000) || + (instr->Mask(0x00001400) == 0x00001400) || + (instr->Mask(0x00001800) == 0x00001800)) { + VisitUnallocated(instr); + } else { + VisitAddSubExtended(instr); + } + } + } else { + if ((instr->Bit(30) == 0x1) || + (instr->Bits(30, 29) == 0x1) || + (instr->Mask(0xE0600000) == 0x00200000) || + (instr->Mask(0xE0608000) == 0x00400000) || + (instr->Mask(0x60608000) == 0x00408000) || + (instr->Mask(0x60E00000) == 0x00E00000) || + (instr->Mask(0x60E00000) == 0x00800000) || + (instr->Mask(0x60E00000) == 0x00600000)) { + VisitUnallocated(instr); + } else { + VisitDataProcessing3Source(instr); + } + } + } +} + + +void Decoder::DecodeFP(const Instruction* instr) { + VIXL_ASSERT((instr->Bits(27, 24) == 0xE) || + (instr->Bits(27, 24) == 0xF)); + + if (instr->Bit(28) == 0) { + DecodeAdvSIMDDataProcessing(instr); + } else { + if (instr->Bit(29) == 1) { + VisitUnallocated(instr); + } else { + if (instr->Bits(31, 30) == 0x3) { + VisitUnallocated(instr); + } else if (instr->Bits(31, 30) == 0x1) { + DecodeAdvSIMDDataProcessing(instr); + } else { + if (instr->Bit(24) == 0) { + if (instr->Bit(21) == 0) { + if ((instr->Bit(23) == 1) || + (instr->Bit(18) == 1) || + (instr->Mask(0x80008000) == 0x00000000) || + (instr->Mask(0x000E0000) == 0x00000000) || + (instr->Mask(0x000E0000) == 0x000A0000) || + (instr->Mask(0x00160000) == 0x00000000) || + (instr->Mask(0x00160000) == 0x00120000)) { + VisitUnallocated(instr); + } else { + VisitFPFixedPointConvert(instr); + } + } else { + if (instr->Bits(15, 10) == 32) { + VisitUnallocated(instr); + } else if (instr->Bits(15, 10) == 0) { + if ((instr->Bits(23, 22) == 0x3) || + (instr->Mask(0x000E0000) == 0x000A0000) || + (instr->Mask(0x000E0000) == 0x000C0000) || + (instr->Mask(0x00160000) == 0x00120000) || + (instr->Mask(0x00160000) == 0x00140000) || + (instr->Mask(0x20C40000) == 0x00800000) || + (instr->Mask(0x20C60000) == 0x00840000) || + (instr->Mask(0xA0C60000) == 0x80060000) || + (instr->Mask(0xA0C60000) == 0x00860000) || + (instr->Mask(0xA0C60000) == 0x00460000) || + (instr->Mask(0xA0CE0000) == 0x80860000) || + (instr->Mask(0xA0CE0000) == 0x804E0000) || + (instr->Mask(0xA0CE0000) == 0x000E0000) || + (instr->Mask(0xA0D60000) == 0x00160000) || 
+ (instr->Mask(0xA0D60000) == 0x80560000) || + (instr->Mask(0xA0D60000) == 0x80960000)) { + VisitUnallocated(instr); + } else { + VisitFPIntegerConvert(instr); + } + } else if (instr->Bits(14, 10) == 16) { + const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000); + if ((instr->Mask(0x80180000) != 0) || + (masked_A0DF8000 == 0x00020000) || + (masked_A0DF8000 == 0x00030000) || + (masked_A0DF8000 == 0x00068000) || + (masked_A0DF8000 == 0x00428000) || + (masked_A0DF8000 == 0x00430000) || + (masked_A0DF8000 == 0x00468000) || + (instr->Mask(0xA0D80000) == 0x00800000) || + (instr->Mask(0xA0DE0000) == 0x00C00000) || + (instr->Mask(0xA0DF0000) == 0x00C30000) || + (instr->Mask(0xA0DC0000) == 0x00C40000)) { + VisitUnallocated(instr); + } else { + VisitFPDataProcessing1Source(instr); + } + } else if (instr->Bits(13, 10) == 8) { + if ((instr->Bits(15, 14) != 0) || + (instr->Bits(2, 0) != 0) || + (instr->Mask(0x80800000) != 0x00000000)) { + VisitUnallocated(instr); + } else { + VisitFPCompare(instr); + } + } else if (instr->Bits(12, 10) == 4) { + if ((instr->Bits(9, 5) != 0) || + (instr->Mask(0x80800000) != 0x00000000)) { + VisitUnallocated(instr); + } else { + VisitFPImmediate(instr); + } + } else { + if (instr->Mask(0x80800000) != 0x00000000) { + VisitUnallocated(instr); + } else { + switch (instr->Bits(11, 10)) { + case 1: { + VisitFPConditionalCompare(instr); + break; + } + case 2: { + if ((instr->Bits(15, 14) == 0x3) || + (instr->Mask(0x00009000) == 0x00009000) || + (instr->Mask(0x0000A000) == 0x0000A000)) { + VisitUnallocated(instr); + } else { + VisitFPDataProcessing2Source(instr); + } + break; + } + case 3: { + VisitFPConditionalSelect(instr); + break; + } + default: VIXL_UNREACHABLE(); + } + } + } + } + } else { + // Bit 30 == 1 has been handled earlier. + VIXL_ASSERT(instr->Bit(30) == 0); + if (instr->Mask(0xA0800000) != 0) { + VisitUnallocated(instr); + } else { + VisitFPDataProcessing3Source(instr); + } + } + } + } + } +} + + +void Decoder::DecodeAdvSIMDLoadStore(const Instruction* instr) { + // TODO: Implement Advanced SIMD load/store instruction decode. + VIXL_ASSERT(instr->Bits(29, 25) == 0x6); + VisitUnimplemented(instr); +} + + +void Decoder::DecodeAdvSIMDDataProcessing(const Instruction* instr) { + // TODO: Implement Advanced SIMD data processing instruction decode. + VIXL_ASSERT(instr->Bits(27, 25) == 0x7); + VisitUnimplemented(instr); +} + + +#define DEFINE_VISITOR_CALLERS(A) \ + void Decoder::Visit##A(const Instruction *instr) { \ + VIXL_ASSERT(instr->Mask(A##FMask) == A##Fixed); \ + std::list::iterator it; \ + for (it = visitors_.begin(); it != visitors_.end(); it++) { \ + (*it)->Visit##A(instr); \ + } \ + } +VISITOR_LIST(DEFINE_VISITOR_CALLERS) +#undef DEFINE_VISITOR_CALLERS +} // namespace vixl diff --git a/qemu/disas/libvixl/a64/decoder-a64.h b/qemu/disas/libvixl/a64/decoder-a64.h new file mode 100644 index 000000000..fd08d6c1f --- /dev/null +++ b/qemu/disas/libvixl/a64/decoder-a64.h @@ -0,0 +1,239 @@ +// Copyright 2013, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_DECODER_A64_H_
+#define VIXL_A64_DECODER_A64_H_
+
+#include <list>
+
+#include "globals.h"
+#include "a64/instructions-a64.h"
+
+
+// List macro containing all visitors needed by the decoder class.
+
+#define VISITOR_LIST(V) \
+  V(PCRelAddressing) \
+  V(AddSubImmediate) \
+  V(LogicalImmediate) \
+  V(MoveWideImmediate) \
+  V(Bitfield) \
+  V(Extract) \
+  V(UnconditionalBranch) \
+  V(UnconditionalBranchToRegister) \
+  V(CompareBranch) \
+  V(TestBranch) \
+  V(ConditionalBranch) \
+  V(System) \
+  V(Exception) \
+  V(LoadStorePairPostIndex) \
+  V(LoadStorePairOffset) \
+  V(LoadStorePairPreIndex) \
+  V(LoadStorePairNonTemporal) \
+  V(LoadLiteral) \
+  V(LoadStoreUnscaledOffset) \
+  V(LoadStorePostIndex) \
+  V(LoadStorePreIndex) \
+  V(LoadStoreRegisterOffset) \
+  V(LoadStoreUnsignedOffset) \
+  V(LoadStoreExclusive) \
+  V(LogicalShifted) \
+  V(AddSubShifted) \
+  V(AddSubExtended) \
+  V(AddSubWithCarry) \
+  V(ConditionalCompareRegister) \
+  V(ConditionalCompareImmediate) \
+  V(ConditionalSelect) \
+  V(DataProcessing1Source) \
+  V(DataProcessing2Source) \
+  V(DataProcessing3Source) \
+  V(FPCompare) \
+  V(FPConditionalCompare) \
+  V(FPConditionalSelect) \
+  V(FPImmediate) \
+  V(FPDataProcessing1Source) \
+  V(FPDataProcessing2Source) \
+  V(FPDataProcessing3Source) \
+  V(FPIntegerConvert) \
+  V(FPFixedPointConvert) \
+  V(Unallocated) \
+  V(Unimplemented)
+
+namespace vixl {
+
+// The Visitor interface. Disassembler and simulator (and other tools)
+// must provide implementations for all of these functions.
+class DecoderVisitor {
+ public:
+  enum VisitorConstness {
+    kConstVisitor,
+    kNonConstVisitor
+  };
+  explicit DecoderVisitor(VisitorConstness constness = kConstVisitor)
+      : constness_(constness) {}
+
+  virtual ~DecoderVisitor() {}
+
+  #define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
+  VISITOR_LIST(DECLARE)
+  #undef DECLARE
+
+  bool IsConstVisitor() const { return constness_ == kConstVisitor; }
+  Instruction* MutableInstruction(const Instruction* instr) {
+    VIXL_ASSERT(!IsConstVisitor());
+    return const_cast<Instruction*>(instr);
+  }
+
+ private:
+  const VisitorConstness constness_;
+};
+
+
+class Decoder {
+ public:
+  Decoder() {}
+
+  // Top-level wrappers around the actual decoding function.
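As an illustrative aside (not part of the imported sources), a caller typically drives this interface by registering a visitor and then handing instructions to the top-level wrappers declared just below. The sketch assumes that the Disassembler declared in disasm-a64.h implements DecoderVisitor, as its Visit* methods later in this patch suggest:

    #include <cstdio>
    #include "a64/decoder-a64.h"
    #include "a64/disasm-a64.h"

    static void DisassembleOne(const void* code_address) {
      vixl::Decoder decoder;
      vixl::Disassembler disasm;
      decoder.AppendVisitor(&disasm);      // called for every decoded instruction
      const vixl::Instruction* instr =
          reinterpret_cast<const vixl::Instruction*>(code_address);
      decoder.Decode(instr);               // dispatches to disasm.Visit...()
      printf("%s\n", disasm.GetOutput());  // text produced by the visitor
    }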
+  void Decode(const Instruction* instr) {
+    std::list<DecoderVisitor*>::iterator it;
+    for (it = visitors_.begin(); it != visitors_.end(); it++) {
+      VIXL_ASSERT((*it)->IsConstVisitor());
+    }
+    DecodeInstruction(instr);
+  }
+  void Decode(Instruction* instr) {
+    DecodeInstruction(const_cast<const Instruction*>(instr));
+  }
+
+  // Register a new visitor class with the decoder.
+  // Decode() will call the corresponding visitor method from all registered
+  // visitor classes when decoding reaches the leaf node of the instruction
+  // decode tree.
+  // Visitors are called in order.
+  // A visitor can be registered multiple times.
+  //
+  //   d.AppendVisitor(V1);
+  //   d.AppendVisitor(V2);
+  //   d.PrependVisitor(V2);
+  //   d.AppendVisitor(V3);
+  //
+  //   d.Decode(i);
+  //
+  // will call in order visitor methods in V2, V1, V2, V3.
+  void AppendVisitor(DecoderVisitor* visitor);
+  void PrependVisitor(DecoderVisitor* visitor);
+  // These helpers register `new_visitor` before or after the first instance of
+  // `registered_visitor` in the list.
+  // So if
+  //   V1, V2, V1, V2
+  // are registered in this order in the decoder, calls to
+  //   d.InsertVisitorAfter(V3, V1);
+  //   d.InsertVisitorBefore(V4, V2);
+  // will yield the order
+  //   V1, V3, V4, V2, V1, V2
+  //
+  // For more complex modifications of the order of registered visitors, one can
+  // directly access and modify the list of visitors via the `visitors()'
+  // accessor.
+  void InsertVisitorBefore(DecoderVisitor* new_visitor,
+                           DecoderVisitor* registered_visitor);
+  void InsertVisitorAfter(DecoderVisitor* new_visitor,
+                          DecoderVisitor* registered_visitor);
+
+  // Remove all instances of a previously registered visitor class from the list
+  // of visitors stored by the decoder.
+  void RemoveVisitor(DecoderVisitor* visitor);
+
+  #define DECLARE(A) void Visit##A(const Instruction* instr);
+  VISITOR_LIST(DECLARE)
+  #undef DECLARE
+
+
+  std::list<DecoderVisitor*>* visitors() { return &visitors_; }
+
+ private:
+  // Decodes an instruction and calls the visitor functions registered with the
+  // Decoder class.
+  void DecodeInstruction(const Instruction* instr);
+
+  // Decode the PC relative addressing instruction, and call the corresponding
+  // visitors.
+  // On entry, instruction bits 27:24 = 0x0.
+  void DecodePCRelAddressing(const Instruction* instr);
+
+  // Decode the add/subtract immediate instruction, and call the corresponding
+  // visitors.
+  // On entry, instruction bits 27:24 = 0x1.
+  void DecodeAddSubImmediate(const Instruction* instr);
+
+  // Decode the branch, system command, and exception generation parts of
+  // the instruction tree, and call the corresponding visitors.
+  // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
+  void DecodeBranchSystemException(const Instruction* instr);
+
+  // Decode the load and store parts of the instruction tree, and call
+  // the corresponding visitors.
+  // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
+  void DecodeLoadStore(const Instruction* instr);
+
+  // Decode the logical immediate and move wide immediate parts of the
+  // instruction tree, and call the corresponding visitors.
+  // On entry, instruction bits 27:24 = 0x2.
+  void DecodeLogical(const Instruction* instr);
+
+  // Decode the bitfield and extraction parts of the instruction tree,
+  // and call the corresponding visitors.
+  // On entry, instruction bits 27:24 = 0x3.
+  void DecodeBitfieldExtract(const Instruction* instr);
+
+  // Decode the data processing parts of the instruction tree, and call the
+  // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}. + void DecodeDataProcessing(const Instruction* instr); + + // Decode the floating point parts of the instruction tree, and call the + // corresponding visitors. + // On entry, instruction bits 27:24 = {0xE, 0xF}. + void DecodeFP(const Instruction* instr); + + // Decode the Advanced SIMD (NEON) load/store part of the instruction tree, + // and call the corresponding visitors. + // On entry, instruction bits 29:25 = 0x6. + void DecodeAdvSIMDLoadStore(const Instruction* instr); + + // Decode the Advanced SIMD (NEON) data processing part of the instruction + // tree, and call the corresponding visitors. + // On entry, instruction bits 27:25 = 0x7. + void DecodeAdvSIMDDataProcessing(const Instruction* instr); + + private: + // Visitors are registered in a list. + std::list visitors_; +}; + +} // namespace vixl + +#endif // VIXL_A64_DECODER_A64_H_ diff --git a/qemu/disas/libvixl/a64/disasm-a64.cc b/qemu/disas/libvixl/a64/disasm-a64.cc new file mode 100644 index 000000000..f7bc2468b --- /dev/null +++ b/qemu/disas/libvixl/a64/disasm-a64.cc @@ -0,0 +1,1954 @@ +// Copyright 2013, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include +#include "a64/disasm-a64.h" + +namespace vixl { + +Disassembler::Disassembler() { + buffer_size_ = 256; + buffer_ = reinterpret_cast(malloc(buffer_size_)); + buffer_pos_ = 0; + own_buffer_ = true; + code_address_offset_ = 0; +} + + +Disassembler::Disassembler(char* text_buffer, int buffer_size) { + buffer_size_ = buffer_size; + buffer_ = text_buffer; + buffer_pos_ = 0; + own_buffer_ = false; + code_address_offset_ = 0; +} + + +Disassembler::~Disassembler() { + if (own_buffer_) { + free(buffer_); + } +} + + +char* Disassembler::GetOutput() { + return buffer_; +} + + +void Disassembler::VisitAddSubImmediate(const Instruction* instr) { + bool rd_is_zr = RdIsZROrSP(instr); + bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) && + (instr->ImmAddSub() == 0) ? 
true : false; + const char *mnemonic = ""; + const char *form = "'Rds, 'Rns, 'IAddSub"; + const char *form_cmp = "'Rns, 'IAddSub"; + const char *form_mov = "'Rds, 'Rns"; + + switch (instr->Mask(AddSubImmediateMask)) { + case ADD_w_imm: + case ADD_x_imm: { + mnemonic = "add"; + if (stack_op) { + mnemonic = "mov"; + form = form_mov; + } + break; + } + case ADDS_w_imm: + case ADDS_x_imm: { + mnemonic = "adds"; + if (rd_is_zr) { + mnemonic = "cmn"; + form = form_cmp; + } + break; + } + case SUB_w_imm: + case SUB_x_imm: mnemonic = "sub"; break; + case SUBS_w_imm: + case SUBS_x_imm: { + mnemonic = "subs"; + if (rd_is_zr) { + mnemonic = "cmp"; + form = form_cmp; + } + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitAddSubShifted(const Instruction* instr) { + bool rd_is_zr = RdIsZROrSP(instr); + bool rn_is_zr = RnIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm'HDP"; + const char *form_cmp = "'Rn, 'Rm'HDP"; + const char *form_neg = "'Rd, 'Rm'HDP"; + + switch (instr->Mask(AddSubShiftedMask)) { + case ADD_w_shift: + case ADD_x_shift: mnemonic = "add"; break; + case ADDS_w_shift: + case ADDS_x_shift: { + mnemonic = "adds"; + if (rd_is_zr) { + mnemonic = "cmn"; + form = form_cmp; + } + break; + } + case SUB_w_shift: + case SUB_x_shift: { + mnemonic = "sub"; + if (rn_is_zr) { + mnemonic = "neg"; + form = form_neg; + } + break; + } + case SUBS_w_shift: + case SUBS_x_shift: { + mnemonic = "subs"; + if (rd_is_zr) { + mnemonic = "cmp"; + form = form_cmp; + } else if (rn_is_zr) { + mnemonic = "negs"; + form = form_neg; + } + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitAddSubExtended(const Instruction* instr) { + bool rd_is_zr = RdIsZROrSP(instr); + const char *mnemonic = ""; + Extend mode = static_cast(instr->ExtendMode()); + const char *form = ((mode == UXTX) || (mode == SXTX)) ? + "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext"; + const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ? 
+ "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext"; + + switch (instr->Mask(AddSubExtendedMask)) { + case ADD_w_ext: + case ADD_x_ext: mnemonic = "add"; break; + case ADDS_w_ext: + case ADDS_x_ext: { + mnemonic = "adds"; + if (rd_is_zr) { + mnemonic = "cmn"; + form = form_cmp; + } + break; + } + case SUB_w_ext: + case SUB_x_ext: mnemonic = "sub"; break; + case SUBS_w_ext: + case SUBS_x_ext: { + mnemonic = "subs"; + if (rd_is_zr) { + mnemonic = "cmp"; + form = form_cmp; + } + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitAddSubWithCarry(const Instruction* instr) { + bool rn_is_zr = RnIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm"; + const char *form_neg = "'Rd, 'Rm"; + + switch (instr->Mask(AddSubWithCarryMask)) { + case ADC_w: + case ADC_x: mnemonic = "adc"; break; + case ADCS_w: + case ADCS_x: mnemonic = "adcs"; break; + case SBC_w: + case SBC_x: { + mnemonic = "sbc"; + if (rn_is_zr) { + mnemonic = "ngc"; + form = form_neg; + } + break; + } + case SBCS_w: + case SBCS_x: { + mnemonic = "sbcs"; + if (rn_is_zr) { + mnemonic = "ngcs"; + form = form_neg; + } + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLogicalImmediate(const Instruction* instr) { + bool rd_is_zr = RdIsZROrSP(instr); + bool rn_is_zr = RnIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Rds, 'Rn, 'ITri"; + + if (instr->ImmLogical() == 0) { + // The immediate encoded in the instruction is not in the expected format. + Format(instr, "unallocated", "(LogicalImmediate)"); + return; + } + + switch (instr->Mask(LogicalImmediateMask)) { + case AND_w_imm: + case AND_x_imm: mnemonic = "and"; break; + case ORR_w_imm: + case ORR_x_imm: { + mnemonic = "orr"; + unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize + : kWRegSize; + if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) { + mnemonic = "mov"; + form = "'Rds, 'ITri"; + } + break; + } + case EOR_w_imm: + case EOR_x_imm: mnemonic = "eor"; break; + case ANDS_w_imm: + case ANDS_x_imm: { + mnemonic = "ands"; + if (rd_is_zr) { + mnemonic = "tst"; + form = "'Rn, 'ITri"; + } + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) { + VIXL_ASSERT((reg_size == kXRegSize) || + ((reg_size == kWRegSize) && (value <= 0xffffffff))); + + // Test for movz: 16 bits set at positions 0, 16, 32 or 48. + if (((value & UINT64_C(0xffffffffffff0000)) == 0) || + ((value & UINT64_C(0xffffffff0000ffff)) == 0) || + ((value & UINT64_C(0xffff0000ffffffff)) == 0) || + ((value & UINT64_C(0x0000ffffffffffff)) == 0)) { + return true; + } + + // Test for movn: NOT(16 bits set at positions 0, 16, 32 or 48). 
+ if ((reg_size == kXRegSize) && + (((~value & UINT64_C(0xffffffffffff0000)) == 0) || + ((~value & UINT64_C(0xffffffff0000ffff)) == 0) || + ((~value & UINT64_C(0xffff0000ffffffff)) == 0) || + ((~value & UINT64_C(0x0000ffffffffffff)) == 0))) { + return true; + } + if ((reg_size == kWRegSize) && + (((value & 0xffff0000) == 0xffff0000) || + ((value & 0x0000ffff) == 0x0000ffff))) { + return true; + } + return false; +} + + +void Disassembler::VisitLogicalShifted(const Instruction* instr) { + bool rd_is_zr = RdIsZROrSP(instr); + bool rn_is_zr = RnIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm'HLo"; + + switch (instr->Mask(LogicalShiftedMask)) { + case AND_w: + case AND_x: mnemonic = "and"; break; + case BIC_w: + case BIC_x: mnemonic = "bic"; break; + case EOR_w: + case EOR_x: mnemonic = "eor"; break; + case EON_w: + case EON_x: mnemonic = "eon"; break; + case BICS_w: + case BICS_x: mnemonic = "bics"; break; + case ANDS_w: + case ANDS_x: { + mnemonic = "ands"; + if (rd_is_zr) { + mnemonic = "tst"; + form = "'Rn, 'Rm'HLo"; + } + break; + } + case ORR_w: + case ORR_x: { + mnemonic = "orr"; + if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) { + mnemonic = "mov"; + form = "'Rd, 'Rm"; + } + break; + } + case ORN_w: + case ORN_x: { + mnemonic = "orn"; + if (rn_is_zr) { + mnemonic = "mvn"; + form = "'Rd, 'Rm'HLo"; + } + break; + } + default: VIXL_UNREACHABLE(); + } + + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitConditionalCompareRegister(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Rn, 'Rm, 'INzcv, 'Cond"; + + switch (instr->Mask(ConditionalCompareRegisterMask)) { + case CCMN_w: + case CCMN_x: mnemonic = "ccmn"; break; + case CCMP_w: + case CCMP_x: mnemonic = "ccmp"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitConditionalCompareImmediate(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Rn, 'IP, 'INzcv, 'Cond"; + + switch (instr->Mask(ConditionalCompareImmediateMask)) { + case CCMN_w_imm: + case CCMN_x_imm: mnemonic = "ccmn"; break; + case CCMP_w_imm: + case CCMP_x_imm: mnemonic = "ccmp"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitConditionalSelect(const Instruction* instr) { + bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr)); + bool rn_is_rm = (instr->Rn() == instr->Rm()); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm, 'Cond"; + const char *form_test = "'Rd, 'CInv"; + const char *form_update = "'Rd, 'Rn, 'CInv"; + + Condition cond = static_cast(instr->Condition()); + bool invertible_cond = (cond != al) && (cond != nv); + + switch (instr->Mask(ConditionalSelectMask)) { + case CSEL_w: + case CSEL_x: mnemonic = "csel"; break; + case CSINC_w: + case CSINC_x: { + mnemonic = "csinc"; + if (rnm_is_zr && invertible_cond) { + mnemonic = "cset"; + form = form_test; + } else if (rn_is_rm && invertible_cond) { + mnemonic = "cinc"; + form = form_update; + } + break; + } + case CSINV_w: + case CSINV_x: { + mnemonic = "csinv"; + if (rnm_is_zr && invertible_cond) { + mnemonic = "csetm"; + form = form_test; + } else if (rn_is_rm && invertible_cond) { + mnemonic = "cinv"; + form = form_update; + } + break; + } + case CSNEG_w: + case CSNEG_x: { + mnemonic = "csneg"; + if (rn_is_rm && invertible_cond) { + mnemonic = "cneg"; + form = form_update; + } + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, 
form); +} + + +void Disassembler::VisitBitfield(const Instruction* instr) { + unsigned s = instr->ImmS(); + unsigned r = instr->ImmR(); + unsigned rd_size_minus_1 = + ((instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1; + const char *mnemonic = ""; + const char *form = ""; + const char *form_shift_right = "'Rd, 'Rn, 'IBr"; + const char *form_extend = "'Rd, 'Wn"; + const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1"; + const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1"; + const char *form_lsl = "'Rd, 'Rn, 'IBZ-r"; + + switch (instr->Mask(BitfieldMask)) { + case SBFM_w: + case SBFM_x: { + mnemonic = "sbfx"; + form = form_bfx; + if (r == 0) { + form = form_extend; + if (s == 7) { + mnemonic = "sxtb"; + } else if (s == 15) { + mnemonic = "sxth"; + } else if ((s == 31) && (instr->SixtyFourBits() == 1)) { + mnemonic = "sxtw"; + } else { + form = form_bfx; + } + } else if (s == rd_size_minus_1) { + mnemonic = "asr"; + form = form_shift_right; + } else if (s < r) { + mnemonic = "sbfiz"; + form = form_bfiz; + } + break; + } + case UBFM_w: + case UBFM_x: { + mnemonic = "ubfx"; + form = form_bfx; + if (r == 0) { + form = form_extend; + if (s == 7) { + mnemonic = "uxtb"; + } else if (s == 15) { + mnemonic = "uxth"; + } else { + form = form_bfx; + } + } + if (s == rd_size_minus_1) { + mnemonic = "lsr"; + form = form_shift_right; + } else if (r == s + 1) { + mnemonic = "lsl"; + form = form_lsl; + } else if (s < r) { + mnemonic = "ubfiz"; + form = form_bfiz; + } + break; + } + case BFM_w: + case BFM_x: { + mnemonic = "bfxil"; + form = form_bfx; + if (s < r) { + mnemonic = "bfi"; + form = form_bfiz; + } + } + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitExtract(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm, 'IExtract"; + + switch (instr->Mask(ExtractMask)) { + case EXTR_w: + case EXTR_x: { + if (instr->Rn() == instr->Rm()) { + mnemonic = "ror"; + form = "'Rd, 'Rn, 'IExtract"; + } else { + mnemonic = "extr"; + } + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitPCRelAddressing(const Instruction* instr) { + switch (instr->Mask(PCRelAddressingMask)) { + case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break; + case ADRP: Format(instr, "adrp", "'Xd, 'AddrPCRelPage"); break; + default: Format(instr, "unimplemented", "(PCRelAddressing)"); + } +} + + +void Disassembler::VisitConditionalBranch(const Instruction* instr) { + switch (instr->Mask(ConditionalBranchMask)) { + case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break; + default: VIXL_UNREACHABLE(); + } +} + + +void Disassembler::VisitUnconditionalBranchToRegister( + const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Xn"; + + switch (instr->Mask(UnconditionalBranchToRegisterMask)) { + case BR: mnemonic = "br"; break; + case BLR: mnemonic = "blr"; break; + case RET: { + mnemonic = "ret"; + if (instr->Rn() == kLinkRegCode) { + form = NULL; + } + break; + } + default: form = "(UnconditionalBranchToRegister)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitUnconditionalBranch(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'BImmUncn"; + + switch (instr->Mask(UnconditionalBranchMask)) { + case B: mnemonic = "b"; break; + case BL: mnemonic = "bl"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitDataProcessing1Source(const Instruction* instr) { + const char *mnemonic = 
""; + const char *form = "'Rd, 'Rn"; + + switch (instr->Mask(DataProcessing1SourceMask)) { + #define FORMAT(A, B) \ + case A##_w: \ + case A##_x: mnemonic = B; break; + FORMAT(RBIT, "rbit"); + FORMAT(REV16, "rev16"); + FORMAT(REV, "rev"); + FORMAT(CLZ, "clz"); + FORMAT(CLS, "cls"); + #undef FORMAT + case REV32_x: mnemonic = "rev32"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitDataProcessing2Source(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Rd, 'Rn, 'Rm"; + + switch (instr->Mask(DataProcessing2SourceMask)) { + #define FORMAT(A, B) \ + case A##_w: \ + case A##_x: mnemonic = B; break; + FORMAT(UDIV, "udiv"); + FORMAT(SDIV, "sdiv"); + FORMAT(LSLV, "lsl"); + FORMAT(LSRV, "lsr"); + FORMAT(ASRV, "asr"); + FORMAT(RORV, "ror"); + #undef FORMAT + default: form = "(DataProcessing2Source)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitDataProcessing3Source(const Instruction* instr) { + bool ra_is_zr = RaIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Xd, 'Wn, 'Wm, 'Xa"; + const char *form_rrr = "'Rd, 'Rn, 'Rm"; + const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra"; + const char *form_xww = "'Xd, 'Wn, 'Wm"; + const char *form_xxx = "'Xd, 'Xn, 'Xm"; + + switch (instr->Mask(DataProcessing3SourceMask)) { + case MADD_w: + case MADD_x: { + mnemonic = "madd"; + form = form_rrrr; + if (ra_is_zr) { + mnemonic = "mul"; + form = form_rrr; + } + break; + } + case MSUB_w: + case MSUB_x: { + mnemonic = "msub"; + form = form_rrrr; + if (ra_is_zr) { + mnemonic = "mneg"; + form = form_rrr; + } + break; + } + case SMADDL_x: { + mnemonic = "smaddl"; + if (ra_is_zr) { + mnemonic = "smull"; + form = form_xww; + } + break; + } + case SMSUBL_x: { + mnemonic = "smsubl"; + if (ra_is_zr) { + mnemonic = "smnegl"; + form = form_xww; + } + break; + } + case UMADDL_x: { + mnemonic = "umaddl"; + if (ra_is_zr) { + mnemonic = "umull"; + form = form_xww; + } + break; + } + case UMSUBL_x: { + mnemonic = "umsubl"; + if (ra_is_zr) { + mnemonic = "umnegl"; + form = form_xww; + } + break; + } + case SMULH_x: { + mnemonic = "smulh"; + form = form_xxx; + break; + } + case UMULH_x: { + mnemonic = "umulh"; + form = form_xxx; + break; + } + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitCompareBranch(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Rt, 'BImmCmpa"; + + switch (instr->Mask(CompareBranchMask)) { + case CBZ_w: + case CBZ_x: mnemonic = "cbz"; break; + case CBNZ_w: + case CBNZ_x: mnemonic = "cbnz"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitTestBranch(const Instruction* instr) { + const char *mnemonic = ""; + // If the top bit of the immediate is clear, the tested register is + // disassembled as Wt, otherwise Xt. As the top bit of the immediate is + // encoded in bit 31 of the instruction, we can reuse the Rt form, which + // uses bit 31 (normally "sf") to choose the register size. 
+ const char *form = "'Rt, 'IS, 'BImmTest"; + + switch (instr->Mask(TestBranchMask)) { + case TBZ: mnemonic = "tbz"; break; + case TBNZ: mnemonic = "tbnz"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitMoveWideImmediate(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Rd, 'IMoveImm"; + + // Print the shift separately for movk, to make it clear which half word will + // be overwritten. Movn and movz print the computed immediate, which includes + // shift calculation. + switch (instr->Mask(MoveWideImmediateMask)) { + case MOVN_w: + case MOVN_x: + if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) { + if ((instr->SixtyFourBits() == 0) && (instr->ImmMoveWide() == 0xffff)) { + mnemonic = "movn"; + } else { + mnemonic = "mov"; + form = "'Rd, 'IMoveNeg"; + } + } else { + mnemonic = "movn"; + } + break; + case MOVZ_w: + case MOVZ_x: + if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) + mnemonic = "mov"; + else + mnemonic = "movz"; + break; + case MOVK_w: + case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +#define LOAD_STORE_LIST(V) \ + V(STRB_w, "strb", "'Wt") \ + V(STRH_w, "strh", "'Wt") \ + V(STR_w, "str", "'Wt") \ + V(STR_x, "str", "'Xt") \ + V(LDRB_w, "ldrb", "'Wt") \ + V(LDRH_w, "ldrh", "'Wt") \ + V(LDR_w, "ldr", "'Wt") \ + V(LDR_x, "ldr", "'Xt") \ + V(LDRSB_x, "ldrsb", "'Xt") \ + V(LDRSH_x, "ldrsh", "'Xt") \ + V(LDRSW_x, "ldrsw", "'Xt") \ + V(LDRSB_w, "ldrsb", "'Wt") \ + V(LDRSH_w, "ldrsh", "'Wt") \ + V(STR_s, "str", "'St") \ + V(STR_d, "str", "'Dt") \ + V(LDR_s, "ldr", "'St") \ + V(LDR_d, "ldr", "'Dt") + +void Disassembler::VisitLoadStorePreIndex(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePreIndex)"; + + switch (instr->Mask(LoadStorePreIndexMask)) { + #define LS_PREINDEX(A, B, C) \ + case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break; + LOAD_STORE_LIST(LS_PREINDEX) + #undef LS_PREINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePostIndex(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePostIndex)"; + + switch (instr->Mask(LoadStorePostIndexMask)) { + #define LS_POSTINDEX(A, B, C) \ + case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break; + LOAD_STORE_LIST(LS_POSTINDEX) + #undef LS_POSTINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStoreUnsignedOffset(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStoreUnsignedOffset)"; + + switch (instr->Mask(LoadStoreUnsignedOffsetMask)) { + #define LS_UNSIGNEDOFFSET(A, B, C) \ + case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break; + LOAD_STORE_LIST(LS_UNSIGNEDOFFSET) + #undef LS_UNSIGNEDOFFSET + case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xns'ILU]"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStoreRegisterOffset(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStoreRegisterOffset)"; + + switch (instr->Mask(LoadStoreRegisterOffsetMask)) { + #define LS_REGISTEROFFSET(A, B, C) \ + case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break; + LOAD_STORE_LIST(LS_REGISTEROFFSET) + #undef LS_REGISTEROFFSET + case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]"; + } + Format(instr, mnemonic, form); 
+} + + +void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Wt, ['Xns'ILS]"; + const char *form_x = "'Xt, ['Xns'ILS]"; + const char *form_s = "'St, ['Xns'ILS]"; + const char *form_d = "'Dt, ['Xns'ILS]"; + const char *form_prefetch = "'PrefOp, ['Xns'ILS]"; + + switch (instr->Mask(LoadStoreUnscaledOffsetMask)) { + case STURB_w: mnemonic = "sturb"; break; + case STURH_w: mnemonic = "sturh"; break; + case STUR_w: mnemonic = "stur"; break; + case STUR_x: mnemonic = "stur"; form = form_x; break; + case STUR_s: mnemonic = "stur"; form = form_s; break; + case STUR_d: mnemonic = "stur"; form = form_d; break; + case LDURB_w: mnemonic = "ldurb"; break; + case LDURH_w: mnemonic = "ldurh"; break; + case LDUR_w: mnemonic = "ldur"; break; + case LDUR_x: mnemonic = "ldur"; form = form_x; break; + case LDUR_s: mnemonic = "ldur"; form = form_s; break; + case LDUR_d: mnemonic = "ldur"; form = form_d; break; + case LDURSB_x: form = form_x; // Fall through. + case LDURSB_w: mnemonic = "ldursb"; break; + case LDURSH_x: form = form_x; // Fall through. + case LDURSH_w: mnemonic = "ldursh"; break; + case LDURSW_x: mnemonic = "ldursw"; form = form_x; break; + case PRFUM: mnemonic = "prfum"; form = form_prefetch; break; + default: form = "(LoadStoreUnscaledOffset)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadLiteral(const Instruction* instr) { + const char *mnemonic = "ldr"; + const char *form = "(LoadLiteral)"; + + switch (instr->Mask(LoadLiteralMask)) { + case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break; + case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break; + case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break; + case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break; + case LDRSW_x_lit: { + mnemonic = "ldrsw"; + form = "'Xt, 'ILLiteral 'LValue"; + break; + } + case PRFM_lit: { + mnemonic = "prfm"; + form = "'PrefOp, 'ILLiteral 'LValue"; + break; + } + default: mnemonic = "unimplemented"; + } + Format(instr, mnemonic, form); +} + + +#define LOAD_STORE_PAIR_LIST(V) \ + V(STP_w, "stp", "'Wt, 'Wt2", "4") \ + V(LDP_w, "ldp", "'Wt, 'Wt2", "4") \ + V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "4") \ + V(STP_x, "stp", "'Xt, 'Xt2", "8") \ + V(LDP_x, "ldp", "'Xt, 'Xt2", "8") \ + V(STP_s, "stp", "'St, 'St2", "4") \ + V(LDP_s, "ldp", "'St, 'St2", "4") \ + V(STP_d, "stp", "'Dt, 'Dt2", "8") \ + V(LDP_d, "ldp", "'Dt, 'Dt2", "8") + +void Disassembler::VisitLoadStorePairPostIndex(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePairPostIndex)"; + + switch (instr->Mask(LoadStorePairPostIndexMask)) { + #define LSP_POSTINDEX(A, B, C, D) \ + case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; break; + LOAD_STORE_PAIR_LIST(LSP_POSTINDEX) + #undef LSP_POSTINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePairPreIndex(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePairPreIndex)"; + + switch (instr->Mask(LoadStorePairPreIndexMask)) { + #define LSP_PREINDEX(A, B, C, D) \ + case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break; + LOAD_STORE_PAIR_LIST(LSP_PREINDEX) + #undef LSP_PREINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePairOffset(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePairOffset)"; + + switch (instr->Mask(LoadStorePairOffsetMask)) { + #define LSP_OFFSET(A, B, 
C, D) \ + case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break; + LOAD_STORE_PAIR_LIST(LSP_OFFSET) + #undef LSP_OFFSET + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePairNonTemporal(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form; + + switch (instr->Mask(LoadStorePairNonTemporalMask)) { + case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break; + case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break; + case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break; + case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break; + case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP4]"; break; + case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP4]"; break; + case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break; + case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break; + default: form = "(LoadStorePairNonTemporal)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStoreExclusive(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form; + + switch (instr->Mask(LoadStoreExclusiveMask)) { + case STXRB_w: mnemonic = "stxrb"; form = "'Ws, 'Wt, ['Xns]"; break; + case STXRH_w: mnemonic = "stxrh"; form = "'Ws, 'Wt, ['Xns]"; break; + case STXR_w: mnemonic = "stxr"; form = "'Ws, 'Wt, ['Xns]"; break; + case STXR_x: mnemonic = "stxr"; form = "'Ws, 'Xt, ['Xns]"; break; + case LDXRB_w: mnemonic = "ldxrb"; form = "'Wt, ['Xns]"; break; + case LDXRH_w: mnemonic = "ldxrh"; form = "'Wt, ['Xns]"; break; + case LDXR_w: mnemonic = "ldxr"; form = "'Wt, ['Xns]"; break; + case LDXR_x: mnemonic = "ldxr"; form = "'Xt, ['Xns]"; break; + case STXP_w: mnemonic = "stxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break; + case STXP_x: mnemonic = "stxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break; + case LDXP_w: mnemonic = "ldxp"; form = "'Wt, 'Wt2, ['Xns]"; break; + case LDXP_x: mnemonic = "ldxp"; form = "'Xt, 'Xt2, ['Xns]"; break; + case STLXRB_w: mnemonic = "stlxrb"; form = "'Ws, 'Wt, ['Xns]"; break; + case STLXRH_w: mnemonic = "stlxrh"; form = "'Ws, 'Wt, ['Xns]"; break; + case STLXR_w: mnemonic = "stlxr"; form = "'Ws, 'Wt, ['Xns]"; break; + case STLXR_x: mnemonic = "stlxr"; form = "'Ws, 'Xt, ['Xns]"; break; + case LDAXRB_w: mnemonic = "ldaxrb"; form = "'Wt, ['Xns]"; break; + case LDAXRH_w: mnemonic = "ldaxrh"; form = "'Wt, ['Xns]"; break; + case LDAXR_w: mnemonic = "ldaxr"; form = "'Wt, ['Xns]"; break; + case LDAXR_x: mnemonic = "ldaxr"; form = "'Xt, ['Xns]"; break; + case STLXP_w: mnemonic = "stlxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break; + case STLXP_x: mnemonic = "stlxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break; + case LDAXP_w: mnemonic = "ldaxp"; form = "'Wt, 'Wt2, ['Xns]"; break; + case LDAXP_x: mnemonic = "ldaxp"; form = "'Xt, 'Xt2, ['Xns]"; break; + case STLRB_w: mnemonic = "stlrb"; form = "'Wt, ['Xns]"; break; + case STLRH_w: mnemonic = "stlrh"; form = "'Wt, ['Xns]"; break; + case STLR_w: mnemonic = "stlr"; form = "'Wt, ['Xns]"; break; + case STLR_x: mnemonic = "stlr"; form = "'Xt, ['Xns]"; break; + case LDARB_w: mnemonic = "ldarb"; form = "'Wt, ['Xns]"; break; + case LDARH_w: mnemonic = "ldarh"; form = "'Wt, ['Xns]"; break; + case LDAR_w: mnemonic = "ldar"; form = "'Wt, ['Xns]"; break; + case LDAR_x: mnemonic = "ldar"; form = "'Xt, ['Xns]"; break; + default: form = "(LoadStoreExclusive)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPCompare(const Instruction* 
instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Fn, 'Fm"; + const char *form_zero = "'Fn, #0.0"; + + switch (instr->Mask(FPCompareMask)) { + case FCMP_s_zero: + case FCMP_d_zero: form = form_zero; // Fall through. + case FCMP_s: + case FCMP_d: mnemonic = "fcmp"; break; + default: form = "(FPCompare)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPConditionalCompare(const Instruction* instr) { + const char *mnemonic = "unmplemented"; + const char *form = "'Fn, 'Fm, 'INzcv, 'Cond"; + + switch (instr->Mask(FPConditionalCompareMask)) { + case FCCMP_s: + case FCCMP_d: mnemonic = "fccmp"; break; + case FCCMPE_s: + case FCCMPE_d: mnemonic = "fccmpe"; break; + default: form = "(FPConditionalCompare)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPConditionalSelect(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Fd, 'Fn, 'Fm, 'Cond"; + + switch (instr->Mask(FPConditionalSelectMask)) { + case FCSEL_s: + case FCSEL_d: mnemonic = "fcsel"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPDataProcessing1Source(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Fd, 'Fn"; + + switch (instr->Mask(FPDataProcessing1SourceMask)) { + #define FORMAT(A, B) \ + case A##_s: \ + case A##_d: mnemonic = B; break; + FORMAT(FMOV, "fmov"); + FORMAT(FABS, "fabs"); + FORMAT(FNEG, "fneg"); + FORMAT(FSQRT, "fsqrt"); + FORMAT(FRINTN, "frintn"); + FORMAT(FRINTP, "frintp"); + FORMAT(FRINTM, "frintm"); + FORMAT(FRINTZ, "frintz"); + FORMAT(FRINTA, "frinta"); + FORMAT(FRINTX, "frintx"); + FORMAT(FRINTI, "frinti"); + #undef FORMAT + case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break; + case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break; + default: form = "(FPDataProcessing1Source)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPDataProcessing2Source(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Fd, 'Fn, 'Fm"; + + switch (instr->Mask(FPDataProcessing2SourceMask)) { + #define FORMAT(A, B) \ + case A##_s: \ + case A##_d: mnemonic = B; break; + FORMAT(FMUL, "fmul"); + FORMAT(FDIV, "fdiv"); + FORMAT(FADD, "fadd"); + FORMAT(FSUB, "fsub"); + FORMAT(FMAX, "fmax"); + FORMAT(FMIN, "fmin"); + FORMAT(FMAXNM, "fmaxnm"); + FORMAT(FMINNM, "fminnm"); + FORMAT(FNMUL, "fnmul"); + #undef FORMAT + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPDataProcessing3Source(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Fd, 'Fn, 'Fm, 'Fa"; + + switch (instr->Mask(FPDataProcessing3SourceMask)) { + #define FORMAT(A, B) \ + case A##_s: \ + case A##_d: mnemonic = B; break; + FORMAT(FMADD, "fmadd"); + FORMAT(FMSUB, "fmsub"); + FORMAT(FNMADD, "fnmadd"); + FORMAT(FNMSUB, "fnmsub"); + #undef FORMAT + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPImmediate(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "(FPImmediate)"; + + switch (instr->Mask(FPImmediateMask)) { + case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break; + case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPIntegerConvert(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = 
"(FPIntegerConvert)"; + const char *form_rf = "'Rd, 'Fn"; + const char *form_fr = "'Fd, 'Rn"; + + switch (instr->Mask(FPIntegerConvertMask)) { + case FMOV_ws: + case FMOV_xd: mnemonic = "fmov"; form = form_rf; break; + case FMOV_sw: + case FMOV_dx: mnemonic = "fmov"; form = form_fr; break; + case FCVTAS_ws: + case FCVTAS_xs: + case FCVTAS_wd: + case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break; + case FCVTAU_ws: + case FCVTAU_xs: + case FCVTAU_wd: + case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break; + case FCVTMS_ws: + case FCVTMS_xs: + case FCVTMS_wd: + case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break; + case FCVTMU_ws: + case FCVTMU_xs: + case FCVTMU_wd: + case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break; + case FCVTNS_ws: + case FCVTNS_xs: + case FCVTNS_wd: + case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break; + case FCVTNU_ws: + case FCVTNU_xs: + case FCVTNU_wd: + case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break; + case FCVTZU_xd: + case FCVTZU_ws: + case FCVTZU_wd: + case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break; + case FCVTZS_xd: + case FCVTZS_wd: + case FCVTZS_xs: + case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break; + case SCVTF_sw: + case SCVTF_sx: + case SCVTF_dw: + case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break; + case UCVTF_sw: + case UCVTF_sx: + case UCVTF_dw: + case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPFixedPointConvert(const Instruction* instr) { + const char *mnemonic = ""; + const char *form = "'Rd, 'Fn, 'IFPFBits"; + const char *form_fr = "'Fd, 'Rn, 'IFPFBits"; + + switch (instr->Mask(FPFixedPointConvertMask)) { + case FCVTZS_ws_fixed: + case FCVTZS_xs_fixed: + case FCVTZS_wd_fixed: + case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break; + case FCVTZU_ws_fixed: + case FCVTZU_xs_fixed: + case FCVTZU_wd_fixed: + case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break; + case SCVTF_sw_fixed: + case SCVTF_sx_fixed: + case SCVTF_dw_fixed: + case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break; + case UCVTF_sw_fixed: + case UCVTF_sx_fixed: + case UCVTF_dw_fixed: + case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break; + default: VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitSystem(const Instruction* instr) { + // Some system instructions hijack their Op and Cp fields to represent a + // range of immediates instead of indicating a different instruction. This + // makes the decoding tricky. + const char *mnemonic = "unimplemented"; + const char *form = "(System)"; + + if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) { + switch (instr->Mask(SystemExclusiveMonitorMask)) { + case CLREX: { + mnemonic = "clrex"; + form = (instr->CRm() == 0xf) ? 
NULL : "'IX"; + break; + } + } + } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) { + switch (instr->Mask(SystemSysRegMask)) { + case MRS: { + mnemonic = "mrs"; + switch (instr->ImmSystemRegister()) { + case NZCV: form = "'Xt, nzcv"; break; + case FPCR: form = "'Xt, fpcr"; break; + default: form = "'Xt, (unknown)"; break; + } + break; + } + case MSR: { + mnemonic = "msr"; + switch (instr->ImmSystemRegister()) { + case NZCV: form = "nzcv, 'Xt"; break; + case FPCR: form = "fpcr, 'Xt"; break; + default: form = "(unknown), 'Xt"; break; + } + break; + } + } + } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) { + switch (instr->ImmHint()) { + case NOP: { + mnemonic = "nop"; + form = NULL; + break; + } + } + } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) { + switch (instr->Mask(MemBarrierMask)) { + case DMB: { + mnemonic = "dmb"; + form = "'M"; + break; + } + case DSB: { + mnemonic = "dsb"; + form = "'M"; + break; + } + case ISB: { + mnemonic = "isb"; + form = NULL; + break; + } + } + } + + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitException(const Instruction* instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'IDebug"; + + switch (instr->Mask(ExceptionMask)) { + case HLT: mnemonic = "hlt"; break; + case BRK: mnemonic = "brk"; break; + case SVC: mnemonic = "svc"; break; + case HVC: mnemonic = "hvc"; break; + case SMC: mnemonic = "smc"; break; + case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break; + case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break; + case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break; + default: form = "(Exception)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitUnimplemented(const Instruction* instr) { + Format(instr, "unimplemented", "(Unimplemented)"); +} + + +void Disassembler::VisitUnallocated(const Instruction* instr) { + Format(instr, "unallocated", "(Unallocated)"); +} + + +void Disassembler::ProcessOutput(const Instruction* /*instr*/) { + // The base disasm does nothing more than disassembling into a buffer. +} + + +void Disassembler::AppendRegisterNameToOutput(const Instruction* instr, + const CPURegister& reg) { + USE(instr); + VIXL_ASSERT(reg.IsValid()); + char reg_char; + + if (reg.IsRegister()) { + reg_char = reg.Is64Bits() ? 'x' : 'w'; + } else { + VIXL_ASSERT(reg.IsFPRegister()); + reg_char = reg.Is64Bits() ? 'd' : 's'; + } + + if (reg.IsFPRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) { + // A normal register: w0 - w30, x0 - x30, s0 - s31, d0 - d31. + AppendToOutput("%c%d", reg_char, reg.code()); + } else if (reg.Aliases(sp)) { + // Disassemble w31/x31 as stack pointer wsp/sp. + AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp"); + } else { + // Disassemble w31/x31 as zero register wzr/xzr. + AppendToOutput("%czr", reg_char); + } +} + + +void Disassembler::AppendPCRelativeOffsetToOutput(const Instruction* instr, + int64_t offset) { + USE(instr); + char sign = (offset < 0) ? 
'-' : '+';
+  AppendToOutput("#%c0x%" PRIx64, sign, std::abs(offset));
+}
+
+
+void Disassembler::AppendAddressToOutput(const Instruction* instr,
+                                         const void* addr) {
+  USE(instr);
+  AppendToOutput("(addr 0x%" PRIxPTR ")", reinterpret_cast<uintptr_t>(addr));
+}
+
+
+void Disassembler::AppendCodeAddressToOutput(const Instruction* instr,
+                                             const void* addr) {
+  AppendAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendDataAddressToOutput(const Instruction* instr,
+                                             const void* addr) {
+  AppendAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendCodeRelativeAddressToOutput(const Instruction* instr,
+                                                     const void* addr) {
+  USE(instr);
+  int64_t rel_addr = CodeRelativeAddress(addr);
+  if (rel_addr >= 0) {
+    AppendToOutput("(addr 0x%" PRIx64 ")", rel_addr);
+  } else {
+    AppendToOutput("(addr -0x%" PRIx64 ")", -rel_addr);
+  }
+}
+
+
+void Disassembler::AppendCodeRelativeCodeAddressToOutput(
+    const Instruction* instr, const void* addr) {
+  AppendCodeRelativeAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendCodeRelativeDataAddressToOutput(
+    const Instruction* instr, const void* addr) {
+  AppendCodeRelativeAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::MapCodeAddress(int64_t base_address,
+                                  const Instruction* instr_address) {
+  set_code_address_offset(
+      base_address - reinterpret_cast<intptr_t>(instr_address));
+}
+int64_t Disassembler::CodeRelativeAddress(const void* addr) {
+  return reinterpret_cast<intptr_t>(addr) + code_address_offset();
+}
+
+
+void Disassembler::Format(const Instruction* instr, const char* mnemonic,
+                          const char* format) {
+  VIXL_ASSERT(mnemonic != NULL);
+  ResetOutput();
+  Substitute(instr, mnemonic);
+  if (format != NULL) {
+    buffer_[buffer_pos_++] = ' ';
+    Substitute(instr, format);
+  }
+  buffer_[buffer_pos_] = 0;
+  ProcessOutput(instr);
+}
+
+
+void Disassembler::Substitute(const Instruction* instr, const char* string) {
+  char chr = *string++;
+  while (chr != '\0') {
+    if (chr == '\'') {
+      string += SubstituteField(instr, string);
+    } else {
+      buffer_[buffer_pos_++] = chr;
+    }
+    chr = *string++;
+  }
+}
+
+
+int Disassembler::SubstituteField(const Instruction* instr,
+                                  const char* format) {
+  switch (format[0]) {
+    case 'R':  // Register. X or W, selected by sf bit.
+    case 'F':  // FP Register. S or D, selected by type field.
+ case 'W': + case 'X': + case 'S': + case 'D': return SubstituteRegisterField(instr, format); + case 'I': return SubstituteImmediateField(instr, format); + case 'L': return SubstituteLiteralField(instr, format); + case 'H': return SubstituteShiftField(instr, format); + case 'P': return SubstitutePrefetchField(instr, format); + case 'C': return SubstituteConditionField(instr, format); + case 'E': return SubstituteExtendField(instr, format); + case 'A': return SubstitutePCRelAddressField(instr, format); + case 'B': return SubstituteBranchTargetField(instr, format); + case 'O': return SubstituteLSRegOffsetField(instr, format); + case 'M': return SubstituteBarrierField(instr, format); + default: { + VIXL_UNREACHABLE(); + return 1; + } + } +} + + +int Disassembler::SubstituteRegisterField(const Instruction* instr, + const char* format) { + unsigned reg_num = 0; + unsigned field_len = 2; + switch (format[1]) { + case 'd': reg_num = instr->Rd(); break; + case 'n': reg_num = instr->Rn(); break; + case 'm': reg_num = instr->Rm(); break; + case 'a': reg_num = instr->Ra(); break; + case 's': reg_num = instr->Rs(); break; + case 't': { + if (format[2] == '2') { + reg_num = instr->Rt2(); + field_len = 3; + } else { + reg_num = instr->Rt(); + } + break; + } + default: VIXL_UNREACHABLE(); + } + + // Increase field length for registers tagged as stack. + if (format[2] == 's') { + field_len = 3; + } + + CPURegister::RegisterType reg_type; + unsigned reg_size; + + if (format[0] == 'R') { + // Register type is R: use sf bit to choose X and W. + reg_type = CPURegister::kRegister; + reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; + } else if (format[0] == 'F') { + // Floating-point register: use type field to choose S or D. + reg_type = CPURegister::kFPRegister; + reg_size = ((instr->FPType() & 1) == 0) ? kSRegSize : kDRegSize; + } else { + // The register type is specified. + switch (format[0]) { + case 'W': + reg_type = CPURegister::kRegister; reg_size = kWRegSize; break; + case 'X': + reg_type = CPURegister::kRegister; reg_size = kXRegSize; break; + case 'S': + reg_type = CPURegister::kFPRegister; reg_size = kSRegSize; break; + case 'D': + reg_type = CPURegister::kFPRegister; reg_size = kDRegSize; break; + default: + VIXL_UNREACHABLE(); + reg_type = CPURegister::kRegister; + reg_size = kXRegSize; + } + } + + if ((reg_type == CPURegister::kRegister) && + (reg_num == kZeroRegCode) && (format[2] == 's')) { + reg_num = kSPRegInternalCode; + } + + AppendRegisterNameToOutput(instr, CPURegister(reg_num, reg_size, reg_type)); + + return field_len; +} + + +int Disassembler::SubstituteImmediateField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(format[0] == 'I'); + + switch (format[1]) { + case 'M': { // IMoveImm, IMoveNeg or IMoveLSL. + if (format[5] == 'L') { + AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide()); + if (instr->ShiftMoveWide() > 0) { + AppendToOutput(", lsl #%" PRId64, 16 * instr->ShiftMoveWide()); + } + } else { + VIXL_ASSERT((format[5] == 'I') || (format[5] == 'N')); + uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide()); + if (format[5] == 'N') + imm = ~imm; + if (!instr->SixtyFourBits()) + imm &= UINT64_C(0xffffffff); + AppendToOutput("#0x%" PRIx64, imm); + } + return 8; + } + case 'L': { + switch (format[2]) { + case 'L': { // ILLiteral - Immediate Load Literal. + AppendToOutput("pc%+" PRId64, + instr->ImmLLiteral() << kLiteralEntrySizeLog2); + return 9; + } + case 'S': { // ILS - Immediate Load/Store. 
+ if (instr->ImmLS() != 0) { + AppendToOutput(", #%" PRId64, instr->ImmLS()); + } + return 3; + } + case 'P': { // ILPx - Immediate Load/Store Pair, x = access size. + if (instr->ImmLSPair() != 0) { + // format[3] is the scale value. Convert to a number. + int scale = format[3] - 0x30; + AppendToOutput(", #%" PRId64, instr->ImmLSPair() * scale); + } + return 4; + } + case 'U': { // ILU - Immediate Load/Store Unsigned. + if (instr->ImmLSUnsigned() != 0) { + AppendToOutput(", #%" PRIu64, + instr->ImmLSUnsigned() << instr->SizeLS()); + } + return 3; + } + } + } + case 'C': { // ICondB - Immediate Conditional Branch. + int64_t offset = instr->ImmCondBranch() << 2; + AppendPCRelativeOffsetToOutput(instr, offset); + return 6; + } + case 'A': { // IAddSub. + VIXL_ASSERT(instr->ShiftAddSub() <= 1); + int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub()); + AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm); + return 7; + } + case 'F': { // IFPSingle, IFPDouble or IFPFBits. + if (format[3] == 'F') { // IFPFbits. + AppendToOutput("#%" PRId64, 64 - instr->FPScale()); + return 8; + } else { + AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(), + format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64()); + return 9; + } + } + case 'T': { // ITri - Immediate Triangular Encoded. + AppendToOutput("#0x%" PRIx64, instr->ImmLogical()); + return 4; + } + case 'N': { // INzcv. + int nzcv = (instr->Nzcv() << Flags_offset); + AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N', + ((nzcv & ZFlag) == 0) ? 'z' : 'Z', + ((nzcv & CFlag) == 0) ? 'c' : 'C', + ((nzcv & VFlag) == 0) ? 'v' : 'V'); + return 5; + } + case 'P': { // IP - Conditional compare. + AppendToOutput("#%" PRId64, instr->ImmCondCmp()); + return 2; + } + case 'B': { // Bitfields. + return SubstituteBitfieldImmediateField(instr, format); + } + case 'E': { // IExtract. + AppendToOutput("#%" PRId64, instr->ImmS()); + return 8; + } + case 'S': { // IS - Test and branch bit. + AppendToOutput("#%" PRId64, (instr->ImmTestBranchBit5() << 5) | + instr->ImmTestBranchBit40()); + return 2; + } + case 'D': { // IDebug - HLT and BRK instructions. + AppendToOutput("#0x%" PRIx64, instr->ImmException()); + return 6; + } + case 'X': { // IX - CLREX instruction. + AppendToOutput("#0x%" PRIx64, instr->CRm()); + return 2; + } + default: { + VIXL_UNIMPLEMENTED(); + return 0; + } + } +} + + +int Disassembler::SubstituteBitfieldImmediateField(const Instruction* instr, + const char* format) { + VIXL_ASSERT((format[0] == 'I') && (format[1] == 'B')); + unsigned r = instr->ImmR(); + unsigned s = instr->ImmS(); + + switch (format[2]) { + case 'r': { // IBr. + AppendToOutput("#%d", r); + return 3; + } + case 's': { // IBs+1 or IBs-r+1. + if (format[3] == '+') { + AppendToOutput("#%d", s + 1); + return 5; + } else { + VIXL_ASSERT(format[3] == '-'); + AppendToOutput("#%d", s - r + 1); + return 7; + } + } + case 'Z': { // IBZ-r. + VIXL_ASSERT((format[3] == '-') && (format[4] == 'r')); + unsigned reg_size = (instr->SixtyFourBits() == 1) ? 
kXRegSize : kWRegSize; + AppendToOutput("#%d", reg_size - r); + return 5; + } + default: { + VIXL_UNREACHABLE(); + return 0; + } + } +} + + +int Disassembler::SubstituteLiteralField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(strncmp(format, "LValue", 6) == 0); + USE(format); + + const void * address = instr->LiteralAddress(); + switch (instr->Mask(LoadLiteralMask)) { + case LDR_w_lit: + case LDR_x_lit: + case LDRSW_x_lit: + case LDR_s_lit: + case LDR_d_lit: + AppendCodeRelativeDataAddressToOutput(instr, address); + break; + case PRFM_lit: { + // Use the prefetch hint to decide how to print the address. + switch (instr->PrefetchHint()) { + case 0x0: // PLD: prefetch for load. + case 0x2: // PST: prepare for store. + AppendCodeRelativeDataAddressToOutput(instr, address); + break; + case 0x1: // PLI: preload instructions. + AppendCodeRelativeCodeAddressToOutput(instr, address); + break; + case 0x3: // Unallocated hint. + AppendCodeRelativeAddressToOutput(instr, address); + break; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + + return 6; +} + + +int Disassembler::SubstituteShiftField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(format[0] == 'H'); + VIXL_ASSERT(instr->ShiftDP() <= 0x3); + + switch (format[1]) { + case 'D': { // HDP. + VIXL_ASSERT(instr->ShiftDP() != ROR); + } // Fall through. + case 'L': { // HLo. + if (instr->ImmDPShift() != 0) { + const char* shift_type[] = {"lsl", "lsr", "asr", "ror"}; + AppendToOutput(", %s #%" PRId64, shift_type[instr->ShiftDP()], + instr->ImmDPShift()); + } + return 3; + } + default: + VIXL_UNIMPLEMENTED(); + return 0; + } +} + + +int Disassembler::SubstituteConditionField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(format[0] == 'C'); + const char* condition_code[] = { "eq", "ne", "hs", "lo", + "mi", "pl", "vs", "vc", + "hi", "ls", "ge", "lt", + "gt", "le", "al", "nv" }; + int cond; + switch (format[1]) { + case 'B': cond = instr->ConditionBranch(); break; + case 'I': { + cond = InvertCondition(static_cast(instr->Condition())); + break; + } + default: cond = instr->Condition(); + } + AppendToOutput("%s", condition_code[cond]); + return 4; +} + + +int Disassembler::SubstitutePCRelAddressField(const Instruction* instr, + const char* format) { + VIXL_ASSERT((strcmp(format, "AddrPCRelByte") == 0) || // Used by `adr`. + (strcmp(format, "AddrPCRelPage") == 0)); // Used by `adrp`. + + int64_t offset = instr->ImmPCRel(); + + // Compute the target address based on the effective address (after applying + // code_address_offset). This is required for correct behaviour of adrp. + const Instruction* base = instr + code_address_offset(); + if (format[9] == 'P') { + offset *= kPageSize; + base = AlignDown(base, kPageSize); + } + // Strip code_address_offset before printing, so we can use the + // semantically-correct AppendCodeRelativeAddressToOutput. + const void* target = + reinterpret_cast(base + offset - code_address_offset()); + + AppendPCRelativeOffsetToOutput(instr, offset); + AppendToOutput(" "); + AppendCodeRelativeAddressToOutput(instr, target); + return 13; +} + + +int Disassembler::SubstituteBranchTargetField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(strncmp(format, "BImm", 4) == 0); + + int64_t offset = 0; + switch (format[5]) { + // BImmUncn - unconditional branch immediate. + case 'n': offset = instr->ImmUncondBranch(); break; + // BImmCond - conditional branch immediate. 
+ case 'o': offset = instr->ImmCondBranch(); break; + // BImmCmpa - compare and branch immediate. + case 'm': offset = instr->ImmCmpBranch(); break; + // BImmTest - test and branch immediate. + case 'e': offset = instr->ImmTestBranch(); break; + default: VIXL_UNIMPLEMENTED(); + } + offset <<= kInstructionSizeLog2; + const void* target_address = reinterpret_cast(instr + offset); + VIXL_STATIC_ASSERT(sizeof(*instr) == 1); + + AppendPCRelativeOffsetToOutput(instr, offset); + AppendToOutput(" "); + AppendCodeRelativeCodeAddressToOutput(instr, target_address); + + return 8; +} + + +int Disassembler::SubstituteExtendField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(strncmp(format, "Ext", 3) == 0); + VIXL_ASSERT(instr->ExtendMode() <= 7); + USE(format); + + const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx", + "sxtb", "sxth", "sxtw", "sxtx" }; + + // If rd or rn is SP, uxtw on 32-bit registers and uxtx on 64-bit + // registers becomes lsl. + if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) && + (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) || + (instr->ExtendMode() == UXTX))) { + if (instr->ImmExtendShift() > 0) { + AppendToOutput(", lsl #%" PRId64, instr->ImmExtendShift()); + } + } else { + AppendToOutput(", %s", extend_mode[instr->ExtendMode()]); + if (instr->ImmExtendShift() > 0) { + AppendToOutput(" #%" PRId64, instr->ImmExtendShift()); + } + } + return 3; +} + + +int Disassembler::SubstituteLSRegOffsetField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(strncmp(format, "Offsetreg", 9) == 0); + const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl", + "undefined", "undefined", "sxtw", "sxtx" }; + USE(format); + + unsigned shift = instr->ImmShiftLS(); + Extend ext = static_cast(instr->ExtendMode()); + char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x'; + + unsigned rm = instr->Rm(); + if (rm == kZeroRegCode) { + AppendToOutput("%czr", reg_type); + } else { + AppendToOutput("%c%d", reg_type, rm); + } + + // Extend mode UXTX is an alias for shift mode LSL here. + if (!((ext == UXTX) && (shift == 0))) { + AppendToOutput(", %s", extend_mode[ext]); + if (shift != 0) { + AppendToOutput(" #%" PRId64, instr->SizeLS()); + } + } + return 9; +} + + +int Disassembler::SubstitutePrefetchField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(format[0] == 'P'); + USE(format); + + static const char* hints[] = {"ld", "li", "st"}; + static const char* stream_options[] = {"keep", "strm"}; + + unsigned hint = instr->PrefetchHint(); + unsigned target = instr->PrefetchTarget() + 1; + unsigned stream = instr->PrefetchStream(); + + if ((hint >= (sizeof(hints) / sizeof(hints[0]))) || (target > 3)) { + // Unallocated prefetch operations. + int prefetch_mode = instr->ImmPrefetchOperation(); + AppendToOutput("#0b%c%c%c%c%c", + (prefetch_mode & (1 << 4)) ? '1' : '0', + (prefetch_mode & (1 << 3)) ? '1' : '0', + (prefetch_mode & (1 << 2)) ? '1' : '0', + (prefetch_mode & (1 << 1)) ? '1' : '0', + (prefetch_mode & (1 << 0)) ? 
'1' : '0'); + } else { + VIXL_ASSERT(stream < (sizeof(stream_options) / sizeof(stream_options[0]))); + AppendToOutput("p%sl%d%s", hints[hint], target, stream_options[stream]); + } + return 6; +} + +int Disassembler::SubstituteBarrierField(const Instruction* instr, + const char* format) { + VIXL_ASSERT(format[0] == 'M'); + USE(format); + + static const char* options[4][4] = { + { "sy (0b0000)", "oshld", "oshst", "osh" }, + { "sy (0b0100)", "nshld", "nshst", "nsh" }, + { "sy (0b1000)", "ishld", "ishst", "ish" }, + { "sy (0b1100)", "ld", "st", "sy" } + }; + int domain = instr->ImmBarrierDomain(); + int type = instr->ImmBarrierType(); + + AppendToOutput("%s", options[domain][type]); + return 1; +} + +void Disassembler::ResetOutput() { + buffer_pos_ = 0; + buffer_[buffer_pos_] = 0; +} + + +void Disassembler::AppendToOutput(const char* format, ...) { + va_list args; + va_start(args, format); + buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_, format, args); + va_end(args); +} + + +void PrintDisassembler::ProcessOutput(const Instruction* instr) { + fprintf(stream_, "0x%016" PRIx64 " %08" PRIx32 "\t\t%s\n", + reinterpret_cast(instr), + instr->InstructionBits(), + GetOutput()); +} +} // namespace vixl diff --git a/qemu/disas/libvixl/a64/disasm-a64.h b/qemu/disas/libvixl/a64/disasm-a64.h new file mode 100644 index 000000000..ddfe98be1 --- /dev/null +++ b/qemu/disas/libvixl/a64/disasm-a64.h @@ -0,0 +1,176 @@ +// Copyright 2013, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_A64_DISASM_A64_H +#define VIXL_A64_DISASM_A64_H + +#include "globals.h" +#include "utils.h" +#include "instructions-a64.h" +#include "decoder-a64.h" +#include "assembler-a64.h" + +namespace vixl { + +class Disassembler: public DecoderVisitor { + public: + Disassembler(); + Disassembler(char* text_buffer, int buffer_size); + virtual ~Disassembler(); + char* GetOutput(); + + // Declare all Visitor functions. 
+ #define DECLARE(A) virtual void Visit##A(const Instruction* instr); + VISITOR_LIST(DECLARE) + #undef DECLARE + + protected: + virtual void ProcessOutput(const Instruction* instr); + + // Default output functions. The functions below implement a default way of + // printing elements in the disassembly. A sub-class can override these to + // customize the disassembly output. + + // Prints the name of a register. + virtual void AppendRegisterNameToOutput(const Instruction* instr, + const CPURegister& reg); + + // Prints a PC-relative offset. This is used for example when disassembling + // branches to immediate offsets. + virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr, + int64_t offset); + + // Prints an address, in the general case. It can be code or data. This is + // used for example to print the target address of an ADR instruction. + virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr, + const void* addr); + + // Prints the address of some code. + // This is used for example to print the target address of a branch to an + // immediate offset. + // A sub-class can for example override this method to lookup the address and + // print an appropriate name. + virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr, + const void* addr); + + // Prints the address of some data. + // This is used for example to print the source address of a load literal + // instruction. + virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr, + const void* addr); + + // Same as the above, but for addresses that are not relative to the code + // buffer. They are currently not used by VIXL. + virtual void AppendAddressToOutput(const Instruction* instr, + const void* addr); + virtual void AppendCodeAddressToOutput(const Instruction* instr, + const void* addr); + virtual void AppendDataAddressToOutput(const Instruction* instr, + const void* addr); + + public: + // Get/Set the offset that should be added to code addresses when printing + // code-relative addresses in the AppendCodeRelativeAddressToOutput() + // helpers. + // Below is an example of how a branch immediate instruction in memory at + // address 0xb010200 would disassemble with different offsets. 
+ // Base address | Disassembly + // 0x0 | 0xb010200: b #+0xcc (addr 0xb0102cc) + // 0x10000 | 0xb000200: b #+0xcc (addr 0xb0002cc) + // 0xb010200 | 0x0: b #+0xcc (addr 0xcc) + void MapCodeAddress(int64_t base_address, const Instruction* instr_address); + int64_t CodeRelativeAddress(const void* instr); + + private: + void Format( + const Instruction* instr, const char* mnemonic, const char* format); + void Substitute(const Instruction* instr, const char* string); + int SubstituteField(const Instruction* instr, const char* format); + int SubstituteRegisterField(const Instruction* instr, const char* format); + int SubstituteImmediateField(const Instruction* instr, const char* format); + int SubstituteLiteralField(const Instruction* instr, const char* format); + int SubstituteBitfieldImmediateField( + const Instruction* instr, const char* format); + int SubstituteShiftField(const Instruction* instr, const char* format); + int SubstituteExtendField(const Instruction* instr, const char* format); + int SubstituteConditionField(const Instruction* instr, const char* format); + int SubstitutePCRelAddressField(const Instruction* instr, const char* format); + int SubstituteBranchTargetField(const Instruction* instr, const char* format); + int SubstituteLSRegOffsetField(const Instruction* instr, const char* format); + int SubstitutePrefetchField(const Instruction* instr, const char* format); + int SubstituteBarrierField(const Instruction* instr, const char* format); + + bool RdIsZROrSP(const Instruction* instr) const { + return (instr->Rd() == kZeroRegCode); + } + + bool RnIsZROrSP(const Instruction* instr) const { + return (instr->Rn() == kZeroRegCode); + } + + bool RmIsZROrSP(const Instruction* instr) const { + return (instr->Rm() == kZeroRegCode); + } + + bool RaIsZROrSP(const Instruction* instr) const { + return (instr->Ra() == kZeroRegCode); + } + + bool IsMovzMovnImm(unsigned reg_size, uint64_t value); + + int64_t code_address_offset() const { return code_address_offset_; } + + protected: + void ResetOutput(); + void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3); + + void set_code_address_offset(int64_t code_address_offset) { + code_address_offset_ = code_address_offset; + } + + char* buffer_; + uint32_t buffer_pos_; + uint32_t buffer_size_; + bool own_buffer_; + + int64_t code_address_offset_; +}; + + +class PrintDisassembler: public Disassembler { + public: + explicit PrintDisassembler(FILE* stream) : stream_(stream) { } + virtual ~PrintDisassembler() { } + + protected: + virtual void ProcessOutput(const Instruction* instr); + + private: + FILE *stream_; +}; +} // namespace vixl + +#endif // VIXL_A64_DISASM_A64_H diff --git a/qemu/disas/libvixl/a64/instructions-a64.cc b/qemu/disas/libvixl/a64/instructions-a64.cc new file mode 100644 index 000000000..b09188683 --- /dev/null +++ b/qemu/disas/libvixl/a64/instructions-a64.cc @@ -0,0 +1,314 @@ +// Copyright 2013, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "a64/instructions-a64.h" +#include "a64/assembler-a64.h" + +namespace vixl { + + +// Floating-point infinity values. +const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000); +const float kFP32NegativeInfinity = rawbits_to_float(0xff800000); +const double kFP64PositiveInfinity = + rawbits_to_double(UINT64_C(0x7ff0000000000000)); +const double kFP64NegativeInfinity = + rawbits_to_double(UINT64_C(0xfff0000000000000)); + + +// The default NaN values (for FPCR.DN=1). +const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000)); +const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000); + + +static uint64_t RotateRight(uint64_t value, + unsigned int rotate, + unsigned int width) { + VIXL_ASSERT(width <= 64); + rotate &= 63; + return ((value & ((UINT64_C(1) << rotate) - 1)) << + (width - rotate)) | (value >> rotate); +} + + +static uint64_t RepeatBitsAcrossReg(unsigned reg_size, + uint64_t value, + unsigned width) { + VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) || + (width == 32)); + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + uint64_t result = value & ((UINT64_C(1) << width) - 1); + for (unsigned i = width; i < reg_size; i *= 2) { + result |= (result << i); + } + return result; +} + + +bool Instruction::IsLoad() const { + if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) { + return false; + } + + if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) { + return Mask(LoadStorePairLBit) != 0; + } else { + LoadStoreOp op = static_cast(Mask(LoadStoreOpMask)); + switch (op) { + case LDRB_w: + case LDRH_w: + case LDR_w: + case LDR_x: + case LDRSB_w: + case LDRSB_x: + case LDRSH_w: + case LDRSH_x: + case LDRSW_x: + case LDR_s: + case LDR_d: return true; + default: return false; + } + } +} + + +bool Instruction::IsStore() const { + if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) { + return false; + } + + if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) { + return Mask(LoadStorePairLBit) == 0; + } else { + LoadStoreOp op = static_cast(Mask(LoadStoreOpMask)); + switch (op) { + case STRB_w: + case STRH_w: + case STR_w: + case STR_x: + case STR_s: + case STR_d: return true; + default: return false; + } + } +} + + +// Logical immediates can't encode zero, so a return value of zero is used to +// indicate a failure case. Specifically, where the constraints on imm_s are +// not met. +uint64_t Instruction::ImmLogical() const { + unsigned reg_size = SixtyFourBits() ? 
kXRegSize : kWRegSize; + int64_t n = BitN(); + int64_t imm_s = ImmSetBits(); + int64_t imm_r = ImmRotate(); + + // An integer is constructed from the n, imm_s and imm_r bits according to + // the following table: + // + // N imms immr size S R + // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) + // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) + // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) + // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) + // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) + // 0 11110s xxxxxr 2 UInt(s) UInt(r) + // (s bits must not be all set) + // + // A pattern is constructed of size bits, where the least significant S+1 + // bits are set. The pattern is rotated right by R, and repeated across a + // 32 or 64-bit value, depending on destination register width. + // + + if (n == 1) { + if (imm_s == 0x3F) { + return 0; + } + uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1; + return RotateRight(bits, imm_r, 64); + } else { + if ((imm_s >> 1) == 0x1F) { + return 0; + } + for (int width = 0x20; width >= 0x2; width >>= 1) { + if ((imm_s & width) == 0) { + int mask = width - 1; + if ((imm_s & mask) == mask) { + return 0; + } + uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1; + return RepeatBitsAcrossReg(reg_size, + RotateRight(bits, imm_r & mask, width), + width); + } + } + } + VIXL_UNREACHABLE(); + return 0; +} + + +float Instruction::ImmFP32() const { + // ImmFP: abcdefgh (8 bits) + // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits) + // where B is b ^ 1 + uint32_t bits = ImmFP(); + uint32_t bit7 = (bits >> 7) & 0x1; + uint32_t bit6 = (bits >> 6) & 0x1; + uint32_t bit5_to_0 = bits & 0x3f; + uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19); + + return rawbits_to_float(result); +} + + +double Instruction::ImmFP64() const { + // ImmFP: abcdefgh (8 bits) + // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 + // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits) + // where B is b ^ 1 + uint32_t bits = ImmFP(); + uint64_t bit7 = (bits >> 7) & 0x1; + uint64_t bit6 = (bits >> 6) & 0x1; + uint64_t bit5_to_0 = bits & 0x3f; + uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48); + + return rawbits_to_double(result); +} + + +LSDataSize CalcLSPairDataSize(LoadStorePairOp op) { + switch (op) { + case STP_x: + case LDP_x: + case STP_d: + case LDP_d: return LSDoubleWord; + default: return LSWord; + } +} + + +const Instruction* Instruction::ImmPCOffsetTarget() const { + const Instruction * base = this; + ptrdiff_t offset; + if (IsPCRelAddressing()) { + // ADR and ADRP. + offset = ImmPCRel(); + if (Mask(PCRelAddressingMask) == ADRP) { + base = AlignDown(base, kPageSize); + offset *= kPageSize; + } else { + VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR); + } + } else { + // All PC-relative branches. + VIXL_ASSERT(BranchType() != UnknownBranchType); + // Relative branch offsets are instruction-size-aligned. 
+ offset = ImmBranch() << kInstructionSizeLog2; + } + return base + offset; +} + + +inline int Instruction::ImmBranch() const { + switch (BranchType()) { + case CondBranchType: return ImmCondBranch(); + case UncondBranchType: return ImmUncondBranch(); + case CompareBranchType: return ImmCmpBranch(); + case TestBranchType: return ImmTestBranch(); + default: VIXL_UNREACHABLE(); + } + return 0; +} + + +void Instruction::SetImmPCOffsetTarget(const Instruction* target) { + if (IsPCRelAddressing()) { + SetPCRelImmTarget(target); + } else { + SetBranchImmTarget(target); + } +} + + +void Instruction::SetPCRelImmTarget(const Instruction* target) { + int32_t imm21; + if ((Mask(PCRelAddressingMask) == ADR)) { + imm21 = target - this; + } else { + VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP); + uintptr_t this_page = reinterpret_cast(this) / kPageSize; + uintptr_t target_page = reinterpret_cast(target) / kPageSize; + imm21 = target_page - this_page; + } + Instr imm = Assembler::ImmPCRelAddress(imm21); + + SetInstructionBits(Mask(~ImmPCRel_mask) | imm); +} + + +void Instruction::SetBranchImmTarget(const Instruction* target) { + VIXL_ASSERT(((target - this) & 3) == 0); + Instr branch_imm = 0; + uint32_t imm_mask = 0; + int offset = (target - this) >> kInstructionSizeLog2; + switch (BranchType()) { + case CondBranchType: { + branch_imm = Assembler::ImmCondBranch(offset); + imm_mask = ImmCondBranch_mask; + break; + } + case UncondBranchType: { + branch_imm = Assembler::ImmUncondBranch(offset); + imm_mask = ImmUncondBranch_mask; + break; + } + case CompareBranchType: { + branch_imm = Assembler::ImmCmpBranch(offset); + imm_mask = ImmCmpBranch_mask; + break; + } + case TestBranchType: { + branch_imm = Assembler::ImmTestBranch(offset); + imm_mask = ImmTestBranch_mask; + break; + } + default: VIXL_UNREACHABLE(); + } + SetInstructionBits(Mask(~imm_mask) | branch_imm); +} + + +void Instruction::SetImmLLiteral(const Instruction* source) { + VIXL_ASSERT(IsWordAligned(source)); + ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2; + Instr imm = Assembler::ImmLLiteral(offset); + Instr mask = ImmLLiteral_mask; + + SetInstructionBits(Mask(~mask) | imm); +} +} // namespace vixl + diff --git a/qemu/disas/libvixl/a64/instructions-a64.h b/qemu/disas/libvixl/a64/instructions-a64.h new file mode 100644 index 000000000..f1d883ccc --- /dev/null +++ b/qemu/disas/libvixl/a64/instructions-a64.h @@ -0,0 +1,384 @@ +// Copyright 2013, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_A64_INSTRUCTIONS_A64_H_ +#define VIXL_A64_INSTRUCTIONS_A64_H_ + +#include "globals.h" +#include "utils.h" +#include "a64/constants-a64.h" + +namespace vixl { +// ISA constants. -------------------------------------------------------------- + +typedef uint32_t Instr; +const unsigned kInstructionSize = 4; +const unsigned kInstructionSizeLog2 = 2; +const unsigned kLiteralEntrySize = 4; +const unsigned kLiteralEntrySizeLog2 = 2; +const unsigned kMaxLoadLiteralRange = 1 * MBytes; + +// This is the nominal page size (as used by the adrp instruction); the actual +// size of the memory pages allocated by the kernel is likely to differ. +const unsigned kPageSize = 4 * KBytes; +const unsigned kPageSizeLog2 = 12; + +const unsigned kWRegSize = 32; +const unsigned kWRegSizeLog2 = 5; +const unsigned kWRegSizeInBytes = kWRegSize / 8; +const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3; +const unsigned kXRegSize = 64; +const unsigned kXRegSizeLog2 = 6; +const unsigned kXRegSizeInBytes = kXRegSize / 8; +const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3; +const unsigned kSRegSize = 32; +const unsigned kSRegSizeLog2 = 5; +const unsigned kSRegSizeInBytes = kSRegSize / 8; +const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3; +const unsigned kDRegSize = 64; +const unsigned kDRegSizeLog2 = 6; +const unsigned kDRegSizeInBytes = kDRegSize / 8; +const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3; +const uint64_t kWRegMask = UINT64_C(0xffffffff); +const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff); +const uint64_t kSRegMask = UINT64_C(0xffffffff); +const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff); +const uint64_t kSSignMask = UINT64_C(0x80000000); +const uint64_t kDSignMask = UINT64_C(0x8000000000000000); +const uint64_t kWSignMask = UINT64_C(0x80000000); +const uint64_t kXSignMask = UINT64_C(0x8000000000000000); +const uint64_t kByteMask = UINT64_C(0xff); +const uint64_t kHalfWordMask = UINT64_C(0xffff); +const uint64_t kWordMask = UINT64_C(0xffffffff); +const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff); +const uint64_t kWMaxUInt = UINT64_C(0xffffffff); +const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff); +const int64_t kXMinInt = INT64_C(0x8000000000000000); +const int32_t kWMaxInt = INT32_C(0x7fffffff); +const int32_t kWMinInt = INT32_C(0x80000000); +const unsigned kLinkRegCode = 30; +const unsigned kZeroRegCode = 31; +const unsigned kSPRegInternalCode = 63; +const unsigned kRegCodeMask = 0x1f; + +const unsigned kAddressTagOffset = 56; +const unsigned kAddressTagWidth = 8; +const uint64_t kAddressTagMask = + ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset; +VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000)); + +// AArch64 floating-point specifics. These match IEEE-754. +const unsigned kDoubleMantissaBits = 52; +const unsigned kDoubleExponentBits = 11; +const unsigned kFloatMantissaBits = 23; +const unsigned kFloatExponentBits = 8; + +// Floating-point infinity values. 
+extern const float kFP32PositiveInfinity; +extern const float kFP32NegativeInfinity; +extern const double kFP64PositiveInfinity; +extern const double kFP64NegativeInfinity; + +// The default NaN values (for FPCR.DN=1). +extern const double kFP64DefaultNaN; +extern const float kFP32DefaultNaN; + + +enum LSDataSize { + LSByte = 0, + LSHalfword = 1, + LSWord = 2, + LSDoubleWord = 3 +}; + +LSDataSize CalcLSPairDataSize(LoadStorePairOp op); + +enum ImmBranchType { + UnknownBranchType = 0, + CondBranchType = 1, + UncondBranchType = 2, + CompareBranchType = 3, + TestBranchType = 4 +}; + +enum AddrMode { + Offset, + PreIndex, + PostIndex +}; + +enum FPRounding { + // The first four values are encodable directly by FPCR. + FPTieEven = 0x0, + FPPositiveInfinity = 0x1, + FPNegativeInfinity = 0x2, + FPZero = 0x3, + + // The final rounding mode is only available when explicitly specified by the + // instruction (such as with fcvta). It cannot be set in FPCR. + FPTieAway +}; + +enum Reg31Mode { + Reg31IsStackPointer, + Reg31IsZeroRegister +}; + +// Instructions. --------------------------------------------------------------- + +class Instruction { + public: + Instr InstructionBits() const { + return *(reinterpret_cast(this)); + } + + void SetInstructionBits(Instr new_instr) { + *(reinterpret_cast(this)) = new_instr; + } + + int Bit(int pos) const { + return (InstructionBits() >> pos) & 1; + } + + uint32_t Bits(int msb, int lsb) const { + return unsigned_bitextract_32(msb, lsb, InstructionBits()); + } + + int32_t SignedBits(int msb, int lsb) const { + int32_t bits = *(reinterpret_cast(this)); + return signed_bitextract_32(msb, lsb, bits); + } + + Instr Mask(uint32_t mask) const { + return InstructionBits() & mask; + } + + #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \ + int64_t Name() const { return Func(HighBit, LowBit); } + INSTRUCTION_FIELDS_LIST(DEFINE_GETTER) + #undef DEFINE_GETTER + + // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), + // formed from ImmPCRelLo and ImmPCRelHi. + int ImmPCRel() const { + int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo()); + int const width = ImmPCRelLo_width + ImmPCRelHi_width; + return signed_bitextract_32(width-1, 0, offset); + } + + uint64_t ImmLogical() const; + float ImmFP32() const; + double ImmFP64() const; + + LSDataSize SizeLSPair() const { + return CalcLSPairDataSize( + static_cast(Mask(LoadStorePairMask))); + } + + // Helpers. + bool IsCondBranchImm() const { + return Mask(ConditionalBranchFMask) == ConditionalBranchFixed; + } + + bool IsUncondBranchImm() const { + return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed; + } + + bool IsCompareBranch() const { + return Mask(CompareBranchFMask) == CompareBranchFixed; + } + + bool IsTestBranch() const { + return Mask(TestBranchFMask) == TestBranchFixed; + } + + bool IsPCRelAddressing() const { + return Mask(PCRelAddressingFMask) == PCRelAddressingFixed; + } + + bool IsLogicalImmediate() const { + return Mask(LogicalImmediateFMask) == LogicalImmediateFixed; + } + + bool IsAddSubImmediate() const { + return Mask(AddSubImmediateFMask) == AddSubImmediateFixed; + } + + bool IsAddSubExtended() const { + return Mask(AddSubExtendedFMask) == AddSubExtendedFixed; + } + + bool IsLoadOrStore() const { + return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed; + } + + bool IsLoad() const; + bool IsStore() const; + + bool IsLoadLiteral() const { + // This includes PRFM_lit. 
+ return Mask(LoadLiteralFMask) == LoadLiteralFixed; + } + + bool IsMovn() const { + return (Mask(MoveWideImmediateMask) == MOVN_x) || + (Mask(MoveWideImmediateMask) == MOVN_w); + } + + // Indicate whether Rd can be the stack pointer or the zero register. This + // does not check that the instruction actually has an Rd field. + Reg31Mode RdMode() const { + // The following instructions use sp or wsp as Rd: + // Add/sub (immediate) when not setting the flags. + // Add/sub (extended) when not setting the flags. + // Logical (immediate) when not setting the flags. + // Otherwise, r31 is the zero register. + if (IsAddSubImmediate() || IsAddSubExtended()) { + if (Mask(AddSubSetFlagsBit)) { + return Reg31IsZeroRegister; + } else { + return Reg31IsStackPointer; + } + } + if (IsLogicalImmediate()) { + // Of the logical (immediate) instructions, only ANDS (and its aliases) + // can set the flags. The others can all write into sp. + // Note that some logical operations are not available to + // immediate-operand instructions, so we have to combine two masks here. + if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) { + return Reg31IsZeroRegister; + } else { + return Reg31IsStackPointer; + } + } + return Reg31IsZeroRegister; + } + + // Indicate whether Rn can be the stack pointer or the zero register. This + // does not check that the instruction actually has an Rn field. + Reg31Mode RnMode() const { + // The following instructions use sp or wsp as Rn: + // All loads and stores. + // Add/sub (immediate). + // Add/sub (extended). + // Otherwise, r31 is the zero register. + if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) { + return Reg31IsStackPointer; + } + return Reg31IsZeroRegister; + } + + ImmBranchType BranchType() const { + if (IsCondBranchImm()) { + return CondBranchType; + } else if (IsUncondBranchImm()) { + return UncondBranchType; + } else if (IsCompareBranch()) { + return CompareBranchType; + } else if (IsTestBranch()) { + return TestBranchType; + } else { + return UnknownBranchType; + } + } + + // Find the target of this instruction. 'this' may be a branch or a + // PC-relative addressing instruction. + const Instruction* ImmPCOffsetTarget() const; + + // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or + // a PC-relative addressing instruction. + void SetImmPCOffsetTarget(const Instruction* target); + // Patch a literal load instruction to load from 'source'. + void SetImmLLiteral(const Instruction* source); + + // Calculate the address of a literal referred to by a load-literal + // instruction, and return it as the specified type. + // + // The literal itself is safely mutable only if the backing buffer is safely + // mutable. + template + T LiteralAddress() const { + uint64_t base_raw = reinterpret_cast(this); + ptrdiff_t offset = ImmLLiteral() << kLiteralEntrySizeLog2; + uint64_t address_raw = base_raw + offset; + + // Cast the address using a C-style cast. A reinterpret_cast would be + // appropriate, but it can't cast one integral type to another. + T address = (T)(address_raw); + + // Assert that the address can be represented by the specified type. 
+ VIXL_ASSERT((uint64_t)(address) == address_raw); + + return address; + } + + uint32_t Literal32() const { + uint32_t literal; + memcpy(&literal, LiteralAddress(), sizeof(literal)); + return literal; + } + + uint64_t Literal64() const { + uint64_t literal; + memcpy(&literal, LiteralAddress(), sizeof(literal)); + return literal; + } + + float LiteralFP32() const { + return rawbits_to_float(Literal32()); + } + + double LiteralFP64() const { + return rawbits_to_double(Literal64()); + } + + const Instruction* NextInstruction() const { + return this + kInstructionSize; + } + + const Instruction* InstructionAtOffset(int64_t offset) const { + VIXL_ASSERT(IsWordAligned(this + offset)); + return this + offset; + } + + template static Instruction* Cast(T src) { + return reinterpret_cast(src); + } + + template static const Instruction* CastConst(T src) { + return reinterpret_cast(src); + } + + private: + int ImmBranch() const; + + void SetPCRelImmTarget(const Instruction* target); + void SetBranchImmTarget(const Instruction* target); +}; +} // namespace vixl + +#endif // VIXL_A64_INSTRUCTIONS_A64_H_ diff --git a/qemu/disas/libvixl/code-buffer.h b/qemu/disas/libvixl/code-buffer.h new file mode 100644 index 000000000..da6233dd8 --- /dev/null +++ b/qemu/disas/libvixl/code-buffer.h @@ -0,0 +1,113 @@ +// Copyright 2014, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef VIXL_CODE_BUFFER_H +#define VIXL_CODE_BUFFER_H + +#include +#include "globals.h" + +namespace vixl { + +class CodeBuffer { + public: + explicit CodeBuffer(size_t capacity = 4 * KBytes); + CodeBuffer(void* buffer, size_t capacity); + ~CodeBuffer(); + + void Reset(); + + ptrdiff_t OffsetFrom(ptrdiff_t offset) const { + ptrdiff_t cursor_offset = cursor_ - buffer_; + VIXL_ASSERT((offset >= 0) && (offset <= cursor_offset)); + return cursor_offset - offset; + } + + ptrdiff_t CursorOffset() const { + return OffsetFrom(0); + } + + template + T GetOffsetAddress(ptrdiff_t offset) const { + VIXL_ASSERT((offset >= 0) && (offset <= (cursor_ - buffer_))); + return reinterpret_cast(buffer_ + offset); + } + + size_t RemainingBytes() const { + VIXL_ASSERT((cursor_ >= buffer_) && (cursor_ <= (buffer_ + capacity_))); + return (buffer_ + capacity_) - cursor_; + } + + // A code buffer can emit: + // * 32-bit data: instruction and constant. + // * 64-bit data: constant. + // * string: debug info. + void Emit32(uint32_t data) { Emit(data); } + + void Emit64(uint64_t data) { Emit(data); } + + void EmitString(const char* string); + + // Align to kInstructionSize. + void Align(); + + size_t capacity() const { return capacity_; } + + bool IsManaged() const { return managed_; } + + void Grow(size_t new_capacity); + + bool IsDirty() const { return dirty_; } + + void SetClean() { dirty_ = false; } + + private: + template + void Emit(T value) { + VIXL_ASSERT(RemainingBytes() >= sizeof(value)); + dirty_ = true; + memcpy(cursor_, &value, sizeof(value)); + cursor_ += sizeof(value); + } + + // Backing store of the buffer. + byte* buffer_; + // If true the backing store is allocated and deallocated by the buffer. The + // backing store can then grow on demand. If false the backing store is + // provided by the user and cannot be resized internally. + bool managed_; + // Pointer to the next location to be written. + byte* cursor_; + // True if there has been any write since the buffer was created or cleaned. + bool dirty_; + // Capacity in bytes of the backing store. + size_t capacity_; +}; + +} // namespace vixl + +#endif // VIXL_CODE_BUFFER_H + diff --git a/qemu/disas/libvixl/globals.h b/qemu/disas/libvixl/globals.h new file mode 100644 index 000000000..0c2493105 --- /dev/null +++ b/qemu/disas/libvixl/globals.h @@ -0,0 +1,85 @@ +// Copyright 2013, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_GLOBALS_H +#define VIXL_GLOBALS_H + +// Get standard C99 macros for integer types. +#ifndef __STDC_CONSTANT_MACROS +#define __STDC_CONSTANT_MACROS +#endif + +#ifndef __STDC_LIMIT_MACROS +#define __STDC_LIMIT_MACROS +#endif + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include +#include + +#include +#include +#include +#include +#include +#include +#include "platform.h" + + +typedef uint8_t byte; + +const int KBytes = 1024; +const int MBytes = 1024 * KBytes; + +#define VIXL_ABORT() printf("in %s, line %i", __FILE__, __LINE__); abort() +#ifdef VIXL_DEBUG + #define VIXL_ASSERT(condition) assert(condition) + #define VIXL_CHECK(condition) VIXL_ASSERT(condition) + #define VIXL_UNIMPLEMENTED() printf("UNIMPLEMENTED\t"); VIXL_ABORT() + #define VIXL_UNREACHABLE() printf("UNREACHABLE\t"); VIXL_ABORT() +#else + #define VIXL_ASSERT(condition) ((void) 0) + #define VIXL_CHECK(condition) assert(condition) + #define VIXL_UNIMPLEMENTED() ((void) 0) + #define VIXL_UNREACHABLE() ((void) 0) +#endif +// This is not as powerful as template based assertions, but it is simple. +// It assumes that the descriptions are unique. If this starts being a problem, +// we can switch to a different implemention. +#define VIXL_CONCAT(a, b) a##b +#define VIXL_STATIC_ASSERT_LINE(line, condition) \ + typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \ + __attribute__((unused)) +#define VIXL_STATIC_ASSERT(condition) VIXL_STATIC_ASSERT_LINE(__LINE__, condition) //NOLINT + +template inline void USE(T) {} + +#define VIXL_ALIGNMENT_EXCEPTION() printf("ALIGNMENT EXCEPTION\t"); VIXL_ABORT() + +#endif // VIXL_GLOBALS_H diff --git a/qemu/disas/libvixl/platform.h b/qemu/disas/libvixl/platform.h new file mode 100644 index 000000000..de2b110cc --- /dev/null +++ b/qemu/disas/libvixl/platform.h @@ -0,0 +1,37 @@ +// Copyright 2013, ARM Limited +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef PLATFORM_H
+#define PLATFORM_H
+
+// Define platform specific functionalities.
+#include <signal.h>
+
+namespace vixl {
+inline void HostBreakpoint() { raise(SIGINT); }
+}  // namespace vixl
+
+#endif
diff --git a/qemu/disas/libvixl/utils.cc b/qemu/disas/libvixl/utils.cc
new file mode 100644
index 000000000..80b132a11
--- /dev/null
+++ b/qemu/disas/libvixl/utils.cc
@@ -0,0 +1,151 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "utils.h"
+#include <stdio.h>
+
+namespace vixl {
+
+uint32_t float_to_rawbits(float value) {
+  uint32_t bits = 0;
+  memcpy(&bits, &value, 4);
+  return bits;
+}
+
+
+uint64_t double_to_rawbits(double value) {
+  uint64_t bits = 0;
+  memcpy(&bits, &value, 8);
+  return bits;
+}
+
+
+float rawbits_to_float(uint32_t bits) {
+  float value = 0.0;
+  memcpy(&value, &bits, 4);
+  return value;
+}
+
+
+double rawbits_to_double(uint64_t bits) {
+  double value = 0.0;
+  memcpy(&value, &bits, 8);
+  return value;
+}
+
+
+int CountLeadingZeros(uint64_t value, int width) {
+  VIXL_ASSERT((width == 32) || (width == 64));
+  int count = 0;
+  uint64_t bit_test = UINT64_C(1) << (width - 1);
+  while ((count < width) && ((bit_test & value) == 0)) {
+    count++;
+    bit_test >>= 1;
+  }
+  return count;
+}
+
+
+int CountLeadingSignBits(int64_t value, int width) {
+  VIXL_ASSERT((width == 32) || (width == 64));
+  if (value >= 0) {
+    return CountLeadingZeros(value, width) - 1;
+  } else {
+    return CountLeadingZeros(~value, width) - 1;
+  }
+}
+
+
+int CountTrailingZeros(uint64_t value, int width) {
+  VIXL_ASSERT((width == 32) || (width == 64));
+  int count = 0;
+  while ((count < width) && (((value >> count) & 1) == 0)) {
+    count++;
+  }
+  return count;
+}
+
+
+int CountSetBits(uint64_t value, int width) {
+  // TODO: Other widths could be added here, as the implementation already
+  // supports them.
+  VIXL_ASSERT((width == 32) || (width == 64));
+
+  // Mask out unused bits to ensure that they are not counted.
+  value &= (UINT64_C(0xffffffffffffffff) >> (64-width));
+
+  // Add up the set bits.
+  // The algorithm works by adding pairs of bit fields together iteratively,
+  // where the size of each bit field doubles each time.
+  // An example for an 8-bit value:
+  //   Bits:  h  g  f  e  d  c  b  a
+  //           \ |   \ |   \ |   \ |
+  //   value = h+g   f+e   d+c   b+a
+  //              \    |      \    |
+  //   value =   h+g+f+e     d+c+b+a
+  //                    \          |
+  //   value =   h+g+f+e+d+c+b+a
+  const uint64_t kMasks[] = {
+    UINT64_C(0x5555555555555555),
+    UINT64_C(0x3333333333333333),
+    UINT64_C(0x0f0f0f0f0f0f0f0f),
+    UINT64_C(0x00ff00ff00ff00ff),
+    UINT64_C(0x0000ffff0000ffff),
+    UINT64_C(0x00000000ffffffff),
+  };
+
+  for (unsigned i = 0; i < (sizeof(kMasks) / sizeof(kMasks[0])); i++) {
+    int shift = 1 << i;
+    value = ((value >> shift) & kMasks[i]) + (value & kMasks[i]);
+  }
+
+  return value;
+}
+
+
+uint64_t LowestSetBit(uint64_t value) {
+  return value & -value;
+}
+
+
+bool IsPowerOf2(int64_t value) {
+  return (value != 0) && ((value & (value - 1)) == 0);
+}
+
+
+unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
+  VIXL_ASSERT((reg_size % 8) == 0);
+  int count = 0;
+  for (unsigned i = 0; i < (reg_size / 16); i++) {
+    if ((imm & 0xffff) == 0) {
+      count++;
+    }
+    imm >>= 16;
+  }
+  return count;
+}
+
+}  // namespace vixl
diff --git a/qemu/disas/libvixl/utils.h b/qemu/disas/libvixl/utils.h
new file mode 100644
index 000000000..b4406263a
--- /dev/null
+++ b/qemu/disas/libvixl/utils.h
@@ -0,0 +1,211 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_UTILS_H
+#define VIXL_UTILS_H
+
+#include <math.h>
+#include <string.h>
+#include "globals.h"
+
+namespace vixl {
+
+// Macros for compile-time format checking.
+#if defined(__GNUC__)
+#define PRINTF_CHECK(format_index, varargs_index) \
+  __attribute__((format(printf, format_index, varargs_index)))
+#else
+#define PRINTF_CHECK(format_index, varargs_index)
+#endif
+
+// Check number width.
+inline bool is_intn(unsigned n, int64_t x) {
+  VIXL_ASSERT((0 < n) && (n < 64));
+  int64_t limit = INT64_C(1) << (n - 1);
+  return (-limit <= x) && (x < limit);
+}
+
+inline bool is_uintn(unsigned n, int64_t x) {
+  VIXL_ASSERT((0 < n) && (n < 64));
+  return !(x >> n);
+}
+
+inline unsigned truncate_to_intn(unsigned n, int64_t x) {
+  VIXL_ASSERT((0 < n) && (n < 64));
+  return (x & ((INT64_C(1) << n) - 1));
+}
+
+#define INT_1_TO_63_LIST(V) \
+V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
+V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
+V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
+V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
+V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
+V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
+V(57) V(58) V(59) V(60) V(61) V(62) V(63)
+
+#define DECLARE_IS_INT_N(N) \
+inline bool is_int##N(int64_t x) { return is_intn(N, x); }
+#define DECLARE_IS_UINT_N(N) \
+inline bool is_uint##N(int64_t x) { return is_uintn(N, x); }
+#define DECLARE_TRUNCATE_TO_INT_N(N) \
+inline int truncate_to_int##N(int x) { return truncate_to_intn(N, x); }
+INT_1_TO_63_LIST(DECLARE_IS_INT_N)
+INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
+INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
+#undef DECLARE_IS_INT_N
+#undef DECLARE_IS_UINT_N
+#undef DECLARE_TRUNCATE_TO_INT_N
+
+// Bit field extraction.
+inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
+  return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
+}
+
+inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
+  return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
+}
+
+inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
+  return (x << (31 - msb)) >> (lsb + 31 - msb);
+}
+
+inline int64_t signed_bitextract_64(int msb, int lsb, int64_t x) {
+  return (x << (63 - msb)) >> (lsb + 63 - msb);
+}
+
+// Floating point representation.
+uint32_t float_to_rawbits(float value);
+uint64_t double_to_rawbits(double value);
+float rawbits_to_float(uint32_t bits);
+double rawbits_to_double(uint64_t bits);
+
+
+// NaN tests.
+
+inline bool IsSignallingNaN(double num) {
+  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
+  uint64_t raw = double_to_rawbits(num);
+  if (isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
+    return true;
+  }
+  return false;
+}
+
+
+inline bool IsSignallingNaN(float num) {
+  const uint32_t kFP32QuietNaNMask = 0x00400000;
+  uint32_t raw = float_to_rawbits(num);
+  if (isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
+    return true;
+  }
+  return false;
+}
+
+
+template <typename T>
+inline bool IsQuietNaN(T num) {
+  return isnan(num) && !IsSignallingNaN(num);
+}
+
+
+// Convert the NaN in 'num' to a quiet NaN.
+inline double ToQuietNaN(double num) {
+  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
+  VIXL_ASSERT(isnan(num));
+  return rawbits_to_double(double_to_rawbits(num) | kFP64QuietNaNMask);
+}
+
+
+inline float ToQuietNaN(float num) {
+  const uint32_t kFP32QuietNaNMask = 0x00400000;
+  VIXL_ASSERT(isnan(num));
+  return rawbits_to_float(float_to_rawbits(num) | kFP32QuietNaNMask);
+}
+
+
+// Fused multiply-add.
+inline double FusedMultiplyAdd(double op1, double op2, double a) {
+  return fma(op1, op2, a);
+}
+
+
+inline float FusedMultiplyAdd(float op1, float op2, float a) {
+  return fmaf(op1, op2, a);
+}
+
+
+// Bit counting.
+int CountLeadingZeros(uint64_t value, int width);
+int CountLeadingSignBits(int64_t value, int width);
+int CountTrailingZeros(uint64_t value, int width);
+int CountSetBits(uint64_t value, int width);
+uint64_t LowestSetBit(uint64_t value);
+bool IsPowerOf2(int64_t value);
+
+unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+
+// Pointer alignment
+// TODO: rename/refactor to make it specific to instructions.
+template <typename T>
+bool IsWordAligned(T pointer) {
+  VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t));  // NOLINT(runtime/sizeof)
+  return ((intptr_t)(pointer) & 3) == 0;
+}
+
+// Increment a pointer (up to 64 bits) until it has the specified alignment.
+template <typename T>
+T AlignUp(T pointer, size_t alignment) {
+  // Use C-style casts to get static_cast behaviour for integral types (T), and
+  // reinterpret_cast behaviour for other types.
+
+  uint64_t pointer_raw = (uint64_t)pointer;
+  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
+
+  size_t align_step = (alignment - pointer_raw) % alignment;
+  VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);
+
+  return (T)(pointer_raw + align_step);
+}
+
+// Decrement a pointer (up to 64 bits) until it has the specified alignment.
+template <typename T>
+T AlignDown(T pointer, size_t alignment) {
+  // Use C-style casts to get static_cast behaviour for integral types (T), and
+  // reinterpret_cast behaviour for other types.
+
+  uint64_t pointer_raw = (uint64_t)pointer;
+  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
+
+  size_t align_step = pointer_raw % alignment;
+  VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);
+
+  return (T)(pointer_raw - align_step);
+}
+
+}  // namespace vixl
+
+#endif  // VIXL_UTILS_H
--
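
Editor's note (not part of the patch): the standalone sketch below illustrates two of the techniques the imported utilities rely on -- the pairwise population count described in utils.cc and the signalling-NaN test from utils.h. The file name demo.cc and the helper names CountSetBits64 and IsSignallingNaN64 are illustrative only; the sketch assumes a C++11 compiler (e.g. g++ -std=c++11 demo.cc).

// demo.cc -- minimal illustration of the bit-counting and NaN-classification
// logic mirrored from the vixl utility files added by this patch.
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Pairwise reduction: each pass sums adjacent bit fields, doubling the field
// width (1 -> 2 -> 4 -> ... -> 64 bits), exactly as in vixl::CountSetBits.
static int CountSetBits64(std::uint64_t value) {
  static const std::uint64_t kMasks[] = {
    UINT64_C(0x5555555555555555), UINT64_C(0x3333333333333333),
    UINT64_C(0x0f0f0f0f0f0f0f0f), UINT64_C(0x00ff00ff00ff00ff),
    UINT64_C(0x0000ffff0000ffff), UINT64_C(0x00000000ffffffff),
  };
  for (unsigned i = 0; i < sizeof(kMasks) / sizeof(kMasks[0]); i++) {
    int shift = 1 << i;
    value = ((value >> shift) & kMasks[i]) + (value & kMasks[i]);
  }
  return static_cast<int>(value);
}

// A double NaN is signalling when the top fraction bit (the "quiet" bit,
// 0x0008000000000000) is clear -- the same test utils.h performs on raw bits.
static bool IsSignallingNaN64(double num) {
  const std::uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  std::uint64_t raw;
  std::memcpy(&raw, &num, sizeof(raw));
  return std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0);
}

int main() {
  // 0xf0f0 has eight set bits.
  std::printf("set bits in 0xf0f0    : %d\n", CountSetBits64(UINT64_C(0xf0f0)));
  // std::nan("") produces a quiet NaN, so this prints 0.
  std::printf("quiet NaN signalling? : %d\n", IsSignallingNaN64(std::nan("")) ? 1 : 0);
  return 0;
}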