author | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-11 10:41:07 +0300
---|---|---
committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-13 08:17:18 +0300
commit | e09b41010ba33a20a87472ee821fa407a5b8da36 |
tree | d10dc367189862e7ca5c592f033dc3726e1df4e3 | /kernel/arch/xtensa
parent | f93b97fd65072de626c074dbe099a1fff05ce060 |
These changes are the raw update to linux-4.4.6-rt14. The kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.
During the rebase, the following patch collided:
Force tick interrupt and get rid of softirq magic (I70131fb85).
The colliding patch was dropped because its logic was already present
in the new sources.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/xtensa')
50 files changed, 2364 insertions, 797 deletions
diff --git a/kernel/arch/xtensa/Kconfig b/kernel/arch/xtensa/Kconfig index 87be10e8b..82044f732 100644 --- a/kernel/arch/xtensa/Kconfig +++ b/kernel/arch/xtensa/Kconfig @@ -14,12 +14,16 @@ config XTENSA select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK + select HAVE_DMA_API_DEBUG + select HAVE_DMA_ATTRS select HAVE_FUNCTION_TRACER + select HAVE_FUTEX_CMPXCHG if !MMU select HAVE_IRQ_TIME_ACCOUNTING select HAVE_OPROFILE select HAVE_PERF_EVENTS select IRQ_DOMAIN select MODULES_USE_ELF_RELA + select PERF_USE_VMALLOC select VIRT_TO_BUS help Xtensa processors are 32-bit RISC machines designed by Tensilica @@ -61,9 +65,7 @@ config TRACE_IRQFLAGS_SUPPORT def_bool y config MMU - bool - default n if !XTENSA_VARIANT_CUSTOM - default XTENSA_VARIANT_MMU if XTENSA_VARIANT_CUSTOM + def_bool n config VARIANT_IRQ_SWITCH def_bool n @@ -71,9 +73,6 @@ config VARIANT_IRQ_SWITCH config HAVE_XTENSA_GPIO32 def_bool n -config MAY_HAVE_SMP - def_bool n - menu "Processor type and features" choice @@ -100,7 +99,6 @@ config XTENSA_VARIANT_DC233C config XTENSA_VARIANT_CUSTOM bool "Custom Xtensa processor configuration" - select MAY_HAVE_SMP select HAVE_XTENSA_GPIO32 help Select this variant to use a custom Xtensa processor configuration. @@ -126,10 +124,21 @@ config XTENSA_VARIANT_MMU bool "Core variant has a Full MMU (TLB, Pages, Protection, etc)" depends on XTENSA_VARIANT_CUSTOM default y + select MMU help Build a Conventional Kernel with full MMU support, ie: it supports a TLB with auto-loading, page protection. +config XTENSA_VARIANT_HAVE_PERF_EVENTS + bool "Core variant has Performance Monitor Module" + depends on XTENSA_VARIANT_CUSTOM + default n + help + Enable if core variant has Performance Monitor Module with + External Registers Interface. + + If unsure, say N. + config XTENSA_UNALIGNED_USER bool "Unaligned memory access in use space" help @@ -143,7 +152,7 @@ source "kernel/Kconfig.preempt" config HAVE_SMP bool "System Supports SMP (MX)" - depends on MAY_HAVE_SMP + depends on XTENSA_VARIANT_CUSTOM select XTENSA_MX help This option is use to indicate that the system-on-a-chip (SOC) @@ -151,7 +160,7 @@ config HAVE_SMP the CPU core definition and currently needs to be selected manually. Multiprocessor support in implemented with external cache and - interrupt controlers. + interrupt controllers. The MX interrupt distributer adds Interprocessor Interrupts and causes the IRQ numbers to be increased by 4 for devices @@ -389,6 +398,20 @@ config SIMDISK1_FILENAME source "mm/Kconfig" +config FORCE_MAX_ZONEORDER + int "Maximum zone order" + default "11" + help + The kernel memory allocator divides physically contiguous memory + blocks into "zones", where each zone is a power of two number of + pages. This option selects the largest power of two that the kernel + keeps in the memory allocator. If you need to allocate very large + blocks of physically contiguous memory, then you may need to + increase this value. + + This config option is actually maximum order plus one. For example, + a value of 11 means that the largest free memory block is 2^10 pages. 
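To make the off-by-one in the FORCE_MAX_ZONEORDER help text above concrete: with a value of 11, the buddy allocator's largest block is 2^(11-1) = 2^10 pages. A standalone sketch of the arithmetic (not part of the patch; the 4 KiB page size is an assumption matching xtensa's default configuration):

```c
/* Illustrative only -- not part of this patch.
 * Computes the largest physically contiguous block the buddy allocator
 * can hand out for a given CONFIG_FORCE_MAX_ZONEORDER.
 * Assumes 4 KiB pages.
 */
#include <stdio.h>

int main(void)
{
    const unsigned int max_zoneorder = 11;    /* CONFIG_FORCE_MAX_ZONEORDER */
    const unsigned long page_size = 4096;     /* assumed PAGE_SIZE */
    unsigned long pages = 1UL << (max_zoneorder - 1); /* 2^10 = 1024 pages */

    printf("largest free block: %lu pages = %lu MiB\n",
           pages, pages * page_size >> 20);   /* 1024 * 4 KiB = 4 MiB */
    return 0;
}
```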
+ source "drivers/pcmcia/Kconfig" source "drivers/pci/hotplug/Kconfig" @@ -400,7 +423,7 @@ config DEFAULT_MEM_START hex "Physical address of the default memory area start" depends on PLATFORM_WANT_DEFAULT_MEM default 0x00000000 if MMU - default 0x40000000 if !MMU + default 0x60000000 if !MMU help This is a fallback start address of the default memory area, it is used when no physical memory size is passed through DTB or through diff --git a/kernel/arch/xtensa/Makefile b/kernel/arch/xtensa/Makefile index f9e6a068a..709b5748a 100644 --- a/kernel/arch/xtensa/Makefile +++ b/kernel/arch/xtensa/Makefile @@ -101,6 +101,10 @@ zImage: vmlinux %.dtb: $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@ +dtbs: scripts + $(Q)$(MAKE) $(build)=$(boot)/dts + define archhelp @echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)' + @echo ' dtbs - Build device tree blobs for enabled boards' endef diff --git a/kernel/arch/xtensa/boot/boot-elf/boot.lds.S b/kernel/arch/xtensa/boot/boot-elf/boot.lds.S index 958b33af9..e54f2c9df 100644 --- a/kernel/arch/xtensa/boot/boot-elf/boot.lds.S +++ b/kernel/arch/xtensa/boot/boot-elf/boot.lds.S @@ -40,17 +40,4 @@ SECTIONS *(.bss) __bss_end = .; } - -#ifdef CONFIG_MMU - /* - * This is a remapped copy of the Reset Vector Code. - * It keeps gdb in sync with the PC after switching - * to the temporary mapping used while setting up - * the V2 MMU mappings for Linux. - */ - .ResetVector.remapped_text 0x46000000 (INFO): - { - *(.ResetVector.remapped_text) - } -#endif } diff --git a/kernel/arch/xtensa/boot/boot-elf/bootstrap.S b/kernel/arch/xtensa/boot/boot-elf/bootstrap.S index 9341a5750..e6bf31361 100644 --- a/kernel/arch/xtensa/boot/boot-elf/bootstrap.S +++ b/kernel/arch/xtensa/boot/boot-elf/bootstrap.S @@ -58,8 +58,6 @@ _SetupMMU: wsr a0, ps rsync - Offset = _SetupMMU - _ResetVector - #ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX initialize_mmu #endif @@ -74,29 +72,3 @@ reset: movi a3, 0 movi a4, 0 jx a0 - -#ifdef CONFIG_MMU - .align 4 - - .section .ResetVector.remapped_text, "x" - .global _RemappedResetVector - - /* Do org before literals */ - .org 0 - -_RemappedResetVector: - .begin no-absolute-literals - .literal_position - - _j _RemappedSetupMMU - - /* Position Remapped code at the same location as the original code */ - . 
= _RemappedResetVector + Offset - -_RemappedSetupMMU: -#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX - initialize_mmu -#endif - - .end no-absolute-literals -#endif diff --git a/kernel/arch/xtensa/boot/dts/Makefile b/kernel/arch/xtensa/boot/dts/Makefile index 5f711bba8..a15e241c9 100644 --- a/kernel/arch/xtensa/boot/dts/Makefile +++ b/kernel/arch/xtensa/boot/dts/Makefile @@ -12,4 +12,9 @@ ifneq ($(CONFIG_BUILTIN_DTB),"") obj-$(CONFIG_OF) += $(BUILTIN_DTB) endif -clean-files := *.dtb.S +dtstree := $(srctree)/$(src) +dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) + +always += $(dtb-y) +clean-files += *.dtb *.dtb.S + diff --git a/kernel/arch/xtensa/boot/dts/kc705_nommu.dts b/kernel/arch/xtensa/boot/dts/kc705_nommu.dts new file mode 100644 index 000000000..65f3d741b --- /dev/null +++ b/kernel/arch/xtensa/boot/dts/kc705_nommu.dts @@ -0,0 +1,17 @@ +/dts-v1/; +/include/ "xtfpga.dtsi" +/include/ "xtfpga-flash-128m.dtsi" + +/ { + compatible = "cdns,xtensa-kc705"; + chosen { + bootargs = "earlycon=uart8250,mmio32,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"; + }; + memory@0 { + device_type = "memory"; + reg = <0x60000000 0x10000000>; + }; + soc { + ranges = <0x00000000 0x90000000 0x10000000>; + }; +}; diff --git a/kernel/arch/xtensa/configs/iss_defconfig b/kernel/arch/xtensa/configs/iss_defconfig index e4d193e7a..44c6764d9 100644 --- a/kernel/arch/xtensa/configs/iss_defconfig +++ b/kernel/arch/xtensa/configs/iss_defconfig @@ -169,7 +169,6 @@ CONFIG_FLATMEM_MANUAL=y # CONFIG_SPARSEMEM_MANUAL is not set CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y -CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=1 @@ -616,7 +615,6 @@ CONFIG_SCHED_DEBUG=y # CONFIG_SLUB_DEBUG_ON is not set # CONFIG_SLUB_STATS is not set # CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_RT_MUTEX_TESTER is not set # CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_MUTEXES is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set diff --git a/kernel/arch/xtensa/configs/nommu_kc705_defconfig b/kernel/arch/xtensa/configs/nommu_kc705_defconfig new file mode 100644 index 000000000..337d5ba2d --- /dev/null +++ b/kernel/arch/xtensa/configs/nommu_kc705_defconfig @@ -0,0 +1,131 @@ +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_FHANDLE=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_MEMCG=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_EXPERT=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PERF_EVENTS=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_XTENSA_VARIANT_CUSTOM=y +CONFIG_XTENSA_VARIANT_CUSTOM_NAME="de212" +# CONFIG_XTENSA_VARIANT_MMU is not set +CONFIG_XTENSA_UNALIGNED_USER=y +CONFIG_PREEMPT=y +# CONFIG_PCI is not set +CONFIG_XTENSA_PLATFORM_XTFPGA=y +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="earlycon=uart8250,mmio32,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug" +CONFIG_USE_OF=y +CONFIG_BUILTIN_DTB="kc705_nommu" +CONFIG_DEFAULT_MEM_SIZE=0x10000000 +CONFIG_BINFMT_FLAT=y +CONFIG_NET=y +CONFIG_PACKET=y 
+CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +# CONFIG_IPV6 is not set +CONFIG_NETFILTER=y +# CONFIG_WIRELESS is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +CONFIG_MTD=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_MARVELL_PHY=y +# CONFIG_WLAN is not set +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_HW_RANDOM=y +# CONFIG_HWMON is not set +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_NOWAYOUT=y +CONFIG_SOFT_WATCHDOG=y +# CONFIG_VGA_CONSOLE is not set +# CONFIG_USB_SUPPORT is not set +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y +CONFIG_FANOTIFY=y +CONFIG_VFAT_FS=y +CONFIG_JFFS2_FS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_ROOT_NFS=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +# CONFIG_FRAME_POINTER is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_VM=y +CONFIG_DEBUG_NOMMU_REGIONS=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_STACKTRACE=y +# CONFIG_RCU_CPU_STALL_INFO is not set +CONFIG_RCU_TRACE=y +# CONFIG_FTRACE is not set +# CONFIG_LD_NO_RELAX is not set +# CONFIG_CRYPTO_ECHAINIV is not set +CONFIG_CRYPTO_ANSI_CPRNG=y diff --git a/kernel/arch/xtensa/include/asm/Kbuild b/kernel/arch/xtensa/include/asm/Kbuild index 86a9ab2e2..b56855a13 100644 --- a/kernel/arch/xtensa/include/asm/Kbuild +++ b/kernel/arch/xtensa/include/asm/Kbuild @@ -2,7 +2,6 @@ generic-y += bitsperlong.h generic-y += bug.h generic-y += clkdev.h generic-y += cputime.h -generic-y += device.h generic-y += div64.h generic-y += emergency-restart.h generic-y += errno.h @@ -19,14 +18,15 @@ generic-y += linkage.h generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += percpu.h generic-y += preempt.h generic-y += resource.h -generic-y += scatterlist.h generic-y += sections.h generic-y += siginfo.h generic-y += statfs.h generic-y += termios.h generic-y += topology.h generic-y += trace_clock.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/kernel/arch/xtensa/include/asm/asmmacro.h b/kernel/arch/xtensa/include/asm/asmmacro.h index 755320f6e..746dcc8b5 100644 --- a/kernel/arch/xtensa/include/asm/asmmacro.h +++ b/kernel/arch/xtensa/include/asm/asmmacro.h @@ -35,9 +35,10 @@ * __loop as * restart loop. 'as' register must not have been modified! 
* - * __endla ar, at, incr + * __endla ar, as, incr * ar start address (modified) - * as scratch register used by macro + * as scratch register used by __loops/__loopi macros or + * end address used by __loopt macro * inc increment */ @@ -97,7 +98,7 @@ .endm /* - * loop from ar to ax + * loop from ar to as */ .macro __loopt ar, as, at, incr_log2 diff --git a/kernel/arch/xtensa/include/asm/atomic.h b/kernel/arch/xtensa/include/asm/atomic.h index 00b7d46b3..fd8017ce2 100644 --- a/kernel/arch/xtensa/include/asm/atomic.h +++ b/kernel/arch/xtensa/include/asm/atomic.h @@ -29,7 +29,7 @@ * * Locking interrupts looks like this: * - * rsil a15, LOCKLEVEL + * rsil a15, TOPLEVEL * <code> * wsr a15, PS * rsync @@ -47,7 +47,7 @@ * * Atomically reads the value of @v. */ -#define atomic_read(v) ACCESS_ONCE((v)->counter) +#define atomic_read(v) READ_ONCE((v)->counter) /** * atomic_set - set atomic variable @@ -56,7 +56,7 @@ * * Atomically sets the value of @v to @i. */ -#define atomic_set(v,i) ((v)->counter = (i)) +#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i)) #if XCHAL_HAVE_S32C1I #define ATOMIC_OP(op) \ @@ -106,7 +106,7 @@ static inline void atomic_##op(int i, atomic_t * v) \ unsigned int vval; \ \ __asm__ __volatile__( \ - " rsil a15, "__stringify(LOCKLEVEL)"\n"\ + " rsil a15, "__stringify(TOPLEVEL)"\n"\ " l32i %0, %2, 0\n" \ " " #op " %0, %0, %1\n" \ " s32i %0, %2, 0\n" \ @@ -124,7 +124,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ unsigned int vval; \ \ __asm__ __volatile__( \ - " rsil a15,"__stringify(LOCKLEVEL)"\n" \ + " rsil a15,"__stringify(TOPLEVEL)"\n" \ " l32i %0, %2, 0\n" \ " " #op " %0, %0, %1\n" \ " s32i %0, %2, 0\n" \ @@ -145,6 +145,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -250,75 +254,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) return c; } - -static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ -#if XCHAL_HAVE_S32C1I - unsigned long tmp; - int result; - - __asm__ __volatile__( - "1: l32i %1, %3, 0\n" - " wsr %1, scompare1\n" - " and %0, %1, %2\n" - " s32c1i %0, %3, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (result), "=&a" (tmp) - : "a" (~mask), "a" (v) - : "memory" - ); -#else - unsigned int all_f = -1; - unsigned int vval; - - __asm__ __volatile__( - " rsil a15,"__stringify(LOCKLEVEL)"\n" - " l32i %0, %2, 0\n" - " xor %1, %4, %3\n" - " and %0, %0, %4\n" - " s32i %0, %2, 0\n" - " wsr a15, ps\n" - " rsync\n" - : "=&a" (vval), "=a" (mask) - : "a" (v), "a" (all_f), "1" (mask) - : "a15", "memory" - ); -#endif -} - -static inline void atomic_set_mask(unsigned int mask, atomic_t *v) -{ -#if XCHAL_HAVE_S32C1I - unsigned long tmp; - int result; - - __asm__ __volatile__( - "1: l32i %1, %3, 0\n" - " wsr %1, scompare1\n" - " or %0, %1, %2\n" - " s32c1i %0, %3, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (result), "=&a" (tmp) - : "a" (mask), "a" (v) - : "memory" - ); -#else - unsigned int vval; - - __asm__ __volatile__( - " rsil a15,"__stringify(LOCKLEVEL)"\n" - " l32i %0, %2, 0\n" - " or %0, %0, %1\n" - " s32i %0, %2, 0\n" - " wsr a15, ps\n" - " rsync\n" - : "=&a" (vval) - : "a" (mask), "a" (v) - : "a15", "memory" - ); -#endif -} - #endif /* __KERNEL__ */ #endif /* _XTENSA_ATOMIC_H */ diff --git a/kernel/arch/xtensa/include/asm/cacheasm.h b/kernel/arch/xtensa/include/asm/cacheasm.h index 60e18773e..e0f9e1109 100644 --- a/kernel/arch/xtensa/include/asm/cacheasm.h +++ 
b/kernel/arch/xtensa/include/asm/cacheasm.h @@ -73,7 +73,9 @@ .macro ___unlock_dcache_all ar at +#if XCHAL_DCACHE_SIZE __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH +#endif .endm @@ -90,30 +92,38 @@ .macro ___flush_invalidate_dcache_all ar at +#if XCHAL_DCACHE_SIZE __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH +#endif .endm .macro ___flush_dcache_all ar at +#if XCHAL_DCACHE_SIZE __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH +#endif .endm .macro ___invalidate_dcache_all ar at +#if XCHAL_DCACHE_SIZE __loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \ XCHAL_DCACHE_LINEWIDTH +#endif .endm .macro ___invalidate_icache_all ar at +#if XCHAL_ICACHE_SIZE __loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \ XCHAL_ICACHE_LINEWIDTH +#endif .endm @@ -121,28 +131,36 @@ .macro ___flush_invalidate_dcache_range ar as at +#if XCHAL_DCACHE_SIZE __loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH +#endif .endm .macro ___flush_dcache_range ar as at +#if XCHAL_DCACHE_SIZE __loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH +#endif .endm .macro ___invalidate_dcache_range ar as at +#if XCHAL_DCACHE_SIZE __loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH +#endif .endm .macro ___invalidate_icache_range ar as at +#if XCHAL_ICACHE_SIZE __loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH +#endif .endm @@ -150,27 +168,35 @@ .macro ___flush_invalidate_dcache_page ar as +#if XCHAL_DCACHE_SIZE __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH +#endif .endm .macro ___flush_dcache_page ar as +#if XCHAL_DCACHE_SIZE __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH +#endif .endm .macro ___invalidate_dcache_page ar as +#if XCHAL_DCACHE_SIZE __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH +#endif .endm .macro ___invalidate_icache_page ar as +#if XCHAL_ICACHE_SIZE __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH +#endif .endm diff --git a/kernel/arch/xtensa/include/asm/cacheflush.h b/kernel/arch/xtensa/include/asm/cacheflush.h index 5f67ace97..397d6a1a4 100644 --- a/kernel/arch/xtensa/include/asm/cacheflush.h +++ b/kernel/arch/xtensa/include/asm/cacheflush.h @@ -55,9 +55,14 @@ extern void __flush_dcache_range(unsigned long, unsigned long); extern void __flush_invalidate_dcache_page(unsigned long); extern void __flush_invalidate_dcache_range(unsigned long, unsigned long); #else -# define __flush_dcache_range(p,s) do { } while(0) -# define __flush_dcache_page(p) do { } while(0) -# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p) +static inline void __flush_dcache_page(unsigned long va) +{ +} +static inline void __flush_dcache_range(unsigned long va, unsigned long sz) +{ +} +# define __flush_invalidate_dcache_all() __invalidate_dcache_all() +# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p) # define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s) #endif @@ -174,99 +179,4 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*, #endif -#define XTENSA_CACHEBLK_LOG2 29 -#define XTENSA_CACHEBLK_SIZE (1 << XTENSA_CACHEBLK_LOG2) -#define XTENSA_CACHEBLK_MASK (7 << XTENSA_CACHEBLK_LOG2) - -#if XCHAL_HAVE_CACHEATTR -static inline u32 xtensa_get_cacheattr(void) -{ - u32 r; - asm volatile(" rsr %0, cacheattr" : "=a"(r)); - return r; -} - -static inline u32 xtensa_get_dtlb1(u32 addr) -{ - u32 r = addr & XTENSA_CACHEBLK_MASK; - return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2))) - & 0xF); -} -#else -static inline 
u32 xtensa_get_dtlb1(u32 addr) -{ - u32 r; - asm volatile(" rdtlb1 %0, %1" : "=a"(r) : "a"(addr)); - asm volatile(" dsync"); - return r; -} - -static inline u32 xtensa_get_cacheattr(void) -{ - u32 r = 0; - u32 a = 0; - do { - a -= XTENSA_CACHEBLK_SIZE; - r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF); - } while (a); - return r; -} -#endif - -static inline int xtensa_need_flush_dma_source(u32 addr) -{ - return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4; -} - -static inline int xtensa_need_invalidate_dma_destination(u32 addr) -{ - return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2; -} - -static inline void flush_dcache_unaligned(u32 addr, u32 size) -{ - u32 cnt; - if (size) { - cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr) - + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE; - while (cnt--) { - asm volatile(" dhwb %0, 0" : : "a"(addr)); - addr += XCHAL_DCACHE_LINESIZE; - } - asm volatile(" dsync"); - } -} - -static inline void invalidate_dcache_unaligned(u32 addr, u32 size) -{ - int cnt; - if (size) { - asm volatile(" dhwbi %0, 0 ;" : : "a"(addr)); - cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr) - - XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE; - while (cnt-- > 0) { - asm volatile(" dhi %0, %1" : : "a"(addr), - "n"(XCHAL_DCACHE_LINESIZE)); - addr += XCHAL_DCACHE_LINESIZE; - } - asm volatile(" dhwbi %0, %1" : : "a"(addr), - "n"(XCHAL_DCACHE_LINESIZE)); - asm volatile(" dsync"); - } -} - -static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size) -{ - u32 cnt; - if (size) { - cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr) - + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE; - while (cnt--) { - asm volatile(" dhwbi %0, 0" : : "a"(addr)); - addr += XCHAL_DCACHE_LINESIZE; - } - asm volatile(" dsync"); - } -} - #endif /* _XTENSA_CACHEFLUSH_H */ diff --git a/kernel/arch/xtensa/include/asm/cmpxchg.h b/kernel/arch/xtensa/include/asm/cmpxchg.h index 370b26f38..201e9009e 100644 --- a/kernel/arch/xtensa/include/asm/cmpxchg.h +++ b/kernel/arch/xtensa/include/asm/cmpxchg.h @@ -34,7 +34,7 @@ __cmpxchg_u32(volatile int *p, int old, int new) return new; #else __asm__ __volatile__( - " rsil a15, "__stringify(LOCKLEVEL)"\n" + " rsil a15, "__stringify(TOPLEVEL)"\n" " l32i %0, %1, 0\n" " bne %0, %2, 1f\n" " s32i %3, %1, 0\n" @@ -123,7 +123,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val) #else unsigned long tmp; __asm__ __volatile__( - " rsil a15, "__stringify(LOCKLEVEL)"\n" + " rsil a15, "__stringify(TOPLEVEL)"\n" " l32i %0, %1, 0\n" " s32i %2, %1, 0\n" " wsr a15, ps\n" diff --git a/kernel/arch/xtensa/include/asm/device.h b/kernel/arch/xtensa/include/asm/device.h new file mode 100644 index 000000000..fe1f5c878 --- /dev/null +++ b/kernel/arch/xtensa/include/asm/device.h @@ -0,0 +1,19 @@ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#ifndef _ASM_XTENSA_DEVICE_H +#define _ASM_XTENSA_DEVICE_H + +struct dma_map_ops; + +struct dev_archdata { + /* DMA operations on that device */ + struct dma_map_ops *dma_ops; +}; + +struct pdev_archdata { +}; + +#endif /* _ASM_XTENSA_DEVICE_H */ diff --git a/kernel/arch/xtensa/include/asm/dma-mapping.h b/kernel/arch/xtensa/include/asm/dma-mapping.h index ba78ccf65..66c9ba261 100644 --- a/kernel/arch/xtensa/include/asm/dma-mapping.h +++ b/kernel/arch/xtensa/include/asm/dma-mapping.h @@ -1,11 +1,10 @@ /* - * include/asm-xtensa/dma-mapping.h - * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2003 - 2005 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. */ #ifndef _XTENSA_DMA_MAPPING_H @@ -13,189 +12,37 @@ #include <asm/cache.h> #include <asm/io.h> + +#include <asm-generic/dma-coherent.h> + #include <linux/mm.h> #include <linux/scatterlist.h> #define DMA_ERROR_CODE (~(dma_addr_t)0x0) -/* - * DMA-consistent mapping functions. - */ - -extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long); -extern void consistent_free(void*, size_t, dma_addr_t); -extern void consistent_sync(void*, size_t, int); - -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); - -void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle); +extern struct dma_map_ops xtensa_dma_map_ops; -static inline dma_addr_t -dma_map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction direction) +static inline struct dma_map_ops *get_dma_ops(struct device *dev) { - BUG_ON(direction == DMA_NONE); - consistent_sync(ptr, size, direction); - return virt_to_phys(ptr); + if (dev && dev->archdata.dma_ops) + return dev->archdata.dma_ops; + else + return &xtensa_dma_map_ops; } -static inline void -dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -static inline int -dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - int i; +#include <asm-generic/dma-mapping-common.h> - BUG_ON(direction == DMA_NONE); - - for (i = 0; i < nents; i++, sg++ ) { - BUG_ON(!sg_page(sg)); - - sg->dma_address = sg_phys(sg); - consistent_sync(sg_virt(sg), sg->length, direction); - } - - return nents; -} - -static inline dma_addr_t -dma_map_page(struct device *dev, struct page *page, unsigned long offset, - size_t size, enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset; -} - -static inline void -dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - - -static inline void -dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); -} - -static inline void -dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ - consistent_sync((void *)bus_to_virt(dma_handle), size, direction); -} - -static inline void -dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction direction) -{ - consistent_sync((void *)bus_to_virt(dma_handle), size, direction); -} - -static inline void -dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - - consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction); -} - -static inline void -dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - - consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction); -} -static inline 
void -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction dir) -{ - int i; - for (i = 0; i < nelems; i++, sg++) - consistent_sync(sg_virt(sg), sg->length, dir); -} - -static inline void -dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction dir) -{ - int i; - for (i = 0; i < nelems; i++, sg++) - consistent_sync(sg_virt(sg), sg->length, dir); -} -static inline int -dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return 0; -} - -static inline int -dma_supported(struct device *dev, u64 mask) -{ - return 1; -} - -static inline int -dma_set_mask(struct device *dev, u64 mask) -{ - if(!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - - *dev->dma_mask = mask; - - return 0; -} - -static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - consistent_sync(vaddr, size, direction); -} - -/* Not supported for now */ -static inline int dma_mmap_coherent(struct device *dev, - struct vm_area_struct *vma, void *cpu_addr, - dma_addr_t dma_addr, size_t size) -{ - return -EINVAL; -} - -static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, - size_t size) -{ - return -EINVAL; -} +void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction); -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { - return NULL; + return (dma_addr_t)paddr; } -static inline void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) { + return (phys_addr_t)daddr; } #endif /* _XTENSA_DMA_MAPPING_H */ diff --git a/kernel/arch/xtensa/include/asm/initialize_mmu.h b/kernel/arch/xtensa/include/asm/initialize_mmu.h index e256f2270..7a1e07596 100644 --- a/kernel/arch/xtensa/include/asm/initialize_mmu.h +++ b/kernel/arch/xtensa/include/asm/initialize_mmu.h @@ -161,7 +161,8 @@ #endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY */ -#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS +#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS && \ + (XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE) /* Enable data and instruction cache in the DEFAULT_MEMORY region * if the processor has DTLB and ITLB. */ @@ -175,14 +176,18 @@ 1: sub a9, a9, a8 2: +#if XCHAL_DCACHE_SIZE rdtlb1 a3, a5 - ritlb1 a4, a5 and a3, a3, a6 - and a4, a4, a6 or a3, a3, a7 - or a4, a4, a7 wdtlb a3, a5 +#endif +#if XCHAL_ICACHE_SIZE + ritlb1 a4, a5 + and a4, a4, a6 + or a4, a4, a7 witlb a4, a5 +#endif add a5, a5, a8 bltu a8, a9, 1b diff --git a/kernel/arch/xtensa/include/asm/io.h b/kernel/arch/xtensa/include/asm/io.h index fe1600a09..74fed0b4e 100644 --- a/kernel/arch/xtensa/include/asm/io.h +++ b/kernel/arch/xtensa/include/asm/io.h @@ -25,15 +25,6 @@ #ifdef CONFIG_MMU -#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) -extern unsigned long xtensa_kio_paddr; - -static inline unsigned long xtensa_get_kio_paddr(void) -{ - return xtensa_kio_paddr; -} -#endif - /* * Return the virtual address for the specified bus memory. * Note that we currently don't support any address outside the KIO segment. 
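The dma-mapping.h rewrite above drops xtensa's open-coded DMA helpers in favour of a dma_map_ops table dispatched through get_dma_ops(), with the generic wrappers pulled in from asm-generic/dma-mapping-common.h. A condensed, self-contained sketch of that dispatch pattern (names and signatures are simplified for illustration, not the literal kernel code):

```c
/* Condensed sketch of dma_map_ops dispatch: generic helpers funnel every
 * DMA call through a per-device ops table, falling back to the
 * architecture-wide ops -- mirroring xtensa's get_dma_ops() above. */
#include <stdio.h>

struct dma_map_ops;
struct device { const struct dma_map_ops *dma_ops; }; /* stands in for archdata */

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE,
                          DMA_FROM_DEVICE, DMA_NONE };
typedef unsigned long dma_addr_t;

struct dma_map_ops {
    dma_addr_t (*map_page)(struct device *dev, void *vaddr,
                           unsigned long size, enum dma_data_direction dir);
    /* alloc, free, unmap_page, map_sg, sync_* hooks elided */
};

/* Arch-wide fallback, like xtensa_dma_map_ops in the patch. */
static dma_addr_t xtensa_map_page(struct device *dev, void *vaddr,
                                  unsigned long size,
                                  enum dma_data_direction dir)
{
    /* the real implementation flushes/invalidates the dcache here */
    return (dma_addr_t)vaddr;   /* identity phys==dma, as in phys_to_dma() */
}

static const struct dma_map_ops xtensa_dma_map_ops = {
    .map_page = xtensa_map_page,
};

static const struct dma_map_ops *get_dma_ops(struct device *dev)
{
    /* per-device override first, then the architecture default */
    return (dev && dev->dma_ops) ? dev->dma_ops : &xtensa_dma_map_ops;
}

int main(void)
{
    struct device dev = { 0 };
    char buf[64];
    dma_addr_t h = get_dma_ops(&dev)->map_page(&dev, buf, sizeof(buf),
                                               DMA_TO_DEVICE);
    printf("mapped at %#lx\n", (unsigned long)h);
    return 0;
}
```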
@@ -57,8 +48,10 @@ static inline void __iomem *ioremap_cache(unsigned long offset, else BUG(); } +#define ioremap_cache ioremap_cache #define ioremap_wc ioremap_nocache +#define ioremap_wt ioremap_nocache static inline void __iomem *ioremap(unsigned long offset, unsigned long size) { diff --git a/kernel/arch/xtensa/include/asm/irqflags.h b/kernel/arch/xtensa/include/asm/irqflags.h index ea36674c6..8e090c709 100644 --- a/kernel/arch/xtensa/include/asm/irqflags.h +++ b/kernel/arch/xtensa/include/asm/irqflags.h @@ -6,6 +6,7 @@ * for more details. * * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. */ #ifndef _XTENSA_IRQFLAGS_H @@ -23,8 +24,27 @@ static inline unsigned long arch_local_save_flags(void) static inline unsigned long arch_local_irq_save(void) { unsigned long flags; - asm volatile("rsil %0, "__stringify(LOCKLEVEL) +#if XTENSA_FAKE_NMI +#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL + unsigned long tmp; + + asm volatile("rsr %0, ps\t\n" + "extui %1, %0, 0, 4\t\n" + "bgei %1, "__stringify(LOCKLEVEL)", 1f\t\n" + "rsil %0, "__stringify(LOCKLEVEL)"\n" + "1:" + : "=a" (flags), "=a" (tmp) :: "memory"); +#else + asm volatile("rsr %0, ps\t\n" + "or %0, %0, %1\t\n" + "xsr %0, ps\t\n" + "rsync" + : "=&a" (flags) : "a" (LOCKLEVEL) : "memory"); +#endif +#else + asm volatile("rsil %0, "__stringify(LOCKLEVEL) : "=a" (flags) :: "memory"); +#endif return flags; } diff --git a/kernel/arch/xtensa/include/asm/pci.h b/kernel/arch/xtensa/include/asm/pci.h index 5d52dc43d..e438a00fb 100644 --- a/kernel/arch/xtensa/include/asm/pci.h +++ b/kernel/arch/xtensa/include/asm/pci.h @@ -33,7 +33,7 @@ extern struct pci_controller* pcibios_alloc_controller(void); #include <linux/types.h> #include <linux/slab.h> -#include <asm/scatterlist.h> +#include <linux/scatterlist.h> #include <linux/string.h> #include <asm/io.h> diff --git a/kernel/arch/xtensa/include/asm/pgtable.h b/kernel/arch/xtensa/include/asm/pgtable.h index a5e929a10..fb02fdc5e 100644 --- a/kernel/arch/xtensa/include/asm/pgtable.h +++ b/kernel/arch/xtensa/include/asm/pgtable.h @@ -18,7 +18,11 @@ * We only use two ring levels, user and kernel space. */ +#ifdef CONFIG_MMU #define USER_RING 1 /* user ring level */ +#else +#define USER_RING 0 +#endif #define KERNEL_RING 0 /* kernel ring level */ /* diff --git a/kernel/arch/xtensa/include/asm/processor.h b/kernel/arch/xtensa/include/asm/processor.h index b61bdf0ee..83e2e4bc0 100644 --- a/kernel/arch/xtensa/include/asm/processor.h +++ b/kernel/arch/xtensa/include/asm/processor.h @@ -1,11 +1,10 @@ /* - * include/asm-xtensa/processor.h - * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2008 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. */ #ifndef _XTENSA_PROCESSOR_H @@ -45,6 +44,14 @@ #define STACK_TOP_MAX STACK_TOP /* + * General exception cause assigned to fake NMI. Fake NMI needs to be handled + * differently from other interrupts, but it uses common kernel entry/exit + * code. + */ + +#define EXCCAUSE_MAPPED_NMI 62 + +/* * General exception cause assigned to debug exceptions. 
Debug exceptions go * to their own vector, rather than the general exception vectors (user, * kernel, double); and their specific causes are reported via DEBUGCAUSE @@ -65,10 +72,30 @@ #define VALID_DOUBLE_EXCEPTION_ADDRESS 64 +#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno) +#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL + +#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level) +#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK) + +#define IS_POW2(v) (((v) & ((v) - 1)) == 0) + +#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT) + /* LOCKLEVEL defines the interrupt level that masks all * general-purpose interrupts. */ +#if defined(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) && \ + defined(XCHAL_PROFILING_INTERRUPT) && \ + PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \ + XCHAL_EXCM_LEVEL > 1 && \ + IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)) +#define LOCKLEVEL (XCHAL_EXCM_LEVEL - 1) +#else #define LOCKLEVEL XCHAL_EXCM_LEVEL +#endif +#define TOPLEVEL XCHAL_EXCM_LEVEL +#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL) /* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE * registers diff --git a/kernel/arch/xtensa/include/asm/stacktrace.h b/kernel/arch/xtensa/include/asm/stacktrace.h index 6a05fcb0a..fe06e8ed1 100644 --- a/kernel/arch/xtensa/include/asm/stacktrace.h +++ b/kernel/arch/xtensa/include/asm/stacktrace.h @@ -33,4 +33,12 @@ void walk_stackframe(unsigned long *sp, int (*fn)(struct stackframe *frame, void *data), void *data); +void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth, + int (*kfn)(struct stackframe *frame, void *data), + int (*ufn)(struct stackframe *frame, void *data), + void *data); +void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth, + int (*ufn)(struct stackframe *frame, void *data), + void *data); + #endif /* _XTENSA_STACKTRACE_H */ diff --git a/kernel/arch/xtensa/include/asm/vectors.h b/kernel/arch/xtensa/include/asm/vectors.h index a46c53f36..288c77673 100644 --- a/kernel/arch/xtensa/include/asm/vectors.h +++ b/kernel/arch/xtensa/include/asm/vectors.h @@ -21,13 +21,26 @@ #include <variant/core.h> #include <platform/hardware.h> +#if XCHAL_HAVE_PTP_MMU #define XCHAL_KIO_CACHED_VADDR 0xe0000000 #define XCHAL_KIO_BYPASS_VADDR 0xf0000000 #define XCHAL_KIO_DEFAULT_PADDR 0xf0000000 +#else +#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR +#define XCHAL_KIO_DEFAULT_PADDR 0x90000000 +#endif #define XCHAL_KIO_SIZE 0x10000000 -#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) +#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF) #define XCHAL_KIO_PADDR xtensa_get_kio_paddr() +#ifndef __ASSEMBLY__ +extern unsigned long xtensa_kio_paddr; + +static inline unsigned long xtensa_get_kio_paddr(void) +{ + return xtensa_kio_paddr; +} +#endif #else #define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR #endif @@ -48,6 +61,9 @@ #define LOAD_MEMORY_ADDRESS 0xD0003000 #endif +#define RESET_VECTOR1_VADDR (VIRTUAL_MEMORY_ADDRESS + \ + XCHAL_RESET_VECTOR1_PADDR) + #else /* !defined(CONFIG_MMU) */ /* MMU Not being used - Virtual == Physical */ @@ -60,6 +76,8 @@ /* Loaded just above possibly live vectors */ #define LOAD_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x3000) +#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR) + #endif /* CONFIG_MMU */ #define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset) @@ -67,14 +85,6 @@ /* Used to set VECBASE register */ #define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS -#define RESET_VECTOR_VECOFS 
(XCHAL_RESET_VECTOR_VADDR - \ - VECBASE_RESET_VADDR) -#define RESET_VECTOR_VADDR XC_VADDR(RESET_VECTOR_VECOFS) - -#define RESET_VECTOR1_VECOFS (XCHAL_RESET_VECTOR1_VADDR - \ - VECBASE_RESET_VADDR) -#define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS) - #if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE #define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS) diff --git a/kernel/arch/xtensa/include/uapi/asm/mman.h b/kernel/arch/xtensa/include/uapi/asm/mman.h index 201aec0e0..360944e1d 100644 --- a/kernel/arch/xtensa/include/uapi/asm/mman.h +++ b/kernel/arch/xtensa/include/uapi/asm/mman.h @@ -74,6 +74,12 @@ */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ +#define MCL_ONFAULT 4 /* lock all pages that are faulted in */ + +/* + * Flags for mlock + */ +#define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */ #define MADV_NORMAL 0 /* no further special treatment */ #define MADV_RANDOM 1 /* expect random page references */ diff --git a/kernel/arch/xtensa/kernel/Makefile b/kernel/arch/xtensa/kernel/Makefile index d3a0f0fd5..4db730290 100644 --- a/kernel/arch/xtensa/kernel/Makefile +++ b/kernel/arch/xtensa/kernel/Makefile @@ -13,8 +13,10 @@ obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o obj-$(CONFIG_SMP) += smp.o mxhead.o +obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o AFLAGS_head.o += -mtext-section-literals +AFLAGS_mxhead.o += -mtext-section-literals # In the Xtensa architecture, assembly generates literals which must always # precede the L32R instruction with a relative offset less than 256 kB. @@ -27,10 +29,11 @@ AFLAGS_head.o += -mtext-section-literals # # Replicate rules in scripts/Makefile.build -sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \ - -e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g' \ - -e 's/\*(\(\.text .*\))/*(.literal \1)/g' \ - -e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g' +sed-y = -e ':a; s/\*(\([^)]*\)\.text\.unlikely/*(\1.literal.unlikely .{text}.unlikely/; ta; ' \ + -e ':b; s/\*(\([^)]*\)\.text\(\.[a-z]*\)/*(\1.{text}\2.literal .{text}\2/; tb; ' \ + -e ':c; s/\*(\([^)]*\)\(\.[a-z]*it\|\.ref\)\.text/*(\1\2.literal \2.{text}/; tc; ' \ + -e ':d; s/\*(\([^)]\+ \|\)\.text/*(\1.literal .{text}/; td; ' \ + -e 's/\.{text}/.text/g' quiet_cmd__cpp_lds_S = LDS $@ cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \ diff --git a/kernel/arch/xtensa/kernel/entry.S b/kernel/arch/xtensa/kernel/entry.S index a2a902140..db5c1765b 100644 --- a/kernel/arch/xtensa/kernel/entry.S +++ b/kernel/arch/xtensa/kernel/entry.S @@ -1,6 +1,4 @@ /* - * arch/xtensa/kernel/entry.S - * * Low-level exception handling * * This file is subject to the terms and conditions of the GNU General Public @@ -8,6 +6,7 @@ * for more details. * * Copyright (C) 2004 - 2008 by Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. 
* * Chris Zankel <chris@zankel.net> * @@ -75,6 +74,27 @@ #endif .endm + + .macro irq_save flags tmp +#if XTENSA_FAKE_NMI +#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL + rsr \flags, ps + extui \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH + bgei \tmp, LOCKLEVEL, 99f + rsil \tmp, LOCKLEVEL +99: +#else + movi \tmp, LOCKLEVEL + rsr \flags, ps + or \flags, \flags, \tmp + xsr \flags, ps + rsync +#endif +#else + rsil \flags, LOCKLEVEL +#endif + .endm + /* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */ /* @@ -122,6 +142,7 @@ _user_exception: /* Save SAR and turn off single stepping */ movi a2, 0 + wsr a2, depc # terminate user stack trace with 0 rsr a3, sar xsr a2, icountlevel s32i a3, a1, PT_SAR @@ -301,7 +322,18 @@ _kernel_exception: s32i a14, a1, PT_AREG14 s32i a15, a1, PT_AREG15 + _bnei a2, 1, 1f + + /* Copy spill slots of a0 and a1 to imitate movsp + * in order to keep exception stack continuous + */ + l32i a3, a1, PT_SIZE + l32i a0, a1, PT_SIZE + 4 + s32e a3, a1, -16 + s32e a0, a1, -12 1: + l32i a0, a1, PT_AREG0 # restore saved a0 + wsr a0, depc #ifdef KERNEL_STACK_OVERFLOW_CHECK @@ -335,80 +367,96 @@ common_exception: s32i a2, a1, PT_SYSCALL movi a2, 0 s32i a3, a1, PT_EXCVADDR +#if XCHAL_HAVE_LOOPS xsr a2, lcount s32i a2, a1, PT_LCOUNT +#endif /* It is now save to restore the EXC_TABLE_FIXUP variable. */ - rsr a0, exccause + rsr a2, exccause movi a3, 0 - rsr a2, excsave1 - s32i a0, a1, PT_EXCCAUSE - s32i a3, a2, EXC_TABLE_FIXUP - - /* All unrecoverable states are saved on stack, now, and a1 is valid, - * so we can allow exceptions and interrupts (*) again. - * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) + rsr a0, excsave1 + s32i a2, a1, PT_EXCCAUSE + s32i a3, a0, EXC_TABLE_FIXUP + + /* All unrecoverable states are saved on stack, now, and a1 is valid. + * Now we can allow exceptions again. In case we've got an interrupt + * PS.INTLEVEL is set to LOCKLEVEL disabling furhter interrupts, + * otherwise it's left unchanged. * - * (*) We only allow interrupts if they were previously enabled and - * we're not handling an IRQ + * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) */ rsr a3, ps - addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT - movi a2, LOCKLEVEL + s32i a3, a1, PT_PS # save ps + +#if XTENSA_FAKE_NMI + /* Correct PS needs to be saved in the PT_PS: + * - in case of exception or level-1 interrupt it's in the PS, + * and is already saved. + * - in case of medium level interrupt it's in the excsave2. 
+ */ + movi a0, EXCCAUSE_MAPPED_NMI + extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH + beq a2, a0, .Lmedium_level_irq + bnei a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception + beqz a3, .Llevel1_irq # level-1 IRQ sets ps.intlevel to 0 + +.Lmedium_level_irq: + rsr a0, excsave2 + s32i a0, a1, PT_PS # save medium-level interrupt ps + bgei a3, LOCKLEVEL, .Lexception + +.Llevel1_irq: + movi a3, LOCKLEVEL + +.Lexception: + movi a0, 1 << PS_WOE_BIT + or a3, a3, a0 +#else + addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT + movi a0, LOCKLEVEL extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH # a3 = PS.INTLEVEL - moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt + moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt movi a2, 1 << PS_WOE_BIT or a3, a3, a2 - rsr a0, exccause - xsr a3, ps + rsr a2, exccause +#endif - s32i a3, a1, PT_PS # save ps + /* restore return address (or 0 if return to userspace) */ + rsr a0, depc + wsr a3, ps + rsync # PS.WOE => rsync => overflow /* Save lbeg, lend */ - - rsr a2, lbeg +#if XCHAL_HAVE_LOOPS + rsr a4, lbeg rsr a3, lend - s32i a2, a1, PT_LBEG + s32i a4, a1, PT_LBEG s32i a3, a1, PT_LEND +#endif /* Save SCOMPARE1 */ #if XCHAL_HAVE_S32C1I - rsr a2, scompare1 - s32i a2, a1, PT_SCOMPARE1 + rsr a3, scompare1 + s32i a3, a1, PT_SCOMPARE1 #endif /* Save optional registers. */ - save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT + save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT -#ifdef CONFIG_TRACE_IRQFLAGS - l32i a4, a1, PT_DEPC - /* Double exception means we came here with an exception - * while PS.EXCM was set, i.e. interrupts disabled. - */ - bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f - l32i a4, a1, PT_EXCCAUSE - bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f - /* We came here with an interrupt means interrupts were enabled - * and we've just disabled them. - */ - movi a4, trace_hardirqs_off - callx4 a4 -1: -#endif - /* Go to second-level dispatcher. Set up parameters to pass to the * exception handler and call the exception handler. */ rsr a4, excsave1 mov a6, a1 # pass stack frame - mov a7, a0 # pass EXCCAUSE - addx4 a4, a0, a4 + mov a7, a2 # pass EXCCAUSE + addx4 a4, a2, a4 l32i a4, a4, EXC_TABLE_DEFAULT # load handler /* Call the second-level handler */ @@ -419,8 +467,17 @@ common_exception: .global common_exception_return common_exception_return: +#if XTENSA_FAKE_NMI + l32i a2, a1, PT_EXCCAUSE + movi a3, EXCCAUSE_MAPPED_NMI + beq a2, a3, .LNMIexit +#endif 1: - rsil a2, LOCKLEVEL + irq_save a2, a3 +#ifdef CONFIG_TRACE_IRQFLAGS + movi a4, trace_hardirqs_off + callx4 a4 +#endif /* Jump if we are returning from kernel exceptions. */ @@ -445,6 +502,10 @@ common_exception_return: /* Call do_signal() */ +#ifdef CONFIG_TRACE_IRQFLAGS + movi a4, trace_hardirqs_on + callx4 a4 +#endif rsil a2, 0 movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*) mov a6, a1 @@ -453,6 +514,10 @@ common_exception_return: 3: /* Reschedule */ +#ifdef CONFIG_TRACE_IRQFLAGS + movi a4, trace_hardirqs_on + callx4 a4 +#endif rsil a2, 0 movi a4, schedule # void schedule (void) callx4 a4 @@ -471,6 +536,12 @@ common_exception_return: j 1b #endif +#if XTENSA_FAKE_NMI +.LNMIexit: + l32i a3, a1, PT_PS + _bbci.l a3, PS_UM_BIT, 4f +#endif + 5: #ifdef CONFIG_DEBUG_TLB_SANITY l32i a4, a1, PT_DEPC @@ -481,16 +552,8 @@ common_exception_return: 6: 4: #ifdef CONFIG_TRACE_IRQFLAGS - l32i a4, a1, PT_DEPC - /* Double exception means we came here with an exception - * while PS.EXCM was set, i.e. interrupts disabled. 
- */ - bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f - l32i a4, a1, PT_EXCCAUSE - bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f - /* We came here with an interrupt means interrupts were enabled - * and we'll reenable them on return. - */ + extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH + bgei a4, LOCKLEVEL, 1f movi a4, trace_hardirqs_on callx4 a4 1: @@ -664,13 +727,14 @@ common_exception_exit: wsr a3, sar /* Restore LBEG, LEND, LCOUNT */ - +#if XCHAL_HAVE_LOOPS l32i a2, a1, PT_LBEG l32i a3, a1, PT_LEND wsr a2, lbeg l32i a2, a1, PT_LCOUNT wsr a3, lend wsr a2, lcount +#endif /* We control single stepping through the ICOUNTLEVEL register. */ @@ -1562,6 +1626,13 @@ ENTRY(fast_second_level_miss) rfde 9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 + bnez a0, 8b + + /* Even more unlikely case active_mm == 0. + * We can get here with NMI in the middle of context_switch that + * touches vmalloc area. + */ + movi a0, init_mm j 8b #if (DCACHE_WAY_SIZE > PAGE_SIZE) @@ -1865,10 +1936,8 @@ ENTRY(_switch_to) /* Disable ints while we manipulate the stack pointer. */ - rsil a14, LOCKLEVEL - rsr a3, excsave1 + irq_save a14, a3 rsync - s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */ /* Switch CPENABLE */ @@ -1889,9 +1958,7 @@ ENTRY(_switch_to) */ rsr a3, excsave1 # exc_table - movi a6, 0 addi a7, a5, PT_REGS_OFFSET - s32i a6, a3, EXC_TABLE_FIXUP s32i a7, a3, EXC_TABLE_KSTK /* restore context of the task 'next' */ diff --git a/kernel/arch/xtensa/kernel/head.S b/kernel/arch/xtensa/kernel/head.S index 15a461e2a..9ed55649a 100644 --- a/kernel/arch/xtensa/kernel/head.S +++ b/kernel/arch/xtensa/kernel/head.S @@ -249,7 +249,7 @@ ENTRY(_startup) __loopt a2, a3, a4, 2 s32i a0, a2, 0 - __endla a2, a4, 4 + __endla a2, a3, 4 #if XCHAL_DCACHE_IS_WRITEBACK diff --git a/kernel/arch/xtensa/kernel/irq.c b/kernel/arch/xtensa/kernel/irq.c index 3eee94f62..4ac3d2316 100644 --- a/kernel/arch/xtensa/kernel/irq.c +++ b/kernel/arch/xtensa/kernel/irq.c @@ -28,7 +28,7 @@ #include <asm/uaccess.h> #include <asm/platform.h> -atomic_t irq_err_count; +DECLARE_PER_CPU(unsigned long, nmi_count); asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) { @@ -57,11 +57,16 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) int arch_show_interrupts(struct seq_file *p, int prec) { + unsigned cpu __maybe_unused; #ifdef CONFIG_SMP show_ipi_list(p, prec); #endif - seq_printf(p, "%*s: ", prec, "ERR"); - seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); +#if XTENSA_FAKE_NMI + seq_printf(p, "%*s:", prec, "NMI"); + for_each_online_cpu(cpu) + seq_printf(p, " %10lu", per_cpu(nmi_count, cpu)); + seq_puts(p, " Non-maskable interrupts\n"); +#endif return 0; } @@ -106,6 +111,12 @@ int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_set_chip_and_handler_name(irq, irq_chip, handle_percpu_irq, "timer"); irq_clear_status_flags(irq, IRQ_LEVEL); +#ifdef XCHAL_INTTYPE_MASK_PROFILING + } else if (mask & XCHAL_INTTYPE_MASK_PROFILING) { + irq_set_chip_and_handler_name(irq, irq_chip, + handle_percpu_irq, "profiling"); + irq_set_status_flags(irq, IRQ_LEVEL); +#endif } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */ /* XCHAL_INTTYPE_MASK_NMI */ irq_set_chip_and_handler_name(irq, irq_chip, @@ -166,23 +177,25 @@ void migrate_irqs(void) for_each_active_irq(i) { struct irq_data *data = irq_get_irq_data(i); + struct cpumask *mask; unsigned int newcpu; if (irqd_is_per_cpu(data)) continue; - if (!cpumask_test_cpu(cpu, data->affinity)) + mask = irq_data_get_affinity_mask(data); + if (!cpumask_test_cpu(cpu, mask)) continue; - newcpu = 
cpumask_any_and(data->affinity, cpu_online_mask); + newcpu = cpumask_any_and(mask, cpu_online_mask); if (newcpu >= nr_cpu_ids) { pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n", i, cpu); - cpumask_setall(data->affinity); + cpumask_setall(mask); } - irq_set_affinity(i, data->affinity); + irq_set_affinity(i, mask); } } #endif /* CONFIG_HOTPLUG_CPU */ diff --git a/kernel/arch/xtensa/kernel/mxhead.S b/kernel/arch/xtensa/kernel/mxhead.S index 77a161a11..9f3843742 100644 --- a/kernel/arch/xtensa/kernel/mxhead.S +++ b/kernel/arch/xtensa/kernel/mxhead.S @@ -48,8 +48,6 @@ _SetupOCD: rsync _SetupMMU: - Offset = _SetupMMU - _SecondaryResetVector - #ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX initialize_mmu #endif @@ -62,24 +60,3 @@ _SetupMMU: jx a3 .end no-absolute-literals - - - .section .SecondaryResetVector.remapped_text, "ax" - .global _RemappedSecondaryResetVector - - .org 0 # Need to do org before literals - -_RemappedSecondaryResetVector: - .begin no-absolute-literals - .literal_position - - _j _RemappedSetupMMU - . = _RemappedSecondaryResetVector + Offset - -_RemappedSetupMMU: - -#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX - initialize_mmu -#endif - - .end no-absolute-literals diff --git a/kernel/arch/xtensa/kernel/pci-dma.c b/kernel/arch/xtensa/kernel/pci-dma.c index e8b76b8e4..cd6669834 100644 --- a/kernel/arch/xtensa/kernel/pci-dma.c +++ b/kernel/arch/xtensa/kernel/pci-dma.c @@ -1,6 +1,4 @@ /* - * arch/xtensa/kernel/pci-dma.c - * * DMA coherent memory allocation. * * This program is free software; you can redistribute it and/or modify it @@ -9,6 +7,7 @@ * option) any later version. * * Copyright (C) 2002 - 2005 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. * * Based on version for i386. * @@ -16,22 +15,134 @@ * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> */ -#include <linux/types.h> -#include <linux/mm.h> -#include <linux/string.h> -#include <linux/pci.h> #include <linux/gfp.h> +#include <linux/highmem.h> +#include <linux/mm.h> #include <linux/module.h> -#include <asm/io.h> +#include <linux/pci.h> +#include <linux/string.h> +#include <linux/types.h> #include <asm/cacheflush.h> +#include <asm/io.h> + +void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction dir) +{ + switch (dir) { + case DMA_BIDIRECTIONAL: + __flush_invalidate_dcache_range((unsigned long)vaddr, size); + break; + + case DMA_FROM_DEVICE: + __invalidate_dcache_range((unsigned long)vaddr, size); + break; + + case DMA_TO_DEVICE: + __flush_dcache_range((unsigned long)vaddr, size); + break; + + case DMA_NONE: + BUG(); + break; + } +} +EXPORT_SYMBOL(dma_cache_sync); + +static void do_cache_op(dma_addr_t dma_handle, size_t size, + void (*fn)(unsigned long, unsigned long)) +{ + unsigned long off = dma_handle & (PAGE_SIZE - 1); + unsigned long pfn = PFN_DOWN(dma_handle); + struct page *page = pfn_to_page(pfn); + + if (!PageHighMem(page)) + fn((unsigned long)bus_to_virt(dma_handle), size); + else + while (size > 0) { + size_t sz = min_t(size_t, size, PAGE_SIZE - off); + void *vaddr = kmap_atomic(page); + + fn((unsigned long)vaddr + off, sz); + kunmap_atomic(vaddr); + off = 0; + ++page; + size -= sz; + } +} + +static void xtensa_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir) +{ + switch (dir) { + case DMA_BIDIRECTIONAL: + case DMA_FROM_DEVICE: + do_cache_op(dma_handle, size, __invalidate_dcache_range); + break; + + case DMA_NONE: + BUG(); + break; + + default: + break; + } +} + +static void 
xtensa_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir) +{ + switch (dir) { + case DMA_BIDIRECTIONAL: + case DMA_TO_DEVICE: + if (XCHAL_DCACHE_IS_WRITEBACK) + do_cache_op(dma_handle, size, __flush_dcache_range); + break; + + case DMA_NONE: + BUG(); + break; + + default: + break; + } +} + +static void xtensa_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + xtensa_sync_single_for_cpu(dev, sg_dma_address(s), + sg_dma_len(s), dir); + } +} + +static void xtensa_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + xtensa_sync_single_for_device(dev, sg_dma_address(s), + sg_dma_len(s), dir); + } +} /* * Note: We assume that the full memory space is always mapped to 'kseg' * Otherwise we have to use page attributes (not implemented). */ -void * -dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) +static void *xtensa_dma_alloc(struct device *dev, size_t size, + dma_addr_t *handle, gfp_t flag, + struct dma_attrs *attrs) { unsigned long ret; unsigned long uncached = 0; @@ -52,20 +163,15 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR || ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); + uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR; + *handle = virt_to_bus((void *)ret); + __invalidate_dcache_range(ret, size); - if (ret != 0) { - memset((void*) ret, 0, size); - uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR; - *handle = virt_to_bus((void*)ret); - __flush_invalidate_dcache_range(ret, size); - } - - return (void*)uncached; + return (void *)uncached; } -EXPORT_SYMBOL(dma_alloc_coherent); -void dma_free_coherent(struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle) +static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr, + dma_addr_t dma_handle, struct dma_attrs *attrs) { unsigned long addr = (unsigned long)vaddr + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; @@ -75,24 +181,78 @@ void dma_free_coherent(struct device *hwdev, size_t size, free_pages(addr, get_order(size)); } -EXPORT_SYMBOL(dma_free_coherent); +static dma_addr_t xtensa_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + dma_addr_t dma_handle = page_to_phys(page) + offset; -void consistent_sync(void *vaddr, size_t size, int direction) + xtensa_sync_single_for_device(dev, dma_handle, size, dir); + return dma_handle; +} + +static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) { - switch (direction) { - case PCI_DMA_NONE: - BUG(); - case PCI_DMA_FROMDEVICE: /* invalidate only */ - __invalidate_dcache_range((unsigned long)vaddr, - (unsigned long)size); - break; + xtensa_sync_single_for_cpu(dev, dma_handle, size, dir); +} - case PCI_DMA_TODEVICE: /* writeback only */ - case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */ - __flush_invalidate_dcache_range((unsigned long)vaddr, - (unsigned long)size); - break; +static int xtensa_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct 
scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset, + s->length, dir, attrs); + } + return nents; +} + +static void xtensa_unmap_sg(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + xtensa_unmap_page(dev, sg_dma_address(s), + sg_dma_len(s), dir, attrs); } } -EXPORT_SYMBOL(consistent_sync); + +int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return 0; +} + +struct dma_map_ops xtensa_dma_map_ops = { + .alloc = xtensa_dma_alloc, + .free = xtensa_dma_free, + .map_page = xtensa_map_page, + .unmap_page = xtensa_unmap_page, + .map_sg = xtensa_map_sg, + .unmap_sg = xtensa_unmap_sg, + .sync_single_for_cpu = xtensa_sync_single_for_cpu, + .sync_single_for_device = xtensa_sync_single_for_device, + .sync_sg_for_cpu = xtensa_sync_sg_for_cpu, + .sync_sg_for_device = xtensa_sync_sg_for_device, + .mapping_error = xtensa_dma_mapping_error, +}; +EXPORT_SYMBOL(xtensa_dma_map_ops); + +#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) + +static int __init xtensa_dma_init(void) +{ + dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); + return 0; +} +fs_initcall(xtensa_dma_init); diff --git a/kernel/arch/xtensa/kernel/perf_event.c b/kernel/arch/xtensa/kernel/perf_event.c new file mode 100644 index 000000000..54f01188c --- /dev/null +++ b/kernel/arch/xtensa/kernel/perf_event.c @@ -0,0 +1,454 @@ +/* + * Xtensa Performance Monitor Module driver + * See Tensilica Debug User's Guide for PMU registers documentation. + * + * Copyright (C) 2015 Cadence Design Systems Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> + +#include <asm/processor.h> +#include <asm/stacktrace.h> + +/* Global control/status for all perf counters */ +#define XTENSA_PMU_PMG 0x1000 +/* Perf counter values */ +#define XTENSA_PMU_PM(i) (0x1080 + (i) * 4) +/* Perf counter control registers */ +#define XTENSA_PMU_PMCTRL(i) (0x1100 + (i) * 4) +/* Perf counter status registers */ +#define XTENSA_PMU_PMSTAT(i) (0x1180 + (i) * 4) + +#define XTENSA_PMU_PMG_PMEN 0x1 + +#define XTENSA_PMU_COUNTER_MASK 0xffffffffULL +#define XTENSA_PMU_COUNTER_MAX 0x7fffffff + +#define XTENSA_PMU_PMCTRL_INTEN 0x00000001 +#define XTENSA_PMU_PMCTRL_KRNLCNT 0x00000008 +#define XTENSA_PMU_PMCTRL_TRACELEVEL 0x000000f0 +#define XTENSA_PMU_PMCTRL_SELECT_SHIFT 8 +#define XTENSA_PMU_PMCTRL_SELECT 0x00001f00 +#define XTENSA_PMU_PMCTRL_MASK_SHIFT 16 +#define XTENSA_PMU_PMCTRL_MASK 0xffff0000 + +#define XTENSA_PMU_MASK(select, mask) \ + (((select) << XTENSA_PMU_PMCTRL_SELECT_SHIFT) | \ + ((mask) << XTENSA_PMU_PMCTRL_MASK_SHIFT) | \ + XTENSA_PMU_PMCTRL_TRACELEVEL | \ + XTENSA_PMU_PMCTRL_INTEN) + +#define XTENSA_PMU_PMSTAT_OVFL 0x00000001 +#define XTENSA_PMU_PMSTAT_INTASRT 0x00000010 + +struct xtensa_pmu_events { + /* Array of events currently on this core */ + struct perf_event *event[XCHAL_NUM_PERF_COUNTERS]; + /* Bitmap of used hardware counters */ + unsigned long used_mask[BITS_TO_LONGS(XCHAL_NUM_PERF_COUNTERS)]; +}; +static DEFINE_PER_CPU(struct xtensa_pmu_events, xtensa_pmu_events); + +static const u32 xtensa_hw_ctl[] = { + [PERF_COUNT_HW_CPU_CYCLES] = XTENSA_PMU_MASK(0, 0x1), + [PERF_COUNT_HW_INSTRUCTIONS] = XTENSA_PMU_MASK(2, 0xffff), + [PERF_COUNT_HW_CACHE_REFERENCES] = XTENSA_PMU_MASK(10, 0x1), + [PERF_COUNT_HW_CACHE_MISSES] = XTENSA_PMU_MASK(12, 0x1), + /* Taken and non-taken branches + taken loop ends */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XTENSA_PMU_MASK(2, 0x490), + /* Instruction-related + other global stall cycles */ + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XTENSA_PMU_MASK(4, 0x1ff), + /* Data-related global stall cycles */ + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = XTENSA_PMU_MASK(3, 0x1ff), +}; + +#define C(_x) PERF_COUNT_HW_CACHE_##_x + +static const u32 xtensa_cache_ctl[][C(OP_MAX)][C(RESULT_MAX)] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(10, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(10, 0x2), + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(11, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(11, 0x2), + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(8, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(8, 0x2), + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(9, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(9, 0x8), + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(7, 0x1), + [C(RESULT_MISS)] = XTENSA_PMU_MASK(7, 0x8), + }, + }, +}; + +static int xtensa_pmu_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + int ret; + + cache_type = (config >> 0) & 0xff; + cache_op = (config >> 8) & 0xff; + cache_result = (config >> 16) & 0xff; + + if (cache_type >= ARRAY_SIZE(xtensa_cache_ctl) || + cache_op >= C(OP_MAX) || + cache_result >= C(RESULT_MAX)) + return -EINVAL; + + ret = xtensa_cache_ctl[cache_type][cache_op][cache_result]; + + if (ret == 0) + return -EINVAL; + + return ret; +} + +static inline 
uint32_t xtensa_pmu_read_counter(int idx) +{ + return get_er(XTENSA_PMU_PM(idx)); +} + +static inline void xtensa_pmu_write_counter(int idx, uint32_t v) +{ + set_er(v, XTENSA_PMU_PM(idx)); +} + +static void xtensa_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + uint64_t prev_raw_count, new_raw_count; + int64_t delta; + + do { + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = xtensa_pmu_read_counter(event->hw.idx); + } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count); + + delta = (new_raw_count - prev_raw_count) & XTENSA_PMU_COUNTER_MASK; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); +} + +static bool xtensa_perf_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + bool rc = false; + s64 left; + + if (!is_sampling_event(event)) { + left = XTENSA_PMU_COUNTER_MAX; + } else { + s64 period = hwc->sample_period; + + left = local64_read(&hwc->period_left); + if (left <= -period) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + rc = true; + } else if (left <= 0) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + rc = true; + } + if (left > XTENSA_PMU_COUNTER_MAX) + left = XTENSA_PMU_COUNTER_MAX; + } + + local64_set(&hwc->prev_count, -left); + xtensa_pmu_write_counter(idx, -left); + perf_event_update_userpage(event); + + return rc; +} + +static void xtensa_pmu_enable(struct pmu *pmu) +{ + set_er(get_er(XTENSA_PMU_PMG) | XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG); +} + +static void xtensa_pmu_disable(struct pmu *pmu) +{ + set_er(get_er(XTENSA_PMU_PMG) & ~XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG); +} + +static int xtensa_pmu_event_init(struct perf_event *event) +{ + int ret; + + switch (event->attr.type) { + case PERF_TYPE_HARDWARE: + if (event->attr.config >= ARRAY_SIZE(xtensa_hw_ctl) || + xtensa_hw_ctl[event->attr.config] == 0) + return -EINVAL; + event->hw.config = xtensa_hw_ctl[event->attr.config]; + return 0; + + case PERF_TYPE_HW_CACHE: + ret = xtensa_pmu_cache_event(event->attr.config); + if (ret < 0) + return ret; + event->hw.config = ret; + return 0; + + case PERF_TYPE_RAW: + /* Not 'previous counter' select */ + if ((event->attr.config & XTENSA_PMU_PMCTRL_SELECT) == + (1 << XTENSA_PMU_PMCTRL_SELECT_SHIFT)) + return -EINVAL; + event->hw.config = (event->attr.config & + (XTENSA_PMU_PMCTRL_KRNLCNT | + XTENSA_PMU_PMCTRL_TRACELEVEL | + XTENSA_PMU_PMCTRL_SELECT | + XTENSA_PMU_PMCTRL_MASK)) | + XTENSA_PMU_PMCTRL_INTEN; + return 0; + + default: + return -ENOENT; + } +} + +/* + * Starts/Stops a counter present on the PMU. The PMI handler + * should stop the counter when perf_event_overflow() returns + * !0. ->start() will be used to continue. 
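+ *
+ * (Illustrative note, not part of the original comment: in this
+ * driver the contract is exercised by xtensa_pmu_add(), which calls
+ * ->start() with PERF_EF_RELOAD so that xtensa_perf_event_set_period()
+ * re-arms the sampling period, and by xtensa_pmu_del(), which calls
+ * ->stop() with PERF_EF_UPDATE so that xtensa_perf_event_update()
+ * folds the final hardware count into the event.)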
+ */ +static void xtensa_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (WARN_ON_ONCE(idx == -1)) + return; + + if (flags & PERF_EF_RELOAD) { + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + xtensa_perf_event_set_period(event, hwc, idx); + } + + hwc->state = 0; + + set_er(hwc->config, XTENSA_PMU_PMCTRL(idx)); +} + +static void xtensa_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (!(hwc->state & PERF_HES_STOPPED)) { + set_er(0, XTENSA_PMU_PMCTRL(idx)); + set_er(get_er(XTENSA_PMU_PMSTAT(idx)), + XTENSA_PMU_PMSTAT(idx)); + hwc->state |= PERF_HES_STOPPED; + } + + if ((flags & PERF_EF_UPDATE) && + !(event->hw.state & PERF_HES_UPTODATE)) { + xtensa_perf_event_update(event, &event->hw, idx); + event->hw.state |= PERF_HES_UPTODATE; + } +} + +/* + * Adds/Removes a counter to/from the PMU, can be done inside + * a transaction, see the ->*_txn() methods. + */ +static int xtensa_pmu_add(struct perf_event *event, int flags) +{ + struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (__test_and_set_bit(idx, ev->used_mask)) { + idx = find_first_zero_bit(ev->used_mask, + XCHAL_NUM_PERF_COUNTERS); + if (idx == XCHAL_NUM_PERF_COUNTERS) + return -EAGAIN; + + __set_bit(idx, ev->used_mask); + hwc->idx = idx; + } + ev->event[idx] = event; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + xtensa_pmu_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + return 0; +} + +static void xtensa_pmu_del(struct perf_event *event, int flags) +{ + struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events); + + xtensa_pmu_stop(event, PERF_EF_UPDATE); + __clear_bit(event->hw.idx, ev->used_mask); + perf_event_update_userpage(event); +} + +static void xtensa_pmu_read(struct perf_event *event) +{ + xtensa_perf_event_update(event, &event->hw, event->hw.idx); +} + +static int callchain_trace(struct stackframe *frame, void *data) +{ + struct perf_callchain_entry *entry = data; + + perf_callchain_store(entry, frame->pc); + return 0; +} + +void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ + xtensa_backtrace_kernel(regs, PERF_MAX_STACK_DEPTH, + callchain_trace, NULL, entry); +} + +void perf_callchain_user(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ + xtensa_backtrace_user(regs, PERF_MAX_STACK_DEPTH, + callchain_trace, entry); +} + +void perf_event_print_debug(void) +{ + unsigned long flags; + unsigned i; + + local_irq_save(flags); + pr_info("CPU#%d: PMG: 0x%08lx\n", smp_processor_id(), + get_er(XTENSA_PMU_PMG)); + for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i) + pr_info("PM%d: 0x%08lx, PMCTRL%d: 0x%08lx, PMSTAT%d: 0x%08lx\n", + i, get_er(XTENSA_PMU_PM(i)), + i, get_er(XTENSA_PMU_PMCTRL(i)), + i, get_er(XTENSA_PMU_PMSTAT(i))); + local_irq_restore(flags); +} + +irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id) +{ + irqreturn_t rc = IRQ_NONE; + struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events); + unsigned i; + + for (i = find_first_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS); + i < XCHAL_NUM_PERF_COUNTERS; + i = find_next_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS, i + 1)) { + uint32_t v = get_er(XTENSA_PMU_PMSTAT(i)); + struct perf_event *event = ev->event[i]; + struct hw_perf_event *hwc = &event->hw; + u64 last_period; + + if (!(v & XTENSA_PMU_PMSTAT_OVFL)) + 
continue; + + set_er(v, XTENSA_PMU_PMSTAT(i)); + xtensa_perf_event_update(event, hwc, i); + last_period = hwc->last_period; + if (xtensa_perf_event_set_period(event, hwc, i)) { + struct perf_sample_data data; + struct pt_regs *regs = get_irq_regs(); + + perf_sample_data_init(&data, 0, last_period); + if (perf_event_overflow(event, &data, regs)) + xtensa_pmu_stop(event, 0); + } + + rc = IRQ_HANDLED; + } + return rc; +} + +static struct pmu xtensa_pmu = { + .pmu_enable = xtensa_pmu_enable, + .pmu_disable = xtensa_pmu_disable, + .event_init = xtensa_pmu_event_init, + .add = xtensa_pmu_add, + .del = xtensa_pmu_del, + .start = xtensa_pmu_start, + .stop = xtensa_pmu_stop, + .read = xtensa_pmu_read, +}; + +static void xtensa_pmu_setup(void) +{ + unsigned i; + + set_er(0, XTENSA_PMU_PMG); + for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i) { + set_er(0, XTENSA_PMU_PMCTRL(i)); + set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i)); + } +} + +static int xtensa_pmu_notifier(struct notifier_block *self, + unsigned long action, void *data) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_STARTING: + xtensa_pmu_setup(); + break; + + default: + break; + } + + return NOTIFY_OK; +} + +static int __init xtensa_pmu_init(void) +{ + int ret; + int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT); + + perf_cpu_notifier(xtensa_pmu_notifier); +#if XTENSA_FAKE_NMI + enable_irq(irq); +#else + ret = request_irq(irq, xtensa_pmu_irq_handler, IRQF_PERCPU, + "pmu", NULL); + if (ret < 0) + return ret; +#endif + + ret = perf_pmu_register(&xtensa_pmu, "cpu", PERF_TYPE_RAW); + if (ret) + free_irq(irq, NULL); + + return ret; +} +early_initcall(xtensa_pmu_init); diff --git a/kernel/arch/xtensa/kernel/setup.c b/kernel/arch/xtensa/kernel/setup.c index 28fc57ef5..9735691f3 100644 --- a/kernel/arch/xtensa/kernel/setup.c +++ b/kernel/arch/xtensa/kernel/setup.c @@ -190,7 +190,7 @@ static int __init parse_bootparam(const bp_tag_t* tag) #ifdef CONFIG_OF bool __initdata dt_memory_scan = false; -#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY +#if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR; EXPORT_SYMBOL(xtensa_kio_paddr); @@ -334,7 +334,10 @@ extern char _Level5InterruptVector_text_end; extern char _Level6InterruptVector_text_start; extern char _Level6InterruptVector_text_end; #endif - +#ifdef CONFIG_SMP +extern char _SecondaryResetVector_text_start; +extern char _SecondaryResetVector_text_end; +#endif #ifdef CONFIG_S32C1I_SELFTEST @@ -506,6 +509,10 @@ void __init setup_arch(char **cmdline_p) __pa(&_Level6InterruptVector_text_end), 0); #endif +#ifdef CONFIG_SMP + mem_reserve(__pa(&_SecondaryResetVector_text_start), + __pa(&_SecondaryResetVector_text_end), 0); +#endif parse_early_param(); bootmem_init(); diff --git a/kernel/arch/xtensa/kernel/stacktrace.c b/kernel/arch/xtensa/kernel/stacktrace.c index 7d2c317bd..7538d802b 100644 --- a/kernel/arch/xtensa/kernel/stacktrace.c +++ b/kernel/arch/xtensa/kernel/stacktrace.c @@ -1,11 +1,12 @@ /* - * arch/xtensa/kernel/stacktrace.c + * Kernel and userspace stack tracing. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2013 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. 
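+ *
+ * The backtracers below take a per-frame callback and are exported
+ * as xtensa_backtrace_user()/xtensa_backtrace_kernel(), so that the
+ * perf and oprofile backends can share a single implementation.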
*/ #include <linux/export.h> #include <linux/sched.h> @@ -13,6 +14,170 @@ #include <asm/stacktrace.h> #include <asm/traps.h> +#include <asm/uaccess.h> + +#if IS_ENABLED(CONFIG_OPROFILE) || IS_ENABLED(CONFIG_PERF_EVENTS) + +/* Address of common_exception_return, used to check the + * transition from kernel to user space. + */ +extern int common_exception_return; + +/* A struct that maps to the part of the frame containing the a0 and + * a1 registers. + */ +struct frame_start { + unsigned long a0; + unsigned long a1; +}; + +void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth, + int (*ufn)(struct stackframe *frame, void *data), + void *data) +{ + unsigned long windowstart = regs->windowstart; + unsigned long windowbase = regs->windowbase; + unsigned long a0 = regs->areg[0]; + unsigned long a1 = regs->areg[1]; + unsigned long pc = regs->pc; + struct stackframe frame; + int index; + + if (!depth--) + return; + + frame.pc = pc; + frame.sp = a1; + + if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data)) + return; + + /* Two steps: + * + * 1. Look through the register window for the + * previous PCs in the call trace. + * + * 2. Look on the stack. + */ + + /* Step 1. */ + /* Rotate WINDOWSTART to move the bit corresponding to + * the current window to the bit #0. + */ + windowstart = (windowstart << WSBITS | windowstart) >> windowbase; + + /* Look for bits that are set, they correspond to + * valid windows. + */ + for (index = WSBITS - 1; (index > 0) && depth; depth--, index--) + if (windowstart & (1 << index)) { + /* Get the PC from a0 and a1. */ + pc = MAKE_PC_FROM_RA(a0, pc); + /* Read a0 and a1 from the + * corresponding position in AREGs. + */ + a0 = regs->areg[index * 4]; + a1 = regs->areg[index * 4 + 1]; + + frame.pc = pc; + frame.sp = a1; + + if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data)) + return; + } + + /* Step 2. */ + /* We are done with the register window, we need to + * look through the stack. + */ + if (!depth) + return; + + /* Start from the a1 register. */ + /* a1 = regs->areg[1]; */ + while (a0 != 0 && depth--) { + struct frame_start frame_start; + /* Get the location for a1, a0 for the + * previous frame from the current a1. + */ + unsigned long *psp = (unsigned long *)a1; + + psp -= 4; + + /* Check if the region is OK to access. */ + if (!access_ok(VERIFY_READ, psp, sizeof(frame_start))) + return; + /* Copy a1, a0 from user space stack frame. */ + if (__copy_from_user_inatomic(&frame_start, psp, + sizeof(frame_start))) + return; + + pc = MAKE_PC_FROM_RA(a0, pc); + a0 = frame_start.a0; + a1 = frame_start.a1; + + frame.pc = pc; + frame.sp = a1; + + if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data)) + return; + } +} +EXPORT_SYMBOL(xtensa_backtrace_user); + +void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth, + int (*kfn)(struct stackframe *frame, void *data), + int (*ufn)(struct stackframe *frame, void *data), + void *data) +{ + unsigned long pc = regs->depc > VALID_DOUBLE_EXCEPTION_ADDRESS ? + regs->depc : regs->pc; + unsigned long sp_start, sp_end; + unsigned long a0 = regs->areg[0]; + unsigned long a1 = regs->areg[1]; + + sp_start = a1 & ~(THREAD_SIZE - 1); + sp_end = sp_start + THREAD_SIZE; + + /* Spill the register window to the stack first. */ + spill_registers(); + + /* Read the stack frames one by one and create the PC + * from the a0 and a1 registers saved there. 
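+ *
+ * In the windowed ABI the caller's a0 and a1 are spilled to the
+ * base save area just below the callee's stack pointer, which is
+ * why the loop below reads them at *(psp - 4) and *(psp - 3),
+ * i.e. at byte offsets sp - 16 and sp - 12.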
+ */ + while (a1 > sp_start && a1 < sp_end && depth--) { + struct stackframe frame; + unsigned long *psp = (unsigned long *)a1; + + frame.pc = pc; + frame.sp = a1; + + if (kernel_text_address(pc) && kfn(&frame, data)) + return; + + if (pc == (unsigned long)&common_exception_return) { + regs = (struct pt_regs *)a1; + if (user_mode(regs)) { + if (ufn == NULL) + return; + xtensa_backtrace_user(regs, depth, ufn, data); + return; + } + a0 = regs->areg[0]; + a1 = regs->areg[1]; + continue; + } + + sp_start = a1; + + pc = MAKE_PC_FROM_RA(a0, pc); + a0 = *(psp - 4); + a1 = *(psp - 3); + } +} +EXPORT_SYMBOL(xtensa_backtrace_kernel); + +#endif void walk_stackframe(unsigned long *sp, int (*fn)(struct stackframe *frame, void *data), diff --git a/kernel/arch/xtensa/kernel/time.c b/kernel/arch/xtensa/kernel/time.c index 2a1823de6..b9ad9fead 100644 --- a/kernel/arch/xtensa/kernel/time.c +++ b/kernel/arch/xtensa/kernel/time.c @@ -52,8 +52,6 @@ static struct clocksource ccount_clocksource = { static int ccount_timer_set_next_event(unsigned long delta, struct clock_event_device *dev); -static void ccount_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt); struct ccount_timer { struct clock_event_device evt; int irq_enabled; @@ -77,35 +75,34 @@ static int ccount_timer_set_next_event(unsigned long delta, return ret; } -static void ccount_timer_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +/* + * There is no way to disable the timer interrupt at the device level, + * only at the intenable register itself. Since enable_irq/disable_irq + * calls are nested, we need to make sure that these calls are + * balanced. + */ +static int ccount_timer_shutdown(struct clock_event_device *evt) +{ + struct ccount_timer *timer = + container_of(evt, struct ccount_timer, evt); + + if (timer->irq_enabled) { + disable_irq(evt->irq); + timer->irq_enabled = 0; + } + return 0; +} + +static int ccount_timer_set_oneshot(struct clock_event_device *evt) { struct ccount_timer *timer = container_of(evt, struct ccount_timer, evt); - /* - * There is no way to disable the timer interrupt at the device level, - * only at the intenable register itself. Since enable_irq/disable_irq - * calls are nested, we need to make sure that these calls are - * balanced. 
- */ - switch (mode) { - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - if (timer->irq_enabled) { - disable_irq(evt->irq); - timer->irq_enabled = 0; - } - break; - case CLOCK_EVT_MODE_RESUME: - case CLOCK_EVT_MODE_ONESHOT: - if (!timer->irq_enabled) { - enable_irq(evt->irq); - timer->irq_enabled = 1; - } - default: - break; + if (!timer->irq_enabled) { + enable_irq(evt->irq); + timer->irq_enabled = 1; } + return 0; } static irqreturn_t timer_interrupt(int irq, void *dev_id); @@ -126,7 +123,9 @@ void local_timer_setup(unsigned cpu) clockevent->features = CLOCK_EVT_FEAT_ONESHOT; clockevent->rating = 300; clockevent->set_next_event = ccount_timer_set_next_event; - clockevent->set_mode = ccount_timer_set_mode; + clockevent->set_state_shutdown = ccount_timer_shutdown; + clockevent->set_state_oneshot = ccount_timer_set_oneshot; + clockevent->tick_resume = ccount_timer_set_oneshot; clockevent->cpumask = cpumask_of(cpu); clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT); if (WARN(!clockevent->irq, "error: can't map timer irq")) @@ -149,7 +148,7 @@ void __init time_init(void) local_timer_setup(0); setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction); sched_clock_register(ccount_sched_clock_read, 32, ccount_freq); - clocksource_of_init(); + clocksource_probe(); } /* diff --git a/kernel/arch/xtensa/kernel/traps.c b/kernel/arch/xtensa/kernel/traps.c index 9d2f45f01..42d441f78 100644 --- a/kernel/arch/xtensa/kernel/traps.c +++ b/kernel/arch/xtensa/kernel/traps.c @@ -62,6 +62,7 @@ extern void fast_coprocessor(void); extern void do_illegal_instruction (struct pt_regs*); extern void do_interrupt (struct pt_regs*); +extern void do_nmi(struct pt_regs *); extern void do_unaligned_user (struct pt_regs*); extern void do_multihit (struct pt_regs*, unsigned long); extern void do_page_fault (struct pt_regs*, unsigned long); @@ -146,6 +147,9 @@ COPROCESSOR(6), #if XTENSA_HAVE_COPROCESSOR(7) COPROCESSOR(7), #endif +#if XTENSA_FAKE_NMI +{ EXCCAUSE_MAPPED_NMI, 0, do_nmi }, +#endif { EXCCAUSE_MAPPED_DEBUG, 0, do_debug }, { -1, -1, 0 } @@ -199,6 +203,28 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause) extern void do_IRQ(int, struct pt_regs *); +#if XTENSA_FAKE_NMI + +irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id); + +DEFINE_PER_CPU(unsigned long, nmi_count); + +void do_nmi(struct pt_regs *regs) +{ + struct pt_regs *old_regs; + + if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL) + trace_hardirqs_off(); + + old_regs = set_irq_regs(regs); + nmi_enter(); + ++*this_cpu_ptr(&nmi_count); + xtensa_pmu_irq_handler(0, NULL); + nmi_exit(); + set_irq_regs(old_regs); +} +#endif + void do_interrupt(struct pt_regs *regs) { static const unsigned int_level_mask[] = { @@ -211,8 +237,11 @@ void do_interrupt(struct pt_regs *regs) XCHAL_INTLEVEL6_MASK, XCHAL_INTLEVEL7_MASK, }; - struct pt_regs *old_regs = set_irq_regs(regs); + struct pt_regs *old_regs; + + trace_hardirqs_off(); + old_regs = set_irq_regs(regs); irq_enter(); for (;;) { diff --git a/kernel/arch/xtensa/kernel/vectors.S b/kernel/arch/xtensa/kernel/vectors.S index 1b397a902..fc25318e7 100644 --- a/kernel/arch/xtensa/kernel/vectors.S +++ b/kernel/arch/xtensa/kernel/vectors.S @@ -478,6 +478,9 @@ _DoubleExceptionVector_handle_exception: ENDPROC(_DoubleExceptionVector) + .end literal_prefix + + .text /* * Fixup handler for TLB miss in double exception handler for window overflow.
* We get here with windowbase set to the window that was being spilled and @@ -587,7 +590,6 @@ ENTRY(window_overflow_restore_a0_fixup) ENDPROC(window_overflow_restore_a0_fixup) - .end literal_prefix /* * Debug interrupt vector * @@ -627,7 +629,11 @@ ENTRY(_Level\level\()InterruptVector) wsr a0, excsave2 rsr a0, epc\level wsr a0, epc1 + .if \level <= LOCKLEVEL movi a0, EXCCAUSE_LEVEL1_INTERRUPT + .else + movi a0, EXCCAUSE_MAPPED_NMI + .endif wsr a0, exccause rsr a0, eps\level # branch to user or kernel vector @@ -682,11 +688,13 @@ ENDPROC(_WindowOverflow4) .align 4 _SimulateUserKernelVectorException: addi a0, a0, (1 << PS_EXCM_BIT) +#if !XTENSA_FAKE_NMI wsr a0, ps +#endif bbsi.l a0, PS_UM_BIT, 1f # branch if user mode - rsr a0, excsave2 # restore a0 + xsr a0, excsave2 # restore a0 j _KernelExceptionVector # simulate kernel vector exception -1: rsr a0, excsave2 # restore a0 +1: xsr a0, excsave2 # restore a0 j _UserExceptionVector # simulate user vector exception #endif diff --git a/kernel/arch/xtensa/kernel/vmlinux.lds.S b/kernel/arch/xtensa/kernel/vmlinux.lds.S index fc1bc2ba8..c417cbe4e 100644 --- a/kernel/arch/xtensa/kernel/vmlinux.lds.S +++ b/kernel/arch/xtensa/kernel/vmlinux.lds.S @@ -166,8 +166,6 @@ SECTIONS RELOCATE_ENTRY(_DebugInterruptVector_text, .DebugInterruptVector.text); #if defined(CONFIG_SMP) - RELOCATE_ENTRY(_SecondaryResetVector_literal, - .SecondaryResetVector.literal); RELOCATE_ENTRY(_SecondaryResetVector_text, .SecondaryResetVector.text); #endif @@ -282,17 +280,11 @@ SECTIONS #if defined(CONFIG_SMP) - SECTION_VECTOR (_SecondaryResetVector_literal, - .SecondaryResetVector.literal, - RESET_VECTOR1_VADDR - 4, - SIZEOF(.DoubleExceptionVector.text), - .DoubleExceptionVector.text) - SECTION_VECTOR (_SecondaryResetVector_text, .SecondaryResetVector.text, RESET_VECTOR1_VADDR, - 4, - .SecondaryResetVector.literal) + SIZEOF(.DoubleExceptionVector.text), + .DoubleExceptionVector.text) . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text); @@ -306,31 +298,6 @@ SECTIONS _end = .; - /* only used by the boot loader */ - - . = ALIGN(0x10); - .bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) } - - .ResetVector.text RESET_VECTOR_VADDR : - { - *(.ResetVector.text) - } - - - /* - * This is a remapped copy of the Secondary Reset Vector Code. - * It keeps gdb in sync with the PC after switching - * to the temporary mapping used while setting up - * the V2 MMU mappings for Linux. - * - * Only debug information about this section is put in the kernel image. 
- */ - .SecondaryResetVector.remapped_text 0x46000000 (INFO): - { - *(.SecondaryResetVector.remapped_text) - } - - .xt.lit : { *(.xt.lit) } .xt.prop : { *(.xt.prop) } diff --git a/kernel/arch/xtensa/lib/usercopy.S b/kernel/arch/xtensa/lib/usercopy.S index ace1892a8..7ea4dd688 100644 --- a/kernel/arch/xtensa/lib/usercopy.S +++ b/kernel/arch/xtensa/lib/usercopy.S @@ -222,8 +222,8 @@ __xtensa_copy_user: loopnez a7, .Loop2done #else /* !XCHAL_HAVE_LOOPS */ beqz a7, .Loop2done - slli a10, a7, 4 - add a10, a10, a3 # a10 = end of last 16B source chunk + slli a12, a7, 4 + add a12, a12, a3 # a12 = end of last 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop2: EX(l32i, a7, a3, 4, l_fixup) @@ -241,7 +241,7 @@ __xtensa_copy_user: EX(s32i, a9, a5, 12, s_fixup) addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS - blt a3, a10, .Loop2 + blt a3, a12, .Loop2 #endif /* !XCHAL_HAVE_LOOPS */ .Loop2done: bbci.l a4, 3, .L12 diff --git a/kernel/arch/xtensa/mm/fault.c b/kernel/arch/xtensa/mm/fault.c index 83a44a33c..c9784c1b1 100644 --- a/kernel/arch/xtensa/mm/fault.c +++ b/kernel/arch/xtensa/mm/fault.c @@ -15,6 +15,7 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/hardirq.h> +#include <linux/perf_event.h> #include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> @@ -142,6 +143,12 @@ good_area: } up_read(&mm->mmap_sem); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + if (fault & VM_FAULT_MAJOR) + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); + else + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); + return; /* Something tried to access memory that isn't in our memory map.. diff --git a/kernel/arch/xtensa/oprofile/backtrace.c b/kernel/arch/xtensa/oprofile/backtrace.c index 5f03a593d..8f952034e 100644 --- a/kernel/arch/xtensa/oprofile/backtrace.c +++ b/kernel/arch/xtensa/oprofile/backtrace.c @@ -2,168 +2,26 @@ * @file backtrace.c * * @remark Copyright 2008 Tensilica Inc. + * Copyright (C) 2015 Cadence Design Systems Inc. * @remark Read the file COPYING * */ #include <linux/oprofile.h> -#include <linux/sched.h> -#include <linux/mm.h> #include <asm/ptrace.h> -#include <asm/uaccess.h> -#include <asm/traps.h> +#include <asm/stacktrace.h> -/* Address of common_exception_return, used to check the - * transition from kernel to user space. - */ -extern int common_exception_return; - -/* A struct that maps to the part of the frame containing the a0 and - * a1 registers. - */ -struct frame_start { - unsigned long a0; - unsigned long a1; -}; - -static void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth) -{ - unsigned long windowstart = regs->windowstart; - unsigned long windowbase = regs->windowbase; - unsigned long a0 = regs->areg[0]; - unsigned long a1 = regs->areg[1]; - unsigned long pc = MAKE_PC_FROM_RA(a0, regs->pc); - int index; - - /* First add the current PC to the trace. */ - if (pc != 0 && pc <= TASK_SIZE) - oprofile_add_trace(pc); - else - return; - - /* Two steps: - * - * 1. Look through the register window for the - * previous PCs in the call trace. - * - * 2. Look on the stack. - */ - - /* Step 1. */ - /* Rotate WINDOWSTART to move the bit corresponding to - * the current window to the bit #0. - */ - windowstart = (windowstart << WSBITS | windowstart) >> windowbase; - - /* Look for bits that are set, they correspond to - * valid windows.
- */ - for (index = WSBITS - 1; (index > 0) && depth; depth--, index--) - if (windowstart & (1 << index)) { - /* Read a0 and a1 from the - * corresponding position in AREGs. - */ - a0 = regs->areg[index * 4]; - a1 = regs->areg[index * 4 + 1]; - /* Get the PC from a0 and a1. */ - pc = MAKE_PC_FROM_RA(a0, pc); - - /* Add the PC to the trace. */ - if (pc != 0 && pc <= TASK_SIZE) - oprofile_add_trace(pc); - else - return; - } - - /* Step 2. */ - /* We are done with the register window, we need to - * look through the stack. - */ - if (depth > 0) { - /* Start from the a1 register. */ - /* a1 = regs->areg[1]; */ - while (a0 != 0 && depth--) { - - struct frame_start frame_start; - /* Get the location for a1, a0 for the - * previous frame from the current a1. - */ - unsigned long *psp = (unsigned long *)a1; - psp -= 4; - - /* Check if the region is OK to access. */ - if (!access_ok(VERIFY_READ, psp, sizeof(frame_start))) - return; - /* Copy a1, a0 from user space stack frame. */ - if (__copy_from_user_inatomic(&frame_start, psp, - sizeof(frame_start))) - return; - - a0 = frame_start.a0; - a1 = frame_start.a1; - pc = MAKE_PC_FROM_RA(a0, pc); - - if (pc != 0 && pc <= TASK_SIZE) - oprofile_add_trace(pc); - else - return; - } - } -} - -static void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth) +static int xtensa_backtrace_cb(struct stackframe *frame, void *data) { - unsigned long pc = regs->pc; - unsigned long *psp; - unsigned long sp_start, sp_end; - unsigned long a0 = regs->areg[0]; - unsigned long a1 = regs->areg[1]; - - sp_start = a1 & ~(THREAD_SIZE-1); - sp_end = sp_start + THREAD_SIZE; - - /* Spill the register window to the stack first. */ - spill_registers(); - - /* Read the stack frames one by one and create the PC - * from the a0 and a1 registers saved there. - */ - while (a1 > sp_start && a1 < sp_end && depth--) { - pc = MAKE_PC_FROM_RA(a0, pc); - - /* Add the PC to the trace. */ - oprofile_add_trace(pc); - if (pc == (unsigned long) &common_exception_return) { - regs = (struct pt_regs *)a1; - if (user_mode(regs)) { - pc = regs->pc; - if (pc != 0 && pc <= TASK_SIZE) - oprofile_add_trace(pc); - else - return; - return xtensa_backtrace_user(regs, depth); - } - a0 = regs->areg[0]; - a1 = regs->areg[1]; - continue; - } - - psp = (unsigned long *)a1; - - a0 = *(psp - 4); - a1 = *(psp - 3); - - if (a1 <= (unsigned long)psp) - return; - - } - return; + oprofile_add_trace(frame->pc); + return 0; } void xtensa_backtrace(struct pt_regs * const regs, unsigned int depth) { if (user_mode(regs)) - xtensa_backtrace_user(regs, depth); + xtensa_backtrace_user(regs, depth, xtensa_backtrace_cb, NULL); else - xtensa_backtrace_kernel(regs, depth); + xtensa_backtrace_kernel(regs, depth, xtensa_backtrace_cb, + xtensa_backtrace_cb, NULL); } diff --git a/kernel/arch/xtensa/platforms/iss/network.c b/kernel/arch/xtensa/platforms/iss/network.c index 17b1ef323..976a38594 100644 --- a/kernel/arch/xtensa/platforms/iss/network.c +++ b/kernel/arch/xtensa/platforms/iss/network.c @@ -105,13 +105,17 @@ static char *split_if_spec(char *str, ...) 
va_start(ap, str); while ((arg = va_arg(ap, char**)) != NULL) { - if (*str == '\0') + if (*str == '\0') { + va_end(ap); return NULL; + } end = strchr(str, ','); if (end != str) *arg = str; - if (end == NULL) + if (end == NULL) { + va_end(ap); return NULL; + } *end++ = '\0'; str = end; } @@ -681,6 +685,4 @@ static int iss_net_init(void) return 1; } - -module_init(iss_net_init); - +device_initcall(iss_net_init); diff --git a/kernel/arch/xtensa/platforms/iss/setup.c b/kernel/arch/xtensa/platforms/iss/setup.c index da7d18240..391820539 100644 --- a/kernel/arch/xtensa/platforms/iss/setup.c +++ b/kernel/arch/xtensa/platforms/iss/setup.c @@ -61,7 +61,9 @@ void platform_restart(void) #if XCHAL_NUM_IBREAK > 0 "wsr a2, ibreakenable\n\t" #endif +#if XCHAL_HAVE_LOOPS "wsr a2, lcount\n\t" +#endif "movi a2, 0x1f\n\t" "wsr a2, ps\n\t" "isync\n\t" diff --git a/kernel/arch/xtensa/platforms/iss/simdisk.c b/kernel/arch/xtensa/platforms/iss/simdisk.c index 48eebacdf..3c3ace2c4 100644 --- a/kernel/arch/xtensa/platforms/iss/simdisk.c +++ b/kernel/arch/xtensa/platforms/iss/simdisk.c @@ -101,8 +101,9 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector, spin_unlock(&dev->lock); } -static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio) +static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio) { + struct simdisk *dev = q->queuedata; struct bio_vec bvec; struct bvec_iter iter; sector_t sector = bio->bi_iter.bi_sector; @@ -116,17 +117,11 @@ static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio) sector += len; __bio_kunmap_atomic(buffer); } - return 0; -} -static void simdisk_make_request(struct request_queue *q, struct bio *bio) -{ - struct simdisk *dev = q->queuedata; - int status = simdisk_xfer_bio(dev, bio); - bio_endio(bio, status); + bio_endio(bio); + return BLK_QC_T_NONE; } - static int simdisk_open(struct block_device *bdev, fmode_t mode) { struct simdisk *dev = bdev->bd_disk->private_data; diff --git a/kernel/arch/xtensa/platforms/xt2000/setup.c b/kernel/arch/xtensa/platforms/xt2000/setup.c index b90555cb8..87678961a 100644 --- a/kernel/arch/xtensa/platforms/xt2000/setup.c +++ b/kernel/arch/xtensa/platforms/xt2000/setup.c @@ -72,7 +72,9 @@ void platform_restart(void) #if XCHAL_NUM_IBREAK > 0 "wsr a2, ibreakenable\n\t" #endif +#if XCHAL_HAVE_LOOPS "wsr a2, lcount\n\t" +#endif "movi a2, 0x1f\n\t" "wsr a2, ps\n\t" "isync\n\t" diff --git a/kernel/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/kernel/arch/xtensa/platforms/xtfpga/include/platform/hardware.h index 0a55bb9c5..dbeea2b44 100644 --- a/kernel/arch/xtensa/platforms/xtfpga/include/platform/hardware.h +++ b/kernel/arch/xtensa/platforms/xtfpga/include/platform/hardware.h @@ -12,13 +12,15 @@ * This file contains the hardware configuration of the XTAVNET boards. */ +#include <asm/types.h> + #ifndef __XTENSA_XTAVNET_HARDWARE_H #define __XTENSA_XTAVNET_HARDWARE_H /* Memory configuration. */ -#define PLATFORM_DEFAULT_MEM_START CONFIG_DEFAULT_MEM_START -#define PLATFORM_DEFAULT_MEM_SIZE CONFIG_DEFAULT_MEM_SIZE +#define PLATFORM_DEFAULT_MEM_START __XTENSA_UL(CONFIG_DEFAULT_MEM_START) +#define PLATFORM_DEFAULT_MEM_SIZE __XTENSA_UL(CONFIG_DEFAULT_MEM_SIZE) /* Interrupt configuration. 
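 *
 * (A note on the memory macros above: wrapping the CONFIG_ values in
 * __XTENSA_UL() from <asm/types.h> presumably gives them an unsigned
 * long type when compiled as C while leaving them usable from
 * assembly sources, where a UL suffix would not parse.)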
*/ diff --git a/kernel/arch/xtensa/platforms/xtfpga/setup.c b/kernel/arch/xtensa/platforms/xtfpga/setup.c index b4cf70e53..e9f65f79c 100644 --- a/kernel/arch/xtensa/platforms/xtfpga/setup.c +++ b/kernel/arch/xtensa/platforms/xtfpga/setup.c @@ -63,7 +63,9 @@ void platform_restart(void) #if XCHAL_NUM_IBREAK > 0 "wsr a2, ibreakenable\n\t" #endif +#if XCHAL_HAVE_LOOPS "wsr a2, lcount\n\t" +#endif "movi a2, 0x1f\n\t" "wsr a2, ps\n\t" "isync\n\t" diff --git a/kernel/arch/xtensa/variants/de212/include/variant/core.h b/kernel/arch/xtensa/variants/de212/include/variant/core.h new file mode 100644 index 000000000..59e91e47e --- /dev/null +++ b/kernel/arch/xtensa/variants/de212/include/variant/core.h @@ -0,0 +1,594 @@ +/* + * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa + * processor CORE configuration + * + * See <xtensa/config/core.h>, which includes this file, for more details. + */ + +/* Xtensa processor core configuration information. + + Copyright (c) 1999-2015 Tensilica Inc. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#ifndef _XTENSA_CORE_CONFIGURATION_H +#define _XTENSA_CORE_CONFIGURATION_H + + +/**************************************************************************** + Parameters Useful for Any Code, USER or PRIVILEGED + ****************************************************************************/ + +/* + * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is + * configured, and a value of 0 otherwise. These macros are always defined. + */ + + +/*---------------------------------------------------------------------- + ISA + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */ +#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */ +#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */ +#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */ +#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */ +#define XCHAL_HAVE_DEBUG 1 /* debug option */ +#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */ +#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */ +#define XCHAL_LOOP_BUFFER_SIZE 0 /* zero-ov. 
loop instr buffer size */ +#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */ +#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */ +#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */ +#define XCHAL_HAVE_DEPBITS 0 /* DEPBITS instruction */ +#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */ +#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */ +#define XCHAL_HAVE_MUL32 1 /* MULL instruction */ +#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */ +#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */ +#define XCHAL_HAVE_L32R 1 /* L32R instruction */ +#define XCHAL_HAVE_ABSOLUTE_LITERALS 0 /* non-PC-rel (extended) L32R */ +#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */ +#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */ +#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */ +#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */ +#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */ +#define XCHAL_HAVE_ABS 1 /* ABS instruction */ +/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */ +/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */ +#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */ +#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */ +#define XCHAL_HAVE_SPECULATION 0 /* speculation */ +#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */ +#define XCHAL_NUM_CONTEXTS 1 /* */ +#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */ +#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */ +#define XCHAL_HAVE_PRID 1 /* processor ID register */ +#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */ +#define XCHAL_HAVE_MX 0 /* MX core (Tensilica internal) */ +#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */ +#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */ +#define XCHAL_HAVE_PSO 0 /* Power Shut-Off */ +#define XCHAL_HAVE_PSO_CDM 0 /* core/debug/mem pwr domains */ +#define XCHAL_HAVE_PSO_FULL_RETENTION 0 /* all regs preserved on PSO */ +#define XCHAL_HAVE_THREADPTR 0 /* THREADPTR register */ +#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */ +#define XCHAL_HAVE_CP 0 /* CPENABLE reg (coprocessor) */ +#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one */ +#define XCHAL_HAVE_MAC16 1 /* MAC16 package */ + +#define XCHAL_HAVE_FUSION 0 /* Fusion*/ +#define XCHAL_HAVE_FUSION_FP 0 /* Fusion FP option */ +#define XCHAL_HAVE_FUSION_LOW_POWER 0 /* Fusion Low Power option */ +#define XCHAL_HAVE_FUSION_AES 0 /* Fusion BLE/Wifi AES-128 CCM option */ +#define XCHAL_HAVE_FUSION_CONVENC 0 /* Fusion Conv Encode option */ +#define XCHAL_HAVE_FUSION_LFSR_CRC 0 /* Fusion LFSR-CRC option */ +#define XCHAL_HAVE_FUSION_BITOPS 0 /* Fusion Bit Operations Support option */ +#define XCHAL_HAVE_FUSION_AVS 0 /* Fusion AVS option */ +#define XCHAL_HAVE_FUSION_16BIT_BASEBAND 0 /* Fusion 16-bit Baseband option */ +#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */ +#define XCHAL_HAVE_HIFI4 0 /* HiFi4 Audio Engine pkg */ +#define XCHAL_HAVE_HIFI4_VFPU 0 /* HiFi4 Audio Engine VFPU option */ +#define XCHAL_HAVE_HIFI3 0 /* HiFi3 Audio Engine pkg */ +#define XCHAL_HAVE_HIFI3_VFPU 0 /* HiFi3 Audio Engine VFPU option */ +#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */ +#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */ +#define XCHAL_HAVE_HIFI_MINI 0 + + +#define XCHAL_HAVE_VECTORFPU2005 0 /* vector or user floating-point pkg */ +#define XCHAL_HAVE_USER_DPFPU 0 /* user DP floating-point pkg */ +#define XCHAL_HAVE_USER_SPFPU 0 /* user DP floating-point pkg */ 
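+
+/*
+ * Usage sketch (illustrative, not emitted by the processor
+ * generator): because every XCHAL_HAVE_* macro above is always
+ * defined as 0 or 1, code selects on the value with #if rather than
+ * testing definedness with #ifdef, e.g.:
+ *
+ *	#if XCHAL_HAVE_S32C1I
+ *		... use the S32C1I compare-and-swap instruction ...
+ *	#else
+ *		... fall back to an interrupt-masking implementation ...
+ *	#endif
+ *
+ * (#ifdef XCHAL_HAVE_S32C1I would always be true.) The hunks above
+ * use the same pattern with XCHAL_HAVE_LOOPS and
+ * XCHAL_DCACHE_IS_WRITEBACK.
+ */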
+#define XCHAL_HAVE_FP 0 /* single prec floating point */ +#define XCHAL_HAVE_FP_DIV 0 /* FP with DIV instructions */ +#define XCHAL_HAVE_FP_RECIP 0 /* FP with RECIP instructions */ +#define XCHAL_HAVE_FP_SQRT 0 /* FP with SQRT instructions */ +#define XCHAL_HAVE_FP_RSQRT 0 /* FP with RSQRT instructions */ +#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */ +#define XCHAL_HAVE_DFP_DIV 0 /* DFP with DIV instructions */ +#define XCHAL_HAVE_DFP_RECIP 0 /* DFP with RECIP instructions*/ +#define XCHAL_HAVE_DFP_SQRT 0 /* DFP with SQRT instructions */ +#define XCHAL_HAVE_DFP_RSQRT 0 /* DFP with RSQRT instructions*/ +#define XCHAL_HAVE_DFP_ACCEL 0 /* double precision FP acceleration pkg */ +#define XCHAL_HAVE_DFP_accel XCHAL_HAVE_DFP_ACCEL /* for backward compatibility */ + +#define XCHAL_HAVE_DFPU_SINGLE_ONLY 0 /* DFPU Coprocessor, single precision only */ +#define XCHAL_HAVE_DFPU_SINGLE_DOUBLE 0 /* DFPU Coprocessor, single and double precision */ +#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */ +#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */ +#define XCHAL_HAVE_PDX4 0 /* PDX4 */ +#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */ +#define XCHAL_HAVE_CONNXD2_DUALLSFLIX 0 /* ConnX D2 & Dual LoadStore Flix */ +#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */ +#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */ +#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */ +#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */ +#define XCHAL_HAVE_BBENEP 0 /* ConnX BBENEP pkgs */ +#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */ +#define XCHAL_HAVE_BSP3_TRANSPOSE 0 /* BSP3 & transpose32x32 */ +#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */ +#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */ +#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */ +#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */ +#define XCHAL_HAVE_FLIX3 0 /* basic 3-way FLIX option */ +#define XCHAL_HAVE_GRIVPEP 0 /* GRIVPEP is General Release of IVPEP */ +#define XCHAL_HAVE_GRIVPEP_HISTOGRAM 0 /* Histogram option on GRIVPEP */ + + +/*---------------------------------------------------------------------- + MISC + ----------------------------------------------------------------------*/ + +#define XCHAL_NUM_LOADSTORE_UNITS 1 /* load/store units */ +#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */ +#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */ +#define XCHAL_DATA_WIDTH 4 /* data width in bytes */ +#define XCHAL_DATA_PIPE_DELAY 1 /* d-side pipeline delay + (1 = 5-stage, 2 = 7-stage) */ +#define XCHAL_CLOCK_GATING_GLOBAL 0 /* global clock gating */ +#define XCHAL_CLOCK_GATING_FUNCUNIT 0 /* funct. unit clock gating */ +/* In T1050, applies to selected core load and store instructions (see ISA): */ +#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */ +#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/ +#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */ +#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/ + +#define XCHAL_SW_VERSION 1100002 /* sw version of this header */ + +#define XCHAL_CORE_ID "de212" /* alphanum core name + (CoreID) set in the Xtensa + Processor Generator */ + +#define XCHAL_BUILD_UNIQUE_ID 0x0005A985 /* 22-bit sw build ID */ + +/* + * These definitions describe the hardware targeted by this software. 
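+ * For example, XCHAL_HW_VERSION below is
+ * XCHAL_HW_VERSION_MAJOR * 100 + XCHAL_HW_VERSION_MINOR,
+ * i.e. 2600 * 100 + 2 = 260002 for the LX6.0.2 release targeted here.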
+ */ +#define XCHAL_HW_CONFIGID0 0xC283DFFE /* ConfigID hi 32 bits*/ +#define XCHAL_HW_CONFIGID1 0x1C85A985 /* ConfigID lo 32 bits*/ +#define XCHAL_HW_VERSION_NAME "LX6.0.2" /* full version name */ +#define XCHAL_HW_VERSION_MAJOR 2600 /* major ver# of targeted hw */ +#define XCHAL_HW_VERSION_MINOR 2 /* minor ver# of targeted hw */ +#define XCHAL_HW_VERSION 260002 /* major*100+minor */ +#define XCHAL_HW_REL_LX6 1 +#define XCHAL_HW_REL_LX6_0 1 +#define XCHAL_HW_REL_LX6_0_2 1 +#define XCHAL_HW_CONFIGID_RELIABLE 1 +/* If software targets a *range* of hardware versions, these are the bounds: */ +#define XCHAL_HW_MIN_VERSION_MAJOR 2600 /* major v of earliest tgt hw */ +#define XCHAL_HW_MIN_VERSION_MINOR 2 /* minor v of earliest tgt hw */ +#define XCHAL_HW_MIN_VERSION 260002 /* earliest targeted hw */ +#define XCHAL_HW_MAX_VERSION_MAJOR 2600 /* major v of latest tgt hw */ +#define XCHAL_HW_MAX_VERSION_MINOR 2 /* minor v of latest tgt hw */ +#define XCHAL_HW_MAX_VERSION 260002 /* latest targeted hw */ + + +/*---------------------------------------------------------------------- + CACHE + ----------------------------------------------------------------------*/ + +#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */ +#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */ +#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */ +#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */ + +#define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */ +#define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */ + +#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */ +#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */ + +#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */ +#define XCHAL_HAVE_PREFETCH_L1 0 /* prefetch to L1 dcache */ +#define XCHAL_PREFETCH_CASTOUT_LINES 0 /* dcache pref. castout bufsz */ +#define XCHAL_PREFETCH_ENTRIES 0 /* cache prefetch entries */ +#define XCHAL_PREFETCH_BLOCK_ENTRIES 0 /* prefetch block streams */ +#define XCHAL_HAVE_CACHE_BLOCKOPS 0 /* block prefetch for caches */ +#define XCHAL_HAVE_ICACHE_TEST 1 /* Icache test instructions */ +#define XCHAL_HAVE_DCACHE_TEST 1 /* Dcache test instructions */ +#define XCHAL_HAVE_ICACHE_DYN_WAYS 0 /* Icache dynamic way support */ +#define XCHAL_HAVE_DCACHE_DYN_WAYS 0 /* Dcache dynamic way support */ + + + + +/**************************************************************************** + Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code + ****************************************************************************/ + + +#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY + +/*---------------------------------------------------------------------- + CACHE + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */ + +/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). 
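+ * With the values defined here (2 ways, linewidth 5, setwidth 7)
+ * that gives 2 * 2^(5 + 7) = 8192 bytes for both caches, matching
+ * XCHAL_ICACHE_SIZE and XCHAL_DCACHE_SIZE above.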
*/ + +/* Number of cache sets in log2(lines per way): */ +#define XCHAL_ICACHE_SETWIDTH 7 +#define XCHAL_DCACHE_SETWIDTH 7 + +/* Cache set associativity (number of ways): */ +#define XCHAL_ICACHE_WAYS 2 +#define XCHAL_DCACHE_WAYS 2 + +/* Cache features: */ +#define XCHAL_ICACHE_LINE_LOCKABLE 1 +#define XCHAL_DCACHE_LINE_LOCKABLE 1 +#define XCHAL_ICACHE_ECC_PARITY 0 +#define XCHAL_DCACHE_ECC_PARITY 0 + +/* Cache access size in bytes (affects operation of SICW instruction): */ +#define XCHAL_ICACHE_ACCESS_SIZE 4 +#define XCHAL_DCACHE_ACCESS_SIZE 4 + +#define XCHAL_DCACHE_BANKS 1 /* number of banks */ + +/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */ +#define XCHAL_CA_BITS 4 + +/* Whether MEMCTL register has anything useful */ +#define XCHAL_USE_MEMCTL (((XCHAL_LOOP_BUFFER_SIZE > 0) || \ + XCHAL_DCACHE_IS_COHERENT || \ + XCHAL_HAVE_ICACHE_DYN_WAYS || \ + XCHAL_HAVE_DCACHE_DYN_WAYS) && \ + (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0)) + + +/*---------------------------------------------------------------------- + INTERNAL I/D RAM/ROMs and XLMI + ----------------------------------------------------------------------*/ + +#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */ +#define XCHAL_NUM_INSTRAM 1 /* number of core instr. RAMs */ +#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */ +#define XCHAL_NUM_DATARAM 1 /* number of core data RAMs */ +#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/ +#define XCHAL_NUM_XLMI 1 /* number of core XLMI ports */ + +/* Instruction RAM 0: */ +#define XCHAL_INSTRAM0_VADDR 0x40000000 /* virtual address */ +#define XCHAL_INSTRAM0_PADDR 0x40000000 /* physical address */ +#define XCHAL_INSTRAM0_SIZE 131072 /* size in bytes */ +#define XCHAL_INSTRAM0_ECC_PARITY 0 /* ECC/parity type, 0=none */ + +/* Data RAM 0: */ +#define XCHAL_DATARAM0_VADDR 0x3FFE0000 /* virtual address */ +#define XCHAL_DATARAM0_PADDR 0x3FFE0000 /* physical address */ +#define XCHAL_DATARAM0_SIZE 131072 /* size in bytes */ +#define XCHAL_DATARAM0_ECC_PARITY 0 /* ECC/parity type, 0=none */ +#define XCHAL_DATARAM0_BANKS 1 /* number of banks */ + +/* XLMI Port 0: */ +#define XCHAL_XLMI0_VADDR 0x3FFC0000 /* virtual address */ +#define XCHAL_XLMI0_PADDR 0x3FFC0000 /* physical address */ +#define XCHAL_XLMI0_SIZE 131072 /* size in bytes */ +#define XCHAL_XLMI0_ECC_PARITY 0 /* ECC/parity type, 0=none */ + +#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/ + + +/*---------------------------------------------------------------------- + INTERRUPTS and TIMERS + ----------------------------------------------------------------------*/ + +#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */ +#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */ +#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */ +#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */ +#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */ +#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */ +#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */ +#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */ +#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels + (not including level zero) */ +#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */ + /* (always 1 in XEA1; levels 2 .. 
EXCM_LEVEL are "medium priority") */ + +/* Masks of interrupts at each interrupt level: */ +#define XCHAL_INTLEVEL1_MASK 0x001F80FF +#define XCHAL_INTLEVEL2_MASK 0x00000100 +#define XCHAL_INTLEVEL3_MASK 0x00200E00 +#define XCHAL_INTLEVEL4_MASK 0x00001000 +#define XCHAL_INTLEVEL5_MASK 0x00002000 +#define XCHAL_INTLEVEL6_MASK 0x00000000 +#define XCHAL_INTLEVEL7_MASK 0x00004000 + +/* Masks of interrupts at each range 1..n of interrupt levels: */ +#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF +#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF +#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF +#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF +#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF +#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF +#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF + +/* Level of each interrupt: */ +#define XCHAL_INT0_LEVEL 1 +#define XCHAL_INT1_LEVEL 1 +#define XCHAL_INT2_LEVEL 1 +#define XCHAL_INT3_LEVEL 1 +#define XCHAL_INT4_LEVEL 1 +#define XCHAL_INT5_LEVEL 1 +#define XCHAL_INT6_LEVEL 1 +#define XCHAL_INT7_LEVEL 1 +#define XCHAL_INT8_LEVEL 2 +#define XCHAL_INT9_LEVEL 3 +#define XCHAL_INT10_LEVEL 3 +#define XCHAL_INT11_LEVEL 3 +#define XCHAL_INT12_LEVEL 4 +#define XCHAL_INT13_LEVEL 5 +#define XCHAL_INT14_LEVEL 7 +#define XCHAL_INT15_LEVEL 1 +#define XCHAL_INT16_LEVEL 1 +#define XCHAL_INT17_LEVEL 1 +#define XCHAL_INT18_LEVEL 1 +#define XCHAL_INT19_LEVEL 1 +#define XCHAL_INT20_LEVEL 1 +#define XCHAL_INT21_LEVEL 3 +#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */ +#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */ +#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with + EXCSAVE/EPS/EPC_n, RFI n) */ + +/* Type of each interrupt: */ +#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER +#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE +#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER +#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE +#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL +#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER +#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI +#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE +#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE + +/* Masks of interrupts for each type of interrupt: */ +#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000 +#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880 +#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000 +#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F +#define XCHAL_INTTYPE_MASK_TIMER 0x00002440 +#define XCHAL_INTTYPE_MASK_NMI 0x00004000 +#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000 +#define XCHAL_INTTYPE_MASK_PROFILING 0x00000000 + +/* Interrupt numbers assigned to specific interrupt sources: */ +#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */ +#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */ +#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */ +#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED +#define 
XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */ + +/* Interrupt numbers for levels at which only one interrupt is configured: */ +#define XCHAL_INTLEVEL2_NUM 8 +#define XCHAL_INTLEVEL4_NUM 12 +#define XCHAL_INTLEVEL5_NUM 13 +#define XCHAL_INTLEVEL7_NUM 14 +/* (There are many interrupts each at level(s) 1, 3.) */ + + +/* + * External interrupt mapping. + * These macros describe how Xtensa processor interrupt numbers + * (as numbered internally, eg. in INTERRUPT and INTENABLE registers) + * map to external BInterrupt<n> pins, for those interrupts + * configured as external (level-triggered, edge-triggered, or NMI). + * See the Xtensa processor databook for more details. + */ + +/* Core interrupt numbers mapped to each EXTERNAL BInterrupt pin number: */ +#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */ +#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */ +#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */ +#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */ +#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */ +#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */ +#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */ +#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */ +#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */ +#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */ +#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */ +#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */ +#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */ +#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */ +#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */ +#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */ +#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */ +/* EXTERNAL BInterrupt pin numbers mapped to each core interrupt number: */ +#define XCHAL_INT0_EXTNUM 0 /* (intlevel 1) */ +#define XCHAL_INT1_EXTNUM 1 /* (intlevel 1) */ +#define XCHAL_INT2_EXTNUM 2 /* (intlevel 1) */ +#define XCHAL_INT3_EXTNUM 3 /* (intlevel 1) */ +#define XCHAL_INT4_EXTNUM 4 /* (intlevel 1) */ +#define XCHAL_INT5_EXTNUM 5 /* (intlevel 1) */ +#define XCHAL_INT8_EXTNUM 6 /* (intlevel 2) */ +#define XCHAL_INT9_EXTNUM 7 /* (intlevel 3) */ +#define XCHAL_INT12_EXTNUM 8 /* (intlevel 4) */ +#define XCHAL_INT14_EXTNUM 9 /* (intlevel 7) */ +#define XCHAL_INT15_EXTNUM 10 /* (intlevel 1) */ +#define XCHAL_INT16_EXTNUM 11 /* (intlevel 1) */ +#define XCHAL_INT17_EXTNUM 12 /* (intlevel 1) */ +#define XCHAL_INT18_EXTNUM 13 /* (intlevel 1) */ +#define XCHAL_INT19_EXTNUM 14 /* (intlevel 1) */ +#define XCHAL_INT20_EXTNUM 15 /* (intlevel 1) */ +#define XCHAL_INT21_EXTNUM 16 /* (intlevel 3) */ + + +/*---------------------------------------------------------------------- + EXCEPTIONS and VECTORS + ----------------------------------------------------------------------*/ + +#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture + number: 1 == XEA1 (old) + 2 == XEA2 (new) + 0 == XEAX (extern) or TX */ +#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */ +#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */ +#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. 
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x60000000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x60000000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0x50000000
+#define XCHAL_RESET_VECTOR0_PADDR 0x50000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x40000400
+#define XCHAL_RESET_VECTOR1_PADDR 0x40000400
+#define XCHAL_RESET_VECTOR_VADDR 0x50000000
+#define XCHAL_RESET_VECTOR_PADDR 0x50000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x60000340
+#define XCHAL_USER_VECTOR_PADDR 0x60000340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x60000300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x60000300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x600003C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x600003C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x60000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x60000000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x60000180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x60000180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x600001C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x600001C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x60000200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x60000200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x60000240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x60000240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x60000280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x60000280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x600002C0
+#define XCHAL_NMI_VECTOR_PADDR 0x600002C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
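With XCHAL_HAVE_VECBASE = 1, every relocatable vector address in this block is simply XCHAL_VECBASE_RESET_VADDR plus its *_VECOFS; only the two reset vectors (0x50000000 and 0x40000400) sit outside the VECBASE window. A compile-time restatement of that arithmetic, with DE212_VEC as a hypothetical helper (not part of the patch):

#include <variant/core.h>

#define DE212_VEC(ofs)	(XCHAL_VECBASE_RESET_VADDR + (ofs))

_Static_assert(DE212_VEC(XCHAL_USER_VECOFS) == XCHAL_USER_VECTOR_VADDR,
	       "user vector");
_Static_assert(DE212_VEC(XCHAL_KERNEL_VECOFS) == XCHAL_KERNEL_VECTOR_VADDR,
	       "kernel vector");
_Static_assert(DE212_VEC(XCHAL_DOUBLEEXC_VECOFS) == XCHAL_DOUBLEEXC_VECTOR_VADDR,
	       "double-exception vector");
_Static_assert(DE212_VEC(XCHAL_INTLEVEL2_VECOFS) == XCHAL_INTLEVEL2_VECTOR_VADDR,
	       "level-2 interrupt vector");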
+
+
+/*----------------------------------------------------------------------
+ DEBUG MODULE
+ ----------------------------------------------------------------------*/
+
+/* Misc */
+#define XCHAL_HAVE_DEBUG_ERI 1 /* ERI to debug module */
+#define XCHAL_HAVE_DEBUG_APB 0 /* APB to debug module */
+#define XCHAL_HAVE_DEBUG_JTAG 1 /* JTAG to debug module */
+
+/* On-Chip Debug (OCD) */
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option (to LX4) */
+#define XCHAL_HAVE_OCD_LS32DDR 1 /* L32DDR/S32DDR (faster OCD) */
+
+/* TRAX (in core) */
+#define XCHAL_HAVE_TRAX 1 /* TRAX in debug module */
+#define XCHAL_TRAX_MEM_SIZE 262144 /* TRAX memory size in bytes */
+#define XCHAL_TRAX_MEM_SHAREABLE 0 /* start/end regs; ready sig. */
+#define XCHAL_TRAX_ATB_WIDTH 0 /* ATB width (bits), 0=no ATB */
+#define XCHAL_TRAX_TIME_WIDTH 0 /* timestamp bitwidth, 0=none */
+
+/* Perf counters */
+#define XCHAL_NUM_PERF_COUNTERS 0 /* performance counters */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 0 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 1 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 1 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 0 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+
+#define XCHAL_MMU_ASID_BITS 0 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 1 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 0 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
+
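The MMU block describes a region-protection part: XCHAL_HAVE_TLBS is documented as the inverse of XCHAL_HAVE_CACHEATTR, and the configuration pairs MIMIC_CACHEATTR with an identity map and no page-table MMU. Code that depends on those relationships could assert them directly; a minimal sketch (not part of the patch):

#include <variant/core.h>

_Static_assert(XCHAL_HAVE_TLBS == !XCHAL_HAVE_CACHEATTR,
	       "HAVE_TLBS is documented as the inverse of HAVE_CACHEATTR");
/* de212: region protection over an identity map, no page-table MMU. */
_Static_assert(XCHAL_HAVE_MIMIC_CACHEATTR && XCHAL_HAVE_IDENTITY_MAP
	       && !XCHAL_HAVE_PTP_MMU,
	       "expected a region-protection, identity-mapped configuration");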
diff --git a/kernel/arch/xtensa/variants/de212/include/variant/tie-asm.h b/kernel/arch/xtensa/variants/de212/include/variant/tie-asm.h
new file mode 100644
index 000000000..77755354f
--- /dev/null
+++ b/kernel/arch/xtensa/variants/de212/include/variant/tie-asm.h
@@ -0,0 +1,170 @@
+/*
+ * tie-asm.h -- compile-time HAL assembler definitions dependent on CORE & TIE
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file contains assembly-language definitions (assembly
+ macros, etc.) for this specific Xtensa processor's TIE extensions
+ and options. It is customized to this Xtensa processor configuration.
+
+ Copyright (c) 1999-2015 Cadence Design Systems Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/* Selection parameter values for save-area save/restore macros: */
+/* Option vs. TIE: */
+#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
+#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
+#define XTHAL_SAS_ANYOT 0x0003 /* both of the above */
+/* Whether used automatically by compiler: */
+#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
+#define XTHAL_SAS_ANYCC 0x000C /* both of the above */
+/* ABI handling across function calls: */
+#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
+#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
+#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
+#define XTHAL_SAS_ANYABI 0x0070 /* all of the above three */
+/* Misc */
+#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
+#define XTHAL_SAS3(optie,ccuse,abi) ( ((optie) & XTHAL_SAS_ANYOT) \
+ | ((ccuse) & XTHAL_SAS_ANYCC) \
+ | ((abi) & XTHAL_SAS_ANYABI) )
+
+
+ /*
+ * Macro to store all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger store sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to store. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to store, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any store.
+ */
+ .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ rsr.ACCLO \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr.ACCHI \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ rsr.SCOMPARE1 \at1 // conditional store option
+ s32i \at1, \ptr, .Lxchal_ofs_+0
+ rsr.M0 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+4
+ rsr.M1 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+8
+ rsr.M2 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+12
+ rsr.M3 \at1 // MAC16 option
+ s32i \at1, \ptr, .Lxchal_ofs_+16
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .endif
+ .endm // xchal_ncp_store
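Callers build the select and alloc arguments from the XTHAL_SAS_* bits; XTHAL_SAS3 merely clamps each argument to its field. For example, selecting optional caller-saved state regardless of compiler use yields a mask that keeps both register groups in xchal_ncp_store. The arithmetic restated in C (a sketch, with the XTHAL_SAS_* defines above in scope):

/* (0x0002 & 0x0003) | (0x000C & 0x000C) | (0x0010 & 0x0070) == 0x001E */
_Static_assert(XTHAL_SAS3(XTHAL_SAS_OPT, XTHAL_SAS_ANYCC, XTHAL_SAS_CALR)
	       == 0x001E, "optional, any-compiler-use, caller-saved state");
/* 0x001E has all bits of both group tests used in the macro above:
 * OPT|CC|CALR = 0x001A and OPT|NOCC|CALR = 0x0016. */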
+
+ /*
+ * Macro to load all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Required parameters:
+ * ptr Save area pointer address register (clobbered)
+ * (register must contain a 4 byte aligned address).
+ * at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
+ * registers are clobbered, the remaining are unused).
+ * Optional parameters:
+ * continue If macro invoked as part of a larger load sequence, set to 1
+ * if this is not the first in the sequence. Defaults to 0.
+ * ofs Offset from start of larger sequence (from value of first ptr
+ * in sequence) at which to load. Defaults to next available space
+ * (or 0 if <continue> is 0).
+ * select Select what category(ies) of registers to load, as a bitmask
+ * (see XTHAL_SAS_xxx constants). Defaults to all registers.
+ * alloc Select what category(ies) of registers to allocate; if any
+ * category is selected here that is not in <select>, space for
+ * the corresponding registers is skipped without doing any load.
+ */
+ .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
+ xchal_sa_start \continue, \ofs
+ // Optional caller-saved registers used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr.ACCLO \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr.ACCHI \at1 // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1016, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
+ .endif
+ // Optional caller-saved registers not used by default by the compiler:
+ .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ l32i \at1, \ptr, .Lxchal_ofs_+0
+ wsr.SCOMPARE1 \at1 // conditional store option
+ l32i \at1, \ptr, .Lxchal_ofs_+4
+ wsr.M0 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+8
+ wsr.M1 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+12
+ wsr.M2 \at1 // MAC16 option
+ l32i \at1, \ptr, .Lxchal_ofs_+16
+ wsr.M3 \at1 // MAC16 option
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
+ xchal_sa_align \ptr, 0, 1004, 4, 4
+ .set .Lxchal_ofs_, .Lxchal_ofs_ + 20
+ .endif
+ .endm // xchal_ncp_load
+
+
+#define XCHAL_NCP_NUM_ATMPS 1
+
+#define XCHAL_SA_NUM_ATMPS 1
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
+
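Taken together, the two groups lay the registers out back to back: 8 bytes of MAC16 accumulator state followed by 20 bytes of SCOMPARE1 plus the MAC16 M registers. A C view of that layout when everything is selected and no alignment padding is inserted (a hypothetical illustration; the kernel accesses this area only through the macros above):

struct de212_ncp_save_area {	/* offsets match xchal_ncp_store above */
	unsigned int acclo;	/*  0: MAC16 option */
	unsigned int acchi;	/*  4: MAC16 option */
	unsigned int scompare1;	/*  8: conditional store option */
	unsigned int m0;	/* 12: MAC16 option */
	unsigned int m1;	/* 16: MAC16 option */
	unsigned int m2;	/* 20: MAC16 option */
	unsigned int m3;	/* 24: MAC16 option */
};				/* 28 bytes, matching XCHAL_NCP_SA_SIZE below */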
diff --git a/kernel/arch/xtensa/variants/de212/include/variant/tie.h b/kernel/arch/xtensa/variants/de212/include/variant/tie.h
new file mode 100644
index 000000000..b8a061a3f
--- /dev/null
+++ b/kernel/arch/xtensa/variants/de212/include/variant/tie.h
@@ -0,0 +1,136 @@
+/*
+ * tie.h -- compile-time HAL definitions dependent on CORE & TIE configuration
+ *
+ * NOTE: This header file is not meant to be included directly.
+ */
+
+/* This header file describes this specific Xtensa processor's TIE extensions
+ that extend basic Xtensa core functionality. It is customized to this
+ Xtensa processor configuration.
+
+ Copyright (c) 1999-2015 Cadence Design Systems Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
+
+#define XCHAL_CP_NUM 0 /* number of coprocessors */
+#define XCHAL_CP_MAX 0 /* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */
+
+/* Save area for non-coprocessor optional and custom (TIE) state: */
+#define XCHAL_NCP_SA_SIZE 28
+#define XCHAL_NCP_SA_ALIGN 4
+
+/* Total save area for optional and custom state (NCP + CPn): */
+#define XCHAL_TOTAL_SA_SIZE 32 /* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN 4 /* actual minimum alignment */
+
+/*
+ * Detailed contents of save areas.
+ * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
+ * before expanding the XCHAL_xxx_SA_LIST() macros.
+ *
+ * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
+ * dbnum,base,regnum,bitsz,gapsz,reset,x...)
+ *
+ * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
+ * ccused = set if used by compiler without special options or code
+ * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
+ * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
+ * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
+ * name = lowercase reg name (no quotes)
+ * galign = group byte alignment (power of 2) (galign >= align)
+ * align = register byte alignment (power of 2)
+ * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
+ * (not including any pad bytes required to galign this or next reg)
+ * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
+ * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
+ * regnum = reg index in regfile, or special/TIE-user reg number
+ * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
+ * gapsz = intervening bits, if bitsz bits not stored contiguously
+ * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
+ * reset = register reset value (or 0 if undefined at reset)
+ * x = reserved for future use (0 until then)
+ *
+ * To filter out certain registers, e.g. to expand only the non-global
+ * registers used by the compiler, you can do something like this:
+ *
+ * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
+ * #define SELCC0(p...)
+ * #define SELCC1(abikind,p...) SELAK##abikind(p)
+ * #define SELAK0(p...) REG(p)
+ * #define SELAK1(p...) REG(p)
+ * #define SELAK2(p...)
+ * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
+ * ...what you want to expand...
+ */
+
+#define XCHAL_NCP_SA_NUM 7
+#define XCHAL_NCP_SA_LIST(s) \
+ XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \
+ XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \
+ XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0)
+
+#define XCHAL_CP0_SA_NUM 0
+#define XCHAL_CP0_SA_LIST(s) /* empty */
+
+#define XCHAL_CP1_SA_NUM 0
+#define XCHAL_CP1_SA_LIST(s) /* empty */
+
+#define XCHAL_CP2_SA_NUM 0
+#define XCHAL_CP2_SA_LIST(s) /* empty */
+
+#define XCHAL_CP3_SA_NUM 0
+#define XCHAL_CP3_SA_LIST(s) /* empty */
+
+#define XCHAL_CP4_SA_NUM 0
+#define XCHAL_CP4_SA_LIST(s) /* empty */
+
+#define XCHAL_CP5_SA_NUM 0
+#define XCHAL_CP5_SA_LIST(s) /* empty */
+
+#define XCHAL_CP6_SA_NUM 0
+#define XCHAL_CP6_SA_LIST(s) /* empty */
+
+#define XCHAL_CP7_SA_NUM 0
+#define XCHAL_CP7_SA_LIST(s) /* empty */
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
+#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
+/* Byte length of instruction from its first byte, per FLIX. */
+#define XCHAL_BYTE0_FORMAT_LENGTHS \
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3,\
+ 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3, 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
+
+#endif /*_XTENSA_CORE_TIE_H*/
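The save-area lists are meant to be expanded by the including file, as the comment in tie.h describes: the caller defines XCHAL_SA_REG and then invokes XCHAL_NCP_SA_LIST. As a concrete check, defining XCHAL_SA_REG to sum the asize field reproduces XCHAL_NCP_SA_SIZE (a sketch, not part of the patch; assumes the header is reachable as <variant/tie.h>):

#include <variant/tie.h>

/* Sum the asize column of every entry in the NCP save-area list. */
#define XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize, \
		     dbnum,base,regnum,bitsz,gapsz,reset,x) + (asize)

enum { de212_ncp_sa_bytes = 0 XCHAL_NCP_SA_LIST(0) };	/* 7 * 4 = 28 */

_Static_assert(de212_ncp_sa_bytes == XCHAL_NCP_SA_SIZE,
	       "NCP save-area list must add up to XCHAL_NCP_SA_SIZE");
#undef XCHAL_SA_REG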