From 52f993b8e89487ec9ee15a7fb4979e0f09a45b27 Mon Sep 17 00:00:00 2001
From: Yunhong Jiang
Date: Wed, 8 Mar 2017 23:13:28 -0800
Subject: Upgrade to 4.4.50-rt62

The current kernel is based on the RT kernel v4.4.6-rt14. Upgrade it to
4.4.50-rt62. The commands used to achieve this are:

a) Clone a git repo from
   git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git

b) Get the diff between these two changesets:

   git diff 640eca2901f3435e616157b11379d3223a44b391 705619beeea1b0b48219a683fd1a901a86fdaf5e

   where the two commits are:

   [yjiang5@jnakajim-build linux-stable-rt]$ git show --oneline --name-only 640eca2901f3435e616157b11379d3223a44b391
   640eca2901f3 v4.4.6-rt14
   localversion-rt

   [yjiang5@jnakajim-build linux-stable-rt]$ git show --oneline --name-only 705619beeea1b0b48219a683fd1a901a86fdaf5e
   705619beeea1 Linux 4.4.50-rt62
   localversion-rt

c) One patch has already been backported, so exclude it when applying
   the diff:

   filterdiff -p1 -x scripts/package/Makefile ~/tmp/v4.4.6-rt14-4.4.50-rt62.diff | patch -p1 --dry-run

   (These steps are consolidated into the sketch after the diffstat below.)

Upstream status: backport

Change-Id: I244d57a32f6066e5a5b9915f9fbf99e7bbca6e01
Signed-off-by: Yunhong Jiang
---
 kernel/arch/mips/Kconfig.debug                     |  36 ----
 kernel/arch/mips/Makefile                          |   4 -
 kernel/arch/mips/alchemy/devboards/db1000.c        |  18 +-
 kernel/arch/mips/alchemy/devboards/db1550.c        |   4 +-
 kernel/arch/mips/ath79/early_printk.c              |   6 +-
 kernel/arch/mips/include/asm/asmmacro.h            |  41 ++--
 kernel/arch/mips/include/asm/cacheflush.h          |   6 -
 kernel/arch/mips/include/asm/kvm_host.h            |  10 +-
 .../include/asm/mach-paravirt/kernel-entry-init.h  |   2 +
 kernel/arch/mips/include/asm/msa.h                 |  13 ++
 kernel/arch/mips/include/asm/pgtable.h             |  26 ++-
 kernel/arch/mips/include/asm/processor.h           |   2 +-
 kernel/arch/mips/include/asm/ptrace.h              |   2 +-
 kernel/arch/mips/include/asm/switch_to.h           |   2 +-
 kernel/arch/mips/include/asm/uaccess.h             |   3 +
 kernel/arch/mips/include/asm/uprobes.h             |   1 -
 kernel/arch/mips/include/asm/watch.h               |  10 +-
 kernel/arch/mips/include/uapi/asm/siginfo.h        |  22 +-
 kernel/arch/mips/kernel/csrc-r4k.c                 |   4 +-
 kernel/arch/mips/kernel/mips-r2-to-r6-emul.c       | 107 +++++-----
 kernel/arch/mips/kernel/pm.c                       |   2 +-
 kernel/arch/mips/kernel/process.c                  |  14 +-
 kernel/arch/mips/kernel/ptrace.c                   |  27 ++-
 kernel/arch/mips/kernel/r4k_fpu.S                  |  10 +-
 kernel/arch/mips/kernel/scall64-n32.S              |   2 +-
 kernel/arch/mips/kernel/scall64-o32.S              |   2 +-
 kernel/arch/mips/kernel/setup.c                    |   3 +
 kernel/arch/mips/kernel/signal.c                   |  15 +-
 kernel/arch/mips/kernel/smp.c                      |   7 +-
 kernel/arch/mips/kernel/traps.c                    |   8 +-
 kernel/arch/mips/kernel/unaligned.c                |  51 +++--
 kernel/arch/mips/kernel/uprobes.c                  |  25 +--
 kernel/arch/mips/kernel/vdso.c                     |   8 +-
 kernel/arch/mips/kernel/watch.c                    |   5 +-
 kernel/arch/mips/kvm/emulate.c                     | 224 ++++++++++++++-------
 kernel/arch/mips/kvm/interrupt.h                   |   1 +
 kernel/arch/mips/kvm/locore.S                      |   1 +
 kernel/arch/mips/kvm/mips.c                        |  15 +-
 kernel/arch/mips/kvm/tlb.c                         |  70 ++++---
 kernel/arch/mips/kvm/trap_emul.c                   |   2 +-
 kernel/arch/mips/lib/ashldi3.c                     |   2 +-
 kernel/arch/mips/lib/ashrdi3.c                     |   2 +-
 kernel/arch/mips/lib/bswapdi.c                     |   2 +-
 kernel/arch/mips/lib/bswapsi.c                     |   2 +-
 kernel/arch/mips/lib/cmpdi2.c                      |   2 +-
 kernel/arch/mips/lib/lshrdi3.c                     |   2 +-
 kernel/arch/mips/lib/ucmpdi2.c                     |   2 +-
 kernel/arch/mips/loongson64/loongson-3/hpet.c      |  14 +-
 kernel/arch/mips/loongson64/loongson-3/numa.c      |   6 +-
 kernel/arch/mips/math-emu/cp1emu.c                 |   8 +-
 kernel/arch/mips/mm/cache.c                        |  41 ++--
 kernel/arch/mips/mm/uasm-mips.c                    |   2 +-
 kernel/arch/mips/mti-malta/malta-setup.c           |   8 +-
 kernel/arch/mips/vdso/Makefile                     |   6 +-
 54 files changed, 511 insertions(+), 399 deletions(-)

(limited to 'kernel/arch/mips')
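For reference, a minimal consolidated sketch of the procedure described above. The scratch file name is the one the filterdiff command references; producing it with a redirected git diff, the target tree path, and the final non-dry-run application are assumptions rather than part of the original log:

  # Fetch the stable-rt history and capture the v4.4.6-rt14..v4.4.50-rt62 delta.
  git clone git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git
  cd linux-stable-rt
  git diff 640eca2901f3435e616157b11379d3223a44b391 \
           705619beeea1b0b48219a683fd1a901a86fdaf5e \
           > ~/tmp/v4.4.6-rt14-4.4.50-rt62.diff

  # In the tree being upgraded (hypothetical path), drop the already-backported
  # scripts/package/Makefile change, verify with --dry-run, then apply.
  cd /path/to/target/tree
  filterdiff -p1 -x scripts/package/Makefile ~/tmp/v4.4.6-rt14-4.4.50-rt62.diff | patch -p1 --dry-run
  filterdiff -p1 -x scripts/package/Makefile ~/tmp/v4.4.6-rt14-4.4.50-rt62.diff | patch -p1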
diff --git a/kernel/arch/mips/Kconfig.debug b/kernel/arch/mips/Kconfig.debug index f0e314ceb..7f975b20b 100644 --- a/kernel/arch/mips/Kconfig.debug +++ b/kernel/arch/mips/Kconfig.debug @@ -113,42 +113,6 @@ config SPINLOCK_TEST help Add several files to the debugfs to test spinlock speed. -if CPU_MIPSR6 - -choice - prompt "Compact branch policy" - default MIPS_COMPACT_BRANCHES_OPTIMAL - -config MIPS_COMPACT_BRANCHES_NEVER - bool "Never (force delay slot branches)" - help - Pass the -mcompact-branches=never flag to the compiler in order to - force it to always emit branches with delay slots, and make no use - of the compact branch instructions introduced by MIPSr6. This is - useful if you suspect there may be an issue with compact branches in - either the compiler or the CPU. - -config MIPS_COMPACT_BRANCHES_OPTIMAL - bool "Optimal (use where beneficial)" - help - Pass the -mcompact-branches=optimal flag to the compiler in order for - it to make use of compact branch instructions where it deems them - beneficial, and use branches with delay slots elsewhere. This is the - default compiler behaviour, and should be used unless you have a - reason to choose otherwise. - -config MIPS_COMPACT_BRANCHES_ALWAYS - bool "Always (force compact branches)" - help - Pass the -mcompact-branches=always flag to the compiler in order to - force it to always emit compact branches, making no use of branch - instructions with delay slots. This can result in more compact code - which may be beneficial in some scenarios. - -endchoice - -endif # CPU_MIPSR6 - config SCACHE_DEBUGFS bool "L2 cache debugfs entries" depends on DEBUG_FS diff --git a/kernel/arch/mips/Makefile b/kernel/arch/mips/Makefile index 3f70ba54a..252e34795 100644 --- a/kernel/arch/mips/Makefile +++ b/kernel/arch/mips/Makefile @@ -204,10 +204,6 @@ toolchain-msa := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$( cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA endif -cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER) += -mcompact-branches=never -cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal -cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS) += -mcompact-branches=always - # # Firmware support # diff --git a/kernel/arch/mips/alchemy/devboards/db1000.c b/kernel/arch/mips/alchemy/devboards/db1000.c index bdeed9d13..433c4b9a9 100644 --- a/kernel/arch/mips/alchemy/devboards/db1000.c +++ b/kernel/arch/mips/alchemy/devboards/db1000.c @@ -503,15 +503,15 @@ int __init db1000_dev_setup(void) if (board == BCSR_WHOAMI_DB1500) { c0 = AU1500_GPIO2_INT; c1 = AU1500_GPIO5_INT; - d0 = AU1500_GPIO0_INT; - d1 = AU1500_GPIO3_INT; + d0 = 0; /* GPIO number, NOT irq! */ + d1 = 3; /* GPIO number, NOT irq! */ s0 = AU1500_GPIO1_INT; s1 = AU1500_GPIO4_INT; } else if (board == BCSR_WHOAMI_DB1100) { c0 = AU1100_GPIO2_INT; c1 = AU1100_GPIO5_INT; - d0 = AU1100_GPIO0_INT; - d1 = AU1100_GPIO3_INT; + d0 = 0; /* GPIO number, NOT irq! */ + d1 = 3; /* GPIO number, NOT irq! */ s0 = AU1100_GPIO1_INT; s1 = AU1100_GPIO4_INT; @@ -545,15 +545,15 @@ int __init db1000_dev_setup(void) } else if (board == BCSR_WHOAMI_DB1000) { c0 = AU1000_GPIO2_INT; c1 = AU1000_GPIO5_INT; - d0 = AU1000_GPIO0_INT; - d1 = AU1000_GPIO3_INT; + d0 = 0; /* GPIO number, NOT irq! */ + d1 = 3; /* GPIO number, NOT irq!
*/ s0 = AU1000_GPIO1_INT; s1 = AU1000_GPIO4_INT; platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs)); } else if ((board == BCSR_WHOAMI_PB1500) || (board == BCSR_WHOAMI_PB1500R2)) { c0 = AU1500_GPIO203_INT; - d0 = AU1500_GPIO201_INT; + d0 = 1; /* GPIO number, NOT irq! */ s0 = AU1500_GPIO202_INT; twosocks = 0; flashsize = 64; @@ -566,7 +566,7 @@ int __init db1000_dev_setup(void) */ } else if (board == BCSR_WHOAMI_PB1100) { c0 = AU1100_GPIO11_INT; - d0 = AU1100_GPIO9_INT; + d0 = 9; /* GPIO number, NOT irq! */ s0 = AU1100_GPIO10_INT; twosocks = 0; flashsize = 64; @@ -583,7 +583,6 @@ int __init db1000_dev_setup(void) } else return 0; /* unknown board, no further dev setup to do */ - irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH); irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW); @@ -597,7 +596,6 @@ int __init db1000_dev_setup(void) c0, d0, /*s0*/0, 0, 0); if (twosocks) { - irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH); irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW); diff --git a/kernel/arch/mips/alchemy/devboards/db1550.c b/kernel/arch/mips/alchemy/devboards/db1550.c index 5740bcfdf..6c37b9326 100644 --- a/kernel/arch/mips/alchemy/devboards/db1550.c +++ b/kernel/arch/mips/alchemy/devboards/db1550.c @@ -514,7 +514,7 @@ static void __init db1550_devices(void) AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, AU1000_PCMCIA_IO_PHYS_ADDR, AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, - AU1550_GPIO3_INT, AU1550_GPIO0_INT, + AU1550_GPIO3_INT, 0, /*AU1550_GPIO21_INT*/0, 0, 0); db1x_register_pcmcia_socket( @@ -524,7 +524,7 @@ static void __init db1550_devices(void) AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1, AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000, AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1, - AU1550_GPIO5_INT, AU1550_GPIO1_INT, + AU1550_GPIO5_INT, 1, /*AU1550_GPIO22_INT*/0, 0, 1); platform_device_register(&db1550_nand_dev); diff --git a/kernel/arch/mips/ath79/early_printk.c b/kernel/arch/mips/ath79/early_printk.c index b955fafc5..d1adc59af 100644 --- a/kernel/arch/mips/ath79/early_printk.c +++ b/kernel/arch/mips/ath79/early_printk.c @@ -31,13 +31,15 @@ static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val) } while (1); } +#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) + static void prom_putchar_ar71xx(unsigned char ch) { void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE)); - prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE); + prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY); __raw_writel(ch, base + UART_TX * 4); - prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE); + prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY); } static void prom_putchar_ar933x(unsigned char ch) diff --git a/kernel/arch/mips/include/asm/asmmacro.h b/kernel/arch/mips/include/asm/asmmacro.h index 867f924b0..8dedee1de 100644 --- a/kernel/arch/mips/include/asm/asmmacro.h +++ b/kernel/arch/mips/include/asm/asmmacro.h @@ -135,6 +135,7 @@ ldc1 $f28, THREAD_FPR28(\thread) ldc1 $f30, THREAD_FPR30(\thread) ctc1 \tmp, fcr31 + .set pop .endm .macro fpu_restore_16odd thread @@ -298,21 +299,21 @@ .set pop .endm - .macro copy_u_w ws, n + .macro copy_s_w ws, n .set push .set mips32r2 .set fp=64 .set msa - copy_u.w $1, $w\ws[\n] + copy_s.w $1, $w\ws[\n] .set pop .endm - .macro copy_u_d ws, n + .macro copy_s_d ws, n .set push .set mips64r2 .set fp=64 .set msa - copy_u.d $1, $w\ws[\n] + copy_s.d $1, $w\ws[\n] .set pop .endm @@ -346,8 +347,8 @@ #define STH_MSA_INSN 0x5800081f 
#define STW_MSA_INSN 0x5800082f #define STD_MSA_INSN 0x5800083f -#define COPY_UW_MSA_INSN 0x58f00056 -#define COPY_UD_MSA_INSN 0x58f80056 +#define COPY_SW_MSA_INSN 0x58b00056 +#define COPY_SD_MSA_INSN 0x58b80056 #define INSERT_W_MSA_INSN 0x59300816 #define INSERT_D_MSA_INSN 0x59380816 #else @@ -361,8 +362,8 @@ #define STH_MSA_INSN 0x78000825 #define STW_MSA_INSN 0x78000826 #define STD_MSA_INSN 0x78000827 -#define COPY_UW_MSA_INSN 0x78f00059 -#define COPY_UD_MSA_INSN 0x78f80059 +#define COPY_SW_MSA_INSN 0x78b00059 +#define COPY_SD_MSA_INSN 0x78b80059 #define INSERT_W_MSA_INSN 0x79300819 #define INSERT_D_MSA_INSN 0x79380819 #endif @@ -393,7 +394,7 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word LDB_MSA_INSN | (\wd << 6) .set pop .endm @@ -402,7 +403,7 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word LDH_MSA_INSN | (\wd << 6) .set pop .endm @@ -411,7 +412,7 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word LDW_MSA_INSN | (\wd << 6) .set pop .endm @@ -420,7 +421,7 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word LDD_MSA_INSN | (\wd << 6) .set pop .endm @@ -429,7 +430,7 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word STB_MSA_INSN | (\wd << 6) .set pop .endm @@ -438,7 +439,7 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word STH_MSA_INSN | (\wd << 6) .set pop .endm @@ -447,7 +448,7 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word STW_MSA_INSN | (\wd << 6) .set pop .endm @@ -456,26 +457,26 @@ .set push .set noat SET_HARDFLOAT - addu $1, \base, \off + PTR_ADDU $1, \base, \off .word STD_MSA_INSN | (\wd << 6) .set pop .endm - .macro copy_u_w ws, n + .macro copy_s_w ws, n .set push .set noat SET_HARDFLOAT .insn - .word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11) + .word COPY_SW_MSA_INSN | (\n << 16) | (\ws << 11) .set pop .endm - .macro copy_u_d ws, n + .macro copy_s_d ws, n .set push .set noat SET_HARDFLOAT .insn - .word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11) + .word COPY_SD_MSA_INSN | (\n << 16) | (\ws << 11) .set pop .endm diff --git a/kernel/arch/mips/include/asm/cacheflush.h b/kernel/arch/mips/include/asm/cacheflush.h index 723229f4c..176de586a 100644 --- a/kernel/arch/mips/include/asm/cacheflush.h +++ b/kernel/arch/mips/include/asm/cacheflush.h @@ -51,7 +51,6 @@ extern void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn); extern void __flush_dcache_page(struct page *page); -extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page); #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 static inline void flush_dcache_page(struct page *page) @@ -77,11 +76,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma, static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) { - if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) && - Page_dcache_dirty(page)) { - __flush_icache_page(vma, page); - ClearPageDcacheDirty(page); - } } extern void (*flush_icache_range)(unsigned long start, unsigned long end); diff --git a/kernel/arch/mips/include/asm/kvm_host.h b/kernel/arch/mips/include/asm/kvm_host.h index 6ded8d347..c8c04a1f1 100644 --- a/kernel/arch/mips/include/asm/kvm_host.h +++ 
b/kernel/arch/mips/include/asm/kvm_host.h @@ -372,6 +372,7 @@ struct kvm_mips_tlb { #define KVM_MIPS_GUEST_TLB_SIZE 64 struct kvm_vcpu_arch { void *host_ebase, *guest_ebase; + int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu); unsigned long host_stack; unsigned long host_gp; @@ -399,7 +400,10 @@ struct kvm_vcpu_arch { /* Host KSEG0 address of the EI/DI offset */ void *kseg0_commpage; - u32 io_gpr; /* GPR used as IO source/target */ + /* Resume PC after MMIO completion */ + unsigned long io_pc; + /* GPR used as IO source/target */ + u32 io_gpr; struct hrtimer comparecount_timer; /* Count timer control KVM register */ @@ -421,8 +425,6 @@ struct kvm_vcpu_arch { /* Bitmask of pending exceptions to be cleared */ unsigned long pending_exceptions_clr; - unsigned long pending_load_cause; - /* Save/Restore the entryhi register when are are preempted/scheduled back in */ unsigned long preempt_entryhi; @@ -784,7 +786,7 @@ extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu); void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count); -void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare); +void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack); void kvm_mips_init_count(struct kvm_vcpu *vcpu); int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl); int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume); diff --git a/kernel/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h b/kernel/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h index 2f82bfa3a..c9f5769df 100644 --- a/kernel/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h +++ b/kernel/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h @@ -11,11 +11,13 @@ #define CP0_EBASE $15, 1 .macro kernel_entry_setup +#ifdef CONFIG_SMP mfc0 t0, CP0_EBASE andi t0, t0, 0x3ff # CPUNum beqz t0, 1f # CPUs other than zero goto smp_bootstrap j smp_bootstrap +#endif /* CONFIG_SMP */ 1: .endm diff --git a/kernel/arch/mips/include/asm/msa.h b/kernel/arch/mips/include/asm/msa.h index bbb85fe21..6e4effa6f 100644 --- a/kernel/arch/mips/include/asm/msa.h +++ b/kernel/arch/mips/include/asm/msa.h @@ -147,6 +147,19 @@ static inline void restore_msa(struct task_struct *t) _restore_msa(t); } +static inline void init_msa_upper(void) +{ + /* + * Check cpu_has_msa only if it's a constant. This will allow the + * compiler to optimise out code for CPUs without MSA without adding + * an extra redundant check for CPUs with MSA. 
+ */ + if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa) + return; + + _init_msa_upper(); +} + #ifdef TOOLCHAIN_SUPPORTS_MSA #define __BUILD_MSA_CTL_REG(name, cs) \ diff --git a/kernel/arch/mips/include/asm/pgtable.h b/kernel/arch/mips/include/asm/pgtable.h index 18826aa15..4e68c644a 100644 --- a/kernel/arch/mips/include/asm/pgtable.h +++ b/kernel/arch/mips/include/asm/pgtable.h @@ -127,10 +127,14 @@ do { \ } \ } while(0) +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval); + #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) #define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL)) #define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT) +#define pte_no_exec(pte) ((pte).pte_low & _PAGE_NO_EXEC) static inline void set_pte(pte_t *ptep, pte_t pte) { @@ -148,7 +152,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte) buddy->pte_high |= _PAGE_GLOBAL; } } -#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -166,6 +169,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) +#define pte_no_exec(pte) (pte_val(pte) & _PAGE_NO_EXEC) /* * Certain architectures need to do special things when pte's @@ -218,7 +222,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) } #endif } -#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -234,6 +237,22 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt } #endif +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + extern void __update_cache(unsigned long address, pte_t pte); + + if (!pte_present(pteval)) + goto cache_sync_done; + + if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval))) + goto cache_sync_done; + + __update_cache(addr, pteval); +cache_sync_done: + set_pte(ptep, pteval); +} + /* * (pmds are folded into puds so this doesn't get actually called, * but the define is needed for a generic inline function.) @@ -430,15 +449,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) extern void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte); -extern void __update_cache(struct vm_area_struct *vma, unsigned long address, - pte_t pte); static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { pte_t pte = *ptep; __update_tlb(vma, address, pte); - __update_cache(vma, address, pte); } static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, diff --git a/kernel/arch/mips/include/asm/processor.h b/kernel/arch/mips/include/asm/processor.h index 3f832c3dd..041153f5c 100644 --- a/kernel/arch/mips/include/asm/processor.h +++ b/kernel/arch/mips/include/asm/processor.h @@ -45,7 +45,7 @@ extern unsigned int vced_count, vcei_count; * User space process size: 2GB. This is hardcoded into a few places, * so don't change it unless you know what you are doing. 
*/ -#define TASK_SIZE 0x7fff8000UL +#define TASK_SIZE 0x80000000UL #endif #define STACK_TOP_MAX TASK_SIZE diff --git a/kernel/arch/mips/include/asm/ptrace.h b/kernel/arch/mips/include/asm/ptrace.h index f6fc6aac5..b6578611d 100644 --- a/kernel/arch/mips/include/asm/ptrace.h +++ b/kernel/arch/mips/include/asm/ptrace.h @@ -152,7 +152,7 @@ static inline int is_syscall_success(struct pt_regs *regs) static inline long regs_return_value(struct pt_regs *regs) { - if (is_syscall_success(regs)) + if (is_syscall_success(regs) || !user_mode(regs)) return regs->regs[2]; else return -regs->regs[2]; diff --git a/kernel/arch/mips/include/asm/switch_to.h b/kernel/arch/mips/include/asm/switch_to.h index 28b5d84a5..ebb5c0f2f 100644 --- a/kernel/arch/mips/include/asm/switch_to.h +++ b/kernel/arch/mips/include/asm/switch_to.h @@ -105,7 +105,7 @@ do { \ __clear_software_ll_bit(); \ if (cpu_has_userlocal) \ write_c0_userlocal(task_thread_info(next)->tp_value); \ - __restore_watch(); \ + __restore_watch(next); \ (last) = resume(prev, next, task_thread_info(next)); \ } while (0) diff --git a/kernel/arch/mips/include/asm/uaccess.h b/kernel/arch/mips/include/asm/uaccess.h index 095ecafe6..c74c32ccc 100644 --- a/kernel/arch/mips/include/asm/uaccess.h +++ b/kernel/arch/mips/include/asm/uaccess.h @@ -14,6 +14,7 @@ #include #include #include +#include #include /* @@ -1170,6 +1171,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); __cu_len = __invoke_copy_from_user(__cu_to, \ __cu_from, \ __cu_len); \ + } else { \ + memset(__cu_to, 0, __cu_len); \ } \ } \ __cu_len; \ diff --git a/kernel/arch/mips/include/asm/uprobes.h b/kernel/arch/mips/include/asm/uprobes.h index 34c325c67..70a4a2f17 100644 --- a/kernel/arch/mips/include/asm/uprobes.h +++ b/kernel/arch/mips/include/asm/uprobes.h @@ -36,7 +36,6 @@ struct arch_uprobe { unsigned long resume_epc; u32 insn[2]; u32 ixol[2]; - union mips_instruction orig_inst[MAX_UINSN_BYTES / 4]; }; struct arch_uprobe_task { diff --git a/kernel/arch/mips/include/asm/watch.h b/kernel/arch/mips/include/asm/watch.h index 20126ec79..6ffe3eadf 100644 --- a/kernel/arch/mips/include/asm/watch.h +++ b/kernel/arch/mips/include/asm/watch.h @@ -12,21 +12,21 @@ #include -void mips_install_watch_registers(void); +void mips_install_watch_registers(struct task_struct *t); void mips_read_watch_registers(void); void mips_clear_watch_registers(void); void mips_probe_watch_registers(struct cpuinfo_mips *c); #ifdef CONFIG_HARDWARE_WATCHPOINTS -#define __restore_watch() do { \ +#define __restore_watch(task) do { \ if (unlikely(test_bit(TIF_LOAD_WATCH, \ - &current_thread_info()->flags))) { \ - mips_install_watch_registers(); \ + &task_thread_info(task)->flags))) { \ + mips_install_watch_registers(task); \ } \ } while (0) #else -#define __restore_watch() do {} while (0) +#define __restore_watch(task) do {} while (0) #endif #endif /* _ASM_WATCH_H */ diff --git a/kernel/arch/mips/include/uapi/asm/siginfo.h b/kernel/arch/mips/include/uapi/asm/siginfo.h index 2cb7fdead..e2b5337e8 100644 --- a/kernel/arch/mips/include/uapi/asm/siginfo.h +++ b/kernel/arch/mips/include/uapi/asm/siginfo.h @@ -28,7 +28,7 @@ #define __ARCH_SIGSYS -#include +#include /* We can't use generic siginfo_t, because our si_code and si_errno are swapped */ typedef struct siginfo { @@ -42,13 +42,13 @@ typedef struct siginfo { /* kill() */ struct { - pid_t _pid; /* sender's pid */ + __kernel_pid_t _pid; /* sender's pid */ __ARCH_SI_UID_T _uid; /* sender's uid */ } _kill; /* POSIX.1b timers */ struct { - timer_t _tid; /*
timer id */ + __kernel_timer_t _tid; /* timer id */ int _overrun; /* overrun count */ char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)]; sigval_t _sigval; /* same as below */ @@ -57,26 +57,26 @@ typedef struct siginfo { /* POSIX.1b signals */ struct { - pid_t _pid; /* sender's pid */ + __kernel_pid_t _pid; /* sender's pid */ __ARCH_SI_UID_T _uid; /* sender's uid */ sigval_t _sigval; } _rt; /* SIGCHLD */ struct { - pid_t _pid; /* which child */ + __kernel_pid_t _pid; /* which child */ __ARCH_SI_UID_T _uid; /* sender's uid */ int _status; /* exit code */ - clock_t _utime; - clock_t _stime; + __kernel_clock_t _utime; + __kernel_clock_t _stime; } _sigchld; /* IRIX SIGCHLD */ struct { - pid_t _pid; /* which child */ - clock_t _utime; + __kernel_pid_t _pid; /* which child */ + __kernel_clock_t _utime; int _status; /* exit code */ - clock_t _stime; + __kernel_clock_t _stime; } _irix_sigchld; /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ @@ -118,6 +118,4 @@ typedef struct siginfo { #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */ #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */ -#include - #endif /* _UAPI_ASM_SIGINFO_H */ diff --git a/kernel/arch/mips/kernel/csrc-r4k.c b/kernel/arch/mips/kernel/csrc-r4k.c index 1f910563f..d76275da5 100644 --- a/kernel/arch/mips/kernel/csrc-r4k.c +++ b/kernel/arch/mips/kernel/csrc-r4k.c @@ -23,7 +23,7 @@ static struct clocksource clocksource_mips = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static u64 notrace r4k_read_sched_clock(void) +static u64 __maybe_unused notrace r4k_read_sched_clock(void) { return read_c0_count(); } @@ -82,7 +82,9 @@ int __init init_r4k_clocksource(void) clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); +#ifndef CONFIG_CPU_FREQ sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); +#endif return 0; } diff --git a/kernel/arch/mips/kernel/mips-r2-to-r6-emul.c b/kernel/arch/mips/kernel/mips-r2-to-r6-emul.c index 1f5aac7f9..af27334d6 100644 --- a/kernel/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/kernel/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -1163,7 +1164,9 @@ fpu_emul: regs->regs[31] = r31; regs->cp0_epc = epc; if (!used_math()) { /* First time FPU user. */ + preempt_disable(); err = init_fpu(); + preempt_enable(); set_used_math(); } lose_fpu(1); /* Save FPU state for the emulator. 
*/ @@ -1251,10 +1254,10 @@ fpu_emul: " j 10b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -1326,10 +1329,10 @@ fpu_emul: " j 10b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -1397,10 +1400,10 @@ fpu_emul: " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -1467,10 +1470,10 @@ fpu_emul: " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -1582,14 +1585,14 @@ fpu_emul: " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" - " .word 5b,8b\n" - " .word 6b,8b\n" - " .word 7b,8b\n" - " .word 0b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" + STR(PTR) " 5b,8b\n" + STR(PTR) " 6b,8b\n" + STR(PTR) " 7b,8b\n" + STR(PTR) " 0b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -1701,14 +1704,14 @@ fpu_emul: " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" - " .word 5b,8b\n" - " .word 6b,8b\n" - " .word 7b,8b\n" - " .word 0b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" + STR(PTR) " 5b,8b\n" + STR(PTR) " 6b,8b\n" + STR(PTR) " 7b,8b\n" + STR(PTR) " 0b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -1820,14 +1823,14 @@ fpu_emul: " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" - " .word 5b,8b\n" - " .word 6b,8b\n" - " .word 7b,8b\n" - " .word 0b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" + STR(PTR) " 5b,8b\n" + STR(PTR) " 6b,8b\n" + STR(PTR) " 7b,8b\n" + STR(PTR) " 0b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -1938,14 +1941,14 @@ fpu_emul: " j 9b\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .word 1b,8b\n" - " .word 2b,8b\n" - " .word 3b,8b\n" - " .word 4b,8b\n" - " .word 5b,8b\n" - " .word 6b,8b\n" - " .word 7b,8b\n" - " .word 0b,8b\n" + STR(PTR) " 1b,8b\n" + STR(PTR) " 2b,8b\n" + STR(PTR) " 3b,8b\n" + STR(PTR) " 4b,8b\n" + STR(PTR) " 5b,8b\n" + STR(PTR) " 6b,8b\n" + STR(PTR) " 7b,8b\n" + STR(PTR) " 0b,8b\n" " .previous\n" " .set pop\n" : "+&r"(rt), "=&r"(rs), @@ -2000,7 +2003,7 @@ fpu_emul: "j 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" - ".word 1b, 3b\n" + STR(PTR) " 1b,3b\n" ".previous\n" : "=&r"(res), "+&r"(err) : "r"(vaddr), "i"(SIGSEGV) @@ -2058,7 +2061,7 @@ fpu_emul: "j 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" - ".word 1b, 3b\n" + STR(PTR) " 1b,3b\n" ".previous\n" : "+&r"(res), "+&r"(err) : "r"(vaddr), "i"(SIGSEGV)); @@ -2119,7 +2122,7 @@ fpu_emul: "j 2b\n" 
".previous\n" ".section __ex_table,\"a\"\n" - ".word 1b, 3b\n" + STR(PTR) " 1b,3b\n" ".previous\n" : "=&r"(res), "+&r"(err) : "r"(vaddr), "i"(SIGSEGV) @@ -2182,7 +2185,7 @@ fpu_emul: "j 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" - ".word 1b, 3b\n" + STR(PTR) " 1b,3b\n" ".previous\n" : "+&r"(res), "+&r"(err) : "r"(vaddr), "i"(SIGSEGV)); diff --git a/kernel/arch/mips/kernel/pm.c b/kernel/arch/mips/kernel/pm.c index fefdf39d3..dc8148921 100644 --- a/kernel/arch/mips/kernel/pm.c +++ b/kernel/arch/mips/kernel/pm.c @@ -56,7 +56,7 @@ static void mips_cpu_restore(void) write_c0_userlocal(current_thread_info()->tp_value); /* Restore watch registers */ - __restore_watch(); + __restore_watch(current); } /** diff --git a/kernel/arch/mips/kernel/process.c b/kernel/arch/mips/kernel/process.c index f2975d4d1..44a6f25e9 100644 --- a/kernel/arch/mips/kernel/process.c +++ b/kernel/arch/mips/kernel/process.c @@ -457,7 +457,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page, *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) { regs = (struct pt_regs *)*sp; pc = regs->cp0_epc; - if (__kernel_text_address(pc)) { + if (!user_mode(regs) && __kernel_text_address(pc)) { *sp = regs->regs[29]; *ra = regs->regs[31]; return pc; @@ -593,16 +593,19 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) return -EOPNOTSUPP; /* Avoid inadvertently triggering emulation */ - if ((value & PR_FP_MODE_FR) && cpu_has_fpu && - !(current_cpu_data.fpu_id & MIPS_FPIR_F64)) + if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu && + !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64)) return -EOPNOTSUPP; - if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre) + if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre) return -EOPNOTSUPP; /* FR = 0 not supported in MIPS R6 */ - if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6) + if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6) return -EOPNOTSUPP; + /* Proceed with the mode switch */ + preempt_disable(); + /* Save FP & vector context, then disable FPU & MSA */ if (task->signal == current->signal) lose_fpu(1); @@ -661,6 +664,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) /* Allow threads to use FP again */ atomic_set(&task->mm->context.fp_mode_switching, 0); + preempt_enable(); return 0; } diff --git a/kernel/arch/mips/kernel/ptrace.c b/kernel/arch/mips/kernel/ptrace.c index 4f0ac78d1..74d581569 100644 --- a/kernel/arch/mips/kernel/ptrace.c +++ b/kernel/arch/mips/kernel/ptrace.c @@ -57,8 +57,7 @@ static void init_fp_ctx(struct task_struct *target) /* Begin with data registers set to all 1s... */ memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr)); - /* ...and FCSR zeroed */ - target->thread.fpu.fcr31 = 0; + /* FCSR has been preset by `mips_set_personality_nan'. */ /* * Record that the target has "used" math, such that the context @@ -79,6 +78,22 @@ void ptrace_disable(struct task_struct *child) clear_tsk_thread_flag(child, TIF_LOAD_WATCH); } +/* + * Poke at FCSR according to its mask. Don't set the cause bits as + * this is currently not handled correctly in FP context restoration + * and will cause an oops if a corresponding enable bit is set. + */ +static void ptrace_setfcr31(struct task_struct *child, u32 value) +{ + u32 fcr31; + u32 mask; + + value &= ~FPU_CSR_ALL_X; + fcr31 = child->thread.fpu.fcr31; + mask = boot_cpu_data.fpu_msk31; + child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask); +} + /* * Read a general register set. 
We always use the 64-bit format, even * for 32-bit kernels and for 32-bit processes on a 64-bit kernel. @@ -159,9 +174,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) { union fpureg *fregs; u64 fpr_val; - u32 fcr31; u32 value; - u32 mask; int i; if (!access_ok(VERIFY_READ, data, 33 * 8)) @@ -176,9 +189,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) } __get_user(value, data + 64); - fcr31 = child->thread.fpu.fcr31; - mask = boot_cpu_data.fpu_msk31; - child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask); + ptrace_setfcr31(child, value); /* FIR may not be written. */ @@ -808,7 +819,7 @@ long arch_ptrace(struct task_struct *child, long request, break; #endif case FPC_CSR: - child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X; + ptrace_setfcr31(child, data); break; case DSP_BASE ... DSP_BASE + 5: { dspreg_t *dregs; diff --git a/kernel/arch/mips/kernel/r4k_fpu.S b/kernel/arch/mips/kernel/r4k_fpu.S index f09546ee2..bc74485ec 100644 --- a/kernel/arch/mips/kernel/r4k_fpu.S +++ b/kernel/arch/mips/kernel/r4k_fpu.S @@ -244,17 +244,17 @@ LEAF(\name) .set push .set noat #ifdef CONFIG_64BIT - copy_u_d \wr, 1 + copy_s_d \wr, 1 EX sd $1, \off(\base) #elif defined(CONFIG_CPU_LITTLE_ENDIAN) - copy_u_w \wr, 2 + copy_s_w \wr, 2 EX sw $1, \off(\base) - copy_u_w \wr, 3 + copy_s_w \wr, 3 EX sw $1, (\off+4)(\base) #else /* CONFIG_CPU_BIG_ENDIAN */ - copy_u_w \wr, 2 + copy_s_w \wr, 2 EX sw $1, (\off+4)(\base) - copy_u_w \wr, 3 + copy_s_w \wr, 3 EX sw $1, \off(\base) #endif .set pop diff --git a/kernel/arch/mips/kernel/scall64-n32.S b/kernel/arch/mips/kernel/scall64-n32.S index 5a69eb48d..ee93d5fe6 100644 --- a/kernel/arch/mips/kernel/scall64-n32.S +++ b/kernel/arch/mips/kernel/scall64-n32.S @@ -344,7 +344,7 @@ EXPORT(sysn32_call_table) PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key PTR sys_request_key - PTR sys_keyctl /* 6245 */ + PTR compat_sys_keyctl /* 6245 */ PTR sys_set_thread_area PTR sys_inotify_init PTR sys_inotify_add_watch diff --git a/kernel/arch/mips/kernel/scall64-o32.S b/kernel/arch/mips/kernel/scall64-o32.S index e4b6d7c97..b77052ec6 100644 --- a/kernel/arch/mips/kernel/scall64-o32.S +++ b/kernel/arch/mips/kernel/scall64-o32.S @@ -500,7 +500,7 @@ EXPORT(sys32_call_table) PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key /* 4280 */ PTR sys_request_key - PTR sys_keyctl + PTR compat_sys_keyctl PTR sys_set_thread_area PTR sys_inotify_init PTR sys_inotify_add_watch /* 4285 */ diff --git a/kernel/arch/mips/kernel/setup.c b/kernel/arch/mips/kernel/setup.c index 66aac55df..8acae316f 100644 --- a/kernel/arch/mips/kernel/setup.c +++ b/kernel/arch/mips/kernel/setup.c @@ -706,6 +706,9 @@ static void __init arch_mem_init(char **cmdline_p) for_each_memblock(reserved, reg) if (reg->size != 0) reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); + + reserve_bootmem_region(__pa_symbol(&__nosave_begin), + __pa_symbol(&__nosave_end)); /* Reserve for hibernation */ } static void __init resource_init(void) diff --git a/kernel/arch/mips/kernel/signal.c b/kernel/arch/mips/kernel/signal.c index bf792e283..9e35b6b26 100644 --- a/kernel/arch/mips/kernel/signal.c +++ b/kernel/arch/mips/kernel/signal.c @@ -195,6 +195,9 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size) unsigned int csr; int i, err; + if (!config_enabled(CONFIG_CPU_HAS_MSA)) + return SIGSYS; + if (size != sizeof(*msa)) return -EINVAL; @@ -398,8 +401,8 @@ int protected_restore_fp_context(void __user *sc) } fp_done: - if (used & USED_EXTCONTEXT) - 
err |= restore_extcontext(sc_to_extcontext(sc)); + if (!err && (used & USED_EXTCONTEXT)) + err = restore_extcontext(sc_to_extcontext(sc)); return err ?: sig; } @@ -767,15 +770,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) sigset_t *oldset = sigmask_to_save(); int ret; struct mips_abi *abi = current->thread.abi; -#ifdef CONFIG_CPU_MICROMIPS - void *vdso; - unsigned long tmp = (unsigned long)current->mm->context.vdso; - - set_isa16_mode(tmp); - vdso = (void *)tmp; -#else void *vdso = current->mm->context.vdso; -#endif if (regs->regs[0]) { switch(regs->regs[2]) { diff --git a/kernel/arch/mips/kernel/smp.c b/kernel/arch/mips/kernel/smp.c index 2b521e07b..7fef02a9e 100644 --- a/kernel/arch/mips/kernel/smp.c +++ b/kernel/arch/mips/kernel/smp.c @@ -174,6 +174,9 @@ asmlinkage void start_secondary(void) cpumask_set_cpu(cpu, &cpu_coherent_mask); notify_cpu_starting(cpu); + cpumask_set_cpu(cpu, &cpu_callin_map); + synchronise_count_slave(cpu); + set_cpu_online(cpu, true); set_cpu_sibling_map(cpu); @@ -181,10 +184,6 @@ asmlinkage void start_secondary(void) calculate_cpu_foreign_map(); - cpumask_set_cpu(cpu, &cpu_callin_map); - - synchronise_count_slave(cpu); - /* * irq will be enabled in ->smp_finish(), enabling it too early * is dangerous. diff --git a/kernel/arch/mips/kernel/traps.c b/kernel/arch/mips/kernel/traps.c index ca9a81007..99a402231 100644 --- a/kernel/arch/mips/kernel/traps.c +++ b/kernel/arch/mips/kernel/traps.c @@ -144,7 +144,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) if (!task) task = current; - if (raw_show_trace || !__kernel_text_address(pc)) { + if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) { show_raw_backtrace(sp); return; } @@ -1241,7 +1241,7 @@ static int enable_restore_fp_context(int msa) err = init_fpu(); if (msa && !err) { enable_msa(); - _init_msa_upper(); + init_msa_upper(); set_thread_flag(TIF_USEDMSA); set_thread_flag(TIF_MSA_CTX_LIVE); } @@ -1304,7 +1304,7 @@ static int enable_restore_fp_context(int msa) */ prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE); if (!prior_msa && was_fpu_owner) { - _init_msa_upper(); + init_msa_upper(); goto out; } @@ -1321,7 +1321,7 @@ static int enable_restore_fp_context(int msa) * of each vector register such that it cannot see data left * behind by another task. */ - _init_msa_upper(); + init_msa_upper(); } else { /* We need to restore the vector context. */ restore_msa(current); diff --git a/kernel/arch/mips/kernel/unaligned.c b/kernel/arch/mips/kernel/unaligned.c index 490cea569..5c62065cb 100644 --- a/kernel/arch/mips/kernel/unaligned.c +++ b/kernel/arch/mips/kernel/unaligned.c @@ -885,7 +885,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, { union mips_instruction insn; unsigned long value; - unsigned int res; + unsigned int res, preempted; unsigned long origpc; unsigned long orig31; void __user *fault_addr = NULL; @@ -1226,27 +1226,36 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, sizeof(*fpr))) goto sigbus; - /* - * Disable preemption to avoid a race between copying - * state from userland, migrating to another CPU and - * updating the hardware vector register below. - */ - preempt_disable(); - - res = __copy_from_user_inatomic(fpr, addr, - sizeof(*fpr)); - if (res) - goto fault; - - /* - * Update the hardware register if it is in use by the - * task in this quantum, in order to avoid having to - * save & restore the whole vector context. 
- */ - if (test_thread_flag(TIF_USEDMSA)) - write_msa_wr(wd, fpr, df); + do { + /* + * If we have live MSA context keep track of + * whether we get preempted in order to avoid + * the register context we load being clobbered + * by the live context as it's saved during + * preemption. If we don't have live context + * then it can't be saved to clobber the value + * we load. + */ + preempted = test_thread_flag(TIF_USEDMSA); + + res = __copy_from_user_inatomic(fpr, addr, + sizeof(*fpr)); + if (res) + goto fault; - preempt_enable(); + /* + * Update the hardware register if it is in use + * by the task in this quantum, in order to + * avoid having to save & restore the whole + * vector context. + */ + preempt_disable(); + if (test_thread_flag(TIF_USEDMSA)) { + write_msa_wr(wd, fpr, df); + preempted = 0; + } + preempt_enable(); + } while (preempted); break; case msa_st_op: diff --git a/kernel/arch/mips/kernel/uprobes.c b/kernel/arch/mips/kernel/uprobes.c index 8452d933a..4e7b89f2e 100644 --- a/kernel/arch/mips/kernel/uprobes.c +++ b/kernel/arch/mips/kernel/uprobes.c @@ -157,7 +157,6 @@ bool is_trap_insn(uprobe_opcode_t *insn) int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; - union mips_instruction insn; /* * Now find the EPC where to resume after the breakpoint has been @@ -168,10 +167,10 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) unsigned long epc; epc = regs->cp0_epc; - __compute_return_epc_for_insn(regs, insn); + __compute_return_epc_for_insn(regs, + (union mips_instruction) aup->insn[0]); aup->resume_epc = regs->cp0_epc; } - utask->autask.saved_trap_nr = current->thread.trap_nr; current->thread.trap_nr = UPROBE_TRAP_NR; regs->cp0_epc = current->utask->xol_vaddr; @@ -257,7 +256,7 @@ unsigned long arch_uretprobe_hijack_return_addr( ra = regs->regs[31]; /* Replace the return address with the trampoline address */ - regs->regs[31] = ra; + regs->regs[31] = trampoline_vaddr; return ra; } @@ -280,24 +279,6 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN); } -/** - * set_orig_insn - Restore the original instruction. - * @mm: the probed process address space. - * @auprobe: arch specific probepoint information. - * @vaddr: the virtual address to insert the opcode. - * - * For mm @mm, restore the original opcode (opcode) at @vaddr. - * Return 0 (success) or a negative errno. - * - * This overrides the weak version in kernel/events/uprobes.c. 
*/ -int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, - unsigned long vaddr) -{ - return uprobe_write_opcode(mm, vaddr, - *(uprobe_opcode_t *)&auprobe->orig_inst[0].word); -} - void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, void *src, unsigned long len) { diff --git a/kernel/arch/mips/kernel/vdso.c b/kernel/arch/mips/kernel/vdso.c index 975e99759..5649a9e42 100644 --- a/kernel/arch/mips/kernel/vdso.c +++ b/kernel/arch/mips/kernel/vdso.c @@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = { static void __init init_vdso_image(struct mips_vdso_image *image) { unsigned long num_pages, i; + unsigned long data_pfn; BUG_ON(!PAGE_ALIGNED(image->data)); BUG_ON(!PAGE_ALIGNED(image->size)); num_pages = image->size / PAGE_SIZE; - for (i = 0; i < num_pages; i++) { - image->mapping.pages[i] = - virt_to_page(image->data + (i * PAGE_SIZE)); - } + data_pfn = __phys_to_pfn(__pa_symbol(image->data)); + for (i = 0; i < num_pages; i++) + image->mapping.pages[i] = pfn_to_page(data_pfn + i); } static int __init init_vdso(void) diff --git a/kernel/arch/mips/kernel/watch.c b/kernel/arch/mips/kernel/watch.c index 2a03abb5b..9b78e3751 100644 --- a/kernel/arch/mips/kernel/watch.c +++ b/kernel/arch/mips/kernel/watch.c @@ -15,10 +15,9 @@ * Install the watch registers for the current thread. A maximum of * four registers are installed although the machine may have more. */ -void mips_install_watch_registers(void) +void mips_install_watch_registers(struct task_struct *t) { - struct mips3264_watch_reg_state *watches = - &current->thread.watch.mips3264; + struct mips3264_watch_reg_state *watches = &t->thread.watch.mips3264; switch (current_cpu_data.watch_reg_use_cnt) { default: BUG(); diff --git a/kernel/arch/mips/kvm/emulate.c b/kernel/arch/mips/kvm/emulate.c index 41b1b090f..4c85ab808 100644 --- a/kernel/arch/mips/kvm/emulate.c +++ b/kernel/arch/mips/kvm/emulate.c @@ -302,12 +302,31 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu) */ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) { - ktime_t expires; + struct mips_coproc *cop0 = vcpu->arch.cop0; + ktime_t expires, threshold; + uint32_t count, compare; int running; - /* Is the hrtimer pending? */ + /* Calculate the biased and scaled guest CP0_Count */ + count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); + compare = kvm_read_c0_guest_compare(cop0); + + /* + * Find whether CP0_Count has reached the closest timer interrupt. If + * not, we shouldn't inject it. + */ + if ((int32_t)(count - compare) < 0) + return count; + + /* + * The CP0_Count we're going to return has already reached the closest + * timer interrupt. Quickly check if it really is a new interrupt by + * looking at whether the interval until the hrtimer expiry time is + * less than 1/4 of the timer period. + */ expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer); - if (ktime_compare(now, expires) >= 0) { + threshold = ktime_add_ns(now, vcpu->arch.count_period / 4); + if (ktime_before(expires, threshold)) { /* * Cancel it while we handle it so there's no chance of * interference with the timeout handler.
@@ -329,8 +348,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) } } - /* Return the biased and scaled guest CP0_Count */ - return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); + return count; } /** @@ -419,32 +437,6 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu, hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS); } -/** - * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer. - * @vcpu: Virtual CPU. - * - * Recalculates and updates the expiry time of the hrtimer. This can be used - * after timer parameters have been altered which do not depend on the time that - * the change occurs (in those cases kvm_mips_freeze_hrtimer() and - * kvm_mips_resume_hrtimer() are used directly). - * - * It is guaranteed that no timer interrupts will be lost in the process. - * - * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). - */ -static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu) -{ - ktime_t now; - uint32_t count; - - /* - * freeze_hrtimer takes care of a timer interrupts <= count, and - * resume_hrtimer the hrtimer takes care of a timer interrupts > count. - */ - now = kvm_mips_freeze_hrtimer(vcpu, &count); - kvm_mips_resume_hrtimer(vcpu, now, count); -} - /** * kvm_mips_write_count() - Modify the count and update timer. * @vcpu: Virtual CPU. @@ -540,23 +532,42 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz) * kvm_mips_write_compare() - Modify compare and update timer. * @vcpu: Virtual CPU. * @compare: New CP0_Compare value. + * @ack: Whether to acknowledge timer interrupt. * * Update CP0_Compare to a new value and update the timeout. + * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure + * any pending timer interrupt is preserved. 
*/ -void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare) +void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack) { struct mips_coproc *cop0 = vcpu->arch.cop0; + int dc; + u32 old_compare = kvm_read_c0_guest_compare(cop0); + ktime_t now; + uint32_t count; /* if unchanged, must just be an ack */ - if (kvm_read_c0_guest_compare(cop0) == compare) + if (old_compare == compare) { + if (!ack) + return; + kvm_mips_callbacks->dequeue_timer_int(vcpu); + kvm_write_c0_guest_compare(cop0, compare); return; + } + + /* freeze_hrtimer() takes care of timer interrupts <= count */ + dc = kvm_mips_count_disabled(vcpu); + if (!dc) + now = kvm_mips_freeze_hrtimer(vcpu, &count); + + if (ack) + kvm_mips_callbacks->dequeue_timer_int(vcpu); - /* Update compare */ kvm_write_c0_guest_compare(cop0, compare); - /* Update timeout if count enabled */ - if (!kvm_mips_count_disabled(vcpu)) - kvm_mips_update_hrtimer(vcpu); + /* resume_hrtimer() takes care of timer interrupts > count */ + if (!dc) + kvm_mips_resume_hrtimer(vcpu, now, count); } /** @@ -741,15 +752,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) struct mips_coproc *cop0 = vcpu->arch.cop0; enum emulation_result er = EMULATE_DONE; - if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { + if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { + kvm_clear_c0_guest_status(cop0, ST0_ERL); + vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); + } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, kvm_read_c0_guest_epc(cop0)); kvm_clear_c0_guest_status(cop0, ST0_EXL); vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); - } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { - kvm_clear_c0_guest_status(cop0, ST0_ERL); - vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); } else { kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", vcpu->arch.pc); @@ -796,6 +807,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) return EMULATE_FAIL; } +/** + * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map. + * @vcpu: VCPU with changed mappings. + * @tlb: TLB entry being removed. + * + * This is called to indicate a single change in guest MMU mappings, so that we + * can arrange TLB flushes on this and other CPUs. 
+ */ +static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, + struct kvm_mips_tlb *tlb) +{ + int cpu, i; + bool user; + + /* No need to flush for entries which are already invalid */ + if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V)) + return; + /* User address space doesn't need flushing for KSeg2/3 changes */ + user = tlb->tlb_hi < KVM_GUEST_KSEG0; + + preempt_disable(); + + /* + * Probe the shadow host TLB for the entry being overwritten, if one + * matches, invalidate it + */ + kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); + + /* Invalidate the whole ASID on other CPUs */ + cpu = smp_processor_id(); + for_each_possible_cpu(i) { + if (i == cpu) + continue; + if (user) + vcpu->arch.guest_user_asid[i] = 0; + vcpu->arch.guest_kernel_asid[i] = 0; + } + + preempt_enable(); +} + /* Write Guest TLB Entry @ Index */ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) { @@ -815,11 +867,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) } tlb = &vcpu->arch.guest_tlb[index]; - /* - * Probe the shadow host TLB for the entry being overwritten, if one - * matches, invalidate it - */ - kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); + + kvm_mips_invalidate_guest_tlb(vcpu, tlb); tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); @@ -848,11 +897,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) tlb = &vcpu->arch.guest_tlb[index]; - /* - * Probe the shadow host TLB for the entry being overwritten, if one - * matches, invalidate it - */ - kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); + kvm_mips_invalidate_guest_tlb(vcpu, tlb); tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); @@ -971,6 +1016,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, int32_t rt, rd, copz, sel, co_bit, op; uint32_t pc = vcpu->arch.pc; unsigned long curr_pc; + int cpu, i; /* * Update PC and hold onto current PC in case there is @@ -1078,8 +1124,16 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, vcpu->arch.gprs[rt] & ASID_MASK); + preempt_disable(); /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); + cpu = smp_processor_id(); + for_each_possible_cpu(i) + if (i != cpu) { + vcpu->arch.guest_user_asid[i] = 0; + vcpu->arch.guest_kernel_asid[i] = 0; + } + preempt_enable(); } kvm_write_c0_guest_entryhi(cop0, vcpu->arch.gprs[rt]); @@ -1095,9 +1149,9 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, /* If we are writing to COMPARE */ /* Clear pending timer interrupt, if any */ - kvm_mips_callbacks->dequeue_timer_int(vcpu); kvm_mips_write_compare(vcpu, - vcpu->arch.gprs[rt]); + vcpu->arch.gprs[rt], + true); } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { unsigned int old_val, val, change; @@ -1419,6 +1473,7 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause, struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DO_MMIO; + unsigned long curr_pc; int32_t op, base, rt, offset; uint32_t bytes; @@ -1427,7 +1482,18 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause, offset = inst & 0xffff; op = (inst >> 26) & 0x3f; - vcpu->arch.pending_load_cause = cause; + /* + * Find the resume PC now while we have safe and easy access to the + * prior branch instruction, and save it for + * kvm_mips_complete_mmio_load() to restore later. 
+ */ + curr_pc = vcpu->arch.pc; + er = update_pc(vcpu, cause); + if (er == EMULATE_FAIL) + return er; + vcpu->arch.io_pc = vcpu->arch.pc; + vcpu->arch.pc = curr_pc; + vcpu->arch.io_gpr = rt; switch (op) { @@ -1618,8 +1684,14 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, preempt_disable(); if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { - if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) - kvm_mips_handle_kseg0_tlb_fault(va, vcpu); + if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 && + kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) { + kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n", + __func__, va, vcpu, read_c0_entryhi()); + er = EMULATE_FAIL; + preempt_enable(); + goto done; + } } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { int index; @@ -1654,14 +1726,19 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, run, vcpu); preempt_enable(); goto dont_update_pc; - } else { - /* - * We fault an entry from the guest tlb to the - * shadow host TLB - */ - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, - NULL, - NULL); + } + /* + * We fault an entry from the guest tlb to the + * shadow host TLB + */ + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, + NULL, NULL)) { + kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", + __func__, va, index, vcpu, + read_c0_entryhi()); + er = EMULATE_FAIL; + preempt_enable(); + goto done; } } } else { @@ -2396,9 +2473,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, goto done; } - er = update_pc(vcpu, vcpu->arch.pending_load_cause); - if (er == EMULATE_FAIL) - return er; + /* Restore saved resume PC */ + vcpu->arch.pc = vcpu->arch.io_pc; switch (run->mmio.len) { case 4: @@ -2420,11 +2496,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, break; } - if (vcpu->arch.pending_load_cause & CAUSEF_BD) - kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", - vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, - vcpu->mmio_needed); - done: return er; } @@ -2622,8 +2693,13 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, * OK we have a Guest TLB entry, now inject it into the * shadow host TLB */ - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, - NULL); + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, + NULL, NULL)) { + kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", + __func__, va, index, vcpu, + read_c0_entryhi()); + er = EMULATE_FAIL; + } } } diff --git a/kernel/arch/mips/kvm/interrupt.h b/kernel/arch/mips/kvm/interrupt.h index 4ab4bdfad..214388470 100644 --- a/kernel/arch/mips/kvm/interrupt.h +++ b/kernel/arch/mips/kvm/interrupt.h @@ -28,6 +28,7 @@ #define MIPS_EXC_MAX 12 /* XXXSL More to follow */ +extern char __kvm_mips_vcpu_run_end[]; extern char mips32_exception[], mips32_exceptionEnd[]; extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; diff --git a/kernel/arch/mips/kvm/locore.S b/kernel/arch/mips/kvm/locore.S index 7e2210846..777064336 100644 --- a/kernel/arch/mips/kvm/locore.S +++ b/kernel/arch/mips/kvm/locore.S @@ -227,6 +227,7 @@ FEXPORT(__kvm_mips_load_k0k1) /* Jump to guest */ eret +EXPORT(__kvm_mips_vcpu_run_end) VECTOR(MIPSX(exception), unknown) /* Find out what mode we came from and jump to the proper handler. 
*/ diff --git a/kernel/arch/mips/kvm/mips.c b/kernel/arch/mips/kvm/mips.c index 8a113298b..8d4d92701 100644 --- a/kernel/arch/mips/kvm/mips.c +++ b/kernel/arch/mips/kvm/mips.c @@ -314,9 +314,18 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) memcpy(gebase + offset, mips32_GuestException, mips32_GuestExceptionEnd - mips32_GuestException); +#ifdef MODULE + offset += mips32_GuestExceptionEnd - mips32_GuestException; + memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run, + __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run); + vcpu->arch.vcpu_run = gebase + offset; +#else + vcpu->arch.vcpu_run = __kvm_mips_vcpu_run; +#endif + /* Invalidate the icache for these ranges */ - local_flush_icache_range((unsigned long)gebase, - (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); + flush_icache_range((unsigned long)gebase, + (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); /* * Allocate comm page for guest kernel, a TLB will be reserved for @@ -403,7 +412,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) /* Disable hardware page table walking while in guest */ htw_stop(); - r = __kvm_mips_vcpu_run(run, vcpu); + r = vcpu->arch.vcpu_run(run, vcpu); /* Re-enable HTW before enabling interrupts */ htw_start(); diff --git a/kernel/arch/mips/kvm/tlb.c b/kernel/arch/mips/kvm/tlb.c index aed0ac2a4..eff71c75d 100644 --- a/kernel/arch/mips/kvm/tlb.c +++ b/kernel/arch/mips/kvm/tlb.c @@ -152,7 +152,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) srcu_idx = srcu_read_lock(&kvm->srcu); pfn = kvm_mips_gfn_to_pfn(kvm, gfn); - if (kvm_mips_is_error_pfn(pfn)) { + if (is_error_noslot_pfn(pfn)) { kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn); err = -EFAULT; goto out; @@ -276,7 +276,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, } gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); - if (gfn >= kvm->arch.guest_pmap_npages) { + if ((gfn | 1) >= kvm->arch.guest_pmap_npages) { kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__, gfn, badvaddr); kvm_mips_dump_host_tlbs(); @@ -361,25 +361,39 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; struct kvm *kvm = vcpu->kvm; pfn_t pfn0, pfn1; - - if ((tlb->tlb_hi & VPN2_MASK) == 0) { - pfn0 = 0; - pfn1 = 0; - } else { - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) - >> PAGE_SHIFT) < 0) - return -1; - - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) - >> PAGE_SHIFT) < 0) - return -1; - - pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) - >> PAGE_SHIFT]; - pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) - >> PAGE_SHIFT]; + gfn_t gfn0, gfn1; + long tlb_lo[2]; + + tlb_lo[0] = tlb->tlb_lo0; + tlb_lo[1] = tlb->tlb_lo1; + + /* + * The commpage address must not be mapped to anything else if the guest + * TLB contains entries nearby, or commpage accesses will break. 
+	 */
+	if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
+	      VPN2_MASK & (PAGE_MASK << 1)))
+		tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
+
+	gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
+	gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
+	if (gfn0 >= kvm->arch.guest_pmap_npages ||
+	    gfn1 >= kvm->arch.guest_pmap_npages) {
+		kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
+			__func__, gfn0, gfn1, tlb->tlb_hi);
+		kvm_mips_dump_guest_tlbs(vcpu);
+		return -1;
 	}
 
+	if (kvm_mips_map_page(kvm, gfn0) < 0)
+		return -1;
+
+	if (kvm_mips_map_page(kvm, gfn1) < 0)
+		return -1;
+
+	pfn0 = kvm->arch.guest_pmap[gfn0];
+	pfn1 = kvm->arch.guest_pmap[gfn1];
+
 	if (hpa0)
 		*hpa0 = pfn0 << PAGE_SHIFT;
 
@@ -391,9 +405,9 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 		       kvm_mips_get_kernel_asid(vcpu) :
 		       kvm_mips_get_user_asid(vcpu));
 	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
-		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+		   (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
 	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
-		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+		   (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);
 
 	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
 		  tlb->tlb_lo0, tlb->tlb_lo1);
@@ -794,10 +808,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
 			local_irq_restore(flags);
 			return KVM_INVALID_INST;
 		}
-		kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
-						     &vcpu->arch.
-						     guest_tlb[index],
-						     NULL, NULL);
+		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+						&vcpu->arch.guest_tlb[index],
+						NULL, NULL)) {
+			kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
+				__func__, opc, index, vcpu,
+				read_c0_entryhi());
+			kvm_mips_dump_guest_tlbs(vcpu);
+			local_irq_restore(flags);
+			return KVM_INVALID_INST;
+		}
 		inst = *(opc);
 	}
 	local_irq_restore(flags);
diff --git a/kernel/arch/mips/kvm/trap_emul.c b/kernel/arch/mips/kvm/trap_emul.c
index d836ed5b0..307cc4c98 100644
--- a/kernel/arch/mips/kvm/trap_emul.c
+++ b/kernel/arch/mips/kvm/trap_emul.c
@@ -547,7 +547,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
 		kvm_mips_write_count(vcpu, v);
 		break;
 	case KVM_REG_MIPS_CP0_COMPARE:
-		kvm_mips_write_compare(vcpu, v);
+		kvm_mips_write_compare(vcpu, v, false);
 		break;
 	case KVM_REG_MIPS_CP0_CAUSE:
 		/*
diff --git a/kernel/arch/mips/lib/ashldi3.c b/kernel/arch/mips/lib/ashldi3.c
index beb80f316..927dc94a0 100644
--- a/kernel/arch/mips/lib/ashldi3.c
+++ b/kernel/arch/mips/lib/ashldi3.c
@@ -2,7 +2,7 @@
 
 #include "libgcc.h"
 
-long long __ashldi3(long long u, word_type b)
+long long notrace __ashldi3(long long u, word_type b)
 {
 	DWunion uu, w;
 	word_type bm;
diff --git a/kernel/arch/mips/lib/ashrdi3.c b/kernel/arch/mips/lib/ashrdi3.c
index c884a912b..9fdf1a598 100644
--- a/kernel/arch/mips/lib/ashrdi3.c
+++ b/kernel/arch/mips/lib/ashrdi3.c
@@ -2,7 +2,7 @@
 
 #include "libgcc.h"
 
-long long __ashrdi3(long long u, word_type b)
+long long notrace __ashrdi3(long long u, word_type b)
 {
 	DWunion uu, w;
 	word_type bm;
diff --git a/kernel/arch/mips/lib/bswapdi.c b/kernel/arch/mips/lib/bswapdi.c
index 77e5f9c1f..e3e77aa52 100644
--- a/kernel/arch/mips/lib/bswapdi.c
+++ b/kernel/arch/mips/lib/bswapdi.c
@@ -1,6 +1,6 @@
 #include <linux/module.h>
 
-unsigned long long __bswapdi2(unsigned long long u)
+unsigned long long notrace __bswapdi2(unsigned long long u)
 {
 	return (((u) & 0xff00000000000000ull) >> 56) |
 	       (((u) & 0x00ff000000000000ull) >> 40) |
diff --git a/kernel/arch/mips/lib/bswapsi.c b/kernel/arch/mips/lib/bswapsi.c
index 2b302ff12..530a8afe6 100644
--- a/kernel/arch/mips/lib/bswapsi.c
+++ b/kernel/arch/mips/lib/bswapsi.c
@@ -1,6 +1,6 @@
 #include <linux/module.h>
 
-unsigned int __bswapsi2(unsigned int u)
+unsigned int notrace __bswapsi2(unsigned int u)
 {
 	return (((u) & 0xff000000) >> 24) |
 	       (((u) & 0x00ff0000) >> 8) |
diff --git a/kernel/arch/mips/lib/cmpdi2.c b/kernel/arch/mips/lib/cmpdi2.c
index 8c1306437..06857da96 100644
--- a/kernel/arch/mips/lib/cmpdi2.c
+++ b/kernel/arch/mips/lib/cmpdi2.c
@@ -2,7 +2,7 @@
 
 #include "libgcc.h"
 
-word_type __cmpdi2(long long a, long long b)
+word_type notrace __cmpdi2(long long a, long long b)
 {
 	const DWunion au = {
 		.ll = a
diff --git a/kernel/arch/mips/lib/lshrdi3.c b/kernel/arch/mips/lib/lshrdi3.c
index dcf8d6810..364547449 100644
--- a/kernel/arch/mips/lib/lshrdi3.c
+++ b/kernel/arch/mips/lib/lshrdi3.c
@@ -2,7 +2,7 @@
 
 #include "libgcc.h"
 
-long long __lshrdi3(long long u, word_type b)
+long long notrace __lshrdi3(long long u, word_type b)
 {
 	DWunion uu, w;
 	word_type bm;
diff --git a/kernel/arch/mips/lib/ucmpdi2.c b/kernel/arch/mips/lib/ucmpdi2.c
index bb4cb2f82..bd599f582 100644
--- a/kernel/arch/mips/lib/ucmpdi2.c
+++ b/kernel/arch/mips/lib/ucmpdi2.c
@@ -2,7 +2,7 @@
 
 #include "libgcc.h"
 
-word_type __ucmpdi2(unsigned long long a, unsigned long long b)
+word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b)
 {
 	const DWunion au = {.ll = a};
 	const DWunion bu = {.ll = b};
diff --git a/kernel/arch/mips/loongson64/loongson-3/hpet.c b/kernel/arch/mips/loongson64/loongson-3/hpet.c
index a2631a52c..444802e78 100644
--- a/kernel/arch/mips/loongson64/loongson-3/hpet.c
+++ b/kernel/arch/mips/loongson64/loongson-3/hpet.c
@@ -13,8 +13,8 @@
 #define SMBUS_PCI_REG64		0x64
 #define SMBUS_PCI_REGB4		0xb4
 
-#define HPET_MIN_CYCLES		64
-#define HPET_MIN_PROG_DELTA	(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
+#define HPET_MIN_CYCLES		16
+#define HPET_MIN_PROG_DELTA	(HPET_MIN_CYCLES * 12)
 
 static DEFINE_SPINLOCK(hpet_lock);
 DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
@@ -157,14 +157,14 @@ static int hpet_tick_resume(struct clock_event_device *evt)
 static int hpet_next_event(unsigned long delta,
 		struct clock_event_device *evt)
 {
-	unsigned int cnt;
-	int res;
+	u32 cnt;
+	s32 res;
 
 	cnt = hpet_read(HPET_COUNTER);
-	cnt += delta;
+	cnt += (u32) delta;
 	hpet_write(HPET_T0_CMP, cnt);
 
-	res = (int)(cnt - hpet_read(HPET_COUNTER));
+	res = (s32)(cnt - hpet_read(HPET_COUNTER));
 	return res < HPET_MIN_CYCLES ? -ETIME : 0;
 }
 
@@ -230,7 +230,7 @@ void __init setup_hpet_timer(void)
 
 	cd = &per_cpu(hpet_clockevent_device, cpu);
 	cd->name = "hpet";
-	cd->rating = 320;
+	cd->rating = 100;
 	cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 	cd->set_state_shutdown = hpet_set_state_shutdown;
 	cd->set_state_periodic = hpet_set_state_periodic;
diff --git a/kernel/arch/mips/loongson64/loongson-3/numa.c b/kernel/arch/mips/loongson64/loongson-3/numa.c
index 6f9e010ce..282c5a8c2 100644
--- a/kernel/arch/mips/loongson64/loongson-3/numa.c
+++ b/kernel/arch/mips/loongson64/loongson-3/numa.c
@@ -213,10 +213,10 @@ static void __init node_mem_init(unsigned int node)
 			BOOTMEM_DEFAULT);
 
 	if (node == 0 && node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT)) {
-		/* Reserve 0xff800000~0xffffffff for RS780E integrated GPU */
+		/* Reserve 0xfe000000~0xffffffff for RS780E integrated GPU */
 		reserve_bootmem_node(NODE_DATA(node),
-				(node_addrspace_offset | 0xff800000),
-				8 << 20, BOOTMEM_DEFAULT);
+				(node_addrspace_offset | 0xfe000000),
+				32 << 20, BOOTMEM_DEFAULT);
 	}
 
 	sparse_memory_present_with_active_regions(node);
diff --git a/kernel/arch/mips/math-emu/cp1emu.c b/kernel/arch/mips/math-emu/cp1emu.c
index 32f0e19a0..734a2c766 100644
--- a/kernel/arch/mips/math-emu/cp1emu.c
+++ b/kernel/arch/mips/math-emu/cp1emu.c
@@ -445,9 +445,11 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 	case spec_op:
 		switch (insn.r_format.func) {
 		case jalr_op:
-			regs->regs[insn.r_format.rd] =
-				regs->cp0_epc + dec_insn.pc_inc +
-				dec_insn.next_pc_inc;
+			if (insn.r_format.rd != 0) {
+				regs->regs[insn.r_format.rd] =
+					regs->cp0_epc + dec_insn.pc_inc +
+					dec_insn.next_pc_inc;
+			}
 			/* Fall through */
 		case jr_op:
 			/* For R6, JR already emulated in jalr_op */
diff --git a/kernel/arch/mips/mm/cache.c b/kernel/arch/mips/mm/cache.c
index aab218c36..e87bccd6e 100644
--- a/kernel/arch/mips/mm/cache.c
+++ b/kernel/arch/mips/mm/cache.c
@@ -16,6 +16,7 @@
 #include <linux/syscalls.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
 #include <asm/cpu.h>
@@ -83,8 +84,6 @@ void __flush_dcache_page(struct page *page)
 	struct address_space *mapping = page_mapping(page);
 	unsigned long addr;
 
-	if (PageHighMem(page))
-		return;
 	if (mapping && !mapping_mapped(mapping)) {
 		SetPageDcacheDirty(page);
 		return;
@@ -95,8 +94,15 @@ void __flush_dcache_page(struct page *page)
 	 * case is for exec env/arg pages and those are %99 certainly going to
 	 * get faulted into the tlb (and thus flushed) anyways.
 	 */
-	addr = (unsigned long) page_address(page);
+	if (PageHighMem(page))
+		addr = (unsigned long)kmap_atomic(page);
+	else
+		addr = (unsigned long)page_address(page);
+
 	flush_data_cache_page(addr);
+
+	if (PageHighMem(page))
+		__kunmap_atomic((void *)addr);
 }
 
 EXPORT_SYMBOL(__flush_dcache_page);
@@ -119,33 +125,28 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
 EXPORT_SYMBOL(__flush_anon_page);
 
-void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-	unsigned long addr;
-
-	if (PageHighMem(page))
-		return;
-
-	addr = (unsigned long) page_address(page);
-	flush_data_cache_page(addr);
-}
-EXPORT_SYMBOL_GPL(__flush_icache_page);
-
-void __update_cache(struct vm_area_struct *vma, unsigned long address,
-	pte_t pte)
+void __update_cache(unsigned long address, pte_t pte)
 {
 	struct page *page;
 	unsigned long pfn, addr;
-	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
+	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
 
 	pfn = pte_pfn(pte);
 	if (unlikely(!pfn_valid(pfn)))
 		return;
 	page = pfn_to_page(pfn);
-	if (page_mapping(page) && Page_dcache_dirty(page)) {
-		addr = (unsigned long) page_address(page);
+	if (Page_dcache_dirty(page)) {
+		if (PageHighMem(page))
+			addr = (unsigned long)kmap_atomic(page);
+		else
+			addr = (unsigned long)page_address(page);
+
 		if (exec || pages_do_alias(addr, address & PAGE_MASK))
 			flush_data_cache_page(addr);
+
+		if (PageHighMem(page))
+			__kunmap_atomic((void *)addr);
+
 		ClearPageDcacheDirty(page);
 	}
 }
diff --git a/kernel/arch/mips/mm/uasm-mips.c b/kernel/arch/mips/mm/uasm-mips.c
index b4a837893..5abe51cad 100644
--- a/kernel/arch/mips/mm/uasm-mips.c
+++ b/kernel/arch/mips/mm/uasm-mips.c
@@ -65,7 +65,7 @@ static struct insn insn_table[] = {
 #ifndef CONFIG_CPU_MIPSR6
 	{ insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 #else
-	{ insn_cache,  M6(cache_op, 0, 0, 0, cache6_op),  RS | RT | SIMM9 },
+	{ insn_cache,  M6(spec3_op, 0, 0, 0, cache6_op),  RS | RT | SIMM9 },
 #endif
 	{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
diff --git a/kernel/arch/mips/mti-malta/malta-setup.c b/kernel/arch/mips/mti-malta/malta-setup.c
index 4740c82fb..36b09b2ea 100644
--- a/kernel/arch/mips/mti-malta/malta-setup.c
+++ b/kernel/arch/mips/mti-malta/malta-setup.c
@@ -39,6 +39,9 @@
 #include <linux/console.h>
 #endif
 
+#define ROCIT_CONFIG_GEN0		0x1f403000
+#define ROCIT_CONFIG_GEN0_PCI_IOCU	BIT(7)
+
 extern void malta_be_init(void);
 extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
 
@@ -107,6 +110,8 @@ static void __init fd_activate(void)
 static int __init plat_enable_iocoherency(void)
 {
 	int supported = 0;
+	u32 cfg;
+
 	if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
 		if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
 			BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
@@ -129,7 +134,8 @@ static int __init plat_enable_iocoherency(void)
 	} else if (mips_cm_numiocu() != 0) {
 		/* Nothing special needs to be done to enable coherency */
 		pr_info("CMP IOCU detected\n");
-		if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
+		cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0));
+		if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) {
 			pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
 			return 0;
 		}
diff --git a/kernel/arch/mips/vdso/Makefile b/kernel/arch/mips/vdso/Makefile
index 14568900f..6c7d78546 100644
--- a/kernel/arch/mips/vdso/Makefile
+++ b/kernel/arch/mips/vdso/Makefile
@@ -5,10 +5,12 @@ obj-vdso-y := elf.o gettimeofday.o sigreturn.o
 ccflags-vdso := \
 	$(filter -I%,$(KBUILD_CFLAGS)) \
 	$(filter -E%,$(KBUILD_CFLAGS)) \
+	$(filter -mmicromips,$(KBUILD_CFLAGS)) \
 	$(filter -march=%,$(KBUILD_CFLAGS))
 cflags-vdso := $(ccflags-vdso) \
 	$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
-	-O2 -g -fPIC -fno-common -fno-builtin -G 0 -DDISABLE_BRANCH_PROFILING \
+	-O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
+	-DDISABLE_BRANCH_PROFILING \
 	$(call cc-option, -fno-stack-protector)
 aflags-vdso := $(ccflags-vdso) \
 	$(filter -I%,$(KBUILD_CFLAGS)) \
@@ -73,7 +75,7 @@ obj-vdso := $(obj-vdso-y:%.o=$(obj)/%.o)
 $(obj-vdso): KBUILD_CFLAGS := $(cflags-vdso) $(native-abi)
 $(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi)
 
-$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(native-abi)
+$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) $(native-abi)
 
 $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
 	$(call if_changed,vdsold)
--
cgit 1.2.3-korg