author     Yunhong Jiang <yunhong.jiang@linux.intel.com>  2017-03-08 23:13:28 -0800
committer  Yunhong Jiang <yunhong.jiang@linux.intel.com>  2017-03-08 23:36:15 -0800
commit     52f993b8e89487ec9ee15a7fb4979e0f09a45b27 (patch)
tree       d65304486afe0bea4a311c783c0d72791c8c0aa2 /kernel/arch/x86/include
parent     c189ccac5702322ed843fe17057035b7222a59b6 (diff)
Upgrade to 4.4.50-rt62
The current kernel is based on rt kernel v4.4.6-rt14. Upgrade it to 4.4.50-rt62. The steps to achieve this are:

a) Clone the git repo from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git

b) Get the diff between these two changesets:

   git diff 640eca2901f3435e616157b11379d3223a44b391 705619beeea1b0b48219a683fd1a901a86fdaf5e

   where the two commits are:

   [yjiang5@jnakajim-build linux-stable-rt]$ git show --oneline --name-only 640eca2901f3435e616157b11379d3223a44b391
   640eca2901f3 v4.4.6-rt14
   localversion-rt

   [yjiang5@jnakajim-build linux-stable-rt]$ git show --oneline --name-only 705619beeea1b0b48219a683fd1a901a86fdaf5e
   705619beeea1 Linux 4.4.50-rt62
   localversion-rt

c) One patch has already been backported, so exclude it before applying:

   filterdiff -p1 -x scripts/package/Makefile ~/tmp/v4.4.6-rt14-4.4.50-rt62.diff | patch -p1 --dry-run

Upstream status: backport

Change-Id: I244d57a32f6066e5a5b9915f9fbf99e7bbca6e01
Signed-off-by: Yunhong Jiang <yunhong.jiang@linux.intel.com>
Diffstat (limited to 'kernel/arch/x86/include')
-rw-r--r--  kernel/arch/x86/include/asm/apic.h            |  2
-rw-r--r--  kernel/arch/x86/include/asm/hugetlb.h         |  1
-rw-r--r--  kernel/arch/x86/include/asm/hw_irq.h          |  1
-rw-r--r--  kernel/arch/x86/include/asm/kvm_host.h        |  2
-rw-r--r--  kernel/arch/x86/include/asm/microcode.h       | 26
-rw-r--r--  kernel/arch/x86/include/asm/mtrr.h            |  6
-rw-r--r--  kernel/arch/x86/include/asm/pat.h             |  2
-rw-r--r--  kernel/arch/x86/include/asm/pci_x86.h         |  2
-rw-r--r--  kernel/arch/x86/include/asm/perf_event.h      |  1
-rw-r--r--  kernel/arch/x86/include/asm/preempt.h         | 17
-rw-r--r--  kernel/arch/x86/include/asm/pvclock.h         |  2
-rw-r--r--  kernel/arch/x86/include/asm/tlbflush.h        |  9
-rw-r--r--  kernel/arch/x86/include/asm/uaccess.h         | 10
-rw-r--r--  kernel/arch/x86/include/asm/xen/hypervisor.h  |  2
14 files changed, 71 insertions, 12 deletions
diff --git a/kernel/arch/x86/include/asm/apic.h b/kernel/arch/x86/include/asm/apic.h
index 9686289d2..6a03a760f 100644
--- a/kernel/arch/x86/include/asm/apic.h
+++ b/kernel/arch/x86/include/asm/apic.h
@@ -640,8 +640,8 @@ static inline void entering_irq(void)
static inline void entering_ack_irq(void)
{
- ack_APIC_irq();
entering_irq();
+ ack_APIC_irq();
}
static inline void ipi_entering_ack_irq(void)
diff --git a/kernel/arch/x86/include/asm/hugetlb.h b/kernel/arch/x86/include/asm/hugetlb.h
index f8a29d2c9..e6a8613fb 100644
--- a/kernel/arch/x86/include/asm/hugetlb.h
+++ b/kernel/arch/x86/include/asm/hugetlb.h
@@ -4,6 +4,7 @@
#include <asm/page.h>
#include <asm-generic/hugetlb.h>
+#define hugepages_supported() cpu_has_pse
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
diff --git a/kernel/arch/x86/include/asm/hw_irq.h b/kernel/arch/x86/include/asm/hw_irq.h
index 1e3408e88..59caa55fb 100644
--- a/kernel/arch/x86/include/asm/hw_irq.h
+++ b/kernel/arch/x86/include/asm/hw_irq.h
@@ -136,6 +136,7 @@ struct irq_alloc_info {
struct irq_cfg {
unsigned int dest_apicid;
u8 vector;
+ u8 old_vector;
};
extern struct irq_cfg *irq_cfg(unsigned int irq);
diff --git a/kernel/arch/x86/include/asm/kvm_host.h b/kernel/arch/x86/include/asm/kvm_host.h
index fe68e836e..2cf52f11a 100644
--- a/kernel/arch/x86/include/asm/kvm_host.h
+++ b/kernel/arch/x86/include/asm/kvm_host.h
@@ -41,7 +41,7 @@
#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
-#define KVM_HALT_POLL_NS_DEFAULT 500000
+#define KVM_HALT_POLL_NS_DEFAULT 400000
#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
diff --git a/kernel/arch/x86/include/asm/microcode.h b/kernel/arch/x86/include/asm/microcode.h
index 34e62b1dc..712b24ed3 100644
--- a/kernel/arch/x86/include/asm/microcode.h
+++ b/kernel/arch/x86/include/asm/microcode.h
@@ -2,6 +2,7 @@
#define _ASM_X86_MICROCODE_H
#include <linux/earlycpio.h>
+#include <linux/initrd.h>
#define native_rdmsr(msr, val1, val2) \
do { \
@@ -168,4 +169,29 @@ static inline void reload_early_microcode(void) { }
static inline bool
get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
#endif
+
+static inline unsigned long get_initrd_start(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ return initrd_start;
+#else
+ return 0;
+#endif
+}
+
+static inline unsigned long get_initrd_start_addr(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+#ifdef CONFIG_X86_32
+ unsigned long *initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
+
+ return (unsigned long)__pa_nodebug(*initrd_start_p);
+#else
+ return get_initrd_start();
+#endif
+#else /* CONFIG_BLK_DEV_INITRD */
+ return 0;
+#endif
+}
+
#endif /* _ASM_X86_MICROCODE_H */
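Editor's note: the two helpers added above follow a common pattern, a config-gated accessor that returns the real value when CONFIG_BLK_DEV_INITRD is built in and a constant 0 otherwise, so callers never need an #ifdef of their own. A minimal userspace sketch of that pattern, with made-up names (FEATURE_FOO, foo_base, get_foo_base) that are not kernel symbols:

#include <stdio.h>

/* Sketch of the config-gated accessor pattern used by get_initrd_start():
 * FEATURE_FOO and foo_base are illustrative names, not kernel symbols. */
#ifdef FEATURE_FOO
static unsigned long foo_base = 0x100000;
static inline unsigned long get_foo_base(void) { return foo_base; }
#else
static inline unsigned long get_foo_base(void) { return 0; }	/* feature compiled out */
#endif

int main(void)
{
	unsigned long base = get_foo_base();	/* no #ifdef needed at the call site */

	if (base)
		printf("feature base: %#lx\n", base);
	else
		printf("feature not built in\n");
	return 0;
}

Compile with and without -DFEATURE_FOO to see both variants of the accessor.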
diff --git a/kernel/arch/x86/include/asm/mtrr.h b/kernel/arch/x86/include/asm/mtrr.h
index b94f6f64e..dbff1456d 100644
--- a/kernel/arch/x86/include/asm/mtrr.h
+++ b/kernel/arch/x86/include/asm/mtrr.h
@@ -24,6 +24,7 @@
#define _ASM_X86_MTRR_H
#include <uapi/asm/mtrr.h>
+#include <asm/pat.h>
/*
@@ -83,9 +84,12 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
}
+static inline void mtrr_bp_init(void)
+{
+ pat_disable("MTRRs disabled, skipping PAT initialization too.");
+}
#define mtrr_ap_init() do {} while (0)
-#define mtrr_bp_init() do {} while (0)
#define set_mtrr_aps_delayed_init() do {} while (0)
#define mtrr_aps_init() do {} while (0)
#define mtrr_bp_restore() do {} while (0)
diff --git a/kernel/arch/x86/include/asm/pat.h b/kernel/arch/x86/include/asm/pat.h
index ca6c228d5..0b1ff4c1c 100644
--- a/kernel/arch/x86/include/asm/pat.h
+++ b/kernel/arch/x86/include/asm/pat.h
@@ -5,8 +5,8 @@
#include <asm/pgtable_types.h>
bool pat_enabled(void);
+void pat_disable(const char *reason);
extern void pat_init(void);
-void pat_init_cache_modes(u64);
extern int reserve_memtype(u64 start, u64 end,
enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
diff --git a/kernel/arch/x86/include/asm/pci_x86.h b/kernel/arch/x86/include/asm/pci_x86.h
index fa1195dae..164e3f8d3 100644
--- a/kernel/arch/x86/include/asm/pci_x86.h
+++ b/kernel/arch/x86/include/asm/pci_x86.h
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
+extern bool mp_should_keep_irq(struct device *dev);
+
struct pci_raw_ops {
int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val);
diff --git a/kernel/arch/x86/include/asm/perf_event.h b/kernel/arch/x86/include/asm/perf_event.h
index 7bcb861a0..5a2ed3ed2 100644
--- a/kernel/arch/x86/include/asm/perf_event.h
+++ b/kernel/arch/x86/include/asm/perf_event.h
@@ -165,6 +165,7 @@ struct x86_pmu_capability {
#define GLOBAL_STATUS_ASIF BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58)
+#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55)
/*
* IBS cpuid feature detection
diff --git a/kernel/arch/x86/include/asm/preempt.h b/kernel/arch/x86/include/asm/preempt.h
index 5dbd2d0f9..6f432adc5 100644
--- a/kernel/arch/x86/include/asm/preempt.h
+++ b/kernel/arch/x86/include/asm/preempt.h
@@ -89,6 +89,8 @@ static __always_inline bool __preempt_count_dec_and_test(void)
if (____preempt_count_dec_and_test())
return true;
#ifdef CONFIG_PREEMPT_LAZY
+ if (current_thread_info()->preempt_lazy_count)
+ return false;
return test_thread_flag(TIF_NEED_RESCHED_LAZY);
#else
return false;
@@ -101,8 +103,19 @@ static __always_inline bool __preempt_count_dec_and_test(void)
static __always_inline bool should_resched(int preempt_offset)
{
#ifdef CONFIG_PREEMPT_LAZY
- return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset ||
- test_thread_flag(TIF_NEED_RESCHED_LAZY));
+ u32 tmp;
+
+ tmp = raw_cpu_read_4(__preempt_count);
+ if (tmp == preempt_offset)
+ return true;
+
+ /* preempt count == 0 ? */
+ tmp &= ~PREEMPT_NEED_RESCHED;
+ if (tmp)
+ return false;
+ if (current_thread_info()->preempt_lazy_count)
+ return false;
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
#else
return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
#endif
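Editor's note: to make the new should_resched() flow easier to follow, here is a standalone userspace model of the same sequence of checks. The names and the flag value are placeholders for this example; only the order of the tests mirrors the code above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder for the need-resched flag folded into the preempt count word;
 * the value is illustrative, not the kernel's actual encoding. */
#define MODEL_NEED_RESCHED (1u << 31)

static bool model_should_resched(uint32_t count, uint32_t offset,
				 unsigned int lazy_count, bool need_resched_lazy)
{
	if (count == offset)
		return true;			/* regular preemption point */
	if (count & ~MODEL_NEED_RESCHED)
		return false;			/* still inside a preempt-disabled section */
	if (lazy_count)
		return false;			/* lazy preemption explicitly held off */
	return need_resched_lazy;		/* only the lazy resched flag remains */
}

int main(void)
{
	printf("%d\n", model_should_resched(0, 0, 0, false));			/* 1 */
	printf("%d\n", model_should_resched(MODEL_NEED_RESCHED, 0, 0, true));	/* 1 */
	printf("%d\n", model_should_resched(MODEL_NEED_RESCHED, 0, 1, true));	/* 0 */
	return 0;
}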
diff --git a/kernel/arch/x86/include/asm/pvclock.h b/kernel/arch/x86/include/asm/pvclock.h
index 7a6bed5c0..baad72e4c 100644
--- a/kernel/arch/x86/include/asm/pvclock.h
+++ b/kernel/arch/x86/include/asm/pvclock.h
@@ -76,6 +76,8 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
u8 ret_flags;
version = src->version;
+ /* Make the latest version visible */
+ smp_rmb();
offset = pvclock_get_nsec_offset(src);
ret = src->system_time + offset;
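Editor's note: the added smp_rmb() keeps the version read ordered before the reads of the time fields that follow; the surrounding pvclock read loop retries when the version changes or is odd. Below is a minimal userspace sketch of that version/retry read pattern, using C11 atomics in place of the kernel barriers; the struct and field names are invented for the example and are not the pvclock layout.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative version/retry reader: the acquire ordering stands in for the
 * version read + smp_rmb() above. Names here are examples, not pvclock's. */
struct sample {
	atomic_uint version;		/* odd while the writer is mid-update */
	uint64_t system_time;
};

static uint64_t read_sample(const struct sample *s)
{
	unsigned int v;
	uint64_t t;

	do {
		v = atomic_load_explicit(&s->version, memory_order_acquire);
		t = s->system_time;	/* must not be read before the version */
		atomic_thread_fence(memory_order_acquire);
	} while ((v & 1) ||
		 v != atomic_load_explicit(&s->version, memory_order_relaxed));

	return t;
}

int main(void)
{
	struct sample s = { .version = 2, .system_time = 123456789ULL };

	printf("%llu\n", (unsigned long long)read_sample(&s));
	return 0;
}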
diff --git a/kernel/arch/x86/include/asm/tlbflush.h b/kernel/arch/x86/include/asm/tlbflush.h
index 6df202940..6433e28dc 100644
--- a/kernel/arch/x86/include/asm/tlbflush.h
+++ b/kernel/arch/x86/include/asm/tlbflush.h
@@ -32,7 +32,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
- this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
+ this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());
}
/* Set in this cpu's CR4. */
@@ -86,7 +86,14 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
static inline void __native_flush_tlb(void)
{
+ /*
+ * If current->mm == NULL then we borrow a mm which may change during a
+ * task switch and therefore we must not be preempted while we write CR3
+ * back:
+ */
+ preempt_disable();
native_write_cr3(native_read_cr3());
+ preempt_enable();
}
static inline void __native_flush_tlb_global_irq_disabled(void)
diff --git a/kernel/arch/x86/include/asm/uaccess.h b/kernel/arch/x86/include/asm/uaccess.h
index 09b1b0ab9..b8c75f3aa 100644
--- a/kernel/arch/x86/include/asm/uaccess.h
+++ b/kernel/arch/x86/include/asm/uaccess.h
@@ -332,7 +332,7 @@ do { \
#define __get_user_asm_u64(x, ptr, retval, errret) \
__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
- __get_user_asm_ex(x, ptr, "q", "", "=r")
+ __get_user_asm_ex(x, ptr, "q", "", "=&r")
#endif
#define __get_user_size(x, ptr, size, retval, errret) \
@@ -375,13 +375,13 @@ do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
- __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
+ __get_user_asm_ex(x, ptr, "b", "b", "=&q"); \
break; \
case 2: \
- __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
+ __get_user_asm_ex(x, ptr, "w", "w", "=&r"); \
break; \
case 4: \
- __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
+ __get_user_asm_ex(x, ptr, "l", "k", "=&r"); \
break; \
case 8: \
__get_user_asm_ex_u64(x, ptr); \
@@ -395,7 +395,7 @@ do { \
asm volatile("1: mov"itype" %1,%"rtype"0\n" \
"2:\n" \
_ASM_EXTABLE_EX(1b, 2b) \
- : ltype(x) : "m" (__m(addr)))
+ : ltype(x) : "m" (__m(addr)), "0" (0))
#define __put_user_nocheck(x, ptr, size) \
({ \
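Editor's note: the constraint changes above use GCC's earlyclobber marker. "=&r" tells the compiler that the output register is written before the asm is done with its inputs, so it must not share a register with any input, and the added "0" (0) operand pre-loads the output register with zero. A small standalone illustration of the earlyclobber marker itself (x86-64, AT&T syntax; the helper function is invented for this example):

#include <stdio.h>

/*
 * Minimal illustration of the "&" (earlyclobber) constraint: the output is
 * written by the first instruction, before the second input has been used,
 * so without "&" the compiler could legally place "b" in the same register
 * as "out" and the mov would destroy it.
 */
static unsigned long add_via_asm(unsigned long a, unsigned long b)
{
	unsigned long out;

	asm("mov %1, %0\n\t"
	    "add %2, %0"
	    : "=&r" (out)		/* earlyclobber: written before inputs are dead */
	    : "r" (a), "r" (b));
	return out;
}

int main(void)
{
	printf("%lu\n", add_via_asm(40, 2));	/* prints 42 */
	return 0;
}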
diff --git a/kernel/arch/x86/include/asm/xen/hypervisor.h b/kernel/arch/x86/include/asm/xen/hypervisor.h
index 8b2d4bea9..39171b364 100644
--- a/kernel/arch/x86/include/asm/xen/hypervisor.h
+++ b/kernel/arch/x86/include/asm/xen/hypervisor.h
@@ -62,4 +62,6 @@ void xen_arch_register_cpu(int num);
void xen_arch_unregister_cpu(int num);
#endif
+extern void xen_set_iopl_mask(unsigned mask);
+
#endif /* _ASM_X86_XEN_HYPERVISOR_H */