| author    | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-11 10:41:07 +0300 |
|-----------|---------------------------------------------|---------------------------|
| committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-13 08:17:18 +0300 |
| commit    | e09b41010ba33a20a87472ee821fa407a5b8da36 (patch) | |
| tree      | d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/arch/arm/xen | |
| parent    | f93b97fd65072de626c074dbe099a1fff05ce060 (diff) | |
These changes are the raw update to linux-4.4.6-rt14. The kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.

During the rebase, the following patch collided:
"Force tick interrupt and get rid of softirq magic" (I70131fb85).
The colliding hunks were dropped because their logic was already present
in the updated source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/arm/xen')

| mode       | file                            | lines changed |
|------------|---------------------------------|---------------|
| -rw-r--r-- | kernel/arch/arm/xen/enlighten.c | 105 |
| -rw-r--r-- | kernel/arch/arm/xen/hypercall.S | 15 |
| -rw-r--r-- | kernel/arch/arm/xen/mm.c        | 43 |
| -rw-r--r-- | kernel/arch/arm/xen/p2m.c       | 8 |

4 files changed, 113 insertions, 58 deletions
```diff
diff --git a/kernel/arch/arm/xen/enlighten.c b/kernel/arch/arm/xen/enlighten.c
index 7d0f07020..fc7ea529f 100644
--- a/kernel/arch/arm/xen/enlighten.c
+++ b/kernel/arch/arm/xen/enlighten.c
@@ -24,6 +24,7 @@
 #include <linux/cpuidle.h>
 #include <linux/cpufreq.h>
 #include <linux/cpu.h>
+#include <linux/console.h>
 #include <linux/mm.h>
@@ -44,44 +45,39 @@ static struct vcpu_info __percpu *xen_vcpu_info;
 unsigned long xen_released_pages;
 struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 
-/* TODO: to be removed */
-__read_mostly int xen_have_vector_callback;
-EXPORT_SYMBOL_GPL(xen_have_vector_callback);
+static __read_mostly unsigned int xen_events_irq;
 
-int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
-EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
+static __initdata struct device_node *xen_node;
 
-static __read_mostly int xen_events_irq = -1;
-
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
 			       unsigned long addr,
-			       xen_pfn_t *mfn, int nr,
+			       xen_pfn_t *gfn, int nr,
 			       int *err_ptr, pgprot_t prot,
 			       unsigned domid,
 			       struct page **pages)
 {
-	return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+	return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
 					 prot, domid, pages);
 }
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
 
 /* Not used by XENFEAT_auto_translated guests. */
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
 			      unsigned long addr,
-			      xen_pfn_t mfn, int nr,
+			      xen_pfn_t gfn, int nr,
 			      pgprot_t prot, unsigned domid,
 			      struct page **pages)
 {
 	return -ENOSYS;
 }
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
 
-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
 			       int nr, struct page **pages)
 {
 	return xen_xlate_unmap_gfn_range(vma, nr, pages);
 }
-EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
 
 static void xen_percpu_init(void)
 {
@@ -90,16 +86,25 @@ static void xen_percpu_init(void)
 	int err;
 	int cpu = get_cpu();
 
+	/*
+	 * VCPUOP_register_vcpu_info cannot be called twice for the same
+	 * vcpu, so if vcpu_info is already registered, just get out. This
+	 * can happen with cpu-hotplug.
+	 */
+	if (per_cpu(xen_vcpu, cpu) != NULL)
+		goto after_register_vcpu_info;
+
 	pr_info("Xen: initializing cpu%d\n", cpu);
 	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
 
-	info.mfn = __pa(vcpup) >> PAGE_SHIFT;
-	info.offset = offset_in_page(vcpup);
+	info.mfn = virt_to_gfn(vcpup);
+	info.offset = xen_offset_in_page(vcpup);
 
 	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
 	BUG_ON(err);
 	per_cpu(xen_vcpu, cpu) = vcpup;
 
+after_register_vcpu_info:
 	enable_percpu_irq(xen_events_irq, 0);
 	put_cpu();
 }
@@ -128,6 +133,9 @@ static int xen_cpu_notification(struct notifier_block *self,
 	case CPU_STARTING:
 		xen_percpu_init();
 		break;
+	case CPU_DYING:
+		disable_percpu_irq(xen_events_irq);
+		break;
 	default:
 		break;
 	}
@@ -150,40 +158,28 @@ static irqreturn_t xen_arm_callback(int irq, void *arg)
  * documentation of the Xen Device Tree format.
  */
 #define GRANT_TABLE_PHYSADDR 0
-static int __init xen_guest_init(void)
+void __init xen_early_init(void)
 {
-	struct xen_add_to_physmap xatp;
-	static struct shared_info *shared_info_page = 0;
-	struct device_node *node;
 	int len;
 	const char *s = NULL;
 	const char *version = NULL;
 	const char *xen_prefix = "xen,xen-";
-	struct resource res;
-	phys_addr_t grant_frames;
 
-	node = of_find_compatible_node(NULL, NULL, "xen,xen");
-	if (!node) {
+	xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
+	if (!xen_node) {
 		pr_debug("No Xen support\n");
-		return 0;
+		return;
 	}
-	s = of_get_property(node, "compatible", &len);
+	s = of_get_property(xen_node, "compatible", &len);
 	if (strlen(xen_prefix) + 3 < len &&
 			!strncmp(xen_prefix, s, strlen(xen_prefix)))
 		version = s + strlen(xen_prefix);
 	if (version == NULL) {
 		pr_debug("Xen version not found\n");
-		return 0;
+		return;
 	}
 
-	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
-		return 0;
-	grant_frames = res.start;
-	xen_events_irq = irq_of_parse_and_map(node, 0);
-	pr_info("Xen %s support found, events_irq=%d gnttab_frame=%pa\n",
-			version, xen_events_irq, &grant_frames);
-	if (xen_events_irq < 0)
-		return -ENODEV;
+	pr_info("Xen %s support found\n", version);
 
 	xen_domain_type = XEN_HVM_DOMAIN;
 
@@ -194,9 +190,34 @@ static int __init xen_guest_init(void)
 	else
 		xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);
 
-	if (!shared_info_page)
-		shared_info_page = (struct shared_info *)
-			get_zeroed_page(GFP_KERNEL);
+	if (!console_set_on_cmdline && !xen_initial_domain())
+		add_preferred_console("hvc", 0, NULL);
+}
+
+static int __init xen_guest_init(void)
+{
+	struct xen_add_to_physmap xatp;
+	struct shared_info *shared_info_page = NULL;
+	struct resource res;
+	phys_addr_t grant_frames;
+
+	if (!xen_domain())
+		return 0;
+
+	if (of_address_to_resource(xen_node, GRANT_TABLE_PHYSADDR, &res)) {
+		pr_err("Xen grant table base address not found\n");
+		return -ENODEV;
+	}
+	grant_frames = res.start;
+
+	xen_events_irq = irq_of_parse_and_map(xen_node, 0);
+	if (!xen_events_irq) {
+		pr_err("Xen event channel interrupt not found\n");
+		return -ENODEV;
+	}
+
+	shared_info_page = (struct shared_info *)get_zeroed_page(GFP_KERNEL);
+
 	if (!shared_info_page) {
 		pr_err("not enough memory\n");
 		return -ENOMEM;
@@ -204,7 +225,7 @@ static int __init xen_guest_init(void)
 	xatp.domid = DOMID_SELF;
 	xatp.idx = 0;
 	xatp.space = XENMAPSPACE_shared_info;
-	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
+	xatp.gpfn = virt_to_gfn(shared_info_page);
 
 	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
 		BUG();
@@ -275,7 +296,7 @@ void xen_arch_resume(void) { }
 void xen_arch_suspend(void) { }
 
-/* In the hypervisor.S file. */
+/* In the hypercall.S file. */
 EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
diff --git a/kernel/arch/arm/xen/hypercall.S b/kernel/arch/arm/xen/hypercall.S
index f00e08075..10fd99c56 100644
--- a/kernel/arch/arm/xen/hypercall.S
+++ b/kernel/arch/arm/xen/hypercall.S
@@ -98,8 +98,23 @@ ENTRY(privcmd_call)
 	mov r1, r2
 	mov r2, r3
 	ldr r3, [sp, #8]
+	/*
+	 * Privcmd calls are issued by the userspace. We need to allow the
+	 * kernel to access the userspace memory before issuing the hypercall.
+	 */
+	uaccess_enable r4
+
+	/* r4 is loaded now as we use it as scratch register before */
	ldr r4, [sp, #4]
 	__HVC(XEN_IMM)
+
+	/*
+	 * Disable userspace access from kernel. This is fine to do it
+	 * unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is
+	 * called before.
+	 */
+	uaccess_disable r4
+
 	ldm sp!, {r4}
 	ret lr
 ENDPROC(privcmd_call);
diff --git a/kernel/arch/arm/xen/mm.c b/kernel/arch/arm/xen/mm.c
index 498325074..c5f9a9e3d 100644
--- a/kernel/arch/arm/xen/mm.c
+++ b/kernel/arch/arm/xen/mm.c
@@ -15,17 +15,17 @@
 #include <xen/xen.h>
 #include <xen/interface/grant_table.h>
 #include <xen/interface/memory.h>
+#include <xen/page.h>
 #include <xen/swiotlb-xen.h>
 
 #include <asm/cacheflush.h>
-#include <asm/xen/page.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
 unsigned long xen_get_swiotlb_free_pages(unsigned int order)
 {
 	struct memblock_region *reg;
-	gfp_t flags = __GFP_NOWARN;
+	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
 
 	for_each_memblock(memory, reg) {
 		if (reg->base < (phys_addr_t)0xffffffff) {
@@ -48,22 +48,22 @@ static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
 	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
 {
 	struct gnttab_cache_flush cflush;
-	unsigned long pfn;
+	unsigned long xen_pfn;
 	size_t left = size;
 
-	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
-	offset %= PAGE_SIZE;
+	xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
+	offset %= XEN_PAGE_SIZE;
 
 	do {
 		size_t len = left;
 
 		/* buffers in highmem or foreign pages cannot cross page
 		 * boundaries */
-		if (len + offset > PAGE_SIZE)
-			len = PAGE_SIZE - offset;
+		if (len + offset > XEN_PAGE_SIZE)
+			len = XEN_PAGE_SIZE - offset;
 
 		cflush.op = 0;
-		cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
+		cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
 		cflush.offset = offset;
 		cflush.length = len;
 
@@ -79,7 +79,7 @@ static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
 			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
 
 		offset = 0;
-		pfn++;
+		xen_pfn++;
 		left -= len;
 	} while (left);
 }
@@ -138,10 +138,29 @@ void __xen_dma_sync_single_for_device(struct device *hwdev,
 }
 
 bool xen_arch_need_swiotlb(struct device *dev,
-			   unsigned long pfn,
-			   unsigned long mfn)
+			   phys_addr_t phys,
+			   dma_addr_t dev_addr)
 {
-	return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
+	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
+	unsigned int bfn = XEN_PFN_DOWN(dev_addr);
+
+	/*
+	 * The swiotlb buffer should be used if
+	 *	- Xen doesn't have the cache flush hypercall
+	 *	- The Linux page refers to foreign memory
+	 *	- The device doesn't support coherent DMA request
+	 *
+	 * The Linux page may be spanned acrros multiple Xen page, although
+	 * it's not possible to have a mix of local and foreign Xen page.
+	 * Furthermore, range_straddles_page_boundary is already checking
+	 * if buffer is physically contiguous in the host RAM.
+	 *
+	 * Therefore we only need to check the first Xen page to know if we
+	 * require a bounce buffer because the device doesn't support coherent
+	 * memory and we are not able to flush the cache.
+	 */
+	return (!hypercall_cflush && (xen_pfn != bfn) &&
+		!is_device_dma_coherent(dev));
 }
 
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
diff --git a/kernel/arch/arm/xen/p2m.c b/kernel/arch/arm/xen/p2m.c
index cb7a14c5c..0ed01f2d5 100644
--- a/kernel/arch/arm/xen/p2m.c
+++ b/kernel/arch/arm/xen/p2m.c
@@ -10,10 +10,10 @@
 #include <xen/xen.h>
 #include <xen/interface/memory.h>
+#include <xen/page.h>
 #include <xen/swiotlb-xen.h>
 
 #include <asm/cacheflush.h>
-#include <asm/xen/page.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
@@ -93,8 +93,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 	for (i = 0; i < count; i++) {
 		if (map_ops[i].status)
 			continue;
-		set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
-				    map_ops[i].dev_bus_addr >> PAGE_SHIFT);
+		set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
+				    map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT);
 	}
 
 	return 0;
@@ -108,7 +108,7 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 	int i;
 
 	for (i = 0; i < count; i++) {
-		set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
+		set_phys_to_machine(unmap_ops[i].host_addr >> XEN_PAGE_SHIFT,
 				    INVALID_P2M_ENTRY);
 	}
```
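For readers less familiar with the page-granularity rework behind the mm.c and p2m.c hunks, here is a minimal user-space sketch (not part of the patch) of the arithmetic involved: Xen always manages memory in 4 KB frames, so when the Linux page size is larger (64 KB is assumed below purely for illustration), frame numbers and offsets must be derived with `XEN_PAGE_SHIFT` rather than `PAGE_SHIFT`. The macros are re-declared locally so the example compiles on its own; the names mirror the kernel's, but this is only a sketch, not kernel code.

```c
/*
 * Illustrative sketch only: why the diff replaces PAGE_SHIFT with
 * XEN_PAGE_SHIFT. Xen frames are always 4 KB, while a Linux page on ARM
 * configurations may be larger (64 KB assumed here for illustration),
 * so one Linux page can cover several Xen frames.
 */
#include <stdio.h>

#define XEN_PAGE_SHIFT   12UL                      /* Xen frames are always 4 KB */
#define XEN_PAGE_SIZE    (1UL << XEN_PAGE_SHIFT)
#define LINUX_PAGE_SHIFT 16UL                      /* hypothetical 64 KB Linux page */
#define LINUX_PAGE_SIZE  (1UL << LINUX_PAGE_SHIFT)

/* Mirrors the kernel's XEN_PFN_DOWN(): physical address -> Xen frame number */
#define XEN_PFN_DOWN(addr) ((addr) >> XEN_PAGE_SHIFT)

int main(void)
{
	unsigned long phys = 0x80010000UL;  /* arbitrary example physical address */

	printf("Xen frames per Linux page: %lu\n", LINUX_PAGE_SIZE / XEN_PAGE_SIZE);
	printf("Linux pfn: 0x%lx\n", phys >> LINUX_PAGE_SHIFT);
	printf("Xen pfn:   0x%lx\n", XEN_PFN_DOWN(phys));
	printf("offset within Xen frame: 0x%lx\n", phys & (XEN_PAGE_SIZE - 1));
	return 0;
}
```

This also matches the comment added in `xen_arch_need_swiotlb()` above: since a Linux page cannot mix local and foreign Xen frames, comparing only the first Xen frame of the buffer is sufficient to decide whether a bounce buffer is needed.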