author    José Pekkarinen <jose.pekkarinen@nokia.com>    2016-04-11 10:41:07 +0300
committer José Pekkarinen <jose.pekkarinen@nokia.com>    2016-04-13 08:17:18 +0300
commit    e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree      d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/arch/avr32/include
parent    f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.

During the rebase, the following patch collided: Force tick interrupt and
get rid of softirq magic (I70131fb85). The colliding hunks have been dropped
because their logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/avr32/include')
-rw-r--r--  kernel/arch/avr32/include/asm/Kbuild         |  3
-rw-r--r--  kernel/arch/avr32/include/asm/atomic.h       | 16
-rw-r--r--  kernel/arch/avr32/include/asm/cmpxchg.h      |  2
-rw-r--r--  kernel/arch/avr32/include/asm/dma-mapping.h  | 19
-rw-r--r--  kernel/arch/avr32/include/asm/io.h           |  2
-rw-r--r--  kernel/arch/avr32/include/asm/switch_to.h    |  7
6 files changed, 33 insertions(+), 16 deletions(-)
diff --git a/kernel/arch/avr32/include/asm/Kbuild b/kernel/arch/avr32/include/asm/Kbuild
index 528d70d47..241b9b972 100644
--- a/kernel/arch/avr32/include/asm/Kbuild
+++ b/kernel/arch/avr32/include/asm/Kbuild
@@ -12,12 +12,13 @@ generic-y += irq_work.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += param.h
generic-y += percpu.h
generic-y += preempt.h
-generic-y += scatterlist.h
generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += vga.h
+generic-y += word-at-a-time.h
generic-y += xor.h
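
For context (not part of the patch): each generic-y entry tells Kbuild to emit a one-line wrapper header under arch/avr32/include/generated/asm/ that forwards to the asm-generic copy. A minimal sketch of what the generated wrapper for the newly added mm-arch-hooks.h would contain, assuming the standard asm-generic wrapper mechanism:

	/* arch/avr32/include/generated/asm/mm-arch-hooks.h -- generated by Kbuild (sketch) */
	#include <asm-generic/mm-arch-hooks.h>
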
diff --git a/kernel/arch/avr32/include/asm/atomic.h b/kernel/arch/avr32/include/asm/atomic.h
index 2d07ce1c5..d74fd8ce9 100644
--- a/kernel/arch/avr32/include/asm/atomic.h
+++ b/kernel/arch/avr32/include/asm/atomic.h
@@ -19,8 +19,8 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic_set(v, i) (((v)->counter) = i)
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP_RETURN(op, asm_op, asm_con) \
static inline int __atomic_##op##_return(int i, atomic_t *v) \
@@ -44,6 +44,18 @@ static inline int __atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OP_RETURN(sub, sub, rKs21)
ATOMIC_OP_RETURN(add, add, r)
+#define ATOMIC_OP(op, asm_op) \
+ATOMIC_OP_RETURN(op, asm_op, r) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ (void)__atomic_##op##_return(i, v); \
+}
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(or, or)
+ATOMIC_OP(xor, eor)
+
+#undef ATOMIC_OP
#undef ATOMIC_OP_RETURN
/*
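
Two things happen in the atomic.h hunks above: atomic_read()/atomic_set() move from ACCESS_ONCE() to the READ_ONCE()/WRITE_ONCE() accessors, and a new ATOMIC_OP() wrapper generates atomic_and(), atomic_or() and atomic_xor() by reusing the returning variants and discarding the result. A rough sketch of what ATOMIC_OP(and, and) expands to (simplified for illustration):

	/* ATOMIC_OP_RETURN(and, and, r) first emits:
	 *   static inline int __atomic_and_return(int i, atomic_t *v) { ... asm "and" ... }
	 * then ATOMIC_OP adds the void wrapper:
	 */
	static inline void atomic_and(int i, atomic_t *v)
	{
		/* Only the atomic side effect is wanted; the return value is dropped. */
		(void)__atomic_and_return(i, v);
	}
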
diff --git a/kernel/arch/avr32/include/asm/cmpxchg.h b/kernel/arch/avr32/include/asm/cmpxchg.h
index 962a6aeab..366bbeaeb 100644
--- a/kernel/arch/avr32/include/asm/cmpxchg.h
+++ b/kernel/arch/avr32/include/asm/cmpxchg.h
@@ -70,8 +70,6 @@ extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
-#define __HAVE_ARCH_CMPXCHG 1
-
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
diff --git a/kernel/arch/avr32/include/asm/dma-mapping.h b/kernel/arch/avr32/include/asm/dma-mapping.h
index b3d18f9f3..ae7ac9205 100644
--- a/kernel/arch/avr32/include/asm/dma-mapping.h
+++ b/kernel/arch/avr32/include/asm/dma-mapping.h
@@ -209,17 +209,18 @@ dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
* the same here.
*/
static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
enum dma_data_direction direction)
{
int i;
+ struct scatterlist *sg;
- for (i = 0; i < nents; i++) {
+ for_each_sg(sglist, sg, nents, i) {
char *virt;
- sg[i].dma_address = page_to_bus(sg_page(&sg[i])) + sg[i].offset;
- virt = sg_virt(&sg[i]);
- dma_cache_sync(dev, virt, sg[i].length, direction);
+ sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
+ virt = sg_virt(sg);
+ dma_cache_sync(dev, virt, sg->length, direction);
}
return nents;
@@ -321,14 +322,14 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
}
static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction)
{
int i;
+ struct scatterlist *sg;
- for (i = 0; i < nents; i++) {
- dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, direction);
- }
+ for_each_sg(sglist, sg, nents, i)
+ dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
}
/* Now for the API extensions over the pci_ one */
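
The dma-mapping.h hunks replace direct sg[i] indexing with for_each_sg(). This matters because a scatterlist can be chained, in which case entry i+1 is not necessarily adjacent to entry i in memory; for_each_sg() advances through sg_next() instead. Roughly how that iterator is defined in <linux/scatterlist.h>, shown here only for illustration:

	#define for_each_sg(sglist, sg, nr, __i)	\
		for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
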
diff --git a/kernel/arch/avr32/include/asm/io.h b/kernel/arch/avr32/include/asm/io.h
index 4f5ec2bb7..f855646e0 100644
--- a/kernel/arch/avr32/include/asm/io.h
+++ b/kernel/arch/avr32/include/asm/io.h
@@ -296,6 +296,8 @@ extern void __iounmap(void __iomem *addr);
__iounmap(addr)
#define ioremap_wc ioremap_nocache
+#define ioremap_wt ioremap_nocache
+#define ioremap_uc ioremap_nocache
#define cached(addr) P1SEGADDR(addr)
#define uncached(addr) P2SEGADDR(addr)
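
avr32 has no separate write-through or strongly-uncached mapping type, so the new ioremap_wt and ioremap_uc aliases simply fall back to ioremap_nocache, as ioremap_wc already did. A usage sketch from a hypothetical driver (foo_regs, FOO_BASE and FOO_SIZE are made-up names):

	void __iomem *foo_regs;

	foo_regs = ioremap_wt(FOO_BASE, FOO_SIZE);	/* same mapping as ioremap_nocache() on avr32 */
	if (!foo_regs)
		return -ENOMEM;
	writel(0x1, foo_regs + 0x04);			/* uncached MMIO write */
	iounmap(foo_regs);
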
diff --git a/kernel/arch/avr32/include/asm/switch_to.h b/kernel/arch/avr32/include/asm/switch_to.h
index 9a8e9d520..6f00581c3 100644
--- a/kernel/arch/avr32/include/asm/switch_to.h
+++ b/kernel/arch/avr32/include/asm/switch_to.h
@@ -15,11 +15,13 @@
*/
#ifdef CONFIG_OWNERSHIP_TRACE
#include <asm/ocd.h>
-#define finish_arch_switch(prev) \
+#define ocd_switch(prev, next) \
do { \
ocd_write(PID, prev->pid); \
- ocd_write(PID, current->pid); \
+ ocd_write(PID, next->pid); \
} while(0)
+#else
+#define ocd_switch(prev, next)
#endif
/*
@@ -38,6 +40,7 @@ extern struct task_struct *__switch_to(struct task_struct *,
struct cpu_context *);
#define switch_to(prev, next, last) \
do { \
+ ocd_switch(prev, next); \
last = __switch_to(prev, &prev->thread.cpu_context + 1, \
&next->thread.cpu_context); \
} while (0)
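
With the switch_to.h change, the OCD PID trace writes no longer rely on a separate finish_arch_switch() hook; they are folded into switch_to() itself through the ocd_switch() helper, which expands to nothing when CONFIG_OWNERSHIP_TRACE is not set. A sketch of what switch_to() effectively expands to with ownership tracing enabled:

	/* Effective expansion of switch_to(prev, next, last) with CONFIG_OWNERSHIP_TRACE=y (sketch) */
	do {
		ocd_write(PID, prev->pid);	/* task being switched out */
		ocd_write(PID, next->pid);	/* task being switched in */
		last = __switch_to(prev, &prev->thread.cpu_context + 1,
				   &next->thread.cpu_context);
	} while (0);
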