author    José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit    e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree      d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/arch/alpha
parent    f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. The kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.
During the rebase, the following patch collided: "Force tick interrupt and
get rid of softirq magic" (I70131fb85). The colliding hunks were dropped
because their logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/alpha')
-rw-r--r--  kernel/arch/alpha/Kconfig                        1
-rw-r--r--  kernel/arch/alpha/include/asm/Kbuild             2
-rw-r--r--  kernel/arch/alpha/include/asm/atomic.h          50
-rw-r--r--  kernel/arch/alpha/include/asm/cmpxchg.h          2
-rw-r--r--  kernel/arch/alpha/include/asm/dma-mapping.h     36
-rw-r--r--  kernel/arch/alpha/include/asm/io.h               4
-rw-r--r--  kernel/arch/alpha/include/asm/pci.h             18
-rw-r--r--  kernel/arch/alpha/include/asm/serial.h           2
-rw-r--r--  kernel/arch/alpha/include/asm/spinlock.h         5
-rw-r--r--  kernel/arch/alpha/include/asm/word-at-a-time.h   2
-rw-r--r--  kernel/arch/alpha/include/uapi/asm/mman.h        3
-rw-r--r--  kernel/arch/alpha/kernel/core_irongate.c         1
-rw-r--r--  kernel/arch/alpha/kernel/irq.c                   4
-rw-r--r--  kernel/arch/alpha/kernel/osf_sys.c              13
-rw-r--r--  kernel/arch/alpha/kernel/pci-noop.c             10
-rw-r--r--  kernel/arch/alpha/kernel/pci.c                   6
-rw-r--r--  kernel/arch/alpha/kernel/pci_iommu.c            11
-rw-r--r--  kernel/arch/alpha/kernel/sys_eiger.c             1
-rw-r--r--  kernel/arch/alpha/kernel/sys_nautilus.c          1
-rw-r--r--  kernel/arch/alpha/kernel/time.c                 18
-rw-r--r--  kernel/arch/alpha/lib/udelay.c                   1
21 files changed, 69 insertions, 122 deletions
diff --git a/kernel/arch/alpha/Kconfig b/kernel/arch/alpha/Kconfig
index bf9e9d3b3..f515a4dbf 100644
--- a/kernel/arch/alpha/Kconfig
+++ b/kernel/arch/alpha/Kconfig
@@ -3,6 +3,7 @@ config ALPHA
default y
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_AOUT
select HAVE_IDE
select HAVE_OPROFILE
diff --git a/kernel/arch/alpha/include/asm/Kbuild b/kernel/arch/alpha/include/asm/Kbuild
index 76aeb8fa5..ffd9cf5ec 100644
--- a/kernel/arch/alpha/include/asm/Kbuild
+++ b/kernel/arch/alpha/include/asm/Kbuild
@@ -5,7 +5,7 @@ generic-y += cputime.h
generic-y += exec.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += preempt.h
-generic-y += scatterlist.h
generic-y += sections.h
generic-y += trace_clock.h
diff --git a/kernel/arch/alpha/include/asm/atomic.h b/kernel/arch/alpha/include/asm/atomic.h
index 8f8eafbed..572b228c4 100644
--- a/kernel/arch/alpha/include/asm/atomic.h
+++ b/kernel/arch/alpha/include/asm/atomic.h
@@ -17,11 +17,11 @@
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) ((v)->counter = (i))
-#define atomic64_set(v,i) ((v)->counter = (i))
+#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
/*
* To get proper branch prediction for the main line, we must branch
@@ -29,13 +29,13 @@
* branch back to restart the operation.
*/
-#define ATOMIC_OP(op) \
+#define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
unsigned long temp; \
__asm__ __volatile__( \
"1: ldl_l %0,%1\n" \
- " " #op "l %0,%2,%0\n" \
+ " " #asm_op " %0,%2,%0\n" \
" stl_c %0,%1\n" \
" beq %0,2f\n" \
".subsection 2\n" \
@@ -45,15 +45,15 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
:"Ir" (i), "m" (v->counter)); \
} \
-#define ATOMIC_OP_RETURN(op) \
+#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
long temp, result; \
smp_mb(); \
__asm__ __volatile__( \
"1: ldl_l %0,%1\n" \
- " " #op "l %0,%3,%2\n" \
- " " #op "l %0,%3,%0\n" \
+ " " #asm_op " %0,%3,%2\n" \
+ " " #asm_op " %0,%3,%0\n" \
" stl_c %0,%1\n" \
" beq %0,2f\n" \
".subsection 2\n" \
@@ -65,13 +65,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return result; \
}
-#define ATOMIC64_OP(op) \
+#define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
unsigned long temp; \
__asm__ __volatile__( \
"1: ldq_l %0,%1\n" \
- " " #op "q %0,%2,%0\n" \
+ " " #asm_op " %0,%2,%0\n" \
" stq_c %0,%1\n" \
" beq %0,2f\n" \
".subsection 2\n" \
@@ -81,15 +81,15 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
:"Ir" (i), "m" (v->counter)); \
} \
-#define ATOMIC64_OP_RETURN(op) \
+#define ATOMIC64_OP_RETURN(op, asm_op) \
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
{ \
long temp, result; \
smp_mb(); \
__asm__ __volatile__( \
"1: ldq_l %0,%1\n" \
- " " #op "q %0,%3,%2\n" \
- " " #op "q %0,%3,%0\n" \
+ " " #asm_op " %0,%3,%2\n" \
+ " " #asm_op " %0,%3,%0\n" \
" stq_c %0,%1\n" \
" beq %0,2f\n" \
".subsection 2\n" \
@@ -101,15 +101,27 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
return result; \
}
-#define ATOMIC_OPS(opg) \
- ATOMIC_OP(opg) \
- ATOMIC_OP_RETURN(opg) \
- ATOMIC64_OP(opg) \
- ATOMIC64_OP_RETURN(opg)
+#define ATOMIC_OPS(op) \
+ ATOMIC_OP(op, op##l) \
+ ATOMIC_OP_RETURN(op, op##l) \
+ ATOMIC64_OP(op, op##q) \
+ ATOMIC64_OP_RETURN(op, op##q)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+#define atomic_andnot atomic_andnot
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, bis)
+ATOMIC_OP(xor, xor)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, bis)
+ATOMIC64_OP(xor, xor)
+
#undef ATOMIC_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
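
Note: the op name alone can no longer generate the mnemonic here, because the
bitwise operations added above use Alpha instructions (and, bic, bis, xor) that
take no l/q width suffix; the mnemonic is therefore passed explicitly as
asm_op. A minimal userspace sketch of the same two-argument stringification
scheme (the ATOMIC_OP_DEMO/demo_* names are hypothetical and only illustrate
the token pasting; they are not part of the patch):

#include <stdio.h>

/* Hypothetical stand-in for the new ATOMIC_OP(op, asm_op) shape: the second
 * argument carries the exact instruction mnemonic instead of deriving it
 * from op, and printf stands in for the real ll/sc assembly sequence. */
#define ATOMIC_OP_DEMO(op, asm_op)				\
static void demo_##op(void)					\
{								\
	printf("atomic_%s would emit the '%s' instruction\n",	\
	       #op, #asm_op);					\
}

ATOMIC_OP_DEMO(add, addl)	/* arithmetic ops keep the l/q width suffix */
ATOMIC_OP_DEMO(andnot, bic)	/* bitwise ops use their own mnemonics */

int main(void)
{
	demo_add();
	demo_andnot();
	return 0;
}
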
diff --git a/kernel/arch/alpha/include/asm/cmpxchg.h b/kernel/arch/alpha/include/asm/cmpxchg.h
index 429e8cd0d..e51177665 100644
--- a/kernel/arch/alpha/include/asm/cmpxchg.h
+++ b/kernel/arch/alpha/include/asm/cmpxchg.h
@@ -66,6 +66,4 @@
#undef __ASM__MB
#undef ____cmpxchg
-#define __HAVE_ARCH_CMPXCHG 1
-
#endif /* _ALPHA_CMPXCHG_H */
diff --git a/kernel/arch/alpha/include/asm/dma-mapping.h b/kernel/arch/alpha/include/asm/dma-mapping.h
index dfa32f061..72a8ca779 100644
--- a/kernel/arch/alpha/include/asm/dma-mapping.h
+++ b/kernel/arch/alpha/include/asm/dma-mapping.h
@@ -12,42 +12,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
#include <asm-generic/dma-mapping-common.h>
-#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
-{
- return get_dma_ops(dev)->alloc(dev, size, dma_handle, gfp, attrs);
-}
-
-#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
-{
- get_dma_ops(dev)->free(dev, size, vaddr, dma_handle, attrs);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return get_dma_ops(dev)->mapping_error(dev, dma_addr);
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- return get_dma_ops(dev)->dma_supported(dev, mask);
-}
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
- return get_dma_ops(dev)->set_dma_mask(dev, mask);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
#define dma_cache_sync(dev, va, size, dir) ((void)0)
#endif /* _ALPHA_DMA_MAPPING_H */
diff --git a/kernel/arch/alpha/include/asm/io.h b/kernel/arch/alpha/include/asm/io.h
index f05bdb4b1..ff4049155 100644
--- a/kernel/arch/alpha/include/asm/io.h
+++ b/kernel/arch/alpha/include/asm/io.h
@@ -297,7 +297,9 @@ static inline void __iomem * ioremap_nocache(unsigned long offset,
unsigned long size)
{
return ioremap(offset, size);
-}
+}
+
+#define ioremap_uc ioremap_nocache
static inline void iounmap(volatile void __iomem *addr)
{
diff --git a/kernel/arch/alpha/include/asm/pci.h b/kernel/arch/alpha/include/asm/pci.h
index f7f680f74..98f2eeee8 100644
--- a/kernel/arch/alpha/include/asm/pci.h
+++ b/kernel/arch/alpha/include/asm/pci.h
@@ -5,7 +5,7 @@
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
#include <asm/machvec.h>
#include <asm-generic/pci-bridge.h>
@@ -71,22 +71,6 @@ extern void pcibios_set_master(struct pci_dev *dev);
/* implement the pci_ DMA API in terms of the generic device dma_ one */
#include <asm-generic/pci-dma-compat.h>
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
- enum pci_dma_burst_strategy *strat,
- unsigned long *strategy_parameter)
-{
- unsigned long cacheline_size;
- u8 byte;
-
- pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
- if (byte == 0)
- cacheline_size = 1024;
- else
- cacheline_size = (int) byte * 4;
-
- *strat = PCI_DMA_BURST_BOUNDARY;
- *strategy_parameter = cacheline_size;
-}
#endif
/* TODO: integrate with include/asm-generic/pci.h ? */
diff --git a/kernel/arch/alpha/include/asm/serial.h b/kernel/arch/alpha/include/asm/serial.h
index 9d263e8d8..22909b83f 100644
--- a/kernel/arch/alpha/include/asm/serial.h
+++ b/kernel/arch/alpha/include/asm/serial.h
@@ -13,7 +13,7 @@
#define BASE_BAUD ( 1843200 / 16 )
/* Standard COM flags (except for COM4, because of the 8514 problem) */
-#ifdef CONFIG_SERIAL_DETECT_IRQ
+#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
#else
diff --git a/kernel/arch/alpha/include/asm/spinlock.h b/kernel/arch/alpha/include/asm/spinlock.h
index 37b570d01..fed9c6f44 100644
--- a/kernel/arch/alpha/include/asm/spinlock.h
+++ b/kernel/arch/alpha/include/asm/spinlock.h
@@ -16,6 +16,11 @@
#define arch_spin_unlock_wait(x) \
do { cpu_relax(); } while ((x)->lock)
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.lock == 0;
+}
+
static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
mb();
diff --git a/kernel/arch/alpha/include/asm/word-at-a-time.h b/kernel/arch/alpha/include/asm/word-at-a-time.h
index 6b340d0f1..902e6ab00 100644
--- a/kernel/arch/alpha/include/asm/word-at-a-time.h
+++ b/kernel/arch/alpha/include/asm/word-at-a-time.h
@@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits)
#endif
}
+#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1)
+
#endif /* _ASM_WORD_AT_A_TIME_H */
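
Note: the new zero_bytemask() turns the byte index reported by find_zero()
into a mask that preserves every byte below the first zero byte. A small
standalone illustration of the arithmetic, using a hypothetical
mask_for_zero_at() helper that applies the same formula to a byte index
directly:

#include <stdio.h>

/* Same formula as the new zero_bytemask(), but taking the byte index
 * directly instead of the has_zero() mask (illustration only). */
#define mask_for_zero_at(idx)	((2ul << ((idx) * 8)) - 1)

int main(void)
{
	/* First zero byte at index 2: bytes 0 and 1 are preserved.
	 * The extra low bit that spills into byte 2 is harmless,
	 * since that byte is zero in the word being masked. */
	printf("%#lx\n", mask_for_zero_at(2));	/* prints 0x1ffff */
	return 0;
}
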
diff --git a/kernel/arch/alpha/include/uapi/asm/mman.h b/kernel/arch/alpha/include/uapi/asm/mman.h
index 0086b472b..f2f949671 100644
--- a/kernel/arch/alpha/include/uapi/asm/mman.h
+++ b/kernel/arch/alpha/include/uapi/asm/mman.h
@@ -37,6 +37,9 @@
#define MCL_CURRENT 8192 /* lock all currently mapped pages */
#define MCL_FUTURE 16384 /* lock all additions to address space */
+#define MCL_ONFAULT 32768 /* lock all pages that are faulted in */
+
+#define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */
#define MADV_NORMAL 0 /* no further special treatment */
#define MADV_RANDOM 1 /* expect random page references */
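
Note: MCL_ONFAULT (for mlockall) and MLOCK_ONFAULT (for the new mlock2
syscall) request that pages be locked lazily as they are faulted in rather
than prefaulted. A hedged userspace sketch of the mlockall() usage; it
assumes the libc headers already expose MCL_ONFAULT (otherwise the value
above would have to be defined by hand), and the flag must be combined with
MCL_CURRENT or MCL_FUTURE:

#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	/* Lock the pages of all current mappings lazily, i.e. only once
	 * they are actually faulted in, instead of prefaulting them all. */
	if (mlockall(MCL_CURRENT | MCL_ONFAULT) != 0) {
		perror("mlockall(MCL_CURRENT | MCL_ONFAULT)");
		return 1;
	}
	return 0;
}
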
diff --git a/kernel/arch/alpha/kernel/core_irongate.c b/kernel/arch/alpha/kernel/core_irongate.c
index 00096df0f..83d0a359a 100644
--- a/kernel/arch/alpha/kernel/core_irongate.c
+++ b/kernel/arch/alpha/kernel/core_irongate.c
@@ -22,7 +22,6 @@
#include <linux/bootmem.h>
#include <asm/ptrace.h>
-#include <asm/pci.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
diff --git a/kernel/arch/alpha/kernel/irq.c b/kernel/arch/alpha/kernel/irq.c
index 51f2c8654..2d6efcff3 100644
--- a/kernel/arch/alpha/kernel/irq.c
+++ b/kernel/arch/alpha/kernel/irq.c
@@ -59,7 +59,7 @@ int irq_select_affinity(unsigned int irq)
cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
last_cpu = cpu;
- cpumask_copy(data->affinity, cpumask_of(cpu));
+ cpumask_copy(irq_data_get_affinity_mask(data), cpumask_of(cpu));
chip->irq_set_affinity(data, cpumask_of(cpu), false);
return 0;
}
@@ -117,6 +117,6 @@ handle_irq(int irq)
}
irq_enter();
- generic_handle_irq_desc(irq, desc);
+ generic_handle_irq_desc(desc);
irq_exit();
}
diff --git a/kernel/arch/alpha/kernel/osf_sys.c b/kernel/arch/alpha/kernel/osf_sys.c
index 36dc91ace..6cc08166f 100644
--- a/kernel/arch/alpha/kernel/osf_sys.c
+++ b/kernel/arch/alpha/kernel/osf_sys.c
@@ -1138,6 +1138,7 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
{
struct rusage32 r;
cputime_t utime, stime;
+ unsigned long utime_jiffies, stime_jiffies;
if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
return -EINVAL;
@@ -1146,14 +1147,18 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
switch (who) {
case RUSAGE_SELF:
task_cputime(current, &utime, &stime);
- jiffies_to_timeval32(utime, &r.ru_utime);
- jiffies_to_timeval32(stime, &r.ru_stime);
+ utime_jiffies = cputime_to_jiffies(utime);
+ stime_jiffies = cputime_to_jiffies(stime);
+ jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
+ jiffies_to_timeval32(stime_jiffies, &r.ru_stime);
r.ru_minflt = current->min_flt;
r.ru_majflt = current->maj_flt;
break;
case RUSAGE_CHILDREN:
- jiffies_to_timeval32(current->signal->cutime, &r.ru_utime);
- jiffies_to_timeval32(current->signal->cstime, &r.ru_stime);
+ utime_jiffies = cputime_to_jiffies(current->signal->cutime);
+ stime_jiffies = cputime_to_jiffies(current->signal->cstime);
+ jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
+ jiffies_to_timeval32(stime_jiffies, &r.ru_stime);
r.ru_minflt = current->signal->cmin_flt;
r.ru_majflt = current->signal->cmaj_flt;
break;
diff --git a/kernel/arch/alpha/kernel/pci-noop.c b/kernel/arch/alpha/kernel/pci-noop.c
index df24b76f9..2b1f4a1e9 100644
--- a/kernel/arch/alpha/kernel/pci-noop.c
+++ b/kernel/arch/alpha/kernel/pci-noop.c
@@ -166,15 +166,6 @@ static int alpha_noop_supported(struct device *dev, u64 mask)
return mask < 0x00ffffffUL ? 0 : 1;
}
-static int alpha_noop_set_mask(struct device *dev, u64 mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
- return 0;
-}
-
struct dma_map_ops alpha_noop_ops = {
.alloc = alpha_noop_alloc_coherent,
.free = alpha_noop_free_coherent,
@@ -182,7 +173,6 @@ struct dma_map_ops alpha_noop_ops = {
.map_sg = alpha_noop_map_sg,
.mapping_error = alpha_noop_mapping_error,
.dma_supported = alpha_noop_supported,
- .set_dma_mask = alpha_noop_set_mask,
};
struct dma_map_ops *dma_ops = &alpha_noop_ops;
diff --git a/kernel/arch/alpha/kernel/pci.c b/kernel/arch/alpha/kernel/pci.c
index 82f738e5d..5f387ee5b 100644
--- a/kernel/arch/alpha/kernel/pci.c
+++ b/kernel/arch/alpha/kernel/pci.c
@@ -245,9 +245,9 @@ void pcibios_fixup_bus(struct pci_bus *bus)
struct pci_dev *dev = bus->self;
if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
- (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
- pci_read_bridge_bases(bus);
- }
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ pci_read_bridge_bases(bus);
+ }
list_for_each_entry(dev, &bus->devices, bus_list) {
pdev_save_srm_config(dev);
diff --git a/kernel/arch/alpha/kernel/pci_iommu.c b/kernel/arch/alpha/kernel/pci_iommu.c
index eddee7720..8969bf2df 100644
--- a/kernel/arch/alpha/kernel/pci_iommu.c
+++ b/kernel/arch/alpha/kernel/pci_iommu.c
@@ -939,16 +939,6 @@ static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
return dma_addr == 0;
}
-static int alpha_pci_set_mask(struct device *dev, u64 mask)
-{
- if (!dev->dma_mask ||
- !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
- return -EIO;
-
- *dev->dma_mask = mask;
- return 0;
-}
-
struct dma_map_ops alpha_pci_ops = {
.alloc = alpha_pci_alloc_coherent,
.free = alpha_pci_free_coherent,
@@ -958,7 +948,6 @@ struct dma_map_ops alpha_pci_ops = {
.unmap_sg = alpha_pci_unmap_sg,
.mapping_error = alpha_pci_mapping_error,
.dma_supported = alpha_pci_supported,
- .set_dma_mask = alpha_pci_set_mask,
};
struct dma_map_ops *dma_ops = &alpha_pci_ops;
diff --git a/kernel/arch/alpha/kernel/sys_eiger.c b/kernel/arch/alpha/kernel/sys_eiger.c
index 79d69d7f6..15f42083b 100644
--- a/kernel/arch/alpha/kernel/sys_eiger.c
+++ b/kernel/arch/alpha/kernel/sys_eiger.c
@@ -22,7 +22,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pci.h>
#include <asm/pgtable.h>
#include <asm/core_tsunami.h>
#include <asm/hwrpb.h>
diff --git a/kernel/arch/alpha/kernel/sys_nautilus.c b/kernel/arch/alpha/kernel/sys_nautilus.c
index 700686d04..2cfaa0e5c 100644
--- a/kernel/arch/alpha/kernel/sys_nautilus.c
+++ b/kernel/arch/alpha/kernel/sys_nautilus.c
@@ -39,7 +39,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pci.h>
#include <asm/pgtable.h>
#include <asm/core_irongate.h>
#include <asm/hwrpb.h>
diff --git a/kernel/arch/alpha/kernel/time.c b/kernel/arch/alpha/kernel/time.c
index 643a9dcdf..5b6202a82 100644
--- a/kernel/arch/alpha/kernel/time.c
+++ b/kernel/arch/alpha/kernel/time.c
@@ -93,7 +93,7 @@ rtc_timer_interrupt(int irq, void *dev)
struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
/* Don't run the hook for UNUSED or SHUTDOWN. */
- if (likely(ce->mode == CLOCK_EVT_MODE_PERIODIC))
+ if (likely(clockevent_state_periodic(ce)))
ce->event_handler(ce);
if (test_irq_work_pending()) {
@@ -104,13 +104,6 @@ rtc_timer_interrupt(int irq, void *dev)
return IRQ_HANDLED;
}
-static void
-rtc_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
-{
- /* The mode member of CE is updated in generic code.
- Since we only support periodic events, nothing to do. */
-}
-
static int
rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
@@ -129,7 +122,6 @@ init_rtc_clockevent(void)
.features = CLOCK_EVT_FEAT_PERIODIC,
.rating = 100,
.cpumask = cpumask_of(cpu),
- .set_mode = rtc_ce_set_mode,
.set_next_event = rtc_ce_set_next_event,
};
@@ -161,12 +153,12 @@ static struct clocksource qemu_cs = {
* The QEMU alarm as a clock_event_device primitive.
*/
-static void
-qemu_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
+static int qemu_ce_shutdown(struct clock_event_device *ce)
{
/* The mode member of CE is updated for us in generic code.
Just make sure that the event is disabled. */
qemu_set_alarm_abs(0);
+ return 0;
}
static int
@@ -197,7 +189,9 @@ init_qemu_clockevent(void)
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 400,
.cpumask = cpumask_of(cpu),
- .set_mode = qemu_ce_set_mode,
+ .set_state_shutdown = qemu_ce_shutdown,
+ .set_state_oneshot = qemu_ce_shutdown,
+ .tick_resume = qemu_ce_shutdown,
.set_next_event = qemu_ce_set_next_event,
};
diff --git a/kernel/arch/alpha/lib/udelay.c b/kernel/arch/alpha/lib/udelay.c
index 69d52aa37..f2d81ff38 100644
--- a/kernel/arch/alpha/lib/udelay.c
+++ b/kernel/arch/alpha/lib/udelay.c
@@ -30,6 +30,7 @@ __delay(int loops)
" bgt %0,1b"
: "=&r" (tmp), "=r" (loops) : "1"(loops));
}
+EXPORT_SYMBOL(__delay);
#ifdef CONFIG_SMP
#define LPJ cpu_data[smp_processor_id()].loops_per_jiffy