Diffstat (limited to 'kernel/arch/alpha/include')
-rw-r--r--  kernel/arch/alpha/include/asm/Kbuild            |  2
-rw-r--r--  kernel/arch/alpha/include/asm/atomic.h          | 50
-rw-r--r--  kernel/arch/alpha/include/asm/cmpxchg.h         |  2
-rw-r--r--  kernel/arch/alpha/include/asm/dma-mapping.h     | 36
-rw-r--r--  kernel/arch/alpha/include/asm/io.h              |  4
-rw-r--r--  kernel/arch/alpha/include/asm/pci.h             | 18
-rw-r--r--  kernel/arch/alpha/include/asm/serial.h          |  2
-rw-r--r--  kernel/arch/alpha/include/asm/spinlock.h        |  5
-rw-r--r--  kernel/arch/alpha/include/asm/word-at-a-time.h  |  2
-rw-r--r--  kernel/arch/alpha/include/uapi/asm/mman.h       |  3
10 files changed, 47 insertions, 77 deletions
diff --git a/kernel/arch/alpha/include/asm/Kbuild b/kernel/arch/alpha/include/asm/Kbuild
index 76aeb8fa5..ffd9cf5ec 100644
--- a/kernel/arch/alpha/include/asm/Kbuild
+++ b/kernel/arch/alpha/include/asm/Kbuild
@@ -5,7 +5,7 @@ generic-y += cputime.h
 generic-y += exec.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += preempt.h
-generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += trace_clock.h
diff --git a/kernel/arch/alpha/include/asm/atomic.h b/kernel/arch/alpha/include/asm/atomic.h
index 8f8eafbed..572b228c4 100644
--- a/kernel/arch/alpha/include/asm/atomic.h
+++ b/kernel/arch/alpha/include/asm/atomic.h
@@ -17,11 +17,11 @@
 #define ATOMIC_INIT(i) { (i) }
 #define ATOMIC64_INIT(i) { (i) }
 
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic64_read(v) READ_ONCE((v)->counter)
 
-#define atomic_set(v,i) ((v)->counter = (i))
-#define atomic64_set(v,i) ((v)->counter = (i))
+#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
 
 /*
  * To get proper branch prediction for the main line, we must branch
@@ -29,13 +29,13 @@
  * branch back to restart the operation.
  */
 
-#define ATOMIC_OP(op) \
+#define ATOMIC_OP(op, asm_op) \
 static __inline__ void atomic_##op(int i, atomic_t * v) \
 { \
 	unsigned long temp; \
 	__asm__ __volatile__( \
 	"1: ldl_l %0,%1\n" \
-	" " #op "l %0,%2,%0\n" \
+	" " #asm_op " %0,%2,%0\n" \
 	" stl_c %0,%1\n" \
 	" beq %0,2f\n" \
 	".subsection 2\n" \
@@ -45,15 +45,15 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
 	:"Ir" (i), "m" (v->counter)); \
 } \
 
-#define ATOMIC_OP_RETURN(op) \
+#define ATOMIC_OP_RETURN(op, asm_op) \
 static inline int atomic_##op##_return(int i, atomic_t *v) \
 { \
 	long temp, result; \
 	smp_mb(); \
 	__asm__ __volatile__( \
 	"1: ldl_l %0,%1\n" \
-	" " #op "l %0,%3,%2\n" \
-	" " #op "l %0,%3,%0\n" \
+	" " #asm_op " %0,%3,%2\n" \
+	" " #asm_op " %0,%3,%0\n" \
 	" stl_c %0,%1\n" \
 	" beq %0,2f\n" \
 	".subsection 2\n" \
@@ -65,13 +65,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return result; \
 }
 
-#define ATOMIC64_OP(op) \
+#define ATOMIC64_OP(op, asm_op) \
 static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 { \
 	unsigned long temp; \
 	__asm__ __volatile__( \
 	"1: ldq_l %0,%1\n" \
-	" " #op "q %0,%2,%0\n" \
+	" " #asm_op " %0,%2,%0\n" \
 	" stq_c %0,%1\n" \
 	" beq %0,2f\n" \
 	".subsection 2\n" \
@@ -81,15 +81,15 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 	:"Ir" (i), "m" (v->counter)); \
 } \
 
-#define ATOMIC64_OP_RETURN(op) \
+#define ATOMIC64_OP_RETURN(op, asm_op) \
 static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
 { \
 	long temp, result; \
 	smp_mb(); \
 	__asm__ __volatile__( \
 	"1: ldq_l %0,%1\n" \
-	" " #op "q %0,%3,%2\n" \
-	" " #op "q %0,%3,%0\n" \
+	" " #asm_op " %0,%3,%2\n" \
+	" " #asm_op " %0,%3,%0\n" \
 	" stq_c %0,%1\n" \
 	" beq %0,2f\n" \
 	".subsection 2\n" \
@@ -101,15 +101,27 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
 	return result; \
 }
 
-#define ATOMIC_OPS(opg) \
-	ATOMIC_OP(opg) \
-	ATOMIC_OP_RETURN(opg) \
-	ATOMIC64_OP(opg) \
-	ATOMIC64_OP_RETURN(opg)
+#define ATOMIC_OPS(op) \
+	ATOMIC_OP(op, op##l) \
+	ATOMIC_OP_RETURN(op, op##l) \
+	ATOMIC64_OP(op, op##q) \
+	ATOMIC64_OP_RETURN(op, op##q)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define atomic_andnot atomic_andnot
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, bis)
+ATOMIC_OP(xor, xor)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, bis)
+ATOMIC64_OP(xor, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
diff --git a/kernel/arch/alpha/include/asm/cmpxchg.h b/kernel/arch/alpha/include/asm/cmpxchg.h
index 429e8cd0d..e51177665 100644
--- a/kernel/arch/alpha/include/asm/cmpxchg.h
+++ b/kernel/arch/alpha/include/asm/cmpxchg.h
@@ -66,6 +66,4 @@
 #undef __ASM__MB
 #undef ____cmpxchg
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #endif /* _ALPHA_CMPXCHG_H */
diff --git a/kernel/arch/alpha/include/asm/dma-mapping.h b/kernel/arch/alpha/include/asm/dma-mapping.h
index dfa32f061..72a8ca779 100644
--- a/kernel/arch/alpha/include/asm/dma-mapping.h
+++ b/kernel/arch/alpha/include/asm/dma-mapping.h
@@ -12,42 +12,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 
 #include <asm-generic/dma-mapping-common.h>
 
-#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t gfp,
-				    struct dma_attrs *attrs)
-{
-	return get_dma_ops(dev)->alloc(dev, size, dma_handle, gfp, attrs);
-}
-
-#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *vaddr, dma_addr_t dma_handle,
-				  struct dma_attrs *attrs)
-{
-	get_dma_ops(dev)->free(dev, size, vaddr, dma_handle, attrs);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return get_dma_ops(dev)->mapping_error(dev, dma_addr);
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	return get_dma_ops(dev)->dma_supported(dev, mask);
-}
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-	return get_dma_ops(dev)->set_dma_mask(dev, mask);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
 #define dma_cache_sync(dev, va, size, dir) ((void)0)
 
 #endif /* _ALPHA_DMA_MAPPING_H */
diff --git a/kernel/arch/alpha/include/asm/io.h b/kernel/arch/alpha/include/asm/io.h
index f05bdb4b1..ff4049155 100644
--- a/kernel/arch/alpha/include/asm/io.h
+++ b/kernel/arch/alpha/include/asm/io.h
@@ -297,7 +297,9 @@ static inline void __iomem * ioremap_nocache(unsigned long offset,
 				     unsigned long size)
 {
 	return ioremap(offset, size);
-}
+}
+
+#define ioremap_uc ioremap_nocache
 
 static inline void iounmap(volatile void __iomem *addr)
 {
diff --git a/kernel/arch/alpha/include/asm/pci.h b/kernel/arch/alpha/include/asm/pci.h
index f7f680f74..98f2eeee8 100644
--- a/kernel/arch/alpha/include/asm/pci.h
+++ b/kernel/arch/alpha/include/asm/pci.h
@@ -5,7 +5,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/dma-mapping.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 #include <asm/machvec.h>
 #include <asm-generic/pci-bridge.h>
 
@@ -71,22 +71,6 @@ extern void pcibios_set_master(struct pci_dev *dev);
 /* implement the pci_ DMA API in terms of the generic device dma_ one */
 #include <asm-generic/pci-dma-compat.h>
 
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-					enum pci_dma_burst_strategy *strat,
-					unsigned long *strategy_parameter)
-{
-	unsigned long cacheline_size;
-	u8 byte;
-
-	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
-	if (byte == 0)
-		cacheline_size = 1024;
-	else
-		cacheline_size = (int) byte * 4;
-
-	*strat = PCI_DMA_BURST_BOUNDARY;
-	*strategy_parameter = cacheline_size;
-}
 #endif
 
 /* TODO: integrate with include/asm-generic/pci.h ? */
diff --git a/kernel/arch/alpha/include/asm/serial.h b/kernel/arch/alpha/include/asm/serial.h
index 9d263e8d8..22909b83f 100644
--- a/kernel/arch/alpha/include/asm/serial.h
+++ b/kernel/arch/alpha/include/asm/serial.h
@@ -13,7 +13,7 @@
 #define BASE_BAUD ( 1843200 / 16 )
 
 /* Standard COM flags (except for COM4, because of the 8514 problem) */
-#ifdef CONFIG_SERIAL_DETECT_IRQ
+#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
 #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
 #define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
 #else
diff --git a/kernel/arch/alpha/include/asm/spinlock.h b/kernel/arch/alpha/include/asm/spinlock.h
index 37b570d01..fed9c6f44 100644
--- a/kernel/arch/alpha/include/asm/spinlock.h
+++ b/kernel/arch/alpha/include/asm/spinlock.h
@@ -16,6 +16,11 @@
 #define arch_spin_unlock_wait(x) \
 		do { cpu_relax(); } while ((x)->lock)
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.lock == 0;
+}
+
 static inline void arch_spin_unlock(arch_spinlock_t * lock)
 {
 	mb();
diff --git a/kernel/arch/alpha/include/asm/word-at-a-time.h b/kernel/arch/alpha/include/asm/word-at-a-time.h
index 6b340d0f1..902e6ab00 100644
--- a/kernel/arch/alpha/include/asm/word-at-a-time.h
+++ b/kernel/arch/alpha/include/asm/word-at-a-time.h
@@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits)
 #endif
 }
 
+#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1)
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/kernel/arch/alpha/include/uapi/asm/mman.h b/kernel/arch/alpha/include/uapi/asm/mman.h
index 0086b472b..f2f949671 100644
--- a/kernel/arch/alpha/include/uapi/asm/mman.h
+++ b/kernel/arch/alpha/include/uapi/asm/mman.h
@@ -37,6 +37,9 @@
 
 #define MCL_CURRENT 8192 /* lock all currently mapped pages */
 #define MCL_FUTURE 16384 /* lock all additions to address space */
+#define MCL_ONFAULT 32768 /* lock all pages that are faulted in */
+
+#define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */
 
 #define MADV_NORMAL 0 /* no further special treatment */
 #define MADV_RANDOM 1 /* expect random page references */
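
Note on the atomic.h hunks: the macros gain a second parameter because the Alpha
mnemonics for the new logical ops do not follow the old "<op>l"/"<op>q" stringizing
pattern (and-not is "bic", or is "bis"). As an illustration only, and not part of the
patch, the fragment below is roughly what ATOMIC_OP(andnot, bic) expands to with the
new macro body shown above; the local atomic_t typedef is a stand-in for the kernel's
definition so the sketch is self-contained, and it of course only assembles on Alpha.

typedef struct { int counter; } atomic_t;	/* stub for <linux/types.h> */

static __inline__ void atomic_andnot(int i, atomic_t *v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1: ldl_l %0,%1\n"		/* load-locked v->counter */
	" bic %0,%2,%0\n"		/* temp = temp & ~i */
	" stl_c %0,%1\n"		/* store-conditional; %0 becomes 0 on failure */
	" beq %0,2f\n"			/* branch out of line to retry */
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}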
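
Note on the uapi/asm/mman.h hunk: MCL_ONFAULT is a new mlockall(2) flag and
MLOCK_ONFAULT the matching mlock2(2) flag; both lock pages only as they are faulted
in instead of prefaulting the whole range. A minimal user-space sketch, not taken
from this patch, assuming a libc that exposes the mlock2() wrapper and these flags
through <sys/mman.h>:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return EXIT_FAILURE;

	/* Lock lazily: each page is pinned on its first fault. */
	if (mlock2(buf, len, MLOCK_ONFAULT) != 0) {
		perror("mlock2");
		return EXIT_FAILURE;
	}

	buf[0] = 1;		/* faults in (and locks) only the first page */

	munlock(buf, len);
	munmap(buf, len);
	return EXIT_SUCCESS;
}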